// SPDX-License-Identifier: GPL-2.0
/*
 * NVMe Over Fabrics Target Passthrough command implementation.
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/module.h>

#include "../host/nvme.h"
#include "nvmet.h"

MODULE_IMPORT_NS(NVME_TARGET_PASSTHRU);

/*
 * xarray keyed by the passthru controller's cntlid, used to enforce that
 * only one passthru subsystem claims a given nvme controller.
 */
static DEFINE_XARRAY(passthru_subsystems);

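/*
 * Rewrite selected fields of the passthru controller's Identify Controller
 * data so the response reflects the fabrics target rather than the
 * underlying controller (cntlid, version, MDTS, SGL support, subnqn,
 * fabrics queue entry sizes, and similar).
 */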
static u16 nvmet_passthru_override_id_ctrl(struct nvmet_req *req)
{
	struct nvmet_ctrl *ctrl = req->sq->ctrl;
	struct nvme_ctrl *pctrl = ctrl->subsys->passthru_ctrl;
	u16 status = NVME_SC_SUCCESS;
	struct nvme_id_ctrl *id;
	u32 max_hw_sectors;
	int page_shift;

	id = kzalloc(sizeof(*id), GFP_KERNEL);
	if (!id)
		return NVME_SC_INTERNAL;

	status = nvmet_copy_from_sgl(req, 0, id, sizeof(*id));
	if (status)
		goto out_free;

	id->cntlid = cpu_to_le16(ctrl->cntlid);
	id->ver = cpu_to_le32(ctrl->subsys->ver);

	/*
	 * The passthru NVMe driver may have a limit on the number of segments
	 * which depends on the host's memory fragmentation. To solve this,
	 * ensure mdts is limited to the pages equal to the number of segments.
	 */
	max_hw_sectors = min_not_zero(pctrl->max_segments << (PAGE_SHIFT - 9),
				      pctrl->max_hw_sectors);

	page_shift = NVME_CAP_MPSMIN(ctrl->cap) + 12;

	id->mdts = ilog2(max_hw_sectors) + 9 - page_shift;

	id->acl = 3;
	/*
	 * We export the aerl limit of the fabrics controller; update this
	 * when passthru-based aerl support is added.
	 */
	id->aerl = NVMET_ASYNC_EVENTS - 1;

	/* emulate keep alive (kas), as most PCIe controllers don't support it */
	id->kas = cpu_to_le16(NVMET_KAS);

	/* don't support host memory buffer */
	id->hmpre = 0;
	id->hmmin = 0;

	id->sqes = min_t(__u8, ((0x6 << 4) | 0x6), id->sqes);
	id->cqes = min_t(__u8, ((0x4 << 4) | 0x4), id->cqes);
	id->maxcmd = cpu_to_le16(NVMET_MAX_CMD);

	/* don't support fused commands */
	id->fuses = 0;

	id->sgls = cpu_to_le32(1 << 0); /* we always support SGLs */
	if (ctrl->ops->flags & NVMF_KEYED_SGLS)
		id->sgls |= cpu_to_le32(1 << 2);
	if (req->port->inline_data_size)
		id->sgls |= cpu_to_le32(1 << 20);

	/*
	 * When the passthru controller is set up over the loop transport it
	 * would otherwise export the passthru controller's subsysnqn, which
	 * makes subsystem initialisation on the host side fail with a
	 * duplicate subsysnqn. Mask the passthru controller's subsysnqn with
	 * the target controller's subsysnqn to prevent that.
	 */
	memcpy(id->subnqn, ctrl->subsysnqn, sizeof(id->subnqn));

	/* use fabric id-ctrl values */
	id->ioccsz = cpu_to_le32((sizeof(struct nvme_command) +
				req->port->inline_data_size) / 16);
	id->iorcsz = cpu_to_le32(sizeof(struct nvme_completion) / 16);

	id->msdbd = ctrl->ops->msdbd;

	/* support multipath connections with fabrics */
	id->cmic |= 1 << 1;

	/* disable reservations, see nvmet_parse_passthru_io_cmd() */
	id->oncs &= cpu_to_le16(~NVME_CTRL_ONCS_RESERVATIONS);

	status = nvmet_copy_to_sgl(req, 0, id, sizeof(struct nvme_id_ctrl));

out_free:
	kfree(id);
	return status;
}

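/*
 * Rewrite the passthru controller's Identify Namespace data: clear any LBA
 * formats that carry metadata, the extended-LBA flag in FLBAS and the
 * metadata capabilities field, since the target does not support metadata.
 */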
static u16 nvmet_passthru_override_id_ns(struct nvmet_req *req)
{
	u16 status = NVME_SC_SUCCESS;
	struct nvme_id_ns *id;
	int i;

	id = kzalloc(sizeof(*id), GFP_KERNEL);
	if (!id)
		return NVME_SC_INTERNAL;

	status = nvmet_copy_from_sgl(req, 0, id, sizeof(struct nvme_id_ns));
	if (status)
		goto out_free;

	for (i = 0; i < (id->nlbaf + 1); i++)
		if (id->lbaf[i].ms)
			memset(&id->lbaf[i], 0, sizeof(id->lbaf[i]));

	id->flbas = id->flbas & ~(1 << 4);

	/*
	 * Presently the NVMe target code does not support sending metadata,
	 * so it must be disabled here. This should be updated once the
	 * target starts supporting metadata.
	 */
	id->mc = 0;

	status = nvmet_copy_to_sgl(req, 0, id, sizeof(*id));

out_free:
	kfree(id);
	return status;
}

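/*
 * Work item used when a passthru command has to be executed synchronously in
 * process context: run the request, fix up Identify results where needed,
 * then complete the fabrics request and free the passthru request.
 */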
static void nvmet_passthru_execute_cmd_work(struct work_struct *w)
{
	struct nvmet_req *req = container_of(w, struct nvmet_req, p.work);
	struct request *rq = req->p.rq;
	u16 status;

	nvme_execute_passthru_rq(rq);

	status = nvme_req(rq)->status;
	if (status == NVME_SC_SUCCESS &&
	    req->cmd->common.opcode == nvme_admin_identify) {
		switch (req->cmd->identify.cns) {
		case NVME_ID_CNS_CTRL:
			nvmet_passthru_override_id_ctrl(req);
			break;
		case NVME_ID_CNS_NS:
			nvmet_passthru_override_id_ns(req);
			break;
		}
	}

	req->cqe->result = nvme_req(rq)->result;
	nvmet_req_complete(req, status);
	blk_mq_free_request(rq);
}

static void nvmet_passthru_req_done(struct request *rq,
				    blk_status_t blk_status)
{
	struct nvmet_req *req = rq->end_io_data;

	req->cqe->result = nvme_req(rq)->result;
	nvmet_req_complete(req, nvme_req(rq)->status);
	blk_mq_free_request(rq);
}

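/*
 * Map the fabrics request's scatterlist onto a bio and attach that bio to
 * the passthru block request.
 */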
static int nvmet_passthru_map_sg(struct nvmet_req *req, struct request *rq)
{
	int sg_cnt = req->sg_cnt;
	struct scatterlist *sg;
	int op_flags = 0;
	struct bio *bio;
	int i, ret;

	if (req->cmd->common.opcode == nvme_cmd_flush)
		op_flags = REQ_FUA;
	else if (nvme_is_write(req->cmd))
		op_flags = REQ_SYNC | REQ_IDLE;

	bio = bio_alloc(GFP_KERNEL, min(sg_cnt, BIO_MAX_PAGES));
	bio->bi_end_io = bio_put;
	bio->bi_opf = req_op(rq) | op_flags;

	for_each_sg(req->sg, sg, req->sg_cnt, i) {
		if (bio_add_pc_page(rq->q, bio, sg_page(sg), sg->length,
				    sg->offset) < sg->length) {
			bio_put(bio);
			return -EINVAL;
		}
		sg_cnt--;
	}

	ret = blk_rq_append_bio(rq, &bio);
	if (unlikely(ret)) {
		bio_put(bio);
		return ret;
	}

	return 0;
}

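/*
 * Build and issue a passthru request on the underlying controller: look up
 * the namespace for I/O queue commands, allocate the request, map the data,
 * and either run it from a work item (when command effects must be handled)
 * or dispatch it asynchronously with nvmet_passthru_req_done() as the
 * completion callback.
 */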
static void nvmet_passthru_execute_cmd(struct nvmet_req *req)
{
	struct nvme_ctrl *ctrl = nvmet_req_passthru_ctrl(req);
	struct request_queue *q = ctrl->admin_q;
	struct nvme_ns *ns = NULL;
	struct request *rq = NULL;
	u32 effects;
	u16 status;
	int ret;

	if (likely(req->sq->qid != 0)) {
		u32 nsid = le32_to_cpu(req->cmd->common.nsid);

		ns = nvme_find_get_ns(ctrl, nsid);
		if (unlikely(!ns)) {
			pr_err("failed to get passthru ns nsid:%u\n", nsid);
			status = NVME_SC_INVALID_NS | NVME_SC_DNR;
			goto out;
		}

		q = ns->queue;
	}

	rq = nvme_alloc_request(q, req->cmd, BLK_MQ_REQ_NOWAIT, NVME_QID_ANY);
	if (IS_ERR(rq)) {
		status = NVME_SC_INTERNAL;
		goto out_put_ns;
	}

	if (req->sg_cnt) {
		ret = nvmet_passthru_map_sg(req, rq);
		if (unlikely(ret)) {
			status = NVME_SC_INTERNAL;
			goto out_put_req;
		}
	}

	/*
	 * If the command we are about to execute has effects, it must be
	 * executed synchronously from a work item: the request done callback
	 * typically runs in interrupt context, where the effects handling in
	 * nvme_execute_passthru_rq() cannot be performed.
	 */
	effects = nvme_command_effects(ctrl, ns, req->cmd->common.opcode);
	if (req->p.use_workqueue || effects) {
		INIT_WORK(&req->p.work, nvmet_passthru_execute_cmd_work);
		req->p.rq = rq;
		schedule_work(&req->p.work);
	} else {
		rq->end_io_data = req;
		blk_execute_rq_nowait(rq->q, ns ? ns->disk : NULL, rq, 0,
				      nvmet_passthru_req_done);
	}

	if (ns)
		nvme_put_ns(ns);

	return;

out_put_req:
	blk_mq_free_request(rq);
out_put_ns:
	if (ns)
		nvme_put_ns(ns);
out:
	nvmet_req_complete(req, status);
}

/*
 * Emulate Set Features for the Host Behavior Support feature: ensure that
 * the behaviour requested by the target's host matches what the passthru
 * device's host has already configured, and fail otherwise.
 */
static void nvmet_passthru_set_host_behaviour(struct nvmet_req *req)
{
	struct nvme_ctrl *ctrl = nvmet_req_passthru_ctrl(req);
	struct nvme_feat_host_behavior *host;
	u16 status = NVME_SC_INTERNAL;
	int ret;

	host = kzalloc(sizeof(*host) * 2, GFP_KERNEL);
	if (!host)
		goto out_complete_req;

	ret = nvme_get_features(ctrl, NVME_FEAT_HOST_BEHAVIOR, 0,
				host, sizeof(*host), NULL);
	if (ret)
		goto out_free_host;

	status = nvmet_copy_from_sgl(req, 0, &host[1], sizeof(*host));
	if (status)
		goto out_free_host;

	if (memcmp(&host[0], &host[1], sizeof(host[0]))) {
		pr_warn("target host has requested different behaviour from the local host\n");
		status = NVME_SC_INTERNAL;
	}

out_free_host:
	kfree(host);
out_complete_req:
	nvmet_req_complete(req, status);
}

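/* Default setup for a command that is simply passed through to the device. */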
static u16 nvmet_setup_passthru_command(struct nvmet_req *req)
{
	req->p.use_workqueue = false;
	req->execute = nvmet_passthru_execute_cmd;
	return NVME_SC_SUCCESS;
}

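/*
 * I/O command parsing for passthru: everything is passed through except
 * reservation commands, which are rejected.
 */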
u16 nvmet_parse_passthru_io_cmd(struct nvmet_req *req)
{
	/* Reject any commands with non-SGL flags set (i.e. fused commands) */
	if (req->cmd->common.flags & ~NVME_CMD_SGL_ALL)
		return NVME_SC_INVALID_FIELD;

	switch (req->cmd->common.opcode) {
	case nvme_cmd_resv_register:
	case nvme_cmd_resv_report:
	case nvme_cmd_resv_acquire:
	case nvme_cmd_resv_release:
		/*
		 * Reservations cannot be supported properly because the
		 * underlying device has no way of differentiating between
		 * the different hosts that connect via fabrics. This could
		 * potentially be emulated in the future if regular targets
		 * grow support for this feature.
		 */
		return NVME_SC_INVALID_OPCODE | NVME_SC_DNR;
	}

	return nvmet_setup_passthru_command(req);
}

/*
 * Only features that are emulated or specifically allowed in the following
 * list are passed down to the passthru controller. This function implements
 * the allow list for both Get Features and Set Features.
 */
static u16 nvmet_passthru_get_set_features(struct nvmet_req *req)
{
	switch (le32_to_cpu(req->cmd->features.fid)) {
	case NVME_FEAT_ARBITRATION:
	case NVME_FEAT_POWER_MGMT:
	case NVME_FEAT_LBA_RANGE:
	case NVME_FEAT_TEMP_THRESH:
	case NVME_FEAT_ERR_RECOVERY:
	case NVME_FEAT_VOLATILE_WC:
	case NVME_FEAT_WRITE_ATOMIC:
	case NVME_FEAT_AUTO_PST:
	case NVME_FEAT_TIMESTAMP:
	case NVME_FEAT_HCTM:
	case NVME_FEAT_NOPSC:
	case NVME_FEAT_RRL:
	case NVME_FEAT_PLM_CONFIG:
	case NVME_FEAT_PLM_WINDOW:
	case NVME_FEAT_HOST_BEHAVIOR:
	case NVME_FEAT_SANITIZE:
	case NVME_FEAT_VENDOR_START ... NVME_FEAT_VENDOR_END:
		return nvmet_setup_passthru_command(req);

	case NVME_FEAT_ASYNC_EVENT:
		/* There is no support for forwarding ASYNC events */
	case NVME_FEAT_IRQ_COALESCE:
	case NVME_FEAT_IRQ_CONFIG:
		/* The IRQ settings will not apply to the target controller */
	case NVME_FEAT_HOST_MEM_BUF:
		/*
		 * Any HMB that's set will not be passed through and will
		 * not work as expected
		 */
	case NVME_FEAT_SW_PROGRESS:
		/*
		 * The Pre-Boot Software Load Count doesn't make much
		 * sense for a target to export
		 */
	case NVME_FEAT_RESV_MASK:
	case NVME_FEAT_RESV_PERSIST:
		/* No reservations, see nvmet_parse_passthru_io_cmd() */
	default:
		return NVME_SC_INVALID_OPCODE | NVME_SC_DNR;
	}
}

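/*
 * Admin command parsing for passthru: fabrics-emulated commands (async
 * events, keep alive, selected features and Identify fix-ups) are handled by
 * the target, vendor specific commands and allow-listed opcodes are passed
 * through, and everything else is rejected.
 */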
u16 nvmet_parse_passthru_admin_cmd(struct nvmet_req *req)
{
	/* Reject any commands with non-SGL flags set (i.e. fused commands) */
	if (req->cmd->common.flags & ~NVME_CMD_SGL_ALL)
		return NVME_SC_INVALID_FIELD;

	/*
	 * Pass through all vendor specific commands.
	 */
	if (req->cmd->common.opcode >= nvme_admin_vendor_start)
		return nvmet_setup_passthru_command(req);

	switch (req->cmd->common.opcode) {
	case nvme_admin_async_event:
		req->execute = nvmet_execute_async_event;
		return NVME_SC_SUCCESS;
	case nvme_admin_keep_alive:
		/*
		 * Most PCIe controllers don't support the keep alive command,
		 * so emulate it here instead of passing it through. Revisit
		 * this once controllers with keep alive support are common.
		 */
		req->execute = nvmet_execute_keep_alive;
		return NVME_SC_SUCCESS;
	case nvme_admin_set_features:
		switch (le32_to_cpu(req->cmd->features.fid)) {
		case NVME_FEAT_ASYNC_EVENT:
		case NVME_FEAT_KATO:
		case NVME_FEAT_NUM_QUEUES:
		case NVME_FEAT_HOST_ID:
			req->execute = nvmet_execute_set_features;
			return NVME_SC_SUCCESS;
		case NVME_FEAT_HOST_BEHAVIOR:
			req->execute = nvmet_passthru_set_host_behaviour;
			return NVME_SC_SUCCESS;
		default:
			return nvmet_passthru_get_set_features(req);
		}
		break;
	case nvme_admin_get_features:
		switch (le32_to_cpu(req->cmd->features.fid)) {
		case NVME_FEAT_ASYNC_EVENT:
		case NVME_FEAT_KATO:
		case NVME_FEAT_NUM_QUEUES:
		case NVME_FEAT_HOST_ID:
			req->execute = nvmet_execute_get_features;
			return NVME_SC_SUCCESS;
		default:
			return nvmet_passthru_get_set_features(req);
		}
		break;
	case nvme_admin_identify:
		switch (req->cmd->identify.cns) {
		case NVME_ID_CNS_CTRL:
			req->execute = nvmet_passthru_execute_cmd;
			req->p.use_workqueue = true;
			return NVME_SC_SUCCESS;
		case NVME_ID_CNS_NS:
			req->execute = nvmet_passthru_execute_cmd;
			req->p.use_workqueue = true;
			return NVME_SC_SUCCESS;
		default:
			return nvmet_setup_passthru_command(req);
		}
	case nvme_admin_get_log_page:
		return nvmet_setup_passthru_command(req);
	default:
		/* Reject commands not explicitly allowed above */
		return NVME_SC_INVALID_OPCODE | NVME_SC_DNR;
	}
}

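/*
 * Bind a target subsystem to the NVMe controller named by
 * passthru_ctrl_path. Only one passthru subsystem may claim a given
 * controller, and the advertised NVMe version is raised to at least 1.2.1
 * as required for fabrics.
 */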
int nvmet_passthru_ctrl_enable(struct nvmet_subsys *subsys)
{
	struct nvme_ctrl *ctrl;
	int ret = -EINVAL;
	void *old;

	mutex_lock(&subsys->lock);
	if (!subsys->passthru_ctrl_path)
		goto out_unlock;
	if (subsys->passthru_ctrl)
		goto out_unlock;

	if (subsys->nr_namespaces) {
		pr_info("cannot enable both passthru and regular namespaces for a single subsystem\n");
		goto out_unlock;
	}

	ctrl = nvme_ctrl_get_by_path(subsys->passthru_ctrl_path);
	if (IS_ERR(ctrl)) {
		ret = PTR_ERR(ctrl);
		pr_err("failed to open nvme controller %s\n",
		       subsys->passthru_ctrl_path);

		goto out_unlock;
	}

	old = xa_cmpxchg(&passthru_subsystems, ctrl->cntlid, NULL,
			 subsys, GFP_KERNEL);
	if (xa_is_err(old)) {
		ret = xa_err(old);
		goto out_put_ctrl;
	}

	if (old)
		goto out_put_ctrl;

	subsys->passthru_ctrl = ctrl;
	subsys->ver = ctrl->vs;

	if (subsys->ver < NVME_VS(1, 2, 1)) {
		pr_warn("nvme controller version is too old: %llu.%llu.%llu, advertising 1.2.1\n",
			NVME_MAJOR(subsys->ver), NVME_MINOR(subsys->ver),
			NVME_TERTIARY(subsys->ver));
		subsys->ver = NVME_VS(1, 2, 1);
	}

	mutex_unlock(&subsys->lock);
	return 0;

out_put_ctrl:
	nvme_put_ctrl(ctrl);
out_unlock:
	mutex_unlock(&subsys->lock);
	return ret;
}

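/* Release the passthru controller; the caller must hold subsys->lock. */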
static void __nvmet_passthru_ctrl_disable(struct nvmet_subsys *subsys)
{
	if (subsys->passthru_ctrl) {
		xa_erase(&passthru_subsystems, subsys->passthru_ctrl->cntlid);
		nvme_put_ctrl(subsys->passthru_ctrl);
	}
	subsys->passthru_ctrl = NULL;
	subsys->ver = NVMET_DEFAULT_VS;
}

void nvmet_passthru_ctrl_disable(struct nvmet_subsys *subsys)
{
	mutex_lock(&subsys->lock);
	__nvmet_passthru_ctrl_disable(subsys);
	mutex_unlock(&subsys->lock);
}

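/* Tear down any passthru state and free the stored controller path. */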
void nvmet_passthru_subsys_free(struct nvmet_subsys *subsys)
{
	mutex_lock(&subsys->lock);
	__nvmet_passthru_ctrl_disable(subsys);
	mutex_unlock(&subsys->lock);
	kfree(subsys->passthru_ctrl_path);
}