1
2
3
4
5
6#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
7#include <linux/blkdev.h>
8#include "nvmet.h"
9
10static void nvmet_execute_prop_set(struct nvmet_req *req)
11{
12 u64 val = le64_to_cpu(req->cmd->prop_set.value);
13 u16 status = 0;
14
15 if (req->cmd->prop_set.attrib & 1) {
16 req->error_loc =
17 offsetof(struct nvmf_property_set_command, attrib);
18 status = NVME_SC_INVALID_FIELD | NVME_SC_DNR;
19 goto out;
20 }
21
22 switch (le32_to_cpu(req->cmd->prop_set.offset)) {
23 case NVME_REG_CC:
24 nvmet_update_cc(req->sq->ctrl, val);
25 break;
26 default:
27 req->error_loc =
28 offsetof(struct nvmf_property_set_command, offset);
29 status = NVME_SC_INVALID_FIELD | NVME_SC_DNR;
30 }
31out:
32 nvmet_req_complete(req, status);
33}
34
35static void nvmet_execute_prop_get(struct nvmet_req *req)
36{
37 struct nvmet_ctrl *ctrl = req->sq->ctrl;
38 u16 status = 0;
39 u64 val = 0;
40
41 if (req->cmd->prop_get.attrib & 1) {
42 switch (le32_to_cpu(req->cmd->prop_get.offset)) {
43 case NVME_REG_CAP:
44 val = ctrl->cap;
45 break;
46 default:
47 status = NVME_SC_INVALID_FIELD | NVME_SC_DNR;
48 break;
49 }
50 } else {
51 switch (le32_to_cpu(req->cmd->prop_get.offset)) {
52 case NVME_REG_VS:
53 val = ctrl->subsys->ver;
54 break;
55 case NVME_REG_CC:
56 val = ctrl->cc;
57 break;
58 case NVME_REG_CSTS:
59 val = ctrl->csts;
60 break;
61 default:
62 status = NVME_SC_INVALID_FIELD | NVME_SC_DNR;
63 break;
64 }
65 }
66
67 if (status && req->cmd->prop_get.attrib & 1) {
68 req->error_loc =
69 offsetof(struct nvmf_property_get_command, offset);
70 } else {
71 req->error_loc =
72 offsetof(struct nvmf_property_get_command, attrib);
73 }
74
75 req->rsp->result.u64 = cpu_to_le64(val);
76 nvmet_req_complete(req, status);
77}
78
79u16 nvmet_parse_fabrics_cmd(struct nvmet_req *req)
80{
81 struct nvme_command *cmd = req->cmd;
82
83 switch (cmd->fabrics.fctype) {
84 case nvme_fabrics_type_property_set:
85 req->data_len = 0;
86 req->execute = nvmet_execute_prop_set;
87 break;
88 case nvme_fabrics_type_property_get:
89 req->data_len = 0;
90 req->execute = nvmet_execute_prop_get;
91 break;
92 default:
93 pr_err("received unknown capsule type 0x%x\n",
94 cmd->fabrics.fctype);
95 req->error_loc = offsetof(struct nvmf_common_command, fctype);
96 return NVME_SC_INVALID_OPCODE | NVME_SC_DNR;
97 }
98
99 return 0;
100}
101
102static u16 nvmet_install_queue(struct nvmet_ctrl *ctrl, struct nvmet_req *req)
103{
104 struct nvmf_connect_command *c = &req->cmd->connect;
105 u16 qid = le16_to_cpu(c->qid);
106 u16 sqsize = le16_to_cpu(c->sqsize);
107 struct nvmet_ctrl *old;
108
109 old = cmpxchg(&req->sq->ctrl, NULL, ctrl);
110 if (old) {
111 pr_warn("queue already connected!\n");
112 req->error_loc = offsetof(struct nvmf_connect_command, opcode);
113 return NVME_SC_CONNECT_CTRL_BUSY | NVME_SC_DNR;
114 }
115 if (!sqsize) {
116 pr_warn("queue size zero!\n");
117 req->error_loc = offsetof(struct nvmf_connect_command, sqsize);
118 return NVME_SC_CONNECT_INVALID_PARAM | NVME_SC_DNR;
119 }
120
121
122 nvmet_cq_setup(ctrl, req->cq, qid, sqsize + 1);
123 nvmet_sq_setup(ctrl, req->sq, qid, sqsize + 1);
124
125 if (c->cattr & NVME_CONNECT_DISABLE_SQFLOW) {
126 req->sq->sqhd_disabled = true;
127 req->rsp->sq_head = cpu_to_le16(0xffff);
128 }
129
130 if (ctrl->ops->install_queue) {
131 u16 ret = ctrl->ops->install_queue(req->sq);
132
133 if (ret) {
134 pr_err("failed to install queue %d cntlid %d ret %x\n",
135 qid, ret, ctrl->cntlid);
136 return ret;
137 }
138 }
139
140 return 0;
141}
142
143static void nvmet_execute_admin_connect(struct nvmet_req *req)
144{
145 struct nvmf_connect_command *c = &req->cmd->connect;
146 struct nvmf_connect_data *d;
147 struct nvmet_ctrl *ctrl = NULL;
148 u16 status = 0;
149
150 d = kmalloc(sizeof(*d), GFP_KERNEL);
151 if (!d) {
152 status = NVME_SC_INTERNAL;
153 goto complete;
154 }
155
156 status = nvmet_copy_from_sgl(req, 0, d, sizeof(*d));
157 if (status)
158 goto out;
159
160
161 req->rsp->result.u32 = 0;
162
163 if (c->recfmt != 0) {
164 pr_warn("invalid connect version (%d).\n",
165 le16_to_cpu(c->recfmt));
166 req->error_loc = offsetof(struct nvmf_connect_command, recfmt);
167 status = NVME_SC_CONNECT_FORMAT | NVME_SC_DNR;
168 goto out;
169 }
170
171 if (unlikely(d->cntlid != cpu_to_le16(0xffff))) {
172 pr_warn("connect attempt for invalid controller ID %#x\n",
173 d->cntlid);
174 status = NVME_SC_CONNECT_INVALID_PARAM | NVME_SC_DNR;
175 req->rsp->result.u32 = IPO_IATTR_CONNECT_DATA(cntlid);
176 goto out;
177 }
178
179 status = nvmet_alloc_ctrl(d->subsysnqn, d->hostnqn, req,
180 le32_to_cpu(c->kato), &ctrl);
181 if (status) {
182 if (status == (NVME_SC_INVALID_FIELD | NVME_SC_DNR))
183 req->error_loc =
184 offsetof(struct nvme_common_command, opcode);
185 goto out;
186 }
187
188 uuid_copy(&ctrl->hostid, &d->hostid);
189
190 status = nvmet_install_queue(ctrl, req);
191 if (status) {
192 nvmet_ctrl_put(ctrl);
193 goto out;
194 }
195
196 pr_info("creating controller %d for subsystem %s for NQN %s.\n",
197 ctrl->cntlid, ctrl->subsys->subsysnqn, ctrl->hostnqn);
198 req->rsp->result.u16 = cpu_to_le16(ctrl->cntlid);
199
200out:
201 kfree(d);
202complete:
203 nvmet_req_complete(req, status);
204}
205
/*
 * Handle a Connect command for an I/O queue (qid > 0): look up the
 * existing controller named in the connect data and attach this queue
 * to it.  Completes the request with the resulting status.
 */
static void nvmet_execute_io_connect(struct nvmet_req *req)
{
	struct nvmf_connect_command *c = &req->cmd->connect;
	struct nvmf_connect_data *d;
	struct nvmet_ctrl *ctrl = NULL;
	u16 qid = le16_to_cpu(c->qid);
	u16 status = 0;

	d = kmalloc(sizeof(*d), GFP_KERNEL);
	if (!d) {
		status = NVME_SC_INTERNAL;
		goto complete;
	}

	/* The connect data is carried in the command's data SGL. */
	status = nvmet_copy_from_sgl(req, 0, d, sizeof(*d));
	if (status)
		goto out;

	/* zero out initial completion result, assign values as needed */
	req->rsp->result.u32 = 0;

	if (c->recfmt != 0) {
		pr_warn("invalid connect version (%d).\n",
			le16_to_cpu(c->recfmt));
		status = NVME_SC_CONNECT_FORMAT | NVME_SC_DNR;
		goto out;
	}

	/* Takes a controller reference that must be put on any error below. */
	status = nvmet_ctrl_find_get(d->subsysnqn, d->hostnqn,
				     le16_to_cpu(d->cntlid),
				     req, &ctrl);
	if (status)
		goto out;

	if (unlikely(qid > ctrl->subsys->max_qid)) {
		pr_warn("invalid queue id (%d)\n", qid);
		status = NVME_SC_CONNECT_INVALID_PARAM | NVME_SC_DNR;
		req->rsp->result.u32 = IPO_IATTR_CONNECT_SQE(qid);
		goto out_ctrl_put;
	}

	status = nvmet_install_queue(ctrl, req);
	if (status) {
		/* pass back cntlid for successful completion */
		req->rsp->result.u16 = cpu_to_le16(ctrl->cntlid);
		goto out_ctrl_put;
	}

	pr_debug("adding queue %d to ctrl %d.\n", qid, ctrl->cntlid);

out:
	kfree(d);
complete:
	nvmet_req_complete(req, status);
	return;

	/* Error path for failures after the controller lookup succeeded:
	 * drop the reference, then rejoin the common cleanup above. */
out_ctrl_put:
	nvmet_ctrl_put(ctrl);
	goto out;
}
266
267u16 nvmet_parse_connect_cmd(struct nvmet_req *req)
268{
269 struct nvme_command *cmd = req->cmd;
270
271 if (cmd->common.opcode != nvme_fabrics_command) {
272 pr_err("invalid command 0x%x on unconnected queue.\n",
273 cmd->fabrics.opcode);
274 req->error_loc = offsetof(struct nvme_common_command, opcode);
275 return NVME_SC_INVALID_OPCODE | NVME_SC_DNR;
276 }
277 if (cmd->fabrics.fctype != nvme_fabrics_type_connect) {
278 pr_err("invalid capsule type 0x%x on unconnected queue.\n",
279 cmd->fabrics.fctype);
280 req->error_loc = offsetof(struct nvmf_common_command, fctype);
281 return NVME_SC_INVALID_OPCODE | NVME_SC_DNR;
282 }
283
284 req->data_len = sizeof(struct nvmf_connect_data);
285 if (cmd->connect.qid == 0)
286 req->execute = nvmet_execute_admin_connect;
287 else
288 req->execute = nvmet_execute_io_connect;
289 return 0;
290}
291