linux/drivers/nvme/target/fabrics-cmd.c
// SPDX-License-Identifier: GPL-2.0
/*
 * NVMe Fabrics command implementation.
 * Copyright (c) 2015-2016 HGST, a Western Digital Company.
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/blkdev.h>
#include "nvmet.h"

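/*
 * Handle a fabrics Property Set command. Only 4-byte properties (attrib
 * bit 0 clear) may be written, and the only writable property is the
 * Controller Configuration (CC) register.
 */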
static void nvmet_execute_prop_set(struct nvmet_req *req)
{
	u64 val = le64_to_cpu(req->cmd->prop_set.value);
	u16 status = 0;

	if (!nvmet_check_transfer_len(req, 0))
		return;

	if (req->cmd->prop_set.attrib & 1) {
		req->error_loc =
			offsetof(struct nvmf_property_set_command, attrib);
		status = NVME_SC_INVALID_FIELD | NVME_SC_DNR;
		goto out;
	}

	switch (le32_to_cpu(req->cmd->prop_set.offset)) {
	case NVME_REG_CC:
		nvmet_update_cc(req->sq->ctrl, val);
		break;
	default:
		req->error_loc =
			offsetof(struct nvmf_property_set_command, offset);
		status = NVME_SC_INVALID_FIELD | NVME_SC_DNR;
	}
out:
	nvmet_req_complete(req, status);
}

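/*
 * Handle a fabrics Property Get command. 8-byte reads (attrib bit 0 set)
 * are limited to CAP; 4-byte reads are limited to VS, CC and CSTS. Any
 * other offset fails with Invalid Field.
 */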
static void nvmet_execute_prop_get(struct nvmet_req *req)
{
	struct nvmet_ctrl *ctrl = req->sq->ctrl;
	u16 status = 0;
	u64 val = 0;

	if (!nvmet_check_transfer_len(req, 0))
		return;

	if (req->cmd->prop_get.attrib & 1) {
		switch (le32_to_cpu(req->cmd->prop_get.offset)) {
		case NVME_REG_CAP:
			val = ctrl->cap;
			break;
		default:
			status = NVME_SC_INVALID_FIELD | NVME_SC_DNR;
			break;
		}
	} else {
		switch (le32_to_cpu(req->cmd->prop_get.offset)) {
		case NVME_REG_VS:
			val = ctrl->subsys->ver;
			break;
		case NVME_REG_CC:
			val = ctrl->cc;
			break;
		case NVME_REG_CSTS:
			val = ctrl->csts;
			break;
		default:
			status = NVME_SC_INVALID_FIELD | NVME_SC_DNR;
			break;
		}
	}

	if (status && req->cmd->prop_get.attrib & 1) {
		req->error_loc =
			offsetof(struct nvmf_property_get_command, offset);
	} else {
		req->error_loc =
			offsetof(struct nvmf_property_get_command, attrib);
	}

	req->cqe->result.u64 = cpu_to_le64(val);
	nvmet_req_complete(req, status);
}

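/*
 * Dispatch a fabrics command capsule received on a queue that is already
 * connected. Only Property Set and Property Get are handled here; any
 * other capsule type is rejected as an invalid opcode.
 */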
u16 nvmet_parse_fabrics_cmd(struct nvmet_req *req)
{
	struct nvme_command *cmd = req->cmd;

	switch (cmd->fabrics.fctype) {
	case nvme_fabrics_type_property_set:
		req->execute = nvmet_execute_prop_set;
		break;
	case nvme_fabrics_type_property_get:
		req->execute = nvmet_execute_prop_get;
		break;
	default:
		pr_debug("received unknown capsule type 0x%x\n",
			cmd->fabrics.fctype);
		req->error_loc = offsetof(struct nvmf_common_command, fctype);
		return NVME_SC_INVALID_OPCODE | NVME_SC_DNR;
	}

	return 0;
}

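/*
 * Bind the queue pair referenced by a Connect command to @ctrl: validate
 * the requested queue size against MQES, make sure the qid is not already
 * in use, claim the submission queue for this controller, set up the
 * CQ/SQ, and let the transport do per-queue setup via ->install_queue().
 */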
static u16 nvmet_install_queue(struct nvmet_ctrl *ctrl, struct nvmet_req *req)
{
	struct nvmf_connect_command *c = &req->cmd->connect;
	u16 qid = le16_to_cpu(c->qid);
	u16 sqsize = le16_to_cpu(c->sqsize);
	struct nvmet_ctrl *old;
	u16 mqes = NVME_CAP_MQES(ctrl->cap);
	u16 ret;

	if (!sqsize) {
		pr_warn("queue size zero!\n");
		req->error_loc = offsetof(struct nvmf_connect_command, sqsize);
		req->cqe->result.u32 = IPO_IATTR_CONNECT_SQE(sqsize);
		ret = NVME_SC_CONNECT_INVALID_PARAM | NVME_SC_DNR;
		goto err;
	}

	if (ctrl->sqs[qid] != NULL) {
		pr_warn("qid %u has already been created\n", qid);
		req->error_loc = offsetof(struct nvmf_connect_command, qid);
		return NVME_SC_CMD_SEQ_ERROR | NVME_SC_DNR;
	}

	if (sqsize > mqes) {
		pr_warn("sqsize %u is larger than MQES supported %u cntlid %d\n",
				sqsize, mqes, ctrl->cntlid);
		req->error_loc = offsetof(struct nvmf_connect_command, sqsize);
		req->cqe->result.u32 = IPO_IATTR_CONNECT_SQE(sqsize);
		return NVME_SC_CONNECT_INVALID_PARAM | NVME_SC_DNR;
	}

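	/*
	 * Atomically claim the submission queue for this controller; a
	 * non-NULL old value means the queue is already connected.
	 */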
	old = cmpxchg(&req->sq->ctrl, NULL, ctrl);
	if (old) {
		pr_warn("queue already connected!\n");
		req->error_loc = offsetof(struct nvmf_connect_command, opcode);
		return NVME_SC_CONNECT_CTRL_BUSY | NVME_SC_DNR;
	}

	/* note: convert queue size from 0's-based value to 1's-based value */
	nvmet_cq_setup(ctrl, req->cq, qid, sqsize + 1);
	nvmet_sq_setup(ctrl, req->sq, qid, sqsize + 1);

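	/*
	 * If the host asked to disable SQ flow control, stop reporting SQ
	 * head pointer updates in completions for this queue.
	 */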
	if (c->cattr & NVME_CONNECT_DISABLE_SQFLOW) {
		req->sq->sqhd_disabled = true;
		req->cqe->sq_head = cpu_to_le16(0xffff);
	}

	if (ctrl->ops->install_queue) {
		ret = ctrl->ops->install_queue(req->sq);
		if (ret) {
			pr_err("failed to install queue %d cntlid %d ret %x\n",
				qid, ctrl->cntlid, ret);
			ctrl->sqs[qid] = NULL;
			goto err;
		}
	}

	return 0;

err:
	req->sq->ctrl = NULL;
	return ret;
}

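/*
 * Handle a Connect command for the admin queue (qid 0): validate the
 * connect data, allocate a new controller for this host/subsystem pair,
 * install the admin queue, and return the assigned controller ID.
 */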
static void nvmet_execute_admin_connect(struct nvmet_req *req)
{
	struct nvmf_connect_command *c = &req->cmd->connect;
	struct nvmf_connect_data *d;
	struct nvmet_ctrl *ctrl = NULL;
	u16 status = 0;

	if (!nvmet_check_transfer_len(req, sizeof(struct nvmf_connect_data)))
		return;

	d = kmalloc(sizeof(*d), GFP_KERNEL);
	if (!d) {
		status = NVME_SC_INTERNAL;
		goto complete;
	}

	status = nvmet_copy_from_sgl(req, 0, d, sizeof(*d));
	if (status)
		goto out;

	/* zero out initial completion result, assign values as needed */
	req->cqe->result.u32 = 0;

	if (c->recfmt != 0) {
		pr_warn("invalid connect version (%d).\n",
			le16_to_cpu(c->recfmt));
		req->error_loc = offsetof(struct nvmf_connect_command, recfmt);
		status = NVME_SC_CONNECT_FORMAT | NVME_SC_DNR;
		goto out;
	}

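	/*
	 * Controllers are allocated dynamically; the host must pass a
	 * controller ID of 0xffff in the connect data.
	 */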
	if (unlikely(d->cntlid != cpu_to_le16(0xffff))) {
		pr_warn("connect attempt for invalid controller ID %#x\n",
			d->cntlid);
		status = NVME_SC_CONNECT_INVALID_PARAM | NVME_SC_DNR;
		req->cqe->result.u32 = IPO_IATTR_CONNECT_DATA(cntlid);
		goto out;
	}

	status = nvmet_alloc_ctrl(d->subsysnqn, d->hostnqn, req,
				  le32_to_cpu(c->kato), &ctrl);
	if (status)
		goto out;

	ctrl->pi_support = ctrl->port->pi_enable && ctrl->subsys->pi_support;

	uuid_copy(&ctrl->hostid, &d->hostid);

	status = nvmet_install_queue(ctrl, req);
	if (status) {
		nvmet_ctrl_put(ctrl);
		goto out;
	}

	pr_info("creating controller %d for subsystem %s for NQN %s%s.\n",
		ctrl->cntlid, ctrl->subsys->subsysnqn, ctrl->hostnqn,
		ctrl->pi_support ? " T10-PI is enabled" : "");
	req->cqe->result.u16 = cpu_to_le16(ctrl->cntlid);

out:
	kfree(d);
complete:
	nvmet_req_complete(req, status);
}

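/*
 * Handle a Connect command for an I/O queue (qid > 0): look up the
 * existing controller by subsystem NQN, host NQN and controller ID,
 * validate the queue ID, and install the queue on that controller.
 */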
static void nvmet_execute_io_connect(struct nvmet_req *req)
{
	struct nvmf_connect_command *c = &req->cmd->connect;
	struct nvmf_connect_data *d;
	struct nvmet_ctrl *ctrl;
	u16 qid = le16_to_cpu(c->qid);
	u16 status = 0;

	if (!nvmet_check_transfer_len(req, sizeof(struct nvmf_connect_data)))
		return;

	d = kmalloc(sizeof(*d), GFP_KERNEL);
	if (!d) {
		status = NVME_SC_INTERNAL;
		goto complete;
	}

	status = nvmet_copy_from_sgl(req, 0, d, sizeof(*d));
	if (status)
		goto out;

	/* zero out initial completion result, assign values as needed */
	req->cqe->result.u32 = 0;

	if (c->recfmt != 0) {
		pr_warn("invalid connect version (%d).\n",
			le16_to_cpu(c->recfmt));
		status = NVME_SC_CONNECT_FORMAT | NVME_SC_DNR;
		goto out;
	}

	ctrl = nvmet_ctrl_find_get(d->subsysnqn, d->hostnqn,
				   le16_to_cpu(d->cntlid), req);
	if (!ctrl) {
		status = NVME_SC_CONNECT_INVALID_PARAM | NVME_SC_DNR;
		goto out;
	}

	if (unlikely(qid > ctrl->subsys->max_qid)) {
		pr_warn("invalid queue id (%d)\n", qid);
		status = NVME_SC_CONNECT_INVALID_PARAM | NVME_SC_DNR;
		req->cqe->result.u32 = IPO_IATTR_CONNECT_SQE(qid);
		goto out_ctrl_put;
	}

	status = nvmet_install_queue(ctrl, req);
	if (status)
		goto out_ctrl_put;

	/* pass back cntlid for successful completion */
	req->cqe->result.u16 = cpu_to_le16(ctrl->cntlid);

	pr_debug("adding queue %d to ctrl %d.\n", qid, ctrl->cntlid);

out:
	kfree(d);
complete:
	nvmet_req_complete(req, status);
	return;

out_ctrl_put:
	nvmet_ctrl_put(ctrl);
	goto out;
}

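/*
 * Parse the first command received on a queue that is not yet connected.
 * Only a fabrics Connect capsule is valid at this point; qid 0 connects
 * the admin queue, any other qid connects an I/O queue.
 */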
u16 nvmet_parse_connect_cmd(struct nvmet_req *req)
{
	struct nvme_command *cmd = req->cmd;

	if (!nvme_is_fabrics(cmd)) {
		pr_debug("invalid command 0x%x on unconnected queue.\n",
			cmd->fabrics.opcode);
		req->error_loc = offsetof(struct nvme_common_command, opcode);
		return NVME_SC_INVALID_OPCODE | NVME_SC_DNR;
	}
	if (cmd->fabrics.fctype != nvme_fabrics_type_connect) {
		pr_debug("invalid capsule type 0x%x on unconnected queue.\n",
			cmd->fabrics.fctype);
		req->error_loc = offsetof(struct nvmf_common_command, fctype);
		return NVME_SC_INVALID_OPCODE | NVME_SC_DNR;
	}

	if (cmd->connect.qid == 0)
		req->execute = nvmet_execute_admin_connect;
	else
		req->execute = nvmet_execute_io_connect;
	return 0;
}