linux/drivers/nvme/target/fabrics-cmd.c
/*
 * NVMe Fabrics command implementation.
 * Copyright (c) 2015-2016 HGST, a Western Digital Company.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/blkdev.h>
#include "nvmet.h"

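/*
 * Property Set: only 4-byte writes (ATTRIB bit 0 clear) to the Controller
 * Configuration register are supported; anything else completes with
 * Invalid Field.
 */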
static void nvmet_execute_prop_set(struct nvmet_req *req)
{
        u16 status = 0;

        if (!(req->cmd->prop_set.attrib & 1)) {
                u64 val = le64_to_cpu(req->cmd->prop_set.value);

                switch (le32_to_cpu(req->cmd->prop_set.offset)) {
                case NVME_REG_CC:
                        nvmet_update_cc(req->sq->ctrl, val);
                        break;
                default:
                        status = NVME_SC_INVALID_FIELD | NVME_SC_DNR;
                        break;
                }
        } else {
                status = NVME_SC_INVALID_FIELD | NVME_SC_DNR;
        }

        nvmet_req_complete(req, status);
}

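/*
 * Property Get: 8-byte reads (ATTRIB bit 0 set) are only supported for
 * CAP; VS, CC and CSTS are readable as 4-byte properties.  Unknown
 * offsets complete with Invalid Field.
 */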
static void nvmet_execute_prop_get(struct nvmet_req *req)
{
        struct nvmet_ctrl *ctrl = req->sq->ctrl;
        u16 status = 0;
        u64 val = 0;

        if (req->cmd->prop_get.attrib & 1) {
                switch (le32_to_cpu(req->cmd->prop_get.offset)) {
                case NVME_REG_CAP:
                        val = ctrl->cap;
                        break;
                default:
                        status = NVME_SC_INVALID_FIELD | NVME_SC_DNR;
                        break;
                }
        } else {
                switch (le32_to_cpu(req->cmd->prop_get.offset)) {
                case NVME_REG_VS:
                        val = ctrl->subsys->ver;
                        break;
                case NVME_REG_CC:
                        val = ctrl->cc;
                        break;
                case NVME_REG_CSTS:
                        val = ctrl->csts;
                        break;
                default:
                        status = NVME_SC_INVALID_FIELD | NVME_SC_DNR;
                        break;
                }
        }

        req->rsp->result.u64 = cpu_to_le64(val);
        nvmet_req_complete(req, status);
}

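/*
 * Dispatch a fabrics capsule received on an already connected queue:
 * only Property Set and Property Get are valid here.
 */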
u16 nvmet_parse_fabrics_cmd(struct nvmet_req *req)
{
        struct nvme_command *cmd = req->cmd;

        req->ns = NULL;

        switch (cmd->fabrics.fctype) {
        case nvme_fabrics_type_property_set:
                req->data_len = 0;
                req->execute = nvmet_execute_prop_set;
                break;
        case nvme_fabrics_type_property_get:
                req->data_len = 0;
                req->execute = nvmet_execute_prop_get;
                break;
        default:
                pr_err("received unknown capsule type 0x%x\n",
                        cmd->fabrics.fctype);
                return NVME_SC_INVALID_OPCODE | NVME_SC_DNR;
        }

        return 0;
}

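/*
 * Bind a controller to the queue pair the Connect command arrived on.
 * The cmpxchg() rejects a second Connect on an already connected queue.
 */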
static u16 nvmet_install_queue(struct nvmet_ctrl *ctrl, struct nvmet_req *req)
{
        struct nvmf_connect_command *c = &req->cmd->connect;
        u16 qid = le16_to_cpu(c->qid);
        u16 sqsize = le16_to_cpu(c->sqsize);
        struct nvmet_ctrl *old;

        old = cmpxchg(&req->sq->ctrl, NULL, ctrl);
        if (old) {
                pr_warn("queue already connected!\n");
                return NVME_SC_CONNECT_CTRL_BUSY | NVME_SC_DNR;
        }
        if (!sqsize) {
                pr_warn("queue size zero!\n");
                return NVME_SC_CONNECT_INVALID_PARAM | NVME_SC_DNR;
        }

        /* note: convert queue size from 0's-based value to 1's-based value */
        nvmet_cq_setup(ctrl, req->cq, qid, sqsize + 1);
        nvmet_sq_setup(ctrl, req->sq, qid, sqsize + 1);
        return 0;
}

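/*
 * Connect for the admin queue (QID 0): allocate a new controller for the
 * host/subsystem NQN pair from the connect data and return the assigned
 * controller ID in the completion result.  Only dynamic controller
 * allocation (CNTLID 0xffff) is supported.
 */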
static void nvmet_execute_admin_connect(struct nvmet_req *req)
{
        struct nvmf_connect_command *c = &req->cmd->connect;
        struct nvmf_connect_data *d;
        struct nvmet_ctrl *ctrl = NULL;
        u16 status = 0;

        d = kmalloc(sizeof(*d), GFP_KERNEL);
        if (!d) {
                status = NVME_SC_INTERNAL;
                goto complete;
        }

        status = nvmet_copy_from_sgl(req, 0, d, sizeof(*d));
        if (status)
                goto out;

        /* zero out initial completion result, assign values as needed */
        req->rsp->result.u32 = 0;

        if (c->recfmt != 0) {
                pr_warn("invalid connect version (%d).\n",
                        le16_to_cpu(c->recfmt));
                status = NVME_SC_CONNECT_FORMAT | NVME_SC_DNR;
                goto out;
        }

        if (unlikely(d->cntlid != cpu_to_le16(0xffff))) {
                pr_warn("connect attempt for invalid controller ID %#x\n",
                        le16_to_cpu(d->cntlid));
                status = NVME_SC_CONNECT_INVALID_PARAM | NVME_SC_DNR;
                req->rsp->result.u32 = IPO_IATTR_CONNECT_DATA(cntlid);
                goto out;
        }

        status = nvmet_alloc_ctrl(d->subsysnqn, d->hostnqn, req,
                                  le32_to_cpu(c->kato), &ctrl);
        if (status)
                goto out;
        uuid_copy(&ctrl->hostid, &d->hostid);

        status = nvmet_install_queue(ctrl, req);
        if (status) {
                nvmet_ctrl_put(ctrl);
                goto out;
        }

        pr_info("creating controller %d for subsystem %s for NQN %s.\n",
                ctrl->cntlid, ctrl->subsys->subsysnqn, ctrl->hostnqn);
        req->rsp->result.u16 = cpu_to_le16(ctrl->cntlid);

out:
        kfree(d);
complete:
        nvmet_req_complete(req, status);
}

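/*
 * Connect for an I/O queue: look up the controller identified by the
 * NQNs and CNTLID in the connect data, validate the queue ID against
 * the subsystem limit and install the queue on that controller.
 */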
static void nvmet_execute_io_connect(struct nvmet_req *req)
{
        struct nvmf_connect_command *c = &req->cmd->connect;
        struct nvmf_connect_data *d;
        struct nvmet_ctrl *ctrl = NULL;
        u16 qid = le16_to_cpu(c->qid);
        u16 status = 0;

        d = kmalloc(sizeof(*d), GFP_KERNEL);
        if (!d) {
                status = NVME_SC_INTERNAL;
                goto complete;
        }

        status = nvmet_copy_from_sgl(req, 0, d, sizeof(*d));
        if (status)
                goto out;

        /* zero out initial completion result, assign values as needed */
        req->rsp->result.u32 = 0;

        if (c->recfmt != 0) {
                pr_warn("invalid connect version (%d).\n",
                        le16_to_cpu(c->recfmt));
                status = NVME_SC_CONNECT_FORMAT | NVME_SC_DNR;
                goto out;
        }

        status = nvmet_ctrl_find_get(d->subsysnqn, d->hostnqn,
                                     le16_to_cpu(d->cntlid),
                                     req, &ctrl);
        if (status)
                goto out;

        if (unlikely(qid > ctrl->subsys->max_qid)) {
                pr_warn("invalid queue id (%d)\n", qid);
                status = NVME_SC_CONNECT_INVALID_PARAM | NVME_SC_DNR;
                req->rsp->result.u32 = IPO_IATTR_CONNECT_SQE(qid);
                goto out_ctrl_put;
        }

        status = nvmet_install_queue(ctrl, req);
        if (status) {
                /* pass back cntlid that had the issue of installing queue */
                req->rsp->result.u16 = cpu_to_le16(ctrl->cntlid);
                goto out_ctrl_put;
        }

        pr_info("adding queue %d to ctrl %d.\n", qid, ctrl->cntlid);

out:
        kfree(d);
complete:
        nvmet_req_complete(req, status);
        return;

out_ctrl_put:
        nvmet_ctrl_put(ctrl);
        goto out;
}

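/*
 * Parse the first command on a not yet connected queue: only a fabrics
 * Connect capsule is valid here.  QID 0 connects the admin queue, any
 * other QID connects an I/O queue.
 */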
u16 nvmet_parse_connect_cmd(struct nvmet_req *req)
{
        struct nvme_command *cmd = req->cmd;

        req->ns = NULL;

        if (cmd->common.opcode != nvme_fabrics_command) {
                pr_err("invalid command 0x%x on unconnected queue.\n",
                        cmd->fabrics.opcode);
                return NVME_SC_INVALID_OPCODE | NVME_SC_DNR;
        }
        if (cmd->fabrics.fctype != nvme_fabrics_type_connect) {
                pr_err("invalid capsule type 0x%x on unconnected queue.\n",
                        cmd->fabrics.fctype);
                return NVME_SC_INVALID_OPCODE | NVME_SC_DNR;
        }

        req->data_len = sizeof(struct nvmf_connect_data);
        if (cmd->connect.qid == 0)
                req->execute = nvmet_execute_admin_connect;
        else
                req->execute = nvmet_execute_io_connect;
        return 0;
}