linux/drivers/nvme/target/admin-cmd.c
   1/*
   2 * NVMe admin command implementation.
   3 * Copyright (c) 2015-2016 HGST, a Western Digital Company.
   4 *
   5 * This program is free software; you can redistribute it and/or modify it
   6 * under the terms and conditions of the GNU General Public License,
   7 * version 2, as published by the Free Software Foundation.
   8 *
   9 * This program is distributed in the hope it will be useful, but WITHOUT
  10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  11 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
  12 * more details.
  13 */
  14#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
  15#include <linux/module.h>
  16#include <linux/rculist.h>
  17
  18#include <generated/utsrelease.h>
  19#include <asm/unaligned.h>
  20#include "nvmet.h"
  21
  22/*
  23 * This helper clears the AEN unless the Retain Asynchronous Event (RAE)
  24 * bit was set in the command.  Use it when processing log pages that
  25 * are associated with an AEN.
  26 */
  27static inline void nvmet_clear_aen(struct nvmet_req *req, u32 aen_bit)
  28{
  29        int rae = le32_to_cpu(req->cmd->common.cdw10[0]) & (1 << 15);
  30
  31        if (!rae)
  32                clear_bit(aen_bit, &req->sq->ctrl->aen_masked);
  33}
  34
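     /*
      * Compute the Get Log Page transfer length in bytes from the zero-based
      * NUMDU/NUMDL dword count in the command.
      */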
  35u32 nvmet_get_log_page_len(struct nvme_command *cmd)
  36{
  37        u32 len = le16_to_cpu(cmd->get_log_page.numdu);
  38
  39        len <<= 16;
  40        len += le16_to_cpu(cmd->get_log_page.numdl);
  41        /* NUMD is a 0's based value */
  42        len += 1;
  43        len *= sizeof(u32);
  44
  45        return len;
  46}
  47
  48static void nvmet_execute_get_log_page_noop(struct nvmet_req *req)
  49{
  50        nvmet_req_complete(req, nvmet_zero_sgl(req, 0, req->data_len));
  51}
  52
  53static u16 nvmet_get_smart_log_nsid(struct nvmet_req *req,
  54                struct nvme_smart_log *slog)
  55{
  56        struct nvmet_ns *ns;
  57        u64 host_reads, host_writes, data_units_read, data_units_written;
  58
  59        ns = nvmet_find_namespace(req->sq->ctrl, req->cmd->get_log_page.nsid);
  60        if (!ns) {
  61                pr_err("Could not find namespace id: %u\n",
  62                                le32_to_cpu(req->cmd->get_log_page.nsid));
  63                return NVME_SC_INVALID_NS;
  64        }
  65
  66        /* we don't have the right data for file backed ns */
  67        if (!ns->bdev)
  68                goto out;
  69
  70        host_reads = part_stat_read(ns->bdev->bd_part, ios[READ]);
  71        data_units_read = part_stat_read(ns->bdev->bd_part, sectors[READ]);
  72        host_writes = part_stat_read(ns->bdev->bd_part, ios[WRITE]);
  73        data_units_written = part_stat_read(ns->bdev->bd_part, sectors[WRITE]);
  74
  75        put_unaligned_le64(host_reads, &slog->host_reads[0]);
  76        put_unaligned_le64(data_units_read, &slog->data_units_read[0]);
  77        put_unaligned_le64(host_writes, &slog->host_writes[0]);
  78        put_unaligned_le64(data_units_written, &slog->data_units_written[0]);
  79out:
  80        nvmet_put_namespace(ns);
  81
  82        return NVME_SC_SUCCESS;
  83}
  84
  85static u16 nvmet_get_smart_log_all(struct nvmet_req *req,
  86                struct nvme_smart_log *slog)
  87{
  88        u64 host_reads = 0, host_writes = 0;
  89        u64 data_units_read = 0, data_units_written = 0;
  90        struct nvmet_ns *ns;
  91        struct nvmet_ctrl *ctrl;
  92
  93        ctrl = req->sq->ctrl;
  94
  95        rcu_read_lock();
  96        list_for_each_entry_rcu(ns, &ctrl->subsys->namespaces, dev_link) {
  97                /* we don't have the right data for file backed ns */
  98                if (!ns->bdev)
  99                        continue;
 100                host_reads += part_stat_read(ns->bdev->bd_part, ios[READ]);
 101                data_units_read +=
 102                        part_stat_read(ns->bdev->bd_part, sectors[READ]);
 103                host_writes += part_stat_read(ns->bdev->bd_part, ios[WRITE]);
 104                data_units_written +=
 105                        part_stat_read(ns->bdev->bd_part, sectors[WRITE]);
 106
 107        }
 108        rcu_read_unlock();
 109
 110        put_unaligned_le64(host_reads, &slog->host_reads[0]);
 111        put_unaligned_le64(data_units_read, &slog->data_units_read[0]);
 112        put_unaligned_le64(host_writes, &slog->host_writes[0]);
 113        put_unaligned_le64(data_units_written, &slog->data_units_written[0]);
 114
 115        return NVME_SC_SUCCESS;
 116}
 117
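     /*
      * SMART / Health Information log page: report I/O statistics for a
      * single namespace, or the aggregate over all namespaces when NSID is
      * NVME_NSID_ALL.
      */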
 118static void nvmet_execute_get_log_page_smart(struct nvmet_req *req)
 119{
 120        struct nvme_smart_log *log;
 121        u16 status = NVME_SC_INTERNAL;
 122
 123        if (req->data_len != sizeof(*log))
 124                goto out;
 125
 126        log = kzalloc(sizeof(*log), GFP_KERNEL);
 127        if (!log)
 128                goto out;
 129
 130        if (req->cmd->get_log_page.nsid == cpu_to_le32(NVME_NSID_ALL))
 131                status = nvmet_get_smart_log_all(req, log);
 132        else
 133                status = nvmet_get_smart_log_nsid(req, log);
 134        if (status)
 135                goto out_free_log;
 136
 137        status = nvmet_copy_to_sgl(req, 0, log, sizeof(*log));
 138out_free_log:
 139        kfree(log);
 140out:
 141        nvmet_req_complete(req, status);
 142}
 143
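     /*
      * Commands Supported and Effects log page: mark the admin and I/O
      * commands implemented by this target as supported; all other entries
      * stay zero.
      */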
 144static void nvmet_execute_get_log_cmd_effects_ns(struct nvmet_req *req)
 145{
 146        u16 status = NVME_SC_INTERNAL;
 147        struct nvme_effects_log *log;
 148
 149        log = kzalloc(sizeof(*log), GFP_KERNEL);
 150        if (!log)
 151                goto out;
 152
 153        log->acs[nvme_admin_get_log_page]       = cpu_to_le32(1 << 0);
 154        log->acs[nvme_admin_identify]           = cpu_to_le32(1 << 0);
 155        log->acs[nvme_admin_abort_cmd]          = cpu_to_le32(1 << 0);
 156        log->acs[nvme_admin_set_features]       = cpu_to_le32(1 << 0);
 157        log->acs[nvme_admin_get_features]       = cpu_to_le32(1 << 0);
 158        log->acs[nvme_admin_async_event]        = cpu_to_le32(1 << 0);
 159        log->acs[nvme_admin_keep_alive]         = cpu_to_le32(1 << 0);
 160
 161        log->iocs[nvme_cmd_read]                = cpu_to_le32(1 << 0);
 162        log->iocs[nvme_cmd_write]               = cpu_to_le32(1 << 0);
 163        log->iocs[nvme_cmd_flush]               = cpu_to_le32(1 << 0);
 164        log->iocs[nvme_cmd_dsm]                 = cpu_to_le32(1 << 0);
 165        log->iocs[nvme_cmd_write_zeroes]        = cpu_to_le32(1 << 0);
 166
 167        status = nvmet_copy_to_sgl(req, 0, log, sizeof(*log));
 168
 169        kfree(log);
 170out:
 171        nvmet_req_complete(req, status);
 172}
 173
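     /*
      * Changed Namespace List log page: return the NSIDs recorded since the
      * last read, reset the list, and clear the pending AEN unless RAE was
      * set in the command.
      */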
 174static void nvmet_execute_get_log_changed_ns(struct nvmet_req *req)
 175{
 176        struct nvmet_ctrl *ctrl = req->sq->ctrl;
 177        u16 status = NVME_SC_INTERNAL;
 178        size_t len;
 179
 180        if (req->data_len != NVME_MAX_CHANGED_NAMESPACES * sizeof(__le32))
 181                goto out;
 182
 183        mutex_lock(&ctrl->lock);
 184        if (ctrl->nr_changed_ns == U32_MAX)
 185                len = sizeof(__le32);
 186        else
 187                len = ctrl->nr_changed_ns * sizeof(__le32);
 188        status = nvmet_copy_to_sgl(req, 0, ctrl->changed_ns_list, len);
 189        if (!status)
 190                status = nvmet_zero_sgl(req, len, req->data_len - len);
 191        ctrl->nr_changed_ns = 0;
 192        nvmet_clear_aen(req, NVME_AEN_CFG_NS_ATTR);
 193        mutex_unlock(&ctrl->lock);
 194out:
 195        nvmet_req_complete(req, status);
 196}
 197
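     /*
      * Fill in a single ANA group descriptor.  The NSID list is omitted when
      * the host asked for Return Groups Only (RGO) in the LSP field.
      */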
 198static u32 nvmet_format_ana_group(struct nvmet_req *req, u32 grpid,
 199                struct nvme_ana_group_desc *desc)
 200{
 201        struct nvmet_ctrl *ctrl = req->sq->ctrl;
 202        struct nvmet_ns *ns;
 203        u32 count = 0;
 204
 205        if (!(req->cmd->get_log_page.lsp & NVME_ANA_LOG_RGO)) {
 206                rcu_read_lock();
 207                list_for_each_entry_rcu(ns, &ctrl->subsys->namespaces, dev_link)
 208                        if (ns->anagrpid == grpid)
 209                                desc->nsids[count++] = cpu_to_le32(ns->nsid);
 210                rcu_read_unlock();
 211        }
 212
 213        desc->grpid = cpu_to_le32(grpid);
 214        desc->nnsids = cpu_to_le32(count);
 215        desc->chgcnt = cpu_to_le64(nvmet_ana_chgcnt);
 216        desc->state = req->port->ana_state[grpid];
 217        memset(desc->rsvd17, 0, sizeof(desc->rsvd17));
 218        return sizeof(struct nvme_ana_group_desc) + count * sizeof(__le32);
 219}
 220
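     /*
      * ANA log page: write one descriptor per enabled ANA group, then write
      * the header last once the total number of groups is known.
      */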
 221static void nvmet_execute_get_log_page_ana(struct nvmet_req *req)
 222{
 223        struct nvme_ana_rsp_hdr hdr = { 0, };
 224        struct nvme_ana_group_desc *desc;
 225        size_t offset = sizeof(struct nvme_ana_rsp_hdr); /* start beyond hdr */
 226        size_t len;
 227        u32 grpid;
 228        u16 ngrps = 0;
 229        u16 status;
 230
 231        status = NVME_SC_INTERNAL;
 232        desc = kmalloc(sizeof(struct nvme_ana_group_desc) +
 233                        NVMET_MAX_NAMESPACES * sizeof(__le32), GFP_KERNEL);
 234        if (!desc)
 235                goto out;
 236
 237        down_read(&nvmet_ana_sem);
 238        for (grpid = 1; grpid <= NVMET_MAX_ANAGRPS; grpid++) {
 239                if (!nvmet_ana_group_enabled[grpid])
 240                        continue;
 241                len = nvmet_format_ana_group(req, grpid, desc);
 242                status = nvmet_copy_to_sgl(req, offset, desc, len);
 243                if (status)
 244                        break;
 245                offset += len;
 246                ngrps++;
 247        }
 248        for ( ; grpid <= NVMET_MAX_ANAGRPS; grpid++) {
 249                if (nvmet_ana_group_enabled[grpid])
 250                        ngrps++;
 251        }
 252
 253        hdr.chgcnt = cpu_to_le64(nvmet_ana_chgcnt);
 254        hdr.ngrps = cpu_to_le16(ngrps);
 255        nvmet_clear_aen(req, NVME_AEN_CFG_ANA_CHANGE);
 256        up_read(&nvmet_ana_sem);
 257
 258        kfree(desc);
 259
 260        /* copy the header last once we know the number of groups */
 261        status = nvmet_copy_to_sgl(req, 0, &hdr, sizeof(hdr));
 262out:
 263        nvmet_req_complete(req, status);
 264}
 265
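     /*
      * Identify Controller: built from a mix of subsystem/controller state
      * and fixed defaults for this target implementation.
      */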
 266static void nvmet_execute_identify_ctrl(struct nvmet_req *req)
 267{
 268        struct nvmet_ctrl *ctrl = req->sq->ctrl;
 269        struct nvme_id_ctrl *id;
 270        u16 status = 0;
 271        const char model[] = "Linux";
 272
 273        id = kzalloc(sizeof(*id), GFP_KERNEL);
 274        if (!id) {
 275                status = NVME_SC_INTERNAL;
 276                goto out;
 277        }
 278
 279        /* XXX: figure out how to assign real vendor IDs. */
 280        id->vid = 0;
 281        id->ssvid = 0;
 282
 283        memset(id->sn, ' ', sizeof(id->sn));
 284        bin2hex(id->sn, &ctrl->subsys->serial,
 285                min(sizeof(ctrl->subsys->serial), sizeof(id->sn) / 2));
 286        memcpy_and_pad(id->mn, sizeof(id->mn), model, sizeof(model) - 1, ' ');
 287        memcpy_and_pad(id->fr, sizeof(id->fr),
 288                       UTS_RELEASE, strlen(UTS_RELEASE), ' ');
 289
 290        id->rab = 6;
 291
 292        /*
 293         * XXX: figure out how we can assign an IEEE OUI, but until then
 294         * the safest thing is to leave it as all zeroes.
 295         */
 296
 297        /* we support multiple ports, multiple hosts and ANA: */
 298        id->cmic = (1 << 0) | (1 << 1) | (1 << 3);
 299
 300        /* no limit on data transfer sizes for now */
 301        id->mdts = 0;
 302        id->cntlid = cpu_to_le16(ctrl->cntlid);
 303        id->ver = cpu_to_le32(ctrl->subsys->ver);
 304
 305        /* XXX: figure out what to do about RTD3R/RTD3 */
 306        id->oaes = cpu_to_le32(NVMET_AEN_CFG_OPTIONAL);
 307        id->ctratt = cpu_to_le32(1 << 0);
 308
 309        id->oacs = 0;
 310
 311        /*
 312         * We don't really have a practical limit on the number of abort
 313         * commands.  But we don't do anything useful for abort either, so
 314         * no point in allowing more abort commands than the spec requires.
 315         */
 316        id->acl = 3;
 317
 318        id->aerl = NVMET_ASYNC_EVENTS - 1;
 319
 320        /* first slot is read-only, only one slot supported */
 321        id->frmw = (1 << 0) | (1 << 1);
 322        id->lpa = (1 << 0) | (1 << 1) | (1 << 2);
 323        id->elpe = NVMET_ERROR_LOG_SLOTS - 1;
 324        id->npss = 0;
 325
 326        /* We support keep-alive timeouts with a granularity of seconds */
 327        id->kas = cpu_to_le16(NVMET_KAS);
 328
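             /* fixed queue entry sizes, log2 encoded: 64-byte SQEs, 16-byte CQEs */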
 329        id->sqes = (0x6 << 4) | 0x6;
 330        id->cqes = (0x4 << 4) | 0x4;
 331
 332        /* no enforced soft limit for maxcmd - pick an arbitrarily high value */
 333        id->maxcmd = cpu_to_le16(NVMET_MAX_CMD);
 334
 335        id->nn = cpu_to_le32(ctrl->subsys->max_nsid);
 336        id->mnan = cpu_to_le32(NVMET_MAX_NAMESPACES);
 337        id->oncs = cpu_to_le16(NVME_CTRL_ONCS_DSM |
 338                        NVME_CTRL_ONCS_WRITE_ZEROES);
 339
 340        /* XXX: don't report vwc if the underlying device is write-through */
 341        id->vwc = NVME_CTRL_VWC_PRESENT;
 342
 343        /*
 344         * We can't support atomic writes bigger than an LBA without support
 345         * from the backend device.
 346         */
 347        id->awun = 0;
 348        id->awupf = 0;
 349
 350        id->sgls = cpu_to_le32(1 << 0); /* we always support SGLs */
 351        if (ctrl->ops->has_keyed_sgls)
 352                id->sgls |= cpu_to_le32(1 << 2);
 353        if (req->port->inline_data_size)
 354                id->sgls |= cpu_to_le32(1 << 20);
 355
 356        strcpy(id->subnqn, ctrl->subsys->subsysnqn);
 357
 358        /* Max command capsule size is sqe + single page of in-capsule data */
 359        id->ioccsz = cpu_to_le32((sizeof(struct nvme_command) +
 360                                  req->port->inline_data_size) / 16);
 361        /* Max response capsule size is cqe */
 362        id->iorcsz = cpu_to_le32(sizeof(struct nvme_completion) / 16);
 363
 364        id->msdbd = ctrl->ops->msdbd;
 365
 366        id->anacap = (1 << 0) | (1 << 1) | (1 << 2) | (1 << 3) | (1 << 4);
 367        id->anatt = 10; /* arbitrary value, in seconds */
 368        id->anagrpmax = cpu_to_le32(NVMET_MAX_ANAGRPS);
 369        id->nanagrpid = cpu_to_le32(NVMET_MAX_ANAGRPS);
 370
 371        /*
 372         * Meh, we don't really support any power state.  Fake up the same
 373         * values that qemu does.
 374         */
 375        id->psd[0].max_power = cpu_to_le16(0x9c4);
 376        id->psd[0].entry_lat = cpu_to_le32(0x10);
 377        id->psd[0].exit_lat = cpu_to_le32(0x4);
 378
 379        id->nwpc = 1 << 0; /* write protect and no write protect */
 380
 381        status = nvmet_copy_to_sgl(req, 0, id, sizeof(*id));
 382
 383        kfree(id);
 384out:
 385        nvmet_req_complete(req, status);
 386}
 387
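     /*
      * Identify Namespace: report a single LBA format derived from the
      * backend block size; an NSID that is not active returns an all-zeroes
      * structure.
      */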
 388static void nvmet_execute_identify_ns(struct nvmet_req *req)
 389{
 390        struct nvmet_ns *ns;
 391        struct nvme_id_ns *id;
 392        u16 status = 0;
 393
 394        if (le32_to_cpu(req->cmd->identify.nsid) == NVME_NSID_ALL) {
 395                status = NVME_SC_INVALID_NS | NVME_SC_DNR;
 396                goto out;
 397        }
 398
 399        id = kzalloc(sizeof(*id), GFP_KERNEL);
 400        if (!id) {
 401                status = NVME_SC_INTERNAL;
 402                goto out;
 403        }
 404
 405        /* return an all zeroed buffer if we can't find an active namespace */
 406        ns = nvmet_find_namespace(req->sq->ctrl, req->cmd->identify.nsid);
 407        if (!ns)
 408                goto done;
 409
 410        /*
 411         * nuse = ncap = nsze isn't always true, but we have no way to find
 412         * that out from the underlying device.
 413         */
 414        id->ncap = id->nsze = cpu_to_le64(ns->size >> ns->blksize_shift);
 415        switch (req->port->ana_state[ns->anagrpid]) {
 416        case NVME_ANA_INACCESSIBLE:
 417        case NVME_ANA_PERSISTENT_LOSS:
 418                break;
 419        default:
 420                id->nuse = id->nsze;
 421                break;
 422        }
 423
 424        /*
 425         * We just provide a single LBA format that matches what the
 426         * underlying device reports.
 427         */
 428        id->nlbaf = 0;
 429        id->flbas = 0;
 430
 431        /*
 432         * Our namespace might always be shared, not just with other
 433         * controllers but also with any other user of the block device.
 434         */
 435        id->nmic = (1 << 0);
 436        id->anagrpid = cpu_to_le32(ns->anagrpid);
 437
 438        memcpy(&id->nguid, &ns->nguid, sizeof(id->nguid));
 439
 440        id->lbaf[0].ds = ns->blksize_shift;
 441
 442        if (ns->readonly)
 443                id->nsattr |= (1 << 0);
 444        nvmet_put_namespace(ns);
 445done:
 446        status = nvmet_copy_to_sgl(req, 0, id, sizeof(*id));
 447        kfree(id);
 448out:
 449        nvmet_req_complete(req, status);
 450}
 451
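     /*
      * Active Namespace ID list: return the NSIDs greater than the one given
      * in the command, truncated to a single 4KB identify buffer.
      */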
 452static void nvmet_execute_identify_nslist(struct nvmet_req *req)
 453{
 454        static const int buf_size = NVME_IDENTIFY_DATA_SIZE;
 455        struct nvmet_ctrl *ctrl = req->sq->ctrl;
 456        struct nvmet_ns *ns;
 457        u32 min_nsid = le32_to_cpu(req->cmd->identify.nsid);
 458        __le32 *list;
 459        u16 status = 0;
 460        int i = 0;
 461
 462        list = kzalloc(buf_size, GFP_KERNEL);
 463        if (!list) {
 464                status = NVME_SC_INTERNAL;
 465                goto out;
 466        }
 467
 468        rcu_read_lock();
 469        list_for_each_entry_rcu(ns, &ctrl->subsys->namespaces, dev_link) {
 470                if (ns->nsid <= min_nsid)
 471                        continue;
 472                list[i++] = cpu_to_le32(ns->nsid);
 473                if (i == buf_size / sizeof(__le32))
 474                        break;
 475        }
 476        rcu_read_unlock();
 477
 478        status = nvmet_copy_to_sgl(req, 0, list, buf_size);
 479
 480        kfree(list);
 481out:
 482        nvmet_req_complete(req, status);
 483}
 484
 485static u16 nvmet_copy_ns_identifier(struct nvmet_req *req, u8 type, u8 len,
 486                                    void *id, off_t *off)
 487{
 488        struct nvme_ns_id_desc desc = {
 489                .nidt = type,
 490                .nidl = len,
 491        };
 492        u16 status;
 493
 494        status = nvmet_copy_to_sgl(req, *off, &desc, sizeof(desc));
 495        if (status)
 496                return status;
 497        *off += sizeof(desc);
 498
 499        status = nvmet_copy_to_sgl(req, *off, id, len);
 500        if (status)
 501                return status;
 502        *off += len;
 503
 504        return 0;
 505}
 506
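     /*
      * Namespace Identification Descriptor list: emit a UUID and/or NGUID
      * descriptor when the namespace has one set, then zero-fill the rest of
      * the buffer.
      */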
 507static void nvmet_execute_identify_desclist(struct nvmet_req *req)
 508{
 509        struct nvmet_ns *ns;
 510        u16 status = 0;
 511        off_t off = 0;
 512
 513        ns = nvmet_find_namespace(req->sq->ctrl, req->cmd->identify.nsid);
 514        if (!ns) {
 515                status = NVME_SC_INVALID_NS | NVME_SC_DNR;
 516                goto out;
 517        }
 518
 519        if (memchr_inv(&ns->uuid, 0, sizeof(ns->uuid))) {
 520                status = nvmet_copy_ns_identifier(req, NVME_NIDT_UUID,
 521                                                  NVME_NIDT_UUID_LEN,
 522                                                  &ns->uuid, &off);
 523                if (status)
 524                        goto out_put_ns;
 525        }
 526        if (memchr_inv(ns->nguid, 0, sizeof(ns->nguid))) {
 527                status = nvmet_copy_ns_identifier(req, NVME_NIDT_NGUID,
 528                                                  NVME_NIDT_NGUID_LEN,
 529                                                  &ns->nguid, &off);
 530                if (status)
 531                        goto out_put_ns;
 532        }
 533
 534        if (sg_zero_buffer(req->sg, req->sg_cnt, NVME_IDENTIFY_DATA_SIZE - off,
 535                        off) != NVME_IDENTIFY_DATA_SIZE - off)
 536                status = NVME_SC_INTERNAL | NVME_SC_DNR;
 537out_put_ns:
 538        nvmet_put_namespace(ns);
 539out:
 540        nvmet_req_complete(req, status);
 541}
 542
 543/*
 544 * A "minimum viable" abort implementation: the command is mandatory in the
 545 * spec, but we are not required to do any useful work.  We couldn't really
 546 * do a useful abort, so don't even bother waiting for the command
 547 * to be executed; return immediately, indicating that the command
 548 * to abort wasn't found.
 549 */
 550static void nvmet_execute_abort(struct nvmet_req *req)
 551{
 552        nvmet_set_result(req, 1);
 553        nvmet_req_complete(req, 0);
 554}
 555
 556static u16 nvmet_write_protect_flush_sync(struct nvmet_req *req)
 557{
 558        u16 status;
 559
 560        if (req->ns->file)
 561                status = nvmet_file_flush(req);
 562        else
 563                status = nvmet_bdev_flush(req);
 564
 565        if (status)
 566                pr_err("write protect flush failed nsid: %u\n", req->ns->nsid);
 567        return status;
 568}
 569
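     /*
      * Set Features, Write Protect: toggle the per-namespace read-only flag.
      * Enabling write protection also flushes the namespace so no dirty data
      * is lost; any successful change is reported as a namespace change.
      */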
 570static u16 nvmet_set_feat_write_protect(struct nvmet_req *req)
 571{
 572        u32 write_protect = le32_to_cpu(req->cmd->common.cdw10[1]);
 573        struct nvmet_subsys *subsys = req->sq->ctrl->subsys;
 574        u16 status = NVME_SC_FEATURE_NOT_CHANGEABLE;
 575
 576        req->ns = nvmet_find_namespace(req->sq->ctrl, req->cmd->rw.nsid);
 577        if (unlikely(!req->ns))
 578                return status;
 579
 580        mutex_lock(&subsys->lock);
 581        switch (write_protect) {
 582        case NVME_NS_WRITE_PROTECT:
 583                req->ns->readonly = true;
 584                status = nvmet_write_protect_flush_sync(req);
 585                if (status)
 586                        req->ns->readonly = false;
 587                break;
 588        case NVME_NS_NO_WRITE_PROTECT:
 589                req->ns->readonly = false;
 590                status = 0;
 591                break;
 592        default:
 593                break;
 594        }
 595
 596        if (!status)
 597                nvmet_ns_changed(subsys, req->ns->nsid);
 598        mutex_unlock(&subsys->lock);
 599        return status;
 600}
 601
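     /*
      * Set Features: only Number of Queues, Keep Alive Timer, Asynchronous
      * Event Configuration and Write Protect can be set here; the host
      * identifier is rejected with a command sequence error, and anything
      * else fails with Invalid Field.
      */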
 602static void nvmet_execute_set_features(struct nvmet_req *req)
 603{
 604        struct nvmet_subsys *subsys = req->sq->ctrl->subsys;
 605        u32 cdw10 = le32_to_cpu(req->cmd->common.cdw10[0]);
 606        u32 val32;
 607        u16 status = 0;
 608
 609        switch (cdw10 & 0xff) {
 610        case NVME_FEAT_NUM_QUEUES:
 611                nvmet_set_result(req,
 612                        (subsys->max_qid - 1) | ((subsys->max_qid - 1) << 16));
 613                break;
 614        case NVME_FEAT_KATO:
 615                val32 = le32_to_cpu(req->cmd->common.cdw10[1]);
 616                req->sq->ctrl->kato = DIV_ROUND_UP(val32, 1000);
 617                nvmet_set_result(req, req->sq->ctrl->kato);
 618                break;
 619        case NVME_FEAT_ASYNC_EVENT:
 620                val32 = le32_to_cpu(req->cmd->common.cdw10[1]);
 621                if (val32 & ~NVMET_AEN_CFG_ALL) {
 622                        status = NVME_SC_INVALID_FIELD | NVME_SC_DNR;
 623                        break;
 624                }
 625
 626                WRITE_ONCE(req->sq->ctrl->aen_enabled, val32);
 627                nvmet_set_result(req, val32);
 628                break;
 629        case NVME_FEAT_HOST_ID:
 630                status = NVME_SC_CMD_SEQ_ERROR | NVME_SC_DNR;
 631                break;
 632        case NVME_FEAT_WRITE_PROTECT:
 633                status = nvmet_set_feat_write_protect(req);
 634                break;
 635        default:
 636                status = NVME_SC_INVALID_FIELD | NVME_SC_DNR;
 637                break;
 638        }
 639
 640        nvmet_req_complete(req, status);
 641}
 642
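     /* Get Features, Write Protect: report the current write protection state */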
 643static u16 nvmet_get_feat_write_protect(struct nvmet_req *req)
 644{
 645        struct nvmet_subsys *subsys = req->sq->ctrl->subsys;
 646        u32 result;
 647
 648        req->ns = nvmet_find_namespace(req->sq->ctrl, req->cmd->common.nsid);
 649        if (!req->ns)
 650                return NVME_SC_INVALID_NS | NVME_SC_DNR;
 651
 652        mutex_lock(&subsys->lock);
 653        if (req->ns->readonly)
 654                result = NVME_NS_WRITE_PROTECT;
 655        else
 656                result = NVME_NS_NO_WRITE_PROTECT;
 657        nvmet_set_result(req, result);
 658        mutex_unlock(&subsys->lock);
 659
 660        return 0;
 661}
 662
 663static void nvmet_execute_get_features(struct nvmet_req *req)
 664{
 665        struct nvmet_subsys *subsys = req->sq->ctrl->subsys;
 666        u32 cdw10 = le32_to_cpu(req->cmd->common.cdw10[0]);
 667        u16 status = 0;
 668
 669        switch (cdw10 & 0xff) {
 670        /*
 671         * These features are mandatory in the spec, but we don't
 672         * have a useful way to implement them.  We'll eventually
 673         * need to come up with some fake values for these.
 674         */
 675#if 0
 676        case NVME_FEAT_ARBITRATION:
 677                break;
 678        case NVME_FEAT_POWER_MGMT:
 679                break;
 680        case NVME_FEAT_TEMP_THRESH:
 681                break;
 682        case NVME_FEAT_ERR_RECOVERY:
 683                break;
 684        case NVME_FEAT_IRQ_COALESCE:
 685                break;
 686        case NVME_FEAT_IRQ_CONFIG:
 687                break;
 688        case NVME_FEAT_WRITE_ATOMIC:
 689                break;
 690#endif
 691        case NVME_FEAT_ASYNC_EVENT:
 692                nvmet_set_result(req, READ_ONCE(req->sq->ctrl->aen_enabled));
 693                break;
 694        case NVME_FEAT_VOLATILE_WC:
 695                nvmet_set_result(req, 1);
 696                break;
 697        case NVME_FEAT_NUM_QUEUES:
 698                nvmet_set_result(req,
 699                        (subsys->max_qid-1) | ((subsys->max_qid-1) << 16));
 700                break;
 701        case NVME_FEAT_KATO:
 702                nvmet_set_result(req, req->sq->ctrl->kato * 1000);
 703                break;
 704        case NVME_FEAT_HOST_ID:
 705                /* need 128-bit host identifier flag */
 706                if (!(req->cmd->common.cdw10[1] & cpu_to_le32(1 << 0))) {
 707                        status = NVME_SC_INVALID_FIELD | NVME_SC_DNR;
 708                        break;
 709                }
 710
 711                status = nvmet_copy_to_sgl(req, 0, &req->sq->ctrl->hostid,
 712                                sizeof(req->sq->ctrl->hostid));
 713                break;
 714        case NVME_FEAT_WRITE_PROTECT:
 715                status = nvmet_get_feat_write_protect(req);
 716                break;
 717        default:
 718                status = NVME_SC_INVALID_FIELD | NVME_SC_DNR;
 719                break;
 720        }
 721
 722        nvmet_req_complete(req, status);
 723}
 724
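     /*
      * Asynchronous Event Request: queue the command until an event is
      * generated, or fail once NVMET_ASYNC_EVENTS commands are already
      * outstanding.
      */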
 725static void nvmet_execute_async_event(struct nvmet_req *req)
 726{
 727        struct nvmet_ctrl *ctrl = req->sq->ctrl;
 728
 729        mutex_lock(&ctrl->lock);
 730        if (ctrl->nr_async_event_cmds >= NVMET_ASYNC_EVENTS) {
 731                mutex_unlock(&ctrl->lock);
 732                nvmet_req_complete(req, NVME_SC_ASYNC_LIMIT | NVME_SC_DNR);
 733                return;
 734        }
 735        ctrl->async_event_cmds[ctrl->nr_async_event_cmds++] = req;
 736        mutex_unlock(&ctrl->lock);
 737
 738        schedule_work(&ctrl->async_event_work);
 739}
 740
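     /* Keep Alive: restart the keep-alive timer using the controller's KATO value */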
 741static void nvmet_execute_keep_alive(struct nvmet_req *req)
 742{
 743        struct nvmet_ctrl *ctrl = req->sq->ctrl;
 744
 745        pr_debug("ctrl %d update keep-alive timer for %d secs\n",
 746                ctrl->cntlid, ctrl->kato);
 747
 748        mod_delayed_work(system_wq, &ctrl->ka_work, ctrl->kato * HZ);
 749        nvmet_req_complete(req, 0);
 750}
 751
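     /*
      * Admin command dispatch: set the expected data length and the execute
      * handler for supported commands; unsupported opcodes fail with
      * Invalid Opcode.
      */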
 752u16 nvmet_parse_admin_cmd(struct nvmet_req *req)
 753{
 754        struct nvme_command *cmd = req->cmd;
 755        u16 ret;
 756
 757        ret = nvmet_check_ctrl_status(req, cmd);
 758        if (unlikely(ret))
 759                return ret;
 760
 761        switch (cmd->common.opcode) {
 762        case nvme_admin_get_log_page:
 763                req->data_len = nvmet_get_log_page_len(cmd);
 764
 765                switch (cmd->get_log_page.lid) {
 766                case NVME_LOG_ERROR:
 767                        /*
 768                         * We currently never set the More bit in the status
 769                         * field, so all error log entries are invalid and can
 770                         * be zeroed out.  This is called a minimum viable
 771                         * implementation (TM) of this mandatory log page.
 772                         */
 773                        req->execute = nvmet_execute_get_log_page_noop;
 774                        return 0;
 775                case NVME_LOG_SMART:
 776                        req->execute = nvmet_execute_get_log_page_smart;
 777                        return 0;
 778                case NVME_LOG_FW_SLOT:
 779                        /*
 780                         * We only support a single firmware slot, which is always
 781                         * active, so we can zero out the whole firmware slot
 782                         * log and still claim to fully implement this mandatory
 783                         * log page.
 784                         */
 785                        req->execute = nvmet_execute_get_log_page_noop;
 786                        return 0;
 787                case NVME_LOG_CHANGED_NS:
 788                        req->execute = nvmet_execute_get_log_changed_ns;
 789                        return 0;
 790                case NVME_LOG_CMD_EFFECTS:
 791                        req->execute = nvmet_execute_get_log_cmd_effects_ns;
 792                        return 0;
 793                case NVME_LOG_ANA:
 794                        req->execute = nvmet_execute_get_log_page_ana;
 795                        return 0;
 796                }
 797                break;
 798        case nvme_admin_identify:
 799                req->data_len = NVME_IDENTIFY_DATA_SIZE;
 800                switch (cmd->identify.cns) {
 801                case NVME_ID_CNS_NS:
 802                        req->execute = nvmet_execute_identify_ns;
 803                        return 0;
 804                case NVME_ID_CNS_CTRL:
 805                        req->execute = nvmet_execute_identify_ctrl;
 806                        return 0;
 807                case NVME_ID_CNS_NS_ACTIVE_LIST:
 808                        req->execute = nvmet_execute_identify_nslist;
 809                        return 0;
 810                case NVME_ID_CNS_NS_DESC_LIST:
 811                        req->execute = nvmet_execute_identify_desclist;
 812                        return 0;
 813                }
 814                break;
 815        case nvme_admin_abort_cmd:
 816                req->execute = nvmet_execute_abort;
 817                req->data_len = 0;
 818                return 0;
 819        case nvme_admin_set_features:
 820                req->execute = nvmet_execute_set_features;
 821                req->data_len = 0;
 822                return 0;
 823        case nvme_admin_get_features:
 824                req->execute = nvmet_execute_get_features;
 825                req->data_len = 0;
 826                return 0;
 827        case nvme_admin_async_event:
 828                req->execute = nvmet_execute_async_event;
 829                req->data_len = 0;
 830                return 0;
 831        case nvme_admin_keep_alive:
 832                req->execute = nvmet_execute_keep_alive;
 833                req->data_len = 0;
 834                return 0;
 835        }
 836
 837        pr_err("unhandled cmd %d on qid %d\n", cmd->common.opcode,
 838               req->sq->qid);
 839        return NVME_SC_INVALID_OPCODE | NVME_SC_DNR;
 840}
 841