linux/drivers/nvme/host/lightnvm.c
/*
 * nvme-lightnvm.c - LightNVM NVMe device
 *
 * Copyright (C) 2014-2015 IT University of Copenhagen
 * Initial release: Matias Bjorling <mb@lightnvm.io>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version
 * 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; see the file COPYING.  If not, write to
 * the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139,
 * USA.
 *
 */

#include "nvme.h"

#include <linux/nvme.h>
#include <linux/bitops.h>
#include <linux/lightnvm.h>
#include <linux/vmalloc.h>
#include <linux/sched/sysctl.h>
#include <uapi/linux/lightnvm.h>

enum nvme_nvm_admin_opcode {
        nvme_nvm_admin_identity         = 0xe2,
        nvme_nvm_admin_get_l2p_tbl      = 0xea,
        nvme_nvm_admin_get_bb_tbl       = 0xf2,
        nvme_nvm_admin_set_bb_tbl       = 0xf1,
};

struct nvme_nvm_hb_rw {
        __u8                    opcode;
        __u8                    flags;
        __u16                   command_id;
        __le32                  nsid;
        __u64                   rsvd2;
        __le64                  metadata;
        __le64                  prp1;
        __le64                  prp2;
        __le64                  spba;
        __le16                  length;
        __le16                  control;
        __le32                  dsmgmt;
        __le64                  slba;
};

struct nvme_nvm_ph_rw {
        __u8                    opcode;
        __u8                    flags;
        __u16                   command_id;
        __le32                  nsid;
        __u64                   rsvd2;
        __le64                  metadata;
        __le64                  prp1;
        __le64                  prp2;
        __le64                  spba;
        __le16                  length;
        __le16                  control;
        __le32                  dsmgmt;
        __le64                  resv;
};

struct nvme_nvm_identity {
        __u8                    opcode;
        __u8                    flags;
        __u16                   command_id;
        __le32                  nsid;
        __u64                   rsvd[2];
        __le64                  prp1;
        __le64                  prp2;
        __le32                  chnl_off;
        __u32                   rsvd11[5];
};

struct nvme_nvm_l2ptbl {
        __u8                    opcode;
        __u8                    flags;
        __u16                   command_id;
        __le32                  nsid;
        __le32                  cdw2[4];
        __le64                  prp1;
        __le64                  prp2;
        __le64                  slba;
        __le32                  nlb;
        __le16                  cdw14[6];
};

struct nvme_nvm_getbbtbl {
        __u8                    opcode;
        __u8                    flags;
        __u16                   command_id;
        __le32                  nsid;
        __u64                   rsvd[2];
        __le64                  prp1;
        __le64                  prp2;
        __le64                  spba;
        __u32                   rsvd4[4];
};

struct nvme_nvm_setbbtbl {
        __u8                    opcode;
        __u8                    flags;
        __u16                   command_id;
        __le32                  nsid;
        __le64                  rsvd[2];
        __le64                  prp1;
        __le64                  prp2;
        __le64                  spba;
        __le16                  nlb;
        __u8                    value;
        __u8                    rsvd3;
        __u32                   rsvd4[3];
};

struct nvme_nvm_erase_blk {
        __u8                    opcode;
        __u8                    flags;
        __u16                   command_id;
        __le32                  nsid;
        __u64                   rsvd[2];
        __le64                  prp1;
        __le64                  prp2;
        __le64                  spba;
        __le16                  length;
        __le16                  control;
        __le32                  dsmgmt;
        __le64                  resv;
};

struct nvme_nvm_command {
        union {
                struct nvme_common_command common;
                struct nvme_nvm_identity identity;
                struct nvme_nvm_hb_rw hb_rw;
                struct nvme_nvm_ph_rw ph_rw;
                struct nvme_nvm_l2ptbl l2p;
                struct nvme_nvm_getbbtbl get_bb;
                struct nvme_nvm_setbbtbl set_bb;
                struct nvme_nvm_erase_blk erase;
        };
};

#define NVME_NVM_LP_MLC_PAIRS 886
struct nvme_nvm_lp_mlc {
        __le16                  num_pairs;
        __u8                    pairs[NVME_NVM_LP_MLC_PAIRS];
};

struct nvme_nvm_lp_tbl {
        __u8                    id[8];
        struct nvme_nvm_lp_mlc  mlc;
};

struct nvme_nvm_id_group {
        __u8                    mtype;
        __u8                    fmtype;
        __le16                  res16;
        __u8                    num_ch;
        __u8                    num_lun;
        __u8                    num_pln;
        __u8                    rsvd1;
        __le16                  num_blk;
        __le16                  num_pg;
        __le16                  fpg_sz;
        __le16                  csecs;
        __le16                  sos;
        __le16                  rsvd2;
        __le32                  trdt;
        __le32                  trdm;
        __le32                  tprt;
        __le32                  tprm;
        __le32                  tbet;
        __le32                  tbem;
        __le32                  mpos;
        __le32                  mccap;
        __le16                  cpar;
        __u8                    reserved[10];
        struct nvme_nvm_lp_tbl lptbl;
} __packed;

struct nvme_nvm_addr_format {
        __u8                    ch_offset;
        __u8                    ch_len;
        __u8                    lun_offset;
        __u8                    lun_len;
        __u8                    pln_offset;
        __u8                    pln_len;
        __u8                    blk_offset;
        __u8                    blk_len;
        __u8                    pg_offset;
        __u8                    pg_len;
        __u8                    sect_offset;
        __u8                    sect_len;
        __u8                    res[4];
} __packed;

struct nvme_nvm_id {
        __u8                    ver_id;
        __u8                    vmnt;
        __u8                    cgrps;
        __u8                    res;
        __le32                  cap;
        __le32                  dom;
        struct nvme_nvm_addr_format ppaf;
        __u8                    resv[228];
        struct nvme_nvm_id_group groups[4];
} __packed;

struct nvme_nvm_bb_tbl {
        __u8    tblid[4];
        __le16  verid;
        __le16  revid;
        __le32  rvsd1;
        __le32  tblks;
        __le32  tfact;
        __le32  tgrown;
        __le32  tdresv;
        __le32  thresv;
        __le32  rsvd2[8];
        __u8    blk[0];
};

/*
 * Check we didn't inadvertently grow the command struct
 */
static inline void _nvme_nvm_check_size(void)
{
        BUILD_BUG_ON(sizeof(struct nvme_nvm_identity) != 64);
        BUILD_BUG_ON(sizeof(struct nvme_nvm_hb_rw) != 64);
        BUILD_BUG_ON(sizeof(struct nvme_nvm_ph_rw) != 64);
        BUILD_BUG_ON(sizeof(struct nvme_nvm_getbbtbl) != 64);
        BUILD_BUG_ON(sizeof(struct nvme_nvm_setbbtbl) != 64);
        BUILD_BUG_ON(sizeof(struct nvme_nvm_l2ptbl) != 64);
        BUILD_BUG_ON(sizeof(struct nvme_nvm_erase_blk) != 64);
        BUILD_BUG_ON(sizeof(struct nvme_nvm_id_group) != 960);
        BUILD_BUG_ON(sizeof(struct nvme_nvm_addr_format) != 16);
        BUILD_BUG_ON(sizeof(struct nvme_nvm_id) != 4096);
        BUILD_BUG_ON(sizeof(struct nvme_nvm_bb_tbl) != 64);
}

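/*
 * Copy the first (and only supported) configuration group of the
 * identify structure into the generic nvm_id, converting little-endian
 * fields to CPU order. MLC pair data is copied as well when the flash
 * media type reports MLC.
 */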
static int init_grps(struct nvm_id *nvm_id, struct nvme_nvm_id *nvme_nvm_id)
{
        struct nvme_nvm_id_group *src;
        struct nvm_id_group *dst;

        if (nvme_nvm_id->cgrps != 1)
                return -EINVAL;

        src = &nvme_nvm_id->groups[0];
        dst = &nvm_id->grp;

        dst->mtype = src->mtype;
        dst->fmtype = src->fmtype;
        dst->num_ch = src->num_ch;
        dst->num_lun = src->num_lun;
        dst->num_pln = src->num_pln;

        dst->num_pg = le16_to_cpu(src->num_pg);
        dst->num_blk = le16_to_cpu(src->num_blk);
        dst->fpg_sz = le16_to_cpu(src->fpg_sz);
        dst->csecs = le16_to_cpu(src->csecs);
        dst->sos = le16_to_cpu(src->sos);

        dst->trdt = le32_to_cpu(src->trdt);
        dst->trdm = le32_to_cpu(src->trdm);
        dst->tprt = le32_to_cpu(src->tprt);
        dst->tprm = le32_to_cpu(src->tprm);
        dst->tbet = le32_to_cpu(src->tbet);
        dst->tbem = le32_to_cpu(src->tbem);
        dst->mpos = le32_to_cpu(src->mpos);
        dst->mccap = le32_to_cpu(src->mccap);

        dst->cpar = le16_to_cpu(src->cpar);

        if (dst->fmtype == NVM_ID_FMTYPE_MLC) {
                memcpy(dst->lptbl.id, src->lptbl.id, 8);
                dst->lptbl.mlc.num_pairs =
                                le16_to_cpu(src->lptbl.mlc.num_pairs);

                if (dst->lptbl.mlc.num_pairs > NVME_NVM_LP_MLC_PAIRS) {
                        pr_err("nvm: number of MLC pairs not supported\n");
                        return -EINVAL;
                }

                memcpy(dst->lptbl.mlc.pairs, src->lptbl.mlc.pairs,
                                        dst->lptbl.mlc.num_pairs);
        }

        return 0;
}

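/*
 * Issue the vendor specific identify command and translate the 4k
 * identify structure into the generic nvm_id representation.
 */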
static int nvme_nvm_identity(struct nvm_dev *nvmdev, struct nvm_id *nvm_id)
{
        struct nvme_ns *ns = nvmdev->q->queuedata;
        struct nvme_nvm_id *nvme_nvm_id;
        struct nvme_nvm_command c = {};
        int ret;

        c.identity.opcode = nvme_nvm_admin_identity;
        c.identity.nsid = cpu_to_le32(ns->ns_id);
        c.identity.chnl_off = 0;

        nvme_nvm_id = kmalloc(sizeof(struct nvme_nvm_id), GFP_KERNEL);
        if (!nvme_nvm_id)
                return -ENOMEM;

        ret = nvme_submit_sync_cmd(ns->ctrl->admin_q, (struct nvme_command *)&c,
                                nvme_nvm_id, sizeof(struct nvme_nvm_id));
        if (ret) {
                ret = -EIO;
                goto out;
        }

        nvm_id->ver_id = nvme_nvm_id->ver_id;
        nvm_id->vmnt = nvme_nvm_id->vmnt;
        nvm_id->cap = le32_to_cpu(nvme_nvm_id->cap);
        nvm_id->dom = le32_to_cpu(nvme_nvm_id->dom);
        memcpy(&nvm_id->ppaf, &nvme_nvm_id->ppaf,
                                        sizeof(struct nvm_addr_format));

        ret = init_grps(nvm_id, nvme_nvm_id);
out:
        kfree(nvme_nvm_id);
        return ret;
}

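/*
 * Fetch the device's logical-to-physical table in chunks bounded by the
 * admin queue's maximum transfer size. Each chunk is translated to the
 * target address space and handed to the update_l2p callback.
 */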
static int nvme_nvm_get_l2p_tbl(struct nvm_dev *nvmdev, u64 slba, u32 nlb,
                                nvm_l2p_update_fn *update_l2p, void *priv)
{
        struct nvme_ns *ns = nvmdev->q->queuedata;
        struct nvme_nvm_command c = {};
        u32 len = queue_max_hw_sectors(ns->ctrl->admin_q) << 9;
        u32 nlb_pr_rq = len / sizeof(u64);
        u64 cmd_slba = slba;
        void *entries;
        int ret = 0;

        c.l2p.opcode = nvme_nvm_admin_get_l2p_tbl;
        c.l2p.nsid = cpu_to_le32(ns->ns_id);
        entries = kmalloc(len, GFP_KERNEL);
        if (!entries)
                return -ENOMEM;

        while (nlb) {
                u32 cmd_nlb = min(nlb_pr_rq, nlb);
                u64 elba = slba + cmd_nlb;

                c.l2p.slba = cpu_to_le64(cmd_slba);
                c.l2p.nlb = cpu_to_le32(cmd_nlb);

                ret = nvme_submit_sync_cmd(ns->ctrl->admin_q,
                                (struct nvme_command *)&c, entries, len);
                if (ret) {
                        dev_err(ns->ctrl->device,
                                "L2P table transfer failed (%d)\n", ret);
                        ret = -EIO;
                        goto out;
                }

                if (unlikely(elba > nvmdev->total_secs)) {
                        pr_err("nvm: L2P data from device is out of bounds!\n");
                        ret = -EINVAL;
                        goto out;
                }

                /* Transform physical address to target address space */
                nvm_part_to_tgt(nvmdev, entries, cmd_nlb);

                if (update_l2p(cmd_slba, cmd_nlb, entries, priv)) {
                        ret = -EINTR;
                        goto out;
                }

                cmd_slba += cmd_nlb;
                nlb -= cmd_nlb;
        }

out:
        kfree(entries);
        return ret;
}

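/*
 * Retrieve the bad block table for the LUN addressed by @ppa and copy
 * the per-block state bytes into @blks. The returned header must carry
 * the "BBLT" id, version 1, and exactly blks_per_lun * plane_mode
 * entries.
 */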
static int nvme_nvm_get_bb_tbl(struct nvm_dev *nvmdev, struct ppa_addr ppa,
                                                                u8 *blks)
{
        struct request_queue *q = nvmdev->q;
        struct nvm_geo *geo = &nvmdev->geo;
        struct nvme_ns *ns = q->queuedata;
        struct nvme_ctrl *ctrl = ns->ctrl;
        struct nvme_nvm_command c = {};
        struct nvme_nvm_bb_tbl *bb_tbl;
        int nr_blks = geo->blks_per_lun * geo->plane_mode;
        int tblsz = sizeof(struct nvme_nvm_bb_tbl) + nr_blks;
        int ret = 0;

        c.get_bb.opcode = nvme_nvm_admin_get_bb_tbl;
        c.get_bb.nsid = cpu_to_le32(ns->ns_id);
        c.get_bb.spba = cpu_to_le64(ppa.ppa);

        bb_tbl = kzalloc(tblsz, GFP_KERNEL);
        if (!bb_tbl)
                return -ENOMEM;

        ret = nvme_submit_sync_cmd(ctrl->admin_q, (struct nvme_command *)&c,
                                                                bb_tbl, tblsz);
        if (ret) {
                dev_err(ctrl->device, "get bad block table failed (%d)\n", ret);
                ret = -EIO;
                goto out;
        }

        if (bb_tbl->tblid[0] != 'B' || bb_tbl->tblid[1] != 'B' ||
                bb_tbl->tblid[2] != 'L' || bb_tbl->tblid[3] != 'T') {
                dev_err(ctrl->device, "bbt format mismatch\n");
                ret = -EINVAL;
                goto out;
        }

        if (le16_to_cpu(bb_tbl->verid) != 1) {
                ret = -EINVAL;
                dev_err(ctrl->device, "bbt version not supported\n");
                goto out;
        }

        if (le32_to_cpu(bb_tbl->tblks) != nr_blks) {
                ret = -EINVAL;
                dev_err(ctrl->device,
                                "bbt unexpected blocks returned (%u!=%u)",
                                le32_to_cpu(bb_tbl->tblks), nr_blks);
                goto out;
        }

        memcpy(blks, bb_tbl->blk, geo->blks_per_lun * geo->plane_mode);
out:
        kfree(bb_tbl);
        return ret;
}

static int nvme_nvm_set_bb_tbl(struct nvm_dev *nvmdev, struct ppa_addr *ppas,
                                                        int nr_ppas, int type)
{
        struct nvme_ns *ns = nvmdev->q->queuedata;
        struct nvme_nvm_command c = {};
        int ret = 0;

        c.set_bb.opcode = nvme_nvm_admin_set_bb_tbl;
        c.set_bb.nsid = cpu_to_le32(ns->ns_id);
        c.set_bb.spba = cpu_to_le64(ppas->ppa);
        c.set_bb.nlb = cpu_to_le16(nr_ppas - 1);
        c.set_bb.value = type;

        ret = nvme_submit_sync_cmd(ns->ctrl->admin_q, (struct nvme_command *)&c,
                                                                NULL, 0);
        if (ret)
                dev_err(ns->ctrl->device, "set bad block table failed (%d)\n",
                                                                        ret);
        return ret;
}

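/*
 * Translate a lightnvm request into the vendor specific physical
 * read/write command. Hybrid commands (NVM_OP_HBREAD/NVM_OP_HBWRITE)
 * additionally carry the logical start sector in slba.
 */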
static inline void nvme_nvm_rqtocmd(struct nvm_rq *rqd, struct nvme_ns *ns,
                                    struct nvme_nvm_command *c)
{
        c->ph_rw.opcode = rqd->opcode;
        c->ph_rw.nsid = cpu_to_le32(ns->ns_id);
        c->ph_rw.spba = cpu_to_le64(rqd->ppa_addr.ppa);
        c->ph_rw.metadata = cpu_to_le64(rqd->dma_meta_list);
        c->ph_rw.control = cpu_to_le16(rqd->flags);
        c->ph_rw.length = cpu_to_le16(rqd->nr_ppas - 1);

        if (rqd->opcode == NVM_OP_HBWRITE || rqd->opcode == NVM_OP_HBREAD)
                c->hb_rw.slba = cpu_to_le64(nvme_block_nr(ns,
                                        rqd->bio->bi_iter.bi_sector));
}

static void nvme_nvm_end_io(struct request *rq, int error)
{
        struct nvm_rq *rqd = rq->end_io_data;

        rqd->ppa_status = le64_to_cpu(nvme_req(rq)->result.u64);
        rqd->error = nvme_req(rq)->status;
        nvm_end_io(rqd);

        kfree(nvme_req(rq)->cmd);
        blk_mq_free_request(rq);
}

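/*
 * Submit a target I/O request. The command structure is allocated here
 * and freed in nvme_nvm_end_io() once result and status have been
 * copied back into the nvm_rq.
 */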
static int nvme_nvm_submit_io(struct nvm_dev *dev, struct nvm_rq *rqd)
{
        struct request_queue *q = dev->q;
        struct nvme_ns *ns = q->queuedata;
        struct request *rq;
        struct bio *bio = rqd->bio;
        struct nvme_nvm_command *cmd;

        cmd = kzalloc(sizeof(struct nvme_nvm_command), GFP_KERNEL);
        if (!cmd)
                return -ENOMEM;

        nvme_nvm_rqtocmd(rqd, ns, cmd);

        rq = nvme_alloc_request(q, (struct nvme_command *)cmd, 0, NVME_QID_ANY);
        if (IS_ERR(rq)) {
                kfree(cmd);
                return -ENOMEM;
        }
        rq->cmd_flags &= ~REQ_FAILFAST_DRIVER;

        if (bio) {
                blk_init_request_from_bio(rq, bio);
        } else {
                rq->ioprio = IOPRIO_PRIO_VALUE(IOPRIO_CLASS_BE, IOPRIO_NORM);
                rq->__data_len = 0;
        }

        rq->end_io_data = rqd;

        blk_execute_rq_nowait(q, NULL, rq, 0, nvme_nvm_end_io);

        return 0;
}

static void *nvme_nvm_create_dma_pool(struct nvm_dev *nvmdev, char *name)
{
        struct nvme_ns *ns = nvmdev->q->queuedata;

        return dma_pool_create(name, ns->ctrl->dev, PAGE_SIZE, PAGE_SIZE, 0);
}

static void nvme_nvm_destroy_dma_pool(void *pool)
{
        struct dma_pool *dma_pool = pool;

        dma_pool_destroy(dma_pool);
}

static void *nvme_nvm_dev_dma_alloc(struct nvm_dev *dev, void *pool,
                                    gfp_t mem_flags, dma_addr_t *dma_handler)
{
        return dma_pool_alloc(pool, mem_flags, dma_handler);
}

static void nvme_nvm_dev_dma_free(void *pool, void *addr,
                                                        dma_addr_t dma_handler)
{
        dma_pool_free(pool, addr, dma_handler);
}

static struct nvm_dev_ops nvme_nvm_dev_ops = {
        .identity               = nvme_nvm_identity,

        .get_l2p_tbl            = nvme_nvm_get_l2p_tbl,

        .get_bb_tbl             = nvme_nvm_get_bb_tbl,
        .set_bb_tbl             = nvme_nvm_set_bb_tbl,

        .submit_io              = nvme_nvm_submit_io,

        .create_dma_pool        = nvme_nvm_create_dma_pool,
        .destroy_dma_pool       = nvme_nvm_destroy_dma_pool,
        .dev_dma_alloc          = nvme_nvm_dev_dma_alloc,
        .dev_dma_free           = nvme_nvm_dev_dma_free,

        .max_phys_sect          = 64,
};

static void nvme_nvm_end_user_vio(struct request *rq, int error)
{
        struct completion *waiting = rq->end_io_data;

        complete(waiting);
}

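/*
 * Common path for the vector ioctls. The PPA list and metadata buffers
 * are staged in the device DMA pool, the data buffer is mapped straight
 * from user space, and the request is executed with an on-stack
 * completion before results are copied back to the caller.
 */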
static int nvme_nvm_submit_user_cmd(struct request_queue *q,
                                struct nvme_ns *ns,
                                struct nvme_nvm_command *vcmd,
                                void __user *ubuf, unsigned int bufflen,
                                void __user *meta_buf, unsigned int meta_len,
                                void __user *ppa_buf, unsigned int ppa_len,
                                u32 *result, u64 *status, unsigned int timeout)
{
        bool write = nvme_is_write((struct nvme_command *)vcmd);
        struct nvm_dev *dev = ns->ndev;
        struct gendisk *disk = ns->disk;
        struct request *rq;
        struct bio *bio = NULL;
        __le64 *ppa_list = NULL;
        dma_addr_t ppa_dma;
        __le64 *metadata = NULL;
        dma_addr_t metadata_dma;
        DECLARE_COMPLETION_ONSTACK(wait);
        int ret = 0;

        rq = nvme_alloc_request(q, (struct nvme_command *)vcmd, 0,
                        NVME_QID_ANY);
        if (IS_ERR(rq)) {
                ret = -ENOMEM;
                goto err_cmd;
        }

        rq->timeout = timeout ? timeout : ADMIN_TIMEOUT;

        rq->cmd_flags &= ~REQ_FAILFAST_DRIVER;
        rq->end_io_data = &wait;

        if (ppa_buf && ppa_len) {
                ppa_list = dma_pool_alloc(dev->dma_pool, GFP_KERNEL, &ppa_dma);
                if (!ppa_list) {
                        ret = -ENOMEM;
                        goto err_rq;
                }
                if (copy_from_user(ppa_list, (void __user *)ppa_buf,
                                                sizeof(u64) * (ppa_len + 1))) {
                        ret = -EFAULT;
                        goto err_ppa;
                }
                vcmd->ph_rw.spba = cpu_to_le64(ppa_dma);
        } else {
                vcmd->ph_rw.spba = cpu_to_le64((uintptr_t)ppa_buf);
        }

        if (ubuf && bufflen) {
                ret = blk_rq_map_user(q, rq, NULL, ubuf, bufflen, GFP_KERNEL);
                if (ret)
                        goto err_ppa;
                bio = rq->bio;

                if (meta_buf && meta_len) {
                        metadata = dma_pool_alloc(dev->dma_pool, GFP_KERNEL,
                                                                &metadata_dma);
                        if (!metadata) {
                                ret = -ENOMEM;
                                goto err_map;
                        }

                        if (write) {
                                if (copy_from_user(metadata,
                                                (void __user *)meta_buf,
                                                meta_len)) {
                                        ret = -EFAULT;
                                        goto err_meta;
                                }
                        }
                        vcmd->ph_rw.metadata = cpu_to_le64(metadata_dma);
                }

                if (!disk)
                        goto submit;

                bio->bi_bdev = bdget_disk(disk, 0);
                if (!bio->bi_bdev) {
                        ret = -ENODEV;
                        goto err_meta;
                }
        }

submit:
        blk_execute_rq_nowait(q, NULL, rq, 0, nvme_nvm_end_user_vio);

        wait_for_completion_io(&wait);

        if (nvme_req(rq)->flags & NVME_REQ_CANCELLED)
                ret = -EINTR;
        else if (nvme_req(rq)->status & 0x7ff)
                ret = -EIO;
        if (result)
                *result = nvme_req(rq)->status & 0x7ff;
        if (status)
                *status = le64_to_cpu(nvme_req(rq)->result.u64);

        if (metadata && !ret && !write) {
                if (copy_to_user(meta_buf, (void *)metadata, meta_len))
                        ret = -EFAULT;
        }
err_meta:
        if (meta_buf && meta_len)
                dma_pool_free(dev->dma_pool, metadata, metadata_dma);
err_map:
        if (bio) {
                if (disk && bio->bi_bdev)
                        bdput(bio->bi_bdev);
                blk_rq_unmap_user(bio);
        }
err_ppa:
        if (ppa_buf && ppa_len)
                dma_pool_free(dev->dma_pool, ppa_list, ppa_dma);
err_rq:
        blk_mq_free_request(rq);
err_cmd:
        return ret;
}

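/*
 * NVME_NVM_IOCTL_SUBMIT_VIO: build a physical read/write command from
 * the user's nvm_user_vio descriptor and run it on the I/O queue.
 */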
static int nvme_nvm_submit_vio(struct nvme_ns *ns,
                                        struct nvm_user_vio __user *uvio)
{
        struct nvm_user_vio vio;
        struct nvme_nvm_command c;
        unsigned int length;
        int ret;

        if (copy_from_user(&vio, uvio, sizeof(vio)))
                return -EFAULT;
        if (vio.flags)
                return -EINVAL;

        memset(&c, 0, sizeof(c));
        c.ph_rw.opcode = vio.opcode;
        c.ph_rw.nsid = cpu_to_le32(ns->ns_id);
        c.ph_rw.control = cpu_to_le16(vio.control);
        c.ph_rw.length = cpu_to_le16(vio.nppas);

        length = (vio.nppas + 1) << ns->lba_shift;

        ret = nvme_nvm_submit_user_cmd(ns->queue, ns, &c,
                        (void __user *)(uintptr_t)vio.addr, length,
                        (void __user *)(uintptr_t)vio.metadata,
                                                        vio.metadata_len,
                        (void __user *)(uintptr_t)vio.ppa_list, vio.nppas,
                        &vio.result, &vio.status, 0);

        if (ret && copy_to_user(uvio, &vio, sizeof(vio)))
                return -EFAULT;

        return ret;
}

static int nvme_nvm_user_vcmd(struct nvme_ns *ns, int admin,
                                        struct nvm_passthru_vio __user *uvcmd)
{
        struct nvm_passthru_vio vcmd;
        struct nvme_nvm_command c;
        struct request_queue *q;
        unsigned int timeout = 0;
        int ret;

        if (copy_from_user(&vcmd, uvcmd, sizeof(vcmd)))
                return -EFAULT;
        if ((vcmd.opcode != 0xF2) && (!capable(CAP_SYS_ADMIN)))
                return -EACCES;
        if (vcmd.flags)
                return -EINVAL;

        memset(&c, 0, sizeof(c));
        c.common.opcode = vcmd.opcode;
        c.common.nsid = cpu_to_le32(ns->ns_id);
        c.common.cdw2[0] = cpu_to_le32(vcmd.cdw2);
        c.common.cdw2[1] = cpu_to_le32(vcmd.cdw3);
        /* cdw11-12 */
        c.ph_rw.length = cpu_to_le16(vcmd.nppas);
        c.ph_rw.control  = cpu_to_le16(vcmd.control);
        c.common.cdw10[3] = cpu_to_le32(vcmd.cdw13);
        c.common.cdw10[4] = cpu_to_le32(vcmd.cdw14);
        c.common.cdw10[5] = cpu_to_le32(vcmd.cdw15);

        if (vcmd.timeout_ms)
                timeout = msecs_to_jiffies(vcmd.timeout_ms);

        q = admin ? ns->ctrl->admin_q : ns->queue;

        ret = nvme_nvm_submit_user_cmd(q, ns,
                        (struct nvme_nvm_command *)&c,
                        (void __user *)(uintptr_t)vcmd.addr, vcmd.data_len,
                        (void __user *)(uintptr_t)vcmd.metadata,
                                                        vcmd.metadata_len,
                        (void __user *)(uintptr_t)vcmd.ppa_list, vcmd.nppas,
                        &vcmd.result, &vcmd.status, timeout);

        if (ret && copy_to_user(uvcmd, &vcmd, sizeof(vcmd)))
                return -EFAULT;

        return ret;
}

int nvme_nvm_ioctl(struct nvme_ns *ns, unsigned int cmd, unsigned long arg)
{
        switch (cmd) {
        case NVME_NVM_IOCTL_ADMIN_VIO:
                return nvme_nvm_user_vcmd(ns, 1, (void __user *)arg);
        case NVME_NVM_IOCTL_IO_VIO:
                return nvme_nvm_user_vcmd(ns, 0, (void __user *)arg);
        case NVME_NVM_IOCTL_SUBMIT_VIO:
                return nvme_nvm_submit_vio(ns, (void __user *)arg);
        default:
                return -ENOTTY;
        }
}

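/*
 * A minimal user space sketch of the vector I/O path above. The device
 * node, buffer and PPA variables are illustrative only; struct
 * nvm_user_vio and NVME_NVM_IOCTL_SUBMIT_VIO come from the installed
 * uapi header <linux/lightnvm.h>.
 *
 *	struct nvm_user_vio vio = { };
 *	int fd = open("/dev/nvme0n1", O_RDWR);
 *
 *	vio.opcode = 0x92;                      // Open-Channel 1.2 physical page read
 *	vio.nppas = 0;                          // 0's based: a single PPA
 *	vio.ppa_list = ppa.ppa;                 // with nppas == 0 the PPA is passed by value;
 *	                                        // otherwise this holds a pointer to an array
 *	                                        // of nppas + 1 PPAs
 *	vio.addr = (__u64)(uintptr_t)buf;       // one sector worth of data
 *
 *	if (ioctl(fd, NVME_NVM_IOCTL_SUBMIT_VIO, &vio) < 0)
 *		perror("NVME_NVM_IOCTL_SUBMIT_VIO");
 */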
int nvme_nvm_register(struct nvme_ns *ns, char *disk_name, int node)
{
        struct request_queue *q = ns->queue;
        struct nvm_dev *dev;

        _nvme_nvm_check_size();

        dev = nvm_alloc_dev(node);
        if (!dev)
                return -ENOMEM;

        dev->q = q;
        memcpy(dev->name, disk_name, DISK_NAME_LEN);
        dev->ops = &nvme_nvm_dev_ops;
        dev->private_data = ns;
        ns->ndev = dev;

        return nvm_register(dev);
}

void nvme_nvm_unregister(struct nvme_ns *ns)
{
        nvm_unregister(ns->ndev);
}

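/*
 * Read-only attributes exported under /sys/block/<disk>/lightnvm/,
 * reporting the identify geometry of the (single) configuration group.
 */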
static ssize_t nvm_dev_attr_show(struct device *dev,
                                 struct device_attribute *dattr, char *page)
{
        struct nvme_ns *ns = nvme_get_ns_from_dev(dev);
        struct nvm_dev *ndev = ns->ndev;
        struct nvm_id *id;
        struct nvm_id_group *grp;
        struct attribute *attr;

        if (!ndev)
                return 0;

        id = &ndev->identity;
        grp = &id->grp;
        attr = &dattr->attr;

        if (strcmp(attr->name, "version") == 0) {
                return scnprintf(page, PAGE_SIZE, "%u\n", id->ver_id);
        } else if (strcmp(attr->name, "vendor_opcode") == 0) {
                return scnprintf(page, PAGE_SIZE, "%u\n", id->vmnt);
        } else if (strcmp(attr->name, "capabilities") == 0) {
                return scnprintf(page, PAGE_SIZE, "%u\n", id->cap);
        } else if (strcmp(attr->name, "device_mode") == 0) {
                return scnprintf(page, PAGE_SIZE, "%u\n", id->dom);
        /* kept for compatibility */
        } else if (strcmp(attr->name, "media_manager") == 0) {
                return scnprintf(page, PAGE_SIZE, "%s\n", "gennvm");
        } else if (strcmp(attr->name, "ppa_format") == 0) {
                return scnprintf(page, PAGE_SIZE,
                        "0x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x\n",
                        id->ppaf.ch_offset, id->ppaf.ch_len,
                        id->ppaf.lun_offset, id->ppaf.lun_len,
                        id->ppaf.pln_offset, id->ppaf.pln_len,
                        id->ppaf.blk_offset, id->ppaf.blk_len,
                        id->ppaf.pg_offset, id->ppaf.pg_len,
                        id->ppaf.sect_offset, id->ppaf.sect_len);
        } else if (strcmp(attr->name, "media_type") == 0) {     /* u8 */
                return scnprintf(page, PAGE_SIZE, "%u\n", grp->mtype);
        } else if (strcmp(attr->name, "flash_media_type") == 0) {
                return scnprintf(page, PAGE_SIZE, "%u\n", grp->fmtype);
        } else if (strcmp(attr->name, "num_channels") == 0) {
                return scnprintf(page, PAGE_SIZE, "%u\n", grp->num_ch);
        } else if (strcmp(attr->name, "num_luns") == 0) {
                return scnprintf(page, PAGE_SIZE, "%u\n", grp->num_lun);
        } else if (strcmp(attr->name, "num_planes") == 0) {
                return scnprintf(page, PAGE_SIZE, "%u\n", grp->num_pln);
        } else if (strcmp(attr->name, "num_blocks") == 0) {     /* u16 */
                return scnprintf(page, PAGE_SIZE, "%u\n", grp->num_blk);
        } else if (strcmp(attr->name, "num_pages") == 0) {
                return scnprintf(page, PAGE_SIZE, "%u\n", grp->num_pg);
        } else if (strcmp(attr->name, "page_size") == 0) {
                return scnprintf(page, PAGE_SIZE, "%u\n", grp->fpg_sz);
        } else if (strcmp(attr->name, "hw_sector_size") == 0) {
                return scnprintf(page, PAGE_SIZE, "%u\n", grp->csecs);
        } else if (strcmp(attr->name, "oob_sector_size") == 0) {/* u32 */
                return scnprintf(page, PAGE_SIZE, "%u\n", grp->sos);
        } else if (strcmp(attr->name, "read_typ") == 0) {
                return scnprintf(page, PAGE_SIZE, "%u\n", grp->trdt);
        } else if (strcmp(attr->name, "read_max") == 0) {
                return scnprintf(page, PAGE_SIZE, "%u\n", grp->trdm);
        } else if (strcmp(attr->name, "prog_typ") == 0) {
                return scnprintf(page, PAGE_SIZE, "%u\n", grp->tprt);
        } else if (strcmp(attr->name, "prog_max") == 0) {
                return scnprintf(page, PAGE_SIZE, "%u\n", grp->tprm);
        } else if (strcmp(attr->name, "erase_typ") == 0) {
                return scnprintf(page, PAGE_SIZE, "%u\n", grp->tbet);
        } else if (strcmp(attr->name, "erase_max") == 0) {
                return scnprintf(page, PAGE_SIZE, "%u\n", grp->tbem);
        } else if (strcmp(attr->name, "multiplane_modes") == 0) {
                return scnprintf(page, PAGE_SIZE, "0x%08x\n", grp->mpos);
        } else if (strcmp(attr->name, "media_capabilities") == 0) {
                return scnprintf(page, PAGE_SIZE, "0x%08x\n", grp->mccap);
        } else if (strcmp(attr->name, "max_phys_secs") == 0) {
                return scnprintf(page, PAGE_SIZE, "%u\n",
                                ndev->ops->max_phys_sect);
        } else {
                return scnprintf(page,
                                 PAGE_SIZE,
                                 "Unhandled attr(%s) in `nvm_dev_attr_show`\n",
                                 attr->name);
        }
}

#define NVM_DEV_ATTR_RO(_name)                                          \
        DEVICE_ATTR(_name, S_IRUGO, nvm_dev_attr_show, NULL)

static NVM_DEV_ATTR_RO(version);
static NVM_DEV_ATTR_RO(vendor_opcode);
static NVM_DEV_ATTR_RO(capabilities);
static NVM_DEV_ATTR_RO(device_mode);
static NVM_DEV_ATTR_RO(ppa_format);
static NVM_DEV_ATTR_RO(media_manager);

static NVM_DEV_ATTR_RO(media_type);
static NVM_DEV_ATTR_RO(flash_media_type);
static NVM_DEV_ATTR_RO(num_channels);
static NVM_DEV_ATTR_RO(num_luns);
static NVM_DEV_ATTR_RO(num_planes);
static NVM_DEV_ATTR_RO(num_blocks);
static NVM_DEV_ATTR_RO(num_pages);
static NVM_DEV_ATTR_RO(page_size);
static NVM_DEV_ATTR_RO(hw_sector_size);
static NVM_DEV_ATTR_RO(oob_sector_size);
static NVM_DEV_ATTR_RO(read_typ);
static NVM_DEV_ATTR_RO(read_max);
static NVM_DEV_ATTR_RO(prog_typ);
static NVM_DEV_ATTR_RO(prog_max);
static NVM_DEV_ATTR_RO(erase_typ);
static NVM_DEV_ATTR_RO(erase_max);
static NVM_DEV_ATTR_RO(multiplane_modes);
static NVM_DEV_ATTR_RO(media_capabilities);
static NVM_DEV_ATTR_RO(max_phys_secs);

static struct attribute *nvm_dev_attrs[] = {
        &dev_attr_version.attr,
        &dev_attr_vendor_opcode.attr,
        &dev_attr_capabilities.attr,
        &dev_attr_device_mode.attr,
        &dev_attr_media_manager.attr,

        &dev_attr_ppa_format.attr,
        &dev_attr_media_type.attr,
        &dev_attr_flash_media_type.attr,
        &dev_attr_num_channels.attr,
        &dev_attr_num_luns.attr,
        &dev_attr_num_planes.attr,
        &dev_attr_num_blocks.attr,
        &dev_attr_num_pages.attr,
        &dev_attr_page_size.attr,
        &dev_attr_hw_sector_size.attr,
        &dev_attr_oob_sector_size.attr,
        &dev_attr_read_typ.attr,
        &dev_attr_read_max.attr,
        &dev_attr_prog_typ.attr,
        &dev_attr_prog_max.attr,
        &dev_attr_erase_typ.attr,
        &dev_attr_erase_max.attr,
        &dev_attr_multiplane_modes.attr,
        &dev_attr_media_capabilities.attr,
        &dev_attr_max_phys_secs.attr,
        NULL,
};

static const struct attribute_group nvm_dev_attr_group = {
        .name           = "lightnvm",
        .attrs          = nvm_dev_attrs,
};

int nvme_nvm_register_sysfs(struct nvme_ns *ns)
{
        return sysfs_create_group(&disk_to_dev(ns->disk)->kobj,
                                        &nvm_dev_attr_group);
}

void nvme_nvm_unregister_sysfs(struct nvme_ns *ns)
{
        sysfs_remove_group(&disk_to_dev(ns->disk)->kobj,
                                        &nvm_dev_attr_group);
}

/* Move these IDs to a shared header once they are used in more than one place. */
#define PCI_VENDOR_ID_CNEX 0x1d1d
#define PCI_DEVICE_ID_CNEX_WL 0x2807
#define PCI_DEVICE_ID_CNEX_QEMU 0x1f1f

int nvme_nvm_ns_supported(struct nvme_ns *ns, struct nvme_id_ns *id)
{
        struct nvme_ctrl *ctrl = ns->ctrl;
        /* XXX: this is poking into PCI structures from generic code! */
        struct pci_dev *pdev = to_pci_dev(ctrl->dev);

        /* QEMU NVMe simulator - PCI ID + Vendor specific bit */
        if (pdev->vendor == PCI_VENDOR_ID_CNEX &&
                                pdev->device == PCI_DEVICE_ID_CNEX_QEMU &&
                                                        id->vs[0] == 0x1)
                return 1;

        /* CNEX Labs - PCI ID + Vendor specific bit */
        if (pdev->vendor == PCI_VENDOR_ID_CNEX &&
                                pdev->device == PCI_DEVICE_ID_CNEX_WL &&
                                                        id->vs[0] == 0x1)
                return 1;

        return 0;
}