linux/drivers/nvme/host/lightnvm.c
/*
 * nvme-lightnvm.c - LightNVM NVMe device
 *
 * Copyright (C) 2014-2015 IT University of Copenhagen
 * Initial release: Matias Bjorling <mb@lightnvm.io>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version
 * 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; see the file COPYING.  If not, write to
 * the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139,
 * USA.
 *
 */

#include "nvme.h"

#include <linux/nvme.h>
#include <linux/bitops.h>
#include <linux/lightnvm.h>
#include <linux/vmalloc.h>
#include <linux/sched/sysctl.h>
#include <uapi/linux/lightnvm.h>

enum nvme_nvm_admin_opcode {
        nvme_nvm_admin_identity         = 0xe2,
        nvme_nvm_admin_get_bb_tbl       = 0xf2,
        nvme_nvm_admin_set_bb_tbl       = 0xf1,
};

enum nvme_nvm_log_page {
        NVME_NVM_LOG_REPORT_CHUNK       = 0xca,
};

struct nvme_nvm_ph_rw {
        __u8                    opcode;
        __u8                    flags;
        __u16                   command_id;
        __le32                  nsid;
        __u64                   rsvd2;
        __le64                  metadata;
        __le64                  prp1;
        __le64                  prp2;
        __le64                  spba;
        __le16                  length;
        __le16                  control;
        __le32                  dsmgmt;
        __le64                  resv;
};

struct nvme_nvm_erase_blk {
        __u8                    opcode;
        __u8                    flags;
        __u16                   command_id;
        __le32                  nsid;
        __u64                   rsvd[2];
        __le64                  prp1;
        __le64                  prp2;
        __le64                  spba;
        __le16                  length;
        __le16                  control;
        __le32                  dsmgmt;
        __le64                  resv;
};

struct nvme_nvm_identity {
        __u8                    opcode;
        __u8                    flags;
        __u16                   command_id;
        __le32                  nsid;
        __u64                   rsvd[2];
        __le64                  prp1;
        __le64                  prp2;
        __u32                   rsvd11[6];
};

struct nvme_nvm_getbbtbl {
        __u8                    opcode;
        __u8                    flags;
        __u16                   command_id;
        __le32                  nsid;
        __u64                   rsvd[2];
        __le64                  prp1;
        __le64                  prp2;
        __le64                  spba;
        __u32                   rsvd4[4];
};

struct nvme_nvm_setbbtbl {
        __u8                    opcode;
        __u8                    flags;
        __u16                   command_id;
        __le32                  nsid;
        __le64                  rsvd[2];
        __le64                  prp1;
        __le64                  prp2;
        __le64                  spba;
        __le16                  nlb;
        __u8                    value;
        __u8                    rsvd3;
        __u32                   rsvd4[3];
};

struct nvme_nvm_command {
        union {
                struct nvme_common_command common;
                struct nvme_nvm_ph_rw ph_rw;
                struct nvme_nvm_erase_blk erase;
                struct nvme_nvm_identity identity;
                struct nvme_nvm_getbbtbl get_bb;
                struct nvme_nvm_setbbtbl set_bb;
        };
};

struct nvme_nvm_id12_grp {
        __u8                    mtype;
        __u8                    fmtype;
        __le16                  res16;
        __u8                    num_ch;
        __u8                    num_lun;
        __u8                    num_pln;
        __u8                    rsvd1;
        __le16                  num_chk;
        __le16                  num_pg;
        __le16                  fpg_sz;
        __le16                  csecs;
        __le16                  sos;
        __le16                  rsvd2;
        __le32                  trdt;
        __le32                  trdm;
        __le32                  tprt;
        __le32                  tprm;
        __le32                  tbet;
        __le32                  tbem;
        __le32                  mpos;
        __le32                  mccap;
        __le16                  cpar;
        __u8                    reserved[906];
} __packed;

struct nvme_nvm_id12_addrf {
        __u8                    ch_offset;
        __u8                    ch_len;
        __u8                    lun_offset;
        __u8                    lun_len;
        __u8                    pln_offset;
        __u8                    pln_len;
        __u8                    blk_offset;
        __u8                    blk_len;
        __u8                    pg_offset;
        __u8                    pg_len;
        __u8                    sec_offset;
        __u8                    sec_len;
        __u8                    res[4];
} __packed;

struct nvme_nvm_id12 {
        __u8                    ver_id;
        __u8                    vmnt;
        __u8                    cgrps;
        __u8                    res;
        __le32                  cap;
        __le32                  dom;
        struct nvme_nvm_id12_addrf ppaf;
        __u8                    resv[228];
        struct nvme_nvm_id12_grp grp;
        __u8                    resv2[2880];
} __packed;

struct nvme_nvm_bb_tbl {
        __u8    tblid[4];
        __le16  verid;
        __le16  revid;
        __le32  rvsd1;
        __le32  tblks;
        __le32  tfact;
        __le32  tgrown;
        __le32  tdresv;
        __le32  thresv;
        __le32  rsvd2[8];
        __u8    blk[0];
};

struct nvme_nvm_id20_addrf {
        __u8                    grp_len;
        __u8                    pu_len;
        __u8                    chk_len;
        __u8                    lba_len;
        __u8                    resv[4];
};

struct nvme_nvm_id20 {
        __u8                    mjr;
        __u8                    mnr;
        __u8                    resv[6];

        struct nvme_nvm_id20_addrf lbaf;

        __le32                  mccap;
        __u8                    resv2[12];

        __u8                    wit;
        __u8                    resv3[31];

        /* Geometry */
        __le16                  num_grp;
        __le16                  num_pu;
        __le32                  num_chk;
        __le32                  clba;
        __u8                    resv4[52];

        /* Write data requirements */
        __le32                  ws_min;
        __le32                  ws_opt;
        __le32                  mw_cunits;
        __le32                  maxoc;
        __le32                  maxocpu;
        __u8                    resv5[44];

        /* Performance related metrics */
        __le32                  trdt;
        __le32                  trdm;
        __le32                  twrt;
        __le32                  twrm;
        __le32                  tcrst;
        __le32                  tcrsm;
        __u8                    resv6[40];

        /* Reserved area */
        __u8                    resv7[2816];

        /* Vendor specific */
        __u8                    vs[1024];
};

struct nvme_nvm_chk_meta {
        __u8    state;
        __u8    type;
        __u8    wi;
        __u8    rsvd[5];
        __le64  slba;
        __le64  cnlb;
        __le64  wp;
};

/*
 * Check we didn't inadvertently grow the command struct
 */
static inline void _nvme_nvm_check_size(void)
{
        BUILD_BUG_ON(sizeof(struct nvme_nvm_identity) != 64);
        BUILD_BUG_ON(sizeof(struct nvme_nvm_ph_rw) != 64);
        BUILD_BUG_ON(sizeof(struct nvme_nvm_erase_blk) != 64);
        BUILD_BUG_ON(sizeof(struct nvme_nvm_getbbtbl) != 64);
        BUILD_BUG_ON(sizeof(struct nvme_nvm_setbbtbl) != 64);
        BUILD_BUG_ON(sizeof(struct nvme_nvm_id12_grp) != 960);
        BUILD_BUG_ON(sizeof(struct nvme_nvm_id12_addrf) != 16);
        BUILD_BUG_ON(sizeof(struct nvme_nvm_id12) != NVME_IDENTIFY_DATA_SIZE);
        BUILD_BUG_ON(sizeof(struct nvme_nvm_bb_tbl) != 64);
        BUILD_BUG_ON(sizeof(struct nvme_nvm_id20_addrf) != 8);
        BUILD_BUG_ON(sizeof(struct nvme_nvm_id20) != NVME_IDENTIFY_DATA_SIZE);
        BUILD_BUG_ON(sizeof(struct nvme_nvm_chk_meta) != 32);
        BUILD_BUG_ON(sizeof(struct nvme_nvm_chk_meta) !=
                                                sizeof(struct nvm_chk_meta));
}

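/*
 * Unpack the 1.2 PPA address format descriptor into the generic
 * nvm_addrf_12 layout. The device reports a bit length and offset for
 * each address field; a pre-shifted mask is computed for each so that
 * address components can later be extracted with a single AND. For
 * example, a 3-bit channel field at offset 8 yields ch_mask == 0x700.
 */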
static void nvme_nvm_set_addr_12(struct nvm_addrf_12 *dst,
                                 struct nvme_nvm_id12_addrf *src)
{
        dst->ch_len = src->ch_len;
        dst->lun_len = src->lun_len;
        dst->blk_len = src->blk_len;
        dst->pg_len = src->pg_len;
        dst->pln_len = src->pln_len;
        dst->sec_len = src->sec_len;

        dst->ch_offset = src->ch_offset;
        dst->lun_offset = src->lun_offset;
        dst->blk_offset = src->blk_offset;
        dst->pg_offset = src->pg_offset;
        dst->pln_offset = src->pln_offset;
        dst->sec_offset = src->sec_offset;

        dst->ch_mask = ((1ULL << dst->ch_len) - 1) << dst->ch_offset;
        dst->lun_mask = ((1ULL << dst->lun_len) - 1) << dst->lun_offset;
        dst->blk_mask = ((1ULL << dst->blk_len) - 1) << dst->blk_offset;
        dst->pg_mask = ((1ULL << dst->pg_len) - 1) << dst->pg_offset;
        dst->pln_mask = ((1ULL << dst->pln_len) - 1) << dst->pln_offset;
        dst->sec_mask = ((1ULL << dst->sec_len) - 1) << dst->sec_offset;
}

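/*
 * Parse a 1.2 identify structure into the generic geometry. The 1.2
 * sector/page/plane/block hierarchy is folded into the chunk-based view
 * used by the upper layers: a chunk spans all pages and planes of a
 * block, so clba = (fpg_sz / csecs) * num_pln * num_pg.
 */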
static int nvme_nvm_setup_12(struct nvme_nvm_id12 *id,
                             struct nvm_geo *geo)
{
        struct nvme_nvm_id12_grp *src;
        int sec_per_pg, sec_per_pl, pg_per_blk;

        if (id->cgrps != 1)
                return -EINVAL;

        src = &id->grp;

        if (src->mtype != 0) {
                pr_err("nvm: memory type not supported\n");
                return -EINVAL;
        }

        /* 1.2 spec. only reports a single version id - unfold */
        geo->major_ver_id = id->ver_id;
        geo->minor_ver_id = 2;

        /* Set compacted version for upper layers */
        geo->version = NVM_OCSSD_SPEC_12;

        geo->num_ch = src->num_ch;
        geo->num_lun = src->num_lun;
        geo->all_luns = geo->num_ch * geo->num_lun;

        geo->num_chk = le16_to_cpu(src->num_chk);

        geo->csecs = le16_to_cpu(src->csecs);
        geo->sos = le16_to_cpu(src->sos);

        pg_per_blk = le16_to_cpu(src->num_pg);
        sec_per_pg = le16_to_cpu(src->fpg_sz) / geo->csecs;
        sec_per_pl = sec_per_pg * src->num_pln;
        geo->clba = sec_per_pl * pg_per_blk;

        geo->all_chunks = geo->all_luns * geo->num_chk;
        geo->total_secs = geo->clba * geo->all_chunks;

        geo->ws_min = sec_per_pg;
        geo->ws_opt = sec_per_pg;
        geo->mw_cunits = geo->ws_opt << 3;      /* default to MLC safe values */

        /* Do not impose values for the maximum number of open blocks, as
         * it is unspecified in 1.2. Users of 1.2 devices must be aware of
         * this and, if restrictions apply, specify these values through a
         * quirk.
         */
        geo->maxoc = geo->all_luns * geo->num_chk;
        geo->maxocpu = geo->num_chk;

        geo->mccap = le32_to_cpu(src->mccap);

        geo->trdt = le32_to_cpu(src->trdt);
        geo->trdm = le32_to_cpu(src->trdm);
        geo->tprt = le32_to_cpu(src->tprt);
        geo->tprm = le32_to_cpu(src->tprm);
        geo->tbet = le32_to_cpu(src->tbet);
        geo->tbem = le32_to_cpu(src->tbem);

        /* 1.2 compatibility */
        geo->vmnt = id->vmnt;
        geo->cap = le32_to_cpu(id->cap);
        geo->dom = le32_to_cpu(id->dom);

        geo->mtype = src->mtype;
        geo->fmtype = src->fmtype;

        geo->cpar = le16_to_cpu(src->cpar);
        geo->mpos = le32_to_cpu(src->mpos);

        geo->pln_mode = NVM_PLANE_SINGLE;

        if (geo->mpos & 0x020202) {
                geo->pln_mode = NVM_PLANE_DOUBLE;
                geo->ws_opt <<= 1;
        } else if (geo->mpos & 0x040404) {
                geo->pln_mode = NVM_PLANE_QUAD;
                geo->ws_opt <<= 2;
        }

        geo->num_pln = src->num_pln;
        geo->num_pg = le16_to_cpu(src->num_pg);
        geo->fpg_sz = le16_to_cpu(src->fpg_sz);

        nvme_nvm_set_addr_12((struct nvm_addrf_12 *)&geo->addrf, &id->ppaf);

        return 0;
}

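/*
 * Unpack the 2.0 LBA format descriptor. 2.0 reports only field lengths;
 * the offsets follow implicitly from the fixed sector/chunk/PU/group
 * ordering, so they are accumulated from least to most significant bit.
 */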
static void nvme_nvm_set_addr_20(struct nvm_addrf *dst,
                                 struct nvme_nvm_id20_addrf *src)
{
        dst->ch_len = src->grp_len;
        dst->lun_len = src->pu_len;
        dst->chk_len = src->chk_len;
        dst->sec_len = src->lba_len;

        dst->sec_offset = 0;
        dst->chk_offset = dst->sec_len;
        dst->lun_offset = dst->chk_offset + dst->chk_len;
        dst->ch_offset = dst->lun_offset + dst->lun_len;

        dst->ch_mask = ((1ULL << dst->ch_len) - 1) << dst->ch_offset;
        dst->lun_mask = ((1ULL << dst->lun_len) - 1) << dst->lun_offset;
        dst->chk_mask = ((1ULL << dst->chk_len) - 1) << dst->chk_offset;
        dst->sec_mask = ((1ULL << dst->sec_len) - 1) << dst->sec_offset;
}

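/*
 * Parse a 2.0 identify structure, which maps almost directly onto the
 * generic geometry. Note that the 2.0 write (twrt/twrm) and chunk reset
 * (tcrst/tcrsm) timings are carried in the 1.2-named tprt/tprm and
 * tbet/tbem fields of struct nvm_geo.
 */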
static int nvme_nvm_setup_20(struct nvme_nvm_id20 *id,
                             struct nvm_geo *geo)
{
        geo->major_ver_id = id->mjr;
        geo->minor_ver_id = id->mnr;

        /* Set compacted version for upper layers */
        geo->version = NVM_OCSSD_SPEC_20;

        if (!(geo->major_ver_id == 2 && geo->minor_ver_id == 0)) {
                pr_err("nvm: OCSSD version not supported (v%d.%d)\n",
                                geo->major_ver_id, geo->minor_ver_id);
                return -EINVAL;
        }

        geo->num_ch = le16_to_cpu(id->num_grp);
        geo->num_lun = le16_to_cpu(id->num_pu);
        geo->all_luns = geo->num_ch * geo->num_lun;

        geo->num_chk = le32_to_cpu(id->num_chk);
        geo->clba = le32_to_cpu(id->clba);

        geo->all_chunks = geo->all_luns * geo->num_chk;
        geo->total_secs = geo->clba * geo->all_chunks;

        geo->ws_min = le32_to_cpu(id->ws_min);
        geo->ws_opt = le32_to_cpu(id->ws_opt);
        geo->mw_cunits = le32_to_cpu(id->mw_cunits);
        geo->maxoc = le32_to_cpu(id->maxoc);
        geo->maxocpu = le32_to_cpu(id->maxocpu);

        geo->trdt = le32_to_cpu(id->trdt);
        geo->trdm = le32_to_cpu(id->trdm);
        geo->tprt = le32_to_cpu(id->twrt);
        geo->tprm = le32_to_cpu(id->twrm);
        geo->tbet = le32_to_cpu(id->tcrst);
        geo->tbem = le32_to_cpu(id->tcrsm);

        nvme_nvm_set_addr_20(&geo->addrf, &id->lbaf);

        return 0;
}

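/*
 * Issue the OCSSD geometry (identify) admin command and initialize the
 * device geometry according to the specification revision reported in
 * the first byte of the response.
 */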
static int nvme_nvm_identity(struct nvm_dev *nvmdev)
{
        struct nvme_ns *ns = nvmdev->q->queuedata;
        struct nvme_nvm_id12 *id;
        struct nvme_nvm_command c = {};
        int ret;

        c.identity.opcode = nvme_nvm_admin_identity;
        c.identity.nsid = cpu_to_le32(ns->head->ns_id);

        id = kmalloc(sizeof(struct nvme_nvm_id12), GFP_KERNEL);
        if (!id)
                return -ENOMEM;

        ret = nvme_submit_sync_cmd(ns->ctrl->admin_q, (struct nvme_command *)&c,
                                id, sizeof(struct nvme_nvm_id12));
        if (ret) {
                ret = -EIO;
                goto out;
        }

        /*
         * The 1.2 and 2.0 specifications share the first byte in their geometry
         * command to make it possible to know what version a device implements.
         */
        switch (id->ver_id) {
        case 1:
                ret = nvme_nvm_setup_12(id, &nvmdev->geo);
                break;
        case 2:
                ret = nvme_nvm_setup_20((struct nvme_nvm_id20 *)id,
                                                        &nvmdev->geo);
                break;
        default:
                dev_err(ns->ctrl->device, "OCSSD revision not supported (%d)\n",
                                                        id->ver_id);
                ret = -EINVAL;
        }

out:
        kfree(id);
        return ret;
}

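/*
 * Read the 1.2 bad block table for the LUN addressed by @ppa into
 * @blks. The returned table is validated against the "BBLT" signature,
 * version 1 and the expected number of blocks before it is copied out.
 */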
static int nvme_nvm_get_bb_tbl(struct nvm_dev *nvmdev, struct ppa_addr ppa,
                                                                u8 *blks)
{
        struct request_queue *q = nvmdev->q;
        struct nvm_geo *geo = &nvmdev->geo;
        struct nvme_ns *ns = q->queuedata;
        struct nvme_ctrl *ctrl = ns->ctrl;
        struct nvme_nvm_command c = {};
        struct nvme_nvm_bb_tbl *bb_tbl;
        int nr_blks = geo->num_chk * geo->num_pln;
        int tblsz = sizeof(struct nvme_nvm_bb_tbl) + nr_blks;
        int ret = 0;

        c.get_bb.opcode = nvme_nvm_admin_get_bb_tbl;
        c.get_bb.nsid = cpu_to_le32(ns->head->ns_id);
        c.get_bb.spba = cpu_to_le64(ppa.ppa);

        bb_tbl = kzalloc(tblsz, GFP_KERNEL);
        if (!bb_tbl)
                return -ENOMEM;

        ret = nvme_submit_sync_cmd(ctrl->admin_q, (struct nvme_command *)&c,
                                                                bb_tbl, tblsz);
        if (ret) {
                dev_err(ctrl->device, "get bad block table failed (%d)\n", ret);
                ret = -EIO;
                goto out;
        }

        if (bb_tbl->tblid[0] != 'B' || bb_tbl->tblid[1] != 'B' ||
                bb_tbl->tblid[2] != 'L' || bb_tbl->tblid[3] != 'T') {
                dev_err(ctrl->device, "bbt format mismatch\n");
                ret = -EINVAL;
                goto out;
        }

        if (le16_to_cpu(bb_tbl->verid) != 1) {
                ret = -EINVAL;
                dev_err(ctrl->device, "bbt version not supported\n");
                goto out;
        }

        if (le32_to_cpu(bb_tbl->tblks) != nr_blks) {
                ret = -EINVAL;
                dev_err(ctrl->device,
                                "bbt unexpected blocks returned (%u != %u)\n",
                                le32_to_cpu(bb_tbl->tblks), nr_blks);
                goto out;
        }

        memcpy(blks, bb_tbl->blk, geo->num_chk * geo->num_pln);
out:
        kfree(bb_tbl);
        return ret;
}

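/*
 * Update the 1.2 bad block table: mark the blocks described by @ppas
 * with the given state (e.g. grown bad). nlb is zero-based, hence the
 * nr_ppas - 1.
 */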
static int nvme_nvm_set_bb_tbl(struct nvm_dev *nvmdev, struct ppa_addr *ppas,
                                                        int nr_ppas, int type)
{
        struct nvme_ns *ns = nvmdev->q->queuedata;
        struct nvme_nvm_command c = {};
        int ret = 0;

        c.set_bb.opcode = nvme_nvm_admin_set_bb_tbl;
        c.set_bb.nsid = cpu_to_le32(ns->head->ns_id);
        c.set_bb.spba = cpu_to_le64(ppas->ppa);
        c.set_bb.nlb = cpu_to_le16(nr_ppas - 1);
        c.set_bb.value = type;

        ret = nvme_submit_sync_cmd(ns->ctrl->admin_q, (struct nvme_command *)&c,
                                                                NULL, 0);
        if (ret)
                dev_err(ns->ctrl->device, "set bad block table failed (%d)\n",
                                                                        ret);
        return ret;
}

/*
 * Expects the lba in device format
 */
static int nvme_nvm_get_chk_meta(struct nvm_dev *ndev,
                                 struct nvm_chk_meta *meta,
                                 sector_t slba, int nchks)
{
        struct nvm_geo *geo = &ndev->geo;
        struct nvme_ns *ns = ndev->q->queuedata;
        struct nvme_ctrl *ctrl = ns->ctrl;
        struct nvme_nvm_chk_meta *dev_meta = (struct nvme_nvm_chk_meta *)meta;
        struct ppa_addr ppa;
        size_t left = nchks * sizeof(struct nvme_nvm_chk_meta);
        size_t log_pos, offset, len;
        int ret = 0, i;

        /* Normalize lba address space to obtain log offset */
        ppa.ppa = slba;
        ppa = dev_to_generic_addr(ndev, ppa);

        log_pos = ppa.m.chk;
        log_pos += ppa.m.pu * geo->num_chk;
        log_pos += ppa.m.grp * geo->num_lun * geo->num_chk;

        offset = log_pos * sizeof(struct nvme_nvm_chk_meta);

        while (left) {
                len = min_t(unsigned int, left, ctrl->max_hw_sectors << 9);

                ret = nvme_get_log_ext(ctrl, ns, NVME_NVM_LOG_REPORT_CHUNK,
                                dev_meta, len, offset);
                if (ret) {
                        dev_err(ctrl->device, "Get REPORT CHUNK log error\n");
                        break;
                }

                for (i = 0; i < len; i += sizeof(struct nvme_nvm_chk_meta)) {
                        meta->state = dev_meta->state;
                        meta->type = dev_meta->type;
                        meta->wi = dev_meta->wi;
                        meta->slba = le64_to_cpu(dev_meta->slba);
                        meta->cnlb = le64_to_cpu(dev_meta->cnlb);
                        meta->wp = le64_to_cpu(dev_meta->wp);

                        meta++;
                        dev_meta++;
                }

                offset += len;
                left -= len;
        }

        return ret;
}

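/*
 * Translate a lightnvm request into the vendor-specific NVMe I/O
 * command: PPA (list) address, metadata DMA address, control flags and
 * the zero-based sector count.
 */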
static inline void nvme_nvm_rqtocmd(struct nvm_rq *rqd, struct nvme_ns *ns,
                                    struct nvme_nvm_command *c)
{
        c->ph_rw.opcode = rqd->opcode;
        c->ph_rw.nsid = cpu_to_le32(ns->head->ns_id);
        c->ph_rw.spba = cpu_to_le64(rqd->ppa_addr.ppa);
        c->ph_rw.metadata = cpu_to_le64(rqd->dma_meta_list);
        c->ph_rw.control = cpu_to_le16(rqd->flags);
        c->ph_rw.length = cpu_to_le16(rqd->nr_ppas - 1);
}

static void nvme_nvm_end_io(struct request *rq, blk_status_t status)
{
        struct nvm_rq *rqd = rq->end_io_data;

        rqd->ppa_status = le64_to_cpu(nvme_req(rq)->result.u64);
        rqd->error = nvme_req(rq)->status;
        nvm_end_io(rqd);

        kfree(nvme_req(rq)->cmd);
        blk_mq_free_request(rq);
}

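/*
 * Build a block layer request for a lightnvm I/O. Requests without a
 * payload (e.g. erase) carry no bio; for those the data length is
 * cleared and a default priority is assigned.
 */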
static struct request *nvme_nvm_alloc_request(struct request_queue *q,
                                              struct nvm_rq *rqd,
                                              struct nvme_nvm_command *cmd)
{
        struct nvme_ns *ns = q->queuedata;
        struct request *rq;

        nvme_nvm_rqtocmd(rqd, ns, cmd);

        rq = nvme_alloc_request(q, (struct nvme_command *)cmd, 0, NVME_QID_ANY);
        if (IS_ERR(rq))
                return rq;

        rq->cmd_flags &= ~REQ_FAILFAST_DRIVER;

        if (rqd->bio) {
                blk_init_request_from_bio(rq, rqd->bio);
        } else {
                rq->ioprio = IOPRIO_PRIO_VALUE(IOPRIO_CLASS_BE, IOPRIO_NORM);
                rq->__data_len = 0;
        }

        return rq;
}

static int nvme_nvm_submit_io(struct nvm_dev *dev, struct nvm_rq *rqd)
{
        struct request_queue *q = dev->q;
        struct nvme_nvm_command *cmd;
        struct request *rq;

        cmd = kzalloc(sizeof(struct nvme_nvm_command), GFP_KERNEL);
        if (!cmd)
                return -ENOMEM;

        rq = nvme_nvm_alloc_request(q, rqd, cmd);
        if (IS_ERR(rq)) {
                kfree(cmd);
                return PTR_ERR(rq);
        }

        rq->end_io_data = rqd;

        blk_execute_rq_nowait(q, NULL, rq, 0, nvme_nvm_end_io);

        return 0;
}

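/*
 * Synchronous counterpart of nvme_nvm_submit_io(). The command is built
 * on the stack and the completion status is propagated through rqd;
 * only a cancelled request is reported as an error here.
 */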
static int nvme_nvm_submit_io_sync(struct nvm_dev *dev, struct nvm_rq *rqd)
{
        struct request_queue *q = dev->q;
        struct request *rq;
        struct nvme_nvm_command cmd;
        int ret = 0;

        memset(&cmd, 0, sizeof(struct nvme_nvm_command));

        rq = nvme_nvm_alloc_request(q, rqd, &cmd);
        if (IS_ERR(rq))
                return PTR_ERR(rq);

        /* I/Os can fail and the error is signaled through rqd. Callers must
         * handle the error accordingly.
         */
        blk_execute_rq(q, NULL, rq, 0);
        if (nvme_req(rq)->flags & NVME_REQ_CANCELLED)
                ret = -EINTR;

        rqd->ppa_status = le64_to_cpu(nvme_req(rq)->result.u64);
        rqd->error = nvme_req(rq)->status;

        blk_mq_free_request(rq);

        return ret;
}

static void *nvme_nvm_create_dma_pool(struct nvm_dev *nvmdev, char *name)
{
        struct nvme_ns *ns = nvmdev->q->queuedata;

        return dma_pool_create(name, ns->ctrl->dev, PAGE_SIZE, PAGE_SIZE, 0);
}

static void nvme_nvm_destroy_dma_pool(void *pool)
{
        struct dma_pool *dma_pool = pool;

        dma_pool_destroy(dma_pool);
}

static void *nvme_nvm_dev_dma_alloc(struct nvm_dev *dev, void *pool,
                                    gfp_t mem_flags, dma_addr_t *dma_handler)
{
        return dma_pool_alloc(pool, mem_flags, dma_handler);
}

static void nvme_nvm_dev_dma_free(void *pool, void *addr,
                                                        dma_addr_t dma_handler)
{
        dma_pool_free(pool, addr, dma_handler);
}

static struct nvm_dev_ops nvme_nvm_dev_ops = {
        .identity               = nvme_nvm_identity,

        .get_bb_tbl             = nvme_nvm_get_bb_tbl,
        .set_bb_tbl             = nvme_nvm_set_bb_tbl,

        .get_chk_meta           = nvme_nvm_get_chk_meta,

        .submit_io              = nvme_nvm_submit_io,
        .submit_io_sync         = nvme_nvm_submit_io_sync,

        .create_dma_pool        = nvme_nvm_create_dma_pool,
        .destroy_dma_pool       = nvme_nvm_destroy_dma_pool,
        .dev_dma_alloc          = nvme_nvm_dev_dma_alloc,
        .dev_dma_free           = nvme_nvm_dev_dma_free,
};

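/*
 * Common back end for the lightnvm user ioctls below: stage the PPA
 * list and metadata in DMA-coherent memory, map the user data buffer
 * (if any) into the request, execute the command synchronously and copy
 * results back to user space.
 */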
static int nvme_nvm_submit_user_cmd(struct request_queue *q,
                                struct nvme_ns *ns,
                                struct nvme_nvm_command *vcmd,
                                void __user *ubuf, unsigned int bufflen,
                                void __user *meta_buf, unsigned int meta_len,
                                void __user *ppa_buf, unsigned int ppa_len,
                                u32 *result, u64 *status, unsigned int timeout)
{
        bool write = nvme_is_write((struct nvme_command *)vcmd);
        struct nvm_dev *dev = ns->ndev;
        struct gendisk *disk = ns->disk;
        struct request *rq;
        struct bio *bio = NULL;
        __le64 *ppa_list = NULL;
        dma_addr_t ppa_dma;
        __le64 *metadata = NULL;
        dma_addr_t metadata_dma;
        int ret = 0;

        rq = nvme_alloc_request(q, (struct nvme_command *)vcmd, 0,
                        NVME_QID_ANY);
        if (IS_ERR(rq)) {
                ret = PTR_ERR(rq);
                goto err_cmd;
        }

        rq->timeout = timeout ? timeout : ADMIN_TIMEOUT;

        if (ppa_buf && ppa_len) {
                ppa_list = dma_pool_alloc(dev->dma_pool, GFP_KERNEL, &ppa_dma);
                if (!ppa_list) {
                        ret = -ENOMEM;
                        goto err_rq;
                }
                if (copy_from_user(ppa_list, (void __user *)ppa_buf,
                                                sizeof(u64) * (ppa_len + 1))) {
                        ret = -EFAULT;
                        goto err_ppa;
                }
                vcmd->ph_rw.spba = cpu_to_le64(ppa_dma);
        } else {
                vcmd->ph_rw.spba = cpu_to_le64((uintptr_t)ppa_buf);
        }

        if (ubuf && bufflen) {
                ret = blk_rq_map_user(q, rq, NULL, ubuf, bufflen, GFP_KERNEL);
                if (ret)
                        goto err_ppa;
                bio = rq->bio;

                if (meta_buf && meta_len) {
                        metadata = dma_pool_alloc(dev->dma_pool, GFP_KERNEL,
                                                                &metadata_dma);
                        if (!metadata) {
                                ret = -ENOMEM;
                                goto err_map;
                        }

                        if (write) {
                                if (copy_from_user(metadata,
                                                (void __user *)meta_buf,
                                                meta_len)) {
                                        ret = -EFAULT;
                                        goto err_meta;
                                }
                        }
                        vcmd->ph_rw.metadata = cpu_to_le64(metadata_dma);
                }

                bio->bi_disk = disk;
        }

        blk_execute_rq(q, NULL, rq, 0);

        if (nvme_req(rq)->flags & NVME_REQ_CANCELLED)
                ret = -EINTR;
        else if (nvme_req(rq)->status & 0x7ff)
                ret = -EIO;
        if (result)
                *result = nvme_req(rq)->status & 0x7ff;
        if (status)
                *status = le64_to_cpu(nvme_req(rq)->result.u64);

        if (metadata && !ret && !write) {
                if (copy_to_user(meta_buf, (void *)metadata, meta_len))
                        ret = -EFAULT;
        }
err_meta:
        if (metadata)
                dma_pool_free(dev->dma_pool, metadata, metadata_dma);
err_map:
        if (bio)
                blk_rq_unmap_user(bio);
err_ppa:
        if (ppa_list)
                dma_pool_free(dev->dma_pool, ppa_list, ppa_dma);
err_rq:
        blk_mq_free_request(rq);
err_cmd:
        return ret;
}

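/*
 * Handle NVME_NVM_IOCTL_SUBMIT_VIO, a vectored I/O described by struct
 * nvm_user_vio. nppas is zero-based, hence the +1 when computing the
 * data length. An illustrative (hypothetical) user-space invocation:
 *
 *      struct nvm_user_vio vio = { .opcode = ..., .nppas = ..., ... };
 *      ioctl(fd, NVME_NVM_IOCTL_SUBMIT_VIO, &vio);
 */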
static int nvme_nvm_submit_vio(struct nvme_ns *ns,
                                        struct nvm_user_vio __user *uvio)
{
        struct nvm_user_vio vio;
        struct nvme_nvm_command c;
        unsigned int length;
        int ret;

        if (copy_from_user(&vio, uvio, sizeof(vio)))
                return -EFAULT;
        if (vio.flags)
                return -EINVAL;

        memset(&c, 0, sizeof(c));
        c.ph_rw.opcode = vio.opcode;
        c.ph_rw.nsid = cpu_to_le32(ns->head->ns_id);
        c.ph_rw.control = cpu_to_le16(vio.control);
        c.ph_rw.length = cpu_to_le16(vio.nppas);

        length = (vio.nppas + 1) << ns->lba_shift;

        ret = nvme_nvm_submit_user_cmd(ns->queue, ns, &c,
                        (void __user *)(uintptr_t)vio.addr, length,
                        (void __user *)(uintptr_t)vio.metadata,
                                                        vio.metadata_len,
                        (void __user *)(uintptr_t)vio.ppa_list, vio.nppas,
                        &vio.result, &vio.status, 0);

        if (ret && copy_to_user(uvio, &vio, sizeof(vio)))
                return -EFAULT;

        return ret;
}

static int nvme_nvm_user_vcmd(struct nvme_ns *ns, int admin,
                                        struct nvm_passthru_vio __user *uvcmd)
{
        struct nvm_passthru_vio vcmd;
        struct nvme_nvm_command c;
        struct request_queue *q;
        unsigned int timeout = 0;
        int ret;

        if (copy_from_user(&vcmd, uvcmd, sizeof(vcmd)))
                return -EFAULT;
        if ((vcmd.opcode != 0xF2) && (!capable(CAP_SYS_ADMIN)))
                return -EACCES;
        if (vcmd.flags)
                return -EINVAL;

        memset(&c, 0, sizeof(c));
        c.common.opcode = vcmd.opcode;
        c.common.nsid = cpu_to_le32(ns->head->ns_id);
        c.common.cdw2[0] = cpu_to_le32(vcmd.cdw2);
        c.common.cdw2[1] = cpu_to_le32(vcmd.cdw3);
        /* cdw11-12 */
        c.ph_rw.length = cpu_to_le16(vcmd.nppas);
        c.ph_rw.control = cpu_to_le16(vcmd.control);
        c.common.cdw10[3] = cpu_to_le32(vcmd.cdw13);
        c.common.cdw10[4] = cpu_to_le32(vcmd.cdw14);
        c.common.cdw10[5] = cpu_to_le32(vcmd.cdw15);

        if (vcmd.timeout_ms)
                timeout = msecs_to_jiffies(vcmd.timeout_ms);

        q = admin ? ns->ctrl->admin_q : ns->queue;

        ret = nvme_nvm_submit_user_cmd(q, ns,
                        (struct nvme_nvm_command *)&c,
                        (void __user *)(uintptr_t)vcmd.addr, vcmd.data_len,
                        (void __user *)(uintptr_t)vcmd.metadata,
                                                        vcmd.metadata_len,
                        (void __user *)(uintptr_t)vcmd.ppa_list, vcmd.nppas,
                        &vcmd.result, &vcmd.status, timeout);

        if (ret && copy_to_user(uvcmd, &vcmd, sizeof(vcmd)))
                return -EFAULT;

        return ret;
}

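/*
 * ioctl entry point, called from the NVMe core for the LightNVM ioctl
 * range. Admin and I/O passthrough commands differ only in the queue
 * they are submitted on.
 */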
int nvme_nvm_ioctl(struct nvme_ns *ns, unsigned int cmd, unsigned long arg)
{
        switch (cmd) {
        case NVME_NVM_IOCTL_ADMIN_VIO:
                return nvme_nvm_user_vcmd(ns, 1, (void __user *)arg);
        case NVME_NVM_IOCTL_IO_VIO:
                return nvme_nvm_user_vcmd(ns, 0, (void __user *)arg);
        case NVME_NVM_IOCTL_SUBMIT_VIO:
                return nvme_nvm_submit_vio(ns, (void __user *)arg);
        default:
                return -ENOTTY;
        }
}

void nvme_nvm_update_nvm_info(struct nvme_ns *ns)
{
        struct nvm_dev *ndev = ns->ndev;
        struct nvm_geo *geo = &ndev->geo;

        geo->csecs = 1 << ns->lba_shift;
        geo->sos = ns->ms;
}

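/*
 * Register the namespace with the lightnvm subsystem, wiring up the
 * device operations declared above. Called from the NVMe core during
 * namespace scanning when the device is identified as an OCSSD.
 */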
int nvme_nvm_register(struct nvme_ns *ns, char *disk_name, int node)
{
        struct request_queue *q = ns->queue;
        struct nvm_dev *dev;

        _nvme_nvm_check_size();

        dev = nvm_alloc_dev(node);
        if (!dev)
                return -ENOMEM;

        dev->q = q;
        memcpy(dev->name, disk_name, DISK_NAME_LEN);
        dev->ops = &nvme_nvm_dev_ops;
        dev->private_data = ns;
        ns->ndev = dev;

        return nvm_register(dev);
}

void nvme_nvm_unregister(struct nvme_ns *ns)
{
        nvm_unregister(ns->ndev);
}

static ssize_t nvm_dev_attr_show(struct device *dev,
                struct device_attribute *dattr, char *page)
{
        struct nvme_ns *ns = nvme_get_ns_from_dev(dev);
        struct nvm_dev *ndev = ns->ndev;
        struct nvm_geo *geo = &ndev->geo;
        struct attribute *attr;

        if (!ndev)
                return 0;

        attr = &dattr->attr;

        if (strcmp(attr->name, "version") == 0) {
                if (geo->major_ver_id == 1)
                        return scnprintf(page, PAGE_SIZE, "%u\n",
                                                geo->major_ver_id);
                else
                        return scnprintf(page, PAGE_SIZE, "%u.%u\n",
                                                geo->major_ver_id,
                                                geo->minor_ver_id);
        } else if (strcmp(attr->name, "capabilities") == 0) {
                return scnprintf(page, PAGE_SIZE, "%u\n", geo->cap);
        } else if (strcmp(attr->name, "read_typ") == 0) {
                return scnprintf(page, PAGE_SIZE, "%u\n", geo->trdt);
        } else if (strcmp(attr->name, "read_max") == 0) {
                return scnprintf(page, PAGE_SIZE, "%u\n", geo->trdm);
        } else {
                return scnprintf(page,
                                 PAGE_SIZE,
                                 "Unhandled attr(%s) in `%s`\n",
                                 attr->name, __func__);
        }
}

static ssize_t nvm_dev_attr_show_ppaf(struct nvm_addrf_12 *ppaf, char *page)
{
        return scnprintf(page, PAGE_SIZE,
                "0x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x\n",
                                ppaf->ch_offset, ppaf->ch_len,
                                ppaf->lun_offset, ppaf->lun_len,
                                ppaf->pln_offset, ppaf->pln_len,
                                ppaf->blk_offset, ppaf->blk_len,
                                ppaf->pg_offset, ppaf->pg_len,
                                ppaf->sec_offset, ppaf->sec_len);
}

static ssize_t nvm_dev_attr_show_12(struct device *dev,
                struct device_attribute *dattr, char *page)
{
        struct nvme_ns *ns = nvme_get_ns_from_dev(dev);
        struct nvm_dev *ndev = ns->ndev;
        struct nvm_geo *geo = &ndev->geo;
        struct attribute *attr;

        if (!ndev)
                return 0;

        attr = &dattr->attr;

        if (strcmp(attr->name, "vendor_opcode") == 0) {
                return scnprintf(page, PAGE_SIZE, "%u\n", geo->vmnt);
        } else if (strcmp(attr->name, "device_mode") == 0) {
                return scnprintf(page, PAGE_SIZE, "%u\n", geo->dom);
        /* kept for compatibility */
        } else if (strcmp(attr->name, "media_manager") == 0) {
                return scnprintf(page, PAGE_SIZE, "%s\n", "gennvm");
        } else if (strcmp(attr->name, "ppa_format") == 0) {
                return nvm_dev_attr_show_ppaf((void *)&geo->addrf, page);
        } else if (strcmp(attr->name, "media_type") == 0) {     /* u8 */
                return scnprintf(page, PAGE_SIZE, "%u\n", geo->mtype);
        } else if (strcmp(attr->name, "flash_media_type") == 0) {
                return scnprintf(page, PAGE_SIZE, "%u\n", geo->fmtype);
        } else if (strcmp(attr->name, "num_channels") == 0) {
                return scnprintf(page, PAGE_SIZE, "%u\n", geo->num_ch);
        } else if (strcmp(attr->name, "num_luns") == 0) {
                return scnprintf(page, PAGE_SIZE, "%u\n", geo->num_lun);
        } else if (strcmp(attr->name, "num_planes") == 0) {
                return scnprintf(page, PAGE_SIZE, "%u\n", geo->num_pln);
        } else if (strcmp(attr->name, "num_blocks") == 0) {     /* u16 */
                return scnprintf(page, PAGE_SIZE, "%u\n", geo->num_chk);
        } else if (strcmp(attr->name, "num_pages") == 0) {
                return scnprintf(page, PAGE_SIZE, "%u\n", geo->num_pg);
        } else if (strcmp(attr->name, "page_size") == 0) {
                return scnprintf(page, PAGE_SIZE, "%u\n", geo->fpg_sz);
        } else if (strcmp(attr->name, "hw_sector_size") == 0) {
                return scnprintf(page, PAGE_SIZE, "%u\n", geo->csecs);
        } else if (strcmp(attr->name, "oob_sector_size") == 0) {/* u32 */
                return scnprintf(page, PAGE_SIZE, "%u\n", geo->sos);
        } else if (strcmp(attr->name, "prog_typ") == 0) {
                return scnprintf(page, PAGE_SIZE, "%u\n", geo->tprt);
        } else if (strcmp(attr->name, "prog_max") == 0) {
                return scnprintf(page, PAGE_SIZE, "%u\n", geo->tprm);
        } else if (strcmp(attr->name, "erase_typ") == 0) {
                return scnprintf(page, PAGE_SIZE, "%u\n", geo->tbet);
        } else if (strcmp(attr->name, "erase_max") == 0) {
                return scnprintf(page, PAGE_SIZE, "%u\n", geo->tbem);
        } else if (strcmp(attr->name, "multiplane_modes") == 0) {
                return scnprintf(page, PAGE_SIZE, "0x%08x\n", geo->mpos);
        } else if (strcmp(attr->name, "media_capabilities") == 0) {
                return scnprintf(page, PAGE_SIZE, "0x%08x\n", geo->mccap);
        } else if (strcmp(attr->name, "max_phys_secs") == 0) {
                return scnprintf(page, PAGE_SIZE, "%u\n", NVM_MAX_VLBA);
        } else {
                return scnprintf(page, PAGE_SIZE,
                        "Unhandled attr(%s) in `%s`\n",
                        attr->name, __func__);
        }
}

static ssize_t nvm_dev_attr_show_20(struct device *dev,
                struct device_attribute *dattr, char *page)
{
        struct nvme_ns *ns = nvme_get_ns_from_dev(dev);
        struct nvm_dev *ndev = ns->ndev;
        struct nvm_geo *geo = &ndev->geo;
        struct attribute *attr;

        if (!ndev)
                return 0;

        attr = &dattr->attr;

        if (strcmp(attr->name, "groups") == 0) {
                return scnprintf(page, PAGE_SIZE, "%u\n", geo->num_ch);
        } else if (strcmp(attr->name, "punits") == 0) {
                return scnprintf(page, PAGE_SIZE, "%u\n", geo->num_lun);
        } else if (strcmp(attr->name, "chunks") == 0) {
                return scnprintf(page, PAGE_SIZE, "%u\n", geo->num_chk);
        } else if (strcmp(attr->name, "clba") == 0) {
                return scnprintf(page, PAGE_SIZE, "%u\n", geo->clba);
        } else if (strcmp(attr->name, "ws_min") == 0) {
                return scnprintf(page, PAGE_SIZE, "%u\n", geo->ws_min);
        } else if (strcmp(attr->name, "ws_opt") == 0) {
                return scnprintf(page, PAGE_SIZE, "%u\n", geo->ws_opt);
        } else if (strcmp(attr->name, "maxoc") == 0) {
                return scnprintf(page, PAGE_SIZE, "%u\n", geo->maxoc);
        } else if (strcmp(attr->name, "maxocpu") == 0) {
                return scnprintf(page, PAGE_SIZE, "%u\n", geo->maxocpu);
        } else if (strcmp(attr->name, "mw_cunits") == 0) {
                return scnprintf(page, PAGE_SIZE, "%u\n", geo->mw_cunits);
        } else if (strcmp(attr->name, "write_typ") == 0) {
                return scnprintf(page, PAGE_SIZE, "%u\n", geo->tprt);
        } else if (strcmp(attr->name, "write_max") == 0) {
                return scnprintf(page, PAGE_SIZE, "%u\n", geo->tprm);
        } else if (strcmp(attr->name, "reset_typ") == 0) {
                return scnprintf(page, PAGE_SIZE, "%u\n", geo->tbet);
        } else if (strcmp(attr->name, "reset_max") == 0) {
                return scnprintf(page, PAGE_SIZE, "%u\n", geo->tbem);
        } else {
                return scnprintf(page, PAGE_SIZE,
                        "Unhandled attr(%s) in `%s`\n",
                        attr->name, __func__);
        }
}

#define NVM_DEV_ATTR_RO(_name)                                  \
        DEVICE_ATTR(_name, S_IRUGO, nvm_dev_attr_show, NULL)
#define NVM_DEV_ATTR_12_RO(_name)                               \
        DEVICE_ATTR(_name, S_IRUGO, nvm_dev_attr_show_12, NULL)
#define NVM_DEV_ATTR_20_RO(_name)                               \
        DEVICE_ATTR(_name, S_IRUGO, nvm_dev_attr_show_20, NULL)

/* general attributes */
static NVM_DEV_ATTR_RO(version);
static NVM_DEV_ATTR_RO(capabilities);

static NVM_DEV_ATTR_RO(read_typ);
static NVM_DEV_ATTR_RO(read_max);

/* 1.2 values */
static NVM_DEV_ATTR_12_RO(vendor_opcode);
static NVM_DEV_ATTR_12_RO(device_mode);
static NVM_DEV_ATTR_12_RO(ppa_format);
static NVM_DEV_ATTR_12_RO(media_manager);
static NVM_DEV_ATTR_12_RO(media_type);
static NVM_DEV_ATTR_12_RO(flash_media_type);
static NVM_DEV_ATTR_12_RO(num_channels);
static NVM_DEV_ATTR_12_RO(num_luns);
static NVM_DEV_ATTR_12_RO(num_planes);
static NVM_DEV_ATTR_12_RO(num_blocks);
static NVM_DEV_ATTR_12_RO(num_pages);
static NVM_DEV_ATTR_12_RO(page_size);
static NVM_DEV_ATTR_12_RO(hw_sector_size);
static NVM_DEV_ATTR_12_RO(oob_sector_size);
static NVM_DEV_ATTR_12_RO(prog_typ);
static NVM_DEV_ATTR_12_RO(prog_max);
static NVM_DEV_ATTR_12_RO(erase_typ);
static NVM_DEV_ATTR_12_RO(erase_max);
static NVM_DEV_ATTR_12_RO(multiplane_modes);
static NVM_DEV_ATTR_12_RO(media_capabilities);
static NVM_DEV_ATTR_12_RO(max_phys_secs);

static struct attribute *nvm_dev_attrs_12[] = {
        &dev_attr_version.attr,
        &dev_attr_capabilities.attr,

        &dev_attr_vendor_opcode.attr,
        &dev_attr_device_mode.attr,
        &dev_attr_media_manager.attr,
        &dev_attr_ppa_format.attr,
        &dev_attr_media_type.attr,
        &dev_attr_flash_media_type.attr,
        &dev_attr_num_channels.attr,
        &dev_attr_num_luns.attr,
        &dev_attr_num_planes.attr,
        &dev_attr_num_blocks.attr,
        &dev_attr_num_pages.attr,
        &dev_attr_page_size.attr,
        &dev_attr_hw_sector_size.attr,
        &dev_attr_oob_sector_size.attr,
        &dev_attr_read_typ.attr,
        &dev_attr_read_max.attr,
        &dev_attr_prog_typ.attr,
        &dev_attr_prog_max.attr,
        &dev_attr_erase_typ.attr,
        &dev_attr_erase_max.attr,
        &dev_attr_multiplane_modes.attr,
        &dev_attr_media_capabilities.attr,
        &dev_attr_max_phys_secs.attr,

        NULL,
};

static const struct attribute_group nvm_dev_attr_group_12 = {
        .name           = "lightnvm",
        .attrs          = nvm_dev_attrs_12,
};

/* 2.0 values */
static NVM_DEV_ATTR_20_RO(groups);
static NVM_DEV_ATTR_20_RO(punits);
static NVM_DEV_ATTR_20_RO(chunks);
static NVM_DEV_ATTR_20_RO(clba);
static NVM_DEV_ATTR_20_RO(ws_min);
static NVM_DEV_ATTR_20_RO(ws_opt);
static NVM_DEV_ATTR_20_RO(maxoc);
static NVM_DEV_ATTR_20_RO(maxocpu);
static NVM_DEV_ATTR_20_RO(mw_cunits);
static NVM_DEV_ATTR_20_RO(write_typ);
static NVM_DEV_ATTR_20_RO(write_max);
static NVM_DEV_ATTR_20_RO(reset_typ);
static NVM_DEV_ATTR_20_RO(reset_max);

static struct attribute *nvm_dev_attrs_20[] = {
        &dev_attr_version.attr,
        &dev_attr_capabilities.attr,

        &dev_attr_groups.attr,
        &dev_attr_punits.attr,
        &dev_attr_chunks.attr,
        &dev_attr_clba.attr,
        &dev_attr_ws_min.attr,
        &dev_attr_ws_opt.attr,
        &dev_attr_maxoc.attr,
        &dev_attr_maxocpu.attr,
        &dev_attr_mw_cunits.attr,

        &dev_attr_read_typ.attr,
        &dev_attr_read_max.attr,
        &dev_attr_write_typ.attr,
        &dev_attr_write_max.attr,
        &dev_attr_reset_typ.attr,
        &dev_attr_reset_max.attr,

        NULL,
};

static const struct attribute_group nvm_dev_attr_group_20 = {
        .name           = "lightnvm",
        .attrs          = nvm_dev_attrs_20,
};

int nvme_nvm_register_sysfs(struct nvme_ns *ns)
{
        struct nvm_dev *ndev = ns->ndev;
        struct nvm_geo *geo = &ndev->geo;

        if (!ndev)
                return -EINVAL;

        switch (geo->major_ver_id) {
        case 1:
                return sysfs_create_group(&disk_to_dev(ns->disk)->kobj,
                                        &nvm_dev_attr_group_12);
        case 2:
                return sysfs_create_group(&disk_to_dev(ns->disk)->kobj,
                                        &nvm_dev_attr_group_20);
        }

        return -EINVAL;
}

void nvme_nvm_unregister_sysfs(struct nvme_ns *ns)
{
        struct nvm_dev *ndev = ns->ndev;
        struct nvm_geo *geo = &ndev->geo;

        switch (geo->major_ver_id) {
        case 1:
                sysfs_remove_group(&disk_to_dev(ns->disk)->kobj,
                                        &nvm_dev_attr_group_12);
                break;
        case 2:
                sysfs_remove_group(&disk_to_dev(ns->disk)->kobj,
                                        &nvm_dev_attr_group_20);
                break;
        }
}