linux/drivers/nvme/host/lightnvm.c
/*
 * nvme-lightnvm.c - LightNVM NVMe device
 *
 * Copyright (C) 2014-2015 IT University of Copenhagen
 * Initial release: Matias Bjorling <mb@lightnvm.io>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version
 * 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; see the file COPYING.  If not, write to
 * the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139,
 * USA.
 *
 */

#include "nvme.h"

#include <linux/nvme.h>
#include <linux/bitops.h>
#include <linux/lightnvm.h>
#include <linux/vmalloc.h>
#include <linux/sched/sysctl.h>
#include <uapi/linux/lightnvm.h>

enum nvme_nvm_admin_opcode {
        nvme_nvm_admin_identity         = 0xe2,
        nvme_nvm_admin_get_bb_tbl       = 0xf2,
        nvme_nvm_admin_set_bb_tbl       = 0xf1,
};

enum nvme_nvm_log_page {
        NVME_NVM_LOG_REPORT_CHUNK       = 0xca,
};

struct nvme_nvm_ph_rw {
        __u8                    opcode;
        __u8                    flags;
        __u16                   command_id;
        __le32                  nsid;
        __u64                   rsvd2;
        __le64                  metadata;
        __le64                  prp1;
        __le64                  prp2;
        __le64                  spba;
        __le16                  length;
        __le16                  control;
        __le32                  dsmgmt;
        __le64                  resv;
};

struct nvme_nvm_erase_blk {
        __u8                    opcode;
        __u8                    flags;
        __u16                   command_id;
        __le32                  nsid;
        __u64                   rsvd[2];
        __le64                  prp1;
        __le64                  prp2;
        __le64                  spba;
        __le16                  length;
        __le16                  control;
        __le32                  dsmgmt;
        __le64                  resv;
};

struct nvme_nvm_identity {
        __u8                    opcode;
        __u8                    flags;
        __u16                   command_id;
        __le32                  nsid;
        __u64                   rsvd[2];
        __le64                  prp1;
        __le64                  prp2;
        __u32                   rsvd11[6];
};

struct nvme_nvm_getbbtbl {
        __u8                    opcode;
        __u8                    flags;
        __u16                   command_id;
        __le32                  nsid;
        __u64                   rsvd[2];
        __le64                  prp1;
        __le64                  prp2;
        __le64                  spba;
        __u32                   rsvd4[4];
};

struct nvme_nvm_setbbtbl {
        __u8                    opcode;
        __u8                    flags;
        __u16                   command_id;
        __le32                  nsid;
        __le64                  rsvd[2];
        __le64                  prp1;
        __le64                  prp2;
        __le64                  spba;
        __le16                  nlb;
        __u8                    value;
        __u8                    rsvd3;
        __u32                   rsvd4[3];
};

struct nvme_nvm_command {
        union {
                struct nvme_common_command common;
                struct nvme_nvm_ph_rw ph_rw;
                struct nvme_nvm_erase_blk erase;
                struct nvme_nvm_identity identity;
                struct nvme_nvm_getbbtbl get_bb;
                struct nvme_nvm_setbbtbl set_bb;
        };
};

struct nvme_nvm_id12_grp {
        __u8                    mtype;
        __u8                    fmtype;
        __le16                  res16;
        __u8                    num_ch;
        __u8                    num_lun;
        __u8                    num_pln;
        __u8                    rsvd1;
        __le16                  num_chk;
        __le16                  num_pg;
        __le16                  fpg_sz;
        __le16                  csecs;
        __le16                  sos;
        __le16                  rsvd2;
        __le32                  trdt;
        __le32                  trdm;
        __le32                  tprt;
        __le32                  tprm;
        __le32                  tbet;
        __le32                  tbem;
        __le32                  mpos;
        __le32                  mccap;
        __le16                  cpar;
        __u8                    reserved[906];
} __packed;

struct nvme_nvm_id12_addrf {
        __u8                    ch_offset;
        __u8                    ch_len;
        __u8                    lun_offset;
        __u8                    lun_len;
        __u8                    pln_offset;
        __u8                    pln_len;
        __u8                    blk_offset;
        __u8                    blk_len;
        __u8                    pg_offset;
        __u8                    pg_len;
        __u8                    sec_offset;
        __u8                    sec_len;
        __u8                    res[4];
} __packed;

struct nvme_nvm_id12 {
        __u8                    ver_id;
        __u8                    vmnt;
        __u8                    cgrps;
        __u8                    res;
        __le32                  cap;
        __le32                  dom;
        struct nvme_nvm_id12_addrf ppaf;
        __u8                    resv[228];
        struct nvme_nvm_id12_grp grp;
        __u8                    resv2[2880];
} __packed;

struct nvme_nvm_bb_tbl {
        __u8    tblid[4];
        __le16  verid;
        __le16  revid;
        __le32  rvsd1;
        __le32  tblks;
        __le32  tfact;
        __le32  tgrown;
        __le32  tdresv;
        __le32  thresv;
        __le32  rsvd2[8];
        __u8    blk[0];
};

struct nvme_nvm_id20_addrf {
        __u8                    grp_len;
        __u8                    pu_len;
        __u8                    chk_len;
        __u8                    lba_len;
        __u8                    resv[4];
};

struct nvme_nvm_id20 {
        __u8                    mjr;
        __u8                    mnr;
        __u8                    resv[6];

        struct nvme_nvm_id20_addrf lbaf;

        __le32                  mccap;
        __u8                    resv2[12];

        __u8                    wit;
        __u8                    resv3[31];

        /* Geometry */
        __le16                  num_grp;
        __le16                  num_pu;
        __le32                  num_chk;
        __le32                  clba;
        __u8                    resv4[52];

        /* Write data requirements */
        __le32                  ws_min;
        __le32                  ws_opt;
        __le32                  mw_cunits;
        __le32                  maxoc;
        __le32                  maxocpu;
        __u8                    resv5[44];

        /* Performance related metrics */
        __le32                  trdt;
        __le32                  trdm;
        __le32                  twrt;
        __le32                  twrm;
        __le32                  tcrst;
        __le32                  tcrsm;
        __u8                    resv6[40];

        /* Reserved area */
        __u8                    resv7[2816];

        /* Vendor specific */
        __u8                    vs[1024];
};

struct nvme_nvm_chk_meta {
        __u8    state;
        __u8    type;
        __u8    wi;
        __u8    rsvd[5];
        __le64  slba;
        __le64  cnlb;
        __le64  wp;
};

/*
 * Check we didn't inadvertently grow the command struct
 */
static inline void _nvme_nvm_check_size(void)
{
        BUILD_BUG_ON(sizeof(struct nvme_nvm_identity) != 64);
        BUILD_BUG_ON(sizeof(struct nvme_nvm_ph_rw) != 64);
        BUILD_BUG_ON(sizeof(struct nvme_nvm_erase_blk) != 64);
        BUILD_BUG_ON(sizeof(struct nvme_nvm_getbbtbl) != 64);
        BUILD_BUG_ON(sizeof(struct nvme_nvm_setbbtbl) != 64);
        BUILD_BUG_ON(sizeof(struct nvme_nvm_id12_grp) != 960);
        BUILD_BUG_ON(sizeof(struct nvme_nvm_id12_addrf) != 16);
        BUILD_BUG_ON(sizeof(struct nvme_nvm_id12) != NVME_IDENTIFY_DATA_SIZE);
        BUILD_BUG_ON(sizeof(struct nvme_nvm_bb_tbl) != 64);
        BUILD_BUG_ON(sizeof(struct nvme_nvm_id20_addrf) != 8);
        BUILD_BUG_ON(sizeof(struct nvme_nvm_id20) != NVME_IDENTIFY_DATA_SIZE);
        BUILD_BUG_ON(sizeof(struct nvme_nvm_chk_meta) != 32);
        BUILD_BUG_ON(sizeof(struct nvme_nvm_chk_meta) !=
                                                sizeof(struct nvm_chk_meta));
}

static void nvme_nvm_set_addr_12(struct nvm_addrf_12 *dst,
                                 struct nvme_nvm_id12_addrf *src)
{
        dst->ch_len = src->ch_len;
        dst->lun_len = src->lun_len;
        dst->blk_len = src->blk_len;
        dst->pg_len = src->pg_len;
        dst->pln_len = src->pln_len;
        dst->sec_len = src->sec_len;

        dst->ch_offset = src->ch_offset;
        dst->lun_offset = src->lun_offset;
        dst->blk_offset = src->blk_offset;
        dst->pg_offset = src->pg_offset;
        dst->pln_offset = src->pln_offset;
        dst->sec_offset = src->sec_offset;

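        /*
         * Precompute per-field extraction masks: a field of 'len' bits at
         * 'offset' is isolated by ((1ULL << len) - 1) << offset.
         */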
        dst->ch_mask = ((1ULL << dst->ch_len) - 1) << dst->ch_offset;
        dst->lun_mask = ((1ULL << dst->lun_len) - 1) << dst->lun_offset;
        dst->blk_mask = ((1ULL << dst->blk_len) - 1) << dst->blk_offset;
        dst->pg_mask = ((1ULL << dst->pg_len) - 1) << dst->pg_offset;
        dst->pln_mask = ((1ULL << dst->pln_len) - 1) << dst->pln_offset;
        dst->sec_mask = ((1ULL << dst->sec_len) - 1) << dst->sec_offset;
}

static int nvme_nvm_setup_12(struct nvme_nvm_id12 *id,
                             struct nvm_geo *geo)
{
        struct nvme_nvm_id12_grp *src;
        int sec_per_pg, sec_per_pl, pg_per_blk;

        if (id->cgrps != 1)
                return -EINVAL;

        src = &id->grp;

        if (src->mtype != 0) {
                pr_err("nvm: memory type not supported\n");
                return -EINVAL;
        }

        /* 1.2 spec. only reports a single version id - unfold */
        geo->major_ver_id = id->ver_id;
        geo->minor_ver_id = 2;

        /* Set compacted version for upper layers */
        geo->version = NVM_OCSSD_SPEC_12;

        geo->num_ch = src->num_ch;
        geo->num_lun = src->num_lun;
        geo->all_luns = geo->num_ch * geo->num_lun;

        geo->num_chk = le16_to_cpu(src->num_chk);

        geo->csecs = le16_to_cpu(src->csecs);
        geo->sos = le16_to_cpu(src->sos);

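        /*
         * Fold the 1.2 page/plane geometry into the 2.0 notion of a chunk:
         * a chunk spans all sectors of all planes across the pages of a
         * block.
         */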
        pg_per_blk = le16_to_cpu(src->num_pg);
        sec_per_pg = le16_to_cpu(src->fpg_sz) / geo->csecs;
        sec_per_pl = sec_per_pg * src->num_pln;
        geo->clba = sec_per_pl * pg_per_blk;

        geo->all_chunks = geo->all_luns * geo->num_chk;
        geo->total_secs = geo->clba * geo->all_chunks;

        geo->ws_min = sec_per_pg;
        geo->ws_opt = sec_per_pg;
        geo->mw_cunits = geo->ws_opt << 3;      /* default to MLC safe values */

        /* Do not impose values for the maximum number of open blocks, as it
         * is unspecified in 1.2. Users of 1.2 must be aware of this and, if
         * restrictions apply, specify these values through a quirk.
         */
        geo->maxoc = geo->all_luns * geo->num_chk;
        geo->maxocpu = geo->num_chk;

        geo->mccap = le32_to_cpu(src->mccap);

        geo->trdt = le32_to_cpu(src->trdt);
        geo->trdm = le32_to_cpu(src->trdm);
        geo->tprt = le32_to_cpu(src->tprt);
        geo->tprm = le32_to_cpu(src->tprm);
        geo->tbet = le32_to_cpu(src->tbet);
        geo->tbem = le32_to_cpu(src->tbem);

        /* 1.2 compatibility */
        geo->vmnt = id->vmnt;
        geo->cap = le32_to_cpu(id->cap);
        geo->dom = le32_to_cpu(id->dom);

        geo->mtype = src->mtype;
        geo->fmtype = src->fmtype;

        geo->cpar = le16_to_cpu(src->cpar);
        geo->mpos = le32_to_cpu(src->mpos);

        geo->pln_mode = NVM_PLANE_SINGLE;

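        /*
         * mpos advertises one byte of plane-mode flags per operation class
         * (read/program/erase); bit 1 in each byte signals dual-plane and
         * bit 2 quad-plane support, hence the 0x020202 and 0x040404 masks.
         */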
        if (geo->mpos & 0x020202) {
                geo->pln_mode = NVM_PLANE_DOUBLE;
                geo->ws_opt <<= 1;
        } else if (geo->mpos & 0x040404) {
                geo->pln_mode = NVM_PLANE_QUAD;
                geo->ws_opt <<= 2;
        }

        geo->num_pln = src->num_pln;
        geo->num_pg = le16_to_cpu(src->num_pg);
        geo->fpg_sz = le16_to_cpu(src->fpg_sz);

        nvme_nvm_set_addr_12((struct nvm_addrf_12 *)&geo->addrf, &id->ppaf);

        return 0;
}

static void nvme_nvm_set_addr_20(struct nvm_addrf *dst,
                                 struct nvme_nvm_id20_addrf *src)
{
        dst->ch_len = src->grp_len;
        dst->lun_len = src->pu_len;
        dst->chk_len = src->chk_len;
        dst->sec_len = src->lba_len;

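        /*
         * The 2.0 address is packed from the least significant bits up:
         * sector, then chunk, parallel unit and group, so each offset is
         * the sum of the field lengths below it.
         */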
        dst->sec_offset = 0;
        dst->chk_offset = dst->sec_len;
        dst->lun_offset = dst->chk_offset + dst->chk_len;
        dst->ch_offset = dst->lun_offset + dst->lun_len;

        dst->ch_mask = ((1ULL << dst->ch_len) - 1) << dst->ch_offset;
        dst->lun_mask = ((1ULL << dst->lun_len) - 1) << dst->lun_offset;
        dst->chk_mask = ((1ULL << dst->chk_len) - 1) << dst->chk_offset;
        dst->sec_mask = ((1ULL << dst->sec_len) - 1) << dst->sec_offset;
}

static int nvme_nvm_setup_20(struct nvme_nvm_id20 *id,
                             struct nvm_geo *geo)
{
        geo->major_ver_id = id->mjr;
        geo->minor_ver_id = id->mnr;

        /* Set compacted version for upper layers */
        geo->version = NVM_OCSSD_SPEC_20;

        geo->num_ch = le16_to_cpu(id->num_grp);
        geo->num_lun = le16_to_cpu(id->num_pu);
        geo->all_luns = geo->num_ch * geo->num_lun;

        geo->num_chk = le32_to_cpu(id->num_chk);
        geo->clba = le32_to_cpu(id->clba);

        geo->all_chunks = geo->all_luns * geo->num_chk;
        geo->total_secs = geo->clba * geo->all_chunks;

        geo->ws_min = le32_to_cpu(id->ws_min);
        geo->ws_opt = le32_to_cpu(id->ws_opt);
        geo->mw_cunits = le32_to_cpu(id->mw_cunits);
        geo->maxoc = le32_to_cpu(id->maxoc);
        geo->maxocpu = le32_to_cpu(id->maxocpu);

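        /*
         * The geo timing fields keep their 1.2 names: tprt/tprm carry the
         * 2.0 write timings (twrt/twrm) and tbet/tbem the chunk reset
         * timings (tcrst/tcrsm).
         */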
        geo->trdt = le32_to_cpu(id->trdt);
        geo->trdm = le32_to_cpu(id->trdm);
        geo->tprt = le32_to_cpu(id->twrt);
        geo->tprm = le32_to_cpu(id->twrm);
        geo->tbet = le32_to_cpu(id->tcrst);
        geo->tbem = le32_to_cpu(id->tcrsm);

        nvme_nvm_set_addr_20(&geo->addrf, &id->lbaf);

        return 0;
}

static int nvme_nvm_identity(struct nvm_dev *nvmdev)
{
        struct nvme_ns *ns = nvmdev->q->queuedata;
        struct nvme_nvm_id12 *id;
        struct nvme_nvm_command c = {};
        int ret;

        c.identity.opcode = nvme_nvm_admin_identity;
        c.identity.nsid = cpu_to_le32(ns->head->ns_id);

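        /*
         * The 1.2 and 2.0 identify structures are both a full 4k identify
         * page, so one allocation can be interpreted as either layout once
         * the version byte has been inspected.
         */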
        id = kmalloc(sizeof(struct nvme_nvm_id12), GFP_KERNEL);
        if (!id)
                return -ENOMEM;

        ret = nvme_submit_sync_cmd(ns->ctrl->admin_q, (struct nvme_command *)&c,
                                id, sizeof(struct nvme_nvm_id12));
        if (ret) {
                ret = -EIO;
                goto out;
        }

        /*
         * The 1.2 and 2.0 specifications share the first byte in their geometry
         * command to make it possible to know what version a device implements.
         */
        switch (id->ver_id) {
        case 1:
                ret = nvme_nvm_setup_12(id, &nvmdev->geo);
                break;
        case 2:
                ret = nvme_nvm_setup_20((struct nvme_nvm_id20 *)id,
                                                        &nvmdev->geo);
                break;
        default:
                dev_err(ns->ctrl->device, "OCSSD revision not supported (%d)\n",
                                                        id->ver_id);
                ret = -EINVAL;
        }

out:
        kfree(id);
        return ret;
}

static int nvme_nvm_get_bb_tbl(struct nvm_dev *nvmdev, struct ppa_addr ppa,
                                                                u8 *blks)
{
        struct request_queue *q = nvmdev->q;
        struct nvm_geo *geo = &nvmdev->geo;
        struct nvme_ns *ns = q->queuedata;
        struct nvme_ctrl *ctrl = ns->ctrl;
        struct nvme_nvm_command c = {};
        struct nvme_nvm_bb_tbl *bb_tbl;
        int nr_blks = geo->num_chk * geo->num_pln;
        int tblsz = sizeof(struct nvme_nvm_bb_tbl) + nr_blks;
        int ret = 0;

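        /* The table carries one state byte per block, across all planes */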
        c.get_bb.opcode = nvme_nvm_admin_get_bb_tbl;
        c.get_bb.nsid = cpu_to_le32(ns->head->ns_id);
        c.get_bb.spba = cpu_to_le64(ppa.ppa);

        bb_tbl = kzalloc(tblsz, GFP_KERNEL);
        if (!bb_tbl)
                return -ENOMEM;

        ret = nvme_submit_sync_cmd(ctrl->admin_q, (struct nvme_command *)&c,
                                                                bb_tbl, tblsz);
        if (ret) {
                dev_err(ctrl->device, "get bad block table failed (%d)\n", ret);
                ret = -EIO;
                goto out;
        }

        if (bb_tbl->tblid[0] != 'B' || bb_tbl->tblid[1] != 'B' ||
                bb_tbl->tblid[2] != 'L' || bb_tbl->tblid[3] != 'T') {
                dev_err(ctrl->device, "bbt format mismatch\n");
                ret = -EINVAL;
                goto out;
        }

        if (le16_to_cpu(bb_tbl->verid) != 1) {
                ret = -EINVAL;
                dev_err(ctrl->device, "bbt version not supported\n");
                goto out;
        }

        if (le32_to_cpu(bb_tbl->tblks) != nr_blks) {
                ret = -EINVAL;
                dev_err(ctrl->device,
                                "bbt unexpected blocks returned (%u!=%u)\n",
                                le32_to_cpu(bb_tbl->tblks), nr_blks);
                goto out;
        }

        memcpy(blks, bb_tbl->blk, geo->num_chk * geo->num_pln);
out:
        kfree(bb_tbl);
        return ret;
}

static int nvme_nvm_set_bb_tbl(struct nvm_dev *nvmdev, struct ppa_addr *ppas,
                                                        int nr_ppas, int type)
{
        struct nvme_ns *ns = nvmdev->q->queuedata;
        struct nvme_nvm_command c = {};
        int ret = 0;

        c.set_bb.opcode = nvme_nvm_admin_set_bb_tbl;
        c.set_bb.nsid = cpu_to_le32(ns->head->ns_id);
        c.set_bb.spba = cpu_to_le64(ppas->ppa);
        c.set_bb.nlb = cpu_to_le16(nr_ppas - 1);
        c.set_bb.value = type;

        ret = nvme_submit_sync_cmd(ns->ctrl->admin_q, (struct nvme_command *)&c,
                                                                NULL, 0);
        if (ret)
                dev_err(ns->ctrl->device, "set bad block table failed (%d)\n",
                                                                        ret);
        return ret;
}

/*
 * Expect the lba in device format
 */
static int nvme_nvm_get_chk_meta(struct nvm_dev *ndev,
                                 struct nvm_chk_meta *meta,
                                 sector_t slba, int nchks)
{
        struct nvm_geo *geo = &ndev->geo;
        struct nvme_ns *ns = ndev->q->queuedata;
        struct nvme_ctrl *ctrl = ns->ctrl;
        struct nvme_nvm_chk_meta *dev_meta = (struct nvme_nvm_chk_meta *)meta;
        struct ppa_addr ppa;
        size_t left = nchks * sizeof(struct nvme_nvm_chk_meta);
        size_t log_pos, offset, len;
        int ret, i, max_len;

        /*
         * Limit requests to a maximum of 256K to avoid issuing arbitrarily
         * large requests when the device does not specify a maximum
         * transfer size.
         */
        max_len = min_t(unsigned int, ctrl->max_hw_sectors << 9, 256 * 1024);

        /* Normalize lba address space to obtain log offset */
        ppa.ppa = slba;
        ppa = dev_to_generic_addr(ndev, ppa);

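        /*
         * The report-chunk log is a flat array indexed group-major: chunk
         * within the parallel unit, plus pu and group strides in units of
         * num_chk entries.
         */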
        log_pos = ppa.m.chk;
        log_pos += ppa.m.pu * geo->num_chk;
        log_pos += ppa.m.grp * geo->num_lun * geo->num_chk;

        offset = log_pos * sizeof(struct nvme_nvm_chk_meta);

        while (left) {
                len = min_t(unsigned int, left, max_len);

                ret = nvme_get_log(ctrl, ns->head->ns_id,
                                NVME_NVM_LOG_REPORT_CHUNK, 0, dev_meta, len,
                                offset);
                if (ret) {
                        dev_err(ctrl->device, "Get REPORT CHUNK log error\n");
                        break;
                }

                for (i = 0; i < len; i += sizeof(struct nvme_nvm_chk_meta)) {
                        meta->state = dev_meta->state;
                        meta->type = dev_meta->type;
                        meta->wi = dev_meta->wi;
                        meta->slba = le64_to_cpu(dev_meta->slba);
                        meta->cnlb = le64_to_cpu(dev_meta->cnlb);
                        meta->wp = le64_to_cpu(dev_meta->wp);

                        meta++;
                        dev_meta++;
                }

                offset += len;
                left -= len;
        }

        return ret;
}

static inline void nvme_nvm_rqtocmd(struct nvm_rq *rqd, struct nvme_ns *ns,
                                    struct nvme_nvm_command *c)
{
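        /* NVMe counts are zero-based, hence the nr_ppas - 1 below */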
        c->ph_rw.opcode = rqd->opcode;
        c->ph_rw.nsid = cpu_to_le32(ns->head->ns_id);
        c->ph_rw.spba = cpu_to_le64(rqd->ppa_addr.ppa);
        c->ph_rw.metadata = cpu_to_le64(rqd->dma_meta_list);
        c->ph_rw.control = cpu_to_le16(rqd->flags);
        c->ph_rw.length = cpu_to_le16(rqd->nr_ppas - 1);
}

static void nvme_nvm_end_io(struct request *rq, blk_status_t status)
{
        struct nvm_rq *rqd = rq->end_io_data;

        rqd->ppa_status = le64_to_cpu(nvme_req(rq)->result.u64);
        rqd->error = nvme_req(rq)->status;
        nvm_end_io(rqd);

        kfree(nvme_req(rq)->cmd);
        blk_mq_free_request(rq);
}

static struct request *nvme_nvm_alloc_request(struct request_queue *q,
                                              struct nvm_rq *rqd,
                                              struct nvme_nvm_command *cmd)
{
        struct nvme_ns *ns = q->queuedata;
        struct request *rq;

        nvme_nvm_rqtocmd(rqd, ns, cmd);

        rq = nvme_alloc_request(q, (struct nvme_command *)cmd, 0, NVME_QID_ANY);
        if (IS_ERR(rq))
                return rq;

        rq->cmd_flags &= ~REQ_FAILFAST_DRIVER;

        if (rqd->bio)
                blk_init_request_from_bio(rq, rqd->bio);
        else
                rq->ioprio = IOPRIO_PRIO_VALUE(IOPRIO_CLASS_BE, IOPRIO_NORM);

        return rq;
}

static int nvme_nvm_submit_io(struct nvm_dev *dev, struct nvm_rq *rqd)
{
        struct request_queue *q = dev->q;
        struct nvme_nvm_command *cmd;
        struct request *rq;

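        /*
         * The command must outlive this function on the asynchronous path,
         * so it is heap allocated here and freed in nvme_nvm_end_io().
         */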
        cmd = kzalloc(sizeof(struct nvme_nvm_command), GFP_KERNEL);
        if (!cmd)
                return -ENOMEM;

        rq = nvme_nvm_alloc_request(q, rqd, cmd);
        if (IS_ERR(rq)) {
                kfree(cmd);
                return PTR_ERR(rq);
        }

        rq->end_io_data = rqd;

        blk_execute_rq_nowait(q, NULL, rq, 0, nvme_nvm_end_io);

        return 0;
}

static int nvme_nvm_submit_io_sync(struct nvm_dev *dev, struct nvm_rq *rqd)
{
        struct request_queue *q = dev->q;
        struct request *rq;
        struct nvme_nvm_command cmd;
        int ret = 0;

        memset(&cmd, 0, sizeof(struct nvme_nvm_command));

        rq = nvme_nvm_alloc_request(q, rqd, &cmd);
        if (IS_ERR(rq))
                return PTR_ERR(rq);

        /* I/Os can fail and the error is signaled through rqd. Callers must
         * handle the error accordingly.
         */
        blk_execute_rq(q, NULL, rq, 0);
        if (nvme_req(rq)->flags & NVME_REQ_CANCELLED)
                ret = -EINTR;

        rqd->ppa_status = le64_to_cpu(nvme_req(rq)->result.u64);
        rqd->error = nvme_req(rq)->status;

        blk_mq_free_request(rq);

        return ret;
}

static void *nvme_nvm_create_dma_pool(struct nvm_dev *nvmdev, char *name)
{
        struct nvme_ns *ns = nvmdev->q->queuedata;

        return dma_pool_create(name, ns->ctrl->dev, PAGE_SIZE, PAGE_SIZE, 0);
}

static void nvme_nvm_destroy_dma_pool(void *pool)
{
        struct dma_pool *dma_pool = pool;

        dma_pool_destroy(dma_pool);
}

static void *nvme_nvm_dev_dma_alloc(struct nvm_dev *dev, void *pool,
                                    gfp_t mem_flags, dma_addr_t *dma_handler)
{
        return dma_pool_alloc(pool, mem_flags, dma_handler);
}

static void nvme_nvm_dev_dma_free(void *pool, void *addr,
                                                        dma_addr_t dma_handler)
{
        dma_pool_free(pool, addr, dma_handler);
}

static struct nvm_dev_ops nvme_nvm_dev_ops = {
        .identity               = nvme_nvm_identity,

        .get_bb_tbl             = nvme_nvm_get_bb_tbl,
        .set_bb_tbl             = nvme_nvm_set_bb_tbl,

        .get_chk_meta           = nvme_nvm_get_chk_meta,

        .submit_io              = nvme_nvm_submit_io,
        .submit_io_sync         = nvme_nvm_submit_io_sync,

        .create_dma_pool        = nvme_nvm_create_dma_pool,
        .destroy_dma_pool       = nvme_nvm_destroy_dma_pool,
        .dev_dma_alloc          = nvme_nvm_dev_dma_alloc,
        .dev_dma_free           = nvme_nvm_dev_dma_free,
};

static int nvme_nvm_submit_user_cmd(struct request_queue *q,
                                struct nvme_ns *ns,
                                struct nvme_nvm_command *vcmd,
                                void __user *ubuf, unsigned int bufflen,
                                void __user *meta_buf, unsigned int meta_len,
                                void __user *ppa_buf, unsigned int ppa_len,
                                u32 *result, u64 *status, unsigned int timeout)
{
        bool write = nvme_is_write((struct nvme_command *)vcmd);
        struct nvm_dev *dev = ns->ndev;
        struct gendisk *disk = ns->disk;
        struct request *rq;
        struct bio *bio = NULL;
        __le64 *ppa_list = NULL;
        dma_addr_t ppa_dma;
        __le64 *metadata = NULL;
        dma_addr_t metadata_dma;
        DECLARE_COMPLETION_ONSTACK(wait);
        int ret = 0;

        rq = nvme_alloc_request(q, (struct nvme_command *)vcmd, 0,
                        NVME_QID_ANY);
        if (IS_ERR(rq)) {
                ret = -ENOMEM;
                goto err_cmd;
        }

        rq->timeout = timeout ? timeout : ADMIN_TIMEOUT;

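        /*
         * A vector command carries its PPA list in a DMA-able buffer
         * referenced by spba; a single-PPA command encodes the address
         * directly in the field.
         */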
        if (ppa_buf && ppa_len) {
                ppa_list = dma_pool_alloc(dev->dma_pool, GFP_KERNEL, &ppa_dma);
                if (!ppa_list) {
                        ret = -ENOMEM;
                        goto err_rq;
                }
                if (copy_from_user(ppa_list, (void __user *)ppa_buf,
                                                sizeof(u64) * (ppa_len + 1))) {
                        ret = -EFAULT;
                        goto err_ppa;
                }
                vcmd->ph_rw.spba = cpu_to_le64(ppa_dma);
        } else {
                vcmd->ph_rw.spba = cpu_to_le64((uintptr_t)ppa_buf);
        }

        if (ubuf && bufflen) {
                ret = blk_rq_map_user(q, rq, NULL, ubuf, bufflen, GFP_KERNEL);
                if (ret)
                        goto err_ppa;
                bio = rq->bio;

                if (meta_buf && meta_len) {
                        metadata = dma_pool_alloc(dev->dma_pool, GFP_KERNEL,
                                                                &metadata_dma);
                        if (!metadata) {
                                ret = -ENOMEM;
                                goto err_map;
                        }

                        if (write) {
                                if (copy_from_user(metadata,
                                                (void __user *)meta_buf,
                                                meta_len)) {
                                        ret = -EFAULT;
                                        goto err_meta;
                                }
                        }
                        vcmd->ph_rw.metadata = cpu_to_le64(metadata_dma);
                }

                bio->bi_disk = disk;
        }

        blk_execute_rq(q, NULL, rq, 0);

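        /* 0x7ff keeps the status code and type, masking the More/DNR bits */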
        if (nvme_req(rq)->flags & NVME_REQ_CANCELLED)
                ret = -EINTR;
        else if (nvme_req(rq)->status & 0x7ff)
                ret = -EIO;
        if (result)
                *result = nvme_req(rq)->status & 0x7ff;
        if (status)
                *status = le64_to_cpu(nvme_req(rq)->result.u64);

        if (metadata && !ret && !write) {
                if (copy_to_user(meta_buf, (void *)metadata, meta_len))
                        ret = -EFAULT;
        }
err_meta:
        if (meta_buf && meta_len)
                dma_pool_free(dev->dma_pool, metadata, metadata_dma);
err_map:
        if (bio)
                blk_rq_unmap_user(bio);
err_ppa:
        if (ppa_buf && ppa_len)
                dma_pool_free(dev->dma_pool, ppa_list, ppa_dma);
err_rq:
        blk_mq_free_request(rq);
err_cmd:
        return ret;
}

static int nvme_nvm_submit_vio(struct nvme_ns *ns,
                                        struct nvm_user_vio __user *uvio)
{
        struct nvm_user_vio vio;
        struct nvme_nvm_command c;
        unsigned int length;
        int ret;

        if (copy_from_user(&vio, uvio, sizeof(vio)))
                return -EFAULT;
        if (vio.flags)
                return -EINVAL;

        memset(&c, 0, sizeof(c));
        c.ph_rw.opcode = vio.opcode;
        c.ph_rw.nsid = cpu_to_le32(ns->head->ns_id);
        c.ph_rw.control = cpu_to_le16(vio.control);
        c.ph_rw.length = cpu_to_le16(vio.nppas);

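        /* nppas is zero-based, so the transfer covers nppas + 1 sectors */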
        length = (vio.nppas + 1) << ns->lba_shift;

        ret = nvme_nvm_submit_user_cmd(ns->queue, ns, &c,
                        (void __user *)(uintptr_t)vio.addr, length,
                        (void __user *)(uintptr_t)vio.metadata,
                                                        vio.metadata_len,
                        (void __user *)(uintptr_t)vio.ppa_list, vio.nppas,
                        &vio.result, &vio.status, 0);

        if (ret && copy_to_user(uvio, &vio, sizeof(vio)))
                return -EFAULT;

        return ret;
}

static int nvme_nvm_user_vcmd(struct nvme_ns *ns, int admin,
                                        struct nvm_passthru_vio __user *uvcmd)
{
        struct nvm_passthru_vio vcmd;
        struct nvme_nvm_command c;
        struct request_queue *q;
        unsigned int timeout = 0;
        int ret;

        if (copy_from_user(&vcmd, uvcmd, sizeof(vcmd)))
                return -EFAULT;
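        /* Only get_bb_tbl (0xf2) may be issued without CAP_SYS_ADMIN */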
        if ((vcmd.opcode != 0xF2) && (!capable(CAP_SYS_ADMIN)))
                return -EACCES;
        if (vcmd.flags)
                return -EINVAL;

        memset(&c, 0, sizeof(c));
        c.common.opcode = vcmd.opcode;
        c.common.nsid = cpu_to_le32(ns->head->ns_id);
        c.common.cdw2[0] = cpu_to_le32(vcmd.cdw2);
        c.common.cdw2[1] = cpu_to_le32(vcmd.cdw3);
        /* cdw11-12 */
        c.ph_rw.length = cpu_to_le16(vcmd.nppas);
        c.ph_rw.control  = cpu_to_le16(vcmd.control);
        c.common.cdw10[3] = cpu_to_le32(vcmd.cdw13);
        c.common.cdw10[4] = cpu_to_le32(vcmd.cdw14);
        c.common.cdw10[5] = cpu_to_le32(vcmd.cdw15);

        if (vcmd.timeout_ms)
                timeout = msecs_to_jiffies(vcmd.timeout_ms);

        q = admin ? ns->ctrl->admin_q : ns->queue;

        ret = nvme_nvm_submit_user_cmd(q, ns,
                        (struct nvme_nvm_command *)&c,
                        (void __user *)(uintptr_t)vcmd.addr, vcmd.data_len,
                        (void __user *)(uintptr_t)vcmd.metadata,
                                                        vcmd.metadata_len,
                        (void __user *)(uintptr_t)vcmd.ppa_list, vcmd.nppas,
                        &vcmd.result, &vcmd.status, timeout);

        if (ret && copy_to_user(uvcmd, &vcmd, sizeof(vcmd)))
                return -EFAULT;

        return ret;
}

int nvme_nvm_ioctl(struct nvme_ns *ns, unsigned int cmd, unsigned long arg)
{
        switch (cmd) {
        case NVME_NVM_IOCTL_ADMIN_VIO:
                return nvme_nvm_user_vcmd(ns, 1, (void __user *)arg);
        case NVME_NVM_IOCTL_IO_VIO:
                return nvme_nvm_user_vcmd(ns, 0, (void __user *)arg);
        case NVME_NVM_IOCTL_SUBMIT_VIO:
                return nvme_nvm_submit_vio(ns, (void __user *)arg);
        default:
                return -ENOTTY;
        }
}

void nvme_nvm_update_nvm_info(struct nvme_ns *ns)
{
        struct nvm_dev *ndev = ns->ndev;
        struct nvm_geo *geo = &ndev->geo;

        geo->csecs = 1 << ns->lba_shift;
        geo->sos = ns->ms;
}

int nvme_nvm_register(struct nvme_ns *ns, char *disk_name, int node)
{
        struct request_queue *q = ns->queue;
        struct nvm_dev *dev;

        _nvme_nvm_check_size();

        dev = nvm_alloc_dev(node);
        if (!dev)
                return -ENOMEM;

        dev->q = q;
        memcpy(dev->name, disk_name, DISK_NAME_LEN);
        dev->ops = &nvme_nvm_dev_ops;
        dev->private_data = ns;
        ns->ndev = dev;

        return nvm_register(dev);
}

void nvme_nvm_unregister(struct nvme_ns *ns)
{
        nvm_unregister(ns->ndev);
}

static ssize_t nvm_dev_attr_show(struct device *dev,
                struct device_attribute *dattr, char *page)
{
        struct nvme_ns *ns = nvme_get_ns_from_dev(dev);
        struct nvm_dev *ndev = ns->ndev;
        struct nvm_geo *geo = &ndev->geo;
        struct attribute *attr;

        if (!ndev)
                return 0;

        attr = &dattr->attr;

        if (strcmp(attr->name, "version") == 0) {
                if (geo->major_ver_id == 1)
                        return scnprintf(page, PAGE_SIZE, "%u\n",
                                                geo->major_ver_id);
                else
                        return scnprintf(page, PAGE_SIZE, "%u.%u\n",
                                                geo->major_ver_id,
                                                geo->minor_ver_id);
        } else if (strcmp(attr->name, "capabilities") == 0) {
                return scnprintf(page, PAGE_SIZE, "%u\n", geo->cap);
        } else if (strcmp(attr->name, "read_typ") == 0) {
                return scnprintf(page, PAGE_SIZE, "%u\n", geo->trdt);
        } else if (strcmp(attr->name, "read_max") == 0) {
                return scnprintf(page, PAGE_SIZE, "%u\n", geo->trdm);
        } else {
                return scnprintf(page,
                                 PAGE_SIZE,
                                 "Unhandled attr(%s) in `%s`\n",
                                 attr->name, __func__);
        }
}

static ssize_t nvm_dev_attr_show_ppaf(struct nvm_addrf_12 *ppaf, char *page)
{
        return scnprintf(page, PAGE_SIZE,
                "0x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x\n",
                                ppaf->ch_offset, ppaf->ch_len,
                                ppaf->lun_offset, ppaf->lun_len,
                                ppaf->pln_offset, ppaf->pln_len,
                                ppaf->blk_offset, ppaf->blk_len,
                                ppaf->pg_offset, ppaf->pg_len,
                                ppaf->sec_offset, ppaf->sec_len);
}

static ssize_t nvm_dev_attr_show_12(struct device *dev,
                struct device_attribute *dattr, char *page)
{
        struct nvme_ns *ns = nvme_get_ns_from_dev(dev);
        struct nvm_dev *ndev = ns->ndev;
        struct nvm_geo *geo = &ndev->geo;
        struct attribute *attr;

        if (!ndev)
                return 0;

        attr = &dattr->attr;

        if (strcmp(attr->name, "vendor_opcode") == 0) {
                return scnprintf(page, PAGE_SIZE, "%u\n", geo->vmnt);
        } else if (strcmp(attr->name, "device_mode") == 0) {
                return scnprintf(page, PAGE_SIZE, "%u\n", geo->dom);
        /* kept for compatibility */
        } else if (strcmp(attr->name, "media_manager") == 0) {
                return scnprintf(page, PAGE_SIZE, "%s\n", "gennvm");
        } else if (strcmp(attr->name, "ppa_format") == 0) {
                return nvm_dev_attr_show_ppaf((void *)&geo->addrf, page);
        } else if (strcmp(attr->name, "media_type") == 0) {     /* u8 */
                return scnprintf(page, PAGE_SIZE, "%u\n", geo->mtype);
        } else if (strcmp(attr->name, "flash_media_type") == 0) {
                return scnprintf(page, PAGE_SIZE, "%u\n", geo->fmtype);
        } else if (strcmp(attr->name, "num_channels") == 0) {
                return scnprintf(page, PAGE_SIZE, "%u\n", geo->num_ch);
        } else if (strcmp(attr->name, "num_luns") == 0) {
                return scnprintf(page, PAGE_SIZE, "%u\n", geo->num_lun);
        } else if (strcmp(attr->name, "num_planes") == 0) {
                return scnprintf(page, PAGE_SIZE, "%u\n", geo->num_pln);
        } else if (strcmp(attr->name, "num_blocks") == 0) {     /* u16 */
                return scnprintf(page, PAGE_SIZE, "%u\n", geo->num_chk);
        } else if (strcmp(attr->name, "num_pages") == 0) {
                return scnprintf(page, PAGE_SIZE, "%u\n", geo->num_pg);
        } else if (strcmp(attr->name, "page_size") == 0) {
                return scnprintf(page, PAGE_SIZE, "%u\n", geo->fpg_sz);
        } else if (strcmp(attr->name, "hw_sector_size") == 0) {
                return scnprintf(page, PAGE_SIZE, "%u\n", geo->csecs);
        } else if (strcmp(attr->name, "oob_sector_size") == 0) {/* u32 */
                return scnprintf(page, PAGE_SIZE, "%u\n", geo->sos);
        } else if (strcmp(attr->name, "prog_typ") == 0) {
                return scnprintf(page, PAGE_SIZE, "%u\n", geo->tprt);
        } else if (strcmp(attr->name, "prog_max") == 0) {
                return scnprintf(page, PAGE_SIZE, "%u\n", geo->tprm);
        } else if (strcmp(attr->name, "erase_typ") == 0) {
                return scnprintf(page, PAGE_SIZE, "%u\n", geo->tbet);
        } else if (strcmp(attr->name, "erase_max") == 0) {
                return scnprintf(page, PAGE_SIZE, "%u\n", geo->tbem);
        } else if (strcmp(attr->name, "multiplane_modes") == 0) {
                return scnprintf(page, PAGE_SIZE, "0x%08x\n", geo->mpos);
        } else if (strcmp(attr->name, "media_capabilities") == 0) {
                return scnprintf(page, PAGE_SIZE, "0x%08x\n", geo->mccap);
        } else if (strcmp(attr->name, "max_phys_secs") == 0) {
                return scnprintf(page, PAGE_SIZE, "%u\n", NVM_MAX_VLBA);
        } else {
                return scnprintf(page, PAGE_SIZE,
                        "Unhandled attr(%s) in `%s`\n",
                        attr->name, __func__);
        }
}

static ssize_t nvm_dev_attr_show_20(struct device *dev,
                struct device_attribute *dattr, char *page)
{
        struct nvme_ns *ns = nvme_get_ns_from_dev(dev);
        struct nvm_dev *ndev = ns->ndev;
        struct nvm_geo *geo = &ndev->geo;
        struct attribute *attr;

        if (!ndev)
                return 0;

        attr = &dattr->attr;

        if (strcmp(attr->name, "groups") == 0) {
                return scnprintf(page, PAGE_SIZE, "%u\n", geo->num_ch);
        } else if (strcmp(attr->name, "punits") == 0) {
                return scnprintf(page, PAGE_SIZE, "%u\n", geo->num_lun);
        } else if (strcmp(attr->name, "chunks") == 0) {
                return scnprintf(page, PAGE_SIZE, "%u\n", geo->num_chk);
        } else if (strcmp(attr->name, "clba") == 0) {
                return scnprintf(page, PAGE_SIZE, "%u\n", geo->clba);
        } else if (strcmp(attr->name, "ws_min") == 0) {
                return scnprintf(page, PAGE_SIZE, "%u\n", geo->ws_min);
        } else if (strcmp(attr->name, "ws_opt") == 0) {
                return scnprintf(page, PAGE_SIZE, "%u\n", geo->ws_opt);
        } else if (strcmp(attr->name, "maxoc") == 0) {
                return scnprintf(page, PAGE_SIZE, "%u\n", geo->maxoc);
        } else if (strcmp(attr->name, "maxocpu") == 0) {
                return scnprintf(page, PAGE_SIZE, "%u\n", geo->maxocpu);
        } else if (strcmp(attr->name, "mw_cunits") == 0) {
                return scnprintf(page, PAGE_SIZE, "%u\n", geo->mw_cunits);
        } else if (strcmp(attr->name, "write_typ") == 0) {
                return scnprintf(page, PAGE_SIZE, "%u\n", geo->tprt);
        } else if (strcmp(attr->name, "write_max") == 0) {
                return scnprintf(page, PAGE_SIZE, "%u\n", geo->tprm);
        } else if (strcmp(attr->name, "reset_typ") == 0) {
                return scnprintf(page, PAGE_SIZE, "%u\n", geo->tbet);
        } else if (strcmp(attr->name, "reset_max") == 0) {
                return scnprintf(page, PAGE_SIZE, "%u\n", geo->tbem);
        } else {
                return scnprintf(page, PAGE_SIZE,
                        "Unhandled attr(%s) in `%s`\n",
                        attr->name, __func__);
        }
}

#define NVM_DEV_ATTR_RO(_name)                                  \
        DEVICE_ATTR(_name, S_IRUGO, nvm_dev_attr_show, NULL)
#define NVM_DEV_ATTR_12_RO(_name)                                       \
        DEVICE_ATTR(_name, S_IRUGO, nvm_dev_attr_show_12, NULL)
#define NVM_DEV_ATTR_20_RO(_name)                                       \
        DEVICE_ATTR(_name, S_IRUGO, nvm_dev_attr_show_20, NULL)

/* general attributes */
static NVM_DEV_ATTR_RO(version);
static NVM_DEV_ATTR_RO(capabilities);

static NVM_DEV_ATTR_RO(read_typ);
static NVM_DEV_ATTR_RO(read_max);

/* 1.2 values */
static NVM_DEV_ATTR_12_RO(vendor_opcode);
static NVM_DEV_ATTR_12_RO(device_mode);
static NVM_DEV_ATTR_12_RO(ppa_format);
static NVM_DEV_ATTR_12_RO(media_manager);
static NVM_DEV_ATTR_12_RO(media_type);
static NVM_DEV_ATTR_12_RO(flash_media_type);
static NVM_DEV_ATTR_12_RO(num_channels);
static NVM_DEV_ATTR_12_RO(num_luns);
static NVM_DEV_ATTR_12_RO(num_planes);
static NVM_DEV_ATTR_12_RO(num_blocks);
static NVM_DEV_ATTR_12_RO(num_pages);
static NVM_DEV_ATTR_12_RO(page_size);
static NVM_DEV_ATTR_12_RO(hw_sector_size);
static NVM_DEV_ATTR_12_RO(oob_sector_size);
static NVM_DEV_ATTR_12_RO(prog_typ);
static NVM_DEV_ATTR_12_RO(prog_max);
static NVM_DEV_ATTR_12_RO(erase_typ);
static NVM_DEV_ATTR_12_RO(erase_max);
static NVM_DEV_ATTR_12_RO(multiplane_modes);
static NVM_DEV_ATTR_12_RO(media_capabilities);
static NVM_DEV_ATTR_12_RO(max_phys_secs);

static struct attribute *nvm_dev_attrs_12[] = {
        &dev_attr_version.attr,
        &dev_attr_capabilities.attr,

        &dev_attr_vendor_opcode.attr,
        &dev_attr_device_mode.attr,
        &dev_attr_media_manager.attr,
        &dev_attr_ppa_format.attr,
        &dev_attr_media_type.attr,
        &dev_attr_flash_media_type.attr,
        &dev_attr_num_channels.attr,
        &dev_attr_num_luns.attr,
        &dev_attr_num_planes.attr,
        &dev_attr_num_blocks.attr,
        &dev_attr_num_pages.attr,
        &dev_attr_page_size.attr,
        &dev_attr_hw_sector_size.attr,
        &dev_attr_oob_sector_size.attr,
        &dev_attr_read_typ.attr,
        &dev_attr_read_max.attr,
        &dev_attr_prog_typ.attr,
        &dev_attr_prog_max.attr,
        &dev_attr_erase_typ.attr,
        &dev_attr_erase_max.attr,
        &dev_attr_multiplane_modes.attr,
        &dev_attr_media_capabilities.attr,
        &dev_attr_max_phys_secs.attr,

        NULL,
};

static const struct attribute_group nvm_dev_attr_group_12 = {
        .name           = "lightnvm",
        .attrs          = nvm_dev_attrs_12,
};

/* 2.0 values */
static NVM_DEV_ATTR_20_RO(groups);
static NVM_DEV_ATTR_20_RO(punits);
static NVM_DEV_ATTR_20_RO(chunks);
static NVM_DEV_ATTR_20_RO(clba);
static NVM_DEV_ATTR_20_RO(ws_min);
static NVM_DEV_ATTR_20_RO(ws_opt);
static NVM_DEV_ATTR_20_RO(maxoc);
static NVM_DEV_ATTR_20_RO(maxocpu);
static NVM_DEV_ATTR_20_RO(mw_cunits);
static NVM_DEV_ATTR_20_RO(write_typ);
static NVM_DEV_ATTR_20_RO(write_max);
static NVM_DEV_ATTR_20_RO(reset_typ);
static NVM_DEV_ATTR_20_RO(reset_max);

static struct attribute *nvm_dev_attrs_20[] = {
        &dev_attr_version.attr,
        &dev_attr_capabilities.attr,

        &dev_attr_groups.attr,
        &dev_attr_punits.attr,
        &dev_attr_chunks.attr,
        &dev_attr_clba.attr,
        &dev_attr_ws_min.attr,
        &dev_attr_ws_opt.attr,
        &dev_attr_maxoc.attr,
        &dev_attr_maxocpu.attr,
        &dev_attr_mw_cunits.attr,

        &dev_attr_read_typ.attr,
        &dev_attr_read_max.attr,
        &dev_attr_write_typ.attr,
        &dev_attr_write_max.attr,
        &dev_attr_reset_typ.attr,
        &dev_attr_reset_max.attr,

        NULL,
};

static const struct attribute_group nvm_dev_attr_group_20 = {
        .name           = "lightnvm",
        .attrs          = nvm_dev_attrs_20,
};

int nvme_nvm_register_sysfs(struct nvme_ns *ns)
{
        struct nvm_dev *ndev = ns->ndev;
        struct nvm_geo *geo = &ndev->geo;

        if (!ndev)
                return -EINVAL;

        switch (geo->major_ver_id) {
        case 1:
                return sysfs_create_group(&disk_to_dev(ns->disk)->kobj,
                                        &nvm_dev_attr_group_12);
        case 2:
                return sysfs_create_group(&disk_to_dev(ns->disk)->kobj,
                                        &nvm_dev_attr_group_20);
        }

        return -EINVAL;
}

void nvme_nvm_unregister_sysfs(struct nvme_ns *ns)
{
        struct nvm_dev *ndev = ns->ndev;
        struct nvm_geo *geo = &ndev->geo;

        switch (geo->major_ver_id) {
        case 1:
                sysfs_remove_group(&disk_to_dev(ns->disk)->kobj,
                                        &nvm_dev_attr_group_12);
                break;
        case 2:
                sysfs_remove_group(&disk_to_dev(ns->disk)->kobj,
                                        &nvm_dev_attr_group_20);
                break;
        }
}