linux/drivers/nvme/host/lightnvm.c
/*
 * nvme-lightnvm.c - LightNVM NVMe device
 *
 * Copyright (C) 2014-2015 IT University of Copenhagen
 * Initial release: Matias Bjorling <mb@lightnvm.io>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version
 * 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; see the file COPYING.  If not, write to
 * the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139,
 * USA.
 *
 */

#include "nvme.h"

#include <linux/nvme.h>
#include <linux/bitops.h>
#include <linux/lightnvm.h>
#include <linux/vmalloc.h>

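/*
 * LightNVM admin opcodes live in the vendor-specific range. The values
 * below follow the open-channel SSD interface this driver was written
 * against: identify, L2P table retrieval and bad block table get/set.
 */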
enum nvme_nvm_admin_opcode {
        nvme_nvm_admin_identity         = 0xe2,
        nvme_nvm_admin_get_l2p_tbl      = 0xea,
        nvme_nvm_admin_get_bb_tbl       = 0xf2,
        nvme_nvm_admin_set_bb_tbl       = 0xf1,
};

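/*
 * Hybrid read/write command. Same 64-byte layout as a regular NVMe rw
 * submission entry, but 'spba' carries the start physical page address
 * (or the DMA address of a PPA list) while the trailing 'slba' carries
 * the logical address that the hybrid (host-hinted) mode also needs.
 */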
struct nvme_nvm_hb_rw {
        __u8                    opcode;
        __u8                    flags;
        __u16                   command_id;
        __le32                  nsid;
        __u64                   rsvd2;
        __le64                  metadata;
        __le64                  prp1;
        __le64                  prp2;
        __le64                  spba;
        __le16                  length;
        __le16                  control;
        __le32                  dsmgmt;
        __le64                  slba;
};

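/*
 * Pure physical read/write command: identical to the hybrid variant
 * except that the trailing logical address field is reserved.
 */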
struct nvme_nvm_ph_rw {
        __u8                    opcode;
        __u8                    flags;
        __u16                   command_id;
        __le32                  nsid;
        __u64                   rsvd2;
        __le64                  metadata;
        __le64                  prp1;
        __le64                  prp2;
        __le64                  spba;
        __le16                  length;
        __le16                  control;
        __le32                  dsmgmt;
        __le64                  resv;
};

struct nvme_nvm_identity {
        __u8                    opcode;
        __u8                    flags;
        __u16                   command_id;
        __le32                  nsid;
        __u64                   rsvd[2];
        __le64                  prp1;
        __le64                  prp2;
        __le32                  chnl_off;
        __u32                   rsvd11[5];
};

struct nvme_nvm_l2ptbl {
        __u8                    opcode;
        __u8                    flags;
        __u16                   command_id;
        __le32                  nsid;
        __le32                  cdw2[4];
        __le64                  prp1;
        __le64                  prp2;
        __le64                  slba;
        __le32                  nlb;
        __le16                  cdw14[6];
};

struct nvme_nvm_getbbtbl {
        __u8                    opcode;
        __u8                    flags;
        __u16                   command_id;
        __le32                  nsid;
        __u64                   rsvd[2];
        __le64                  prp1;
        __le64                  prp2;
        __le64                  spba;
        __u32                   rsvd4[4];
};

struct nvme_nvm_setbbtbl {
        __u8                    opcode;
        __u8                    flags;
        __u16                   command_id;
        __le32                  nsid;
        __le64                  rsvd[2];
        __le64                  prp1;
        __le64                  prp2;
        __le64                  spba;
        __le16                  nlb;
        __u8                    value;
        __u8                    rsvd3;
        __u32                   rsvd4[3];
};

struct nvme_nvm_erase_blk {
        __u8                    opcode;
        __u8                    flags;
        __u16                   command_id;
        __le32                  nsid;
        __u64                   rsvd[2];
        __le64                  prp1;
        __le64                  prp2;
        __le64                  spba;
        __le16                  length;
        __le16                  control;
        __le32                  dsmgmt;
        __le64                  resv;
};

struct nvme_nvm_command {
        union {
                struct nvme_common_command common;
                struct nvme_nvm_identity identity;
                struct nvme_nvm_hb_rw hb_rw;
                struct nvme_nvm_ph_rw ph_rw;
                struct nvme_nvm_l2ptbl l2p;
                struct nvme_nvm_getbbtbl get_bb;
                struct nvme_nvm_setbbtbl set_bb;
                struct nvme_nvm_erase_blk erase;
        };
};

struct nvme_nvm_completion {
        __le64  result;         /* Used by LightNVM to return ppa completions */
        __le16  sq_head;        /* how much of this queue may be reclaimed */
        __le16  sq_id;          /* submission queue that generated this entry */
        __u16   command_id;     /* of the command which completed */
        __le16  status;         /* did the command fail, and if so, why? */
};

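/*
 * 886 pair bytes make struct nvme_nvm_id_group exactly 960 bytes: 64
 * bytes of geometry fields plus an 8-byte table id, a 2-byte pair count
 * and the 886 pair entries (see _nvme_nvm_check_size() below).
 */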
#define NVME_NVM_LP_MLC_PAIRS 886
struct nvme_nvm_lp_mlc {
        __le16                  num_pairs;
        __u8                    pairs[NVME_NVM_LP_MLC_PAIRS];
};

struct nvme_nvm_lp_tbl {
        __u8                    id[8];
        struct nvme_nvm_lp_mlc  mlc;
};

struct nvme_nvm_id_group {
        __u8                    mtype;
        __u8                    fmtype;
        __le16                  res16;
        __u8                    num_ch;
        __u8                    num_lun;
        __u8                    num_pln;
        __u8                    rsvd1;
        __le16                  num_blk;
        __le16                  num_pg;
        __le16                  fpg_sz;
        __le16                  csecs;
        __le16                  sos;
        __le16                  rsvd2;
        __le32                  trdt;
        __le32                  trdm;
        __le32                  tprt;
        __le32                  tprm;
        __le32                  tbet;
        __le32                  tbem;
        __le32                  mpos;
        __le32                  mccap;
        __le16                  cpar;
        __u8                    reserved[10];
        struct nvme_nvm_lp_tbl lptbl;
} __packed;

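/*
 * Describes how the device packs a physical page address (PPA) into 64
 * bits: each level of the media hierarchy (channel, LUN, plane, block,
 * page, sector) is given a bit offset and a bit length.
 */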
struct nvme_nvm_addr_format {
        __u8                    ch_offset;
        __u8                    ch_len;
        __u8                    lun_offset;
        __u8                    lun_len;
        __u8                    pln_offset;
        __u8                    pln_len;
        __u8                    blk_offset;
        __u8                    blk_len;
        __u8                    pg_offset;
        __u8                    pg_len;
        __u8                    sect_offset;
        __u8                    sect_len;
        __u8                    res[4];
} __packed;

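/*
 * Geometry identification page: a single 4096-byte payload holding the
 * version header, the PPA address format and up to four media
 * configuration groups of 960 bytes each.
 */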
struct nvme_nvm_id {
        __u8                    ver_id;
        __u8                    vmnt;
        __u8                    cgrps;
        __u8                    res;
        __le32                  cap;
        __le32                  dom;
        struct nvme_nvm_addr_format ppaf;
        __u8                    resv[228];
        struct nvme_nvm_id_group groups[4];
} __packed;

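/*
 * Bad block table as returned by get_bb_tbl: a 64-byte header ("BBLT"
 * magic, version, per-category block counts) followed by one state
 * byte per block; 'blk' is a flexible array sized by the caller.
 */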
struct nvme_nvm_bb_tbl {
        __u8    tblid[4];
        __le16  verid;
        __le16  revid;
        __le32  rvsd1;
        __le32  tblks;
        __le32  tfact;
        __le32  tgrown;
        __le32  tdresv;
        __le32  thresv;
        __le32  rsvd2[8];
        __u8    blk[0];
};

/*
 * Check we didn't inadvertently grow the command struct
 */
static inline void _nvme_nvm_check_size(void)
{
        BUILD_BUG_ON(sizeof(struct nvme_nvm_identity) != 64);
        BUILD_BUG_ON(sizeof(struct nvme_nvm_hb_rw) != 64);
        BUILD_BUG_ON(sizeof(struct nvme_nvm_ph_rw) != 64);
        BUILD_BUG_ON(sizeof(struct nvme_nvm_getbbtbl) != 64);
        BUILD_BUG_ON(sizeof(struct nvme_nvm_setbbtbl) != 64);
        BUILD_BUG_ON(sizeof(struct nvme_nvm_l2ptbl) != 64);
        BUILD_BUG_ON(sizeof(struct nvme_nvm_erase_blk) != 64);
        BUILD_BUG_ON(sizeof(struct nvme_nvm_id_group) != 960);
        BUILD_BUG_ON(sizeof(struct nvme_nvm_addr_format) != 16);
        BUILD_BUG_ON(sizeof(struct nvme_nvm_id) != 4096);
        BUILD_BUG_ON(sizeof(struct nvme_nvm_bb_tbl) != 64);
}

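/*
 * Copy up to four identify groups from the little-endian wire format
 * into the host-endian nvm_id representation. For MLC media the
 * page-pairing table is copied as well, bounded by NVME_NVM_LP_MLC_PAIRS.
 */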
static int init_grps(struct nvm_id *nvm_id, struct nvme_nvm_id *nvme_nvm_id)
{
        struct nvme_nvm_id_group *src;
        struct nvm_id_group *dst;
        int i, end;

        end = min_t(u32, 4, nvm_id->cgrps);

        for (i = 0; i < end; i++) {
                src = &nvme_nvm_id->groups[i];
                dst = &nvm_id->groups[i];

                dst->mtype = src->mtype;
                dst->fmtype = src->fmtype;
                dst->num_ch = src->num_ch;
                dst->num_lun = src->num_lun;
                dst->num_pln = src->num_pln;

                dst->num_pg = le16_to_cpu(src->num_pg);
                dst->num_blk = le16_to_cpu(src->num_blk);
                dst->fpg_sz = le16_to_cpu(src->fpg_sz);
                dst->csecs = le16_to_cpu(src->csecs);
                dst->sos = le16_to_cpu(src->sos);

                dst->trdt = le32_to_cpu(src->trdt);
                dst->trdm = le32_to_cpu(src->trdm);
                dst->tprt = le32_to_cpu(src->tprt);
                dst->tprm = le32_to_cpu(src->tprm);
                dst->tbet = le32_to_cpu(src->tbet);
                dst->tbem = le32_to_cpu(src->tbem);
                dst->mpos = le32_to_cpu(src->mpos);
                dst->mccap = le32_to_cpu(src->mccap);

                dst->cpar = le16_to_cpu(src->cpar);

                if (dst->fmtype == NVM_ID_FMTYPE_MLC) {
                        memcpy(dst->lptbl.id, src->lptbl.id, 8);
                        dst->lptbl.mlc.num_pairs =
                                        le16_to_cpu(src->lptbl.mlc.num_pairs);

                        if (dst->lptbl.mlc.num_pairs > NVME_NVM_LP_MLC_PAIRS) {
                                pr_err("nvm: number of MLC pairs not supported\n");
                                return -EINVAL;
                        }

                        memcpy(dst->lptbl.mlc.pairs, src->lptbl.mlc.pairs,
                                                dst->lptbl.mlc.num_pairs);
                }
        }

        return 0;
}

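/*
 * Issue the LightNVM identify admin command and translate the 4KB
 * response into the generic nvm_id structure consumed by the core.
 */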
static int nvme_nvm_identity(struct nvm_dev *nvmdev, struct nvm_id *nvm_id)
{
        struct nvme_ns *ns = nvmdev->q->queuedata;
        struct nvme_nvm_id *nvme_nvm_id;
        struct nvme_nvm_command c = {};
        int ret;

        c.identity.opcode = nvme_nvm_admin_identity;
        c.identity.nsid = cpu_to_le32(ns->ns_id);
        c.identity.chnl_off = 0;

        nvme_nvm_id = kmalloc(sizeof(struct nvme_nvm_id), GFP_KERNEL);
        if (!nvme_nvm_id)
                return -ENOMEM;

        ret = nvme_submit_sync_cmd(ns->ctrl->admin_q, (struct nvme_command *)&c,
                                nvme_nvm_id, sizeof(struct nvme_nvm_id));
        if (ret) {
                ret = -EIO;
                goto out;
        }

        nvm_id->ver_id = nvme_nvm_id->ver_id;
        nvm_id->vmnt = nvme_nvm_id->vmnt;
        nvm_id->cgrps = nvme_nvm_id->cgrps;
        nvm_id->cap = le32_to_cpu(nvme_nvm_id->cap);
        nvm_id->dom = le32_to_cpu(nvme_nvm_id->dom);
        memcpy(&nvm_id->ppaf, &nvme_nvm_id->ppaf,
                                        sizeof(struct nvme_nvm_addr_format));

        ret = init_grps(nvm_id, nvme_nvm_id);
out:
        kfree(nvme_nvm_id);
        return ret;
}

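/*
 * Fetch the logical-to-physical table in chunks. Each transfer is
 * capped at the admin queue's max transfer size; with 8-byte L2P
 * entries that gives len / sizeof(u64) entries per command (e.g. a
 * 64KB cap yields 8192 entries per round trip). update_l2p() is
 * invoked once per chunk and may abort the walk.
 */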
static int nvme_nvm_get_l2p_tbl(struct nvm_dev *nvmdev, u64 slba, u32 nlb,
                                nvm_l2p_update_fn *update_l2p, void *priv)
{
        struct nvme_ns *ns = nvmdev->q->queuedata;
        struct nvme_nvm_command c = {};
        u32 len = queue_max_hw_sectors(ns->ctrl->admin_q) << 9;
        u32 nlb_pr_rq = len / sizeof(u64);
        u64 cmd_slba = slba;
        void *entries;
        int ret = 0;

        c.l2p.opcode = nvme_nvm_admin_get_l2p_tbl;
        c.l2p.nsid = cpu_to_le32(ns->ns_id);
        entries = kmalloc(len, GFP_KERNEL);
        if (!entries)
                return -ENOMEM;

        while (nlb) {
                u32 cmd_nlb = min(nlb_pr_rq, nlb);

                c.l2p.slba = cpu_to_le64(cmd_slba);
                c.l2p.nlb = cpu_to_le32(cmd_nlb);

                ret = nvme_submit_sync_cmd(ns->ctrl->admin_q,
                                (struct nvme_command *)&c, entries, len);
                if (ret) {
                        dev_err(ns->ctrl->device,
                                "L2P table transfer failed (%d)\n", ret);
                        ret = -EIO;
                        goto out;
                }

                if (update_l2p(cmd_slba, cmd_nlb, entries, priv)) {
                        ret = -EINTR;
                        goto out;
                }

                cmd_slba += cmd_nlb;
                nlb -= cmd_nlb;
        }

out:
        kfree(entries);
        return ret;
}

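/*
 * Retrieve the bad block table for the LUN addressed by @ppa and
 * validate magic, version and block count before handing the per-block
 * states back to the caller.
 */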
static int nvme_nvm_get_bb_tbl(struct nvm_dev *nvmdev, struct ppa_addr ppa,
                                                                u8 *blks)
{
        struct request_queue *q = nvmdev->q;
        struct nvme_ns *ns = q->queuedata;
        struct nvme_ctrl *ctrl = ns->ctrl;
        struct nvme_nvm_command c = {};
        struct nvme_nvm_bb_tbl *bb_tbl;
        int nr_blks = nvmdev->blks_per_lun * nvmdev->plane_mode;
        int tblsz = sizeof(struct nvme_nvm_bb_tbl) + nr_blks;
        int ret = 0;

        c.get_bb.opcode = nvme_nvm_admin_get_bb_tbl;
        c.get_bb.nsid = cpu_to_le32(ns->ns_id);
        c.get_bb.spba = cpu_to_le64(ppa.ppa);

        bb_tbl = kzalloc(tblsz, GFP_KERNEL);
        if (!bb_tbl)
                return -ENOMEM;

        ret = nvme_submit_sync_cmd(ctrl->admin_q, (struct nvme_command *)&c,
                                                                bb_tbl, tblsz);
        if (ret) {
                dev_err(ctrl->device, "get bad block table failed (%d)\n", ret);
                ret = -EIO;
                goto out;
        }

        if (bb_tbl->tblid[0] != 'B' || bb_tbl->tblid[1] != 'B' ||
                bb_tbl->tblid[2] != 'L' || bb_tbl->tblid[3] != 'T') {
                dev_err(ctrl->device, "bbt format mismatch\n");
                ret = -EINVAL;
                goto out;
        }

        if (le16_to_cpu(bb_tbl->verid) != 1) {
                ret = -EINVAL;
                dev_err(ctrl->device, "bbt version not supported\n");
                goto out;
        }

        if (le32_to_cpu(bb_tbl->tblks) != nr_blks) {
                ret = -EINVAL;
                dev_err(ctrl->device,
                                "bbt unexpected blocks returned (%u!=%u)",
                                le32_to_cpu(bb_tbl->tblks), nr_blks);
                goto out;
        }

        memcpy(blks, bb_tbl->blk, nvmdev->blks_per_lun * nvmdev->plane_mode);
out:
        kfree(bb_tbl);
        return ret;
}

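/*
 * Update the block state of @nr_ppas physical addresses to @type. As
 * elsewhere in the command set, the on-wire count is zero-based, hence
 * nr_ppas - 1.
 */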
static int nvme_nvm_set_bb_tbl(struct nvm_dev *nvmdev, struct ppa_addr *ppas,
                                                        int nr_ppas, int type)
{
        struct nvme_ns *ns = nvmdev->q->queuedata;
        struct nvme_nvm_command c = {};
        int ret = 0;

        c.set_bb.opcode = nvme_nvm_admin_set_bb_tbl;
        c.set_bb.nsid = cpu_to_le32(ns->ns_id);
        c.set_bb.spba = cpu_to_le64(ppas->ppa);
        c.set_bb.nlb = cpu_to_le16(nr_ppas - 1);
        c.set_bb.value = type;

        ret = nvme_submit_sync_cmd(ns->ctrl->admin_q, (struct nvme_command *)&c,
                                                                NULL, 0);
        if (ret)
                dev_err(ns->ctrl->device, "set bad block table failed (%d)\n",
                                                                        ret);
        return ret;
}

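/*
 * Translate a generic nvm_rq into an NVMe LightNVM command. Hybrid
 * reads/writes additionally carry the logical block address derived
 * from the bio's starting sector.
 */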
static inline void nvme_nvm_rqtocmd(struct request *rq, struct nvm_rq *rqd,
                                struct nvme_ns *ns, struct nvme_nvm_command *c)
{
        c->ph_rw.opcode = rqd->opcode;
        c->ph_rw.nsid = cpu_to_le32(ns->ns_id);
        c->ph_rw.spba = cpu_to_le64(rqd->ppa_addr.ppa);
        c->ph_rw.metadata = cpu_to_le64(rqd->dma_meta_list);
        c->ph_rw.control = cpu_to_le16(rqd->flags);
        c->ph_rw.length = cpu_to_le16(rqd->nr_ppas - 1);

        if (rqd->opcode == NVM_OP_HBWRITE || rqd->opcode == NVM_OP_HBREAD)
                c->hb_rw.slba = cpu_to_le64(nvme_block_nr(ns,
                                                rqd->bio->bi_iter.bi_sector));
}

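/*
 * Completion path: the lower driver may have copied the raw CQE into
 * rq->special (set up in nvme_nvm_submit_io() below); if so, pass the
 * per-ppa completion bitmap on to the LightNVM core before freeing the
 * request and its private command.
 */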
static void nvme_nvm_end_io(struct request *rq, int error)
{
        struct nvm_rq *rqd = rq->end_io_data;
        struct nvme_nvm_completion *cqe = rq->special;

        if (cqe)
                rqd->ppa_status = le64_to_cpu(cqe->result);

        nvm_end_io(rqd, error);

        kfree(rq->cmd);
        blk_mq_free_request(rq);
}

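/*
 * Build and fire an asynchronous LightNVM I/O. The NVMe command and
 * the completion scratch area are allocated together; rq->special
 * points at the latter so the CQE result can be recovered in
 * nvme_nvm_end_io().
 */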
static int nvme_nvm_submit_io(struct nvm_dev *dev, struct nvm_rq *rqd)
{
        struct request_queue *q = dev->q;
        struct nvme_ns *ns = q->queuedata;
        struct request *rq;
        struct bio *bio = rqd->bio;
        struct nvme_nvm_command *cmd;

        rq = blk_mq_alloc_request(q, bio_data_dir(bio), 0);
        if (IS_ERR(rq))
                return -ENOMEM;

        cmd = kzalloc(sizeof(struct nvme_nvm_command) +
                                sizeof(struct nvme_nvm_completion), GFP_KERNEL);
        if (!cmd) {
                blk_mq_free_request(rq);
                return -ENOMEM;
        }

        rq->cmd_type = REQ_TYPE_DRV_PRIV;
        rq->ioprio = bio_prio(bio);

        if (bio_has_data(bio))
                rq->nr_phys_segments = bio_phys_segments(q, bio);

        rq->__data_len = bio->bi_iter.bi_size;
        rq->bio = rq->biotail = bio;

        nvme_nvm_rqtocmd(rq, rqd, ns, cmd);

        rq->cmd = (unsigned char *)cmd;
        rq->cmd_len = sizeof(struct nvme_nvm_command);
        rq->special = cmd + 1;

        rq->end_io_data = rqd;

        blk_execute_rq_nowait(q, NULL, rq, 0, nvme_nvm_end_io);

        return 0;
}

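/*
 * Erase is submitted synchronously on the I/O queue; like read/write
 * it addresses one or more planes through 'spba' with a zero-based
 * count in 'length'.
 */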
static int nvme_nvm_erase_block(struct nvm_dev *dev, struct nvm_rq *rqd)
{
        struct request_queue *q = dev->q;
        struct nvme_ns *ns = q->queuedata;
        struct nvme_nvm_command c = {};

        c.erase.opcode = NVM_OP_ERASE;
        c.erase.nsid = cpu_to_le32(ns->ns_id);
        c.erase.spba = cpu_to_le64(rqd->ppa_addr.ppa);
        c.erase.length = cpu_to_le16(rqd->nr_ppas - 1);

        return nvme_submit_sync_cmd(q, (struct nvme_command *)&c, NULL, 0);
}

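/*
 * PPA lists and metadata buffers must be DMA-mapped; these helpers
 * simply wrap a dma_pool of PAGE_SIZE-aligned pages for the LightNVM
 * core and targets to draw from.
 */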
static void *nvme_nvm_create_dma_pool(struct nvm_dev *nvmdev, char *name)
{
        struct nvme_ns *ns = nvmdev->q->queuedata;

        return dma_pool_create(name, ns->ctrl->dev, PAGE_SIZE, PAGE_SIZE, 0);
}

static void nvme_nvm_destroy_dma_pool(void *pool)
{
        struct dma_pool *dma_pool = pool;

        dma_pool_destroy(dma_pool);
}

static void *nvme_nvm_dev_dma_alloc(struct nvm_dev *dev, void *pool,
                                    gfp_t mem_flags, dma_addr_t *dma_handler)
{
        return dma_pool_alloc(pool, mem_flags, dma_handler);
}

static void nvme_nvm_dev_dma_free(void *pool, void *addr,
                                                        dma_addr_t dma_handler)
{
        dma_pool_free(pool, addr, dma_handler);
}

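/*
 * Operations handed to the LightNVM core at registration time.
 * max_phys_sect caps the number of PPAs a single command may carry.
 */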
static struct nvm_dev_ops nvme_nvm_dev_ops = {
        .identity               = nvme_nvm_identity,

        .get_l2p_tbl            = nvme_nvm_get_l2p_tbl,

        .get_bb_tbl             = nvme_nvm_get_bb_tbl,
        .set_bb_tbl             = nvme_nvm_set_bb_tbl,

        .submit_io              = nvme_nvm_submit_io,
        .erase_block            = nvme_nvm_erase_block,

        .create_dma_pool        = nvme_nvm_create_dma_pool,
        .destroy_dma_pool       = nvme_nvm_destroy_dma_pool,
        .dev_dma_alloc          = nvme_nvm_dev_dma_alloc,
        .dev_dma_free           = nvme_nvm_dev_dma_free,

        .max_phys_sect          = 64,
};

int nvme_nvm_register(struct request_queue *q, char *disk_name)
{
        return nvm_register(q, disk_name, &nvme_nvm_dev_ops);
}

void nvme_nvm_unregister(struct request_queue *q, char *disk_name)
{
        nvm_unregister(disk_name);
}

/* move to shared place when used in multiple places. */
#define PCI_VENDOR_ID_CNEX 0x1d1d
#define PCI_DEVICE_ID_CNEX_WL 0x2807
#define PCI_DEVICE_ID_CNEX_QEMU 0x1f1f

int nvme_nvm_ns_supported(struct nvme_ns *ns, struct nvme_id_ns *id)
{
        struct nvme_ctrl *ctrl = ns->ctrl;
        /* XXX: this is poking into PCI structures from generic code! */
        struct pci_dev *pdev = to_pci_dev(ctrl->dev);

        /* QEMU NVMe simulator - PCI ID + Vendor specific bit */
        if (pdev->vendor == PCI_VENDOR_ID_CNEX &&
                                pdev->device == PCI_DEVICE_ID_CNEX_QEMU &&
                                                        id->vs[0] == 0x1)
                return 1;

        /* CNEX Labs - PCI ID + Vendor specific bit */
        if (pdev->vendor == PCI_VENDOR_ID_CNEX &&
                                pdev->device == PCI_DEVICE_ID_CNEX_WL &&
                                                        id->vs[0] == 0x1)
                return 1;

        return 0;
}