linux/include/linux/lightnvm.h
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef NVM_H
#define NVM_H

#include <linux/blkdev.h>
#include <linux/types.h>
#include <uapi/linux/lightnvm.h>

enum {
        NVM_IO_OK = 0,
        NVM_IO_REQUEUE = 1,
        NVM_IO_DONE = 2,
        NVM_IO_ERR = 3,

        NVM_IOTYPE_NONE = 0,
        NVM_IOTYPE_GC = 1,
};

/* common format */
#define NVM_GEN_CH_BITS  (8)
#define NVM_GEN_LUN_BITS (8)
#define NVM_GEN_BLK_BITS (16)
#define NVM_GEN_RESERVED (32)

/* 1.2 format */
#define NVM_12_PG_BITS  (16)
#define NVM_12_PL_BITS  (4)
#define NVM_12_SEC_BITS (4)
#define NVM_12_RESERVED (8)

/* 2.0 format */
#define NVM_20_SEC_BITS (24)
#define NVM_20_RESERVED (8)

enum {
        NVM_OCSSD_SPEC_12 = 12,
        NVM_OCSSD_SPEC_20 = 20,
};

struct ppa_addr {
        /* Generic structure for all addresses */
        union {
                /* generic device format */
                struct {
                        u64 ch          : NVM_GEN_CH_BITS;
                        u64 lun         : NVM_GEN_LUN_BITS;
                        u64 blk         : NVM_GEN_BLK_BITS;
                        u64 reserved    : NVM_GEN_RESERVED;
                } a;

                /* 1.2 device format */
                struct {
                        u64 ch          : NVM_GEN_CH_BITS;
                        u64 lun         : NVM_GEN_LUN_BITS;
                        u64 blk         : NVM_GEN_BLK_BITS;
                        u64 pg          : NVM_12_PG_BITS;
                        u64 pl          : NVM_12_PL_BITS;
                        u64 sec         : NVM_12_SEC_BITS;
                        u64 reserved    : NVM_12_RESERVED;
                } g;

                /* 2.0 device format */
                struct {
                        u64 grp         : NVM_GEN_CH_BITS;
                        u64 pu          : NVM_GEN_LUN_BITS;
                        u64 chk         : NVM_GEN_BLK_BITS;
                        u64 sec         : NVM_20_SEC_BITS;
                        u64 reserved    : NVM_20_RESERVED;
                } m;

                struct {
                        u64 line        : 63;
                        u64 is_cached   : 1;
                } c;

                u64 ppa;
        };
};
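
/*
 * Illustrative sketch (not part of the exported API): every view of the
 * union above aliases the same 64-bit value, and each layout sums to 64
 * bits (e.g. 8 + 8 + 16 + 24 + 8 for the 2.0 format). The generic view
 * shares its leading ch/lun/blk fields with the grp/pu/chk fields of the
 * 2.0 view, so an address built through one can be read through the other:
 *
 *	struct ppa_addr p;
 *
 *	p.ppa = 0;
 *	p.m.grp = 1;	// group (channel) 1
 *	p.m.pu = 2;	// parallel unit (LUN) 2
 *	p.m.chk = 3;	// chunk 3
 *	p.m.sec = 4;	// sector 4 within the chunk
 *
 *	WARN_ON(p.a.ch != 1 || p.a.lun != 2 || p.a.blk != 3);
 */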

struct nvm_rq;
struct nvm_id;
struct nvm_dev;
struct nvm_tgt_dev;
struct nvm_chk_meta;

typedef int (nvm_id_fn)(struct nvm_dev *);
typedef int (nvm_op_bb_tbl_fn)(struct nvm_dev *, struct ppa_addr, u8 *);
typedef int (nvm_op_set_bb_fn)(struct nvm_dev *, struct ppa_addr *, int, int);
typedef int (nvm_get_chk_meta_fn)(struct nvm_dev *, sector_t, int,
                                                        struct nvm_chk_meta *);
typedef int (nvm_submit_io_fn)(struct nvm_dev *, struct nvm_rq *, void *);
typedef void *(nvm_create_dma_pool_fn)(struct nvm_dev *, char *, int);
typedef void (nvm_destroy_dma_pool_fn)(void *);
typedef void *(nvm_dev_dma_alloc_fn)(struct nvm_dev *, void *, gfp_t,
                                                                dma_addr_t *);
typedef void (nvm_dev_dma_free_fn)(void *, void *, dma_addr_t);

struct nvm_dev_ops {
        nvm_id_fn               *identity;
        nvm_op_bb_tbl_fn        *get_bb_tbl;
        nvm_op_set_bb_fn        *set_bb_tbl;

        nvm_get_chk_meta_fn     *get_chk_meta;

        nvm_submit_io_fn        *submit_io;

        nvm_create_dma_pool_fn  *create_dma_pool;
        nvm_destroy_dma_pool_fn *destroy_dma_pool;
        nvm_dev_dma_alloc_fn    *dev_dma_alloc;
        nvm_dev_dma_free_fn     *dev_dma_free;
};
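
/*
 * Illustrative sketch (hypothetical driver, not from this file): a host
 * driver fills this ops table with its callbacks and hands it to the core
 * through nvm_alloc_dev()/nvm_register(). The my_* symbols below are
 * assumed placeholders, not real functions.
 *
 *	static struct nvm_dev_ops my_nvm_dev_ops = {
 *		.identity	= my_identity,
 *		.get_bb_tbl	= my_get_bb_tbl,
 *		.set_bb_tbl	= my_set_bb_tbl,
 *		.get_chk_meta	= my_get_chk_meta,
 *		.submit_io	= my_submit_io,
 *	};
 */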

#ifdef CONFIG_NVM

#include <linux/blkdev.h>
#include <linux/file.h>
#include <linux/dmapool.h>
#include <uapi/linux/lightnvm.h>

enum {
        /* HW Responsibilities */
        NVM_RSP_L2P     = 1 << 0,
        NVM_RSP_ECC     = 1 << 1,

        /* Physical Addressing Mode */
        NVM_ADDRMODE_LINEAR     = 0,
        NVM_ADDRMODE_CHANNEL    = 1,

        /* Plane programming mode for LUN */
        NVM_PLANE_SINGLE        = 1,
        NVM_PLANE_DOUBLE        = 2,
        NVM_PLANE_QUAD          = 4,

        /* Status codes */
        NVM_RSP_SUCCESS         = 0x0,
        NVM_RSP_NOT_CHANGEABLE  = 0x1,
        NVM_RSP_ERR_FAILWRITE   = 0x40ff,
        NVM_RSP_ERR_EMPTYPAGE   = 0x42ff,
        NVM_RSP_ERR_FAILECC     = 0x4281,
        NVM_RSP_ERR_FAILCRC     = 0x4004,
        NVM_RSP_WARN_HIGHECC    = 0x4700,

        /* Device opcodes */
        NVM_OP_PWRITE           = 0x91,
        NVM_OP_PREAD            = 0x92,
        NVM_OP_ERASE            = 0x90,

        /* PPA Command Flags */
        NVM_IO_SNGL_ACCESS      = 0x0,
        NVM_IO_DUAL_ACCESS      = 0x1,
        NVM_IO_QUAD_ACCESS      = 0x2,

        /* NAND Access Modes */
        NVM_IO_SUSPEND          = 0x80,
        NVM_IO_SLC_MODE         = 0x100,
        NVM_IO_SCRAMBLE_ENABLE  = 0x200,

        /* Block Types */
        NVM_BLK_T_FREE          = 0x0,
        NVM_BLK_T_BAD           = 0x1,
        NVM_BLK_T_GRWN_BAD      = 0x2,
        NVM_BLK_T_DEV           = 0x4,
        NVM_BLK_T_HOST          = 0x8,

        /* Memory capabilities */
        NVM_ID_CAP_SLC          = 0x1,
        NVM_ID_CAP_CMD_SUSPEND  = 0x2,
        NVM_ID_CAP_SCRAMBLE     = 0x4,
        NVM_ID_CAP_ENCRYPT      = 0x8,

        /* Memory types */
        NVM_ID_FMTYPE_SLC       = 0,
        NVM_ID_FMTYPE_MLC       = 1,

        /* Device capabilities */
        NVM_ID_DCAP_BBLKMGMT    = 0x1,
        NVM_UD_DCAP_ECC         = 0x2,
};

struct nvm_id_lp_mlc {
        u16     num_pairs;
        u8      pairs[886];
};

struct nvm_id_lp_tbl {
        __u8    id[8];
        struct nvm_id_lp_mlc mlc;
};

struct nvm_addrf_12 {
        u8      ch_len;
        u8      lun_len;
        u8      blk_len;
        u8      pg_len;
        u8      pln_len;
        u8      sec_len;

        u8      ch_offset;
        u8      lun_offset;
        u8      blk_offset;
        u8      pg_offset;
        u8      pln_offset;
        u8      sec_offset;

        u64     ch_mask;
        u64     lun_mask;
        u64     blk_mask;
        u64     pg_mask;
        u64     pln_mask;
        u64     sec_mask;
};

struct nvm_addrf {
        u8      ch_len;
        u8      lun_len;
        u8      chk_len;
        u8      sec_len;
        u8      rsv_len[2];

        u8      ch_offset;
        u8      lun_offset;
        u8      chk_offset;
        u8      sec_offset;
        u8      rsv_off[2];

        u64     ch_mask;
        u64     lun_mask;
        u64     chk_mask;
        u64     sec_mask;
        u64     rsv_mask[2];
};

enum {
        /* Chunk states */
        NVM_CHK_ST_FREE =       1 << 0,
        NVM_CHK_ST_CLOSED =     1 << 1,
        NVM_CHK_ST_OPEN =       1 << 2,
        NVM_CHK_ST_OFFLINE =    1 << 3,

        /* Chunk types */
        NVM_CHK_TP_W_SEQ =      1 << 0,
        NVM_CHK_TP_W_RAN =      1 << 1,
        NVM_CHK_TP_SZ_SPEC =    1 << 4,
};

/*
 * Note: The structure size is linked to nvme_nvm_chk_meta such that the same
 * buffer can be used when converting from little endian to cpu addressing.
 */
struct nvm_chk_meta {
        u8      state;
        u8      type;
        u8      wi;
        u8      rsvd[5];
        u64     slba;
        u64     cnlb;
        u64     wp;
};
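
/*
 * Illustrative sketch (an assumption, not from this file): interpreting a
 * chunk descriptor. slba is the chunk's starting LBA, cnlb its length in
 * logical blocks, and wp the write pointer within it; offline chunks must
 * not be used at all.
 *
 *	static bool chk_accepts_writes(const struct nvm_chk_meta *meta)
 *	{
 *		if (meta->state & (NVM_CHK_ST_OFFLINE | NVM_CHK_ST_CLOSED))
 *			return false;
 *		// Free chunks start at wp == 0; open chunks append at wp.
 *		return meta->wp < meta->cnlb;
 *	}
 */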

struct nvm_target {
        struct list_head list;
        struct nvm_tgt_dev *dev;
        struct nvm_tgt_type *type;
        struct gendisk *disk;
};

#define ADDR_EMPTY (~0ULL)

#define NVM_TARGET_DEFAULT_OP (11)
#define NVM_TARGET_MIN_OP (3)
#define NVM_TARGET_MAX_OP (80)

#define NVM_VERSION_MAJOR 1
#define NVM_VERSION_MINOR 0
#define NVM_VERSION_PATCH 0

#define NVM_MAX_VLBA (64) /* max logical blocks in a vector command */

struct nvm_rq;
typedef void (nvm_end_io_fn)(struct nvm_rq *);

struct nvm_rq {
        struct nvm_tgt_dev *dev;

        struct bio *bio;

        union {
                struct ppa_addr ppa_addr;
                dma_addr_t dma_ppa_list;
        };

        struct ppa_addr *ppa_list;

        void *meta_list;
        dma_addr_t dma_meta_list;

        nvm_end_io_fn *end_io;

        uint8_t opcode;
        uint16_t nr_ppas;
        uint16_t flags;

        u64 ppa_status; /* ppa media status */
        int error;

        int is_seq; /* Sequential hint flag. 1.2 only */

        void *private;
};

static inline struct nvm_rq *nvm_rq_from_pdu(void *pdu)
{
        return pdu - sizeof(struct nvm_rq);
}

static inline void *nvm_rq_to_pdu(struct nvm_rq *rqdata)
{
        return rqdata + 1;
}
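
/*
 * Illustrative sketch (an assumption, not from this file): the two helpers
 * above rely on the target allocating its per-request data (PDU) directly
 * behind the struct nvm_rq in one buffer, so both conversions are plain
 * pointer arithmetic. my_pdu is a hypothetical target type:
 *
 *	// layout: [ struct nvm_rq | target PDU ]
 *	struct nvm_rq *rqd = kzalloc(sizeof(*rqd) + sizeof(struct my_pdu),
 *				     GFP_KERNEL);
 *	struct my_pdu *pdu = nvm_rq_to_pdu(rqd);
 *
 *	WARN_ON(nvm_rq_from_pdu(pdu) != rqd);
 */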

static inline struct ppa_addr *nvm_rq_to_ppa_list(struct nvm_rq *rqd)
{
        return (rqd->nr_ppas > 1) ? rqd->ppa_list : &rqd->ppa_addr;
}

enum {
        NVM_BLK_ST_FREE =       0x1,    /* Free block */
        NVM_BLK_ST_TGT =        0x2,    /* Block in use by target */
        NVM_BLK_ST_BAD =        0x8,    /* Bad block */
};

/* Instance geometry */
struct nvm_geo {
        /* device reported version */
        u8      major_ver_id;
        u8      minor_ver_id;

        /* kernel short version */
        u8      version;

        /* instance specific geometry */
        int num_ch;
        int num_lun;            /* per channel */

        /* calculated values */
        int all_luns;           /* across channels */
        int all_chunks;         /* across channels */

        int op;                 /* over-provision in instance */

        sector_t total_secs;    /* across channels */

        /* chunk geometry */
        u32     num_chk;        /* chunks per lun */
        u32     clba;           /* sectors per chunk */
        u16     csecs;          /* sector size */
        u16     sos;            /* out-of-band area size */
        bool    ext;            /* metadata in extended data buffer */
        u32     mdts;           /* max data transfer size */

        /* device write constraints */
        u32     ws_min;         /* minimum write size */
        u32     ws_opt;         /* optimal write size */
        u32     mw_cunits;      /* distance required for successful read */
        u32     maxoc;          /* maximum open chunks */
        u32     maxocpu;        /* maximum open chunks per parallel unit */

        /* device capabilities */
        u32     mccap;

        /* device timings */
        u32     trdt;           /* Avg. Tread (ns) */
        u32     trdm;           /* Max Tread (ns) */
        u32     tprt;           /* Avg. Tprog (ns) */
        u32     tprm;           /* Max Tprog (ns) */
        u32     tbet;           /* Avg. Terase (ns) */
        u32     tbem;           /* Max Terase (ns) */

        /* generic address format */
        struct nvm_addrf addrf;

        /* 1.2 compatibility */
        u8      vmnt;
        u32     cap;
        u32     dom;

        u8      mtype;
        u8      fmtype;

        u16     cpar;
        u32     mpos;

        u8      num_pln;
        u8      pln_mode;
        u16     num_pg;
        u16     fpg_sz;
};
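
/*
 * Illustrative sketch (an assumption, not from this file): how the
 * calculated fields relate to the per-instance geometry. all_luns spans
 * every channel, and total capacity follows from chunks per LUN times
 * sectors per chunk:
 *
 *	all_luns   = num_ch * num_lun;
 *	all_chunks = all_luns * num_chk;
 *	total_secs = (sector_t)all_chunks * clba;
 *
 * E.g. 8 channels * 8 LUNs * 1024 chunks * 4096 sectors = 256Mi sectors.
 */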

/* sub-device structure */
struct nvm_tgt_dev {
        /* Device information */
        struct nvm_geo geo;

        /* Base ppas for target LUNs */
        struct ppa_addr *luns;

        struct request_queue *q;

        struct nvm_dev *parent;
        void *map;
};

struct nvm_dev {
        struct nvm_dev_ops *ops;

        struct list_head devices;

        /* Device information */
        struct nvm_geo geo;

        unsigned long *lun_map;
        void *dma_pool;

        /* Backend device */
        struct request_queue *q;
        char name[DISK_NAME_LEN];
        void *private_data;

        struct kref ref;
        void *rmap;

        struct mutex mlock;
        spinlock_t lock;

        /* target management */
        struct list_head area_list;
        struct list_head targets;
};

static inline struct ppa_addr generic_to_dev_addr(struct nvm_dev *dev,
                                                  struct ppa_addr r)
{
        struct nvm_geo *geo = &dev->geo;
        struct ppa_addr l;

        if (geo->version == NVM_OCSSD_SPEC_12) {
                struct nvm_addrf_12 *ppaf = (struct nvm_addrf_12 *)&geo->addrf;

                l.ppa = ((u64)r.g.ch) << ppaf->ch_offset;
                l.ppa |= ((u64)r.g.lun) << ppaf->lun_offset;
                l.ppa |= ((u64)r.g.blk) << ppaf->blk_offset;
                l.ppa |= ((u64)r.g.pg) << ppaf->pg_offset;
                l.ppa |= ((u64)r.g.pl) << ppaf->pln_offset;
                l.ppa |= ((u64)r.g.sec) << ppaf->sec_offset;
        } else {
                struct nvm_addrf *lbaf = &geo->addrf;

                l.ppa = ((u64)r.m.grp) << lbaf->ch_offset;
                l.ppa |= ((u64)r.m.pu) << lbaf->lun_offset;
                l.ppa |= ((u64)r.m.chk) << lbaf->chk_offset;
                l.ppa |= ((u64)r.m.sec) << lbaf->sec_offset;
        }

        return l;
}

static inline struct ppa_addr dev_to_generic_addr(struct nvm_dev *dev,
                                                  struct ppa_addr r)
{
        struct nvm_geo *geo = &dev->geo;
        struct ppa_addr l;

        l.ppa = 0;

        if (geo->version == NVM_OCSSD_SPEC_12) {
                struct nvm_addrf_12 *ppaf = (struct nvm_addrf_12 *)&geo->addrf;

                l.g.ch = (r.ppa & ppaf->ch_mask) >> ppaf->ch_offset;
                l.g.lun = (r.ppa & ppaf->lun_mask) >> ppaf->lun_offset;
                l.g.blk = (r.ppa & ppaf->blk_mask) >> ppaf->blk_offset;
                l.g.pg = (r.ppa & ppaf->pg_mask) >> ppaf->pg_offset;
                l.g.pl = (r.ppa & ppaf->pln_mask) >> ppaf->pln_offset;
                l.g.sec = (r.ppa & ppaf->sec_mask) >> ppaf->sec_offset;
        } else {
                struct nvm_addrf *lbaf = &geo->addrf;

                l.m.grp = (r.ppa & lbaf->ch_mask) >> lbaf->ch_offset;
                l.m.pu = (r.ppa & lbaf->lun_mask) >> lbaf->lun_offset;
                l.m.chk = (r.ppa & lbaf->chk_mask) >> lbaf->chk_offset;
                l.m.sec = (r.ppa & lbaf->sec_mask) >> lbaf->sec_offset;
        }

        return l;
}
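
/*
 * Illustrative sketch (an assumption, not from this file): the two helpers
 * above are inverses. On a 2.0-format device, a generic-format address is
 * packed into the layout the device reported in geo->addrf and unpacks
 * back unchanged:
 *
 *	struct ppa_addr gen, dev_fmt, back;
 *
 *	gen.ppa = 0;
 *	gen.m.grp = 0;
 *	gen.m.pu = 3;
 *	gen.m.chk = 7;
 *	gen.m.sec = 42;
 *
 *	dev_fmt = generic_to_dev_addr(dev, gen);
 *	back = dev_to_generic_addr(dev, dev_fmt);
 *	WARN_ON(back.ppa != gen.ppa);
 */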

static inline u64 dev_to_chunk_addr(struct nvm_dev *dev, void *addrf,
                                    struct ppa_addr p)
{
        struct nvm_geo *geo = &dev->geo;
        u64 caddr;

        if (geo->version == NVM_OCSSD_SPEC_12) {
                struct nvm_addrf_12 *ppaf = (struct nvm_addrf_12 *)addrf;

                caddr = (u64)p.g.pg << ppaf->pg_offset;
                caddr |= (u64)p.g.pl << ppaf->pln_offset;
                caddr |= (u64)p.g.sec << ppaf->sec_offset;
        } else {
                caddr = p.m.sec;
        }

        return caddr;
}

static inline struct ppa_addr nvm_ppa32_to_ppa64(struct nvm_dev *dev,
                                                 void *addrf, u32 ppa32)
{
        struct ppa_addr ppa64;

        ppa64.ppa = 0;

        if (ppa32 == -1) {
                ppa64.ppa = ADDR_EMPTY;
        } else if (ppa32 & (1U << 31)) {
                ppa64.c.line = ppa32 & ((~0U) >> 1);
                ppa64.c.is_cached = 1;
        } else {
                struct nvm_geo *geo = &dev->geo;

                if (geo->version == NVM_OCSSD_SPEC_12) {
                        struct nvm_addrf_12 *ppaf = addrf;

                        ppa64.g.ch = (ppa32 & ppaf->ch_mask) >>
                                                        ppaf->ch_offset;
                        ppa64.g.lun = (ppa32 & ppaf->lun_mask) >>
                                                        ppaf->lun_offset;
                        ppa64.g.blk = (ppa32 & ppaf->blk_mask) >>
                                                        ppaf->blk_offset;
                        ppa64.g.pg = (ppa32 & ppaf->pg_mask) >>
                                                        ppaf->pg_offset;
                        ppa64.g.pl = (ppa32 & ppaf->pln_mask) >>
                                                        ppaf->pln_offset;
                        ppa64.g.sec = (ppa32 & ppaf->sec_mask) >>
                                                        ppaf->sec_offset;
                } else {
                        struct nvm_addrf *lbaf = addrf;

                        ppa64.m.grp = (ppa32 & lbaf->ch_mask) >>
                                                        lbaf->ch_offset;
                        ppa64.m.pu = (ppa32 & lbaf->lun_mask) >>
                                                        lbaf->lun_offset;
                        ppa64.m.chk = (ppa32 & lbaf->chk_mask) >>
                                                        lbaf->chk_offset;
                        ppa64.m.sec = (ppa32 & lbaf->sec_mask) >>
                                                        lbaf->sec_offset;
                }
        }

        return ppa64;
}

static inline u32 nvm_ppa64_to_ppa32(struct nvm_dev *dev,
                                     void *addrf, struct ppa_addr ppa64)
{
        u32 ppa32 = 0;

        if (ppa64.ppa == ADDR_EMPTY) {
                ppa32 = ~0U;
        } else if (ppa64.c.is_cached) {
                ppa32 |= ppa64.c.line;
                ppa32 |= 1U << 31;
        } else {
                struct nvm_geo *geo = &dev->geo;

                if (geo->version == NVM_OCSSD_SPEC_12) {
                        struct nvm_addrf_12 *ppaf = addrf;

                        ppa32 |= ppa64.g.ch << ppaf->ch_offset;
                        ppa32 |= ppa64.g.lun << ppaf->lun_offset;
                        ppa32 |= ppa64.g.blk << ppaf->blk_offset;
                        ppa32 |= ppa64.g.pg << ppaf->pg_offset;
                        ppa32 |= ppa64.g.pl << ppaf->pln_offset;
                        ppa32 |= ppa64.g.sec << ppaf->sec_offset;
                } else {
                        struct nvm_addrf *lbaf = addrf;

                        ppa32 |= ppa64.m.grp << lbaf->ch_offset;
                        ppa32 |= ppa64.m.pu << lbaf->lun_offset;
                        ppa32 |= ppa64.m.chk << lbaf->chk_offset;
                        ppa32 |= ppa64.m.sec << lbaf->sec_offset;
                }
        }

        return ppa32;
}
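
/*
 * Illustrative sketch (an assumption, not from this file): the 32-bit
 * encoding reserves two patterns before falling back to the packed device
 * format: ~0U is the ADDR_EMPTY sentinel, and a set bit 31 marks a cache
 * line rather than a media address. Both survive a round trip:
 *
 *	u32 c = nvm_ppa64_to_ppa32(dev, addrf, cached);	// cached.c.is_cached
 *
 *	WARN_ON(!(c & (1U << 31)));
 *	WARN_ON(nvm_ppa32_to_ppa64(dev, addrf, c).c.line != cached.c.line);
 */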

static inline int nvm_next_ppa_in_chk(struct nvm_tgt_dev *dev,
                                      struct ppa_addr *ppa)
{
        struct nvm_geo *geo = &dev->geo;
        int last = 0;

        if (geo->version == NVM_OCSSD_SPEC_12) {
                int sec = ppa->g.sec;

                sec++;
                if (sec == geo->ws_min) {
                        int pg = ppa->g.pg;

                        sec = 0;
                        pg++;
                        if (pg == geo->num_pg) {
                                int pl = ppa->g.pl;

                                pg = 0;
                                pl++;
                                if (pl == geo->num_pln)
                                        last = 1;

                                ppa->g.pl = pl;
                        }
                        ppa->g.pg = pg;
                }
                ppa->g.sec = sec;
        } else {
                ppa->m.sec++;
                if (ppa->m.sec == geo->clba)
                        last = 1;
        }

        return last;
}
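
/*
 * Illustrative sketch (an assumption, not from this file): iterating every
 * sector of one chunk. @first is assumed to address the chunk's first
 * sector; the helper advances in sec -> pg -> pl order on 1.2 devices and
 * linearly on 2.0 devices, returning 1 once the chunk is exhausted.
 * process_sector() is a hypothetical callback.
 *
 *	struct ppa_addr ppa = first;
 *	int done;
 *
 *	do {
 *		process_sector(ppa);
 *		done = nvm_next_ppa_in_chk(dev, &ppa);
 *	} while (!done);
 */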

typedef sector_t (nvm_tgt_capacity_fn)(void *);
typedef void *(nvm_tgt_init_fn)(struct nvm_tgt_dev *, struct gendisk *,
                                int flags);
typedef void (nvm_tgt_exit_fn)(void *, bool);
typedef int (nvm_tgt_sysfs_init_fn)(struct gendisk *);
typedef void (nvm_tgt_sysfs_exit_fn)(struct gendisk *);

enum {
        NVM_TGT_F_DEV_L2P = 0,
        NVM_TGT_F_HOST_L2P = 1 << 0,
};

struct nvm_tgt_type {
        const char *name;
        unsigned int version[3];
        int flags;

        /* target entry points */
        const struct block_device_operations *bops;
        nvm_tgt_capacity_fn *capacity;

        /* module-specific init/teardown */
        nvm_tgt_init_fn *init;
        nvm_tgt_exit_fn *exit;

        /* sysfs */
        nvm_tgt_sysfs_init_fn *sysfs_init;
        nvm_tgt_sysfs_exit_fn *sysfs_exit;

        /* For internal use */
        struct list_head list;
        struct module *owner;
};
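
/*
 * Illustrative sketch (hypothetical target, not from this file): a target
 * module declares its type once and registers it from module init; the
 * my_* symbols are assumed placeholders.
 *
 *	static struct nvm_tgt_type tt_mytgt = {
 *		.name		= "mytgt",
 *		.version	= {1, 0, 0},
 *		.flags		= NVM_TGT_F_HOST_L2P,
 *		.capacity	= my_capacity,
 *		.init		= my_init,
 *		.exit		= my_exit,
 *		.owner		= THIS_MODULE,
 *	};
 *
 *	// from module init / exit:
 *	nvm_register_tgt_type(&tt_mytgt);
 *	nvm_unregister_tgt_type(&tt_mytgt);
 */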

extern int nvm_register_tgt_type(struct nvm_tgt_type *);
extern void nvm_unregister_tgt_type(struct nvm_tgt_type *);

extern void *nvm_dev_dma_alloc(struct nvm_dev *, gfp_t, dma_addr_t *);
extern void nvm_dev_dma_free(struct nvm_dev *, void *, dma_addr_t);

extern struct nvm_dev *nvm_alloc_dev(int);
extern int nvm_register(struct nvm_dev *);
extern void nvm_unregister(struct nvm_dev *);

extern int nvm_get_chunk_meta(struct nvm_tgt_dev *, struct ppa_addr,
                              int, struct nvm_chk_meta *);
extern int nvm_set_chunk_meta(struct nvm_tgt_dev *, struct ppa_addr *,
                              int, int);
extern int nvm_submit_io(struct nvm_tgt_dev *, struct nvm_rq *, void *);
extern int nvm_submit_io_sync(struct nvm_tgt_dev *, struct nvm_rq *, void *);
extern void nvm_end_io(struct nvm_rq *);
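
/*
 * Illustrative sketch (an assumption, not from this file): a synchronous
 * vector erase of one chunk through the submission API. NVM_MAX_VLBA
 * bounds nr_ppas for any vector command; a single-PPA request carries its
 * address in ppa_addr rather than ppa_list.
 *
 *	struct nvm_rq rqd = { };
 *
 *	rqd.opcode = NVM_OP_ERASE;
 *	rqd.ppa_addr = chunk_ppa;	// nr_ppas == 1 uses ppa_addr
 *	rqd.nr_ppas = 1;
 *	rqd.is_seq = 1;
 *
 *	if (nvm_submit_io_sync(tgt_dev, &rqd, NULL))
 *		return -EIO;
 *	return rqd.error ? -EIO : 0;
 */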

#else /* CONFIG_NVM */
struct nvm_dev_ops;

static inline struct nvm_dev *nvm_alloc_dev(int node)
{
        return ERR_PTR(-EINVAL);
}
static inline int nvm_register(struct nvm_dev *dev)
{
        return -EINVAL;
}
static inline void nvm_unregister(struct nvm_dev *dev) {}
#endif /* CONFIG_NVM */
#endif /* NVM_H */