linux/include/linux/lightnvm.h
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef NVM_H
#define NVM_H

#include <linux/blkdev.h>
#include <linux/types.h>
#include <uapi/linux/lightnvm.h>

enum {
	NVM_IO_OK = 0,
	NVM_IO_REQUEUE = 1,
	NVM_IO_DONE = 2,
	NVM_IO_ERR = 3,

	NVM_IOTYPE_NONE = 0,
	NVM_IOTYPE_GC = 1,
};

/* common format */
#define NVM_GEN_CH_BITS  (8)
#define NVM_GEN_LUN_BITS (8)
#define NVM_GEN_BLK_BITS (16)
#define NVM_GEN_RESERVED (32)

/* 1.2 format */
#define NVM_12_PG_BITS  (16)
#define NVM_12_PL_BITS  (4)
#define NVM_12_SEC_BITS (4)
#define NVM_12_RESERVED (8)

/* 2.0 format */
#define NVM_20_SEC_BITS (24)
#define NVM_20_RESERVED (8)

enum {
	NVM_OCSSD_SPEC_12 = 12,
	NVM_OCSSD_SPEC_20 = 20,
};

struct ppa_addr {
	/* Generic structure for all addresses */
	union {
		/* generic device format */
		struct {
			u64 ch		: NVM_GEN_CH_BITS;
			u64 lun		: NVM_GEN_LUN_BITS;
			u64 blk		: NVM_GEN_BLK_BITS;
			u64 reserved	: NVM_GEN_RESERVED;
		} a;

		/* 1.2 device format */
		struct {
			u64 ch		: NVM_GEN_CH_BITS;
			u64 lun		: NVM_GEN_LUN_BITS;
			u64 blk		: NVM_GEN_BLK_BITS;
			u64 pg		: NVM_12_PG_BITS;
			u64 pl		: NVM_12_PL_BITS;
			u64 sec		: NVM_12_SEC_BITS;
			u64 reserved	: NVM_12_RESERVED;
		} g;

		/* 2.0 device format */
		struct {
			u64 grp		: NVM_GEN_CH_BITS;
			u64 pu		: NVM_GEN_LUN_BITS;
			u64 chk		: NVM_GEN_BLK_BITS;
			u64 sec		: NVM_20_SEC_BITS;
			u64 reserved	: NVM_20_RESERVED;
		} m;

		struct {
			u64 line	: 63;
			u64 is_cached	: 1;
		} c;

		u64 ppa;
	};
};
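
/*
 * Usage sketch (illustrative only, not from the original header): every
 * view of the union aliases the same 64 bits, so an address built through
 * a format-specific view can be carried around as the raw ppa value and
 * unpacked again later. submit() below is a placeholder:
 *
 *	struct ppa_addr p;
 *
 *	p.ppa = 0;
 *	p.m.grp = grp;		(2.0 view: group, PU, chunk, sector)
 *	p.m.pu = pu;
 *	p.m.chk = chk;
 *	p.m.sec = sec;
 *	submit(p.ppa);		(same bits, now as a plain u64)
 */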

struct nvm_rq;
struct nvm_id;
struct nvm_dev;
struct nvm_tgt_dev;
struct nvm_chk_meta;

typedef int (nvm_id_fn)(struct nvm_dev *);
typedef int (nvm_op_bb_tbl_fn)(struct nvm_dev *, struct ppa_addr, u8 *);
typedef int (nvm_op_set_bb_fn)(struct nvm_dev *, struct ppa_addr *, int, int);
typedef int (nvm_get_chk_meta_fn)(struct nvm_dev *, sector_t, int,
							struct nvm_chk_meta *);
typedef int (nvm_submit_io_fn)(struct nvm_dev *, struct nvm_rq *);
typedef int (nvm_submit_io_sync_fn)(struct nvm_dev *, struct nvm_rq *);
typedef void *(nvm_create_dma_pool_fn)(struct nvm_dev *, char *, int);
typedef void (nvm_destroy_dma_pool_fn)(void *);
typedef void *(nvm_dev_dma_alloc_fn)(struct nvm_dev *, void *, gfp_t,
								dma_addr_t *);
typedef void (nvm_dev_dma_free_fn)(void *, void *, dma_addr_t);

struct nvm_dev_ops {
	nvm_id_fn		*identity;
	nvm_op_bb_tbl_fn	*get_bb_tbl;
	nvm_op_set_bb_fn	*set_bb_tbl;

	nvm_get_chk_meta_fn	*get_chk_meta;

	nvm_submit_io_fn	*submit_io;
	nvm_submit_io_sync_fn	*submit_io_sync;

	nvm_create_dma_pool_fn	*create_dma_pool;
	nvm_destroy_dma_pool_fn *destroy_dma_pool;
	nvm_dev_dma_alloc_fn	*dev_dma_alloc;
	nvm_dev_dma_free_fn	*dev_dma_free;
};
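
/*
 * A device driver makes itself available to lightnvm by filling this
 * table with callbacks matching the typedefs above. A minimal sketch,
 * where the mydrv_* functions are hypothetical:
 *
 *	static struct nvm_dev_ops mydrv_dev_ops = {
 *		.identity	= mydrv_identity,
 *		.get_bb_tbl	= mydrv_get_bb_tbl,
 *		.set_bb_tbl	= mydrv_set_bb_tbl,
 *		.submit_io	= mydrv_submit_io,
 *		.submit_io_sync	= mydrv_submit_io_sync,
 *	};
 */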

#ifdef CONFIG_NVM

#include <linux/blkdev.h>
#include <linux/file.h>
#include <linux/dmapool.h>
#include <uapi/linux/lightnvm.h>

enum {
	/* HW Responsibilities */
	NVM_RSP_L2P	= 1 << 0,
	NVM_RSP_ECC	= 1 << 1,

	/* Physical Addressing Mode */
	NVM_ADDRMODE_LINEAR	= 0,
	NVM_ADDRMODE_CHANNEL	= 1,

	/* Plane programming mode for LUN */
	NVM_PLANE_SINGLE	= 1,
	NVM_PLANE_DOUBLE	= 2,
	NVM_PLANE_QUAD		= 4,

	/* Status codes */
	NVM_RSP_SUCCESS		= 0x0,
	NVM_RSP_NOT_CHANGEABLE	= 0x1,
	NVM_RSP_ERR_FAILWRITE	= 0x40ff,
	NVM_RSP_ERR_EMPTYPAGE	= 0x42ff,
	NVM_RSP_ERR_FAILECC	= 0x4281,
	NVM_RSP_ERR_FAILCRC	= 0x4004,
	NVM_RSP_WARN_HIGHECC	= 0x4700,

	/* Device opcodes */
	NVM_OP_PWRITE		= 0x91,
	NVM_OP_PREAD		= 0x92,
	NVM_OP_ERASE		= 0x90,

	/* PPA Command Flags */
	NVM_IO_SNGL_ACCESS	= 0x0,
	NVM_IO_DUAL_ACCESS	= 0x1,
	NVM_IO_QUAD_ACCESS	= 0x2,

	/* NAND Access Modes */
	NVM_IO_SUSPEND		= 0x80,
	NVM_IO_SLC_MODE		= 0x100,
	NVM_IO_SCRAMBLE_ENABLE	= 0x200,

	/* Block Types */
	NVM_BLK_T_FREE		= 0x0,
	NVM_BLK_T_BAD		= 0x1,
	NVM_BLK_T_GRWN_BAD	= 0x2,
	NVM_BLK_T_DEV		= 0x4,
	NVM_BLK_T_HOST		= 0x8,

	/* Memory capabilities */
	NVM_ID_CAP_SLC		= 0x1,
	NVM_ID_CAP_CMD_SUSPEND	= 0x2,
	NVM_ID_CAP_SCRAMBLE	= 0x4,
	NVM_ID_CAP_ENCRYPT	= 0x8,

	/* Memory types */
	NVM_ID_FMTYPE_SLC	= 0,
	NVM_ID_FMTYPE_MLC	= 1,

	/* Device capabilities */
	NVM_ID_DCAP_BBLKMGMT	= 0x1,
	NVM_UD_DCAP_ECC		= 0x2,
};

struct nvm_id_lp_mlc {
	u16	num_pairs;
	u8	pairs[886];
};

struct nvm_id_lp_tbl {
	__u8	id[8];
	struct nvm_id_lp_mlc mlc;
};

struct nvm_addrf_12 {
	u8	ch_len;
	u8	lun_len;
	u8	blk_len;
	u8	pg_len;
	u8	pln_len;
	u8	sec_len;

	u8	ch_offset;
	u8	lun_offset;
	u8	blk_offset;
	u8	pg_offset;
	u8	pln_offset;
	u8	sec_offset;

	u64	ch_mask;
	u64	lun_mask;
	u64	blk_mask;
	u64	pg_mask;
	u64	pln_mask;
	u64	sec_mask;
};

struct nvm_addrf {
	u8	ch_len;
	u8	lun_len;
	u8	chk_len;
	u8	sec_len;
	u8	rsv_len[2];

	u8	ch_offset;
	u8	lun_offset;
	u8	chk_offset;
	u8	sec_offset;
	u8	rsv_off[2];

	u64	ch_mask;
	u64	lun_mask;
	u64	chk_mask;
	u64	sec_mask;
	u64	rsv_mask[2];
};

enum {
	/* Chunk states */
	NVM_CHK_ST_FREE =	1 << 0,
	NVM_CHK_ST_CLOSED =	1 << 1,
	NVM_CHK_ST_OPEN =	1 << 2,
	NVM_CHK_ST_OFFLINE =	1 << 3,

	/* Chunk types */
	NVM_CHK_TP_W_SEQ =	1 << 0,
	NVM_CHK_TP_W_RAN =	1 << 1,
	NVM_CHK_TP_SZ_SPEC =	1 << 4,
};

/*
 * Note: The structure size is linked to nvme_nvm_chk_meta such that the same
 * buffer can be used when converting from little endian to cpu addressing.
 */
struct nvm_chk_meta {
	u8	state;
	u8	type;
	u8	wi;
	u8	rsvd[5];
	u64	slba;
	u64	cnlb;
	u64	wp;
};
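
/*
 * Sketch of consuming one descriptor as returned by nvm_get_chunk_meta()
 * (declared below); meta is assumed to point at a populated entry:
 *
 *	if (meta->state & NVM_CHK_ST_FREE)
 *		(chunk is erased; writes start at meta->slba)
 *	else if (meta->state & NVM_CHK_ST_OPEN)
 *		(chunk is partially written; meta->wp is the write pointer)
 */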

struct nvm_target {
	struct list_head list;
	struct nvm_tgt_dev *dev;
	struct nvm_tgt_type *type;
	struct gendisk *disk;
};

#define ADDR_EMPTY (~0ULL)

#define NVM_TARGET_DEFAULT_OP (11)
#define NVM_TARGET_MIN_OP (3)
#define NVM_TARGET_MAX_OP (80)

#define NVM_VERSION_MAJOR 1
#define NVM_VERSION_MINOR 0
#define NVM_VERSION_PATCH 0

#define NVM_MAX_VLBA (64) /* max logical blocks in a vector command */

struct nvm_rq;
typedef void (nvm_end_io_fn)(struct nvm_rq *);

struct nvm_rq {
	struct nvm_tgt_dev *dev;

	struct bio *bio;

	union {
		struct ppa_addr ppa_addr;
		dma_addr_t dma_ppa_list;
	};

	struct ppa_addr *ppa_list;

	void *meta_list;
	dma_addr_t dma_meta_list;

	nvm_end_io_fn *end_io;

	uint8_t opcode;
	uint16_t nr_ppas;
	uint16_t flags;

	u64 ppa_status; /* ppa media status */
	int error;

	int is_seq; /* Sequential hint flag. 1.2 only */

	void *private;
};

static inline struct nvm_rq *nvm_rq_from_pdu(void *pdu)
{
	return pdu - sizeof(struct nvm_rq);
}

static inline void *nvm_rq_to_pdu(struct nvm_rq *rqdata)
{
	return rqdata + 1;
}

static inline struct ppa_addr *nvm_rq_to_ppa_list(struct nvm_rq *rqd)
{
	return (rqd->nr_ppas > 1) ? rqd->ppa_list : &rqd->ppa_addr;
}
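
/*
 * The two PDU helpers assume a target allocates its per-request data
 * immediately behind the nvm_rq in the same buffer. A minimal sketch,
 * with struct my_pdu standing in for a target's private data:
 *
 *	struct nvm_rq *rqd = kzalloc(sizeof(*rqd) + sizeof(struct my_pdu),
 *				     GFP_KERNEL);
 *	struct my_pdu *pdu = nvm_rq_to_pdu(rqd);
 *
 * Later, e.g. in an end_io callback handed only the PDU:
 *
 *	struct nvm_rq *back = nvm_rq_from_pdu(pdu);	(back == rqd)
 */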

enum {
	NVM_BLK_ST_FREE =	0x1,	/* Free block */
	NVM_BLK_ST_TGT =	0x2,	/* Block in use by target */
	NVM_BLK_ST_BAD =	0x8,	/* Bad block */
};

/* Instance geometry */
struct nvm_geo {
	/* device reported version */
	u8	major_ver_id;
	u8	minor_ver_id;

	/* kernel short version */
	u8	version;

	/* instance specific geometry */
	int num_ch;
	int num_lun;		/* per channel */

	/* calculated values */
	int all_luns;		/* across channels */
	int all_chunks;		/* across channels */

	int op;			/* over-provision in instance */

	sector_t total_secs;	/* across channels */

	/* chunk geometry */
	u32	num_chk;	/* chunks per lun */
	u32	clba;		/* sectors per chunk */
	u16	csecs;		/* sector size */
	u16	sos;		/* out-of-band area size */
	bool	ext;		/* metadata in extended data buffer */
	u32	mdts;		/* max data transfer size */

	/* device write constraints */
	u32	ws_min;		/* minimum write size */
	u32	ws_opt;		/* optimal write size */
	u32	mw_cunits;	/* distance required for successful read */
	u32	maxoc;		/* maximum open chunks */
	u32	maxocpu;	/* maximum open chunks per parallel unit */

	/* device capabilities */
	u32	mccap;

	/* device timings */
	u32	trdt;		/* Avg. Tread (ns) */
	u32	trdm;		/* Max Tread (ns) */
	u32	tprt;		/* Avg. Tprog (ns) */
	u32	tprm;		/* Max Tprog (ns) */
	u32	tbet;		/* Avg. Terase (ns) */
	u32	tbem;		/* Max Terase (ns) */

	/* generic address format */
	struct nvm_addrf addrf;

	/* 1.2 compatibility */
	u8	vmnt;
	u32	cap;
	u32	dom;

	u8	mtype;
	u8	fmtype;

	u16	cpar;
	u32	mpos;

	u8	num_pln;
	u8	pln_mode;
	u16	num_pg;
	u16	fpg_sz;
};
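
/*
 * The calculated fields follow from the instance geometry; roughly (a
 * sketch of the relationship, not a quote of the core code):
 *
 *	geo->all_luns = geo->num_ch * geo->num_lun;
 *	geo->all_chunks = geo->all_luns * geo->num_chk;
 *	geo->total_secs = (sector_t)geo->all_chunks * geo->clba;
 */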

/* sub-device structure */
struct nvm_tgt_dev {
	/* Device information */
	struct nvm_geo geo;

	/* Base ppas for target LUNs */
	struct ppa_addr *luns;

	struct request_queue *q;

	struct nvm_dev *parent;
	void *map;
};

struct nvm_dev {
	struct nvm_dev_ops *ops;

	struct list_head devices;

	/* Device information */
	struct nvm_geo geo;

	unsigned long *lun_map;
	void *dma_pool;

	/* Backend device */
	struct request_queue *q;
	char name[DISK_NAME_LEN];
	void *private_data;

	struct kref ref;
	void *rmap;

	struct mutex mlock;
	spinlock_t lock;

	/* target management */
	struct list_head area_list;
	struct list_head targets;
};

static inline struct ppa_addr generic_to_dev_addr(struct nvm_dev *dev,
						  struct ppa_addr r)
{
	struct nvm_geo *geo = &dev->geo;
	struct ppa_addr l;

	if (geo->version == NVM_OCSSD_SPEC_12) {
		struct nvm_addrf_12 *ppaf = (struct nvm_addrf_12 *)&geo->addrf;

		l.ppa = ((u64)r.g.ch) << ppaf->ch_offset;
		l.ppa |= ((u64)r.g.lun) << ppaf->lun_offset;
		l.ppa |= ((u64)r.g.blk) << ppaf->blk_offset;
		l.ppa |= ((u64)r.g.pg) << ppaf->pg_offset;
		l.ppa |= ((u64)r.g.pl) << ppaf->pln_offset;
		l.ppa |= ((u64)r.g.sec) << ppaf->sec_offset;
	} else {
		struct nvm_addrf *lbaf = &geo->addrf;

		l.ppa = ((u64)r.m.grp) << lbaf->ch_offset;
		l.ppa |= ((u64)r.m.pu) << lbaf->lun_offset;
		l.ppa |= ((u64)r.m.chk) << lbaf->chk_offset;
		l.ppa |= ((u64)r.m.sec) << lbaf->sec_offset;
	}

	return l;
}

static inline struct ppa_addr dev_to_generic_addr(struct nvm_dev *dev,
						  struct ppa_addr r)
{
	struct nvm_geo *geo = &dev->geo;
	struct ppa_addr l;

	l.ppa = 0;

	if (geo->version == NVM_OCSSD_SPEC_12) {
		struct nvm_addrf_12 *ppaf = (struct nvm_addrf_12 *)&geo->addrf;

		l.g.ch = (r.ppa & ppaf->ch_mask) >> ppaf->ch_offset;
		l.g.lun = (r.ppa & ppaf->lun_mask) >> ppaf->lun_offset;
		l.g.blk = (r.ppa & ppaf->blk_mask) >> ppaf->blk_offset;
		l.g.pg = (r.ppa & ppaf->pg_mask) >> ppaf->pg_offset;
		l.g.pl = (r.ppa & ppaf->pln_mask) >> ppaf->pln_offset;
		l.g.sec = (r.ppa & ppaf->sec_mask) >> ppaf->sec_offset;
	} else {
		struct nvm_addrf *lbaf = &geo->addrf;

		l.m.grp = (r.ppa & lbaf->ch_mask) >> lbaf->ch_offset;
		l.m.pu = (r.ppa & lbaf->lun_mask) >> lbaf->lun_offset;
		l.m.chk = (r.ppa & lbaf->chk_mask) >> lbaf->chk_offset;
		l.m.sec = (r.ppa & lbaf->sec_mask) >> lbaf->sec_offset;
	}

	return l;
}
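
/*
 * The two helpers are inverses: packing a generic address into the
 * device's native layout and unpacking it again recovers the original
 * fields. Sketch, for any address within the device geometry:
 *
 *	struct ppa_addr native = generic_to_dev_addr(dev, p);
 *	struct ppa_addr back = dev_to_generic_addr(dev, native);
 *	(back.ppa == p.ppa)
 */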

static inline u64 dev_to_chunk_addr(struct nvm_dev *dev, void *addrf,
				    struct ppa_addr p)
{
	struct nvm_geo *geo = &dev->geo;
	u64 caddr;

	if (geo->version == NVM_OCSSD_SPEC_12) {
		struct nvm_addrf_12 *ppaf = (struct nvm_addrf_12 *)addrf;

		caddr = (u64)p.g.pg << ppaf->pg_offset;
		caddr |= (u64)p.g.pl << ppaf->pln_offset;
		caddr |= (u64)p.g.sec << ppaf->sec_offset;
	} else {
		caddr = p.m.sec;
	}

	return caddr;
}

static inline struct ppa_addr nvm_ppa32_to_ppa64(struct nvm_dev *dev,
						 void *addrf, u32 ppa32)
{
	struct ppa_addr ppa64;

	ppa64.ppa = 0;

	if (ppa32 == -1) {
		ppa64.ppa = ADDR_EMPTY;
	} else if (ppa32 & (1U << 31)) {
		ppa64.c.line = ppa32 & ((~0U) >> 1);
		ppa64.c.is_cached = 1;
	} else {
		struct nvm_geo *geo = &dev->geo;

		if (geo->version == NVM_OCSSD_SPEC_12) {
			struct nvm_addrf_12 *ppaf = addrf;

			ppa64.g.ch = (ppa32 & ppaf->ch_mask) >>
							ppaf->ch_offset;
			ppa64.g.lun = (ppa32 & ppaf->lun_mask) >>
							ppaf->lun_offset;
			ppa64.g.blk = (ppa32 & ppaf->blk_mask) >>
							ppaf->blk_offset;
			ppa64.g.pg = (ppa32 & ppaf->pg_mask) >>
							ppaf->pg_offset;
			ppa64.g.pl = (ppa32 & ppaf->pln_mask) >>
							ppaf->pln_offset;
			ppa64.g.sec = (ppa32 & ppaf->sec_mask) >>
							ppaf->sec_offset;
		} else {
			struct nvm_addrf *lbaf = addrf;

			ppa64.m.grp = (ppa32 & lbaf->ch_mask) >>
							lbaf->ch_offset;
			ppa64.m.pu = (ppa32 & lbaf->lun_mask) >>
							lbaf->lun_offset;
			ppa64.m.chk = (ppa32 & lbaf->chk_mask) >>
							lbaf->chk_offset;
			ppa64.m.sec = (ppa32 & lbaf->sec_mask) >>
							lbaf->sec_offset;
		}
	}

	return ppa64;
}

static inline u32 nvm_ppa64_to_ppa32(struct nvm_dev *dev,
				     void *addrf, struct ppa_addr ppa64)
{
	u32 ppa32 = 0;

	if (ppa64.ppa == ADDR_EMPTY) {
		ppa32 = ~0U;
	} else if (ppa64.c.is_cached) {
		ppa32 |= ppa64.c.line;
		ppa32 |= 1U << 31;
	} else {
		struct nvm_geo *geo = &dev->geo;

		if (geo->version == NVM_OCSSD_SPEC_12) {
			struct nvm_addrf_12 *ppaf = addrf;

			ppa32 |= ppa64.g.ch << ppaf->ch_offset;
			ppa32 |= ppa64.g.lun << ppaf->lun_offset;
			ppa32 |= ppa64.g.blk << ppaf->blk_offset;
			ppa32 |= ppa64.g.pg << ppaf->pg_offset;
			ppa32 |= ppa64.g.pl << ppaf->pln_offset;
			ppa32 |= ppa64.g.sec << ppaf->sec_offset;
		} else {
			struct nvm_addrf *lbaf = addrf;

			ppa32 |= ppa64.m.grp << lbaf->ch_offset;
			ppa32 |= ppa64.m.pu << lbaf->lun_offset;
			ppa32 |= ppa64.m.chk << lbaf->chk_offset;
			ppa32 |= ppa64.m.sec << lbaf->sec_offset;
		}
	}

	return ppa32;
}
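
/*
 * The 32-bit form packs three cases: ~0U marks an empty entry, a set bit
 * 31 marks a cache line held in bits 0-30, and otherwise the low bits
 * hold a packed device address. Sketch of decoding on the consumer side:
 *
 *	if (ppa32 == ~0U)
 *		(empty mapping)
 *	else if (ppa32 & (1U << 31))
 *		(cached; line is ppa32 & 0x7fffffff)
 *	else
 *		ppa64 = nvm_ppa32_to_ppa64(dev, addrf, ppa32);
 */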

static inline int nvm_next_ppa_in_chk(struct nvm_tgt_dev *dev,
				      struct ppa_addr *ppa)
{
	struct nvm_geo *geo = &dev->geo;
	int last = 0;

	if (geo->version == NVM_OCSSD_SPEC_12) {
		int sec = ppa->g.sec;

		sec++;
		if (sec == geo->ws_min) {
			int pg = ppa->g.pg;

			sec = 0;
			pg++;
			if (pg == geo->num_pg) {
				int pl = ppa->g.pl;

				pg = 0;
				pl++;
				if (pl == geo->num_pln)
					last = 1;

				ppa->g.pl = pl;
			}
			ppa->g.pg = pg;
		}
		ppa->g.sec = sec;
	} else {
		ppa->m.sec++;
		if (ppa->m.sec == geo->clba)
			last = 1;
	}

	return last;
}
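
/*
 * A target can visit every sector in a chunk by starting from the chunk's
 * first ppa and advancing until the helper reports the last position.
 * Sketch, with first_ppa assumed to address sector 0 of the chunk:
 *
 *	struct ppa_addr ppa = first_ppa;
 *	int done;
 *
 *	do {
 *		(process ppa)
 *		done = nvm_next_ppa_in_chk(dev, &ppa);
 *	} while (!done);
 */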

typedef blk_qc_t (nvm_tgt_make_rq_fn)(struct request_queue *, struct bio *);
typedef sector_t (nvm_tgt_capacity_fn)(void *);
typedef void *(nvm_tgt_init_fn)(struct nvm_tgt_dev *, struct gendisk *,
				int flags);
typedef void (nvm_tgt_exit_fn)(void *, bool);
typedef int (nvm_tgt_sysfs_init_fn)(struct gendisk *);
typedef void (nvm_tgt_sysfs_exit_fn)(struct gendisk *);

enum {
	NVM_TGT_F_DEV_L2P = 0,
	NVM_TGT_F_HOST_L2P = 1 << 0,
};

struct nvm_tgt_type {
	const char *name;
	unsigned int version[3];
	int flags;

	/* target entry points */
	nvm_tgt_make_rq_fn *make_rq;
	nvm_tgt_capacity_fn *capacity;

	/* module-specific init/teardown */
	nvm_tgt_init_fn *init;
	nvm_tgt_exit_fn *exit;

	/* sysfs */
	nvm_tgt_sysfs_init_fn *sysfs_init;
	nvm_tgt_sysfs_exit_fn *sysfs_exit;

	/* For internal use */
	struct list_head list;
	struct module *owner;
};

extern int nvm_register_tgt_type(struct nvm_tgt_type *);
extern void nvm_unregister_tgt_type(struct nvm_tgt_type *);
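
/*
 * A target module registers its type on load and removes it on unload.
 * A minimal sketch, with the mytgt_* callbacks left hypothetical:
 *
 *	static struct nvm_tgt_type tt_mytgt = {
 *		.name		= "mytgt",
 *		.version	= {1, 0, 0},
 *		.make_rq	= mytgt_make_rq,
 *		.capacity	= mytgt_capacity,
 *		.init		= mytgt_init,
 *		.exit		= mytgt_exit,
 *		.owner		= THIS_MODULE,
 *	};
 *
 *	static int __init mytgt_module_init(void)
 *	{
 *		return nvm_register_tgt_type(&tt_mytgt);
 *	}
 *
 *	static void __exit mytgt_module_exit(void)
 *	{
 *		nvm_unregister_tgt_type(&tt_mytgt);
 *	}
 */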

extern void *nvm_dev_dma_alloc(struct nvm_dev *, gfp_t, dma_addr_t *);
extern void nvm_dev_dma_free(struct nvm_dev *, void *, dma_addr_t);

extern struct nvm_dev *nvm_alloc_dev(int);
extern int nvm_register(struct nvm_dev *);
extern void nvm_unregister(struct nvm_dev *);

extern int nvm_get_chunk_meta(struct nvm_tgt_dev *, struct ppa_addr,
			      int, struct nvm_chk_meta *);
extern int nvm_set_chunk_meta(struct nvm_tgt_dev *, struct ppa_addr *,
			      int, int);
extern int nvm_submit_io(struct nvm_tgt_dev *, struct nvm_rq *);
extern int nvm_submit_io_sync(struct nvm_tgt_dev *, struct nvm_rq *);
extern void nvm_end_io(struct nvm_rq *);

#else /* CONFIG_NVM */
struct nvm_dev_ops;

static inline struct nvm_dev *nvm_alloc_dev(int node)
{
	return ERR_PTR(-EINVAL);
}
static inline int nvm_register(struct nvm_dev *dev)
{
	return -EINVAL;
}
static inline void nvm_unregister(struct nvm_dev *dev) {}
#endif /* CONFIG_NVM */
#endif /* NVM_H */