linux/drivers/lightnvm/pblk.h
/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Copyright (C) 2015 IT University of Copenhagen (rrpc.h)
 * Copyright (C) 2016 CNEX Labs
 * Initial release: Matias Bjorling <matias@cnexlabs.com>
 * Write buffering: Javier Gonzalez <javier@cnexlabs.com>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version
 * 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * Implementation of a Physical Block-device target for Open-channel SSDs.
 *
 */

#ifndef PBLK_H_
#define PBLK_H_

#include <linux/blkdev.h>
#include <linux/blk-mq.h>
#include <linux/bio.h>
#include <linux/module.h>
#include <linux/kthread.h>
#include <linux/vmalloc.h>
#include <linux/crc32.h>
#include <linux/uuid.h>

#include <linux/lightnvm.h>

/* Run only GC if less than 1/X blocks are free */
#define GC_LIMIT_INVERSE 5
#define GC_TIME_MSECS 1000

#define PBLK_SECTOR (512)
#define PBLK_EXPOSED_PAGE_SIZE (4096)

#define PBLK_NR_CLOSE_JOBS (4)

#define PBLK_CACHE_NAME_LEN (DISK_NAME_LEN + 16)

#define PBLK_COMMAND_TIMEOUT_MS 30000

/* Max 512 LUNs per device */
#define PBLK_MAX_LUNS_BITMAP (4)

#define NR_PHY_IN_LOG (PBLK_EXPOSED_PAGE_SIZE / PBLK_SECTOR)

/* Static pool sizes */
#define PBLK_GEN_WS_POOL_SIZE (2)

#define PBLK_DEFAULT_OP (11)

enum {
        PBLK_READ               = READ,
        PBLK_WRITE              = WRITE,/* Write from write buffer */
        PBLK_WRITE_INT,                 /* Internal write - no write buffer */
        PBLK_READ_RECOV,                /* Recovery read - errors allowed */
        PBLK_ERASE,
};

enum {
        /* IO Types */
        PBLK_IOTYPE_USER        = 1 << 0,
        PBLK_IOTYPE_GC          = 1 << 1,

        /* Write buffer flags */
        PBLK_FLUSH_ENTRY        = 1 << 2,
        PBLK_WRITTEN_DATA       = 1 << 3,
        PBLK_SUBMITTED_ENTRY    = 1 << 4,
        PBLK_WRITABLE_ENTRY     = 1 << 5,
};

enum {
        PBLK_BLK_ST_OPEN =      0x1,
        PBLK_BLK_ST_CLOSED =    0x2,
};

enum {
        PBLK_CHUNK_RESET_START,
        PBLK_CHUNK_RESET_DONE,
        PBLK_CHUNK_RESET_FAILED,
};

struct pblk_sec_meta {
        u64 reserved;
        __le64 lba;
};

/* The number of GC lists and the rate-limiter states go together. This way the
 * rate-limiter can dictate how much GC is needed based on resource utilization.
 */
#define PBLK_GC_NR_LISTS 4

enum {
        PBLK_RL_OFF = 0,
        PBLK_RL_WERR = 1,
        PBLK_RL_HIGH = 2,
        PBLK_RL_MID = 3,
        PBLK_RL_LOW = 4
};

#define pblk_dma_meta_size (sizeof(struct pblk_sec_meta) * NVM_MAX_VLBA)
#define pblk_dma_ppa_size (sizeof(u64) * NVM_MAX_VLBA)

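/*
 * Editor's note (illustrative, not part of the upstream header): assuming
 * the NVM_MAX_VLBA limit of 64 vectored sectors per command defined in
 * <linux/lightnvm.h>, the per-request DMA areas above work out to
 * 64 * sizeof(struct pblk_sec_meta) = 64 * 16 = 1024 bytes of sector
 * metadata and 64 * sizeof(u64) = 512 bytes of PPA list space.
 */
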
/* write buffer completion context */
struct pblk_c_ctx {
        struct list_head list;          /* Head for out-of-order completion */

        unsigned long *lun_bitmap;      /* Luns used on current request */
        unsigned int sentry;
        unsigned int nr_valid;
        unsigned int nr_padded;
};

/* read context */
struct pblk_g_ctx {
        void *private;
        unsigned long start_time;
        u64 lba;
};

/* partial read context */
struct pblk_pr_ctx {
        struct bio *orig_bio;
        DECLARE_BITMAP(bitmap, NVM_MAX_VLBA);
        unsigned int orig_nr_secs;
        unsigned int bio_init_idx;
        void *ppa_ptr;
        dma_addr_t dma_ppa_list;
};

/* Pad context */
struct pblk_pad_rq {
        struct pblk *pblk;
        struct completion wait;
        struct kref ref;
};

/* Recovery context */
struct pblk_rec_ctx {
        struct pblk *pblk;
        struct nvm_rq *rqd;
        struct work_struct ws_rec;
};

/* Write context */
struct pblk_w_ctx {
        struct bio_list bios;           /* Original bios - used for completion
                                         * in REQ_FUA, REQ_FLUSH case
                                         */
        u64 lba;                        /* Logical addr. associated with entry */
        struct ppa_addr ppa;            /* Physical addr. associated with entry */
        int flags;                      /* Write context flags */
};

struct pblk_rb_entry {
        struct ppa_addr cacheline;      /* Cacheline for this entry */
        void *data;                     /* Pointer to data on this entry */
        struct pblk_w_ctx w_ctx;        /* Context for this entry */
        struct list_head index;         /* List head to enable indexes */
};

#define EMPTY_ENTRY (~0U)

struct pblk_rb_pages {
        struct page *pages;
        int order;
        struct list_head list;
};

struct pblk_rb {
        struct pblk_rb_entry *entries;  /* Ring buffer entries */
        unsigned int mem;               /* Write offset - points to next
                                         * writable entry in memory
                                         */
        unsigned int subm;              /* Read offset - points to last entry
                                         * that has been submitted to the media
                                         * to be persisted
                                         */
        unsigned int sync;              /* Synced - backpointer that signals
                                         * the last submitted entry that has
                                         * been successfully persisted to media
                                         */
        unsigned int flush_point;       /* Flush point - last entry that must be
                                         * flushed to the media. Used with
                                         * REQ_FLUSH and REQ_FUA
                                         */
        unsigned int l2p_update;        /* l2p update point - next entry for
                                         * which l2p mapping will be updated to
                                         * contain a device ppa address (instead
                                         * of a cacheline)
                                         */
        unsigned int nr_entries;        /* Number of entries in write buffer -
                                         * must be a power of two
                                         */
        unsigned int seg_size;          /* Size of the data segments being
                                         * stored on each entry. Typically this
                                         * will be 4KB
                                         */

        unsigned int back_thres;        /* Threshold that shall be maintained by
                                         * the backpointer in order to respect
                                         * geo->mw_cunits on a per chunk basis
                                         */

        struct list_head pages;         /* List of data pages */

        spinlock_t w_lock;              /* Write lock */
        spinlock_t s_lock;              /* Sync lock */

#ifdef CONFIG_NVM_PBLK_DEBUG
        atomic_t inflight_flush_point;  /* Not served REQ_FLUSH | REQ_FUA */
#endif
};

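/*
 * Editor's note (illustrative sketch, not part of the upstream header):
 * entries flow through the ring in the order mem -> subm -> sync ->
 * l2p_update, i.e. they are first written at 'mem', then submitted to the
 * device, then acknowledged as persisted, and finally have their L2P
 * entries switched from cacheline to device address. 'flush_point' marks
 * the newest entry that must be persisted before pending REQ_FLUSH/REQ_FUA
 * bios can complete. Since nr_entries is a power of two, wrapping an index
 * can be done with a mask, along the lines of:
 */
#if 0   /* example only - see pblk_rb_ptr_wrap() for the real helper */
static inline unsigned int pblk_rb_wrap_example(struct pblk_rb *rb,
                                                unsigned int pos,
                                                unsigned int nr_entries)
{
        return (pos + nr_entries) & (rb->nr_entries - 1);
}
#endif
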
#define PBLK_RECOVERY_SECTORS 16

struct pblk_lun {
        struct ppa_addr bppa;
        struct semaphore wr_sem;
};

struct pblk_gc_rq {
        struct pblk_line *line;
        void *data;
        u64 paddr_list[NVM_MAX_VLBA];
        u64 lba_list[NVM_MAX_VLBA];
        int nr_secs;
        int secs_to_gc;
        struct list_head list;
};

struct pblk_gc {
        /* These states are not protected by a lock since (i) they are in the
         * fast path, and (ii) they are not critical.
         */
        int gc_active;
        int gc_enabled;
        int gc_forced;

        struct task_struct *gc_ts;
        struct task_struct *gc_writer_ts;
        struct task_struct *gc_reader_ts;

        struct workqueue_struct *gc_line_reader_wq;
        struct workqueue_struct *gc_reader_wq;

        struct timer_list gc_timer;

        struct semaphore gc_sem;
        atomic_t read_inflight_gc; /* Number of lines with inflight GC reads */
        atomic_t pipeline_gc;      /* Number of lines in the GC pipeline -
                                    * started reads to finished writes
                                    */
        int w_entries;

        struct list_head w_list;
        struct list_head r_list;

        spinlock_t lock;
        spinlock_t w_lock;
        spinlock_t r_lock;
};

struct pblk_rl {
        unsigned int high;      /* Upper threshold for rate limiter (free run -
                                 * user I/O rate limiter)
                                 */
        unsigned int high_pw;   /* High rounded up as a power of 2 */

#define PBLK_USER_HIGH_THRS 8   /* Begin write limit at 12% available blks */
#define PBLK_USER_LOW_THRS 10   /* Aggressive GC at 10% available blocks */

        int rb_windows_pw;      /* Number of rate windows in the write buffer
                                 * given as a power-of-2. This guarantees that
                                 * when user I/O is being rate limited, enough
                                 * space will be reserved for GC to place its
                                 * payload. A window is of pblk->max_write_pgs
                                 * size, which in NVMe is 64, i.e., 256 KB.
                                 */
        int rb_budget;          /* Total number of entries available for I/O */
        int rb_user_max;        /* Max buffer entries available for user I/O */
        int rb_gc_max;          /* Max buffer entries available for GC I/O */
        int rb_gc_rsv;          /* Reserved buffer entries for GC I/O */
        int rb_state;           /* Rate-limiter current state */
        int rb_max_io;          /* Maximum size for an I/O given the config */

        atomic_t rb_user_cnt;   /* User I/O buffer counter */
        atomic_t rb_gc_cnt;     /* GC I/O buffer counter */
        atomic_t rb_space;      /* Space limit in case of reaching capacity */

        int rsv_blocks;         /* Reserved blocks for GC */

        int rb_user_active;
        int rb_gc_active;

        atomic_t werr_lines;    /* Number of write error lines that need GC */

        struct timer_list u_timer;

        unsigned long long nr_secs;
        unsigned long total_blocks;

        atomic_t free_blocks;           /* Total number of free blocks (+ OP) */
        atomic_t free_user_blocks;      /* Number of user free blocks (no OP) */
};

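/*
 * Editor's note (illustrative, not part of the upstream header): assuming
 * the divisor semantics suggested by the two thresholds above, user writes
 * start being throttled when free blocks drop below roughly
 * total_blocks / PBLK_USER_HIGH_THRS (1/8, ~12.5%), and GC becomes
 * aggressive below total_blocks / PBLK_USER_LOW_THRS (1/10, 10%). For a
 * hypothetical device with 10000 blocks that is ~1250 and 1000 free
 * blocks respectively.
 */
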
#define PBLK_LINE_EMPTY (~0U)

enum {
        /* Line Types */
        PBLK_LINETYPE_FREE = 0,
        PBLK_LINETYPE_LOG = 1,
        PBLK_LINETYPE_DATA = 2,

        /* Line state */
        PBLK_LINESTATE_NEW = 9,
        PBLK_LINESTATE_FREE = 10,
        PBLK_LINESTATE_OPEN = 11,
        PBLK_LINESTATE_CLOSED = 12,
        PBLK_LINESTATE_GC = 13,
        PBLK_LINESTATE_BAD = 14,
        PBLK_LINESTATE_CORRUPT = 15,

        /* GC group */
        PBLK_LINEGC_NONE = 20,
        PBLK_LINEGC_EMPTY = 21,
        PBLK_LINEGC_LOW = 22,
        PBLK_LINEGC_MID = 23,
        PBLK_LINEGC_HIGH = 24,
        PBLK_LINEGC_FULL = 25,
        PBLK_LINEGC_WERR = 26
};

#define PBLK_MAGIC 0x70626c6b /* pblk */

/* emeta/smeta persistent storage format versions:
 * Changes in major version require offline migration.
 * Changes in minor version are handled automatically during
 * recovery.
 */

#define SMETA_VERSION_MAJOR (0)
#define SMETA_VERSION_MINOR (1)

#define EMETA_VERSION_MAJOR (0)
#define EMETA_VERSION_MINOR (2)

struct line_header {
        __le32 crc;
        __le32 identifier;      /* pblk identifier */
        __u8 uuid[16];          /* instance uuid */
        __le16 type;            /* line type */
        __u8 version_major;     /* version major */
        __u8 version_minor;     /* version minor */
        __le32 id;              /* line id for current line */
};

struct line_smeta {
        struct line_header header;

        __le32 crc;             /* Full structure including struct crc */
        /* Previous line metadata */
        __le32 prev_id;         /* Line id for previous line */

        /* Current line metadata */
        __le64 seq_nr;          /* Sequence number for current line */

        /* Active writers */
        __le32 window_wr_lun;   /* Number of parallel LUNs to write */

        __le32 rsvd[2];

        __le64 lun_bitmap[];
};

/*
 * Metadata layout in media:
 *      First sector:
 *              1. struct line_emeta
 *              2. bad block bitmap (u64 * window_wr_lun)
 *              3. write amplification counters
 *      Mid sectors (start at lbas_sector):
 *              4. nr_lbas (u64) forming lba list
 *      Last sectors (start at vsc_sector):
 *              5. u32 valid sector count (vsc) for all lines (~0U: free line)
 */
struct line_emeta {
        struct line_header header;

        __le32 crc;             /* Full structure including struct crc */

        /* Previous line metadata */
        __le32 prev_id;         /* Line id for prev line */

        /* Current line metadata */
        __le64 seq_nr;          /* Sequence number for current line */

        /* Active writers */
        __le32 window_wr_lun;   /* Number of parallel LUNs to write */

        /* Bookkeeping for recovery */
        __le32 next_id;         /* Line id for next line */
        __le64 nr_lbas;         /* Number of lbas mapped in line */
        __le64 nr_valid_lbas;   /* Number of valid lbas mapped in line */
        __le64 bb_bitmap[];     /* Updated bad block bitmap for line */
};


/* Write amplification counters stored on media */
struct wa_counters {
        __le64 user;            /* Number of user written sectors */
        __le64 gc;              /* Number of sectors written by GC */
        __le64 pad;             /* Number of padded sectors */
};

struct pblk_emeta {
        struct line_emeta *buf;         /* emeta buffer in media format */
        int mem;                        /* Write offset - points to next
                                         * writable entry in memory
                                         */
        atomic_t sync;                  /* Synced - backpointer that signals the
                                         * last entry that has been successfully
                                         * persisted to media
                                         */
        unsigned int nr_entries;        /* Number of emeta entries */
};

struct pblk_smeta {
        struct line_smeta *buf;         /* smeta buffer in persistent format */
};

struct pblk_w_err_gc {
        int has_write_err;
        __le64 *lba_list;
};

struct pblk_line {
        struct pblk *pblk;
        unsigned int id;                /* Line number corresponds to the
                                         * block line
                                         */
        unsigned int seq_nr;            /* Unique line sequence number */

        int state;                      /* PBLK_LINESTATE_X */
        int type;                       /* PBLK_LINETYPE_X */
        int gc_group;                   /* PBLK_LINEGC_X */
        struct list_head list;          /* Free, GC lists */

        unsigned long *lun_bitmap;      /* Bitmap for LUNs mapped in line */

        struct nvm_chk_meta *chks;      /* Chunks forming line */

        struct pblk_smeta *smeta;       /* Start metadata */
        struct pblk_emeta *emeta;       /* End metadata */

        int meta_line;                  /* Metadata line id */
        int meta_distance;              /* Distance between data and metadata */

        u64 smeta_ssec;                 /* Sector where smeta starts */
        u64 emeta_ssec;                 /* Sector where emeta starts */

        unsigned int sec_in_line;       /* Number of usable secs in line */

        atomic_t blk_in_line;           /* Number of good blocks in line */
        unsigned long *blk_bitmap;      /* Bitmap for valid/invalid blocks */
        unsigned long *erase_bitmap;    /* Bitmap for erased blocks */

        unsigned long *map_bitmap;      /* Bitmap for mapped sectors in line */
        unsigned long *invalid_bitmap;  /* Bitmap for invalid sectors in line */

        atomic_t left_eblks;            /* Blocks left for erasing */
        atomic_t left_seblks;           /* Blocks left for sync erasing */

        int left_msecs;                 /* Sectors left for mapping */
        unsigned int cur_sec;           /* Sector map pointer */
        unsigned int nr_valid_lbas;     /* Number of valid lbas in line */

        __le32 *vsc;                    /* Valid sector count in line */

        struct kref ref;                /* Write buffer L2P references */

        struct pblk_w_err_gc *w_err_gc; /* Write error gc recovery metadata */

        spinlock_t lock;                /* Necessary for invalid_bitmap only */
};

#define PBLK_DATA_LINES 4

enum {
        PBLK_KMALLOC_META = 1,
        PBLK_VMALLOC_META = 2,
};

enum {
        PBLK_EMETA_TYPE_HEADER = 1,     /* struct line_emeta first sector */
        PBLK_EMETA_TYPE_LLBA = 2,       /* lba list - type: __le64 */
        PBLK_EMETA_TYPE_VSC = 3,        /* vsc list - type: __le32 */
};

struct pblk_line_mgmt {
        int nr_lines;                   /* Total number of full lines */
        int nr_free_lines;              /* Number of full lines in free list */

        /* Free lists - use free_lock */
        struct list_head free_list;     /* Full lines ready to use */
        struct list_head corrupt_list;  /* Full lines corrupted */
        struct list_head bad_list;      /* Full lines bad */

        /* GC lists - use gc_lock */
        struct list_head *gc_lists[PBLK_GC_NR_LISTS];
        struct list_head gc_high_list;  /* Full lines ready to GC, high isc */
        struct list_head gc_mid_list;   /* Full lines ready to GC, mid isc */
        struct list_head gc_low_list;   /* Full lines ready to GC, low isc */

        struct list_head gc_werr_list;  /* Write err recovery list */

        struct list_head gc_full_list;  /* Full lines ready to GC, no valid */
        struct list_head gc_empty_list; /* Full lines close, all valid */

        struct pblk_line *log_line;     /* Current FTL log line */
        struct pblk_line *data_line;    /* Current data line */
        struct pblk_line *log_next;     /* Next FTL log line */
        struct pblk_line *data_next;    /* Next data line */

        struct list_head emeta_list;    /* Lines queued to schedule emeta */

        __le32 *vsc_list;               /* Valid sector counts for all lines */

        /* Metadata allocation type: VMALLOC | KMALLOC */
        int emeta_alloc_type;

        /* Pre-allocated metadata for data lines */
        struct pblk_smeta *sline_meta[PBLK_DATA_LINES];
        struct pblk_emeta *eline_meta[PBLK_DATA_LINES];
        unsigned long meta_bitmap;

        /* Cache and mempool for map/invalid bitmaps */
        struct kmem_cache *bitmap_cache;
        mempool_t *bitmap_pool;

        /* Helpers for fast bitmap calculations */
        unsigned long *bb_template;
        unsigned long *bb_aux;

        unsigned long d_seq_nr;         /* Data line unique sequence number */
        unsigned long l_seq_nr;         /* Log line unique sequence number */

        spinlock_t free_lock;
        spinlock_t close_lock;
        spinlock_t gc_lock;
};

struct pblk_line_meta {
        unsigned int smeta_len;         /* Total length for smeta */
        unsigned int smeta_sec;         /* Sectors needed for smeta */

        unsigned int emeta_len[4];      /* Lengths for emeta:
                                         *  [0]: Total
                                         *  [1]: struct line_emeta +
                                         *       bb_bitmap + struct wa_counters
                                         *  [2]: L2P portion
                                         *  [3]: vsc
                                         */
        unsigned int emeta_sec[4];      /* Sectors needed for emeta. Same layout
                                         * as emeta_len
                                         */

        unsigned int emeta_bb;          /* Boundary for bb that affects emeta */

        unsigned int vsc_list_len;      /* Length for vsc list */
        unsigned int sec_bitmap_len;    /* Length for sector bitmap in line */
        unsigned int blk_bitmap_len;    /* Length for block bitmap in line */
        unsigned int lun_bitmap_len;    /* Length for lun bitmap in line */

        unsigned int blk_per_line;      /* Number of blocks in a full line */
        unsigned int sec_per_line;      /* Number of sectors in a line */
        unsigned int dsec_per_line;     /* Number of data sectors in a line */
        unsigned int min_blk_line;      /* Min. number of good blocks in line */

        unsigned int mid_thrs;          /* Threshold for GC mid list */
        unsigned int high_thrs;         /* Threshold for GC high list */

        unsigned int meta_distance;     /* Distance between data and metadata */
};

enum {
        PBLK_STATE_RUNNING = 0,
        PBLK_STATE_STOPPING = 1,
        PBLK_STATE_RECOVERING = 2,
        PBLK_STATE_STOPPED = 3,
};

/* Internal format to support non-power-of-2 device formats */
struct pblk_addrf {
        /* gen to dev */
        int sec_stripe;
        int ch_stripe;
        int lun_stripe;

        /* dev to gen */
        int sec_lun_stripe;
        int sec_ws_stripe;
};

struct pblk {
        struct nvm_tgt_dev *dev;
        struct gendisk *disk;

        struct kobject kobj;

        struct pblk_lun *luns;

        struct pblk_line *lines;                /* Line array */
        struct pblk_line_mgmt l_mg;             /* Line management */
        struct pblk_line_meta lm;               /* Line metadata */

        struct nvm_addrf addrf;         /* Aligned address format */
        struct pblk_addrf uaddrf;       /* Unaligned address format */
        int addrf_len;

        struct pblk_rb rwb;

        int state;                      /* pblk line state */

        int min_write_pgs; /* Minimum number of pages required by controller */
        int max_write_pgs; /* Maximum number of pages supported by controller */

        sector_t capacity; /* Device capacity when bad blocks are subtracted */

        int op;      /* Percentage of device used for over-provisioning */
        int op_blks; /* Number of blocks used for over-provisioning */

        /* pblk provisioning values. Used by rate limiter */
        struct pblk_rl rl;

        int sec_per_write;

        unsigned char instance_uuid[16];

        /* Persistent write amplification counters, 4kb sector I/Os */
        atomic64_t user_wa;             /* Sectors written by user */
        atomic64_t gc_wa;               /* Sectors written by GC */
        atomic64_t pad_wa;              /* Padded sectors written */

        /* Reset values for delta write amplification measurements */
        u64 user_rst_wa;
        u64 gc_rst_wa;
        u64 pad_rst_wa;

        /* Counters used for calculating padding distribution */
        atomic64_t *pad_dist;           /* Padding distribution buckets */
        u64 nr_flush_rst;               /* Flushes reset value for pad dist. */
        atomic64_t nr_flush;            /* Number of flush/fua I/O */

#ifdef CONFIG_NVM_PBLK_DEBUG
        /* Non-persistent debug counters, 4kb sector I/Os */
        atomic_long_t inflight_writes;  /* Inflight writes (user and gc) */
        atomic_long_t padded_writes;    /* Sectors padded due to flush/fua */
        atomic_long_t padded_wb;        /* Sectors padded in write buffer */
        atomic_long_t req_writes;       /* Sectors stored on write buffer */
        atomic_long_t sub_writes;       /* Sectors submitted from buffer */
        atomic_long_t sync_writes;      /* Sectors synced to media */
        atomic_long_t inflight_reads;   /* Inflight sector read requests */
        atomic_long_t cache_reads;      /* Read requests that hit the cache */
        atomic_long_t sync_reads;       /* Completed sector read requests */
        atomic_long_t recov_writes;     /* Sectors submitted from recovery */
        atomic_long_t recov_gc_writes;  /* Sectors submitted from write GC */
        atomic_long_t recov_gc_reads;   /* Sectors submitted from read GC */
#endif

        spinlock_t lock;

        atomic_long_t read_failed;
        atomic_long_t read_empty;
        atomic_long_t read_high_ecc;
        atomic_long_t read_failed_gc;
        atomic_long_t write_failed;
        atomic_long_t erase_failed;

        atomic_t inflight_io;           /* General inflight I/O counter */

        struct task_struct *writer_ts;

        /* Simple translation map of logical addresses to physical addresses.
         * The logical addresses are known by the host system, while the
         * physical addresses are used when writing to the disk block device.
         */
        unsigned char *trans_map;
        spinlock_t trans_lock;

        struct list_head compl_list;

        spinlock_t resubmit_lock;        /* Resubmit list lock */
        struct list_head resubmit_list; /* Resubmit list for failed writes */

        mempool_t page_bio_pool;
        mempool_t gen_ws_pool;
        mempool_t rec_pool;
        mempool_t r_rq_pool;
        mempool_t w_rq_pool;
        mempool_t e_rq_pool;

        struct workqueue_struct *close_wq;
        struct workqueue_struct *bb_wq;
        struct workqueue_struct *r_end_wq;

        struct timer_list wtimer;

        struct pblk_gc gc;
};

struct pblk_line_ws {
        struct pblk *pblk;
        struct pblk_line *line;
        void *priv;
        struct work_struct ws;
};

#define pblk_g_rq_size (sizeof(struct nvm_rq) + sizeof(struct pblk_g_ctx))
#define pblk_w_rq_size (sizeof(struct nvm_rq) + sizeof(struct pblk_c_ctx))

#define pblk_err(pblk, fmt, ...)                        \
        pr_err("pblk %s: " fmt, pblk->disk->disk_name, ##__VA_ARGS__)
#define pblk_info(pblk, fmt, ...)                       \
        pr_info("pblk %s: " fmt, pblk->disk->disk_name, ##__VA_ARGS__)
#define pblk_warn(pblk, fmt, ...)                       \
        pr_warn("pblk %s: " fmt, pblk->disk->disk_name, ##__VA_ARGS__)
#define pblk_debug(pblk, fmt, ...)                      \
        pr_debug("pblk %s: " fmt, pblk->disk->disk_name, ##__VA_ARGS__)

/*
 * pblk ring buffer operations
 */
int pblk_rb_init(struct pblk_rb *rb, unsigned int size, unsigned int threshold,
                 unsigned int seg_sz);
int pblk_rb_may_write_user(struct pblk_rb *rb, struct bio *bio,
                           unsigned int nr_entries, unsigned int *pos);
int pblk_rb_may_write_gc(struct pblk_rb *rb, unsigned int nr_entries,
                         unsigned int *pos);
void pblk_rb_write_entry_user(struct pblk_rb *rb, void *data,
                              struct pblk_w_ctx w_ctx, unsigned int pos);
void pblk_rb_write_entry_gc(struct pblk_rb *rb, void *data,
                            struct pblk_w_ctx w_ctx, struct pblk_line *line,
                            u64 paddr, unsigned int pos);
struct pblk_w_ctx *pblk_rb_w_ctx(struct pblk_rb *rb, unsigned int pos);
void pblk_rb_flush(struct pblk_rb *rb);

void pblk_rb_sync_l2p(struct pblk_rb *rb);
unsigned int pblk_rb_read_to_bio(struct pblk_rb *rb, struct nvm_rq *rqd,
                                 unsigned int pos, unsigned int nr_entries,
                                 unsigned int count);
int pblk_rb_copy_to_bio(struct pblk_rb *rb, struct bio *bio, sector_t lba,
                        struct ppa_addr ppa, int bio_iter, bool advanced_bio);
unsigned int pblk_rb_read_commit(struct pblk_rb *rb, unsigned int entries);

unsigned int pblk_rb_sync_init(struct pblk_rb *rb, unsigned long *flags);
unsigned int pblk_rb_sync_advance(struct pblk_rb *rb, unsigned int nr_entries);
unsigned int pblk_rb_ptr_wrap(struct pblk_rb *rb, unsigned int p,
                              unsigned int nr_entries);
void pblk_rb_sync_end(struct pblk_rb *rb, unsigned long *flags);
unsigned int pblk_rb_flush_point_count(struct pblk_rb *rb);

unsigned int pblk_rb_read_count(struct pblk_rb *rb);
unsigned int pblk_rb_sync_count(struct pblk_rb *rb);
unsigned int pblk_rb_wrap_pos(struct pblk_rb *rb, unsigned int pos);

int pblk_rb_tear_down_check(struct pblk_rb *rb);
int pblk_rb_pos_oob(struct pblk_rb *rb, u64 pos);
void pblk_rb_free(struct pblk_rb *rb);
ssize_t pblk_rb_sysfs(struct pblk_rb *rb, char *buf);

/*
 * pblk core
 */
struct nvm_rq *pblk_alloc_rqd(struct pblk *pblk, int type);
void pblk_free_rqd(struct pblk *pblk, struct nvm_rq *rqd, int type);
int pblk_alloc_rqd_meta(struct pblk *pblk, struct nvm_rq *rqd);
void pblk_free_rqd_meta(struct pblk *pblk, struct nvm_rq *rqd);
void pblk_set_sec_per_write(struct pblk *pblk, int sec_per_write);
int pblk_setup_w_rec_rq(struct pblk *pblk, struct nvm_rq *rqd,
                        struct pblk_c_ctx *c_ctx);
void pblk_discard(struct pblk *pblk, struct bio *bio);
struct nvm_chk_meta *pblk_get_chunk_meta(struct pblk *pblk);
struct nvm_chk_meta *pblk_chunk_get_off(struct pblk *pblk,
                                              struct nvm_chk_meta *lp,
                                              struct ppa_addr ppa);
void pblk_log_write_err(struct pblk *pblk, struct nvm_rq *rqd);
void pblk_log_read_err(struct pblk *pblk, struct nvm_rq *rqd);
int pblk_submit_io(struct pblk *pblk, struct nvm_rq *rqd);
int pblk_submit_io_sync(struct pblk *pblk, struct nvm_rq *rqd);
int pblk_submit_io_sync_sem(struct pblk *pblk, struct nvm_rq *rqd);
int pblk_submit_meta_io(struct pblk *pblk, struct pblk_line *meta_line);
void pblk_check_chunk_state_update(struct pblk *pblk, struct nvm_rq *rqd);
struct bio *pblk_bio_map_addr(struct pblk *pblk, void *data,
                              unsigned int nr_secs, unsigned int len,
                              int alloc_type, gfp_t gfp_mask);
struct pblk_line *pblk_line_get(struct pblk *pblk);
struct pblk_line *pblk_line_get_first_data(struct pblk *pblk);
struct pblk_line *pblk_line_replace_data(struct pblk *pblk);
void pblk_ppa_to_line_put(struct pblk *pblk, struct ppa_addr ppa);
void pblk_rq_to_line_put(struct pblk *pblk, struct nvm_rq *rqd);
int pblk_line_recov_alloc(struct pblk *pblk, struct pblk_line *line);
void pblk_line_recov_close(struct pblk *pblk, struct pblk_line *line);
struct pblk_line *pblk_line_get_data(struct pblk *pblk);
struct pblk_line *pblk_line_get_erase(struct pblk *pblk);
int pblk_line_erase(struct pblk *pblk, struct pblk_line *line);
int pblk_line_is_full(struct pblk_line *line);
void pblk_line_free(struct pblk_line *line);
void pblk_line_close_meta(struct pblk *pblk, struct pblk_line *line);
void pblk_line_close(struct pblk *pblk, struct pblk_line *line);
void pblk_line_close_ws(struct work_struct *work);
void pblk_pipeline_stop(struct pblk *pblk);
void __pblk_pipeline_stop(struct pblk *pblk);
void __pblk_pipeline_flush(struct pblk *pblk);
void pblk_gen_run_ws(struct pblk *pblk, struct pblk_line *line, void *priv,
                     void (*work)(struct work_struct *), gfp_t gfp_mask,
                     struct workqueue_struct *wq);
u64 pblk_line_smeta_start(struct pblk *pblk, struct pblk_line *line);
int pblk_line_smeta_read(struct pblk *pblk, struct pblk_line *line);
int pblk_line_emeta_read(struct pblk *pblk, struct pblk_line *line,
                         void *emeta_buf);
int pblk_blk_erase_async(struct pblk *pblk, struct ppa_addr erase_ppa);
void pblk_line_put(struct kref *ref);
void pblk_line_put_wq(struct kref *ref);
struct list_head *pblk_line_gc_list(struct pblk *pblk, struct pblk_line *line);
u64 pblk_lookup_page(struct pblk *pblk, struct pblk_line *line);
void pblk_dealloc_page(struct pblk *pblk, struct pblk_line *line, int nr_secs);
u64 pblk_alloc_page(struct pblk *pblk, struct pblk_line *line, int nr_secs);
u64 __pblk_alloc_page(struct pblk *pblk, struct pblk_line *line, int nr_secs);
int pblk_calc_secs(struct pblk *pblk, unsigned long secs_avail,
                   unsigned long secs_to_flush);
void pblk_down_rq(struct pblk *pblk, struct ppa_addr ppa,
                  unsigned long *lun_bitmap);
void pblk_down_chunk(struct pblk *pblk, struct ppa_addr ppa);
void pblk_up_chunk(struct pblk *pblk, struct ppa_addr ppa);
void pblk_up_rq(struct pblk *pblk, unsigned long *lun_bitmap);
int pblk_bio_add_pages(struct pblk *pblk, struct bio *bio, gfp_t flags,
                       int nr_pages);
void pblk_bio_free_pages(struct pblk *pblk, struct bio *bio, int off,
                         int nr_pages);
void pblk_map_invalidate(struct pblk *pblk, struct ppa_addr ppa);
void __pblk_map_invalidate(struct pblk *pblk, struct pblk_line *line,
                           u64 paddr);
void pblk_update_map(struct pblk *pblk, sector_t lba, struct ppa_addr ppa);
void pblk_update_map_cache(struct pblk *pblk, sector_t lba,
                           struct ppa_addr ppa);
void pblk_update_map_dev(struct pblk *pblk, sector_t lba,
                         struct ppa_addr ppa, struct ppa_addr entry_line);
int pblk_update_map_gc(struct pblk *pblk, sector_t lba, struct ppa_addr ppa,
                       struct pblk_line *gc_line, u64 paddr);
void pblk_lookup_l2p_rand(struct pblk *pblk, struct ppa_addr *ppas,
                          u64 *lba_list, int nr_secs);
void pblk_lookup_l2p_seq(struct pblk *pblk, struct ppa_addr *ppas,
                         sector_t blba, int nr_secs);

/*
 * pblk user I/O write path
 */
int pblk_write_to_cache(struct pblk *pblk, struct bio *bio,
                        unsigned long flags);
int pblk_write_gc_to_cache(struct pblk *pblk, struct pblk_gc_rq *gc_rq);

/*
 * pblk map
 */
void pblk_map_erase_rq(struct pblk *pblk, struct nvm_rq *rqd,
                       unsigned int sentry, unsigned long *lun_bitmap,
                       unsigned int valid_secs, struct ppa_addr *erase_ppa);
void pblk_map_rq(struct pblk *pblk, struct nvm_rq *rqd, unsigned int sentry,
                 unsigned long *lun_bitmap, unsigned int valid_secs,
                 unsigned int off);

/*
 * pblk write thread
 */
int pblk_write_ts(void *data);
void pblk_write_timer_fn(struct timer_list *t);
void pblk_write_should_kick(struct pblk *pblk);
void pblk_write_kick(struct pblk *pblk);

/*
 * pblk read path
 */
extern struct bio_set pblk_bio_set;
int pblk_submit_read(struct pblk *pblk, struct bio *bio);
int pblk_submit_read_gc(struct pblk *pblk, struct pblk_gc_rq *gc_rq);
/*
 * pblk recovery
 */
struct pblk_line *pblk_recov_l2p(struct pblk *pblk);
int pblk_recov_pad(struct pblk *pblk);
int pblk_recov_check_emeta(struct pblk *pblk, struct line_emeta *emeta);

/*
 * pblk gc
 */
#define PBLK_GC_MAX_READERS 8   /* Max number of outstanding GC reader jobs */
#define PBLK_GC_RQ_QD 128       /* Queue depth for inflight GC requests */
#define PBLK_GC_L_QD 4          /* Queue depth for inflight GC lines */
#define PBLK_GC_RSV_LINE 1      /* Reserved lines for GC */

int pblk_gc_init(struct pblk *pblk);
void pblk_gc_exit(struct pblk *pblk, bool graceful);
void pblk_gc_should_start(struct pblk *pblk);
void pblk_gc_should_stop(struct pblk *pblk);
void pblk_gc_should_kick(struct pblk *pblk);
void pblk_gc_free_full_lines(struct pblk *pblk);
void pblk_gc_sysfs_state_show(struct pblk *pblk, int *gc_enabled,
                              int *gc_active);
int pblk_gc_sysfs_force(struct pblk *pblk, int force);

/*
 * pblk rate limiter
 */
void pblk_rl_init(struct pblk_rl *rl, int budget);
void pblk_rl_free(struct pblk_rl *rl);
void pblk_rl_update_rates(struct pblk_rl *rl);
int pblk_rl_high_thrs(struct pblk_rl *rl);
unsigned long pblk_rl_nr_free_blks(struct pblk_rl *rl);
unsigned long pblk_rl_nr_user_free_blks(struct pblk_rl *rl);
int pblk_rl_user_may_insert(struct pblk_rl *rl, int nr_entries);
void pblk_rl_inserted(struct pblk_rl *rl, int nr_entries);
void pblk_rl_user_in(struct pblk_rl *rl, int nr_entries);
int pblk_rl_gc_may_insert(struct pblk_rl *rl, int nr_entries);
void pblk_rl_gc_in(struct pblk_rl *rl, int nr_entries);
void pblk_rl_out(struct pblk_rl *rl, int nr_user, int nr_gc);
int pblk_rl_max_io(struct pblk_rl *rl);
void pblk_rl_free_lines_inc(struct pblk_rl *rl, struct pblk_line *line);
void pblk_rl_free_lines_dec(struct pblk_rl *rl, struct pblk_line *line,
                            bool used);
int pblk_rl_is_limit(struct pblk_rl *rl);

void pblk_rl_werr_line_in(struct pblk_rl *rl);
void pblk_rl_werr_line_out(struct pblk_rl *rl);

/*
 * pblk sysfs
 */
int pblk_sysfs_init(struct gendisk *tdisk);
void pblk_sysfs_exit(struct gendisk *tdisk);

static inline void *pblk_malloc(size_t size, int type, gfp_t flags)
{
        if (type == PBLK_KMALLOC_META)
                return kmalloc(size, flags);
        return vmalloc(size);
}

static inline void pblk_mfree(void *ptr, int type)
{
        if (type == PBLK_KMALLOC_META)
                kfree(ptr);
        else
                vfree(ptr);
}

static inline struct nvm_rq *nvm_rq_from_c_ctx(void *c_ctx)
{
        return c_ctx - sizeof(struct nvm_rq);
}

static inline void *emeta_to_bb(struct line_emeta *emeta)
{
        return emeta->bb_bitmap;
}

static inline void *emeta_to_wa(struct pblk_line_meta *lm,
                                struct line_emeta *emeta)
{
        return emeta->bb_bitmap + lm->blk_bitmap_len;
}

static inline void *emeta_to_lbas(struct pblk *pblk, struct line_emeta *emeta)
{
        return ((void *)emeta + pblk->lm.emeta_len[1]);
}

static inline void *emeta_to_vsc(struct pblk *pblk, struct line_emeta *emeta)
{
        return (emeta_to_lbas(pblk, emeta) + pblk->lm.emeta_len[2]);
}

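/*
 * Editor's note (illustrative, not part of the upstream header): the
 * emeta_to_*() helpers above reflect how the emeta buffer is packed back
 * to back, matching the emeta_len[] split documented in pblk_line_meta:
 *
 *      offset 0                           : struct line_emeta, bb_bitmap
 *                                           and struct wa_counters
 *      offset emeta_len[1]                : lba list (__le64 per sector)
 *      offset emeta_len[1] + emeta_len[2] : vsc list (__le32 per line)
 */
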
static inline int pblk_line_vsc(struct pblk_line *line)
{
        return le32_to_cpu(*line->vsc);
}

static inline int pblk_ppa_to_line_id(struct ppa_addr p)
{
        return p.a.blk;
}

static inline struct pblk_line *pblk_ppa_to_line(struct pblk *pblk,
                                                 struct ppa_addr p)
{
        return &pblk->lines[pblk_ppa_to_line_id(p)];
}

static inline int pblk_ppa_to_pos(struct nvm_geo *geo, struct ppa_addr p)
{
        return p.a.lun * geo->num_ch + p.a.ch;
}

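/*
 * Editor's note (worked example, not part of the upstream header): with a
 * geometry of, say, num_ch = 8, a PPA on lun 2, ch 3 maps to parallel-unit
 * position 2 * 8 + 3 = 19 in pblk_ppa_to_pos() above.
 */
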
static inline struct ppa_addr addr_to_gen_ppa(struct pblk *pblk, u64 paddr,
                                              u64 line_id)
{
        struct nvm_tgt_dev *dev = pblk->dev;
        struct nvm_geo *geo = &dev->geo;
        struct ppa_addr ppa;

        if (geo->version == NVM_OCSSD_SPEC_12) {
                struct nvm_addrf_12 *ppaf = (struct nvm_addrf_12 *)&pblk->addrf;

                ppa.ppa = 0;
                ppa.g.blk = line_id;
                ppa.g.pg = (paddr & ppaf->pg_mask) >> ppaf->pg_offset;
                ppa.g.lun = (paddr & ppaf->lun_mask) >> ppaf->lun_offset;
                ppa.g.ch = (paddr & ppaf->ch_mask) >> ppaf->ch_offset;
                ppa.g.pl = (paddr & ppaf->pln_mask) >> ppaf->pln_offset;
                ppa.g.sec = (paddr & ppaf->sec_mask) >> ppaf->sec_offset;
        } else {
                struct pblk_addrf *uaddrf = &pblk->uaddrf;
                int secs, chnls, luns;

                ppa.ppa = 0;

                ppa.m.chk = line_id;

                paddr = div_u64_rem(paddr, uaddrf->sec_stripe, &secs);
                ppa.m.sec = secs;

                paddr = div_u64_rem(paddr, uaddrf->ch_stripe, &chnls);
                ppa.m.grp = chnls;

                paddr = div_u64_rem(paddr, uaddrf->lun_stripe, &luns);
                ppa.m.pu = luns;

                ppa.m.sec += uaddrf->sec_stripe * paddr;
        }

        return ppa;
}

static inline struct nvm_chk_meta *pblk_dev_ppa_to_chunk(struct pblk *pblk,
                                                        struct ppa_addr p)
{
        struct nvm_tgt_dev *dev = pblk->dev;
        struct nvm_geo *geo = &dev->geo;
        struct pblk_line *line = pblk_ppa_to_line(pblk, p);
        int pos = pblk_ppa_to_pos(geo, p);

        return &line->chks[pos];
}

static inline u64 pblk_dev_ppa_to_chunk_addr(struct pblk *pblk,
                                                        struct ppa_addr p)
{
        struct nvm_tgt_dev *dev = pblk->dev;

        return dev_to_chunk_addr(dev->parent, &pblk->addrf, p);
}

static inline u64 pblk_dev_ppa_to_line_addr(struct pblk *pblk,
                                                        struct ppa_addr p)
{
        struct nvm_tgt_dev *dev = pblk->dev;
        struct nvm_geo *geo = &dev->geo;
        u64 paddr;

        if (geo->version == NVM_OCSSD_SPEC_12) {
                struct nvm_addrf_12 *ppaf = (struct nvm_addrf_12 *)&pblk->addrf;

                paddr = (u64)p.g.ch << ppaf->ch_offset;
                paddr |= (u64)p.g.lun << ppaf->lun_offset;
                paddr |= (u64)p.g.pg << ppaf->pg_offset;
                paddr |= (u64)p.g.pl << ppaf->pln_offset;
                paddr |= (u64)p.g.sec << ppaf->sec_offset;
        } else {
                struct pblk_addrf *uaddrf = &pblk->uaddrf;
                u64 secs = p.m.sec;
                int sec_stripe;

                paddr = (u64)p.m.grp * uaddrf->sec_stripe;
                paddr += (u64)p.m.pu * uaddrf->sec_lun_stripe;

                secs = div_u64_rem(secs, uaddrf->sec_stripe, &sec_stripe);
                paddr += secs * uaddrf->sec_ws_stripe;
                paddr += sec_stripe;
        }

        return paddr;
}

static inline struct ppa_addr pblk_ppa32_to_ppa64(struct pblk *pblk, u32 ppa32)
{
        struct nvm_tgt_dev *dev = pblk->dev;

        return nvm_ppa32_to_ppa64(dev->parent, &pblk->addrf, ppa32);
}

static inline u32 pblk_ppa64_to_ppa32(struct pblk *pblk, struct ppa_addr ppa64)
{
        struct nvm_tgt_dev *dev = pblk->dev;

        return nvm_ppa64_to_ppa32(dev->parent, &pblk->addrf, ppa64);
}

static inline struct ppa_addr pblk_trans_map_get(struct pblk *pblk,
                                                                sector_t lba)
{
        struct ppa_addr ppa;

        if (pblk->addrf_len < 32) {
                u32 *map = (u32 *)pblk->trans_map;

                ppa = pblk_ppa32_to_ppa64(pblk, map[lba]);
        } else {
                struct ppa_addr *map = (struct ppa_addr *)pblk->trans_map;

                ppa = map[lba];
        }

        return ppa;
}

static inline void pblk_trans_map_set(struct pblk *pblk, sector_t lba,
                                                struct ppa_addr ppa)
{
        if (pblk->addrf_len < 32) {
                u32 *map = (u32 *)pblk->trans_map;

                map[lba] = pblk_ppa64_to_ppa32(pblk, ppa);
        } else {
                u64 *map = (u64 *)pblk->trans_map;

                map[lba] = ppa.ppa;
        }
}

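/*
 * Editor's note (illustrative sketch, not part of the upstream header):
 * the two helpers above imply that the L2P table consumes 4 bytes per
 * logical sector when the device address format fits in 32 bits and 8
 * bytes otherwise, e.g.:
 */
#if 0   /* example only - hypothetical helper, not used by pblk */
static inline size_t pblk_trans_map_size_example(struct pblk *pblk,
                                                 sector_t nr_lbas)
{
        size_t entry_size = (pblk->addrf_len < 32) ? sizeof(u32)
                                                   : sizeof(struct ppa_addr);

        return (size_t)nr_lbas * entry_size;
}
#endif
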
static inline int pblk_ppa_empty(struct ppa_addr ppa_addr)
{
        return (ppa_addr.ppa == ADDR_EMPTY);
}

static inline void pblk_ppa_set_empty(struct ppa_addr *ppa_addr)
{
        ppa_addr->ppa = ADDR_EMPTY;
}

static inline bool pblk_ppa_comp(struct ppa_addr lppa, struct ppa_addr rppa)
{
        return (lppa.ppa == rppa.ppa);
}

static inline int pblk_addr_in_cache(struct ppa_addr ppa)
{
        return (ppa.ppa != ADDR_EMPTY && ppa.c.is_cached);
}

static inline int pblk_addr_to_cacheline(struct ppa_addr ppa)
{
        return ppa.c.line;
}

static inline struct ppa_addr pblk_cacheline_to_addr(int addr)
{
        struct ppa_addr p;

        p.c.line = addr;
        p.c.is_cached = 1;

        return p;
}

static inline u32 pblk_calc_meta_header_crc(struct pblk *pblk,
                                            struct line_header *header)
{
        u32 crc = ~(u32)0;

        crc = crc32_le(crc, (unsigned char *)header + sizeof(crc),
                                sizeof(struct line_header) - sizeof(crc));

        return crc;
}

static inline u32 pblk_calc_smeta_crc(struct pblk *pblk,
                                      struct line_smeta *smeta)
{
        struct pblk_line_meta *lm = &pblk->lm;
        u32 crc = ~(u32)0;

        crc = crc32_le(crc, (unsigned char *)smeta +
                                sizeof(struct line_header) + sizeof(crc),
                                lm->smeta_len -
                                sizeof(struct line_header) - sizeof(crc));

        return crc;
}

static inline u32 pblk_calc_emeta_crc(struct pblk *pblk,
                                      struct line_emeta *emeta)
{
        struct pblk_line_meta *lm = &pblk->lm;
        u32 crc = ~(u32)0;

        crc = crc32_le(crc, (unsigned char *)emeta +
                                sizeof(struct line_header) + sizeof(crc),
                                lm->emeta_len[0] -
                                sizeof(struct line_header) - sizeof(crc));

        return crc;
}

static inline int pblk_io_aligned(struct pblk *pblk, int nr_secs)
{
        return !(nr_secs % pblk->min_write_pgs);
}

#ifdef CONFIG_NVM_PBLK_DEBUG
static inline void print_ppa(struct pblk *pblk, struct ppa_addr *p,
                             char *msg, int error)
{
        struct nvm_geo *geo = &pblk->dev->geo;

        if (p->c.is_cached) {
                pblk_err(pblk, "ppa: (%s: %x) cache line: %llu\n",
                                msg, error, (u64)p->c.line);
        } else if (geo->version == NVM_OCSSD_SPEC_12) {
                pblk_err(pblk, "ppa: (%s: %x):ch:%d,lun:%d,blk:%d,pg:%d,pl:%d,sec:%d\n",
                        msg, error,
                        p->g.ch, p->g.lun, p->g.blk,
                        p->g.pg, p->g.pl, p->g.sec);
        } else {
                pblk_err(pblk, "ppa: (%s: %x):ch:%d,lun:%d,chk:%d,sec:%d\n",
                        msg, error,
                        p->m.grp, p->m.pu, p->m.chk, p->m.sec);
        }
}

static inline void pblk_print_failed_rqd(struct pblk *pblk, struct nvm_rq *rqd,
                                         int error)
{
        int bit = -1;

        if (rqd->nr_ppas == 1) {
                print_ppa(pblk, &rqd->ppa_addr, "rqd", error);
                return;
        }

        while ((bit = find_next_bit((void *)&rqd->ppa_status, rqd->nr_ppas,
                                                bit + 1)) < rqd->nr_ppas) {
                print_ppa(pblk, &rqd->ppa_list[bit], "rqd", error);
        }

        pblk_err(pblk, "error:%d, ppa_status:%llx\n", error, rqd->ppa_status);
}

static inline int pblk_boundary_ppa_checks(struct nvm_tgt_dev *tgt_dev,
                                       struct ppa_addr *ppas, int nr_ppas)
{
        struct nvm_geo *geo = &tgt_dev->geo;
        struct ppa_addr *ppa;
        int i;

        for (i = 0; i < nr_ppas; i++) {
                ppa = &ppas[i];

                if (geo->version == NVM_OCSSD_SPEC_12) {
                        if (!ppa->c.is_cached &&
                                        ppa->g.ch < geo->num_ch &&
                                        ppa->g.lun < geo->num_lun &&
                                        ppa->g.pl < geo->num_pln &&
                                        ppa->g.blk < geo->num_chk &&
                                        ppa->g.pg < geo->num_pg &&
                                        ppa->g.sec < geo->ws_min)
                                continue;
                } else {
                        if (!ppa->c.is_cached &&
                                        ppa->m.grp < geo->num_ch &&
                                        ppa->m.pu < geo->num_lun &&
                                        ppa->m.chk < geo->num_chk &&
                                        ppa->m.sec < geo->clba)
                                continue;
                }

                print_ppa(tgt_dev->q->queuedata, ppa, "boundary", i);

                return 1;
        }
        return 0;
}

static inline int pblk_check_io(struct pblk *pblk, struct nvm_rq *rqd)
{
        struct nvm_tgt_dev *dev = pblk->dev;
        struct ppa_addr *ppa_list = nvm_rq_to_ppa_list(rqd);

        if (pblk_boundary_ppa_checks(dev, ppa_list, rqd->nr_ppas)) {
                WARN_ON(1);
                return -EINVAL;
        }

        if (rqd->opcode == NVM_OP_PWRITE) {
                struct pblk_line *line;
                int i;

                for (i = 0; i < rqd->nr_ppas; i++) {
                        line = pblk_ppa_to_line(pblk, ppa_list[i]);

                        spin_lock(&line->lock);
                        if (line->state != PBLK_LINESTATE_OPEN) {
                                pblk_err(pblk, "bad ppa: line:%d,state:%d\n",
                                                        line->id, line->state);
                                WARN_ON(1);
                                spin_unlock(&line->lock);
                                return -EINVAL;
                        }
                        spin_unlock(&line->lock);
                }
        }

        return 0;
}
#endif

static inline int pblk_boundary_paddr_checks(struct pblk *pblk, u64 paddr)
{
        struct pblk_line_meta *lm = &pblk->lm;

        if (paddr > lm->sec_per_line)
                return 1;

        return 0;
}

static inline unsigned int pblk_get_bi_idx(struct bio *bio)
{
        return bio->bi_iter.bi_idx;
}

static inline sector_t pblk_get_lba(struct bio *bio)
{
        return bio->bi_iter.bi_sector / NR_PHY_IN_LOG;
}

static inline unsigned int pblk_get_secs(struct bio *bio)
{
        return bio->bi_iter.bi_size / PBLK_EXPOSED_PAGE_SIZE;
}

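/*
 * Editor's note (worked example, not part of the upstream header): a bio
 * starting at 512-byte sector 2048 with bi_size = 16384 bytes maps to
 * lba = 2048 / NR_PHY_IN_LOG = 2048 / 8 = 256 and
 * secs = 16384 / PBLK_EXPOSED_PAGE_SIZE = 16384 / 4096 = 4 exposed
 * 4 KB sectors in the two helpers above.
 */
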
static inline void pblk_setup_uuid(struct pblk *pblk)
{
        uuid_le uuid;

        uuid_le_gen(&uuid);
        memcpy(pblk->instance_uuid, uuid.b, 16);
}

static inline char *pblk_disk_name(struct pblk *pblk)
{
        struct gendisk *disk = pblk->disk;

        return disk->disk_name;
}
#endif /* PBLK_H_ */