linux/drivers/lightnvm/pblk.h
   1/*
   2 * Copyright (C) 2015 IT University of Copenhagen (rrpc.h)
   3 * Copyright (C) 2016 CNEX Labs
   4 * Initial release: Matias Bjorling <matias@cnexlabs.com>
   5 * Write buffering: Javier Gonzalez <javier@cnexlabs.com>
   6 *
   7 * This program is free software; you can redistribute it and/or
   8 * modify it under the terms of the GNU General Public License version
   9 * 2 as published by the Free Software Foundation.
  10 *
  11 * This program is distributed in the hope that it will be useful, but
  12 * WITHOUT ANY WARRANTY; without even the implied warranty of
  13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
  14 * General Public License for more details.
  15 *
  16 * Implementation of a Physical Block-device target for Open-channel SSDs.
  17 *
  18 */
  19
  20#ifndef PBLK_H_
  21#define PBLK_H_
  22
  23#include <linux/blkdev.h>
  24#include <linux/blk-mq.h>
  25#include <linux/bio.h>
  26#include <linux/module.h>
  27#include <linux/kthread.h>
  28#include <linux/vmalloc.h>
  29#include <linux/crc32.h>
  30#include <linux/uuid.h>
  31
  32#include <linux/lightnvm.h>
  33
  34/* Run only GC if less than 1/X blocks are free */
  35#define GC_LIMIT_INVERSE 5
  36#define GC_TIME_MSECS 1000
  37
  38#define PBLK_SECTOR (512)
  39#define PBLK_EXPOSED_PAGE_SIZE (4096)
  40#define PBLK_MAX_REQ_ADDRS (64)
  41#define PBLK_MAX_REQ_ADDRS_PW (6)
  42
  43#define PBLK_NR_CLOSE_JOBS (4)
  44
  45#define PBLK_CACHE_NAME_LEN (DISK_NAME_LEN + 16)
  46
  47#define PBLK_COMMAND_TIMEOUT_MS 30000
  48
  49/* Max 512 LUNs per device */
  50#define PBLK_MAX_LUNS_BITMAP (4)
  51
  52#define NR_PHY_IN_LOG (PBLK_EXPOSED_PAGE_SIZE / PBLK_SECTOR)
  53
  54/* Static pool sizes */
  55#define PBLK_GEN_WS_POOL_SIZE (2)
  56
   57#define PBLK_DEFAULT_OP (11)    /* Default over-provisioning percentage */
  58
  59enum {
  60        PBLK_READ               = READ,
  61        PBLK_WRITE              = WRITE,/* Write from write buffer */
  62        PBLK_WRITE_INT,                 /* Internal write - no write buffer */
  63        PBLK_READ_RECOV,                /* Recovery read - errors allowed */
  64        PBLK_ERASE,
  65};
  66
  67enum {
  68        /* IO Types */
  69        PBLK_IOTYPE_USER        = 1 << 0,
  70        PBLK_IOTYPE_GC          = 1 << 1,
  71
  72        /* Write buffer flags */
  73        PBLK_FLUSH_ENTRY        = 1 << 2,
  74        PBLK_WRITTEN_DATA       = 1 << 3,
  75        PBLK_SUBMITTED_ENTRY    = 1 << 4,
  76        PBLK_WRITABLE_ENTRY     = 1 << 5,
  77};
  78
  79enum {
  80        PBLK_BLK_ST_OPEN =      0x1,
  81        PBLK_BLK_ST_CLOSED =    0x2,
  82};
  83
  84struct pblk_sec_meta {
  85        u64 reserved;
  86        __le64 lba;
  87};
  88
  89/* The number of GC lists and the rate-limiter states go together. This way the
  90 * rate-limiter can dictate how much GC is needed based on resource utilization.
  91 */
  92#define PBLK_GC_NR_LISTS 4
  93
  94enum {
  95        PBLK_RL_OFF = 0,
  96        PBLK_RL_WERR = 1,
  97        PBLK_RL_HIGH = 2,
  98        PBLK_RL_MID = 3,
  99        PBLK_RL_LOW = 4
 100};
 101
 102#define pblk_dma_meta_size (sizeof(struct pblk_sec_meta) * PBLK_MAX_REQ_ADDRS)
 103#define pblk_dma_ppa_size (sizeof(u64) * PBLK_MAX_REQ_ADDRS)
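
/*
 * Illustrative sketch (assumption, not part of the on-media format): every
 * sector in a request carries one struct pblk_sec_meta in the out-of-band
 * metadata DMA region (pblk_dma_meta_size bytes per request), and the lba
 * stored there for sector 'i' can be read back as shown here.
 */
static inline u64 pblk_oob_lba_example(struct pblk_sec_meta *meta_list, int i)
{
	/* lba is stored little-endian on the device side */
	return le64_to_cpu(meta_list[i].lba);
}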
 104
 105/* write buffer completion context */
 106struct pblk_c_ctx {
 107        struct list_head list;          /* Head for out-of-order completion */
 108
 109        unsigned long *lun_bitmap;      /* Luns used on current request */
 110        unsigned int sentry;
 111        unsigned int nr_valid;
 112        unsigned int nr_padded;
 113};
 114
 115/* read context */
 116struct pblk_g_ctx {
 117        void *private;
 118        unsigned long start_time;
 119        u64 lba;
 120};
 121
 122/* Pad context */
 123struct pblk_pad_rq {
 124        struct pblk *pblk;
 125        struct completion wait;
 126        struct kref ref;
 127};
 128
 129/* Recovery context */
 130struct pblk_rec_ctx {
 131        struct pblk *pblk;
 132        struct nvm_rq *rqd;
 133        struct work_struct ws_rec;
 134};
 135
 136/* Write context */
 137struct pblk_w_ctx {
 138        struct bio_list bios;           /* Original bios - used for completion
 139                                         * in REQ_FUA, REQ_FLUSH case
 140                                         */
  141        u64 lba;                        /* Logical addr. associated with entry */
  142        struct ppa_addr ppa;            /* Physical addr. associated with entry */
 143        int flags;                      /* Write context flags */
 144};
 145
 146struct pblk_rb_entry {
 147        struct ppa_addr cacheline;      /* Cacheline for this entry */
 148        void *data;                     /* Pointer to data on this entry */
 149        struct pblk_w_ctx w_ctx;        /* Context for this entry */
 150        struct list_head index;         /* List head to enable indexes */
 151};
 152
 153#define EMPTY_ENTRY (~0U)
 154
 155struct pblk_rb_pages {
 156        struct page *pages;
 157        int order;
 158        struct list_head list;
 159};
 160
 161struct pblk_rb {
 162        struct pblk_rb_entry *entries;  /* Ring buffer entries */
 163        unsigned int mem;               /* Write offset - points to next
 164                                         * writable entry in memory
 165                                         */
 166        unsigned int subm;              /* Read offset - points to last entry
 167                                         * that has been submitted to the media
 168                                         * to be persisted
 169                                         */
 170        unsigned int sync;              /* Synced - backpointer that signals
 171                                         * the last submitted entry that has
 172                                         * been successfully persisted to media
 173                                         */
  174        unsigned int flush_point;       /* Flush point - last entry that must be
 175                                         * flushed to the media. Used with
 176                                         * REQ_FLUSH and REQ_FUA
 177                                         */
 178        unsigned int l2p_update;        /* l2p update point - next entry for
 179                                         * which l2p mapping will be updated to
 180                                         * contain a device ppa address (instead
  181                                         * of a cacheline)
 182                                         */
 183        unsigned int nr_entries;        /* Number of entries in write buffer -
 184                                         * must be a power of two
 185                                         */
 186        unsigned int seg_size;          /* Size of the data segments being
 187                                         * stored on each entry. Typically this
 188                                         * will be 4KB
 189                                         */
 190
 191        struct list_head pages;         /* List of data pages */
 192
 193        spinlock_t w_lock;              /* Write lock */
 194        spinlock_t s_lock;              /* Sync lock */
 195
 196#ifdef CONFIG_NVM_DEBUG
 197        atomic_t inflight_flush_point;  /* Not served REQ_FLUSH | REQ_FUA */
 198#endif
 199};
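
/*
 * Illustrative sketch (assumption): because nr_entries is a power of two,
 * the distance between any two ring buffer pointers (e.g. entries written
 * at 'mem' but not yet submitted at 'subm') can be computed with a mask
 * instead of a modulo. The real accounting lives in pblk-rb.c; this helper
 * only demonstrates the wrap-around arithmetic.
 */
static inline unsigned int pblk_rb_dist_example(unsigned int head,
						unsigned int tail,
						unsigned int nr_entries)
{
	return (head - tail) & (nr_entries - 1);
}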
 200
 201#define PBLK_RECOVERY_SECTORS 16
 202
 203struct pblk_lun {
 204        struct ppa_addr bppa;
 205        struct semaphore wr_sem;
 206};
 207
 208struct pblk_gc_rq {
 209        struct pblk_line *line;
 210        void *data;
 211        u64 paddr_list[PBLK_MAX_REQ_ADDRS];
 212        u64 lba_list[PBLK_MAX_REQ_ADDRS];
 213        int nr_secs;
 214        int secs_to_gc;
 215        struct list_head list;
 216};
 217
 218struct pblk_gc {
 219        /* These states are not protected by a lock since (i) they are in the
 220         * fast path, and (ii) they are not critical.
 221         */
 222        int gc_active;
 223        int gc_enabled;
 224        int gc_forced;
 225
 226        struct task_struct *gc_ts;
 227        struct task_struct *gc_writer_ts;
 228        struct task_struct *gc_reader_ts;
 229
 230        struct workqueue_struct *gc_line_reader_wq;
 231        struct workqueue_struct *gc_reader_wq;
 232
 233        struct timer_list gc_timer;
 234
 235        struct semaphore gc_sem;
 236        atomic_t read_inflight_gc; /* Number of lines with inflight GC reads */
 237        atomic_t pipeline_gc;      /* Number of lines in the GC pipeline -
 238                                    * started reads to finished writes
 239                                    */
 240        int w_entries;
 241
 242        struct list_head w_list;
 243        struct list_head r_list;
 244
 245        spinlock_t lock;
 246        spinlock_t w_lock;
 247        spinlock_t r_lock;
 248};
 249
 250struct pblk_rl {
 251        unsigned int high;      /* Upper threshold for rate limiter (free run -
  252                                 * user I/O rate limiter)
 253                                 */
 254        unsigned int high_pw;   /* High rounded up as a power of 2 */
 255
  256#define PBLK_USER_HIGH_THRS 8   /* Begin write limit at 12.5% available blks */
 257#define PBLK_USER_LOW_THRS 10   /* Aggressive GC at 10% available blocks */
 258
  259        int rb_windows_pw;      /* Number of rate windows in the write buffer
  260                                 * given as a power-of-2. This guarantees that
  261                                 * when user I/O is being rate limited, enough
  262                                 * space is reserved for the GC to place its
  263                                 * payload. A window is pblk->max_write_pgs in
  264                                 * size, which for NVMe is 64 pages, i.e.,
  265                                 * 256KB.
  266                                 */
 267        int rb_budget;          /* Total number of entries available for I/O */
 268        int rb_user_max;        /* Max buffer entries available for user I/O */
 269        int rb_gc_max;          /* Max buffer entries available for GC I/O */
 270        int rb_gc_rsv;          /* Reserved buffer entries for GC I/O */
 271        int rb_state;           /* Rate-limiter current state */
  272        int rb_max_io;          /* Maximum size for an I/O given the config */
 273
 274        atomic_t rb_user_cnt;   /* User I/O buffer counter */
 275        atomic_t rb_gc_cnt;     /* GC I/O buffer counter */
 276        atomic_t rb_space;      /* Space limit in case of reaching capacity */
 277
 278        int rsv_blocks;         /* Reserved blocks for GC */
 279
 280        int rb_user_active;
 281        int rb_gc_active;
 282
  283        atomic_t werr_lines;    /* Number of write error lines that need GC */
 284
 285        struct timer_list u_timer;
 286
 287        unsigned long long nr_secs;
 288        unsigned long total_blocks;
 289
 290        atomic_t free_blocks;           /* Total number of free blocks (+ OP) */
 291        atomic_t free_user_blocks;      /* Number of user free blocks (no OP) */
 292};
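
/*
 * Illustrative sketch (assumption, mirroring the intent of pblk_rl_init()
 * in pblk-rl.c): the thresholds above are plain fractions of the block
 * count, e.g. with PBLK_USER_HIGH_THRS == 8 user I/O starts being limited
 * once fewer than 1/8 (12.5%) of the blocks are free.
 */
static inline unsigned int pblk_rl_high_thrs_example(unsigned long total_blocks)
{
	return total_blocks / PBLK_USER_HIGH_THRS;
}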
 293
 294#define PBLK_LINE_EMPTY (~0U)
 295
 296enum {
 297        /* Line Types */
 298        PBLK_LINETYPE_FREE = 0,
 299        PBLK_LINETYPE_LOG = 1,
 300        PBLK_LINETYPE_DATA = 2,
 301
 302        /* Line state */
 303        PBLK_LINESTATE_NEW = 9,
 304        PBLK_LINESTATE_FREE = 10,
 305        PBLK_LINESTATE_OPEN = 11,
 306        PBLK_LINESTATE_CLOSED = 12,
 307        PBLK_LINESTATE_GC = 13,
 308        PBLK_LINESTATE_BAD = 14,
 309        PBLK_LINESTATE_CORRUPT = 15,
 310
 311        /* GC group */
 312        PBLK_LINEGC_NONE = 20,
 313        PBLK_LINEGC_EMPTY = 21,
 314        PBLK_LINEGC_LOW = 22,
 315        PBLK_LINEGC_MID = 23,
 316        PBLK_LINEGC_HIGH = 24,
 317        PBLK_LINEGC_FULL = 25,
 318        PBLK_LINEGC_WERR = 26
 319};
 320
 321#define PBLK_MAGIC 0x70626c6b /*pblk*/
 322
 323/* emeta/smeta persistent storage format versions:
  324 * Changes in the major version require offline migration.
 325 * Changes in minor version are handled automatically during
 326 * recovery.
 327 */
 328
 329#define SMETA_VERSION_MAJOR (0)
 330#define SMETA_VERSION_MINOR (1)
 331
 332#define EMETA_VERSION_MAJOR (0)
 333#define EMETA_VERSION_MINOR (2)
 334
 335struct line_header {
 336        __le32 crc;
 337        __le32 identifier;      /* pblk identifier */
 338        __u8 uuid[16];          /* instance uuid */
 339        __le16 type;            /* line type */
 340        __u8 version_major;     /* version major */
 341        __u8 version_minor;     /* version minor */
 342        __le32 id;              /* line id for current line */
 343};
 344
 345struct line_smeta {
 346        struct line_header header;
 347
 348        __le32 crc;             /* Full structure including struct crc */
 349        /* Previous line metadata */
 350        __le32 prev_id;         /* Line id for previous line */
 351
 352        /* Current line metadata */
 353        __le64 seq_nr;          /* Sequence number for current line */
 354
 355        /* Active writers */
 356        __le32 window_wr_lun;   /* Number of parallel LUNs to write */
 357
 358        __le32 rsvd[2];
 359
 360        __le64 lun_bitmap[];
 361};
 362
 363
 364/*
 365 * Metadata layout in media:
 366 *      First sector:
 367 *              1. struct line_emeta
 368 *              2. bad block bitmap (u64 * window_wr_lun)
 369 *              3. write amplification counters
 370 *      Mid sectors (start at lbas_sector):
  371 *              4. nr_lbas (u64) forming lba list
  372 *      Last sectors (start at vsc_sector):
  373 *              5. u32 valid sector count (vsc) for all lines (~0U: free line)
 374 */
 375struct line_emeta {
 376        struct line_header header;
 377
 378        __le32 crc;             /* Full structure including struct crc */
 379
 380        /* Previous line metadata */
 381        __le32 prev_id;         /* Line id for prev line */
 382
 383        /* Current line metadata */
 384        __le64 seq_nr;          /* Sequence number for current line */
 385
 386        /* Active writers */
 387        __le32 window_wr_lun;   /* Number of parallel LUNs to write */
 388
 389        /* Bookkeeping for recovery */
 390        __le32 next_id;         /* Line id for next line */
 391        __le64 nr_lbas;         /* Number of lbas mapped in line */
 392        __le64 nr_valid_lbas;   /* Number of valid lbas mapped in line */
 393        __le64 bb_bitmap[];     /* Updated bad block bitmap for line */
 394};
 395
 396
 397/* Write amplification counters stored on media */
 398struct wa_counters {
 399        __le64 user;            /* Number of user written sectors */
  400        __le64 gc;              /* Number of sectors written by GC */
 401        __le64 pad;             /* Number of padded sectors */
 402};
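
/*
 * Illustrative sketch (assumption): per the layout comment above, the first
 * emeta region - lm->emeta_len[1] - covers the header structure, the
 * per-line bad block bitmap and the write amplification counters (before
 * any padding up to a sector boundary). emeta_to_lbas() below relies on
 * this length to skip straight to the lba list.
 */
static inline size_t pblk_emeta_hdr_len_example(unsigned int blk_bitmap_len)
{
	return sizeof(struct line_emeta) + blk_bitmap_len +
	       sizeof(struct wa_counters);
}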
 403
 404struct pblk_emeta {
 405        struct line_emeta *buf;         /* emeta buffer in media format */
 406        int mem;                        /* Write offset - points to next
 407                                         * writable entry in memory
 408                                         */
 409        atomic_t sync;                  /* Synced - backpointer that signals the
 410                                         * last entry that has been successfully
 411                                         * persisted to media
 412                                         */
 413        unsigned int nr_entries;        /* Number of emeta entries */
 414};
 415
 416struct pblk_smeta {
 417        struct line_smeta *buf;         /* smeta buffer in persistent format */
 418};
 419
 420struct pblk_w_err_gc {
 421        int has_write_err;
 422        __le64 *lba_list;
 423};
 424
 425struct pblk_line {
 426        struct pblk *pblk;
 427        unsigned int id;                /* Line number corresponds to the
 428                                         * block line
 429                                         */
 430        unsigned int seq_nr;            /* Unique line sequence number */
 431
 432        int state;                      /* PBLK_LINESTATE_X */
 433        int type;                       /* PBLK_LINETYPE_X */
 434        int gc_group;                   /* PBLK_LINEGC_X */
 435        struct list_head list;          /* Free, GC lists */
 436
 437        unsigned long *lun_bitmap;      /* Bitmap for LUNs mapped in line */
 438
 439        struct nvm_chk_meta *chks;      /* Chunks forming line */
 440
 441        struct pblk_smeta *smeta;       /* Start metadata */
  442        struct pblk_emeta *emeta;       /* End metadata */
 443
 444        int meta_line;                  /* Metadata line id */
 445        int meta_distance;              /* Distance between data and metadata */
 446
 447        u64 smeta_ssec;                 /* Sector where smeta starts */
 448        u64 emeta_ssec;                 /* Sector where emeta starts */
 449
 450        unsigned int sec_in_line;       /* Number of usable secs in line */
 451
 452        atomic_t blk_in_line;           /* Number of good blocks in line */
 453        unsigned long *blk_bitmap;      /* Bitmap for valid/invalid blocks */
 454        unsigned long *erase_bitmap;    /* Bitmap for erased blocks */
 455
 456        unsigned long *map_bitmap;      /* Bitmap for mapped sectors in line */
 457        unsigned long *invalid_bitmap;  /* Bitmap for invalid sectors in line */
 458
 459        atomic_t left_eblks;            /* Blocks left for erasing */
 460        atomic_t left_seblks;           /* Blocks left for sync erasing */
 461
 462        int left_msecs;                 /* Sectors left for mapping */
 463        unsigned int cur_sec;           /* Sector map pointer */
 464        unsigned int nr_valid_lbas;     /* Number of valid lbas in line */
 465
 466        __le32 *vsc;                    /* Valid sector count in line */
 467
 468        struct kref ref;                /* Write buffer L2P references */
 469
 470        struct pblk_w_err_gc *w_err_gc; /* Write error gc recovery metadata */
 471
 472        spinlock_t lock;                /* Necessary for invalid_bitmap only */
 473};
 474
 475#define PBLK_DATA_LINES 4
 476
 477enum {
 478        PBLK_KMALLOC_META = 1,
 479        PBLK_VMALLOC_META = 2,
 480};
 481
 482enum {
 483        PBLK_EMETA_TYPE_HEADER = 1,     /* struct line_emeta first sector */
 484        PBLK_EMETA_TYPE_LLBA = 2,       /* lba list - type: __le64 */
 485        PBLK_EMETA_TYPE_VSC = 3,        /* vsc list - type: __le32 */
 486};
 487
 488struct pblk_line_mgmt {
 489        int nr_lines;                   /* Total number of full lines */
 490        int nr_free_lines;              /* Number of full lines in free list */
 491
 492        /* Free lists - use free_lock */
 493        struct list_head free_list;     /* Full lines ready to use */
 494        struct list_head corrupt_list;  /* Full lines corrupted */
 495        struct list_head bad_list;      /* Full lines bad */
 496
 497        /* GC lists - use gc_lock */
 498        struct list_head *gc_lists[PBLK_GC_NR_LISTS];
 499        struct list_head gc_high_list;  /* Full lines ready to GC, high isc */
 500        struct list_head gc_mid_list;   /* Full lines ready to GC, mid isc */
 501        struct list_head gc_low_list;   /* Full lines ready to GC, low isc */
 502
 503        struct list_head gc_werr_list;  /* Write err recovery list */
 504
 505        struct list_head gc_full_list;  /* Full lines ready to GC, no valid */
  506        struct list_head gc_empty_list; /* Full lines closed, all sectors valid */
 507
 508        struct pblk_line *log_line;     /* Current FTL log line */
 509        struct pblk_line *data_line;    /* Current data line */
 510        struct pblk_line *log_next;     /* Next FTL log line */
 511        struct pblk_line *data_next;    /* Next data line */
 512
 513        struct list_head emeta_list;    /* Lines queued to schedule emeta */
 514
 515        __le32 *vsc_list;               /* Valid sector counts for all lines */
 516
 517        /* Metadata allocation type: VMALLOC | KMALLOC */
 518        int emeta_alloc_type;
 519
 520        /* Pre-allocated metadata for data lines */
 521        struct pblk_smeta *sline_meta[PBLK_DATA_LINES];
 522        struct pblk_emeta *eline_meta[PBLK_DATA_LINES];
 523        unsigned long meta_bitmap;
 524
 525        /* Helpers for fast bitmap calculations */
 526        unsigned long *bb_template;
 527        unsigned long *bb_aux;
 528
 529        unsigned long d_seq_nr;         /* Data line unique sequence number */
 530        unsigned long l_seq_nr;         /* Log line unique sequence number */
 531
 532        spinlock_t free_lock;
 533        spinlock_t close_lock;
 534        spinlock_t gc_lock;
 535};
 536
 537struct pblk_line_meta {
 538        unsigned int smeta_len;         /* Total length for smeta */
 539        unsigned int smeta_sec;         /* Sectors needed for smeta */
 540
 541        unsigned int emeta_len[4];      /* Lengths for emeta:
 542                                         *  [0]: Total
 543                                         *  [1]: struct line_emeta +
 544                                         *       bb_bitmap + struct wa_counters
 545                                         *  [2]: L2P portion
 546                                         *  [3]: vsc
 547                                         */
 548        unsigned int emeta_sec[4];      /* Sectors needed for emeta. Same layout
 549                                         * as emeta_len
 550                                         */
 551
 552        unsigned int emeta_bb;          /* Boundary for bb that affects emeta */
 553
 554        unsigned int vsc_list_len;      /* Length for vsc list */
 555        unsigned int sec_bitmap_len;    /* Length for sector bitmap in line */
 556        unsigned int blk_bitmap_len;    /* Length for block bitmap in line */
 557        unsigned int lun_bitmap_len;    /* Length for lun bitmap in line */
 558
 559        unsigned int blk_per_line;      /* Number of blocks in a full line */
 560        unsigned int sec_per_line;      /* Number of sectors in a line */
 561        unsigned int dsec_per_line;     /* Number of data sectors in a line */
 562        unsigned int min_blk_line;      /* Min. number of good blocks in line */
 563
 564        unsigned int mid_thrs;          /* Threshold for GC mid list */
 565        unsigned int high_thrs;         /* Threshold for GC high list */
 566
 567        unsigned int meta_distance;     /* Distance between data and metadata */
 568};
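
/*
 * Illustrative sketch (assumption, loosely following pblk_line_gc_list() in
 * pblk-core.c): a closed line is binned into a GC list by comparing its
 * valid sector count against the thresholds above - the fewer valid
 * sectors, the cheaper the line is to GC and the higher its priority.
 */
static inline int pblk_gc_group_example(struct pblk_line_meta *lm, int vsc)
{
	if (!vsc)
		return PBLK_LINEGC_FULL;	/* no valid data, free for GC */
	if (vsc < lm->high_thrs)
		return PBLK_LINEGC_HIGH;
	if (vsc < lm->mid_thrs)
		return PBLK_LINEGC_MID;
	return PBLK_LINEGC_LOW;
}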
 569
 570enum {
 571        PBLK_STATE_RUNNING = 0,
 572        PBLK_STATE_STOPPING = 1,
 573        PBLK_STATE_RECOVERING = 2,
 574        PBLK_STATE_STOPPED = 3,
 575};
 576
  577/* Internal format to support non-power-of-2 device formats */
 578struct pblk_addrf {
 579        /* gen to dev */
 580        int sec_stripe;
 581        int ch_stripe;
 582        int lun_stripe;
 583
 584        /* dev to gen */
 585        int sec_lun_stripe;
 586        int sec_ws_stripe;
 587};
 588
 589struct pblk {
 590        struct nvm_tgt_dev *dev;
 591        struct gendisk *disk;
 592
 593        struct kobject kobj;
 594
 595        struct pblk_lun *luns;
 596
 597        struct pblk_line *lines;                /* Line array */
 598        struct pblk_line_mgmt l_mg;             /* Line management */
 599        struct pblk_line_meta lm;               /* Line metadata */
 600
 601        struct nvm_addrf addrf;         /* Aligned address format */
 602        struct pblk_addrf uaddrf;       /* Unaligned address format */
 603        int addrf_len;
 604
 605        struct pblk_rb rwb;
 606
 607        int state;                      /* pblk line state */
 608
  609        int min_write_pgs; /* Minimum number of pages required by controller */
  610        int max_write_pgs; /* Maximum number of pages supported by controller */
 611        int pgs_in_buffer; /* Number of pages that need to be held in buffer to
 612                            * guarantee successful reads.
 613                            */
 614
 615        sector_t capacity; /* Device capacity when bad blocks are subtracted */
 616
 617        int op;      /* Percentage of device used for over-provisioning */
 618        int op_blks; /* Number of blocks used for over-provisioning */
 619
 620        /* pblk provisioning values. Used by rate limiter */
 621        struct pblk_rl rl;
 622
 623        int sec_per_write;
 624
 625        unsigned char instance_uuid[16];
 626
 627        /* Persistent write amplification counters, 4kb sector I/Os */
 628        atomic64_t user_wa;             /* Sectors written by user */
 629        atomic64_t gc_wa;               /* Sectors written by GC */
 630        atomic64_t pad_wa;              /* Padded sectors written */
 631
 632        /* Reset values for delta write amplification measurements */
 633        u64 user_rst_wa;
 634        u64 gc_rst_wa;
 635        u64 pad_rst_wa;
 636
 637        /* Counters used for calculating padding distribution */
 638        atomic64_t *pad_dist;           /* Padding distribution buckets */
  639        u64 nr_flush_rst;               /* Flushes reset value for pad dist. */
 640        atomic64_t nr_flush;            /* Number of flush/fua I/O */
 641
 642#ifdef CONFIG_NVM_DEBUG
 643        /* Non-persistent debug counters, 4kb sector I/Os */
 644        atomic_long_t inflight_writes;  /* Inflight writes (user and gc) */
 645        atomic_long_t padded_writes;    /* Sectors padded due to flush/fua */
 646        atomic_long_t padded_wb;        /* Sectors padded in write buffer */
 647        atomic_long_t req_writes;       /* Sectors stored on write buffer */
 648        atomic_long_t sub_writes;       /* Sectors submitted from buffer */
 649        atomic_long_t sync_writes;      /* Sectors synced to media */
 650        atomic_long_t inflight_reads;   /* Inflight sector read requests */
 651        atomic_long_t cache_reads;      /* Read requests that hit the cache */
 652        atomic_long_t sync_reads;       /* Completed sector read requests */
 653        atomic_long_t recov_writes;     /* Sectors submitted from recovery */
 654        atomic_long_t recov_gc_writes;  /* Sectors submitted from write GC */
 655        atomic_long_t recov_gc_reads;   /* Sectors submitted from read GC */
 656#endif
 657
 658        spinlock_t lock;
 659
 660        atomic_long_t read_failed;
 661        atomic_long_t read_empty;
 662        atomic_long_t read_high_ecc;
 663        atomic_long_t read_failed_gc;
 664        atomic_long_t write_failed;
 665        atomic_long_t erase_failed;
 666
 667        atomic_t inflight_io;           /* General inflight I/O counter */
 668
 669        struct task_struct *writer_ts;
 670
 671        /* Simple translation map of logical addresses to physical addresses.
  672         * The logical addresses are known by the host system, while the physical
 673         * addresses are used when writing to the disk block device.
 674         */
 675        unsigned char *trans_map;
 676        spinlock_t trans_lock;
 677
 678        struct list_head compl_list;
 679
 680        spinlock_t resubmit_lock;        /* Resubmit list lock */
  681        struct list_head resubmit_list; /* Resubmit list for failed writes */
 682
 683        mempool_t page_bio_pool;
 684        mempool_t gen_ws_pool;
 685        mempool_t rec_pool;
 686        mempool_t r_rq_pool;
 687        mempool_t w_rq_pool;
 688        mempool_t e_rq_pool;
 689
 690        struct workqueue_struct *close_wq;
 691        struct workqueue_struct *bb_wq;
 692        struct workqueue_struct *r_end_wq;
 693
 694        struct timer_list wtimer;
 695
 696        struct pblk_gc gc;
 697};
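
/*
 * Illustrative sketch (assumption): op/op_blks follow directly from the
 * over-provisioning percentage, e.g. PBLK_DEFAULT_OP == 11 keeps 11% of the
 * provisioned blocks back from the exposed capacity.
 */
static inline int pblk_op_blks_example(unsigned long provisioned_blks, int op)
{
	return provisioned_blks * op / 100;
}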
 698
 699struct pblk_line_ws {
 700        struct pblk *pblk;
 701        struct pblk_line *line;
 702        void *priv;
 703        struct work_struct ws;
 704};
 705
 706#define pblk_g_rq_size (sizeof(struct nvm_rq) + sizeof(struct pblk_g_ctx))
 707#define pblk_w_rq_size (sizeof(struct nvm_rq) + sizeof(struct pblk_c_ctx))
 708
 709/*
 710 * pblk ring buffer operations
 711 */
 712int pblk_rb_init(struct pblk_rb *rb, struct pblk_rb_entry *rb_entry_base,
 713                 unsigned int power_size, unsigned int power_seg_sz);
 714unsigned int pblk_rb_calculate_size(unsigned int nr_entries);
 715void *pblk_rb_entries_ref(struct pblk_rb *rb);
 716int pblk_rb_may_write_user(struct pblk_rb *rb, struct bio *bio,
 717                           unsigned int nr_entries, unsigned int *pos);
 718int pblk_rb_may_write_gc(struct pblk_rb *rb, unsigned int nr_entries,
 719                         unsigned int *pos);
 720void pblk_rb_write_entry_user(struct pblk_rb *rb, void *data,
 721                              struct pblk_w_ctx w_ctx, unsigned int pos);
 722void pblk_rb_write_entry_gc(struct pblk_rb *rb, void *data,
 723                            struct pblk_w_ctx w_ctx, struct pblk_line *line,
 724                            u64 paddr, unsigned int pos);
 725struct pblk_w_ctx *pblk_rb_w_ctx(struct pblk_rb *rb, unsigned int pos);
 726void pblk_rb_flush(struct pblk_rb *rb);
 727
 728void pblk_rb_sync_l2p(struct pblk_rb *rb);
 729unsigned int pblk_rb_read_to_bio(struct pblk_rb *rb, struct nvm_rq *rqd,
 730                                 unsigned int pos, unsigned int nr_entries,
 731                                 unsigned int count);
 732int pblk_rb_copy_to_bio(struct pblk_rb *rb, struct bio *bio, sector_t lba,
 733                        struct ppa_addr ppa, int bio_iter, bool advanced_bio);
 734unsigned int pblk_rb_read_commit(struct pblk_rb *rb, unsigned int entries);
 735
 736unsigned int pblk_rb_sync_init(struct pblk_rb *rb, unsigned long *flags);
 737unsigned int pblk_rb_sync_advance(struct pblk_rb *rb, unsigned int nr_entries);
 738struct pblk_rb_entry *pblk_rb_sync_scan_entry(struct pblk_rb *rb,
 739                                              struct ppa_addr *ppa);
 740void pblk_rb_sync_end(struct pblk_rb *rb, unsigned long *flags);
 741unsigned int pblk_rb_flush_point_count(struct pblk_rb *rb);
 742
 743unsigned int pblk_rb_read_count(struct pblk_rb *rb);
 744unsigned int pblk_rb_sync_count(struct pblk_rb *rb);
 745unsigned int pblk_rb_wrap_pos(struct pblk_rb *rb, unsigned int pos);
 746
 747int pblk_rb_tear_down_check(struct pblk_rb *rb);
 748int pblk_rb_pos_oob(struct pblk_rb *rb, u64 pos);
 749void pblk_rb_data_free(struct pblk_rb *rb);
 750ssize_t pblk_rb_sysfs(struct pblk_rb *rb, char *buf);
 751
 752/*
 753 * pblk core
 754 */
 755struct nvm_rq *pblk_alloc_rqd(struct pblk *pblk, int type);
 756void pblk_free_rqd(struct pblk *pblk, struct nvm_rq *rqd, int type);
 757void pblk_set_sec_per_write(struct pblk *pblk, int sec_per_write);
 758int pblk_setup_w_rec_rq(struct pblk *pblk, struct nvm_rq *rqd,
 759                        struct pblk_c_ctx *c_ctx);
 760void pblk_discard(struct pblk *pblk, struct bio *bio);
 761struct nvm_chk_meta *pblk_chunk_get_info(struct pblk *pblk);
 762struct nvm_chk_meta *pblk_chunk_get_off(struct pblk *pblk,
 763                                              struct nvm_chk_meta *lp,
 764                                              struct ppa_addr ppa);
 765void pblk_log_write_err(struct pblk *pblk, struct nvm_rq *rqd);
 766void pblk_log_read_err(struct pblk *pblk, struct nvm_rq *rqd);
 767int pblk_submit_io(struct pblk *pblk, struct nvm_rq *rqd);
 768int pblk_submit_io_sync(struct pblk *pblk, struct nvm_rq *rqd);
 769int pblk_submit_meta_io(struct pblk *pblk, struct pblk_line *meta_line);
 770struct bio *pblk_bio_map_addr(struct pblk *pblk, void *data,
 771                              unsigned int nr_secs, unsigned int len,
 772                              int alloc_type, gfp_t gfp_mask);
 773struct pblk_line *pblk_line_get(struct pblk *pblk);
 774struct pblk_line *pblk_line_get_first_data(struct pblk *pblk);
 775struct pblk_line *pblk_line_replace_data(struct pblk *pblk);
 776int pblk_line_recov_alloc(struct pblk *pblk, struct pblk_line *line);
 777void pblk_line_recov_close(struct pblk *pblk, struct pblk_line *line);
 778struct pblk_line *pblk_line_get_data(struct pblk *pblk);
 779struct pblk_line *pblk_line_get_erase(struct pblk *pblk);
 780int pblk_line_erase(struct pblk *pblk, struct pblk_line *line);
 781int pblk_line_is_full(struct pblk_line *line);
 782void pblk_line_free(struct pblk_line *line);
 783void pblk_line_close_meta(struct pblk *pblk, struct pblk_line *line);
 784void pblk_line_close(struct pblk *pblk, struct pblk_line *line);
 785void pblk_line_close_ws(struct work_struct *work);
 786void pblk_pipeline_stop(struct pblk *pblk);
 787void __pblk_pipeline_stop(struct pblk *pblk);
 788void __pblk_pipeline_flush(struct pblk *pblk);
 789void pblk_gen_run_ws(struct pblk *pblk, struct pblk_line *line, void *priv,
 790                     void (*work)(struct work_struct *), gfp_t gfp_mask,
 791                     struct workqueue_struct *wq);
 792u64 pblk_line_smeta_start(struct pblk *pblk, struct pblk_line *line);
 793int pblk_line_read_smeta(struct pblk *pblk, struct pblk_line *line);
 794int pblk_line_read_emeta(struct pblk *pblk, struct pblk_line *line,
 795                         void *emeta_buf);
 796int pblk_blk_erase_async(struct pblk *pblk, struct ppa_addr erase_ppa);
 797void pblk_line_put(struct kref *ref);
 798void pblk_line_put_wq(struct kref *ref);
 799struct list_head *pblk_line_gc_list(struct pblk *pblk, struct pblk_line *line);
 800u64 pblk_lookup_page(struct pblk *pblk, struct pblk_line *line);
 801void pblk_dealloc_page(struct pblk *pblk, struct pblk_line *line, int nr_secs);
 802u64 pblk_alloc_page(struct pblk *pblk, struct pblk_line *line, int nr_secs);
 803u64 __pblk_alloc_page(struct pblk *pblk, struct pblk_line *line, int nr_secs);
 804int pblk_calc_secs(struct pblk *pblk, unsigned long secs_avail,
 805                   unsigned long secs_to_flush);
 806void pblk_up_page(struct pblk *pblk, struct ppa_addr *ppa_list, int nr_ppas);
 807void pblk_down_rq(struct pblk *pblk, struct ppa_addr *ppa_list, int nr_ppas,
 808                  unsigned long *lun_bitmap);
 809void pblk_down_page(struct pblk *pblk, struct ppa_addr *ppa_list, int nr_ppas);
 810void pblk_up_rq(struct pblk *pblk, struct ppa_addr *ppa_list, int nr_ppas,
 811                unsigned long *lun_bitmap);
 812int pblk_bio_add_pages(struct pblk *pblk, struct bio *bio, gfp_t flags,
 813                       int nr_pages);
 814void pblk_bio_free_pages(struct pblk *pblk, struct bio *bio, int off,
 815                         int nr_pages);
 816void pblk_map_invalidate(struct pblk *pblk, struct ppa_addr ppa);
 817void __pblk_map_invalidate(struct pblk *pblk, struct pblk_line *line,
 818                           u64 paddr);
 819void pblk_update_map(struct pblk *pblk, sector_t lba, struct ppa_addr ppa);
 820void pblk_update_map_cache(struct pblk *pblk, sector_t lba,
 821                           struct ppa_addr ppa);
 822void pblk_update_map_dev(struct pblk *pblk, sector_t lba,
 823                         struct ppa_addr ppa, struct ppa_addr entry_line);
 824int pblk_update_map_gc(struct pblk *pblk, sector_t lba, struct ppa_addr ppa,
 825                       struct pblk_line *gc_line, u64 paddr);
 826void pblk_lookup_l2p_rand(struct pblk *pblk, struct ppa_addr *ppas,
 827                          u64 *lba_list, int nr_secs);
 828void pblk_lookup_l2p_seq(struct pblk *pblk, struct ppa_addr *ppas,
 829                         sector_t blba, int nr_secs);
 830
 831/*
 832 * pblk user I/O write path
 833 */
 834int pblk_write_to_cache(struct pblk *pblk, struct bio *bio,
 835                        unsigned long flags);
 836int pblk_write_gc_to_cache(struct pblk *pblk, struct pblk_gc_rq *gc_rq);
 837
 838/*
 839 * pblk map
 840 */
 841void pblk_map_erase_rq(struct pblk *pblk, struct nvm_rq *rqd,
 842                       unsigned int sentry, unsigned long *lun_bitmap,
 843                       unsigned int valid_secs, struct ppa_addr *erase_ppa);
 844void pblk_map_rq(struct pblk *pblk, struct nvm_rq *rqd, unsigned int sentry,
 845                 unsigned long *lun_bitmap, unsigned int valid_secs,
 846                 unsigned int off);
 847
 848/*
 849 * pblk write thread
 850 */
 851int pblk_write_ts(void *data);
 852void pblk_write_timer_fn(struct timer_list *t);
 853void pblk_write_should_kick(struct pblk *pblk);
 854void pblk_write_kick(struct pblk *pblk);
 855
 856/*
 857 * pblk read path
 858 */
 859extern struct bio_set pblk_bio_set;
 860int pblk_submit_read(struct pblk *pblk, struct bio *bio);
 861int pblk_submit_read_gc(struct pblk *pblk, struct pblk_gc_rq *gc_rq);
 862/*
 863 * pblk recovery
 864 */
 865struct pblk_line *pblk_recov_l2p(struct pblk *pblk);
 866int pblk_recov_pad(struct pblk *pblk);
 867int pblk_recov_check_emeta(struct pblk *pblk, struct line_emeta *emeta);
 868
 869/*
 870 * pblk gc
 871 */
 872#define PBLK_GC_MAX_READERS 8   /* Max number of outstanding GC reader jobs */
 873#define PBLK_GC_RQ_QD 128       /* Queue depth for inflight GC requests */
 874#define PBLK_GC_L_QD 4          /* Queue depth for inflight GC lines */
 875#define PBLK_GC_RSV_LINE 1      /* Reserved lines for GC */
 876
 877int pblk_gc_init(struct pblk *pblk);
 878void pblk_gc_exit(struct pblk *pblk, bool graceful);
 879void pblk_gc_should_start(struct pblk *pblk);
 880void pblk_gc_should_stop(struct pblk *pblk);
 881void pblk_gc_should_kick(struct pblk *pblk);
 882void pblk_gc_free_full_lines(struct pblk *pblk);
 883void pblk_gc_sysfs_state_show(struct pblk *pblk, int *gc_enabled,
 884                              int *gc_active);
 885int pblk_gc_sysfs_force(struct pblk *pblk, int force);
 886
 887/*
 888 * pblk rate limiter
 889 */
 890void pblk_rl_init(struct pblk_rl *rl, int budget);
 891void pblk_rl_free(struct pblk_rl *rl);
 892void pblk_rl_update_rates(struct pblk_rl *rl);
 893int pblk_rl_high_thrs(struct pblk_rl *rl);
 894unsigned long pblk_rl_nr_free_blks(struct pblk_rl *rl);
 895unsigned long pblk_rl_nr_user_free_blks(struct pblk_rl *rl);
 896int pblk_rl_user_may_insert(struct pblk_rl *rl, int nr_entries);
 897void pblk_rl_inserted(struct pblk_rl *rl, int nr_entries);
 898void pblk_rl_user_in(struct pblk_rl *rl, int nr_entries);
 899int pblk_rl_gc_may_insert(struct pblk_rl *rl, int nr_entries);
 900void pblk_rl_gc_in(struct pblk_rl *rl, int nr_entries);
 901void pblk_rl_out(struct pblk_rl *rl, int nr_user, int nr_gc);
 902int pblk_rl_max_io(struct pblk_rl *rl);
 903void pblk_rl_free_lines_inc(struct pblk_rl *rl, struct pblk_line *line);
 904void pblk_rl_free_lines_dec(struct pblk_rl *rl, struct pblk_line *line,
 905                            bool used);
 906int pblk_rl_is_limit(struct pblk_rl *rl);
 907
 908void pblk_rl_werr_line_in(struct pblk_rl *rl);
 909void pblk_rl_werr_line_out(struct pblk_rl *rl);
 910
 911/*
 912 * pblk sysfs
 913 */
 914int pblk_sysfs_init(struct gendisk *tdisk);
 915void pblk_sysfs_exit(struct gendisk *tdisk);
 916
 917static inline void *pblk_malloc(size_t size, int type, gfp_t flags)
 918{
 919        if (type == PBLK_KMALLOC_META)
 920                return kmalloc(size, flags);
 921        return vmalloc(size);
 922}
 923
 924static inline void pblk_mfree(void *ptr, int type)
 925{
 926        if (type == PBLK_KMALLOC_META)
 927                kfree(ptr);
 928        else
 929                vfree(ptr);
 930}
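
/*
 * Illustrative usage sketch (assumption): emeta buffers are allocated with
 * the line manager's emeta_alloc_type, so large buffers can fall back to
 * vmalloc while small ones stay physically contiguous with kmalloc.
 */
static inline void *pblk_emeta_buf_alloc_example(struct pblk *pblk)
{
	return pblk_malloc(pblk->lm.emeta_len[0],
			   pblk->l_mg.emeta_alloc_type, GFP_KERNEL);
}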
 931
 932static inline struct nvm_rq *nvm_rq_from_c_ctx(void *c_ctx)
 933{
 934        return c_ctx - sizeof(struct nvm_rq);
 935}
 936
 937static inline void *emeta_to_bb(struct line_emeta *emeta)
 938{
 939        return emeta->bb_bitmap;
 940}
 941
 942static inline void *emeta_to_wa(struct pblk_line_meta *lm,
 943                                struct line_emeta *emeta)
 944{
 945        return emeta->bb_bitmap + lm->blk_bitmap_len;
 946}
 947
 948static inline void *emeta_to_lbas(struct pblk *pblk, struct line_emeta *emeta)
 949{
 950        return ((void *)emeta + pblk->lm.emeta_len[1]);
 951}
 952
 953static inline void *emeta_to_vsc(struct pblk *pblk, struct line_emeta *emeta)
 954{
 955        return (emeta_to_lbas(pblk, emeta) + pblk->lm.emeta_len[2]);
 956}
 957
 958static inline int pblk_line_vsc(struct pblk_line *line)
 959{
 960        return le32_to_cpu(*line->vsc);
 961}
 962
 963static inline int pblk_pad_distance(struct pblk *pblk)
 964{
 965        struct nvm_tgt_dev *dev = pblk->dev;
 966        struct nvm_geo *geo = &dev->geo;
 967
 968        return geo->mw_cunits * geo->all_luns * geo->ws_opt;
 969}
 970
 971static inline int pblk_ppa_to_line(struct ppa_addr p)
 972{
 973        return p.a.blk;
 974}
 975
 976static inline int pblk_ppa_to_pos(struct nvm_geo *geo, struct ppa_addr p)
 977{
 978        return p.a.lun * geo->num_ch + p.a.ch;
 979}
 980
 981static inline struct ppa_addr addr_to_gen_ppa(struct pblk *pblk, u64 paddr,
 982                                              u64 line_id)
 983{
 984        struct nvm_tgt_dev *dev = pblk->dev;
 985        struct nvm_geo *geo = &dev->geo;
 986        struct ppa_addr ppa;
 987
 988        if (geo->version == NVM_OCSSD_SPEC_12) {
 989                struct nvm_addrf_12 *ppaf = (struct nvm_addrf_12 *)&pblk->addrf;
 990
 991                ppa.ppa = 0;
 992                ppa.g.blk = line_id;
 993                ppa.g.pg = (paddr & ppaf->pg_mask) >> ppaf->pg_offset;
 994                ppa.g.lun = (paddr & ppaf->lun_mask) >> ppaf->lun_offset;
 995                ppa.g.ch = (paddr & ppaf->ch_mask) >> ppaf->ch_offset;
 996                ppa.g.pl = (paddr & ppaf->pln_mask) >> ppaf->pln_offset;
 997                ppa.g.sec = (paddr & ppaf->sec_mask) >> ppaf->sec_offset;
 998        } else {
 999                struct pblk_addrf *uaddrf = &pblk->uaddrf;
1000                int secs, chnls, luns;
1001
1002                ppa.ppa = 0;
1003
1004                ppa.m.chk = line_id;
1005
1006                paddr = div_u64_rem(paddr, uaddrf->sec_stripe, &secs);
1007                ppa.m.sec = secs;
1008
1009                paddr = div_u64_rem(paddr, uaddrf->ch_stripe, &chnls);
1010                ppa.m.grp = chnls;
1011
1012                paddr = div_u64_rem(paddr, uaddrf->lun_stripe, &luns);
1013                ppa.m.pu = luns;
1014
1015                ppa.m.sec += uaddrf->sec_stripe * paddr;
1016        }
1017
1018        return ppa;
1019}
1020
1021static inline u64 pblk_dev_ppa_to_line_addr(struct pblk *pblk,
1022                                                        struct ppa_addr p)
1023{
1024        struct nvm_tgt_dev *dev = pblk->dev;
1025        struct nvm_geo *geo = &dev->geo;
1026        u64 paddr;
1027
1028        if (geo->version == NVM_OCSSD_SPEC_12) {
1029                struct nvm_addrf_12 *ppaf = (struct nvm_addrf_12 *)&pblk->addrf;
1030
1031                paddr = (u64)p.g.ch << ppaf->ch_offset;
1032                paddr |= (u64)p.g.lun << ppaf->lun_offset;
1033                paddr |= (u64)p.g.pg << ppaf->pg_offset;
1034                paddr |= (u64)p.g.pl << ppaf->pln_offset;
1035                paddr |= (u64)p.g.sec << ppaf->sec_offset;
1036        } else {
1037                struct pblk_addrf *uaddrf = &pblk->uaddrf;
1038                u64 secs = p.m.sec;
1039                int sec_stripe;
1040
1041                paddr = (u64)p.m.grp * uaddrf->sec_stripe;
1042                paddr += (u64)p.m.pu * uaddrf->sec_lun_stripe;
1043
1044                secs = div_u64_rem(secs, uaddrf->sec_stripe, &sec_stripe);
1045                paddr += secs * uaddrf->sec_ws_stripe;
1046                paddr += sec_stripe;
1047        }
1048
1049        return paddr;
1050}
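
/*
 * Illustrative sketch (assumption): addr_to_gen_ppa() and
 * pblk_dev_ppa_to_line_addr() are inverses for sectors within one line,
 * which is what allows the in-line offset to be recovered from a device
 * address (and vice versa) during mapping and recovery.
 */
static inline bool pblk_line_addr_roundtrip_example(struct pblk *pblk,
						    u64 paddr, u64 line_id)
{
	struct ppa_addr ppa = addr_to_gen_ppa(pblk, paddr, line_id);

	return pblk_dev_ppa_to_line_addr(pblk, ppa) == paddr;
}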
1051
1052static inline struct ppa_addr pblk_ppa32_to_ppa64(struct pblk *pblk, u32 ppa32)
1053{
1054        struct ppa_addr ppa64;
1055
1056        ppa64.ppa = 0;
1057
1058        if (ppa32 == -1) {
1059                ppa64.ppa = ADDR_EMPTY;
1060        } else if (ppa32 & (1U << 31)) {
1061                ppa64.c.line = ppa32 & ((~0U) >> 1);
1062                ppa64.c.is_cached = 1;
1063        } else {
1064                struct nvm_tgt_dev *dev = pblk->dev;
1065                struct nvm_geo *geo = &dev->geo;
1066
1067                if (geo->version == NVM_OCSSD_SPEC_12) {
1068                        struct nvm_addrf_12 *ppaf =
1069                                        (struct nvm_addrf_12 *)&pblk->addrf;
1070
1071                        ppa64.g.ch = (ppa32 & ppaf->ch_mask) >>
1072                                                        ppaf->ch_offset;
1073                        ppa64.g.lun = (ppa32 & ppaf->lun_mask) >>
1074                                                        ppaf->lun_offset;
1075                        ppa64.g.blk = (ppa32 & ppaf->blk_mask) >>
1076                                                        ppaf->blk_offset;
1077                        ppa64.g.pg = (ppa32 & ppaf->pg_mask) >>
1078                                                        ppaf->pg_offset;
1079                        ppa64.g.pl = (ppa32 & ppaf->pln_mask) >>
1080                                                        ppaf->pln_offset;
1081                        ppa64.g.sec = (ppa32 & ppaf->sec_mask) >>
1082                                                        ppaf->sec_offset;
1083                } else {
1084                        struct nvm_addrf *lbaf = &pblk->addrf;
1085
1086                        ppa64.m.grp = (ppa32 & lbaf->ch_mask) >>
1087                                                        lbaf->ch_offset;
1088                        ppa64.m.pu = (ppa32 & lbaf->lun_mask) >>
1089                                                        lbaf->lun_offset;
1090                        ppa64.m.chk = (ppa32 & lbaf->chk_mask) >>
1091                                                        lbaf->chk_offset;
1092                        ppa64.m.sec = (ppa32 & lbaf->sec_mask) >>
1093                                                        lbaf->sec_offset;
1094                }
1095        }
1096
1097        return ppa64;
1098}
1099
1100static inline u32 pblk_ppa64_to_ppa32(struct pblk *pblk, struct ppa_addr ppa64)
1101{
1102        u32 ppa32 = 0;
1103
1104        if (ppa64.ppa == ADDR_EMPTY) {
1105                ppa32 = ~0U;
1106        } else if (ppa64.c.is_cached) {
1107                ppa32 |= ppa64.c.line;
1108                ppa32 |= 1U << 31;
1109        } else {
1110                struct nvm_tgt_dev *dev = pblk->dev;
1111                struct nvm_geo *geo = &dev->geo;
1112
1113                if (geo->version == NVM_OCSSD_SPEC_12) {
1114                        struct nvm_addrf_12 *ppaf =
1115                                        (struct nvm_addrf_12 *)&pblk->addrf;
1116
1117                        ppa32 |= ppa64.g.ch << ppaf->ch_offset;
1118                        ppa32 |= ppa64.g.lun << ppaf->lun_offset;
1119                        ppa32 |= ppa64.g.blk << ppaf->blk_offset;
1120                        ppa32 |= ppa64.g.pg << ppaf->pg_offset;
1121                        ppa32 |= ppa64.g.pl << ppaf->pln_offset;
1122                        ppa32 |= ppa64.g.sec << ppaf->sec_offset;
1123                } else {
1124                        struct nvm_addrf *lbaf = &pblk->addrf;
1125
1126                        ppa32 |= ppa64.m.grp << lbaf->ch_offset;
1127                        ppa32 |= ppa64.m.pu << lbaf->lun_offset;
1128                        ppa32 |= ppa64.m.chk << lbaf->chk_offset;
1129                        ppa32 |= ppa64.m.sec << lbaf->sec_offset;
1130                }
1131        }
1132
1133        return ppa32;
1134}
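
/*
 * Illustrative sketch (assumption): the 32-bit packing reserves bit 31 as
 * the "is_cached" flag, leaving 31 bits for the cacheline number, so a
 * cache address survives a pack/unpack round trip as shown here.
 */
static inline bool pblk_ppa32_roundtrip_example(struct pblk *pblk,
						struct ppa_addr ppa)
{
	u32 ppa32 = pblk_ppa64_to_ppa32(pblk, ppa);

	return pblk_ppa32_to_ppa64(pblk, ppa32).ppa == ppa.ppa;
}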
1135
1136static inline struct ppa_addr pblk_trans_map_get(struct pblk *pblk,
1137                                                                sector_t lba)
1138{
1139        struct ppa_addr ppa;
1140
1141        if (pblk->addrf_len < 32) {
1142                u32 *map = (u32 *)pblk->trans_map;
1143
1144                ppa = pblk_ppa32_to_ppa64(pblk, map[lba]);
1145        } else {
1146                struct ppa_addr *map = (struct ppa_addr *)pblk->trans_map;
1147
1148                ppa = map[lba];
1149        }
1150
1151        return ppa;
1152}
1153
1154static inline void pblk_trans_map_set(struct pblk *pblk, sector_t lba,
1155                                                struct ppa_addr ppa)
1156{
1157        if (pblk->addrf_len < 32) {
1158                u32 *map = (u32 *)pblk->trans_map;
1159
1160                map[lba] = pblk_ppa64_to_ppa32(pblk, ppa);
1161        } else {
1162                u64 *map = (u64 *)pblk->trans_map;
1163
1164                map[lba] = ppa.ppa;
1165        }
1166}
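
/*
 * Illustrative sketch (assumption): the L2P table holds one entry per 4KB
 * logical sector - a compact u32 when the address format fits in 32 bits,
 * a full struct ppa_addr otherwise - so its memory footprint is simply the
 * entry width times the number of sectors.
 */
static inline size_t pblk_trans_map_size_example(struct pblk *pblk)
{
	size_t entry_size = (pblk->addrf_len < 32) ?
				sizeof(u32) : sizeof(struct ppa_addr);

	return entry_size * pblk->rl.nr_secs;
}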
1167
1168static inline int pblk_ppa_empty(struct ppa_addr ppa_addr)
1169{
1170        return (ppa_addr.ppa == ADDR_EMPTY);
1171}
1172
1173static inline void pblk_ppa_set_empty(struct ppa_addr *ppa_addr)
1174{
1175        ppa_addr->ppa = ADDR_EMPTY;
1176}
1177
1178static inline bool pblk_ppa_comp(struct ppa_addr lppa, struct ppa_addr rppa)
1179{
1180        return (lppa.ppa == rppa.ppa);
1181}
1182
1183static inline int pblk_addr_in_cache(struct ppa_addr ppa)
1184{
1185        return (ppa.ppa != ADDR_EMPTY && ppa.c.is_cached);
1186}
1187
1188static inline int pblk_addr_to_cacheline(struct ppa_addr ppa)
1189{
1190        return ppa.c.line;
1191}
1192
1193static inline struct ppa_addr pblk_cacheline_to_addr(int addr)
1194{
1195        struct ppa_addr p;
1196
1197        p.c.line = addr;
1198        p.c.is_cached = 1;
1199
1200        return p;
1201}
1202
1203static inline u32 pblk_calc_meta_header_crc(struct pblk *pblk,
1204                                            struct line_header *header)
1205{
1206        u32 crc = ~(u32)0;
1207
1208        crc = crc32_le(crc, (unsigned char *)header + sizeof(crc),
1209                                sizeof(struct line_header) - sizeof(crc));
1210
1211        return crc;
1212}
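
/*
 * Illustrative verification sketch (assumption): recovery recomputes the
 * CRC over the same byte range and compares it with the value stored in
 * the header - which is why the crc field itself is excluded from the
 * checksummed range above.
 */
static inline bool pblk_header_crc_ok_example(struct pblk *pblk,
					      struct line_header *header)
{
	return le32_to_cpu(header->crc) ==
	       pblk_calc_meta_header_crc(pblk, header);
}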
1213
1214static inline u32 pblk_calc_smeta_crc(struct pblk *pblk,
1215                                      struct line_smeta *smeta)
1216{
1217        struct pblk_line_meta *lm = &pblk->lm;
1218        u32 crc = ~(u32)0;
1219
1220        crc = crc32_le(crc, (unsigned char *)smeta +
1221                                sizeof(struct line_header) + sizeof(crc),
1222                                lm->smeta_len -
1223                                sizeof(struct line_header) - sizeof(crc));
1224
1225        return crc;
1226}
1227
1228static inline u32 pblk_calc_emeta_crc(struct pblk *pblk,
1229                                      struct line_emeta *emeta)
1230{
1231        struct pblk_line_meta *lm = &pblk->lm;
1232        u32 crc = ~(u32)0;
1233
1234        crc = crc32_le(crc, (unsigned char *)emeta +
1235                                sizeof(struct line_header) + sizeof(crc),
1236                                lm->emeta_len[0] -
1237                                sizeof(struct line_header) - sizeof(crc));
1238
1239        return crc;
1240}
1241
1242static inline int pblk_set_progr_mode(struct pblk *pblk, int type)
1243{
1244        struct nvm_tgt_dev *dev = pblk->dev;
1245        struct nvm_geo *geo = &dev->geo;
1246        int flags;
1247
1248        if (geo->version == NVM_OCSSD_SPEC_20)
1249                return 0;
1250
1251        flags = geo->pln_mode >> 1;
1252
1253        if (type == PBLK_WRITE)
1254                flags |= NVM_IO_SCRAMBLE_ENABLE;
1255
1256        return flags;
1257}
1258
1259enum {
1260        PBLK_READ_RANDOM        = 0,
1261        PBLK_READ_SEQUENTIAL    = 1,
1262};
1263
1264static inline int pblk_set_read_mode(struct pblk *pblk, int type)
1265{
1266        struct nvm_tgt_dev *dev = pblk->dev;
1267        struct nvm_geo *geo = &dev->geo;
1268        int flags;
1269
1270        if (geo->version == NVM_OCSSD_SPEC_20)
1271                return 0;
1272
1273        flags = NVM_IO_SUSPEND | NVM_IO_SCRAMBLE_ENABLE;
1274        if (type == PBLK_READ_SEQUENTIAL)
1275                flags |= geo->pln_mode >> 1;
1276
1277        return flags;
1278}
1279
1280static inline int pblk_io_aligned(struct pblk *pblk, int nr_secs)
1281{
1282        return !(nr_secs % pblk->min_write_pgs);
1283}
1284
1285#ifdef CONFIG_NVM_DEBUG
1286static inline void print_ppa(struct nvm_geo *geo, struct ppa_addr *p,
1287                             char *msg, int error)
1288{
1289        if (p->c.is_cached) {
1290                pr_err("ppa: (%s: %x) cache line: %llu\n",
1291                                msg, error, (u64)p->c.line);
1292        } else if (geo->version == NVM_OCSSD_SPEC_12) {
1293                pr_err("ppa: (%s: %x):ch:%d,lun:%d,blk:%d,pg:%d,pl:%d,sec:%d\n",
1294                        msg, error,
1295                        p->g.ch, p->g.lun, p->g.blk,
1296                        p->g.pg, p->g.pl, p->g.sec);
1297        } else {
1298                pr_err("ppa: (%s: %x):ch:%d,lun:%d,chk:%d,sec:%d\n",
1299                        msg, error,
1300                        p->m.grp, p->m.pu, p->m.chk, p->m.sec);
1301        }
1302}
1303
1304static inline void pblk_print_failed_rqd(struct pblk *pblk, struct nvm_rq *rqd,
1305                                         int error)
1306{
1307        int bit = -1;
1308
1309        if (rqd->nr_ppas ==  1) {
1310                print_ppa(&pblk->dev->geo, &rqd->ppa_addr, "rqd", error);
1311                return;
1312        }
1313
1314        while ((bit = find_next_bit((void *)&rqd->ppa_status, rqd->nr_ppas,
1315                                                bit + 1)) < rqd->nr_ppas) {
1316                print_ppa(&pblk->dev->geo, &rqd->ppa_list[bit], "rqd", error);
1317        }
1318
1319        pr_err("error:%d, ppa_status:%llx\n", error, rqd->ppa_status);
1320}
1321
1322static inline int pblk_boundary_ppa_checks(struct nvm_tgt_dev *tgt_dev,
1323                                       struct ppa_addr *ppas, int nr_ppas)
1324{
1325        struct nvm_geo *geo = &tgt_dev->geo;
1326        struct ppa_addr *ppa;
1327        int i;
1328
1329        for (i = 0; i < nr_ppas; i++) {
1330                ppa = &ppas[i];
1331
1332                if (geo->version == NVM_OCSSD_SPEC_12) {
1333                        if (!ppa->c.is_cached &&
1334                                        ppa->g.ch < geo->num_ch &&
1335                                        ppa->g.lun < geo->num_lun &&
1336                                        ppa->g.pl < geo->num_pln &&
1337                                        ppa->g.blk < geo->num_chk &&
1338                                        ppa->g.pg < geo->num_pg &&
1339                                        ppa->g.sec < geo->ws_min)
1340                                continue;
1341                } else {
1342                        if (!ppa->c.is_cached &&
1343                                        ppa->m.grp < geo->num_ch &&
1344                                        ppa->m.pu < geo->num_lun &&
1345                                        ppa->m.chk < geo->num_chk &&
1346                                        ppa->m.sec < geo->clba)
1347                                continue;
1348                }
1349
1350                print_ppa(geo, ppa, "boundary", i);
1351
1352                return 1;
1353        }
1354        return 0;
1355}
1356
1357static inline int pblk_check_io(struct pblk *pblk, struct nvm_rq *rqd)
1358{
1359        struct nvm_tgt_dev *dev = pblk->dev;
1360        struct ppa_addr *ppa_list;
1361
1362        ppa_list = (rqd->nr_ppas > 1) ? rqd->ppa_list : &rqd->ppa_addr;
1363
1364        if (pblk_boundary_ppa_checks(dev, ppa_list, rqd->nr_ppas)) {
1365                WARN_ON(1);
1366                return -EINVAL;
1367        }
1368
1369        if (rqd->opcode == NVM_OP_PWRITE) {
1370                struct pblk_line *line;
1371                struct ppa_addr ppa;
1372                int i;
1373
1374                for (i = 0; i < rqd->nr_ppas; i++) {
1375                        ppa = ppa_list[i];
1376                        line = &pblk->lines[pblk_ppa_to_line(ppa)];
1377
1378                        spin_lock(&line->lock);
1379                        if (line->state != PBLK_LINESTATE_OPEN) {
1380                                pr_err("pblk: bad ppa: line:%d,state:%d\n",
1381                                                        line->id, line->state);
1382                                WARN_ON(1);
1383                                spin_unlock(&line->lock);
1384                                return -EINVAL;
1385                        }
1386                        spin_unlock(&line->lock);
1387                }
1388        }
1389
1390        return 0;
1391}
1392#endif
1393
1394static inline int pblk_boundary_paddr_checks(struct pblk *pblk, u64 paddr)
1395{
1396        struct pblk_line_meta *lm = &pblk->lm;
1397
1398        if (paddr > lm->sec_per_line)
1399                return 1;
1400
1401        return 0;
1402}
1403
1404static inline unsigned int pblk_get_bi_idx(struct bio *bio)
1405{
1406        return bio->bi_iter.bi_idx;
1407}
1408
1409static inline sector_t pblk_get_lba(struct bio *bio)
1410{
1411        return bio->bi_iter.bi_sector / NR_PHY_IN_LOG;
1412}
1413
1414static inline unsigned int pblk_get_secs(struct bio *bio)
1415{
1416        return  bio->bi_iter.bi_size / PBLK_EXPOSED_PAGE_SIZE;
1417}
1418
1419static inline void pblk_setup_uuid(struct pblk *pblk)
1420{
1421        uuid_le uuid;
1422
1423        uuid_le_gen(&uuid);
1424        memcpy(pblk->instance_uuid, uuid.b, 16);
1425}
1426#endif /* PBLK_H_ */
1427