linux/drivers/lightnvm/pblk.h
   1/* SPDX-License-Identifier: GPL-2.0 */
   2/*
   3 * Copyright (C) 2015 IT University of Copenhagen (rrpc.h)
   4 * Copyright (C) 2016 CNEX Labs
   5 * Initial release: Matias Bjorling <matias@cnexlabs.com>
   6 * Write buffering: Javier Gonzalez <javier@cnexlabs.com>
   7 *
   8 * This program is free software; you can redistribute it and/or
   9 * modify it under the terms of the GNU General Public License version
  10 * 2 as published by the Free Software Foundation.
  11 *
  12 * This program is distributed in the hope that it will be useful, but
  13 * WITHOUT ANY WARRANTY; without even the implied warranty of
  14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
  15 * General Public License for more details.
  16 *
  17 * Implementation of a Physical Block-device target for Open-channel SSDs.
  18 *
  19 */
  20
  21#ifndef PBLK_H_
  22#define PBLK_H_
  23
  24#include <linux/blkdev.h>
  25#include <linux/blk-mq.h>
  26#include <linux/bio.h>
  27#include <linux/module.h>
  28#include <linux/kthread.h>
  29#include <linux/vmalloc.h>
  30#include <linux/crc32.h>
  31#include <linux/uuid.h>
  32
  33#include <linux/lightnvm.h>
  34
  35/* Run only GC if less than 1/X blocks are free */
  36#define GC_LIMIT_INVERSE 5
  37#define GC_TIME_MSECS 1000
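/*
 * Editor's illustration (not part of the pblk API): the check implied by
 * GC_LIMIT_INVERSE above - run GC only when fewer than 1/5 (20%) of the
 * blocks are free - can be expressed without a division as follows.
 */
static inline bool pblk_example_gc_needed(unsigned long free_blocks,
					  unsigned long total_blocks)
{
	/* free < total / GC_LIMIT_INVERSE  <=>  free * GC_LIMIT_INVERSE < total */
	return free_blocks * GC_LIMIT_INVERSE < total_blocks;
}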
  38
  39#define PBLK_SECTOR (512)
  40#define PBLK_EXPOSED_PAGE_SIZE (4096)
  41
  42#define PBLK_NR_CLOSE_JOBS (4)
  43
  44#define PBLK_CACHE_NAME_LEN (DISK_NAME_LEN + 16)
  45
  46/* Max 512 LUNs per device */
  47#define PBLK_MAX_LUNS_BITMAP (4)
  48
  49#define NR_PHY_IN_LOG (PBLK_EXPOSED_PAGE_SIZE / PBLK_SECTOR)
  50
  51/* Static pool sizes */
  52#define PBLK_GEN_WS_POOL_SIZE (2)
  53
  54#define PBLK_DEFAULT_OP (11)
  55
  56enum {
  57        PBLK_READ               = READ,
   58        PBLK_WRITE              = WRITE, /* Write from write buffer */
  59        PBLK_WRITE_INT,                 /* Internal write - no write buffer */
  60        PBLK_READ_RECOV,                /* Recovery read - errors allowed */
  61        PBLK_ERASE,
  62};
  63
  64enum {
  65        /* IO Types */
  66        PBLK_IOTYPE_USER        = 1 << 0,
  67        PBLK_IOTYPE_GC          = 1 << 1,
  68
  69        /* Write buffer flags */
  70        PBLK_FLUSH_ENTRY        = 1 << 2,
  71        PBLK_WRITTEN_DATA       = 1 << 3,
  72        PBLK_SUBMITTED_ENTRY    = 1 << 4,
  73        PBLK_WRITABLE_ENTRY     = 1 << 5,
  74};
  75
  76enum {
  77        PBLK_BLK_ST_OPEN =      0x1,
  78        PBLK_BLK_ST_CLOSED =    0x2,
  79};
  80
  81enum {
  82        PBLK_CHUNK_RESET_START,
  83        PBLK_CHUNK_RESET_DONE,
  84        PBLK_CHUNK_RESET_FAILED,
  85};
  86
  87struct pblk_sec_meta {
  88        u64 reserved;
  89        __le64 lba;
  90};
  91
  92/* The number of GC lists and the rate-limiter states go together. This way the
  93 * rate-limiter can dictate how much GC is needed based on resource utilization.
  94 */
  95#define PBLK_GC_NR_LISTS 4
  96
  97enum {
  98        PBLK_RL_OFF = 0,
  99        PBLK_RL_WERR = 1,
 100        PBLK_RL_HIGH = 2,
 101        PBLK_RL_MID = 3,
 102        PBLK_RL_LOW = 4
 103};
 104
 105#define pblk_dma_ppa_size (sizeof(u64) * NVM_MAX_VLBA)
 106
 107/* write buffer completion context */
 108struct pblk_c_ctx {
 109        struct list_head list;          /* Head for out-of-order completion */
 110
 111        unsigned long *lun_bitmap;      /* Luns used on current request */
 112        unsigned int sentry;
 113        unsigned int nr_valid;
 114        unsigned int nr_padded;
 115};
 116
 117/* read context */
 118struct pblk_g_ctx {
 119        void *private;
 120        unsigned long start_time;
 121        u64 lba;
 122};
 123
 124/* Pad context */
 125struct pblk_pad_rq {
 126        struct pblk *pblk;
 127        struct completion wait;
 128        struct kref ref;
 129};
 130
 131/* Recovery context */
 132struct pblk_rec_ctx {
 133        struct pblk *pblk;
 134        struct nvm_rq *rqd;
 135        struct work_struct ws_rec;
 136};
 137
 138/* Write context */
 139struct pblk_w_ctx {
 140        struct bio_list bios;           /* Original bios - used for completion
 141                                         * in REQ_FUA, REQ_FLUSH case
 142                                         */
  143        u64 lba;                        /* Logical addr. associated with entry */
  144        struct ppa_addr ppa;            /* Physical addr. associated with entry */
 145        int flags;                      /* Write context flags */
 146};
 147
 148struct pblk_rb_entry {
 149        struct ppa_addr cacheline;      /* Cacheline for this entry */
 150        void *data;                     /* Pointer to data on this entry */
 151        struct pblk_w_ctx w_ctx;        /* Context for this entry */
 152        struct list_head index;         /* List head to enable indexes */
 153};
 154
 155#define EMPTY_ENTRY (~0U)
 156
 157struct pblk_rb_pages {
 158        struct page *pages;
 159        int order;
 160        struct list_head list;
 161};
 162
 163struct pblk_rb {
 164        struct pblk_rb_entry *entries;  /* Ring buffer entries */
 165        unsigned int mem;               /* Write offset - points to next
 166                                         * writable entry in memory
 167                                         */
 168        unsigned int subm;              /* Read offset - points to last entry
 169                                         * that has been submitted to the media
 170                                         * to be persisted
 171                                         */
 172        unsigned int sync;              /* Synced - backpointer that signals
 173                                         * the last submitted entry that has
 174                                         * been successfully persisted to media
 175                                         */
 176        unsigned int flush_point;       /* Sync point - last entry that must be
 177                                         * flushed to the media. Used with
 178                                         * REQ_FLUSH and REQ_FUA
 179                                         */
 180        unsigned int l2p_update;        /* l2p update point - next entry for
 181                                         * which l2p mapping will be updated to
 182                                         * contain a device ppa address (instead
  183                                         * of a cacheline)
 184                                         */
 185        unsigned int nr_entries;        /* Number of entries in write buffer -
 186                                         * must be a power of two
 187                                         */
 188        unsigned int seg_size;          /* Size of the data segments being
 189                                         * stored on each entry. Typically this
 190                                         * will be 4KB
 191                                         */
 192
 193        unsigned int back_thres;        /* Threshold that shall be maintained by
 194                                         * the backpointer in order to respect
 195                                         * geo->mw_cunits on a per chunk basis
 196                                         */
 197
 198        struct list_head pages;         /* List of data pages */
 199
 200        spinlock_t w_lock;              /* Write lock */
 201        spinlock_t s_lock;              /* Sync lock */
 202
 203#ifdef CONFIG_NVM_PBLK_DEBUG
 204        atomic_t inflight_flush_point;  /* Not served REQ_FLUSH | REQ_FUA */
 205#endif
 206};
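/*
 * Editor's sketch (not a pblk helper): because nr_entries is a power of two,
 * the distance between any two ring pointers above (e.g. mem and sync) can be
 * computed with a single mask, which is the usual arithmetic behind
 * occupancy/free-space helpers such as pblk_rb_read_count() declared later in
 * this header.
 */
static inline unsigned int pblk_rb_example_dist(struct pblk_rb *rb,
						unsigned int head,
						unsigned int tail)
{
	return (head - tail) & (rb->nr_entries - 1);
}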
 207
 208#define PBLK_RECOVERY_SECTORS 16
 209
 210struct pblk_lun {
 211        struct ppa_addr bppa;
 212        struct semaphore wr_sem;
 213};
 214
 215struct pblk_gc_rq {
 216        struct pblk_line *line;
 217        void *data;
 218        u64 paddr_list[NVM_MAX_VLBA];
 219        u64 lba_list[NVM_MAX_VLBA];
 220        int nr_secs;
 221        int secs_to_gc;
 222        struct list_head list;
 223};
 224
 225struct pblk_gc {
 226        /* These states are not protected by a lock since (i) they are in the
 227         * fast path, and (ii) they are not critical.
 228         */
 229        int gc_active;
 230        int gc_enabled;
 231        int gc_forced;
 232
 233        struct task_struct *gc_ts;
 234        struct task_struct *gc_writer_ts;
 235        struct task_struct *gc_reader_ts;
 236
 237        struct workqueue_struct *gc_line_reader_wq;
 238        struct workqueue_struct *gc_reader_wq;
 239
 240        struct timer_list gc_timer;
 241
 242        struct semaphore gc_sem;
 243        atomic_t read_inflight_gc; /* Number of lines with inflight GC reads */
 244        atomic_t pipeline_gc;      /* Number of lines in the GC pipeline -
 245                                    * started reads to finished writes
 246                                    */
 247        int w_entries;
 248
 249        struct list_head w_list;
 250        struct list_head r_list;
 251
 252        spinlock_t lock;
 253        spinlock_t w_lock;
 254        spinlock_t r_lock;
 255};
 256
 257struct pblk_rl {
  258        unsigned int high;      /* Upper threshold for rate limiter (free
  259                                 * run): user I/O is not rate limited above it
 260                                 */
 261        unsigned int high_pw;   /* High rounded up as a power of 2 */
 262
  263#define PBLK_USER_HIGH_THRS 8   /* Begin write limit at 12.5% available blks */
 264#define PBLK_USER_LOW_THRS 10   /* Aggressive GC at 10% available blocks */
 265
 266        int rb_windows_pw;      /* Number of rate windows in the write buffer
 267                                 * given as a power-of-2. This guarantees that
  268                                 * when user I/O is being rate limited, enough
  269                                 * space is reserved for GC to place its
  270                                 * payload. A window is of
  271                                 * pblk->max_write_pgs size, which in NVMe is
  272                                 * 64, i.e., 256KB.
 273                                 */
 274        int rb_budget;          /* Total number of entries available for I/O */
 275        int rb_user_max;        /* Max buffer entries available for user I/O */
 276        int rb_gc_max;          /* Max buffer entries available for GC I/O */
 277        int rb_gc_rsv;          /* Reserved buffer entries for GC I/O */
 278        int rb_state;           /* Rate-limiter current state */
  279        int rb_max_io;          /* Maximum size for an I/O given the config */
 280
 281        atomic_t rb_user_cnt;   /* User I/O buffer counter */
 282        atomic_t rb_gc_cnt;     /* GC I/O buffer counter */
 283        atomic_t rb_space;      /* Space limit in case of reaching capacity */
 284
 285        int rsv_blocks;         /* Reserved blocks for GC */
 286
 287        int rb_user_active;
 288        int rb_gc_active;
 289
  290        atomic_t werr_lines;    /* Number of write error lines that need GC */
 291
 292        struct timer_list u_timer;
 293
 294        unsigned long total_blocks;
 295
 296        atomic_t free_blocks;           /* Total number of free blocks (+ OP) */
 297        atomic_t free_user_blocks;      /* Number of user free blocks (no OP) */
 298};
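/*
 * Worked example (editor's illustration): with PBLK_USER_HIGH_THRS = 8 and
 * PBLK_USER_LOW_THRS = 10, a pblk instance with 1000 user blocks starts
 * limiting user writes once fewer than 1000 / 8 = 125 blocks (12.5%) remain
 * free, and GC becomes aggressive below 1000 / 10 = 100 free blocks (10%).
 */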
 299
 300#define PBLK_LINE_EMPTY (~0U)
 301
 302enum {
 303        /* Line Types */
 304        PBLK_LINETYPE_FREE = 0,
 305        PBLK_LINETYPE_LOG = 1,
 306        PBLK_LINETYPE_DATA = 2,
 307
 308        /* Line state */
 309        PBLK_LINESTATE_NEW = 9,
 310        PBLK_LINESTATE_FREE = 10,
 311        PBLK_LINESTATE_OPEN = 11,
 312        PBLK_LINESTATE_CLOSED = 12,
 313        PBLK_LINESTATE_GC = 13,
 314        PBLK_LINESTATE_BAD = 14,
 315        PBLK_LINESTATE_CORRUPT = 15,
 316
 317        /* GC group */
 318        PBLK_LINEGC_NONE = 20,
 319        PBLK_LINEGC_EMPTY = 21,
 320        PBLK_LINEGC_LOW = 22,
 321        PBLK_LINEGC_MID = 23,
 322        PBLK_LINEGC_HIGH = 24,
 323        PBLK_LINEGC_FULL = 25,
 324        PBLK_LINEGC_WERR = 26
 325};
 326
 327#define PBLK_MAGIC 0x70626c6b /*pblk*/
 328
 329/* emeta/smeta persistent storage format versions:
  330 * Changes in the major version require offline migration.
 331 * Changes in minor version are handled automatically during
 332 * recovery.
 333 */
 334
 335#define SMETA_VERSION_MAJOR (0)
 336#define SMETA_VERSION_MINOR (1)
 337
 338#define EMETA_VERSION_MAJOR (0)
 339#define EMETA_VERSION_MINOR (2)
 340
 341struct line_header {
 342        __le32 crc;
 343        __le32 identifier;      /* pblk identifier */
 344        __u8 uuid[16];          /* instance uuid */
 345        __le16 type;            /* line type */
 346        __u8 version_major;     /* version major */
 347        __u8 version_minor;     /* version minor */
 348        __le32 id;              /* line id for current line */
 349};
 350
 351struct line_smeta {
 352        struct line_header header;
 353
 354        __le32 crc;             /* Full structure including struct crc */
 355        /* Previous line metadata */
 356        __le32 prev_id;         /* Line id for previous line */
 357
 358        /* Current line metadata */
 359        __le64 seq_nr;          /* Sequence number for current line */
 360
 361        /* Active writers */
 362        __le32 window_wr_lun;   /* Number of parallel LUNs to write */
 363
 364        __le32 rsvd[2];
 365
 366        __le64 lun_bitmap[];
 367};
 368
 369
 370/*
 371 * Metadata layout in media:
 372 *      First sector:
 373 *              1. struct line_emeta
 374 *              2. bad block bitmap (u64 * window_wr_lun)
 375 *              3. write amplification counters
 376 *      Mid sectors (start at lbas_sector):
  377 *              4. nr_lbas (u64) forming lba list
  378 *      Last sectors (start at vsc_sector):
  379 *              5. u32 valid sector count (vsc) for all lines (~0U: free line)
 380 */
 381struct line_emeta {
 382        struct line_header header;
 383
 384        __le32 crc;             /* Full structure including struct crc */
 385
 386        /* Previous line metadata */
 387        __le32 prev_id;         /* Line id for prev line */
 388
 389        /* Current line metadata */
 390        __le64 seq_nr;          /* Sequence number for current line */
 391
 392        /* Active writers */
 393        __le32 window_wr_lun;   /* Number of parallel LUNs to write */
 394
 395        /* Bookkeeping for recovery */
 396        __le32 next_id;         /* Line id for next line */
 397        __le64 nr_lbas;         /* Number of lbas mapped in line */
 398        __le64 nr_valid_lbas;   /* Number of valid lbas mapped in line */
 399        __le64 bb_bitmap[];     /* Updated bad block bitmap for line */
 400};
 401
 402
 403/* Write amplification counters stored on media */
 404struct wa_counters {
 405        __le64 user;            /* Number of user written sectors */
  406        __le64 gc;              /* Number of sectors written by GC */
 407        __le64 pad;             /* Number of padded sectors */
 408};
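/*
 * Editor's note: from these counters the usual write-amplification figure is
 * WA = (user + gc + pad) / user. For example, user = 1000, gc = 200 and
 * pad = 50 written sectors give WA = 1250 / 1000 = 1.25.
 */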
 409
 410struct pblk_emeta {
 411        struct line_emeta *buf;         /* emeta buffer in media format */
 412        int mem;                        /* Write offset - points to next
 413                                         * writable entry in memory
 414                                         */
 415        atomic_t sync;                  /* Synced - backpointer that signals the
 416                                         * last entry that has been successfully
 417                                         * persisted to media
 418                                         */
 419        unsigned int nr_entries;        /* Number of emeta entries */
 420};
 421
 422struct pblk_smeta {
 423        struct line_smeta *buf;         /* smeta buffer in persistent format */
 424};
 425
 426struct pblk_w_err_gc {
 427        int has_write_err;
 428        int has_gc_err;
 429        __le64 *lba_list;
 430};
 431
 432struct pblk_line {
 433        struct pblk *pblk;
 434        unsigned int id;                /* Line number corresponds to the
 435                                         * block line
 436                                         */
 437        unsigned int seq_nr;            /* Unique line sequence number */
 438
 439        int state;                      /* PBLK_LINESTATE_X */
 440        int type;                       /* PBLK_LINETYPE_X */
 441        int gc_group;                   /* PBLK_LINEGC_X */
 442        struct list_head list;          /* Free, GC lists */
 443
 444        unsigned long *lun_bitmap;      /* Bitmap for LUNs mapped in line */
 445
 446        struct nvm_chk_meta *chks;      /* Chunks forming line */
 447
 448        struct pblk_smeta *smeta;       /* Start metadata */
  449        struct pblk_emeta *emeta;       /* End metadata */
 450
 451        int meta_line;                  /* Metadata line id */
 452        int meta_distance;              /* Distance between data and metadata */
 453
 454        u64 emeta_ssec;                 /* Sector where emeta starts */
 455
 456        unsigned int sec_in_line;       /* Number of usable secs in line */
 457
 458        atomic_t blk_in_line;           /* Number of good blocks in line */
 459        unsigned long *blk_bitmap;      /* Bitmap for valid/invalid blocks */
 460        unsigned long *erase_bitmap;    /* Bitmap for erased blocks */
 461
 462        unsigned long *map_bitmap;      /* Bitmap for mapped sectors in line */
 463        unsigned long *invalid_bitmap;  /* Bitmap for invalid sectors in line */
 464
 465        atomic_t left_eblks;            /* Blocks left for erasing */
 466        atomic_t left_seblks;           /* Blocks left for sync erasing */
 467
 468        int left_msecs;                 /* Sectors left for mapping */
 469        unsigned int cur_sec;           /* Sector map pointer */
 470        unsigned int nr_valid_lbas;     /* Number of valid lbas in line */
 471
 472        __le32 *vsc;                    /* Valid sector count in line */
 473
 474        struct kref ref;                /* Write buffer L2P references */
 475        atomic_t sec_to_update;         /* Outstanding L2P updates to ppa */
 476
 477        struct pblk_w_err_gc *w_err_gc; /* Write error gc recovery metadata */
 478
 479        spinlock_t lock;                /* Necessary for invalid_bitmap only */
 480};
 481
 482#define PBLK_DATA_LINES 4
 483
 484enum {
 485        PBLK_KMALLOC_META = 1,
 486        PBLK_VMALLOC_META = 2,
 487};
 488
 489enum {
 490        PBLK_EMETA_TYPE_HEADER = 1,     /* struct line_emeta first sector */
 491        PBLK_EMETA_TYPE_LLBA = 2,       /* lba list - type: __le64 */
 492        PBLK_EMETA_TYPE_VSC = 3,        /* vsc list - type: __le32 */
 493};
 494
 495struct pblk_line_mgmt {
 496        int nr_lines;                   /* Total number of full lines */
 497        int nr_free_lines;              /* Number of full lines in free list */
 498
 499        /* Free lists - use free_lock */
 500        struct list_head free_list;     /* Full lines ready to use */
 501        struct list_head corrupt_list;  /* Full lines corrupted */
 502        struct list_head bad_list;      /* Full lines bad */
 503
 504        /* GC lists - use gc_lock */
 505        struct list_head *gc_lists[PBLK_GC_NR_LISTS];
 506        struct list_head gc_high_list;  /* Full lines ready to GC, high isc */
 507        struct list_head gc_mid_list;   /* Full lines ready to GC, mid isc */
 508        struct list_head gc_low_list;   /* Full lines ready to GC, low isc */
 509
 510        struct list_head gc_werr_list;  /* Write err recovery list */
 511
 512        struct list_head gc_full_list;  /* Full lines ready to GC, no valid */
 513        struct list_head gc_empty_list; /* Full lines close, all valid */
 514
 515        struct pblk_line *log_line;     /* Current FTL log line */
 516        struct pblk_line *data_line;    /* Current data line */
 517        struct pblk_line *log_next;     /* Next FTL log line */
 518        struct pblk_line *data_next;    /* Next data line */
 519
 520        struct list_head emeta_list;    /* Lines queued to schedule emeta */
 521
 522        __le32 *vsc_list;               /* Valid sector counts for all lines */
 523
 524        /* Metadata allocation type: VMALLOC | KMALLOC */
 525        int emeta_alloc_type;
 526
 527        /* Pre-allocated metadata for data lines */
 528        struct pblk_smeta *sline_meta[PBLK_DATA_LINES];
 529        struct pblk_emeta *eline_meta[PBLK_DATA_LINES];
 530        unsigned long meta_bitmap;
 531
 532        /* Cache and mempool for map/invalid bitmaps */
 533        struct kmem_cache *bitmap_cache;
 534        mempool_t *bitmap_pool;
 535
 536        /* Helpers for fast bitmap calculations */
 537        unsigned long *bb_template;
 538        unsigned long *bb_aux;
 539
 540        unsigned long d_seq_nr;         /* Data line unique sequence number */
 541        unsigned long l_seq_nr;         /* Log line unique sequence number */
 542
 543        spinlock_t free_lock;
 544        spinlock_t close_lock;
 545        spinlock_t gc_lock;
 546};
 547
 548struct pblk_line_meta {
 549        unsigned int smeta_len;         /* Total length for smeta */
 550        unsigned int smeta_sec;         /* Sectors needed for smeta */
 551
 552        unsigned int emeta_len[4];      /* Lengths for emeta:
 553                                         *  [0]: Total
 554                                         *  [1]: struct line_emeta +
 555                                         *       bb_bitmap + struct wa_counters
 556                                         *  [2]: L2P portion
 557                                         *  [3]: vsc
 558                                         */
 559        unsigned int emeta_sec[4];      /* Sectors needed for emeta. Same layout
 560                                         * as emeta_len
 561                                         */
 562
 563        unsigned int emeta_bb;          /* Boundary for bb that affects emeta */
 564
 565        unsigned int vsc_list_len;      /* Length for vsc list */
 566        unsigned int sec_bitmap_len;    /* Length for sector bitmap in line */
 567        unsigned int blk_bitmap_len;    /* Length for block bitmap in line */
 568        unsigned int lun_bitmap_len;    /* Length for lun bitmap in line */
 569
 570        unsigned int blk_per_line;      /* Number of blocks in a full line */
 571        unsigned int sec_per_line;      /* Number of sectors in a line */
 572        unsigned int dsec_per_line;     /* Number of data sectors in a line */
 573        unsigned int min_blk_line;      /* Min. number of good blocks in line */
 574
 575        unsigned int mid_thrs;          /* Threshold for GC mid list */
 576        unsigned int high_thrs;         /* Threshold for GC high list */
 577
 578        unsigned int meta_distance;     /* Distance between data and metadata */
 579};
 580
 581enum {
 582        PBLK_STATE_RUNNING = 0,
 583        PBLK_STATE_STOPPING = 1,
 584        PBLK_STATE_RECOVERING = 2,
 585        PBLK_STATE_STOPPED = 3,
 586};
 587
  588/* Internal format to support non-power-of-2 device formats */
 589struct pblk_addrf {
 590        /* gen to dev */
 591        int sec_stripe;
 592        int ch_stripe;
 593        int lun_stripe;
 594
 595        /* dev to gen */
 596        int sec_lun_stripe;
 597        int sec_ws_stripe;
 598};
 599
 600struct pblk {
 601        struct nvm_tgt_dev *dev;
 602        struct gendisk *disk;
 603
 604        struct kobject kobj;
 605
 606        struct pblk_lun *luns;
 607
 608        struct pblk_line *lines;                /* Line array */
 609        struct pblk_line_mgmt l_mg;             /* Line management */
 610        struct pblk_line_meta lm;               /* Line metadata */
 611
 612        struct nvm_addrf addrf;         /* Aligned address format */
 613        struct pblk_addrf uaddrf;       /* Unaligned address format */
 614        int addrf_len;
 615
 616        struct pblk_rb rwb;
 617
 618        int state;                      /* pblk line state */
 619
 620        int min_write_pgs; /* Minimum amount of pages required by controller */
 621        int min_write_pgs_data; /* Minimum amount of payload pages */
 622        int max_write_pgs; /* Maximum amount of pages supported by controller */
 623        int oob_meta_size; /* Size of OOB sector metadata */
 624
 625        sector_t capacity; /* Device capacity when bad blocks are subtracted */
 626
 627        int op;      /* Percentage of device used for over-provisioning */
 628        int op_blks; /* Number of blocks used for over-provisioning */
 629
 630        /* pblk provisioning values. Used by rate limiter */
 631        struct pblk_rl rl;
 632
 633        int sec_per_write;
 634
 635        guid_t instance_uuid;
 636
 637        /* Persistent write amplification counters, 4kb sector I/Os */
 638        atomic64_t user_wa;             /* Sectors written by user */
 639        atomic64_t gc_wa;               /* Sectors written by GC */
 640        atomic64_t pad_wa;              /* Padded sectors written */
 641
 642        /* Reset values for delta write amplification measurements */
 643        u64 user_rst_wa;
 644        u64 gc_rst_wa;
 645        u64 pad_rst_wa;
 646
 647        /* Counters used for calculating padding distribution */
 648        atomic64_t *pad_dist;           /* Padding distribution buckets */
 649        u64 nr_flush_rst;               /* Flushes reset value for pad dist.*/
 650        atomic64_t nr_flush;            /* Number of flush/fua I/O */
 651
 652#ifdef CONFIG_NVM_PBLK_DEBUG
 653        /* Non-persistent debug counters, 4kb sector I/Os */
 654        atomic_long_t inflight_writes;  /* Inflight writes (user and gc) */
 655        atomic_long_t padded_writes;    /* Sectors padded due to flush/fua */
 656        atomic_long_t padded_wb;        /* Sectors padded in write buffer */
 657        atomic_long_t req_writes;       /* Sectors stored on write buffer */
 658        atomic_long_t sub_writes;       /* Sectors submitted from buffer */
 659        atomic_long_t sync_writes;      /* Sectors synced to media */
 660        atomic_long_t inflight_reads;   /* Inflight sector read requests */
 661        atomic_long_t cache_reads;      /* Read requests that hit the cache */
 662        atomic_long_t sync_reads;       /* Completed sector read requests */
 663        atomic_long_t recov_writes;     /* Sectors submitted from recovery */
 664        atomic_long_t recov_gc_writes;  /* Sectors submitted from write GC */
 665        atomic_long_t recov_gc_reads;   /* Sectors submitted from read GC */
 666#endif
 667
 668        spinlock_t lock;
 669
 670        atomic_long_t read_failed;
 671        atomic_long_t read_empty;
 672        atomic_long_t read_high_ecc;
 673        atomic_long_t read_failed_gc;
 674        atomic_long_t write_failed;
 675        atomic_long_t erase_failed;
 676
 677        atomic_t inflight_io;           /* General inflight I/O counter */
 678
 679        struct task_struct *writer_ts;
 680
 681        /* Simple translation map of logical addresses to physical addresses.
  682         * The logical addresses are known by the host system, while the physical
 683         * addresses are used when writing to the disk block device.
 684         */
 685        unsigned char *trans_map;
 686        spinlock_t trans_lock;
 687
 688        struct list_head compl_list;
 689
 690        spinlock_t resubmit_lock;        /* Resubmit list lock */
  691        struct list_head resubmit_list; /* Resubmit list for failed writes */
 692
 693        mempool_t page_bio_pool;
 694        mempool_t gen_ws_pool;
 695        mempool_t rec_pool;
 696        mempool_t r_rq_pool;
 697        mempool_t w_rq_pool;
 698        mempool_t e_rq_pool;
 699
 700        struct workqueue_struct *close_wq;
 701        struct workqueue_struct *bb_wq;
 702        struct workqueue_struct *r_end_wq;
 703
 704        struct timer_list wtimer;
 705
 706        struct pblk_gc gc;
 707};
 708
 709struct pblk_line_ws {
 710        struct pblk *pblk;
 711        struct pblk_line *line;
 712        void *priv;
 713        struct work_struct ws;
 714};
 715
 716#define pblk_g_rq_size (sizeof(struct nvm_rq) + sizeof(struct pblk_g_ctx))
 717#define pblk_w_rq_size (sizeof(struct nvm_rq) + sizeof(struct pblk_c_ctx))
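/*
 * Editor's illustration: the sizes above let the request mempools hand out an
 * nvm_rq with its pblk context appended in a single allocation. The context
 * is then reached with nvm_rq_to_pdu(), and nvm_rq_from_c_ctx() below goes
 * the other way, e.g.:
 *
 *	struct nvm_rq *rqd = mempool_alloc(&pblk->w_rq_pool, GFP_KERNEL);
 *	struct pblk_c_ctx *c_ctx = nvm_rq_to_pdu(rqd);
 *
 *	WARN_ON(nvm_rq_from_c_ctx(c_ctx) != rqd);
 */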
 718
 719#define pblk_err(pblk, fmt, ...)                        \
 720        pr_err("pblk %s: " fmt, pblk->disk->disk_name, ##__VA_ARGS__)
 721#define pblk_info(pblk, fmt, ...)                       \
 722        pr_info("pblk %s: " fmt, pblk->disk->disk_name, ##__VA_ARGS__)
 723#define pblk_warn(pblk, fmt, ...)                       \
 724        pr_warn("pblk %s: " fmt, pblk->disk->disk_name, ##__VA_ARGS__)
 725#define pblk_debug(pblk, fmt, ...)                      \
 726        pr_debug("pblk %s: " fmt, pblk->disk->disk_name, ##__VA_ARGS__)
 727
 728/*
 729 * pblk ring buffer operations
 730 */
 731int pblk_rb_init(struct pblk_rb *rb, unsigned int size, unsigned int threshold,
 732                 unsigned int seg_sz);
 733int pblk_rb_may_write_user(struct pblk_rb *rb, struct bio *bio,
 734                           unsigned int nr_entries, unsigned int *pos);
 735int pblk_rb_may_write_gc(struct pblk_rb *rb, unsigned int nr_entries,
 736                         unsigned int *pos);
 737void pblk_rb_write_entry_user(struct pblk_rb *rb, void *data,
 738                              struct pblk_w_ctx w_ctx, unsigned int pos);
 739void pblk_rb_write_entry_gc(struct pblk_rb *rb, void *data,
 740                            struct pblk_w_ctx w_ctx, struct pblk_line *line,
 741                            u64 paddr, unsigned int pos);
 742struct pblk_w_ctx *pblk_rb_w_ctx(struct pblk_rb *rb, unsigned int pos);
 743void pblk_rb_flush(struct pblk_rb *rb);
 744
 745void pblk_rb_sync_l2p(struct pblk_rb *rb);
 746unsigned int pblk_rb_read_to_bio(struct pblk_rb *rb, struct nvm_rq *rqd,
 747                                 unsigned int pos, unsigned int nr_entries,
 748                                 unsigned int count);
 749int pblk_rb_copy_to_bio(struct pblk_rb *rb, struct bio *bio, sector_t lba,
 750                        struct ppa_addr ppa);
 751unsigned int pblk_rb_read_commit(struct pblk_rb *rb, unsigned int entries);
 752
 753unsigned int pblk_rb_sync_init(struct pblk_rb *rb, unsigned long *flags);
 754unsigned int pblk_rb_sync_advance(struct pblk_rb *rb, unsigned int nr_entries);
 755unsigned int pblk_rb_ptr_wrap(struct pblk_rb *rb, unsigned int p,
 756                              unsigned int nr_entries);
 757void pblk_rb_sync_end(struct pblk_rb *rb, unsigned long *flags);
 758unsigned int pblk_rb_flush_point_count(struct pblk_rb *rb);
 759
 760unsigned int pblk_rb_read_count(struct pblk_rb *rb);
 761unsigned int pblk_rb_sync_count(struct pblk_rb *rb);
 762unsigned int pblk_rb_wrap_pos(struct pblk_rb *rb, unsigned int pos);
 763
 764int pblk_rb_tear_down_check(struct pblk_rb *rb);
 765int pblk_rb_pos_oob(struct pblk_rb *rb, u64 pos);
 766void pblk_rb_free(struct pblk_rb *rb);
 767ssize_t pblk_rb_sysfs(struct pblk_rb *rb, char *buf);
 768
 769/*
 770 * pblk core
 771 */
 772struct nvm_rq *pblk_alloc_rqd(struct pblk *pblk, int type);
 773void pblk_free_rqd(struct pblk *pblk, struct nvm_rq *rqd, int type);
 774int pblk_alloc_rqd_meta(struct pblk *pblk, struct nvm_rq *rqd);
 775void pblk_free_rqd_meta(struct pblk *pblk, struct nvm_rq *rqd);
 776void pblk_set_sec_per_write(struct pblk *pblk, int sec_per_write);
 777int pblk_setup_w_rec_rq(struct pblk *pblk, struct nvm_rq *rqd,
 778                        struct pblk_c_ctx *c_ctx);
 779void pblk_discard(struct pblk *pblk, struct bio *bio);
 780struct nvm_chk_meta *pblk_get_chunk_meta(struct pblk *pblk);
 781struct nvm_chk_meta *pblk_chunk_get_off(struct pblk *pblk,
 782                                              struct nvm_chk_meta *lp,
 783                                              struct ppa_addr ppa);
 784void pblk_log_write_err(struct pblk *pblk, struct nvm_rq *rqd);
 785void pblk_log_read_err(struct pblk *pblk, struct nvm_rq *rqd);
 786int pblk_submit_io(struct pblk *pblk, struct nvm_rq *rqd);
 787int pblk_submit_io_sync(struct pblk *pblk, struct nvm_rq *rqd);
 788int pblk_submit_io_sync_sem(struct pblk *pblk, struct nvm_rq *rqd);
 789int pblk_submit_meta_io(struct pblk *pblk, struct pblk_line *meta_line);
 790void pblk_check_chunk_state_update(struct pblk *pblk, struct nvm_rq *rqd);
 791struct bio *pblk_bio_map_addr(struct pblk *pblk, void *data,
 792                              unsigned int nr_secs, unsigned int len,
 793                              int alloc_type, gfp_t gfp_mask);
 794struct pblk_line *pblk_line_get(struct pblk *pblk);
 795struct pblk_line *pblk_line_get_first_data(struct pblk *pblk);
 796struct pblk_line *pblk_line_replace_data(struct pblk *pblk);
 797void pblk_ppa_to_line_put(struct pblk *pblk, struct ppa_addr ppa);
 798void pblk_rq_to_line_put(struct pblk *pblk, struct nvm_rq *rqd);
 799int pblk_line_recov_alloc(struct pblk *pblk, struct pblk_line *line);
 800void pblk_line_recov_close(struct pblk *pblk, struct pblk_line *line);
 801struct pblk_line *pblk_line_get_data(struct pblk *pblk);
 802struct pblk_line *pblk_line_get_erase(struct pblk *pblk);
 803int pblk_line_erase(struct pblk *pblk, struct pblk_line *line);
 804int pblk_line_is_full(struct pblk_line *line);
 805void pblk_line_free(struct pblk_line *line);
 806void pblk_line_close_meta(struct pblk *pblk, struct pblk_line *line);
 807void pblk_line_close(struct pblk *pblk, struct pblk_line *line);
 808void pblk_line_close_ws(struct work_struct *work);
 809void pblk_pipeline_stop(struct pblk *pblk);
 810void __pblk_pipeline_stop(struct pblk *pblk);
 811void __pblk_pipeline_flush(struct pblk *pblk);
 812void pblk_gen_run_ws(struct pblk *pblk, struct pblk_line *line, void *priv,
 813                     void (*work)(struct work_struct *), gfp_t gfp_mask,
 814                     struct workqueue_struct *wq);
 815u64 pblk_line_smeta_start(struct pblk *pblk, struct pblk_line *line);
 816int pblk_line_smeta_read(struct pblk *pblk, struct pblk_line *line);
 817int pblk_line_emeta_read(struct pblk *pblk, struct pblk_line *line,
 818                         void *emeta_buf);
 819int pblk_blk_erase_async(struct pblk *pblk, struct ppa_addr erase_ppa);
 820void pblk_line_put(struct kref *ref);
 821void pblk_line_put_wq(struct kref *ref);
 822struct list_head *pblk_line_gc_list(struct pblk *pblk, struct pblk_line *line);
 823u64 pblk_lookup_page(struct pblk *pblk, struct pblk_line *line);
 824void pblk_dealloc_page(struct pblk *pblk, struct pblk_line *line, int nr_secs);
 825u64 pblk_alloc_page(struct pblk *pblk, struct pblk_line *line, int nr_secs);
 826u64 __pblk_alloc_page(struct pblk *pblk, struct pblk_line *line, int nr_secs);
 827int pblk_calc_secs(struct pblk *pblk, unsigned long secs_avail,
 828                   unsigned long secs_to_flush, bool skip_meta);
 829void pblk_down_rq(struct pblk *pblk, struct ppa_addr ppa,
 830                  unsigned long *lun_bitmap);
 831void pblk_down_chunk(struct pblk *pblk, struct ppa_addr ppa);
 832void pblk_up_chunk(struct pblk *pblk, struct ppa_addr ppa);
 833void pblk_up_rq(struct pblk *pblk, unsigned long *lun_bitmap);
 834int pblk_bio_add_pages(struct pblk *pblk, struct bio *bio, gfp_t flags,
 835                       int nr_pages);
 836void pblk_bio_free_pages(struct pblk *pblk, struct bio *bio, int off,
 837                         int nr_pages);
 838void pblk_map_invalidate(struct pblk *pblk, struct ppa_addr ppa);
 839void __pblk_map_invalidate(struct pblk *pblk, struct pblk_line *line,
 840                           u64 paddr);
 841void pblk_update_map(struct pblk *pblk, sector_t lba, struct ppa_addr ppa);
 842void pblk_update_map_cache(struct pblk *pblk, sector_t lba,
 843                           struct ppa_addr ppa);
 844void pblk_update_map_dev(struct pblk *pblk, sector_t lba,
 845                         struct ppa_addr ppa, struct ppa_addr entry_line);
 846int pblk_update_map_gc(struct pblk *pblk, sector_t lba, struct ppa_addr ppa,
 847                       struct pblk_line *gc_line, u64 paddr);
 848void pblk_lookup_l2p_rand(struct pblk *pblk, struct ppa_addr *ppas,
 849                          u64 *lba_list, int nr_secs);
 850int pblk_lookup_l2p_seq(struct pblk *pblk, struct ppa_addr *ppas,
 851                         sector_t blba, int nr_secs, bool *from_cache);
 852void *pblk_get_meta_for_writes(struct pblk *pblk, struct nvm_rq *rqd);
 853void pblk_get_packed_meta(struct pblk *pblk, struct nvm_rq *rqd);
 854
 855/*
 856 * pblk user I/O write path
 857 */
 858void pblk_write_to_cache(struct pblk *pblk, struct bio *bio,
 859                        unsigned long flags);
 860int pblk_write_gc_to_cache(struct pblk *pblk, struct pblk_gc_rq *gc_rq);
 861
 862/*
 863 * pblk map
 864 */
 865int pblk_map_erase_rq(struct pblk *pblk, struct nvm_rq *rqd,
 866                       unsigned int sentry, unsigned long *lun_bitmap,
 867                       unsigned int valid_secs, struct ppa_addr *erase_ppa);
 868int pblk_map_rq(struct pblk *pblk, struct nvm_rq *rqd, unsigned int sentry,
 869                 unsigned long *lun_bitmap, unsigned int valid_secs,
 870                 unsigned int off);
 871
 872/*
 873 * pblk write thread
 874 */
 875int pblk_write_ts(void *data);
 876void pblk_write_timer_fn(struct timer_list *t);
 877void pblk_write_should_kick(struct pblk *pblk);
 878void pblk_write_kick(struct pblk *pblk);
 879
 880/*
 881 * pblk read path
 882 */
 883extern struct bio_set pblk_bio_set;
 884void pblk_submit_read(struct pblk *pblk, struct bio *bio);
 885int pblk_submit_read_gc(struct pblk *pblk, struct pblk_gc_rq *gc_rq);
 886/*
 887 * pblk recovery
 888 */
 889struct pblk_line *pblk_recov_l2p(struct pblk *pblk);
 890int pblk_recov_pad(struct pblk *pblk);
 891int pblk_recov_check_emeta(struct pblk *pblk, struct line_emeta *emeta);
 892
 893/*
 894 * pblk gc
 895 */
 896#define PBLK_GC_MAX_READERS 8   /* Max number of outstanding GC reader jobs */
 897#define PBLK_GC_RQ_QD 128       /* Queue depth for inflight GC requests */
 898#define PBLK_GC_L_QD 4          /* Queue depth for inflight GC lines */
 899
 900int pblk_gc_init(struct pblk *pblk);
 901void pblk_gc_exit(struct pblk *pblk, bool graceful);
 902void pblk_gc_should_start(struct pblk *pblk);
 903void pblk_gc_should_stop(struct pblk *pblk);
 904void pblk_gc_should_kick(struct pblk *pblk);
 905void pblk_gc_free_full_lines(struct pblk *pblk);
 906void pblk_gc_sysfs_state_show(struct pblk *pblk, int *gc_enabled,
 907                              int *gc_active);
 908int pblk_gc_sysfs_force(struct pblk *pblk, int force);
 909void pblk_put_line_back(struct pblk *pblk, struct pblk_line *line);
 910
 911/*
 912 * pblk rate limiter
 913 */
 914void pblk_rl_init(struct pblk_rl *rl, int budget, int threshold);
 915void pblk_rl_free(struct pblk_rl *rl);
 916void pblk_rl_update_rates(struct pblk_rl *rl);
 917int pblk_rl_high_thrs(struct pblk_rl *rl);
 918unsigned long pblk_rl_nr_free_blks(struct pblk_rl *rl);
 919unsigned long pblk_rl_nr_user_free_blks(struct pblk_rl *rl);
 920int pblk_rl_user_may_insert(struct pblk_rl *rl, int nr_entries);
 921void pblk_rl_inserted(struct pblk_rl *rl, int nr_entries);
 922void pblk_rl_user_in(struct pblk_rl *rl, int nr_entries);
 923int pblk_rl_gc_may_insert(struct pblk_rl *rl, int nr_entries);
 924void pblk_rl_gc_in(struct pblk_rl *rl, int nr_entries);
 925void pblk_rl_out(struct pblk_rl *rl, int nr_user, int nr_gc);
 926int pblk_rl_max_io(struct pblk_rl *rl);
 927void pblk_rl_free_lines_inc(struct pblk_rl *rl, struct pblk_line *line);
 928void pblk_rl_free_lines_dec(struct pblk_rl *rl, struct pblk_line *line,
 929                            bool used);
 930int pblk_rl_is_limit(struct pblk_rl *rl);
 931
 932void pblk_rl_werr_line_in(struct pblk_rl *rl);
 933void pblk_rl_werr_line_out(struct pblk_rl *rl);
 934
 935/*
 936 * pblk sysfs
 937 */
 938int pblk_sysfs_init(struct gendisk *tdisk);
 939void pblk_sysfs_exit(struct gendisk *tdisk);
 940
 941static inline void *pblk_malloc(size_t size, int type, gfp_t flags)
 942{
 943        if (type == PBLK_KMALLOC_META)
 944                return kmalloc(size, flags);
 945        return vmalloc(size);
 946}
 947
 948static inline void pblk_mfree(void *ptr, int type)
 949{
 950        if (type == PBLK_KMALLOC_META)
 951                kfree(ptr);
 952        else
 953                vfree(ptr);
 954}
 955
 956static inline struct nvm_rq *nvm_rq_from_c_ctx(void *c_ctx)
 957{
 958        return c_ctx - sizeof(struct nvm_rq);
 959}
 960
 961static inline void *emeta_to_bb(struct line_emeta *emeta)
 962{
 963        return emeta->bb_bitmap;
 964}
 965
 966static inline void *emeta_to_wa(struct pblk_line_meta *lm,
 967                                struct line_emeta *emeta)
 968{
 969        return emeta->bb_bitmap + lm->blk_bitmap_len;
 970}
 971
 972static inline void *emeta_to_lbas(struct pblk *pblk, struct line_emeta *emeta)
 973{
 974        return ((void *)emeta + pblk->lm.emeta_len[1]);
 975}
 976
 977static inline void *emeta_to_vsc(struct pblk *pblk, struct line_emeta *emeta)
 978{
 979        return (emeta_to_lbas(pblk, emeta) + pblk->lm.emeta_len[2]);
 980}
 981
 982static inline int pblk_line_vsc(struct pblk_line *line)
 983{
 984        return le32_to_cpu(*line->vsc);
 985}
 986
 987static inline int pblk_ppa_to_line_id(struct ppa_addr p)
 988{
 989        return p.a.blk;
 990}
 991
 992static inline struct pblk_line *pblk_ppa_to_line(struct pblk *pblk,
 993                                                 struct ppa_addr p)
 994{
 995        return &pblk->lines[pblk_ppa_to_line_id(p)];
 996}
 997
 998static inline int pblk_ppa_to_pos(struct nvm_geo *geo, struct ppa_addr p)
 999{
1000        return p.a.lun * geo->num_ch + p.a.ch;
1001}
1002
1003static inline struct ppa_addr addr_to_gen_ppa(struct pblk *pblk, u64 paddr,
1004                                              u64 line_id)
1005{
1006        struct nvm_tgt_dev *dev = pblk->dev;
1007        struct nvm_geo *geo = &dev->geo;
1008        struct ppa_addr ppa;
1009
1010        if (geo->version == NVM_OCSSD_SPEC_12) {
1011                struct nvm_addrf_12 *ppaf = (struct nvm_addrf_12 *)&pblk->addrf;
1012
1013                ppa.ppa = 0;
1014                ppa.g.blk = line_id;
1015                ppa.g.pg = (paddr & ppaf->pg_mask) >> ppaf->pg_offset;
1016                ppa.g.lun = (paddr & ppaf->lun_mask) >> ppaf->lun_offset;
1017                ppa.g.ch = (paddr & ppaf->ch_mask) >> ppaf->ch_offset;
1018                ppa.g.pl = (paddr & ppaf->pln_mask) >> ppaf->pln_offset;
1019                ppa.g.sec = (paddr & ppaf->sec_mask) >> ppaf->sec_offset;
1020        } else {
1021                struct pblk_addrf *uaddrf = &pblk->uaddrf;
1022                int secs, chnls, luns;
1023
1024                ppa.ppa = 0;
1025
1026                ppa.m.chk = line_id;
1027
1028                paddr = div_u64_rem(paddr, uaddrf->sec_stripe, &secs);
1029                ppa.m.sec = secs;
1030
1031                paddr = div_u64_rem(paddr, uaddrf->ch_stripe, &chnls);
1032                ppa.m.grp = chnls;
1033
1034                paddr = div_u64_rem(paddr, uaddrf->lun_stripe, &luns);
1035                ppa.m.pu = luns;
1036
1037                ppa.m.sec += uaddrf->sec_stripe * paddr;
1038        }
1039
1040        return ppa;
1041}
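/*
 * Worked example (editor's illustration) for the non-1.2 branch above:
 * assume sec_stripe = 4, ch_stripe = 2 and lun_stripe = 4. For paddr = 37:
 *
 *	37 = 9 * 4 + 1	-> ppa.m.sec = 1, paddr = 9
 *	 9 = 4 * 2 + 1	-> ppa.m.grp = 1, paddr = 4
 *	 4 = 1 * 4 + 0	-> ppa.m.pu  = 0, paddr = 1
 *	sec += 4 * 1	-> ppa.m.sec = 5
 *
 * i.e. paddr 37 maps to channel group 1, parallel unit 0, sector 5 of the
 * line's chunk.
 */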
1042
1043static inline struct nvm_chk_meta *pblk_dev_ppa_to_chunk(struct pblk *pblk,
1044                                                        struct ppa_addr p)
1045{
1046        struct nvm_tgt_dev *dev = pblk->dev;
1047        struct nvm_geo *geo = &dev->geo;
1048        struct pblk_line *line = pblk_ppa_to_line(pblk, p);
1049        int pos = pblk_ppa_to_pos(geo, p);
1050
1051        return &line->chks[pos];
1052}
1053
1054static inline u64 pblk_dev_ppa_to_chunk_addr(struct pblk *pblk,
1055                                                        struct ppa_addr p)
1056{
1057        struct nvm_tgt_dev *dev = pblk->dev;
1058
1059        return dev_to_chunk_addr(dev->parent, &pblk->addrf, p);
1060}
1061
1062static inline u64 pblk_dev_ppa_to_line_addr(struct pblk *pblk,
1063                                                        struct ppa_addr p)
1064{
1065        struct nvm_tgt_dev *dev = pblk->dev;
1066        struct nvm_geo *geo = &dev->geo;
1067        u64 paddr;
1068
1069        if (geo->version == NVM_OCSSD_SPEC_12) {
1070                struct nvm_addrf_12 *ppaf = (struct nvm_addrf_12 *)&pblk->addrf;
1071
1072                paddr = (u64)p.g.ch << ppaf->ch_offset;
1073                paddr |= (u64)p.g.lun << ppaf->lun_offset;
1074                paddr |= (u64)p.g.pg << ppaf->pg_offset;
1075                paddr |= (u64)p.g.pl << ppaf->pln_offset;
1076                paddr |= (u64)p.g.sec << ppaf->sec_offset;
1077        } else {
1078                struct pblk_addrf *uaddrf = &pblk->uaddrf;
1079                u64 secs = p.m.sec;
1080                int sec_stripe;
1081
1082                paddr = (u64)p.m.grp * uaddrf->sec_stripe;
1083                paddr += (u64)p.m.pu * uaddrf->sec_lun_stripe;
1084
1085                secs = div_u64_rem(secs, uaddrf->sec_stripe, &sec_stripe);
1086                paddr += secs * uaddrf->sec_ws_stripe;
1087                paddr += sec_stripe;
1088        }
1089
1090        return paddr;
1091}
1092
1093static inline struct ppa_addr pblk_ppa32_to_ppa64(struct pblk *pblk, u32 ppa32)
1094{
1095        struct nvm_tgt_dev *dev = pblk->dev;
1096
1097        return nvm_ppa32_to_ppa64(dev->parent, &pblk->addrf, ppa32);
1098}
1099
1100static inline u32 pblk_ppa64_to_ppa32(struct pblk *pblk, struct ppa_addr ppa64)
1101{
1102        struct nvm_tgt_dev *dev = pblk->dev;
1103
1104        return nvm_ppa64_to_ppa32(dev->parent, &pblk->addrf, ppa64);
1105}
1106
1107static inline struct ppa_addr pblk_trans_map_get(struct pblk *pblk,
1108                                                                sector_t lba)
1109{
1110        struct ppa_addr ppa;
1111
1112        if (pblk->addrf_len < 32) {
1113                u32 *map = (u32 *)pblk->trans_map;
1114
1115                ppa = pblk_ppa32_to_ppa64(pblk, map[lba]);
1116        } else {
1117                struct ppa_addr *map = (struct ppa_addr *)pblk->trans_map;
1118
1119                ppa = map[lba];
1120        }
1121
1122        return ppa;
1123}
1124
1125static inline void pblk_trans_map_set(struct pblk *pblk, sector_t lba,
1126                                                struct ppa_addr ppa)
1127{
1128        if (pblk->addrf_len < 32) {
1129                u32 *map = (u32 *)pblk->trans_map;
1130
1131                map[lba] = pblk_ppa64_to_ppa32(pblk, ppa);
1132        } else {
1133                u64 *map = (u64 *)pblk->trans_map;
1134
1135                map[lba] = ppa.ppa;
1136        }
1137}
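/*
 * Editor's note on footprint: when the device address format fits in 32 bits
 * (addrf_len < 32), each mapped 4KB sector costs 4 bytes of host memory, i.e.
 * roughly 1GB of trans_map per 1TB of exposed capacity; with full 64-bit
 * ppa_addr entries it is about 2GB per 1TB.
 */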
1138
1139static inline int pblk_ppa_empty(struct ppa_addr ppa_addr)
1140{
1141        return (ppa_addr.ppa == ADDR_EMPTY);
1142}
1143
1144static inline void pblk_ppa_set_empty(struct ppa_addr *ppa_addr)
1145{
1146        ppa_addr->ppa = ADDR_EMPTY;
1147}
1148
1149static inline bool pblk_ppa_comp(struct ppa_addr lppa, struct ppa_addr rppa)
1150{
1151        return (lppa.ppa == rppa.ppa);
1152}
1153
1154static inline int pblk_addr_in_cache(struct ppa_addr ppa)
1155{
1156        return (ppa.ppa != ADDR_EMPTY && ppa.c.is_cached);
1157}
1158
1159static inline int pblk_addr_to_cacheline(struct ppa_addr ppa)
1160{
1161        return ppa.c.line;
1162}
1163
1164static inline struct ppa_addr pblk_cacheline_to_addr(int addr)
1165{
1166        struct ppa_addr p;
1167
1168        p.c.line = addr;
1169        p.c.is_cached = 1;
1170
1171        return p;
1172}
1173
1174static inline u32 pblk_calc_meta_header_crc(struct pblk *pblk,
1175                                            struct line_header *header)
1176{
1177        u32 crc = ~(u32)0;
1178
1179        crc = crc32_le(crc, (unsigned char *)header + sizeof(crc),
1180                                sizeof(struct line_header) - sizeof(crc));
1181
1182        return crc;
1183}
1184
1185static inline u32 pblk_calc_smeta_crc(struct pblk *pblk,
1186                                      struct line_smeta *smeta)
1187{
1188        struct pblk_line_meta *lm = &pblk->lm;
1189        u32 crc = ~(u32)0;
1190
1191        crc = crc32_le(crc, (unsigned char *)smeta +
1192                                sizeof(struct line_header) + sizeof(crc),
1193                                lm->smeta_len -
1194                                sizeof(struct line_header) - sizeof(crc));
1195
1196        return crc;
1197}
1198
1199static inline u32 pblk_calc_emeta_crc(struct pblk *pblk,
1200                                      struct line_emeta *emeta)
1201{
1202        struct pblk_line_meta *lm = &pblk->lm;
1203        u32 crc = ~(u32)0;
1204
1205        crc = crc32_le(crc, (unsigned char *)emeta +
1206                                sizeof(struct line_header) + sizeof(crc),
1207                                lm->emeta_len[0] -
1208                                sizeof(struct line_header) - sizeof(crc));
1209
1210        return crc;
1211}
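/*
 * Typical usage (editor's illustration): metadata read back from media is
 * only trusted if the stored CRC matches the recomputed one. Given a
 * struct line_smeta *smeta read from a line, the check would look like:
 *
 *	if (le32_to_cpu(smeta->header.crc) !=
 *			pblk_calc_meta_header_crc(pblk, &smeta->header))
 *		return -EINVAL;
 */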
1212
1213static inline int pblk_io_aligned(struct pblk *pblk, int nr_secs)
1214{
1215        return !(nr_secs % pblk->min_write_pgs);
1216}
1217
1218#ifdef CONFIG_NVM_PBLK_DEBUG
1219static inline void print_ppa(struct pblk *pblk, struct ppa_addr *p,
1220                             char *msg, int error)
1221{
1222        struct nvm_geo *geo = &pblk->dev->geo;
1223
1224        if (p->c.is_cached) {
1225                pblk_err(pblk, "ppa: (%s: %x) cache line: %llu\n",
1226                                msg, error, (u64)p->c.line);
1227        } else if (geo->version == NVM_OCSSD_SPEC_12) {
1228                pblk_err(pblk, "ppa: (%s: %x):ch:%d,lun:%d,blk:%d,pg:%d,pl:%d,sec:%d\n",
1229                        msg, error,
1230                        p->g.ch, p->g.lun, p->g.blk,
1231                        p->g.pg, p->g.pl, p->g.sec);
1232        } else {
1233                pblk_err(pblk, "ppa: (%s: %x):ch:%d,lun:%d,chk:%d,sec:%d\n",
1234                        msg, error,
1235                        p->m.grp, p->m.pu, p->m.chk, p->m.sec);
1236        }
1237}
1238
1239static inline void pblk_print_failed_rqd(struct pblk *pblk, struct nvm_rq *rqd,
1240                                         int error)
1241{
1242        int bit = -1;
1243
1244        if (rqd->nr_ppas ==  1) {
1245                print_ppa(pblk, &rqd->ppa_addr, "rqd", error);
1246                return;
1247        }
1248
1249        while ((bit = find_next_bit((void *)&rqd->ppa_status, rqd->nr_ppas,
1250                                                bit + 1)) < rqd->nr_ppas) {
1251                print_ppa(pblk, &rqd->ppa_list[bit], "rqd", error);
1252        }
1253
1254        pblk_err(pblk, "error:%d, ppa_status:%llx\n", error, rqd->ppa_status);
1255}
1256
1257static inline int pblk_boundary_ppa_checks(struct nvm_tgt_dev *tgt_dev,
1258                                       struct ppa_addr *ppas, int nr_ppas)
1259{
1260        struct nvm_geo *geo = &tgt_dev->geo;
1261        struct ppa_addr *ppa;
1262        int i;
1263
1264        for (i = 0; i < nr_ppas; i++) {
1265                ppa = &ppas[i];
1266
1267                if (geo->version == NVM_OCSSD_SPEC_12) {
1268                        if (!ppa->c.is_cached &&
1269                                        ppa->g.ch < geo->num_ch &&
1270                                        ppa->g.lun < geo->num_lun &&
1271                                        ppa->g.pl < geo->num_pln &&
1272                                        ppa->g.blk < geo->num_chk &&
1273                                        ppa->g.pg < geo->num_pg &&
1274                                        ppa->g.sec < geo->ws_min)
1275                                continue;
1276                } else {
1277                        if (!ppa->c.is_cached &&
1278                                        ppa->m.grp < geo->num_ch &&
1279                                        ppa->m.pu < geo->num_lun &&
1280                                        ppa->m.chk < geo->num_chk &&
1281                                        ppa->m.sec < geo->clba)
1282                                continue;
1283                }
1284
1285                print_ppa(tgt_dev->q->queuedata, ppa, "boundary", i);
1286
1287                return 1;
1288        }
1289        return 0;
1290}
1291
1292static inline int pblk_check_io(struct pblk *pblk, struct nvm_rq *rqd)
1293{
1294        struct nvm_tgt_dev *dev = pblk->dev;
1295        struct ppa_addr *ppa_list = nvm_rq_to_ppa_list(rqd);
1296
1297        if (pblk_boundary_ppa_checks(dev, ppa_list, rqd->nr_ppas)) {
1298                WARN_ON(1);
1299                return -EINVAL;
1300        }
1301
1302        if (rqd->opcode == NVM_OP_PWRITE) {
1303                struct pblk_line *line;
1304                int i;
1305
1306                for (i = 0; i < rqd->nr_ppas; i++) {
1307                        line = pblk_ppa_to_line(pblk, ppa_list[i]);
1308
1309                        spin_lock(&line->lock);
1310                        if (line->state != PBLK_LINESTATE_OPEN) {
1311                                pblk_err(pblk, "bad ppa: line:%d,state:%d\n",
1312                                                        line->id, line->state);
1313                                WARN_ON(1);
1314                                spin_unlock(&line->lock);
1315                                return -EINVAL;
1316                        }
1317                        spin_unlock(&line->lock);
1318                }
1319        }
1320
1321        return 0;
1322}
1323#endif
1324
1325static inline int pblk_boundary_paddr_checks(struct pblk *pblk, u64 paddr)
1326{
1327        struct pblk_line_meta *lm = &pblk->lm;
1328
1329        if (paddr > lm->sec_per_line)
1330                return 1;
1331
1332        return 0;
1333}
1334
1335static inline unsigned int pblk_get_bi_idx(struct bio *bio)
1336{
1337        return bio->bi_iter.bi_idx;
1338}
1339
1340static inline sector_t pblk_get_lba(struct bio *bio)
1341{
1342        return bio->bi_iter.bi_sector / NR_PHY_IN_LOG;
1343}
1344
1345static inline unsigned int pblk_get_secs(struct bio *bio)
1346{
1347        return  bio->bi_iter.bi_size / PBLK_EXPOSED_PAGE_SIZE;
1348}
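/*
 * Worked example (editor's illustration): NR_PHY_IN_LOG is 4096 / 512 = 8, so
 * a bio with bi_sector = 2048 (512B units) and bi_size = 32768 bytes maps to
 * lba 2048 / 8 = 256 and 32768 / 4096 = 8 exposed 4KB sectors.
 */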
1349
1350static inline char *pblk_disk_name(struct pblk *pblk)
1351{
1352        struct gendisk *disk = pblk->disk;
1353
1354        return disk->disk_name;
1355}
1356
1357static inline unsigned int pblk_get_min_chks(struct pblk *pblk)
1358{
1359        struct pblk_line_meta *lm = &pblk->lm;
1360        /* In a worst-case scenario every line will have OP invalid sectors.
1361         * We will then need a minimum of 1/OP lines to free up a single line
1362         */
1363
1364        return DIV_ROUND_UP(100, pblk->op) * lm->blk_per_line;
1365}
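/*
 * Worked example (editor's illustration): with the default op of 11% this is
 * DIV_ROUND_UP(100, 11) = 10, i.e. ten lines' worth of chunks
 * (10 * blk_per_line) are needed, in the worst case, to free up a single line.
 */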
1366
1367static inline struct pblk_sec_meta *pblk_get_meta(struct pblk *pblk,
1368                                                         void *meta, int index)
1369{
1370        return meta +
1371               max_t(int, sizeof(struct pblk_sec_meta), pblk->oob_meta_size)
1372               * index;
1373}
1374
1375static inline int pblk_dma_meta_size(struct pblk *pblk)
1376{
1377        return max_t(int, sizeof(struct pblk_sec_meta), pblk->oob_meta_size)
1378               * NVM_MAX_VLBA;
1379}
1380
1381static inline int pblk_is_oob_meta_supported(struct pblk *pblk)
1382{
1383        return pblk->oob_meta_size >= sizeof(struct pblk_sec_meta);
1384}
1385#endif /* PBLK_H_ */
1386