// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2016 CNEX Labs
 * Initial release: Javier Gonzalez <javier@cnexlabs.com>
 *                  Matias Bjorling <matias@cnexlabs.com>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version
 * 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * pblk-core.c - pblk's core functionality
 *
 */

#define CREATE_TRACE_POINTS

#include "pblk.h"
#include "pblk-trace.h"

static void pblk_line_mark_bb(struct work_struct *work)
{
        struct pblk_line_ws *line_ws = container_of(work, struct pblk_line_ws,
                                                                        ws);
        struct pblk *pblk = line_ws->pblk;
        struct nvm_tgt_dev *dev = pblk->dev;
        struct ppa_addr *ppa = line_ws->priv;
        int ret;

        ret = nvm_set_chunk_meta(dev, ppa, 1, NVM_BLK_T_GRWN_BAD);
        if (ret) {
                struct pblk_line *line;
                int pos;

                line = pblk_ppa_to_line(pblk, *ppa);
                pos = pblk_ppa_to_pos(&dev->geo, *ppa);

                pblk_err(pblk, "failed to mark bb, line:%d, pos:%d\n",
                                line->id, pos);
        }

        kfree(ppa);
        mempool_free(line_ws, &pblk->gen_ws_pool);
}

static void pblk_mark_bb(struct pblk *pblk, struct pblk_line *line,
                         struct ppa_addr ppa_addr)
{
        struct nvm_tgt_dev *dev = pblk->dev;
        struct nvm_geo *geo = &dev->geo;
        struct ppa_addr *ppa;
        int pos = pblk_ppa_to_pos(geo, ppa_addr);

        pblk_debug(pblk, "erase failed: line:%d, pos:%d\n", line->id, pos);
        atomic_long_inc(&pblk->erase_failed);

        atomic_dec(&line->blk_in_line);
        if (test_and_set_bit(pos, line->blk_bitmap))
                pblk_err(pblk, "attempted to erase bb: line:%d, pos:%d\n",
                                                        line->id, pos);

        /* Not necessary to mark bad blocks on 2.0 spec. */
        if (geo->version == NVM_OCSSD_SPEC_20)
                return;

        ppa = kmalloc(sizeof(struct ppa_addr), GFP_ATOMIC);
        if (!ppa)
                return;

        *ppa = ppa_addr;
        pblk_gen_run_ws(pblk, NULL, ppa, pblk_line_mark_bb,
                                                GFP_ATOMIC, pblk->bb_wq);
}

static void __pblk_end_io_erase(struct pblk *pblk, struct nvm_rq *rqd)
{
        struct nvm_tgt_dev *dev = pblk->dev;
        struct nvm_geo *geo = &dev->geo;
        struct nvm_chk_meta *chunk;
        struct pblk_line *line;
        int pos;

        line = pblk_ppa_to_line(pblk, rqd->ppa_addr);
        pos = pblk_ppa_to_pos(geo, rqd->ppa_addr);
        chunk = &line->chks[pos];

        atomic_dec(&line->left_seblks);

        if (rqd->error) {
                trace_pblk_chunk_reset(pblk_disk_name(pblk),
                                &rqd->ppa_addr, PBLK_CHUNK_RESET_FAILED);

                chunk->state = NVM_CHK_ST_OFFLINE;
                pblk_mark_bb(pblk, line, rqd->ppa_addr);
        } else {
                trace_pblk_chunk_reset(pblk_disk_name(pblk),
                                &rqd->ppa_addr, PBLK_CHUNK_RESET_DONE);

                chunk->state = NVM_CHK_ST_FREE;
        }

        trace_pblk_chunk_state(pblk_disk_name(pblk), &rqd->ppa_addr,
                                chunk->state);

        atomic_dec(&pblk->inflight_io);
}

/* Erase completion assumes that only one block is erased at a time */
static void pblk_end_io_erase(struct nvm_rq *rqd)
{
        struct pblk *pblk = rqd->private;

        __pblk_end_io_erase(pblk, rqd);
        mempool_free(rqd, &pblk->e_rq_pool);
}

/*
 * Get information for all chunks from the device.
 *
 * The caller is responsible for freeing (with vfree) the returned structure.
 */
struct nvm_chk_meta *pblk_get_chunk_meta(struct pblk *pblk)
{
        struct nvm_tgt_dev *dev = pblk->dev;
        struct nvm_geo *geo = &dev->geo;
        struct nvm_chk_meta *meta;
        struct ppa_addr ppa;
        unsigned long len;
        int ret;

        ppa.ppa = 0;

        len = geo->all_chunks * sizeof(*meta);
        meta = vzalloc(len);
        if (!meta)
                return ERR_PTR(-ENOMEM);

        ret = nvm_get_chunk_meta(dev, ppa, geo->all_chunks, meta);
        if (ret) {
                vfree(meta);
                return ERR_PTR(-EIO);
        }

        return meta;
}

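/* Locate the metadata entry for a given chunk within the flat array returned
 * by pblk_get_chunk_meta(). Entries are laid out group-major, then parallel
 * unit, then chunk.
 */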
struct nvm_chk_meta *pblk_chunk_get_off(struct pblk *pblk,
                                              struct nvm_chk_meta *meta,
                                              struct ppa_addr ppa)
{
        struct nvm_tgt_dev *dev = pblk->dev;
        struct nvm_geo *geo = &dev->geo;
        int ch_off = ppa.m.grp * geo->num_chk * geo->num_lun;
        int lun_off = ppa.m.pu * geo->num_chk;
        int chk_off = ppa.m.chk;

        return meta + ch_off + lun_off + chk_off;
}

void __pblk_map_invalidate(struct pblk *pblk, struct pblk_line *line,
                           u64 paddr)
{
        struct pblk_line_mgmt *l_mg = &pblk->l_mg;
        struct list_head *move_list = NULL;

        /* Lines being reclaimed (GC'ed) cannot be invalidated. Before the L2P
         * table is modified with reclaimed sectors, a check is done to ensure
         * that newer updates are not overwritten.
         */
        spin_lock(&line->lock);
        WARN_ON(line->state == PBLK_LINESTATE_FREE);

        if (test_and_set_bit(paddr, line->invalid_bitmap)) {
                WARN_ONCE(1, "pblk: double invalidate\n");
                spin_unlock(&line->lock);
                return;
        }
        le32_add_cpu(line->vsc, -1);

        if (line->state == PBLK_LINESTATE_CLOSED)
                move_list = pblk_line_gc_list(pblk, line);
        spin_unlock(&line->lock);

        if (move_list) {
                spin_lock(&l_mg->gc_lock);
                spin_lock(&line->lock);
                /* Prevent moving a line that has just been chosen for GC */
                if (line->state == PBLK_LINESTATE_GC) {
                        spin_unlock(&line->lock);
                        spin_unlock(&l_mg->gc_lock);
                        return;
                }
                spin_unlock(&line->lock);

                list_move_tail(&line->list, move_list);
                spin_unlock(&l_mg->gc_lock);
        }
}

void pblk_map_invalidate(struct pblk *pblk, struct ppa_addr ppa)
{
        struct pblk_line *line;
        u64 paddr;

#ifdef CONFIG_NVM_PBLK_DEBUG
        /* Callers must ensure that the ppa points to a device address */
        BUG_ON(pblk_addr_in_cache(ppa));
        BUG_ON(pblk_ppa_empty(ppa));
#endif

        line = pblk_ppa_to_line(pblk, ppa);
        paddr = pblk_dev_ppa_to_line_addr(pblk, ppa);

        __pblk_map_invalidate(pblk, line, paddr);
}

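/* Invalidate the L2P mappings for a contiguous LBA range and reset the
 * entries to empty. Used to service discard requests.
 */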
static void pblk_invalidate_range(struct pblk *pblk, sector_t slba,
                                  unsigned int nr_secs)
{
        sector_t lba;

        spin_lock(&pblk->trans_lock);
        for (lba = slba; lba < slba + nr_secs; lba++) {
                struct ppa_addr ppa;

                ppa = pblk_trans_map_get(pblk, lba);

                if (!pblk_addr_in_cache(ppa) && !pblk_ppa_empty(ppa))
                        pblk_map_invalidate(pblk, ppa);

                pblk_ppa_set_empty(&ppa);
                pblk_trans_map_set(pblk, lba, ppa);
        }
        spin_unlock(&pblk->trans_lock);
}

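/* Allocate the DMA-able metadata buffer for a request. For multi-sector
 * requests, the PPA list is carved out of the same allocation, right after
 * the sector metadata.
 */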
int pblk_alloc_rqd_meta(struct pblk *pblk, struct nvm_rq *rqd)
{
        struct nvm_tgt_dev *dev = pblk->dev;

        rqd->meta_list = nvm_dev_dma_alloc(dev->parent, GFP_KERNEL,
                                                        &rqd->dma_meta_list);
        if (!rqd->meta_list)
                return -ENOMEM;

        if (rqd->nr_ppas == 1)
                return 0;

        rqd->ppa_list = rqd->meta_list + pblk_dma_meta_size(pblk);
        rqd->dma_ppa_list = rqd->dma_meta_list + pblk_dma_meta_size(pblk);

        return 0;
}

void pblk_free_rqd_meta(struct pblk *pblk, struct nvm_rq *rqd)
{
        struct nvm_tgt_dev *dev = pblk->dev;

        if (rqd->meta_list)
                nvm_dev_dma_free(dev->parent, rqd->meta_list,
                                rqd->dma_meta_list);
}

/* Caller must guarantee that the request is a valid type */
struct nvm_rq *pblk_alloc_rqd(struct pblk *pblk, int type)
{
        mempool_t *pool;
        struct nvm_rq *rqd;
        int rq_size;

        switch (type) {
        case PBLK_WRITE:
        case PBLK_WRITE_INT:
                pool = &pblk->w_rq_pool;
                rq_size = pblk_w_rq_size;
                break;
        case PBLK_READ:
                pool = &pblk->r_rq_pool;
                rq_size = pblk_g_rq_size;
                break;
        default:
                pool = &pblk->e_rq_pool;
                rq_size = pblk_g_rq_size;
        }

        rqd = mempool_alloc(pool, GFP_KERNEL);
        memset(rqd, 0, rq_size);

        return rqd;
}

/* Typically used on completion path. Cannot guarantee request consistency */
void pblk_free_rqd(struct pblk *pblk, struct nvm_rq *rqd, int type)
{
        mempool_t *pool;

        switch (type) {
        case PBLK_WRITE:
                kfree(((struct pblk_c_ctx *)nvm_rq_to_pdu(rqd))->lun_bitmap);
                /* fall through */
        case PBLK_WRITE_INT:
                pool = &pblk->w_rq_pool;
                break;
        case PBLK_READ:
                pool = &pblk->r_rq_pool;
                break;
        case PBLK_ERASE:
                pool = &pblk->e_rq_pool;
                break;
        default:
                pblk_err(pblk, "trying to free unknown rqd type\n");
                return;
        }

        pblk_free_rqd_meta(pblk, rqd);
        mempool_free(rqd, pool);
}

void pblk_bio_free_pages(struct pblk *pblk, struct bio *bio, int off,
                         int nr_pages)
{
        struct bio_vec *bv;
        struct page *page;
        int i, e, nbv = 0;

        for (i = 0; i < bio->bi_vcnt; i++) {
                bv = &bio->bi_io_vec[i];
                page = bv->bv_page;
                for (e = 0; e < bv->bv_len; e += PBLK_EXPOSED_PAGE_SIZE, nbv++)
                        if (nbv >= off)
                                mempool_free(page++, &pblk->page_bio_pool);
        }
}

int pblk_bio_add_pages(struct pblk *pblk, struct bio *bio, gfp_t flags,
                       int nr_pages)
{
        struct request_queue *q = pblk->dev->q;
        struct page *page;
        int i, ret;

        for (i = 0; i < nr_pages; i++) {
                page = mempool_alloc(&pblk->page_bio_pool, flags);

                ret = bio_add_pc_page(q, bio, page, PBLK_EXPOSED_PAGE_SIZE, 0);
                if (ret != PBLK_EXPOSED_PAGE_SIZE) {
                        pblk_err(pblk, "could not add page to bio\n");
                        mempool_free(page, &pblk->page_bio_pool);
                        goto err;
                }
        }

        return 0;
err:
        pblk_bio_free_pages(pblk, bio, (bio->bi_vcnt - i), i);
        return -1;
}

void pblk_write_kick(struct pblk *pblk)
{
        wake_up_process(pblk->writer_ts);
        mod_timer(&pblk->wtimer, jiffies + msecs_to_jiffies(1000));
}

void pblk_write_timer_fn(struct timer_list *t)
{
        struct pblk *pblk = from_timer(pblk, t, wtimer);

        /* kick the write thread every tick to flush outstanding data */
        pblk_write_kick(pblk);
}

void pblk_write_should_kick(struct pblk *pblk)
{
        unsigned int secs_avail = pblk_rb_read_count(&pblk->rwb);

        if (secs_avail >= pblk->min_write_pgs_data)
                pblk_write_kick(pblk);
}

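/* Busy-wait (yielding the CPU) until all in-flight I/Os have completed */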
static void pblk_wait_for_meta(struct pblk *pblk)
{
        do {
                if (!atomic_read(&pblk->inflight_io))
                        break;

                schedule();
        } while (1);
}

static void pblk_flush_writer(struct pblk *pblk)
{
        pblk_rb_flush(&pblk->rwb);
        do {
                if (!pblk_rb_sync_count(&pblk->rwb))
                        break;

                pblk_write_kick(pblk);
                schedule();
        } while (1);
}

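/* Classify a line into a GC list based on its valid sector count (vsc):
 * write-error lines first, then full (vsc == 0), high, mid and low
 * thresholds, and finally empty lines. A vsc above sec_in_line means the
 * accounting is corrupt and the line is quarantined.
 */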
struct list_head *pblk_line_gc_list(struct pblk *pblk, struct pblk_line *line)
{
        struct pblk_line_meta *lm = &pblk->lm;
        struct pblk_line_mgmt *l_mg = &pblk->l_mg;
        struct list_head *move_list = NULL;
        int packed_meta = (le32_to_cpu(*line->vsc) / pblk->min_write_pgs_data)
                        * (pblk->min_write_pgs - pblk->min_write_pgs_data);
        int vsc = le32_to_cpu(*line->vsc) + packed_meta;

        lockdep_assert_held(&line->lock);

        if (line->w_err_gc->has_write_err) {
                if (line->gc_group != PBLK_LINEGC_WERR) {
                        line->gc_group = PBLK_LINEGC_WERR;
                        move_list = &l_mg->gc_werr_list;
                        pblk_rl_werr_line_in(&pblk->rl);
                }
        } else if (!vsc) {
                if (line->gc_group != PBLK_LINEGC_FULL) {
                        line->gc_group = PBLK_LINEGC_FULL;
                        move_list = &l_mg->gc_full_list;
                }
        } else if (vsc < lm->high_thrs) {
                if (line->gc_group != PBLK_LINEGC_HIGH) {
                        line->gc_group = PBLK_LINEGC_HIGH;
                        move_list = &l_mg->gc_high_list;
                }
        } else if (vsc < lm->mid_thrs) {
                if (line->gc_group != PBLK_LINEGC_MID) {
                        line->gc_group = PBLK_LINEGC_MID;
                        move_list = &l_mg->gc_mid_list;
                }
        } else if (vsc < line->sec_in_line) {
                if (line->gc_group != PBLK_LINEGC_LOW) {
                        line->gc_group = PBLK_LINEGC_LOW;
                        move_list = &l_mg->gc_low_list;
                }
        } else if (vsc == line->sec_in_line) {
                if (line->gc_group != PBLK_LINEGC_EMPTY) {
                        line->gc_group = PBLK_LINEGC_EMPTY;
                        move_list = &l_mg->gc_empty_list;
                }
        } else {
                line->state = PBLK_LINESTATE_CORRUPT;
                trace_pblk_line_state(pblk_disk_name(pblk), line->id,
                                        line->state);

                line->gc_group = PBLK_LINEGC_NONE;
                move_list = &l_mg->corrupt_list;
                pblk_err(pblk, "corrupted vsc for line %d, vsc:%d (%d/%d/%d)\n",
                                                line->id, vsc,
                                                line->sec_in_line,
                                                lm->high_thrs, lm->mid_thrs);
        }

        return move_list;
}

void pblk_discard(struct pblk *pblk, struct bio *bio)
{
        sector_t slba = pblk_get_lba(bio);
        sector_t nr_secs = pblk_get_secs(bio);

        pblk_invalidate_range(pblk, slba, nr_secs);
}

void pblk_log_write_err(struct pblk *pblk, struct nvm_rq *rqd)
{
        atomic_long_inc(&pblk->write_failed);
#ifdef CONFIG_NVM_PBLK_DEBUG
        pblk_print_failed_rqd(pblk, rqd, rqd->error);
#endif
}

void pblk_log_read_err(struct pblk *pblk, struct nvm_rq *rqd)
{
        /* Empty page read is not necessarily an error (e.g., L2P recovery) */
        if (rqd->error == NVM_RSP_ERR_EMPTYPAGE) {
                atomic_long_inc(&pblk->read_empty);
                return;
        }

        switch (rqd->error) {
        case NVM_RSP_WARN_HIGHECC:
                atomic_long_inc(&pblk->read_high_ecc);
                break;
        case NVM_RSP_ERR_FAILECC:
        case NVM_RSP_ERR_FAILCRC:
                atomic_long_inc(&pblk->read_failed);
                break;
        default:
                pblk_err(pblk, "unknown read error:%d\n", rqd->error);
        }
#ifdef CONFIG_NVM_PBLK_DEBUG
        pblk_print_failed_rqd(pblk, rqd, rqd->error);
#endif
}

void pblk_set_sec_per_write(struct pblk *pblk, int sec_per_write)
{
        pblk->sec_per_write = sec_per_write;
}

int pblk_submit_io(struct pblk *pblk, struct nvm_rq *rqd)
{
        struct nvm_tgt_dev *dev = pblk->dev;

        atomic_inc(&pblk->inflight_io);

#ifdef CONFIG_NVM_PBLK_DEBUG
        if (pblk_check_io(pblk, rqd))
                return NVM_IO_ERR;
#endif

        return nvm_submit_io(dev, rqd);
}

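/* Emit chunk-state trace events for writes that touch the first sector of a
 * chunk (the chunk opens) or its last sector (the chunk closes).
 */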
void pblk_check_chunk_state_update(struct pblk *pblk, struct nvm_rq *rqd)
{
        struct ppa_addr *ppa_list = nvm_rq_to_ppa_list(rqd);
        int i;

        for (i = 0; i < rqd->nr_ppas; i++) {
                struct ppa_addr *ppa = &ppa_list[i];
                struct nvm_chk_meta *chunk = pblk_dev_ppa_to_chunk(pblk, *ppa);
                u64 caddr = pblk_dev_ppa_to_chunk_addr(pblk, *ppa);

                if (caddr == 0)
                        trace_pblk_chunk_state(pblk_disk_name(pblk),
                                                        ppa, NVM_CHK_ST_OPEN);
                else if (caddr == (chunk->cnlb - 1))
                        trace_pblk_chunk_state(pblk_disk_name(pblk),
                                                        ppa, NVM_CHK_ST_CLOSED);
        }
}

int pblk_submit_io_sync(struct pblk *pblk, struct nvm_rq *rqd)
{
        struct nvm_tgt_dev *dev = pblk->dev;
        int ret;

        atomic_inc(&pblk->inflight_io);

#ifdef CONFIG_NVM_PBLK_DEBUG
        if (pblk_check_io(pblk, rqd))
                return NVM_IO_ERR;
#endif

        ret = nvm_submit_io_sync(dev, rqd);

        if (trace_pblk_chunk_state_enabled() && !ret &&
            rqd->opcode == NVM_OP_PWRITE)
                pblk_check_chunk_state_update(pblk, rqd);

        return ret;
}

int pblk_submit_io_sync_sem(struct pblk *pblk, struct nvm_rq *rqd)
{
        struct ppa_addr *ppa_list = nvm_rq_to_ppa_list(rqd);
        int ret;

        pblk_down_chunk(pblk, ppa_list[0]);
        ret = pblk_submit_io_sync(pblk, rqd);
        pblk_up_chunk(pblk, ppa_list[0]);

        return ret;
}

static void pblk_bio_map_addr_endio(struct bio *bio)
{
        bio_put(bio);
}

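/* Build a bio over a metadata buffer. kmalloc'ed buffers can be mapped
 * directly; vmalloc'ed buffers must be mapped page by page, since their
 * pages are not physically contiguous.
 */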
struct bio *pblk_bio_map_addr(struct pblk *pblk, void *data,
                              unsigned int nr_secs, unsigned int len,
                              int alloc_type, gfp_t gfp_mask)
{
        struct nvm_tgt_dev *dev = pblk->dev;
        void *kaddr = data;
        struct page *page;
        struct bio *bio;
        int i, ret;

        if (alloc_type == PBLK_KMALLOC_META)
                return bio_map_kern(dev->q, kaddr, len, gfp_mask);

        bio = bio_kmalloc(gfp_mask, nr_secs);
        if (!bio)
                return ERR_PTR(-ENOMEM);

        for (i = 0; i < nr_secs; i++) {
                page = vmalloc_to_page(kaddr);
                if (!page) {
                        pblk_err(pblk, "could not map vmalloc bio\n");
                        bio_put(bio);
                        bio = ERR_PTR(-ENOMEM);
                        goto out;
                }

                ret = bio_add_pc_page(dev->q, bio, page, PAGE_SIZE, 0);
                if (ret != PAGE_SIZE) {
                        pblk_err(pblk, "could not add page to bio\n");
                        bio_put(bio);
                        bio = ERR_PTR(-ENOMEM);
                        goto out;
                }

                kaddr += PAGE_SIZE;
        }

        bio->bi_end_io = pblk_bio_map_addr_endio;
out:
        return bio;
}

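/* Compute how many sectors to submit in one write: as many full max-sized
 * writes as possible, otherwise round down to a multiple of the minimum
 * write size, or pad up to the minimum when a flush is pending. E.g., with
 * min = 8 and max = 64: 70 available -> 64; 21 available -> 16; 5 available
 * with a flush pending -> 8.
 */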
int pblk_calc_secs(struct pblk *pblk, unsigned long secs_avail,
                   unsigned long secs_to_flush, bool skip_meta)
{
        int max = pblk->sec_per_write;
        int min = pblk->min_write_pgs;
        int secs_to_sync = 0;

        if (skip_meta && pblk->min_write_pgs_data != pblk->min_write_pgs)
                min = max = pblk->min_write_pgs_data;

        if (secs_avail >= max)
                secs_to_sync = max;
        else if (secs_avail >= min)
                secs_to_sync = min * (secs_avail / min);
        else if (secs_to_flush)
                secs_to_sync = min;

        return secs_to_sync;
}

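/* Undo the last nr_secs sector allocations on a line and clear the
 * corresponding bits in the map bitmap.
 */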
void pblk_dealloc_page(struct pblk *pblk, struct pblk_line *line, int nr_secs)
{
        u64 addr;
        int i;

        spin_lock(&line->lock);
        addr = find_next_zero_bit(line->map_bitmap,
                                        pblk->lm.sec_per_line, line->cur_sec);
        line->cur_sec = addr - nr_secs;

        for (i = 0; i < nr_secs; i++, line->cur_sec--)
                WARN_ON(!test_and_clear_bit(line->cur_sec, line->map_bitmap));
        spin_unlock(&line->lock);
}

u64 __pblk_alloc_page(struct pblk *pblk, struct pblk_line *line, int nr_secs)
{
        u64 addr;
        int i;

        lockdep_assert_held(&line->lock);

        /* logic error: ppa out-of-bounds. Prevent generating bad address */
        if (line->cur_sec + nr_secs > pblk->lm.sec_per_line) {
                WARN(1, "pblk: page allocation out of bounds\n");
                nr_secs = pblk->lm.sec_per_line - line->cur_sec;
        }

        line->cur_sec = addr = find_next_zero_bit(line->map_bitmap,
                                        pblk->lm.sec_per_line, line->cur_sec);
        for (i = 0; i < nr_secs; i++, line->cur_sec++)
                WARN_ON(test_and_set_bit(line->cur_sec, line->map_bitmap));

        return addr;
}

u64 pblk_alloc_page(struct pblk *pblk, struct pblk_line *line, int nr_secs)
{
        u64 addr;

        /* Lock needed in case a write fails and a recovery needs to remap
         * failed write buffer entries
         */
        spin_lock(&line->lock);
        addr = __pblk_alloc_page(pblk, line, nr_secs);
        line->left_msecs -= nr_secs;
        WARN(line->left_msecs < 0, "pblk: page allocation out of bounds\n");
        spin_unlock(&line->lock);

        return addr;
}

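/* Peek at the next free sector (paddr) on a line without claiming it */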
u64 pblk_lookup_page(struct pblk *pblk, struct pblk_line *line)
{
        u64 paddr;

        spin_lock(&line->lock);
        paddr = find_next_zero_bit(line->map_bitmap,
                                        pblk->lm.sec_per_line, line->cur_sec);
        spin_unlock(&line->lock);

        return paddr;
}

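/* Return the start paddr for smeta: the first sector of the line's first
 * good block. Returns -1 if the line has no good blocks at all.
 */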
u64 pblk_line_smeta_start(struct pblk *pblk, struct pblk_line *line)
{
        struct nvm_tgt_dev *dev = pblk->dev;
        struct nvm_geo *geo = &dev->geo;
        struct pblk_line_meta *lm = &pblk->lm;
        int bit;

        /* This usually only happens on bad lines */
        bit = find_first_zero_bit(line->blk_bitmap, lm->blk_per_line);
        if (bit >= lm->blk_per_line)
                return -1;

        return bit * geo->ws_opt;
}

int pblk_line_smeta_read(struct pblk *pblk, struct pblk_line *line)
{
        struct nvm_tgt_dev *dev = pblk->dev;
        struct pblk_line_meta *lm = &pblk->lm;
        struct bio *bio;
        struct ppa_addr *ppa_list;
        struct nvm_rq rqd;
        u64 paddr = pblk_line_smeta_start(pblk, line);
        int i, ret;

        memset(&rqd, 0, sizeof(struct nvm_rq));

        ret = pblk_alloc_rqd_meta(pblk, &rqd);
        if (ret)
                return ret;

        bio = bio_map_kern(dev->q, line->smeta, lm->smeta_len, GFP_KERNEL);
        if (IS_ERR(bio)) {
                ret = PTR_ERR(bio);
                goto clear_rqd;
        }

        bio->bi_iter.bi_sector = 0; /* internal bio */
        bio_set_op_attrs(bio, REQ_OP_READ, 0);

        rqd.bio = bio;
        rqd.opcode = NVM_OP_PREAD;
        rqd.nr_ppas = lm->smeta_sec;
        rqd.is_seq = 1;
        ppa_list = nvm_rq_to_ppa_list(&rqd);

        for (i = 0; i < lm->smeta_sec; i++, paddr++)
                ppa_list[i] = addr_to_gen_ppa(pblk, paddr, line->id);

        ret = pblk_submit_io_sync(pblk, &rqd);
        if (ret) {
                pblk_err(pblk, "smeta I/O submission failed: %d\n", ret);
                bio_put(bio);
                goto clear_rqd;
        }

        atomic_dec(&pblk->inflight_io);

        if (rqd.error && rqd.error != NVM_RSP_WARN_HIGHECC) {
                pblk_log_read_err(pblk, &rqd);
                ret = -EIO;
        }

clear_rqd:
        pblk_free_rqd_meta(pblk, &rqd);
        return ret;
}

static int pblk_line_smeta_write(struct pblk *pblk, struct pblk_line *line,
                                 u64 paddr)
{
        struct nvm_tgt_dev *dev = pblk->dev;
        struct pblk_line_meta *lm = &pblk->lm;
        struct bio *bio;
        struct ppa_addr *ppa_list;
        struct nvm_rq rqd;
        __le64 *lba_list = emeta_to_lbas(pblk, line->emeta->buf);
        __le64 addr_empty = cpu_to_le64(ADDR_EMPTY);
        int i, ret;

        memset(&rqd, 0, sizeof(struct nvm_rq));

        ret = pblk_alloc_rqd_meta(pblk, &rqd);
        if (ret)
                return ret;

        bio = bio_map_kern(dev->q, line->smeta, lm->smeta_len, GFP_KERNEL);
        if (IS_ERR(bio)) {
                ret = PTR_ERR(bio);
                goto clear_rqd;
        }

        bio->bi_iter.bi_sector = 0; /* internal bio */
        bio_set_op_attrs(bio, REQ_OP_WRITE, 0);

        rqd.bio = bio;
        rqd.opcode = NVM_OP_PWRITE;
        rqd.nr_ppas = lm->smeta_sec;
        rqd.is_seq = 1;
        ppa_list = nvm_rq_to_ppa_list(&rqd);

        for (i = 0; i < lm->smeta_sec; i++, paddr++) {
                struct pblk_sec_meta *meta = pblk_get_meta(pblk,
                                                           rqd.meta_list, i);

                ppa_list[i] = addr_to_gen_ppa(pblk, paddr, line->id);
                meta->lba = lba_list[paddr] = addr_empty;
        }

        ret = pblk_submit_io_sync_sem(pblk, &rqd);
        if (ret) {
                pblk_err(pblk, "smeta I/O submission failed: %d\n", ret);
                bio_put(bio);
                goto clear_rqd;
        }

        atomic_dec(&pblk->inflight_io);

        if (rqd.error) {
                pblk_log_write_err(pblk, &rqd);
                ret = -EIO;
        }

clear_rqd:
        pblk_free_rqd_meta(pblk, &rqd);
        return ret;
}

int pblk_line_emeta_read(struct pblk *pblk, struct pblk_line *line,
                         void *emeta_buf)
{
        struct nvm_tgt_dev *dev = pblk->dev;
        struct nvm_geo *geo = &dev->geo;
        struct pblk_line_mgmt *l_mg = &pblk->l_mg;
        struct pblk_line_meta *lm = &pblk->lm;
        void *ppa_list_buf, *meta_list;
        struct bio *bio;
        struct ppa_addr *ppa_list;
        struct nvm_rq rqd;
        u64 paddr = line->emeta_ssec;
        dma_addr_t dma_ppa_list, dma_meta_list;
        int min = pblk->min_write_pgs;
        int left_ppas = lm->emeta_sec[0];
        int line_id = line->id;
        int rq_ppas, rq_len;
        int i, j;
        int ret;

        meta_list = nvm_dev_dma_alloc(dev->parent, GFP_KERNEL,
                                                        &dma_meta_list);
        if (!meta_list)
                return -ENOMEM;

        ppa_list_buf = meta_list + pblk_dma_meta_size(pblk);
        dma_ppa_list = dma_meta_list + pblk_dma_meta_size(pblk);

next_rq:
        memset(&rqd, 0, sizeof(struct nvm_rq));

        rq_ppas = pblk_calc_secs(pblk, left_ppas, 0, false);
        rq_len = rq_ppas * geo->csecs;

        bio = pblk_bio_map_addr(pblk, emeta_buf, rq_ppas, rq_len,
                                        l_mg->emeta_alloc_type, GFP_KERNEL);
        if (IS_ERR(bio)) {
                ret = PTR_ERR(bio);
                goto free_rqd_dma;
        }

        bio->bi_iter.bi_sector = 0; /* internal bio */
        bio_set_op_attrs(bio, REQ_OP_READ, 0);

        rqd.bio = bio;
        rqd.meta_list = meta_list;
        rqd.ppa_list = ppa_list_buf;
        rqd.dma_meta_list = dma_meta_list;
        rqd.dma_ppa_list = dma_ppa_list;
        rqd.opcode = NVM_OP_PREAD;
        rqd.nr_ppas = rq_ppas;
        ppa_list = nvm_rq_to_ppa_list(&rqd);

        for (i = 0; i < rqd.nr_ppas; ) {
                struct ppa_addr ppa = addr_to_gen_ppa(pblk, paddr, line_id);
                int pos = pblk_ppa_to_pos(geo, ppa);

                if (pblk_io_aligned(pblk, rq_ppas))
                        rqd.is_seq = 1;

                while (test_bit(pos, line->blk_bitmap)) {
                        paddr += min;
                        if (pblk_boundary_paddr_checks(pblk, paddr)) {
                                bio_put(bio);
                                ret = -EINTR;
                                goto free_rqd_dma;
                        }

                        ppa = addr_to_gen_ppa(pblk, paddr, line_id);
                        pos = pblk_ppa_to_pos(geo, ppa);
                }

                if (pblk_boundary_paddr_checks(pblk, paddr + min)) {
                        bio_put(bio);
                        ret = -EINTR;
                        goto free_rqd_dma;
                }

                for (j = 0; j < min; j++, i++, paddr++)
                        ppa_list[i] = addr_to_gen_ppa(pblk, paddr, line_id);
        }

        ret = pblk_submit_io_sync(pblk, &rqd);
        if (ret) {
                pblk_err(pblk, "emeta I/O submission failed: %d\n", ret);
                bio_put(bio);
                goto free_rqd_dma;
        }

        atomic_dec(&pblk->inflight_io);

        if (rqd.error && rqd.error != NVM_RSP_WARN_HIGHECC) {
                pblk_log_read_err(pblk, &rqd);
                ret = -EIO;
                goto free_rqd_dma;
        }

        emeta_buf += rq_len;
        left_ppas -= rq_ppas;
        if (left_ppas)
                goto next_rq;

free_rqd_dma:
        nvm_dev_dma_free(dev->parent, rqd.meta_list, rqd.dma_meta_list);
        return ret;
}

static void pblk_setup_e_rq(struct pblk *pblk, struct nvm_rq *rqd,
                            struct ppa_addr ppa)
{
        rqd->opcode = NVM_OP_ERASE;
        rqd->ppa_addr = ppa;
        rqd->nr_ppas = 1;
        rqd->is_seq = 1;
        rqd->bio = NULL;
}

static int pblk_blk_erase_sync(struct pblk *pblk, struct ppa_addr ppa)
{
        struct nvm_rq rqd = {NULL};
        int ret;

        trace_pblk_chunk_reset(pblk_disk_name(pblk), &ppa,
                                PBLK_CHUNK_RESET_START);

        pblk_setup_e_rq(pblk, &rqd, ppa);

        /* The write thread schedules erases so that it minimizes disturbance
         * to writes. Thus, there is no need to take the LUN semaphore.
         */
        ret = pblk_submit_io_sync(pblk, &rqd);
        rqd.private = pblk;
        __pblk_end_io_erase(pblk, &rqd);

        return ret;
}

int pblk_line_erase(struct pblk *pblk, struct pblk_line *line)
{
        struct pblk_line_meta *lm = &pblk->lm;
        struct ppa_addr ppa;
        int ret, bit = -1;

        /* Erase only good blocks, one at a time */
        do {
                spin_lock(&line->lock);
                bit = find_next_zero_bit(line->erase_bitmap, lm->blk_per_line,
                                                                bit + 1);
                if (bit >= lm->blk_per_line) {
                        spin_unlock(&line->lock);
                        break;
                }

                ppa = pblk->luns[bit].bppa; /* set ch and lun */
                ppa.a.blk = line->id;

                atomic_dec(&line->left_eblks);
                WARN_ON(test_and_set_bit(bit, line->erase_bitmap));
                spin_unlock(&line->lock);

                ret = pblk_blk_erase_sync(pblk, ppa);
                if (ret) {
                        pblk_err(pblk, "failed to erase line %d\n", line->id);
                        return ret;
                }
        } while (1);

        return 0;
}

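/* Claim a free smeta/emeta buffer slot for the line, busy-waiting (and
 * temporarily dropping the free lock) until one of the PBLK_DATA_LINES
 * slots becomes available.
 */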
static void pblk_line_setup_metadata(struct pblk_line *line,
                                     struct pblk_line_mgmt *l_mg,
                                     struct pblk_line_meta *lm)
{
        int meta_line;

        lockdep_assert_held(&l_mg->free_lock);

retry_meta:
        meta_line = find_first_zero_bit(&l_mg->meta_bitmap, PBLK_DATA_LINES);
        if (meta_line == PBLK_DATA_LINES) {
                spin_unlock(&l_mg->free_lock);
                io_schedule();
                spin_lock(&l_mg->free_lock);
                goto retry_meta;
        }

        set_bit(meta_line, &l_mg->meta_bitmap);
        line->meta_line = meta_line;

        line->smeta = l_mg->sline_meta[meta_line];
        line->emeta = l_mg->eline_meta[meta_line];

        memset(line->smeta, 0, lm->smeta_len);
        memset(line->emeta->buf, 0, lm->emeta_len[0]);

        line->emeta->mem = 0;
        atomic_set(&line->emeta->sync, 0);
}

/* For now lines are always assumed full lines. Thus, smeta former and current
 * lun bitmaps are omitted.
 */
static int pblk_line_init_metadata(struct pblk *pblk, struct pblk_line *line,
                                  struct pblk_line *cur)
{
        struct nvm_tgt_dev *dev = pblk->dev;
        struct nvm_geo *geo = &dev->geo;
        struct pblk_line_meta *lm = &pblk->lm;
        struct pblk_line_mgmt *l_mg = &pblk->l_mg;
        struct pblk_emeta *emeta = line->emeta;
        struct line_emeta *emeta_buf = emeta->buf;
        struct line_smeta *smeta_buf = (struct line_smeta *)line->smeta;
        int nr_blk_line;

        /* After erasing the line, new bad blocks might appear and we risk
         * having an invalid line
         */
        nr_blk_line = lm->blk_per_line -
                        bitmap_weight(line->blk_bitmap, lm->blk_per_line);
        if (nr_blk_line < lm->min_blk_line) {
                spin_lock(&l_mg->free_lock);
                spin_lock(&line->lock);
                line->state = PBLK_LINESTATE_BAD;
                trace_pblk_line_state(pblk_disk_name(pblk), line->id,
                                        line->state);
                spin_unlock(&line->lock);

                list_add_tail(&line->list, &l_mg->bad_list);
                spin_unlock(&l_mg->free_lock);

                pblk_debug(pblk, "line %d is bad\n", line->id);

                return 0;
        }

        /* Run-time metadata */
        line->lun_bitmap = ((void *)(smeta_buf)) + sizeof(struct line_smeta);

        /* Mark LUNs allocated in this line (all for now) */
        bitmap_set(line->lun_bitmap, 0, lm->lun_bitmap_len);

        smeta_buf->header.identifier = cpu_to_le32(PBLK_MAGIC);
        guid_copy((guid_t *)&smeta_buf->header.uuid, &pblk->instance_uuid);
        smeta_buf->header.id = cpu_to_le32(line->id);
        smeta_buf->header.type = cpu_to_le16(line->type);
        smeta_buf->header.version_major = SMETA_VERSION_MAJOR;
        smeta_buf->header.version_minor = SMETA_VERSION_MINOR;

        /* Start metadata */
        smeta_buf->seq_nr = cpu_to_le64(line->seq_nr);
        smeta_buf->window_wr_lun = cpu_to_le32(geo->all_luns);

        /* Fill metadata among lines */
        if (cur) {
                memcpy(line->lun_bitmap, cur->lun_bitmap, lm->lun_bitmap_len);
                smeta_buf->prev_id = cpu_to_le32(cur->id);
                cur->emeta->buf->next_id = cpu_to_le32(line->id);
        } else {
                smeta_buf->prev_id = cpu_to_le32(PBLK_LINE_EMPTY);
        }

        /* All smeta must be set at this point */
        smeta_buf->header.crc = cpu_to_le32(
                        pblk_calc_meta_header_crc(pblk, &smeta_buf->header));
        smeta_buf->crc = cpu_to_le32(pblk_calc_smeta_crc(pblk, smeta_buf));

        /* End metadata */
        memcpy(&emeta_buf->header, &smeta_buf->header,
                                                sizeof(struct line_header));

        emeta_buf->header.version_major = EMETA_VERSION_MAJOR;
        emeta_buf->header.version_minor = EMETA_VERSION_MINOR;
        emeta_buf->header.crc = cpu_to_le32(
                        pblk_calc_meta_header_crc(pblk, &emeta_buf->header));

        emeta_buf->seq_nr = cpu_to_le64(line->seq_nr);
        emeta_buf->nr_lbas = cpu_to_le64(line->sec_in_line);
        emeta_buf->nr_valid_lbas = cpu_to_le64(0);
        emeta_buf->next_id = cpu_to_le32(PBLK_LINE_EMPTY);
        emeta_buf->crc = cpu_to_le32(0);
        emeta_buf->prev_id = smeta_buf->prev_id;

        return 1;
}

static int pblk_line_alloc_bitmaps(struct pblk *pblk, struct pblk_line *line)
{
        struct pblk_line_meta *lm = &pblk->lm;
        struct pblk_line_mgmt *l_mg = &pblk->l_mg;

        line->map_bitmap = mempool_alloc(l_mg->bitmap_pool, GFP_KERNEL);
        if (!line->map_bitmap)
                return -ENOMEM;

        memset(line->map_bitmap, 0, lm->sec_bitmap_len);

        /* will be initialized using bb info from map_bitmap */
        line->invalid_bitmap = mempool_alloc(l_mg->bitmap_pool, GFP_KERNEL);
        if (!line->invalid_bitmap) {
                mempool_free(line->map_bitmap, l_mg->bitmap_pool);
                line->map_bitmap = NULL;
                return -ENOMEM;
        }

        return 0;
}

/* For now lines are always assumed full lines. Thus, smeta former and current
 * lun bitmaps are omitted.
 */
static int pblk_line_init_bb(struct pblk *pblk, struct pblk_line *line,
                             int init)
{
        struct nvm_tgt_dev *dev = pblk->dev;
        struct nvm_geo *geo = &dev->geo;
        struct pblk_line_meta *lm = &pblk->lm;
        struct pblk_line_mgmt *l_mg = &pblk->l_mg;
        u64 off;
        int bit = -1;
        int emeta_secs;

        line->sec_in_line = lm->sec_per_line;

        /* Capture bad block information on line mapping bitmaps */
        while ((bit = find_next_bit(line->blk_bitmap, lm->blk_per_line,
                                        bit + 1)) < lm->blk_per_line) {
                off = bit * geo->ws_opt;
                bitmap_shift_left(l_mg->bb_aux, l_mg->bb_template, off,
                                                        lm->sec_per_line);
                bitmap_or(line->map_bitmap, line->map_bitmap, l_mg->bb_aux,
                                                        lm->sec_per_line);
                line->sec_in_line -= geo->clba;
        }

        /* Mark smeta metadata sectors as bad sectors */
        bit = find_first_zero_bit(line->blk_bitmap, lm->blk_per_line);
        off = bit * geo->ws_opt;
        bitmap_set(line->map_bitmap, off, lm->smeta_sec);
        line->sec_in_line -= lm->smeta_sec;
        line->cur_sec = off + lm->smeta_sec;

        if (init && pblk_line_smeta_write(pblk, line, off)) {
                pblk_debug(pblk, "line smeta I/O failed. Retry\n");
                return 0;
        }

        bitmap_copy(line->invalid_bitmap, line->map_bitmap, lm->sec_per_line);

        /* Mark emeta metadata sectors as bad sectors. We need to consider bad
         * blocks to make sure that there are enough sectors to store emeta
         */
        emeta_secs = lm->emeta_sec[0];
        off = lm->sec_per_line;
        while (emeta_secs) {
                off -= geo->ws_opt;
                if (!test_bit(off, line->invalid_bitmap)) {
                        bitmap_set(line->invalid_bitmap, off, geo->ws_opt);
                        emeta_secs -= geo->ws_opt;
                }
        }

        line->emeta_ssec = off;
        line->sec_in_line -= lm->emeta_sec[0];
        line->nr_valid_lbas = 0;
        line->left_msecs = line->sec_in_line;
        *line->vsc = cpu_to_le32(line->sec_in_line);

        if (lm->sec_per_line - line->sec_in_line !=
                bitmap_weight(line->invalid_bitmap, lm->sec_per_line)) {
                spin_lock(&line->lock);
                line->state = PBLK_LINESTATE_BAD;
                trace_pblk_line_state(pblk_disk_name(pblk), line->id,
                                        line->state);
                spin_unlock(&line->lock);

                list_add_tail(&line->list, &l_mg->bad_list);
                pblk_err(pblk, "unexpected line %d is bad\n", line->id);

                return 0;
        }

        return 1;
}

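/* Count the blocks that still need an erase on a never-written line. Chunks
 * already in the free state are marked in the erase bitmap so they are
 * skipped.
 */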
static int pblk_prepare_new_line(struct pblk *pblk, struct pblk_line *line)
{
        struct pblk_line_meta *lm = &pblk->lm;
        struct nvm_tgt_dev *dev = pblk->dev;
        struct nvm_geo *geo = &dev->geo;
        int blk_to_erase = atomic_read(&line->blk_in_line);
        int i;

        for (i = 0; i < lm->blk_per_line; i++) {
                struct pblk_lun *rlun = &pblk->luns[i];
                int pos = pblk_ppa_to_pos(geo, rlun->bppa);
                int state = line->chks[pos].state;

                /* Free chunks should not be erased */
                if (state & NVM_CHK_ST_FREE) {
                        set_bit(pblk_ppa_to_pos(geo, rlun->bppa),
                                                        line->erase_bitmap);
                        blk_to_erase--;
                }
        }

        return blk_to_erase;
}

static int pblk_line_prepare(struct pblk *pblk, struct pblk_line *line)
{
        struct pblk_line_meta *lm = &pblk->lm;
        int blk_in_line = atomic_read(&line->blk_in_line);
        int blk_to_erase;

        /* Bad blocks do not need to be erased */
        bitmap_copy(line->erase_bitmap, line->blk_bitmap, lm->blk_per_line);

        spin_lock(&line->lock);

        /* If we have not written to this line, we need to mark up free chunks
         * as already erased
         */
        if (line->state == PBLK_LINESTATE_NEW) {
                blk_to_erase = pblk_prepare_new_line(pblk, line);
                line->state = PBLK_LINESTATE_FREE;
                trace_pblk_line_state(pblk_disk_name(pblk), line->id,
                                        line->state);
        } else {
                blk_to_erase = blk_in_line;
        }

        if (blk_in_line < lm->min_blk_line) {
                spin_unlock(&line->lock);
                return -EAGAIN;
        }

        if (line->state != PBLK_LINESTATE_FREE) {
                WARN(1, "pblk: corrupted line %d, state %d\n",
                                                        line->id, line->state);
                spin_unlock(&line->lock);
                return -EINTR;
        }

        line->state = PBLK_LINESTATE_OPEN;
        trace_pblk_line_state(pblk_disk_name(pblk), line->id,
                                line->state);

        atomic_set(&line->left_eblks, blk_to_erase);
        atomic_set(&line->left_seblks, blk_to_erase);

        line->meta_distance = lm->meta_distance;
        spin_unlock(&line->lock);

        kref_init(&line->ref);
        atomic_set(&line->sec_to_update, 0);

        return 0;
}

/* Line allocations in the recovery path are always single threaded */
int pblk_line_recov_alloc(struct pblk *pblk, struct pblk_line *line)
{
        struct pblk_line_mgmt *l_mg = &pblk->l_mg;
        int ret;

        spin_lock(&l_mg->free_lock);
        l_mg->data_line = line;
        list_del(&line->list);

        ret = pblk_line_prepare(pblk, line);
        if (ret) {
                list_add(&line->list, &l_mg->free_list);
                spin_unlock(&l_mg->free_lock);
                return ret;
        }
        spin_unlock(&l_mg->free_lock);

        ret = pblk_line_alloc_bitmaps(pblk, line);
        if (ret)
                goto fail;

        if (!pblk_line_init_bb(pblk, line, 0)) {
                ret = -EINTR;
                goto fail;
        }

        pblk_rl_free_lines_dec(&pblk->rl, line, true);
        return 0;

fail:
        spin_lock(&l_mg->free_lock);
        list_add(&line->list, &l_mg->free_list);
        spin_unlock(&l_mg->free_lock);

        return ret;
}

void pblk_line_recov_close(struct pblk *pblk, struct pblk_line *line)
{
        struct pblk_line_mgmt *l_mg = &pblk->l_mg;

        mempool_free(line->map_bitmap, l_mg->bitmap_pool);
        line->map_bitmap = NULL;
        line->smeta = NULL;
        line->emeta = NULL;
}

static void pblk_line_reinit(struct pblk_line *line)
{
        *line->vsc = cpu_to_le32(EMPTY_ENTRY);

        line->map_bitmap = NULL;
        line->invalid_bitmap = NULL;
        line->smeta = NULL;
        line->emeta = NULL;
}

void pblk_line_free(struct pblk_line *line)
{
        struct pblk *pblk = line->pblk;
        struct pblk_line_mgmt *l_mg = &pblk->l_mg;

        mempool_free(line->map_bitmap, l_mg->bitmap_pool);
        mempool_free(line->invalid_bitmap, l_mg->bitmap_pool);

        pblk_line_reinit(line);
}

struct pblk_line *pblk_line_get(struct pblk *pblk)
{
        struct pblk_line_mgmt *l_mg = &pblk->l_mg;
        struct pblk_line_meta *lm = &pblk->lm;
        struct pblk_line *line;
        int ret, bit;

        lockdep_assert_held(&l_mg->free_lock);

retry:
        if (list_empty(&l_mg->free_list)) {
                pblk_err(pblk, "no free lines\n");
                return NULL;
        }

        line = list_first_entry(&l_mg->free_list, struct pblk_line, list);
        list_del(&line->list);
        l_mg->nr_free_lines--;

        bit = find_first_zero_bit(line->blk_bitmap, lm->blk_per_line);
        if (unlikely(bit >= lm->blk_per_line)) {
                spin_lock(&line->lock);
                line->state = PBLK_LINESTATE_BAD;
                trace_pblk_line_state(pblk_disk_name(pblk), line->id,
                                        line->state);
                spin_unlock(&line->lock);

                list_add_tail(&line->list, &l_mg->bad_list);

                pblk_debug(pblk, "line %d is bad\n", line->id);
                goto retry;
        }

        ret = pblk_line_prepare(pblk, line);
        if (ret) {
                switch (ret) {
                case -EAGAIN:
                        list_add(&line->list, &l_mg->bad_list);
                        goto retry;
                case -EINTR:
                        list_add(&line->list, &l_mg->corrupt_list);
                        goto retry;
                default:
                        pblk_err(pblk, "failed to prepare line %d\n", line->id);
                        list_add(&line->list, &l_mg->free_list);
                        l_mg->nr_free_lines++;
                        return NULL;
                }
        }

        return line;
}

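/* Replace a data line that failed during setup or erase: a new line takes
 * over the old line's bitmaps and metadata buffers, and the old line is
 * reset for reuse.
 */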
1418static struct pblk_line *pblk_line_retry(struct pblk *pblk,
1419                                         struct pblk_line *line)
1420{
1421        struct pblk_line_mgmt *l_mg = &pblk->l_mg;
1422        struct pblk_line *retry_line;
1423
1424retry:
1425        spin_lock(&l_mg->free_lock);
1426        retry_line = pblk_line_get(pblk);
1427        if (!retry_line) {
1428                l_mg->data_line = NULL;
1429                spin_unlock(&l_mg->free_lock);
1430                return NULL;
1431        }
1432
1433        retry_line->map_bitmap = line->map_bitmap;
1434        retry_line->invalid_bitmap = line->invalid_bitmap;
1435        retry_line->smeta = line->smeta;
1436        retry_line->emeta = line->emeta;
1437        retry_line->meta_line = line->meta_line;
1438
1439        pblk_line_reinit(line);
1440
1441        l_mg->data_line = retry_line;
1442        spin_unlock(&l_mg->free_lock);
1443
1444        pblk_rl_free_lines_dec(&pblk->rl, line, false);
1445
1446        if (pblk_line_erase(pblk, retry_line))
1447                goto retry;
1448
1449        return retry_line;
1450}
1451
1452static void pblk_set_space_limit(struct pblk *pblk)
1453{
1454        struct pblk_rl *rl = &pblk->rl;
1455
1456        atomic_set(&rl->rb_space, 0);
1457}
1458
1459struct pblk_line *pblk_line_get_first_data(struct pblk *pblk)
1460{
1461        struct pblk_line_mgmt *l_mg = &pblk->l_mg;
1462        struct pblk_line *line;
1463
1464        spin_lock(&l_mg->free_lock);
1465        line = pblk_line_get(pblk);
1466        if (!line) {
1467                spin_unlock(&l_mg->free_lock);
1468                return NULL;
1469        }
1470
1471        line->seq_nr = l_mg->d_seq_nr++;
1472        line->type = PBLK_LINETYPE_DATA;
1473        l_mg->data_line = line;
1474
1475        pblk_line_setup_metadata(line, l_mg, &pblk->lm);
1476
1477        /* Allocate next line for preparation */
1478        l_mg->data_next = pblk_line_get(pblk);
1479        if (!l_mg->data_next) {
1480                /* If we cannot get a new line, we need to stop the pipeline.
1481                 * Only allow as many writes in as we can store safely and then
1482                 * fail gracefully
1483                 */
1484                pblk_set_space_limit(pblk);
1485
1486                l_mg->data_next = NULL;
1487        } else {
1488                l_mg->data_next->seq_nr = l_mg->d_seq_nr++;
1489                l_mg->data_next->type = PBLK_LINETYPE_DATA;
1490        }
1491        spin_unlock(&l_mg->free_lock);
1492
1493        if (pblk_line_alloc_bitmaps(pblk, line))
1494                return NULL;
1495
1496        if (pblk_line_erase(pblk, line)) {
1497                line = pblk_line_retry(pblk, line);
1498                if (!line)
1499                        return NULL;
1500        }
1501
1502retry_setup:
1503        if (!pblk_line_init_metadata(pblk, line, NULL)) {
1504                line = pblk_line_retry(pblk, line);
1505                if (!line)
1506                        return NULL;
1507
1508                goto retry_setup;
1509        }
1510
1511        if (!pblk_line_init_bb(pblk, line, 1)) {
1512                line = pblk_line_retry(pblk, line);
1513                if (!line)
1514                        return NULL;
1515
1516                goto retry_setup;
1517        }
1518
1519        pblk_rl_free_lines_dec(&pblk->rl, line, true);
1520
1521        return line;
1522}
1523
1524void pblk_ppa_to_line_put(struct pblk *pblk, struct ppa_addr ppa)
1525{
1526        struct pblk_line *line;
1527
1528        line = pblk_ppa_to_line(pblk, ppa);
1529        kref_put(&line->ref, pblk_line_put_wq);
1530}
1531
1532void pblk_rq_to_line_put(struct pblk *pblk, struct nvm_rq *rqd)
1533{
1534        struct ppa_addr *ppa_list = nvm_rq_to_ppa_list(rqd);
1535        int i;
1536
1537        for (i = 0; i < rqd->nr_ppas; i++)
1538                pblk_ppa_to_line_put(pblk, ppa_list[i]);
1539}
1540
1541static void pblk_stop_writes(struct pblk *pblk, struct pblk_line *line)
1542{
1543        lockdep_assert_held(&pblk->l_mg.free_lock);
1544
1545        pblk_set_space_limit(pblk);
1546        pblk->state = PBLK_STATE_STOPPING;
1547        trace_pblk_state(pblk_disk_name(pblk), pblk->state);
1548}
1549
1550static void pblk_line_close_meta_sync(struct pblk *pblk)
1551{
1552        struct pblk_line_mgmt *l_mg = &pblk->l_mg;
1553        struct pblk_line_meta *lm = &pblk->lm;
1554        struct pblk_line *line, *tline;
1555        LIST_HEAD(list);
1556
1557        spin_lock(&l_mg->close_lock);
1558        if (list_empty(&l_mg->emeta_list)) {
1559                spin_unlock(&l_mg->close_lock);
1560                return;
1561        }
1562
1563        list_cut_position(&list, &l_mg->emeta_list, l_mg->emeta_list.prev);
1564        spin_unlock(&l_mg->close_lock);
1565
1566        list_for_each_entry_safe(line, tline, &list, list) {
1567                struct pblk_emeta *emeta = line->emeta;
1568
1569                while (emeta->mem < lm->emeta_len[0]) {
1570                        int ret;
1571
1572                        ret = pblk_submit_meta_io(pblk, line);
1573                        if (ret) {
1574                                pblk_err(pblk, "sync meta line %d failed (%d)\n",
1575                                                        line->id, ret);
1576                                return;
1577                        }
1578                }
1579        }
1580
1581        pblk_wait_for_meta(pblk);
1582        flush_workqueue(pblk->close_wq);
1583}
1584
1585void __pblk_pipeline_flush(struct pblk *pblk)
1586{
1587        struct pblk_line_mgmt *l_mg = &pblk->l_mg;
1588        int ret;
1589
1590        spin_lock(&l_mg->free_lock);
1591        if (pblk->state == PBLK_STATE_RECOVERING ||
1592                                        pblk->state == PBLK_STATE_STOPPED) {
1593                spin_unlock(&l_mg->free_lock);
1594                return;
1595        }
1596        pblk->state = PBLK_STATE_RECOVERING;
1597        trace_pblk_state(pblk_disk_name(pblk), pblk->state);
1598        spin_unlock(&l_mg->free_lock);
1599
1600        pblk_flush_writer(pblk);
1601        pblk_wait_for_meta(pblk);
1602
1603        ret = pblk_recov_pad(pblk);
1604        if (ret) {
1605                pblk_err(pblk, "could not close data on teardown (%d)\n", ret);
1606                return;
1607        }
1608
1609        flush_workqueue(pblk->bb_wq);
1610        pblk_line_close_meta_sync(pblk);
1611}
1612
1613void __pblk_pipeline_stop(struct pblk *pblk)
1614{
1615        struct pblk_line_mgmt *l_mg = &pblk->l_mg;
1616
1617        spin_lock(&l_mg->free_lock);
1618        pblk->state = PBLK_STATE_STOPPED;
1619        trace_pblk_state(pblk_disk_name(pblk), pblk->state);
1620        l_mg->data_line = NULL;
1621        l_mg->data_next = NULL;
1622        spin_unlock(&l_mg->free_lock);
1623}
1624
1625void pblk_pipeline_stop(struct pblk *pblk)
1626{
1627        __pblk_pipeline_flush(pblk);
1628        __pblk_pipeline_stop(pblk);
1629}
1630
1631struct pblk_line *pblk_line_replace_data(struct pblk *pblk)
1632{
1633        struct pblk_line_mgmt *l_mg = &pblk->l_mg;
1634        struct pblk_line *cur, *new = NULL;
1635        unsigned int left_seblks;
1636
1637        new = l_mg->data_next;
1638        if (!new)
1639                goto out;
1640
1641        spin_lock(&l_mg->free_lock);
1642        cur = l_mg->data_line;
1643        l_mg->data_line = new;
1644
1645        pblk_line_setup_metadata(new, l_mg, &pblk->lm);
1646        spin_unlock(&l_mg->free_lock);
1647
1648retry_erase:
1649        left_seblks = atomic_read(&new->left_seblks);
1650        if (left_seblks) {
1651                /* If line is not fully erased, erase it */
1652                if (atomic_read(&new->left_eblks)) {
1653                        if (pblk_line_erase(pblk, new))
1654                                goto out;
1655                } else {
1656                        io_schedule();
1657                }
1658                goto retry_erase;
1659        }
1660
1661        if (pblk_line_alloc_bitmaps(pblk, new))
1662                return NULL;
1663
1664retry_setup:
1665        if (!pblk_line_init_metadata(pblk, new, cur)) {
1666                new = pblk_line_retry(pblk, new);
1667                if (!new)
1668                        goto out;
1669
1670                goto retry_setup;
1671        }
1672
1673        if (!pblk_line_init_bb(pblk, new, 1)) {
1674                new = pblk_line_retry(pblk, new);
1675                if (!new)
1676                        goto out;
1677
1678                goto retry_setup;
1679        }
1680
1681        pblk_rl_free_lines_dec(&pblk->rl, new, true);
1682
1683        /* Allocate next line for preparation */
1684        spin_lock(&l_mg->free_lock);
1685        l_mg->data_next = pblk_line_get(pblk);
1686        if (!l_mg->data_next) {
1687                /* If we cannot get a new line, we need to stop the pipeline.
1688                 * Only allow as many writes in as we can store safely and then
1689                 * fail gracefully
1690                 */
1691                pblk_stop_writes(pblk, new);
1692                l_mg->data_next = NULL;
1693        } else {
1694                l_mg->data_next->seq_nr = l_mg->d_seq_nr++;
1695                l_mg->data_next->type = PBLK_LINETYPE_DATA;
1696        }
1697        spin_unlock(&l_mg->free_lock);
1698
1699out:
1700        return new;
1701}
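
/*
 * Illustrative helper (hypothetical): the retry_erase loop above waits for
 * a line to be fully erased, issuing any erases that were never scheduled
 * and yielding via io_schedule() while sync erases are still in flight.
 */
static int pblk_line_wait_erased_sketch(struct pblk *pblk,
					struct pblk_line *line)
{
	int ret;

	while (atomic_read(&line->left_seblks)) {
		if (atomic_read(&line->left_eblks)) {
			/* erases still pending: issue them ourselves */
			ret = pblk_line_erase(pblk, line);
			if (ret)
				return ret;
		} else {
			/* erases in flight: yield until they complete */
			io_schedule();
		}
	}

	return 0;
}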
1702
1703static void __pblk_line_put(struct pblk *pblk, struct pblk_line *line)
1704{
1705        struct pblk_line_mgmt *l_mg = &pblk->l_mg;
1706        struct pblk_gc *gc = &pblk->gc;
1707
1708        spin_lock(&line->lock);
1709        WARN_ON(line->state != PBLK_LINESTATE_GC);
1710        if (line->w_err_gc->has_gc_err) {
1711                spin_unlock(&line->lock);
1712                pblk_err(pblk, "line %d had errors during GC\n", line->id);
1713                pblk_put_line_back(pblk, line);
1714                line->w_err_gc->has_gc_err = 0;
1715                return;
1716        }
1717
1718        line->state = PBLK_LINESTATE_FREE;
1719        trace_pblk_line_state(pblk_disk_name(pblk), line->id,
1720                                        line->state);
1721        line->gc_group = PBLK_LINEGC_NONE;
1722        pblk_line_free(line);
1723
1724        if (line->w_err_gc->has_write_err) {
1725                pblk_rl_werr_line_out(&pblk->rl);
1726                line->w_err_gc->has_write_err = 0;
1727        }
1728
1729        spin_unlock(&line->lock);
1730        atomic_dec(&gc->pipeline_gc);
1731
1732        spin_lock(&l_mg->free_lock);
1733        list_add_tail(&line->list, &l_mg->free_list);
1734        l_mg->nr_free_lines++;
1735        spin_unlock(&l_mg->free_lock);
1736
1737        pblk_rl_free_lines_inc(&pblk->rl, line);
1738}
1739
1740static void pblk_line_put_ws(struct work_struct *work)
1741{
1742        struct pblk_line_ws *line_put_ws = container_of(work,
1743                                                struct pblk_line_ws, ws);
1744        struct pblk *pblk = line_put_ws->pblk;
1745        struct pblk_line *line = line_put_ws->line;
1746
1747        __pblk_line_put(pblk, line);
1748        mempool_free(line_put_ws, &pblk->gen_ws_pool);
1749}
1750
1751void pblk_line_put(struct kref *ref)
1752{
1753        struct pblk_line *line = container_of(ref, struct pblk_line, ref);
1754        struct pblk *pblk = line->pblk;
1755
1756        __pblk_line_put(pblk, line);
1757}
1758
1759void pblk_line_put_wq(struct kref *ref)
1760{
1761        struct pblk_line *line = container_of(ref, struct pblk_line, ref);
1762        struct pblk *pblk = line->pblk;
1763        struct pblk_line_ws *line_put_ws;
1764
1765        line_put_ws = mempool_alloc(&pblk->gen_ws_pool, GFP_ATOMIC);
1766        if (!line_put_ws)
1767                return;
1768
1769        line_put_ws->pblk = pblk;
1770        line_put_ws->line = line;
1771        line_put_ws->priv = NULL;
1772
1773        INIT_WORK(&line_put_ws->ws, pblk_line_put_ws);
1774        queue_work(pblk->r_end_wq, &line_put_ws->ws);
1775}
1776
1777int pblk_blk_erase_async(struct pblk *pblk, struct ppa_addr ppa)
1778{
1779        struct nvm_rq *rqd;
1780        int err;
1781
1782        rqd = pblk_alloc_rqd(pblk, PBLK_ERASE);
1783
1784        pblk_setup_e_rq(pblk, rqd, ppa);
1785
1786        rqd->end_io = pblk_end_io_erase;
1787        rqd->private = pblk;
1788
1789        trace_pblk_chunk_reset(pblk_disk_name(pblk),
1790                                &ppa, PBLK_CHUNK_RESET_START);
1791
1792        /* The write thread schedules erases so that it minimizes disturbance
1793         * to writes. Thus, there is no need to take the LUN semaphore.
1794         */
1795        err = pblk_submit_io(pblk, rqd);
1796        if (err) {
1797                struct nvm_tgt_dev *dev = pblk->dev;
1798                struct nvm_geo *geo = &dev->geo;
1799
1800                pblk_err(pblk, "could not async erase line:%d, blk:%d\n",
1801                                        pblk_ppa_to_line_id(ppa),
1802                                        pblk_ppa_to_pos(geo, ppa));
1803        }
1804
1805        return err;
1806}
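
/*
 * Usage sketch (hypothetical helper): composing the chunk's generic ppa
 * from a line id and an in-line address, then firing the asynchronous
 * reset. addr_to_gen_ppa() is the same translation used by the GC update
 * path later in this file.
 */
static int pblk_erase_chunk_sketch(struct pblk *pblk, struct pblk_line *line,
				   u64 paddr)
{
	struct ppa_addr ppa = addr_to_gen_ppa(pblk, paddr, line->id);

	return pblk_blk_erase_async(pblk, ppa);
}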
1807
1808struct pblk_line *pblk_line_get_data(struct pblk *pblk)
1809{
1810        return pblk->l_mg.data_line;
1811}
1812
1813/* For now, always erase next line */
1814struct pblk_line *pblk_line_get_erase(struct pblk *pblk)
1815{
1816        return pblk->l_mg.data_next;
1817}
1818
1819int pblk_line_is_full(struct pblk_line *line)
1820{
1821        return (line->left_msecs == 0);
1822}
1823
1824static void pblk_line_should_sync_meta(struct pblk *pblk)
1825{
1826        if (pblk_rl_is_limit(&pblk->rl))
1827                pblk_line_close_meta_sync(pblk);
1828}
1829
1830void pblk_line_close(struct pblk *pblk, struct pblk_line *line)
1831{
1832        struct nvm_tgt_dev *dev = pblk->dev;
1833        struct nvm_geo *geo = &dev->geo;
1834        struct pblk_line_meta *lm = &pblk->lm;
1835        struct pblk_line_mgmt *l_mg = &pblk->l_mg;
1836        struct list_head *move_list;
1837        int i;
1838
1839#ifdef CONFIG_NVM_PBLK_DEBUG
1840        WARN(!bitmap_full(line->map_bitmap, lm->sec_per_line),
1841                                "pblk: corrupt closed line %d\n", line->id);
1842#endif
1843
1844        spin_lock(&l_mg->free_lock);
1845        WARN_ON(!test_and_clear_bit(line->meta_line, &l_mg->meta_bitmap));
1846        spin_unlock(&l_mg->free_lock);
1847
1848        spin_lock(&l_mg->gc_lock);
1849        spin_lock(&line->lock);
1850        WARN_ON(line->state != PBLK_LINESTATE_OPEN);
1851        line->state = PBLK_LINESTATE_CLOSED;
1852        move_list = pblk_line_gc_list(pblk, line);
1853        list_add_tail(&line->list, move_list);
1854
1855        mempool_free(line->map_bitmap, l_mg->bitmap_pool);
1856        line->map_bitmap = NULL;
1857        line->smeta = NULL;
1858        line->emeta = NULL;
1859
1860        for (i = 0; i < lm->blk_per_line; i++) {
1861                struct pblk_lun *rlun = &pblk->luns[i];
1862                int pos = pblk_ppa_to_pos(geo, rlun->bppa);
1863                int state = line->chks[pos].state;
1864
1865                if (!(state & NVM_CHK_ST_OFFLINE))
1866                        line->chks[pos].state = NVM_CHK_ST_CLOSED;
1867        }
1868
1869        spin_unlock(&line->lock);
1870        spin_unlock(&l_mg->gc_lock);
1871
1872        trace_pblk_line_state(pblk_disk_name(pblk), line->id,
1873                                        line->state);
1874}
1875
1876void pblk_line_close_meta(struct pblk *pblk, struct pblk_line *line)
1877{
1878        struct pblk_line_mgmt *l_mg = &pblk->l_mg;
1879        struct pblk_line_meta *lm = &pblk->lm;
1880        struct pblk_emeta *emeta = line->emeta;
1881        struct line_emeta *emeta_buf = emeta->buf;
1882        struct wa_counters *wa = emeta_to_wa(lm, emeta_buf);
1883
1884        /* No need for an exact vsc value; avoid a big line lock and use an approximation */
1885        memcpy(emeta_to_vsc(pblk, emeta_buf), l_mg->vsc_list, lm->vsc_list_len);
1886        memcpy(emeta_to_bb(emeta_buf), line->blk_bitmap, lm->blk_bitmap_len);
1887
1888        wa->user = cpu_to_le64(atomic64_read(&pblk->user_wa));
1889        wa->pad = cpu_to_le64(atomic64_read(&pblk->pad_wa));
1890        wa->gc = cpu_to_le64(atomic64_read(&pblk->gc_wa));
1891
1892        if (le32_to_cpu(emeta_buf->header.identifier) != PBLK_MAGIC) {
1893                emeta_buf->header.identifier = cpu_to_le32(PBLK_MAGIC);
1894                guid_copy((guid_t *)&emeta_buf->header.uuid,
1895                                                        &pblk->instance_uuid);
1896                emeta_buf->header.id = cpu_to_le32(line->id);
1897                emeta_buf->header.type = cpu_to_le16(line->type);
1898                emeta_buf->header.version_major = EMETA_VERSION_MAJOR;
1899                emeta_buf->header.version_minor = EMETA_VERSION_MINOR;
1900                emeta_buf->header.crc = cpu_to_le32(
1901                        pblk_calc_meta_header_crc(pblk, &emeta_buf->header));
1902        }
1903
1904        emeta_buf->nr_valid_lbas = cpu_to_le64(line->nr_valid_lbas);
1905        emeta_buf->crc = cpu_to_le32(pblk_calc_emeta_crc(pblk, emeta_buf));
1906
1907        spin_lock(&l_mg->close_lock);
1908        spin_lock(&line->lock);
1909
1910        /* Update the in-memory start address for emeta, in case it has
1911         * shifted due to write errors
1912         */
1913        if (line->emeta_ssec != line->cur_sec)
1914                line->emeta_ssec = line->cur_sec;
1915
1916        list_add_tail(&line->list, &l_mg->emeta_list);
1917        spin_unlock(&line->lock);
1918        spin_unlock(&l_mg->close_lock);
1919
1920        pblk_line_should_sync_meta(pblk);
1921}
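
/*
 * Verification sketch (hypothetical): recovery can validate a sealed emeta
 * buffer by recomputing the CRC exactly as pblk_line_close_meta() stored
 * it above.
 */
static bool pblk_emeta_crc_ok_sketch(struct pblk *pblk,
				     struct line_emeta *emeta_buf)
{
	return le32_to_cpu(emeta_buf->crc) ==
				pblk_calc_emeta_crc(pblk, emeta_buf);
}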
1922
1923static void pblk_save_lba_list(struct pblk *pblk, struct pblk_line *line)
1924{
1925        struct pblk_line_meta *lm = &pblk->lm;
1926        struct pblk_line_mgmt *l_mg = &pblk->l_mg;
1927        unsigned int lba_list_size = lm->emeta_len[2];
1928        struct pblk_w_err_gc *w_err_gc = line->w_err_gc;
1929        struct pblk_emeta *emeta = line->emeta;
1930
1931        w_err_gc->lba_list = pblk_malloc(lba_list_size,
1932                                         l_mg->emeta_alloc_type, GFP_KERNEL);
1933        if (w_err_gc->lba_list)
1934                memcpy(w_err_gc->lba_list, emeta_to_lbas(pblk, emeta->buf), lba_list_size);
1935}
1936
1937void pblk_line_close_ws(struct work_struct *work)
1938{
1939        struct pblk_line_ws *line_ws = container_of(work, struct pblk_line_ws,
1940                                                                        ws);
1941        struct pblk *pblk = line_ws->pblk;
1942        struct pblk_line *line = line_ws->line;
1943        struct pblk_w_err_gc *w_err_gc = line->w_err_gc;
1944
1945        /* Write errors make the emeta start address stored in smeta invalid,
1946         * so keep a copy of the lba list until we've gc'd the line
1947         */
1948        if (w_err_gc->has_write_err)
1949                pblk_save_lba_list(pblk, line);
1950
1951        pblk_line_close(pblk, line);
1952        mempool_free(line_ws, &pblk->gen_ws_pool);
1953}
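
/*
 * Illustrative counterpart on the GC side (hypothetical helper): when the
 * line saw a write error, the copy saved above is used instead of the
 * on-media emeta, whose start address can no longer be trusted. Assumes
 * the caller otherwise has a valid emeta buffer at hand (the real GC path
 * re-reads it from the device).
 */
static __le64 *pblk_gc_lba_list_sketch(struct pblk *pblk,
				       struct pblk_line *line,
				       struct line_emeta *emeta_buf)
{
	if (line->w_err_gc->has_write_err)
		return line->w_err_gc->lba_list;

	return emeta_to_lbas(pblk, emeta_buf);
}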
1954
1955void pblk_gen_run_ws(struct pblk *pblk, struct pblk_line *line, void *priv,
1956                      void (*work)(struct work_struct *), gfp_t gfp_mask,
1957                      struct workqueue_struct *wq)
1958{
1959        struct pblk_line_ws *line_ws;
1960
1961        line_ws = mempool_alloc(&pblk->gen_ws_pool, gfp_mask);
1962
1963        line_ws->pblk = pblk;
1964        line_ws->line = line;
1965        line_ws->priv = priv;
1966
1967        INIT_WORK(&line_ws->ws, work);
1968        queue_work(wq, &line_ws->ws);
1969}
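
/*
 * Usage sketch modeled on the write path (hedged, not a verbatim caller):
 * handing a fully written line to pblk_line_close_ws() through the generic
 * helper above, on the dedicated close workqueue.
 */
static void pblk_schedule_line_close_sketch(struct pblk *pblk,
					    struct pblk_line *line)
{
	pblk_gen_run_ws(pblk, line, NULL, pblk_line_close_ws,
						GFP_ATOMIC, pblk->close_wq);
}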
1970
1971static void __pblk_down_chunk(struct pblk *pblk, int pos)
1972{
1973        struct pblk_lun *rlun = &pblk->luns[pos];
1974        int ret;
1975
1976        /*
1977         * Only send one inflight I/O per LUN. Since we map at a page
1978         * granularity, all ppas in the I/O will map to the same LUN.
1979         */
1980
1981        ret = down_timeout(&rlun->wr_sem, msecs_to_jiffies(30000));
1982        if (ret == -ETIME || ret == -EINTR)
1983                pblk_err(pblk, "taking lun semaphore timed out: err %d\n",
1984                                -ret);
1985}
1986
1987void pblk_down_chunk(struct pblk *pblk, struct ppa_addr ppa)
1988{
1989        struct nvm_tgt_dev *dev = pblk->dev;
1990        struct nvm_geo *geo = &dev->geo;
1991        int pos = pblk_ppa_to_pos(geo, ppa);
1992
1993        __pblk_down_chunk(pblk, pos);
1994}
1995
1996void pblk_down_rq(struct pblk *pblk, struct ppa_addr ppa,
1997                  unsigned long *lun_bitmap)
1998{
1999        struct nvm_tgt_dev *dev = pblk->dev;
2000        struct nvm_geo *geo = &dev->geo;
2001        int pos = pblk_ppa_to_pos(geo, ppa);
2002
2003        /* If the LUN has been locked for this same request, do not attempt to
2004         * lock it again
2005         */
2006        if (test_and_set_bit(pos, lun_bitmap))
2007                return;
2008
2009        __pblk_down_chunk(pblk, pos);
2010}
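
/*
 * Protocol sketch (hypothetical excerpt): each request carries a LUN
 * bitmap; pblk_down_rq() takes a LUN semaphore at most once per request,
 * and one pblk_up_rq() call releases every LUN marked in the bitmap. In
 * pblk the up happens at write completion; it is inlined here only to
 * show the pairing.
 */
static void pblk_lun_lock_pairing_sketch(struct pblk *pblk,
					 struct ppa_addr *ppa_list, int nr_ppas,
					 unsigned long *lun_bitmap)
{
	int i;

	for (i = 0; i < nr_ppas; i++)
		pblk_down_rq(pblk, ppa_list[i], lun_bitmap);

	/* ... submit the write and wait for completion (elided) ... */

	pblk_up_rq(pblk, lun_bitmap);
}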
2011
2012void pblk_up_chunk(struct pblk *pblk, struct ppa_addr ppa)
2013{
2014        struct nvm_tgt_dev *dev = pblk->dev;
2015        struct nvm_geo *geo = &dev->geo;
2016        struct pblk_lun *rlun;
2017        int pos = pblk_ppa_to_pos(geo, ppa);
2018
2019        rlun = &pblk->luns[pos];
2020        up(&rlun->wr_sem);
2021}
2022
2023void pblk_up_rq(struct pblk *pblk, unsigned long *lun_bitmap)
2024{
2025        struct nvm_tgt_dev *dev = pblk->dev;
2026        struct nvm_geo *geo = &dev->geo;
2027        struct pblk_lun *rlun;
2028        int num_lun = geo->all_luns;
2029        int bit = -1;
2030
2031        while ((bit = find_next_bit(lun_bitmap, num_lun, bit + 1)) < num_lun) {
2032                rlun = &pblk->luns[bit];
2033                up(&rlun->wr_sem);
2034        }
2035}
2036
2037void pblk_update_map(struct pblk *pblk, sector_t lba, struct ppa_addr ppa)
2038{
2039        struct ppa_addr ppa_l2p;
2040
2041        /* logic error: lba out-of-bounds. Ignore update */
2042        if (!(lba < pblk->capacity)) {
2043                WARN(1, "pblk: corrupted L2P map request\n");
2044                return;
2045        }
2046
2047        spin_lock(&pblk->trans_lock);
2048        ppa_l2p = pblk_trans_map_get(pblk, lba);
2049
2050        if (!pblk_addr_in_cache(ppa_l2p) && !pblk_ppa_empty(ppa_l2p))
2051                pblk_map_invalidate(pblk, ppa_l2p);
2052
2053        pblk_trans_map_set(pblk, lba, ppa);
2054        spin_unlock(&pblk->trans_lock);
2055}
2056
2057void pblk_update_map_cache(struct pblk *pblk, sector_t lba, struct ppa_addr ppa)
2058{
2059
2060#ifdef CONFIG_NVM_PBLK_DEBUG
2061        /* Callers must ensure that the ppa points to a cache address */
2062        BUG_ON(!pblk_addr_in_cache(ppa));
2063        BUG_ON(pblk_rb_pos_oob(&pblk->rwb, pblk_addr_to_cacheline(ppa)));
2064#endif
2065
2066        pblk_update_map(pblk, lba, ppa);
2067}
2068
2069int pblk_update_map_gc(struct pblk *pblk, sector_t lba, struct ppa_addr ppa_new,
2070                       struct pblk_line *gc_line, u64 paddr_gc)
2071{
2072        struct ppa_addr ppa_l2p, ppa_gc;
2073        int ret = 1;
2074
2075#ifdef CONFIG_NVM_PBLK_DEBUG
2076        /* Callers must ensure that the ppa points to a cache address */
2077        BUG_ON(!pblk_addr_in_cache(ppa_new));
2078        BUG_ON(pblk_rb_pos_oob(&pblk->rwb, pblk_addr_to_cacheline(ppa_new)));
2079#endif
2080
2081        /* logic error: lba out-of-bounds. Ignore update */
2082        if (!(lba < pblk->capacity)) {
2083                WARN(1, "pblk: corrupted L2P map request\n");
2084                return 0;
2085        }
2086
2087        spin_lock(&pblk->trans_lock);
2088        ppa_l2p = pblk_trans_map_get(pblk, lba);
2089        ppa_gc = addr_to_gen_ppa(pblk, paddr_gc, gc_line->id);
2090
2091        if (!pblk_ppa_comp(ppa_l2p, ppa_gc)) {
2092                spin_lock(&gc_line->lock);
2093                WARN(!test_bit(paddr_gc, gc_line->invalid_bitmap),
2094                                                "pblk: corrupted GC update");
2095                spin_unlock(&gc_line->lock);
2096
2097                ret = 0;
2098                goto out;
2099        }
2100
2101        pblk_trans_map_set(pblk, lba, ppa_new);
2102out:
2103        spin_unlock(&pblk->trans_lock);
2104        return ret;
2105}
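
/*
 * Hypothetical GC-side reading of the return value: zero means the host
 * rewrote the lba while GC was moving it, so the buffered GC copy must be
 * dropped rather than installed.
 */
static bool pblk_gc_lost_race_sketch(struct pblk *pblk, sector_t lba,
				     struct ppa_addr ppa_cache,
				     struct pblk_line *gc_line, u64 paddr_gc)
{
	/* true when the stale GC entry should be discarded */
	return !pblk_update_map_gc(pblk, lba, ppa_cache, gc_line, paddr_gc);
}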
2106
2107void pblk_update_map_dev(struct pblk *pblk, sector_t lba,
2108                         struct ppa_addr ppa_mapped, struct ppa_addr ppa_cache)
2109{
2110        struct ppa_addr ppa_l2p;
2111
2112#ifdef CONFIG_NVM_PBLK_DEBUG
2113        /* Callers must ensure that the ppa points to a device address */
2114        BUG_ON(pblk_addr_in_cache(ppa_mapped));
2115#endif
2116        /* Invalidate and discard padded entries */
2117        if (lba == ADDR_EMPTY) {
2118                atomic64_inc(&pblk->pad_wa);
2119#ifdef CONFIG_NVM_PBLK_DEBUG
2120                atomic_long_inc(&pblk->padded_wb);
2121#endif
2122                if (!pblk_ppa_empty(ppa_mapped))
2123                        pblk_map_invalidate(pblk, ppa_mapped);
2124                return;
2125        }
2126
2127        /* logic error: lba out-of-bounds. Ignore update */
2128        if (!(lba < pblk->capacity)) {
2129                WARN(1, "pblk: corrupted L2P map request\n");
2130                return;
2131        }
2132
2133        spin_lock(&pblk->trans_lock);
2134        ppa_l2p = pblk_trans_map_get(pblk, lba);
2135
2136        /* Do not update L2P if the cacheline has been updated. In this case,
2137         * the mapped ppa must be invalidated
2138         */
2139        if (!pblk_ppa_comp(ppa_l2p, ppa_cache)) {
2140                if (!pblk_ppa_empty(ppa_mapped))
2141                        pblk_map_invalidate(pblk, ppa_mapped);
2142                goto out;
2143        }
2144
2145#ifdef CONFIG_NVM_PBLK_DEBUG
2146        WARN_ON(!pblk_addr_in_cache(ppa_l2p) && !pblk_ppa_empty(ppa_l2p));
2147#endif
2148
2149        pblk_trans_map_set(pblk, lba, ppa_mapped);
2150out:
2151        spin_unlock(&pblk->trans_lock);
2152}
2153
2154int pblk_lookup_l2p_seq(struct pblk *pblk, struct ppa_addr *ppas,
2155                         sector_t blba, int nr_secs, bool *from_cache)
2156{
2157        int i;
2158
2159        spin_lock(&pblk->trans_lock);
2160        for (i = 0; i < nr_secs; i++) {
2161                struct ppa_addr ppa;
2162
2163                ppa = ppas[i] = pblk_trans_map_get(pblk, blba + i);
2164
2165                /* If the L2P entry maps to a line, the reference is valid */
2166                if (!pblk_ppa_empty(ppa) && !pblk_addr_in_cache(ppa)) {
2167                        struct pblk_line *line = pblk_ppa_to_line(pblk, ppa);
2168
2169                        if (i > 0 && *from_cache)
2170                                break;
2171                        *from_cache = false;
2172
2173                        kref_get(&line->ref);
2174                } else {
2175                        if (i > 0 && !*from_cache)
2176                                break;
2177                        *from_cache = true;
2178                }
2179        }
2180        spin_unlock(&pblk->trans_lock);
2181        return i;
2182}
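
/*
 * Caller sketch (hypothetical read path): pblk_lookup_l2p_seq() returns a
 * homogeneous prefix of the requested range, with *from_cache telling
 * whether it maps to the write buffer or to the device, so a caller loops
 * until the whole range is consumed. NVM_MAX_VLBA (the lightnvm vector
 * cap) bounds the on-stack ppa array.
 */
static void pblk_read_range_sketch(struct pblk *pblk, sector_t blba,
				   int nr_secs)
{
	struct ppa_addr ppas[NVM_MAX_VLBA];
	bool from_cache;
	int done = 0;

	while (done < nr_secs) {
		int nr = pblk_lookup_l2p_seq(pblk, ppas, blba + done,
					     nr_secs - done, &from_cache);

		/* serve 'nr' sectors from cache or media; for device-mapped
		 * entries, drop the line references once the read completes
		 * (elided)
		 */
		done += nr;
	}
}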
2183
2184void pblk_lookup_l2p_rand(struct pblk *pblk, struct ppa_addr *ppas,
2185                          u64 *lba_list, int nr_secs)
2186{
2187        u64 lba;
2188        int i;
2189
2190        spin_lock(&pblk->trans_lock);
2191        for (i = 0; i < nr_secs; i++) {
2192                lba = lba_list[i];
2193                if (lba != ADDR_EMPTY) {
2194                        /* logic error: lba out-of-bounds. Ignore lookup */
2195                        if (!(lba < pblk->capacity)) {
2196                                WARN(1, "pblk: corrupted L2P map request\n");
2197                                continue;
2198                        }
2199                        ppas[i] = pblk_trans_map_get(pblk, lba);
2200                }
2201        }
2202        spin_unlock(&pblk->trans_lock);
2203}
2204
2205void *pblk_get_meta_for_writes(struct pblk *pblk, struct nvm_rq *rqd)
2206{
2207        void *buffer;
2208
2209        if (pblk_is_oob_meta_supported(pblk)) {
2210                /* Just use the OOB metadata buffer, as usual */
2211                buffer = rqd->meta_list;
2212        } else {
2213                /* We need to reuse the last page of the request (packed
2214                 * metadata) in a similar way to traditional OOB metadata
2215                 */
2216                buffer = page_to_virt(
2217                        rqd->bio->bi_io_vec[rqd->bio->bi_vcnt - 1].bv_page);
2218        }
2219
2220        return buffer;
2221}
2222
2223void pblk_get_packed_meta(struct pblk *pblk, struct nvm_rq *rqd)
2224{
2225        void *meta_list = rqd->meta_list;
2226        void *page;
2227        int i = 0;
2228
2229        if (pblk_is_oob_meta_supported(pblk))
2230                return;
2231
2232        page = page_to_virt(rqd->bio->bi_io_vec[rqd->bio->bi_vcnt - 1].bv_page);
2233        /* We need to fill the OOB meta buffer with data from the packed metadata */
2234        for (; i < rqd->nr_ppas; i++)
2235                memcpy(pblk_get_meta(pblk, meta_list, i),
2236                        page + (i * sizeof(struct pblk_sec_meta)),
2237                        sizeof(struct pblk_sec_meta));
2238}
2239
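
/*
 * Layout sketch (illustrative): with packed metadata, the per-sector meta
 * for sector i lives at byte offset i * sizeof(struct pblk_sec_meta) in
 * the last page of the request, mirroring the copy loop above.
 */
static void *pblk_packed_meta_ptr_sketch(void *last_page, int i)
{
	return last_page + i * sizeof(struct pblk_sec_meta);
}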