linux/drivers/lightnvm/rrpc.c
/*
 * Copyright (C) 2015 IT University of Copenhagen
 * Initial release: Matias Bjorling <m@bjorling.me>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version
 * 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * Implementation of a Round-robin page-based Hybrid FTL for Open-channel SSDs.
 */

#include "rrpc.h"

static struct kmem_cache *rrpc_gcb_cache, *rrpc_rq_cache;
static DECLARE_RWSEM(rrpc_lock);

static int rrpc_submit_io(struct rrpc *rrpc, struct bio *bio,
                                struct nvm_rq *rqd, unsigned long flags);

#define rrpc_for_each_lun(rrpc, rlun, i) \
                for ((i) = 0, rlun = &(rrpc)->luns[0]; \
                        (i) < (rrpc)->nr_luns; (i)++, rlun = &(rrpc)->luns[(i)])

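/* Mark the sector backing the given translation entry as invalid in its
 * block and clear the reverse mapping. Requires rrpc->rev_lock to be held.
 */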
static void rrpc_page_invalidate(struct rrpc *rrpc, struct rrpc_addr *a)
{
        struct rrpc_block *rblk = a->rblk;
        unsigned int pg_offset;

        lockdep_assert_held(&rrpc->rev_lock);

        if (a->addr == ADDR_EMPTY || !rblk)
                return;

        spin_lock(&rblk->lock);

        div_u64_rem(a->addr, rrpc->dev->sec_per_blk, &pg_offset);
        WARN_ON(test_and_set_bit(pg_offset, rblk->invalid_pages));
        rblk->nr_invalid_pages++;

        spin_unlock(&rblk->lock);

        rrpc->rev_trans_map[a->addr - rrpc->poffset].addr = ADDR_EMPTY;
}

static void rrpc_invalidate_range(struct rrpc *rrpc, sector_t slba,
                                                        unsigned int len)
{
        sector_t i;

        spin_lock(&rrpc->rev_lock);
        for (i = slba; i < slba + len; i++) {
                struct rrpc_addr *gp = &rrpc->trans_map[i];

                rrpc_page_invalidate(rrpc, gp);
                gp->rblk = NULL;
        }
        spin_unlock(&rrpc->rev_lock);
}

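/* Reserve an inflight request covering [laddr, laddr + pages). Returns the
 * request on success, NULL if the range is already locked by another request,
 * or an ERR_PTR if no request could be allocated.
 */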
static struct nvm_rq *rrpc_inflight_laddr_acquire(struct rrpc *rrpc,
                                        sector_t laddr, unsigned int pages)
{
        struct nvm_rq *rqd;
        struct rrpc_inflight_rq *inf;

        rqd = mempool_alloc(rrpc->rq_pool, GFP_ATOMIC);
        if (!rqd)
                return ERR_PTR(-ENOMEM);

        inf = rrpc_get_inflight_rq(rqd);
        if (rrpc_lock_laddr(rrpc, laddr, pages, inf)) {
                mempool_free(rqd, rrpc->rq_pool);
                return NULL;
        }

        return rqd;
}

static void rrpc_inflight_laddr_release(struct rrpc *rrpc, struct nvm_rq *rqd)
{
        struct rrpc_inflight_rq *inf = rrpc_get_inflight_rq(rqd);

        rrpc_unlock_laddr(rrpc, inf);

        mempool_free(rqd, rrpc->rq_pool);
}

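/* Handle a discard bio: wait until the logical range can be locked, then
 * invalidate the mapped pages so the backing blocks can be reclaimed by GC.
 */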
static void rrpc_discard(struct rrpc *rrpc, struct bio *bio)
{
        sector_t slba = bio->bi_iter.bi_sector / NR_PHY_IN_LOG;
        sector_t len = bio->bi_iter.bi_size / RRPC_EXPOSED_PAGE_SIZE;
        struct nvm_rq *rqd;

        while (1) {
                rqd = rrpc_inflight_laddr_acquire(rrpc, slba, len);
                if (rqd)
                        break;

                schedule();
        }

        if (IS_ERR(rqd)) {
                pr_err("rrpc: unable to acquire inflight IO\n");
                bio_io_error(bio);
                return;
        }

        rrpc_invalidate_range(rrpc, slba, len);
        rrpc_inflight_laddr_release(rrpc, rqd);
}

static int block_is_full(struct rrpc *rrpc, struct rrpc_block *rblk)
{
        return (rblk->next_page == rrpc->dev->sec_per_blk);
}

/* Calculate relative addr for the given block, considering instantiated LUNs */
static u64 block_to_rel_addr(struct rrpc *rrpc, struct rrpc_block *rblk)
{
        struct nvm_block *blk = rblk->parent;
        int lun_blk = blk->id % (rrpc->dev->blks_per_lun * rrpc->nr_luns);

        return lun_blk * rrpc->dev->sec_per_blk;
}

/* Calculate global addr for the given block */
static u64 block_to_addr(struct rrpc *rrpc, struct rrpc_block *rblk)
{
        struct nvm_block *blk = rblk->parent;

        return blk->id * rrpc->dev->sec_per_blk;
}

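/* Expand a device-linear sector address into its generic geometry fields
 * (sector, page, block, lun and channel).
 */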
static struct ppa_addr linear_to_generic_addr(struct nvm_dev *dev,
                                                        struct ppa_addr r)
{
        struct ppa_addr l;
        int secs, pgs, blks, luns;
        sector_t ppa = r.ppa;

        l.ppa = 0;

        div_u64_rem(ppa, dev->sec_per_pg, &secs);
        l.g.sec = secs;

        sector_div(ppa, dev->sec_per_pg);
        div_u64_rem(ppa, dev->pgs_per_blk, &pgs);
        l.g.pg = pgs;

        sector_div(ppa, dev->pgs_per_blk);
        div_u64_rem(ppa, dev->blks_per_lun, &blks);
        l.g.blk = blks;

        sector_div(ppa, dev->blks_per_lun);
        div_u64_rem(ppa, dev->luns_per_chnl, &luns);
        l.g.lun = luns;

        sector_div(ppa, dev->luns_per_chnl);
        l.g.ch = ppa;

        return l;
}

static struct ppa_addr rrpc_ppa_to_gaddr(struct nvm_dev *dev, u64 addr)
{
        struct ppa_addr paddr;

        paddr.ppa = addr;
        return linear_to_generic_addr(dev, paddr);
}

/* requires lun->lock taken */
static void rrpc_set_lun_cur(struct rrpc_lun *rlun, struct rrpc_block *new_rblk,
                                                struct rrpc_block **cur_rblk)
{
        struct rrpc *rrpc = rlun->rrpc;

        if (*cur_rblk) {
                spin_lock(&(*cur_rblk)->lock);
                WARN_ON(!block_is_full(rrpc, *cur_rblk));
                spin_unlock(&(*cur_rblk)->lock);
        }
        *cur_rblk = new_rblk;
}

static struct rrpc_block *rrpc_get_blk(struct rrpc *rrpc, struct rrpc_lun *rlun,
                                                        unsigned long flags)
{
        struct nvm_block *blk;
        struct rrpc_block *rblk;

        blk = nvm_get_blk(rrpc->dev, rlun->parent, flags);
        if (!blk) {
                pr_err("nvm: rrpc: cannot get new block from media manager\n");
                return NULL;
        }

        rblk = rrpc_get_rblk(rlun, blk->id);
        blk->priv = rblk;
        bitmap_zero(rblk->invalid_pages, rrpc->dev->sec_per_blk);
        rblk->next_page = 0;
        rblk->nr_invalid_pages = 0;
        atomic_set(&rblk->data_cmnt_size, 0);

        return rblk;
}

static void rrpc_put_blk(struct rrpc *rrpc, struct rrpc_block *rblk)
{
        nvm_put_blk(rrpc->dev, rblk->parent);
}

static void rrpc_put_blks(struct rrpc *rrpc)
{
        struct rrpc_lun *rlun;
        int i;

        for (i = 0; i < rrpc->nr_luns; i++) {
                rlun = &rrpc->luns[i];
                if (rlun->cur)
                        rrpc_put_blk(rrpc, rlun->cur);
                if (rlun->gc_cur)
                        rrpc_put_blk(rrpc, rlun->gc_cur);
        }
}

static struct rrpc_lun *get_next_lun(struct rrpc *rrpc)
{
        int next = atomic_inc_return(&rrpc->next_lun);

        return &rrpc->luns[next % rrpc->nr_luns];
}

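/* Schedule the per-lun GC work on all luns managed by this target. */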
static void rrpc_gc_kick(struct rrpc *rrpc)
{
        struct rrpc_lun *rlun;
        unsigned int i;

        for (i = 0; i < rrpc->nr_luns; i++) {
                rlun = &rrpc->luns[i];
                queue_work(rrpc->krqd_wq, &rlun->ws_gc);
        }
}

/*
 * timed GC every interval.
 */
static void rrpc_gc_timer(unsigned long data)
{
        struct rrpc *rrpc = (struct rrpc *)data;

        rrpc_gc_kick(rrpc);
        mod_timer(&rrpc->gc_timer, jiffies + msecs_to_jiffies(10));
}

static void rrpc_end_sync_bio(struct bio *bio)
{
        struct completion *waiting = bio->bi_private;

        if (bio->bi_error)
                pr_err("nvm: gc request failed (%u).\n", bio->bi_error);

        complete(waiting);
}

/*
 * rrpc_move_valid_pages -- migrate live data off the block
 * @rrpc: the 'rrpc' structure
 * @block: the block from which to migrate live pages
 *
 * Description:
 *   GC algorithms may call this function to migrate remaining live
 *   pages off the block prior to erasing it. This function blocks
 *   further execution until the operation is complete.
 */
static int rrpc_move_valid_pages(struct rrpc *rrpc, struct rrpc_block *rblk)
{
        struct request_queue *q = rrpc->dev->q;
        struct rrpc_rev_addr *rev;
        struct nvm_rq *rqd;
        struct bio *bio;
        struct page *page;
        int slot;
        int nr_sec_per_blk = rrpc->dev->sec_per_blk;
        u64 phys_addr;
        DECLARE_COMPLETION_ONSTACK(wait);

        if (bitmap_full(rblk->invalid_pages, nr_sec_per_blk))
                return 0;

        bio = bio_alloc(GFP_NOIO, 1);
        if (!bio) {
                pr_err("nvm: could not alloc bio to gc\n");
                return -ENOMEM;
        }

        page = mempool_alloc(rrpc->page_pool, GFP_NOIO);
        if (!page) {
                bio_put(bio);
                return -ENOMEM;
        }

        while ((slot = find_first_zero_bit(rblk->invalid_pages,
                                            nr_sec_per_blk)) < nr_sec_per_blk) {

                /* Lock laddr */
                phys_addr = rblk->parent->id * nr_sec_per_blk + slot;

try:
                spin_lock(&rrpc->rev_lock);
                /* Get logical address from physical to logical table */
                rev = &rrpc->rev_trans_map[phys_addr - rrpc->poffset];
                /* already updated by previous regular write */
                if (rev->addr == ADDR_EMPTY) {
                        spin_unlock(&rrpc->rev_lock);
                        continue;
                }

                rqd = rrpc_inflight_laddr_acquire(rrpc, rev->addr, 1);
                if (IS_ERR_OR_NULL(rqd)) {
                        spin_unlock(&rrpc->rev_lock);
                        schedule();
                        goto try;
                }

                spin_unlock(&rrpc->rev_lock);

                /* Perform read to do GC */
                bio->bi_iter.bi_sector = rrpc_get_sector(rev->addr);
                bio_set_op_attrs(bio, REQ_OP_READ, 0);
                bio->bi_private = &wait;
                bio->bi_end_io = rrpc_end_sync_bio;

                /* TODO: may fail when EXP_PG_SIZE > PAGE_SIZE */
                bio_add_pc_page(q, bio, page, RRPC_EXPOSED_PAGE_SIZE, 0);

                if (rrpc_submit_io(rrpc, bio, rqd, NVM_IOTYPE_GC)) {
                        pr_err("rrpc: gc read failed.\n");
                        rrpc_inflight_laddr_release(rrpc, rqd);
                        goto finished;
                }
                wait_for_completion_io(&wait);
                if (bio->bi_error) {
                        rrpc_inflight_laddr_release(rrpc, rqd);
                        goto finished;
                }

                bio_reset(bio);
                reinit_completion(&wait);

                bio->bi_iter.bi_sector = rrpc_get_sector(rev->addr);
                bio_set_op_attrs(bio, REQ_OP_WRITE, 0);
                bio->bi_private = &wait;
                bio->bi_end_io = rrpc_end_sync_bio;

                bio_add_pc_page(q, bio, page, RRPC_EXPOSED_PAGE_SIZE, 0);

                /* turn the command around and write the data back to a new
                 * address
                 */
                if (rrpc_submit_io(rrpc, bio, rqd, NVM_IOTYPE_GC)) {
                        pr_err("rrpc: gc write failed.\n");
                        rrpc_inflight_laddr_release(rrpc, rqd);
                        goto finished;
                }
                wait_for_completion_io(&wait);

                rrpc_inflight_laddr_release(rrpc, rqd);
                if (bio->bi_error)
                        goto finished;

                bio_reset(bio);
        }

finished:
        mempool_free(page, rrpc->page_pool);
        bio_put(bio);

        if (!bitmap_full(rblk->invalid_pages, nr_sec_per_blk)) {
                pr_err("nvm: failed to garbage collect block\n");
                return -EIO;
        }

        return 0;
}

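/* Reclaim a block: move any remaining valid pages, erase it and hand it back
 * to the media manager. If either step fails, the block is put back on the
 * lun's prio list so reclamation can be retried later.
 */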
static void rrpc_block_gc(struct work_struct *work)
{
        struct rrpc_block_gc *gcb = container_of(work, struct rrpc_block_gc,
                                                                        ws_gc);
        struct rrpc *rrpc = gcb->rrpc;
        struct rrpc_block *rblk = gcb->rblk;
        struct rrpc_lun *rlun = rblk->rlun;
        struct nvm_dev *dev = rrpc->dev;

        mempool_free(gcb, rrpc->gcb_pool);
        pr_debug("nvm: block '%lu' being reclaimed\n", rblk->parent->id);

        if (rrpc_move_valid_pages(rrpc, rblk))
                goto put_back;

        if (nvm_erase_blk(dev, rblk->parent))
                goto put_back;

        rrpc_put_blk(rrpc, rblk);

        return;

put_back:
        spin_lock(&rlun->lock);
        list_add_tail(&rblk->prio, &rlun->prio_list);
        spin_unlock(&rlun->lock);
}

/* The block with the highest number of invalid pages will be at the
 * beginning of the list.
 */
static struct rrpc_block *rblock_max_invalid(struct rrpc_block *ra,
                                                        struct rrpc_block *rb)
{
        if (ra->nr_invalid_pages == rb->nr_invalid_pages)
                return ra;

        return (ra->nr_invalid_pages < rb->nr_invalid_pages) ? rb : ra;
}

/* Linearly find the block with the highest number of invalid pages.
 * Requires lun->lock.
 */
static struct rrpc_block *block_prio_find_max(struct rrpc_lun *rlun)
{
        struct list_head *prio_list = &rlun->prio_list;
        struct rrpc_block *rblock, *max;

        BUG_ON(list_empty(prio_list));

        max = list_first_entry(prio_list, struct rrpc_block, prio);
        list_for_each_entry(rblock, prio_list, prio)
                max = rblock_max_invalid(max, rblock);

        return max;
}

static void rrpc_lun_gc(struct work_struct *work)
{
        struct rrpc_lun *rlun = container_of(work, struct rrpc_lun, ws_gc);
        struct rrpc *rrpc = rlun->rrpc;
        struct nvm_lun *lun = rlun->parent;
        struct rrpc_block_gc *gcb;
        unsigned int nr_blocks_need;

        nr_blocks_need = rrpc->dev->blks_per_lun / GC_LIMIT_INVERSE;

        if (nr_blocks_need < rrpc->nr_luns)
                nr_blocks_need = rrpc->nr_luns;

        spin_lock(&rlun->lock);
        while (nr_blocks_need > lun->nr_free_blocks &&
                                        !list_empty(&rlun->prio_list)) {
                struct rrpc_block *rblock = block_prio_find_max(rlun);
                struct nvm_block *block = rblock->parent;

                if (!rblock->nr_invalid_pages)
                        break;

                gcb = mempool_alloc(rrpc->gcb_pool, GFP_ATOMIC);
                if (!gcb)
                        break;

                list_del_init(&rblock->prio);

                BUG_ON(!block_is_full(rrpc, rblock));

                pr_debug("rrpc: selected block '%lu' for GC\n", block->id);

                gcb->rrpc = rrpc;
                gcb->rblk = rblock;
                INIT_WORK(&gcb->ws_gc, rrpc_block_gc);

                queue_work(rrpc->kgc_wq, &gcb->ws_gc);

                nr_blocks_need--;
        }
        spin_unlock(&rlun->lock);

        /* TODO: Hint that request queue can be started again */
}

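/* Put a fully written block on its lun's prio list, making it a candidate
 * for garbage collection.
 */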
static void rrpc_gc_queue(struct work_struct *work)
{
        struct rrpc_block_gc *gcb = container_of(work, struct rrpc_block_gc,
                                                                        ws_gc);
        struct rrpc *rrpc = gcb->rrpc;
        struct rrpc_block *rblk = gcb->rblk;
        struct rrpc_lun *rlun = rblk->rlun;

        spin_lock(&rlun->lock);
        list_add_tail(&rblk->prio, &rlun->prio_list);
        spin_unlock(&rlun->lock);

        mempool_free(gcb, rrpc->gcb_pool);
        pr_debug("nvm: block '%lu' is full, allow GC (sched)\n",
                                                        rblk->parent->id);
}

static const struct block_device_operations rrpc_fops = {
        .owner          = THIS_MODULE,
};

static struct rrpc_lun *rrpc_get_lun_rr(struct rrpc *rrpc, int is_gc)
{
        unsigned int i;
        struct rrpc_lun *rlun, *max_free;

        if (!is_gc)
                return get_next_lun(rrpc);

        /* During GC we don't care about the round-robin order; instead we
         * want to maintain evenness between the block luns.
         */
        max_free = &rrpc->luns[0];
        /* Prevent a GC-ing lun from devouring pages of a lun with few free
         * blocks. We don't take the lock as we only need an estimate.
         */
        rrpc_for_each_lun(rrpc, rlun, i) {
                if (rlun->parent->nr_free_blocks >
                                        max_free->parent->nr_free_blocks)
                        max_free = rlun;
        }

        return max_free;
}

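/* Install a new logical-to-physical mapping. Any page previously mapped at
 * the logical address is invalidated, and the reverse map is updated to
 * point back at the logical address.
 */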
static struct rrpc_addr *rrpc_update_map(struct rrpc *rrpc, sector_t laddr,
                                        struct rrpc_block *rblk, u64 paddr)
{
        struct rrpc_addr *gp;
        struct rrpc_rev_addr *rev;

        BUG_ON(laddr >= rrpc->nr_sects);

        gp = &rrpc->trans_map[laddr];
        spin_lock(&rrpc->rev_lock);
        if (gp->rblk)
                rrpc_page_invalidate(rrpc, gp);

        gp->addr = paddr;
        gp->rblk = rblk;

        rev = &rrpc->rev_trans_map[gp->addr - rrpc->poffset];
        rev->addr = laddr;
        spin_unlock(&rrpc->rev_lock);

        return gp;
}

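/* Allocate the next free sector within the block, or return ADDR_EMPTY if
 * the block is full.
 */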
static u64 rrpc_alloc_addr(struct rrpc *rrpc, struct rrpc_block *rblk)
{
        u64 addr = ADDR_EMPTY;

        spin_lock(&rblk->lock);
        if (block_is_full(rrpc, rblk))
                goto out;

        addr = block_to_addr(rrpc, rblk) + rblk->next_page;

        rblk->next_page++;
out:
        spin_unlock(&rblk->lock);
        return addr;
}

/* Map a logical address to a physical page. The mapping implements a round
 * robin approach and allocates a page from the next available lun.
 *
 * Returns rrpc_addr with the physical address and block. Returns NULL if no
 * blocks in the next rlun are available.
 */
static struct rrpc_addr *rrpc_map_page(struct rrpc *rrpc, sector_t laddr,
                                                                int is_gc)
{
        struct rrpc_lun *rlun;
        struct rrpc_block *rblk, **cur_rblk;
        struct nvm_lun *lun;
        u64 paddr;
        int gc_force = 0;

        rlun = rrpc_get_lun_rr(rrpc, is_gc);
        lun = rlun->parent;

        if (!is_gc && lun->nr_free_blocks < rrpc->nr_luns * 4)
                return NULL;

        /*
         * Page allocation steps:
         * 1. Try to allocate a new page from the current rblk.
         * 2a. If that succeeds, proceed to map it in and return.
         * 2b. If it fails, first try to allocate a new block from the media
         *     manager, and then retry step 1. Retry until the normal block
         *     pool is exhausted.
         * 3. If exhausted, and the garbage collector is requesting the block,
         *    go to the reserved block and retry step 1.
         *    If this fails as well, or it is not GC that is requesting,
         *    report that no block could be retrieved and let the caller
         *    handle further processing.
         */

        spin_lock(&rlun->lock);
        cur_rblk = &rlun->cur;
        rblk = rlun->cur;
retry:
        paddr = rrpc_alloc_addr(rrpc, rblk);

        if (paddr != ADDR_EMPTY)
                goto done;

        if (!list_empty(&rlun->wblk_list)) {
new_blk:
                rblk = list_first_entry(&rlun->wblk_list, struct rrpc_block,
                                                                        prio);
                rrpc_set_lun_cur(rlun, rblk, cur_rblk);
                list_del(&rblk->prio);
                goto retry;
        }
        spin_unlock(&rlun->lock);

        rblk = rrpc_get_blk(rrpc, rlun, gc_force);
        if (rblk) {
                spin_lock(&rlun->lock);
                list_add_tail(&rblk->prio, &rlun->wblk_list);
                /*
                 * Another thread might already have added a new block;
                 * therefore, make sure that one is used instead of the one
                 * just added.
                 */
                goto new_blk;
        }

        if (unlikely(is_gc) && !gc_force) {
                /* retry from the emergency gc block */
                cur_rblk = &rlun->gc_cur;
                rblk = rlun->gc_cur;
                gc_force = 1;
                spin_lock(&rlun->lock);
                goto retry;
        }

        pr_err("rrpc: failed to allocate new block\n");
        return NULL;
done:
        spin_unlock(&rlun->lock);
        return rrpc_update_map(rrpc, laddr, rblk, paddr);
}

static void rrpc_run_gc(struct rrpc *rrpc, struct rrpc_block *rblk)
{
        struct rrpc_block_gc *gcb;

        gcb = mempool_alloc(rrpc->gcb_pool, GFP_ATOMIC);
        if (!gcb) {
                pr_err("rrpc: unable to queue block for gc.\n");
                return;
        }

        gcb->rrpc = rrpc;
        gcb->rblk = rblk;

        INIT_WORK(&gcb->ws_gc, rrpc_gc_queue);
        queue_work(rrpc->kgc_wq, &gcb->ws_gc);
}

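/* Write completion: account the committed sectors and queue the block for
 * GC consideration once it has been completely written.
 */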
static void rrpc_end_io_write(struct rrpc *rrpc, struct rrpc_rq *rrqd,
                                                sector_t laddr, uint8_t npages)
{
        struct rrpc_addr *p;
        struct rrpc_block *rblk;
        struct nvm_lun *lun;
        int cmnt_size, i;

        for (i = 0; i < npages; i++) {
                p = &rrpc->trans_map[laddr + i];
                rblk = p->rblk;
                lun = rblk->parent->lun;

                cmnt_size = atomic_inc_return(&rblk->data_cmnt_size);
                if (unlikely(cmnt_size == rrpc->dev->sec_per_blk))
                        rrpc_run_gc(rrpc, rblk);
        }
}

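/* Common I/O completion: do write accounting, release the inflight lock and
 * free per-request resources. GC requests keep their inflight lock; it is
 * released by the GC path itself.
 */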
static void rrpc_end_io(struct nvm_rq *rqd)
{
        struct rrpc *rrpc = container_of(rqd->ins, struct rrpc, instance);
        struct rrpc_rq *rrqd = nvm_rq_to_pdu(rqd);
        uint8_t npages = rqd->nr_ppas;
        sector_t laddr = rrpc_get_laddr(rqd->bio) - npages;

        if (bio_data_dir(rqd->bio) == WRITE)
                rrpc_end_io_write(rrpc, rrqd, laddr, npages);

        bio_put(rqd->bio);

        if (rrqd->flags & NVM_IOTYPE_GC)
                return;

        rrpc_unlock_rq(rrpc, rqd);

        if (npages > 1)
                nvm_dev_dma_free(rrpc->dev, rqd->ppa_list, rqd->dma_ppa_list);

        mempool_free(rqd, rrpc->rq_pool);
}

static int rrpc_read_ppalist_rq(struct rrpc *rrpc, struct bio *bio,
                        struct nvm_rq *rqd, unsigned long flags, int npages)
{
        struct rrpc_inflight_rq *r = rrpc_get_inflight_rq(rqd);
        struct rrpc_addr *gp;
        sector_t laddr = rrpc_get_laddr(bio);
        int is_gc = flags & NVM_IOTYPE_GC;
        int i;

        if (!is_gc && rrpc_lock_rq(rrpc, bio, rqd)) {
                nvm_dev_dma_free(rrpc->dev, rqd->ppa_list, rqd->dma_ppa_list);
                return NVM_IO_REQUEUE;
        }

        for (i = 0; i < npages; i++) {
                /* We assume that mapping occurs at 4KB granularity */
                BUG_ON(!(laddr + i >= 0 && laddr + i < rrpc->nr_sects));
                gp = &rrpc->trans_map[laddr + i];

                if (gp->rblk) {
                        rqd->ppa_list[i] = rrpc_ppa_to_gaddr(rrpc->dev,
                                                                gp->addr);
                } else {
                        BUG_ON(is_gc);
                        rrpc_unlock_laddr(rrpc, r);
                        nvm_dev_dma_free(rrpc->dev, rqd->ppa_list,
                                                        rqd->dma_ppa_list);
                        return NVM_IO_DONE;
                }
        }

        rqd->opcode = NVM_OP_HBREAD;

        return NVM_IO_OK;
}

static int rrpc_read_rq(struct rrpc *rrpc, struct bio *bio, struct nvm_rq *rqd,
                                                        unsigned long flags)
{
        struct rrpc_rq *rrqd = nvm_rq_to_pdu(rqd);
        int is_gc = flags & NVM_IOTYPE_GC;
        sector_t laddr = rrpc_get_laddr(bio);
        struct rrpc_addr *gp;

        if (!is_gc && rrpc_lock_rq(rrpc, bio, rqd))
                return NVM_IO_REQUEUE;

        BUG_ON(!(laddr >= 0 && laddr < rrpc->nr_sects));
        gp = &rrpc->trans_map[laddr];

        if (gp->rblk) {
                rqd->ppa_addr = rrpc_ppa_to_gaddr(rrpc->dev, gp->addr);
        } else {
                BUG_ON(is_gc);
                rrpc_unlock_rq(rrpc, rqd);
                return NVM_IO_DONE;
        }

        rqd->opcode = NVM_OP_HBREAD;
        rrqd->addr = gp;

        return NVM_IO_OK;
}

static int rrpc_write_ppalist_rq(struct rrpc *rrpc, struct bio *bio,
                        struct nvm_rq *rqd, unsigned long flags, int npages)
{
        struct rrpc_inflight_rq *r = rrpc_get_inflight_rq(rqd);
        struct rrpc_addr *p;
        sector_t laddr = rrpc_get_laddr(bio);
        int is_gc = flags & NVM_IOTYPE_GC;
        int i;

        if (!is_gc && rrpc_lock_rq(rrpc, bio, rqd)) {
                nvm_dev_dma_free(rrpc->dev, rqd->ppa_list, rqd->dma_ppa_list);
                return NVM_IO_REQUEUE;
        }

        for (i = 0; i < npages; i++) {
                /* We assume that mapping occurs at 4KB granularity */
                p = rrpc_map_page(rrpc, laddr + i, is_gc);
                if (!p) {
                        BUG_ON(is_gc);
                        rrpc_unlock_laddr(rrpc, r);
                        nvm_dev_dma_free(rrpc->dev, rqd->ppa_list,
                                                        rqd->dma_ppa_list);
                        rrpc_gc_kick(rrpc);
                        return NVM_IO_REQUEUE;
                }

                rqd->ppa_list[i] = rrpc_ppa_to_gaddr(rrpc->dev,
                                                                p->addr);
        }

        rqd->opcode = NVM_OP_HBWRITE;

        return NVM_IO_OK;
}

static int rrpc_write_rq(struct rrpc *rrpc, struct bio *bio,
                                struct nvm_rq *rqd, unsigned long flags)
{
        struct rrpc_rq *rrqd = nvm_rq_to_pdu(rqd);
        struct rrpc_addr *p;
        int is_gc = flags & NVM_IOTYPE_GC;
        sector_t laddr = rrpc_get_laddr(bio);

        if (!is_gc && rrpc_lock_rq(rrpc, bio, rqd))
                return NVM_IO_REQUEUE;

        p = rrpc_map_page(rrpc, laddr, is_gc);
        if (!p) {
                BUG_ON(is_gc);
                rrpc_unlock_rq(rrpc, rqd);
                rrpc_gc_kick(rrpc);
                return NVM_IO_REQUEUE;
        }

        rqd->ppa_addr = rrpc_ppa_to_gaddr(rrpc->dev, p->addr);
        rqd->opcode = NVM_OP_HBWRITE;
        rrqd->addr = p;

        return NVM_IO_OK;
}

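/* Prepare the nvm_rq for submission: pick the single-sector or ppa-list
 * variant based on the number of pages and the bio direction.
 */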
static int rrpc_setup_rq(struct rrpc *rrpc, struct bio *bio,
                        struct nvm_rq *rqd, unsigned long flags, uint8_t npages)
{
        if (npages > 1) {
                rqd->ppa_list = nvm_dev_dma_alloc(rrpc->dev, GFP_KERNEL,
                                                        &rqd->dma_ppa_list);
                if (!rqd->ppa_list) {
                        pr_err("rrpc: not able to allocate ppa list\n");
                        return NVM_IO_ERR;
                }

                if (bio_op(bio) == REQ_OP_WRITE)
                        return rrpc_write_ppalist_rq(rrpc, bio, rqd, flags,
                                                                        npages);

                return rrpc_read_ppalist_rq(rrpc, bio, rqd, flags, npages);
        }

        if (bio_op(bio) == REQ_OP_WRITE)
                return rrpc_write_rq(rrpc, bio, rqd, flags);

        return rrpc_read_rq(rrpc, bio, rqd, flags);
}

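/* Validate the bio size, map it onto an nvm_rq and submit it to the device.
 * Returns an NVM_IO_* status code.
 */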
static int rrpc_submit_io(struct rrpc *rrpc, struct bio *bio,
                                struct nvm_rq *rqd, unsigned long flags)
{
        int err;
        struct rrpc_rq *rrq = nvm_rq_to_pdu(rqd);
        uint8_t nr_pages = rrpc_get_pages(bio);
        int bio_size = bio_sectors(bio) << 9;

        if (bio_size < rrpc->dev->sec_size)
                return NVM_IO_ERR;
        else if (bio_size > rrpc->dev->max_rq_size)
                return NVM_IO_ERR;

        err = rrpc_setup_rq(rrpc, bio, rqd, flags, nr_pages);
        if (err)
                return err;

        bio_get(bio);
        rqd->bio = bio;
        rqd->ins = &rrpc->instance;
        rqd->nr_ppas = nr_pages;
        rrq->flags = flags;

        err = nvm_submit_io(rrpc->dev, rqd);
        if (err) {
                pr_err("rrpc: I/O submission failed: %d\n", err);
                bio_put(bio);
                if (!(flags & NVM_IOTYPE_GC)) {
                        rrpc_unlock_rq(rrpc, rqd);
                        if (rqd->nr_ppas > 1)
                                nvm_dev_dma_free(rrpc->dev, rqd->ppa_list,
                                                        rqd->dma_ppa_list);
                }
                return NVM_IO_ERR;
        }

        return NVM_IO_OK;
}

static blk_qc_t rrpc_make_rq(struct request_queue *q, struct bio *bio)
{
        struct rrpc *rrpc = q->queuedata;
        struct nvm_rq *rqd;
        int err;

        if (bio_op(bio) == REQ_OP_DISCARD) {
                rrpc_discard(rrpc, bio);
                return BLK_QC_T_NONE;
        }

        rqd = mempool_alloc(rrpc->rq_pool, GFP_KERNEL);
        if (!rqd) {
                pr_err_ratelimited("rrpc: not able to queue bio.\n");
                bio_io_error(bio);
                return BLK_QC_T_NONE;
        }
        memset(rqd, 0, sizeof(struct nvm_rq));

        err = rrpc_submit_io(rrpc, bio, rqd, NVM_IOTYPE_NONE);
        switch (err) {
        case NVM_IO_OK:
                return BLK_QC_T_NONE;
        case NVM_IO_ERR:
                bio_io_error(bio);
                break;
        case NVM_IO_DONE:
                bio_endio(bio);
                break;
        case NVM_IO_REQUEUE:
                spin_lock(&rrpc->bio_lock);
                bio_list_add(&rrpc->requeue_bios, bio);
                spin_unlock(&rrpc->bio_lock);
                queue_work(rrpc->kgc_wq, &rrpc->ws_requeue);
                break;
        }

        mempool_free(rqd, rrpc->rq_pool);
        return BLK_QC_T_NONE;
}

static void rrpc_requeue(struct work_struct *work)
{
        struct rrpc *rrpc = container_of(work, struct rrpc, ws_requeue);
        struct bio_list bios;
        struct bio *bio;

        bio_list_init(&bios);

        spin_lock(&rrpc->bio_lock);
        bio_list_merge(&bios, &rrpc->requeue_bios);
        bio_list_init(&rrpc->requeue_bios);
        spin_unlock(&rrpc->bio_lock);

        while ((bio = bio_list_pop(&bios)))
                rrpc_make_rq(rrpc->disk->queue, bio);
}

static void rrpc_gc_free(struct rrpc *rrpc)
{
        if (rrpc->krqd_wq)
                destroy_workqueue(rrpc->krqd_wq);

        if (rrpc->kgc_wq)
                destroy_workqueue(rrpc->kgc_wq);
}

static int rrpc_gc_init(struct rrpc *rrpc)
{
        rrpc->krqd_wq = alloc_workqueue("rrpc-lun", WQ_MEM_RECLAIM|WQ_UNBOUND,
                                                                rrpc->nr_luns);
        if (!rrpc->krqd_wq)
                return -ENOMEM;

        rrpc->kgc_wq = alloc_workqueue("rrpc-bg", WQ_MEM_RECLAIM, 1);
        if (!rrpc->kgc_wq)
                return -ENOMEM;

        setup_timer(&rrpc->gc_timer, rrpc_gc_timer, (unsigned long)rrpc);

        return 0;
}

static void rrpc_map_free(struct rrpc *rrpc)
{
        vfree(rrpc->rev_trans_map);
        vfree(rrpc->trans_map);
}

static int rrpc_l2p_update(u64 slba, u32 nlb, __le64 *entries, void *private)
{
        struct rrpc *rrpc = (struct rrpc *)private;
        struct nvm_dev *dev = rrpc->dev;
        struct rrpc_addr *addr = rrpc->trans_map + slba;
        struct rrpc_rev_addr *raddr = rrpc->rev_trans_map;
        u64 elba = slba + nlb;
        u64 i;

        if (unlikely(elba > dev->total_secs)) {
                pr_err("nvm: L2P data from device is out of bounds!\n");
                return -EINVAL;
        }

        for (i = 0; i < nlb; i++) {
                u64 pba = le64_to_cpu(entries[i]);
                unsigned int mod;
                /* LNVM treats address spaces as silos; LBA and PBA are
                 * equally large and zero-indexed.
                 */
                if (unlikely(pba >= dev->total_secs && pba != U64_MAX)) {
                        pr_err("nvm: L2P data entry is out of bounds!\n");
                        return -EINVAL;
                }

                /* Address zero is special: the first page on a disk is
                 * protected, as it often holds internal device boot
                 * information.
                 */
                if (!pba)
                        continue;

                div_u64_rem(pba, rrpc->nr_sects, &mod);

                addr[i].addr = pba;
                raddr[mod].addr = slba + i;
        }

        return 0;
}

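/* Allocate the forward and reverse translation maps and, if the device
 * supports it, seed them from the on-device L2P table.
 */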
static int rrpc_map_init(struct rrpc *rrpc)
{
        struct nvm_dev *dev = rrpc->dev;
        sector_t i;
        int ret;

        rrpc->trans_map = vzalloc(sizeof(struct rrpc_addr) * rrpc->nr_sects);
        if (!rrpc->trans_map)
                return -ENOMEM;

        rrpc->rev_trans_map = vmalloc(sizeof(struct rrpc_rev_addr)
                                                        * rrpc->nr_sects);
        if (!rrpc->rev_trans_map)
                return -ENOMEM;

        for (i = 0; i < rrpc->nr_sects; i++) {
                struct rrpc_addr *p = &rrpc->trans_map[i];
                struct rrpc_rev_addr *r = &rrpc->rev_trans_map[i];

                p->addr = ADDR_EMPTY;
                r->addr = ADDR_EMPTY;
        }

        if (!dev->ops->get_l2p_tbl)
                return 0;

        /* Bring up the mapping table from device */
        ret = dev->ops->get_l2p_tbl(dev, rrpc->soffset, rrpc->nr_sects,
                                        rrpc_l2p_update, rrpc);
        if (ret) {
                pr_err("nvm: rrpc: could not read L2P table.\n");
                return -EINVAL;
        }

        return 0;
}

/* Minimum pages needed within a lun */
#define PAGE_POOL_SIZE 16
#define ADDR_POOL_SIZE 64

static int rrpc_core_init(struct rrpc *rrpc)
{
        down_write(&rrpc_lock);
        if (!rrpc_gcb_cache) {
                rrpc_gcb_cache = kmem_cache_create("rrpc_gcb",
                                sizeof(struct rrpc_block_gc), 0, 0, NULL);
                if (!rrpc_gcb_cache) {
                        up_write(&rrpc_lock);
                        return -ENOMEM;
                }

                rrpc_rq_cache = kmem_cache_create("rrpc_rq",
                                sizeof(struct nvm_rq) + sizeof(struct rrpc_rq),
                                0, 0, NULL);
                if (!rrpc_rq_cache) {
                        kmem_cache_destroy(rrpc_gcb_cache);
                        up_write(&rrpc_lock);
                        return -ENOMEM;
                }
        }
        up_write(&rrpc_lock);

        rrpc->page_pool = mempool_create_page_pool(PAGE_POOL_SIZE, 0);
        if (!rrpc->page_pool)
                return -ENOMEM;

        rrpc->gcb_pool = mempool_create_slab_pool(rrpc->dev->nr_luns,
                                                                rrpc_gcb_cache);
        if (!rrpc->gcb_pool)
                return -ENOMEM;

        rrpc->rq_pool = mempool_create_slab_pool(64, rrpc_rq_cache);
        if (!rrpc->rq_pool)
                return -ENOMEM;

        spin_lock_init(&rrpc->inflights.lock);
        INIT_LIST_HEAD(&rrpc->inflights.reqs);

        return 0;
}

static void rrpc_core_free(struct rrpc *rrpc)
{
        mempool_destroy(rrpc->page_pool);
        mempool_destroy(rrpc->gcb_pool);
        mempool_destroy(rrpc->rq_pool);
}

static void rrpc_luns_free(struct rrpc *rrpc)
{
        struct nvm_dev *dev = rrpc->dev;
        struct nvm_lun *lun;
        struct rrpc_lun *rlun;
        int i;

        if (!rrpc->luns)
                return;

        for (i = 0; i < rrpc->nr_luns; i++) {
                rlun = &rrpc->luns[i];
                lun = rlun->parent;
                if (!lun)
                        break;
                dev->mt->release_lun(dev, lun->id);
                vfree(rlun->blocks);
        }

        kfree(rrpc->luns);
}

static int rrpc_luns_init(struct rrpc *rrpc, int lun_begin, int lun_end)
{
        struct nvm_dev *dev = rrpc->dev;
        struct rrpc_lun *rlun;
        int i, j, ret = -EINVAL;

        if (dev->sec_per_blk > MAX_INVALID_PAGES_STORAGE * BITS_PER_LONG) {
                pr_err("rrpc: number of pages per block too high.");
                return -EINVAL;
        }

        spin_lock_init(&rrpc->rev_lock);

        rrpc->luns = kcalloc(rrpc->nr_luns, sizeof(struct rrpc_lun),
                                                                GFP_KERNEL);
        if (!rrpc->luns)
                return -ENOMEM;

        /* 1:1 mapping */
        for (i = 0; i < rrpc->nr_luns; i++) {
                int lunid = lun_begin + i;
                struct nvm_lun *lun;

                if (dev->mt->reserve_lun(dev, lunid)) {
                        pr_err("rrpc: lun %u is already allocated\n", lunid);
                        goto err;
                }

                lun = dev->mt->get_lun(dev, lunid);
                if (!lun)
                        goto err;

                rlun = &rrpc->luns[i];
                rlun->parent = lun;
                rlun->blocks = vzalloc(sizeof(struct rrpc_block) *
                                                rrpc->dev->blks_per_lun);
                if (!rlun->blocks) {
                        ret = -ENOMEM;
                        goto err;
                }

                for (j = 0; j < rrpc->dev->blks_per_lun; j++) {
                        struct rrpc_block *rblk = &rlun->blocks[j];
                        struct nvm_block *blk = &lun->blocks[j];

                        rblk->parent = blk;
                        rblk->rlun = rlun;
                        INIT_LIST_HEAD(&rblk->prio);
                        spin_lock_init(&rblk->lock);
                }

                rlun->rrpc = rrpc;
                INIT_LIST_HEAD(&rlun->prio_list);
                INIT_LIST_HEAD(&rlun->wblk_list);

                INIT_WORK(&rlun->ws_gc, rrpc_lun_gc);
                spin_lock_init(&rlun->lock);
        }

        return 0;
err:
        return ret;
}

/* returns 0 on success and stores the beginning address in *begin */
static int rrpc_area_init(struct rrpc *rrpc, sector_t *begin)
{
        struct nvm_dev *dev = rrpc->dev;
        struct nvmm_type *mt = dev->mt;
        sector_t size = rrpc->nr_sects * dev->sec_size;
        int ret;

        size >>= 9;

        ret = mt->get_area(dev, begin, size);
        if (!ret)
                *begin >>= (ilog2(dev->sec_size) - 9);

        return ret;
}

static void rrpc_area_free(struct rrpc *rrpc)
{
        struct nvm_dev *dev = rrpc->dev;
        struct nvmm_type *mt = dev->mt;
        sector_t begin = rrpc->soffset << (ilog2(dev->sec_size) - 9);

        mt->put_area(dev, begin);
}

static void rrpc_free(struct rrpc *rrpc)
{
        rrpc_gc_free(rrpc);
        rrpc_map_free(rrpc);
        rrpc_core_free(rrpc);
        rrpc_luns_free(rrpc);
        rrpc_area_free(rrpc);

        kfree(rrpc);
}

static void rrpc_exit(void *private)
{
        struct rrpc *rrpc = private;

        del_timer(&rrpc->gc_timer);

        flush_workqueue(rrpc->krqd_wq);
        flush_workqueue(rrpc->kgc_wq);

        rrpc_free(rrpc);
}

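/* Reported capacity: reserve four blocks per lun and expose only 90% of the
 * remaining sectors, leaving headroom for garbage collection.
 */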
static sector_t rrpc_capacity(void *private)
{
        struct rrpc *rrpc = private;
        struct nvm_dev *dev = rrpc->dev;
        sector_t reserved, provisioned;

        /* cur, gc, and two emergency blocks for each lun */
        reserved = rrpc->nr_luns * dev->sec_per_blk * 4;
        provisioned = rrpc->nr_sects - reserved;

        if (reserved > rrpc->nr_sects) {
                pr_err("rrpc: not enough space available to expose storage.\n");
                return 0;
        }

        sector_div(provisioned, 10);
        return provisioned * 9 * NR_PHY_IN_LOG;
}

/*
 * Looks up the logical address from the reverse trans map and checks if it is
 * valid by comparing the logical-to-physical address with the physical
 * address. Pages whose reverse mapping no longer matches are marked invalid
 * in the block.
 */
static void rrpc_block_map_update(struct rrpc *rrpc, struct rrpc_block *rblk)
{
        struct nvm_dev *dev = rrpc->dev;
        int offset;
        struct rrpc_addr *laddr;
        u64 bpaddr, paddr, pladdr;

        bpaddr = block_to_rel_addr(rrpc, rblk);
        for (offset = 0; offset < dev->sec_per_blk; offset++) {
                paddr = bpaddr + offset;

                pladdr = rrpc->rev_trans_map[paddr].addr;
                if (pladdr == ADDR_EMPTY)
                        continue;

                laddr = &rrpc->trans_map[pladdr];

                if (paddr == laddr->addr) {
                        laddr->rblk = rblk;
                } else {
                        set_bit(offset, rblk->invalid_pages);
                        rblk->nr_invalid_pages++;
                }
        }
}

static int rrpc_blocks_init(struct rrpc *rrpc)
{
        struct rrpc_lun *rlun;
        struct rrpc_block *rblk;
        int lun_iter, blk_iter;

        for (lun_iter = 0; lun_iter < rrpc->nr_luns; lun_iter++) {
                rlun = &rrpc->luns[lun_iter];

                for (blk_iter = 0; blk_iter < rrpc->dev->blks_per_lun;
                                                                blk_iter++) {
                        rblk = &rlun->blocks[blk_iter];
                        rrpc_block_map_update(rrpc, rblk);
                }
        }

        return 0;
}

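/* Give each lun an active write block and an emergency block reserved for
 * garbage collection.
 */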
static int rrpc_luns_configure(struct rrpc *rrpc)
{
        struct rrpc_lun *rlun;
        struct rrpc_block *rblk;
        int i;

        for (i = 0; i < rrpc->nr_luns; i++) {
                rlun = &rrpc->luns[i];

                rblk = rrpc_get_blk(rrpc, rlun, 0);
                if (!rblk)
                        goto err;
                rrpc_set_lun_cur(rlun, rblk, &rlun->cur);

                /* Emergency gc block */
                rblk = rrpc_get_blk(rrpc, rlun, 1);
                if (!rblk)
                        goto err;
                rrpc_set_lun_cur(rlun, rblk, &rlun->gc_cur);
        }

        return 0;
err:
        rrpc_put_blks(rrpc);
        return -EINVAL;
}

static struct nvm_tgt_type tt_rrpc;

static void *rrpc_init(struct nvm_dev *dev, struct gendisk *tdisk,
                                                int lun_begin, int lun_end)
{
        struct request_queue *bqueue = dev->q;
        struct request_queue *tqueue = tdisk->queue;
        struct rrpc *rrpc;
        sector_t soffset;
        int ret;

        if (!(dev->identity.dom & NVM_RSP_L2P)) {
                pr_err("nvm: rrpc: device does not support l2p (%x)\n",
                                                        dev->identity.dom);
                return ERR_PTR(-EINVAL);
        }

        rrpc = kzalloc(sizeof(struct rrpc), GFP_KERNEL);
        if (!rrpc)
                return ERR_PTR(-ENOMEM);

        rrpc->instance.tt = &tt_rrpc;
        rrpc->dev = dev;
        rrpc->disk = tdisk;

        bio_list_init(&rrpc->requeue_bios);
        spin_lock_init(&rrpc->bio_lock);
        INIT_WORK(&rrpc->ws_requeue, rrpc_requeue);

        rrpc->nr_luns = lun_end - lun_begin + 1;
        rrpc->total_blocks = (unsigned long)dev->blks_per_lun * rrpc->nr_luns;
        rrpc->nr_sects = (unsigned long long)dev->sec_per_lun * rrpc->nr_luns;

        /* simple round-robin strategy */
        atomic_set(&rrpc->next_lun, -1);

        ret = rrpc_area_init(rrpc, &soffset);
        if (ret < 0) {
                pr_err("nvm: rrpc: could not initialize area\n");
                return ERR_PTR(ret);
        }
        rrpc->soffset = soffset;

        ret = rrpc_luns_init(rrpc, lun_begin, lun_end);
        if (ret) {
                pr_err("nvm: rrpc: could not initialize luns\n");
                goto err;
        }

        rrpc->poffset = dev->sec_per_lun * lun_begin;
        rrpc->lun_offset = lun_begin;

        ret = rrpc_core_init(rrpc);
        if (ret) {
                pr_err("nvm: rrpc: could not initialize core\n");
                goto err;
        }

        ret = rrpc_map_init(rrpc);
        if (ret) {
                pr_err("nvm: rrpc: could not initialize maps\n");
                goto err;
        }

        ret = rrpc_blocks_init(rrpc);
        if (ret) {
                pr_err("nvm: rrpc: could not initialize state for blocks\n");
                goto err;
        }

        ret = rrpc_luns_configure(rrpc);
        if (ret) {
                pr_err("nvm: rrpc: not enough blocks available in LUNs.\n");
                goto err;
        }

        ret = rrpc_gc_init(rrpc);
        if (ret) {
                pr_err("nvm: rrpc: could not initialize gc\n");
                goto err;
        }

        /* inherit the size from the underlying device */
        blk_queue_logical_block_size(tqueue, queue_physical_block_size(bqueue));
        blk_queue_max_hw_sectors(tqueue, queue_max_hw_sectors(bqueue));

        pr_info("nvm: rrpc initialized with %u luns and %llu pages.\n",
                        rrpc->nr_luns, (unsigned long long)rrpc->nr_sects);

        mod_timer(&rrpc->gc_timer, jiffies + msecs_to_jiffies(10));

        return rrpc;
err:
        rrpc_free(rrpc);
        return ERR_PTR(ret);
}

/* round robin, page-based FTL, and cost-based GC */
static struct nvm_tgt_type tt_rrpc = {
        .name           = "rrpc",
        .version        = {1, 0, 0},

        .make_rq        = rrpc_make_rq,
        .capacity       = rrpc_capacity,
        .end_io         = rrpc_end_io,

        .init           = rrpc_init,
        .exit           = rrpc_exit,
};

static int __init rrpc_module_init(void)
{
        return nvm_register_tgt_type(&tt_rrpc);
}

static void rrpc_module_exit(void)
{
        nvm_unregister_tgt_type(&tt_rrpc);
}

module_init(rrpc_module_init);
module_exit(rrpc_module_exit);
MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("Block-Device Target for Open-Channel SSDs");