linux/drivers/lightnvm/pblk-read.c
/*
 * Copyright (C) 2016 CNEX Labs
 * Initial release: Javier Gonzalez <javier@cnexlabs.com>
 *                  Matias Bjorling <matias@cnexlabs.com>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version
 * 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * pblk-read.c - pblk's read path
 */

#include "pblk.h"

/*
 * There is no guarantee that the value read from the cache has not been
 * updated and now resides at another location in the cache. We do guarantee,
 * though, that if the value is read from the cache, it belongs to the mapped
 * lba. In order to guarantee that writes and reads are ordered, a flush must
 * be issued.
 */
static int pblk_read_from_cache(struct pblk *pblk, struct bio *bio,
                                sector_t lba, struct ppa_addr ppa,
                                int bio_iter, bool advanced_bio)
{
#ifdef CONFIG_NVM_DEBUG
        /* Callers must ensure that the ppa points to a cache address */
        BUG_ON(pblk_ppa_empty(ppa));
        BUG_ON(!pblk_addr_in_cache(ppa));
#endif

        return pblk_rb_copy_to_bio(&pblk->rwb, bio, lba, ppa,
                                                bio_iter, advanced_bio);
}

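/*
 * Map a multi-sector read request: look up the L2P table for the requested
 * lbas, copy sectors that sit in the write buffer (or are unmapped) straight
 * into the bio, and collect the remaining sectors in the ppa list for a
 * device read. Sectors completed here are marked in read_bitmap.
 */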
static void pblk_read_ppalist_rq(struct pblk *pblk, struct nvm_rq *rqd,
                                 struct bio *bio, sector_t blba,
                                 unsigned long *read_bitmap)
{
        struct pblk_sec_meta *meta_list = rqd->meta_list;
        struct ppa_addr ppas[PBLK_MAX_REQ_ADDRS];
        int nr_secs = rqd->nr_ppas;
        bool advanced_bio = false;
        int i, j = 0;

        pblk_lookup_l2p_seq(pblk, ppas, blba, nr_secs);

        for (i = 0; i < nr_secs; i++) {
                struct ppa_addr p = ppas[i];
                sector_t lba = blba + i;

retry:
                if (pblk_ppa_empty(p)) {
                        WARN_ON(test_and_set_bit(i, read_bitmap));
                        meta_list[i].lba = cpu_to_le64(ADDR_EMPTY);

                        if (unlikely(!advanced_bio)) {
                                bio_advance(bio, i * PBLK_EXPOSED_PAGE_SIZE);
                                advanced_bio = true;
                        }

                        goto next;
                }

                /* Try to read from write buffer. The address is later checked
                 * on the write buffer to prevent retrieving overwritten data.
                 */
                if (pblk_addr_in_cache(p)) {
                        if (!pblk_read_from_cache(pblk, bio, lba, p, i,
                                                                advanced_bio)) {
                                pblk_lookup_l2p_seq(pblk, &p, lba, 1);
                                goto retry;
                        }
                        WARN_ON(test_and_set_bit(i, read_bitmap));
                        meta_list[i].lba = cpu_to_le64(lba);
                        advanced_bio = true;
#ifdef CONFIG_NVM_DEBUG
                        atomic_long_inc(&pblk->cache_reads);
#endif
                } else {
                        /* Read non-cached sectors from the media */
                        rqd->ppa_list[j++] = p;
                }

next:
                if (advanced_bio)
                        bio_advance(bio, PBLK_EXPOSED_PAGE_SIZE);
        }

        if (pblk_io_aligned(pblk, nr_secs))
                rqd->flags = pblk_set_read_mode(pblk, PBLK_READ_SEQUENTIAL);
        else
                rqd->flags = pblk_set_read_mode(pblk, PBLK_READ_RANDOM);

#ifdef CONFIG_NVM_DEBUG
        atomic_long_add(nr_secs, &pblk->inflight_reads);
#endif
}

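/*
 * Sanity check for a sequential read: the lba stored in each sector's
 * out-of-band metadata must match the lba that was requested.
 */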
static void pblk_read_check_seq(struct pblk *pblk, struct nvm_rq *rqd,
                                sector_t blba)
{
        struct pblk_sec_meta *meta_lba_list = rqd->meta_list;
        int nr_lbas = rqd->nr_ppas;
        int i;

        for (i = 0; i < nr_lbas; i++) {
                u64 lba = le64_to_cpu(meta_lba_list[i].lba);

                if (lba == ADDR_EMPTY)
                        continue;

                if (lba != blba + i) {
#ifdef CONFIG_NVM_DEBUG
                        struct ppa_addr *p;

                        /* A single-sector request keeps its ppa in ppa_addr */
                        p = (nr_lbas == 1) ? &rqd->ppa_addr : &rqd->ppa_list[i];
                        print_ppa(&pblk->dev->geo, p, "seq", i);
#endif
                        pr_err("pblk: corrupted read LBA (%llu/%llu)\n",
                                                        lba, (u64)blba + i);
                        WARN_ON(1);
                }
        }
}

/*
 * There can be holes in the lba list.
 */
static void pblk_read_check_rand(struct pblk *pblk, struct nvm_rq *rqd,
                                 u64 *lba_list, int nr_lbas)
{
        struct pblk_sec_meta *meta_lba_list = rqd->meta_list;
        int i, j;

        for (i = 0, j = 0; i < nr_lbas; i++) {
                u64 lba = lba_list[i];
                u64 meta_lba;

                if (lba == ADDR_EMPTY)
                        continue;

                meta_lba = le64_to_cpu(meta_lba_list[j].lba);

                if (lba != meta_lba) {
#ifdef CONFIG_NVM_DEBUG
                        struct ppa_addr *p;
                        int nr_ppas = rqd->nr_ppas;

                        /* A single-sector request keeps its ppa in ppa_addr */
                        p = (nr_ppas == 1) ? &rqd->ppa_addr : &rqd->ppa_list[j];
                        print_ppa(&pblk->dev->geo, p, "rnd", j);
#endif
                        pr_err("pblk: corrupted read LBA (%llu/%llu)\n",
                                                                lba, meta_lba);
                        WARN_ON(1);
                }

                j++;
        }

        WARN_ONCE(j != rqd->nr_ppas, "pblk: corrupted random request\n");
}

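/*
 * Drop the per-line references that were taken when the read request was
 * mapped to physical addresses.
 */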
static void pblk_read_put_rqd_kref(struct pblk *pblk, struct nvm_rq *rqd)
{
        struct ppa_addr *ppa_list;
        int i;

        ppa_list = (rqd->nr_ppas > 1) ? rqd->ppa_list : &rqd->ppa_addr;

        for (i = 0; i < rqd->nr_ppas; i++) {
                struct ppa_addr ppa = ppa_list[i];
                struct pblk_line *line;

                line = &pblk->lines[pblk_ppa_to_line(ppa)];
                kref_put(&line->ref, pblk_line_put_wq);
        }
}

static void pblk_end_user_read(struct bio *bio)
{
#ifdef CONFIG_NVM_DEBUG
        WARN_ONCE(bio->bi_status, "pblk: corrupted read bio\n");
#endif
        bio_endio(bio);
}

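/*
 * Common completion path for user and partial reads: account the I/O, verify
 * the returned lbas, drop the internal bio and (optionally) the line
 * references, and free the request.
 */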
static void __pblk_end_io_read(struct pblk *pblk, struct nvm_rq *rqd,
                               bool put_line)
{
        struct nvm_tgt_dev *dev = pblk->dev;
        struct pblk_g_ctx *r_ctx = nvm_rq_to_pdu(rqd);
        struct bio *int_bio = rqd->bio;
        unsigned long start_time = r_ctx->start_time;

        generic_end_io_acct(dev->q, REQ_OP_READ, &pblk->disk->part0, start_time);

        if (rqd->error)
                pblk_log_read_err(pblk, rqd);

        pblk_read_check_seq(pblk, rqd, r_ctx->lba);

        if (int_bio)
                bio_put(int_bio);

        if (put_line)
                pblk_read_put_rqd_kref(pblk, rqd);

#ifdef CONFIG_NVM_DEBUG
        atomic_long_add(rqd->nr_ppas, &pblk->sync_reads);
        atomic_long_sub(rqd->nr_ppas, &pblk->inflight_reads);
#endif

        pblk_free_rqd(pblk, rqd, PBLK_READ);
        atomic_dec(&pblk->inflight_io);
}

static void pblk_end_io_read(struct nvm_rq *rqd)
{
        struct pblk *pblk = rqd->private;
        struct pblk_g_ctx *r_ctx = nvm_rq_to_pdu(rqd);
        struct bio *bio = (struct bio *)r_ctx->private;

        pblk_end_user_read(bio);
        __pblk_end_io_read(pblk, rqd, true);
}

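/*
 * Part of the original bio was served from the write buffer, but some sectors
 * ("holes" in read_bitmap) must come from the device. Read the holes
 * synchronously into a temporary bio and copy the data back into the original
 * bio at the corresponding offsets.
 */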
static int pblk_partial_read(struct pblk *pblk, struct nvm_rq *rqd,
                             struct bio *orig_bio, unsigned int bio_init_idx,
                             unsigned long *read_bitmap)
{
        struct pblk_sec_meta *meta_list = rqd->meta_list;
        struct bio *new_bio;
        struct bio_vec src_bv, dst_bv;
        void *ppa_ptr = NULL;
        void *src_p, *dst_p;
        dma_addr_t dma_ppa_list = 0;
        __le64 *lba_list_mem, *lba_list_media;
        int nr_secs = rqd->nr_ppas;
        int nr_holes = nr_secs - bitmap_weight(read_bitmap, nr_secs);
        int i, ret, hole;

        /* Re-use allocated memory for intermediate lbas */
        lba_list_mem = (((void *)rqd->ppa_list) + pblk_dma_ppa_size);
        lba_list_media = (((void *)rqd->ppa_list) + 2 * pblk_dma_ppa_size);

        new_bio = bio_alloc(GFP_KERNEL, nr_holes);

        if (pblk_bio_add_pages(pblk, new_bio, GFP_KERNEL, nr_holes))
                goto fail_add_pages;

        if (nr_holes != new_bio->bi_vcnt) {
                pr_err("pblk: malformed bio\n");
                goto fail;
        }

        for (i = 0; i < nr_secs; i++)
                lba_list_mem[i] = meta_list[i].lba;

        new_bio->bi_iter.bi_sector = 0; /* internal bio */
        bio_set_op_attrs(new_bio, REQ_OP_READ, 0);

        rqd->bio = new_bio;
        rqd->nr_ppas = nr_holes;
        rqd->flags = pblk_set_read_mode(pblk, PBLK_READ_RANDOM);

        if (unlikely(nr_holes == 1)) {
                ppa_ptr = rqd->ppa_list;
                dma_ppa_list = rqd->dma_ppa_list;
                rqd->ppa_addr = rqd->ppa_list[0];
        }

        ret = pblk_submit_io_sync(pblk, rqd);
        if (ret) {
                bio_put(rqd->bio);
                pr_err("pblk: sync read IO submission failed\n");
                goto fail;
        }

        if (rqd->error) {
                atomic_long_inc(&pblk->read_failed);
#ifdef CONFIG_NVM_DEBUG
                pblk_print_failed_rqd(pblk, rqd, rqd->error);
#endif
        }

        if (unlikely(nr_holes == 1)) {
                struct ppa_addr ppa;

                ppa = rqd->ppa_addr;
                rqd->ppa_list = ppa_ptr;
                rqd->dma_ppa_list = dma_ppa_list;
                rqd->ppa_list[0] = ppa;
        }

        for (i = 0; i < nr_secs; i++) {
                lba_list_media[i] = meta_list[i].lba;
                meta_list[i].lba = lba_list_mem[i];
        }

        /* Fill the holes in the original bio */
        i = 0;
        hole = find_first_zero_bit(read_bitmap, nr_secs);
        do {
                int line_id = pblk_ppa_to_line(rqd->ppa_list[i]);
                struct pblk_line *line = &pblk->lines[line_id];

                kref_put(&line->ref, pblk_line_put);

                meta_list[hole].lba = lba_list_media[i];

                src_bv = new_bio->bi_io_vec[i++];
                dst_bv = orig_bio->bi_io_vec[bio_init_idx + hole];

                src_p = kmap_atomic(src_bv.bv_page);
                dst_p = kmap_atomic(dst_bv.bv_page);

                memcpy(dst_p + dst_bv.bv_offset,
                        src_p + src_bv.bv_offset,
                        PBLK_EXPOSED_PAGE_SIZE);

                kunmap_atomic(src_p);
                kunmap_atomic(dst_p);

                mempool_free(src_bv.bv_page, &pblk->page_bio_pool);

                hole = find_next_zero_bit(read_bitmap, nr_secs, hole + 1);
        } while (hole < nr_secs);

        bio_put(new_bio);

        /* restore original request */
        rqd->bio = NULL;
        rqd->nr_ppas = nr_secs;

        __pblk_end_io_read(pblk, rqd, false);
        return NVM_IO_DONE;

fail:
        /* Free allocated pages in new bio */
        pblk_bio_free_pages(pblk, new_bio, 0, new_bio->bi_vcnt);
fail_add_pages:
        pr_err("pblk: failed to perform partial read\n");
        __pblk_end_io_read(pblk, rqd, false);
        return NVM_IO_ERR;
}

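/*
 * Map a single-sector read request: serve the sector from the write buffer if
 * it is cached (or flag it as empty), otherwise set up a device read.
 */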
static void pblk_read_rq(struct pblk *pblk, struct nvm_rq *rqd, struct bio *bio,
                         sector_t lba, unsigned long *read_bitmap)
{
        struct pblk_sec_meta *meta_list = rqd->meta_list;
        struct ppa_addr ppa;

        pblk_lookup_l2p_seq(pblk, &ppa, lba, 1);

#ifdef CONFIG_NVM_DEBUG
        atomic_long_inc(&pblk->inflight_reads);
#endif

retry:
        if (pblk_ppa_empty(ppa)) {
                WARN_ON(test_and_set_bit(0, read_bitmap));
                meta_list[0].lba = cpu_to_le64(ADDR_EMPTY);
                return;
        }

        /* Try to read from write buffer. The address is later checked on the
         * write buffer to prevent retrieving overwritten data.
         */
        if (pblk_addr_in_cache(ppa)) {
                if (!pblk_read_from_cache(pblk, bio, lba, ppa, 0, 1)) {
                        pblk_lookup_l2p_seq(pblk, &ppa, lba, 1);
                        goto retry;
                }

                WARN_ON(test_and_set_bit(0, read_bitmap));
                meta_list[0].lba = cpu_to_le64(lba);

#ifdef CONFIG_NVM_DEBUG
                atomic_long_inc(&pblk->cache_reads);
#endif
        } else {
                rqd->ppa_addr = ppa;
        }

        rqd->flags = pblk_set_read_mode(pblk, PBLK_READ_RANDOM);
}

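/*
 * Entry point for the user read path: build an nvm_rq for the bio, serve what
 * is possible from the write buffer and, depending on the coverage, either
 * complete the request right away, submit a full device read, or fall back to
 * the partial read path.
 */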
int pblk_submit_read(struct pblk *pblk, struct bio *bio)
{
        struct nvm_tgt_dev *dev = pblk->dev;
        struct request_queue *q = dev->q;
        sector_t blba = pblk_get_lba(bio);
        unsigned int nr_secs = pblk_get_secs(bio);
        struct pblk_g_ctx *r_ctx;
        struct nvm_rq *rqd;
        unsigned int bio_init_idx;
        unsigned long read_bitmap; /* Max 64 ppas per request */
        int ret = NVM_IO_ERR;

        /* logic error: lba out-of-bounds. Ignore read request */
        if (blba >= pblk->rl.nr_secs || nr_secs > PBLK_MAX_REQ_ADDRS) {
                WARN(1, "pblk: read lba out of bounds (lba:%llu, nr:%d)\n",
                                        (unsigned long long)blba, nr_secs);
                return NVM_IO_ERR;
        }

        generic_start_io_acct(q, REQ_OP_READ, bio_sectors(bio),
                              &pblk->disk->part0);

        bitmap_zero(&read_bitmap, nr_secs);

        rqd = pblk_alloc_rqd(pblk, PBLK_READ);

        rqd->opcode = NVM_OP_PREAD;
        rqd->nr_ppas = nr_secs;
        rqd->bio = NULL; /* cloned bio if needed */
        rqd->private = pblk;
        rqd->end_io = pblk_end_io_read;

        r_ctx = nvm_rq_to_pdu(rqd);
        r_ctx->start_time = jiffies;
        r_ctx->lba = blba;
        r_ctx->private = bio; /* original bio */

        /* Save the index for this bio's start. This is needed in case
         * we need to fill a partial read.
         */
        bio_init_idx = pblk_get_bi_idx(bio);

        rqd->meta_list = nvm_dev_dma_alloc(dev->parent, GFP_KERNEL,
                                                        &rqd->dma_meta_list);
        if (!rqd->meta_list) {
                pr_err("pblk: not able to allocate ppa list\n");
                goto fail_rqd_free;
        }

        if (nr_secs > 1) {
                rqd->ppa_list = rqd->meta_list + pblk_dma_meta_size;
                rqd->dma_ppa_list = rqd->dma_meta_list + pblk_dma_meta_size;

                pblk_read_ppalist_rq(pblk, rqd, bio, blba, &read_bitmap);
        } else {
                pblk_read_rq(pblk, rqd, bio, blba, &read_bitmap);
        }

        if (bitmap_full(&read_bitmap, nr_secs)) {
                atomic_inc(&pblk->inflight_io);
                __pblk_end_io_read(pblk, rqd, false);
                return NVM_IO_DONE;
        }

        /* All sectors are to be read from the device */
        if (bitmap_empty(&read_bitmap, rqd->nr_ppas)) {
                struct bio *int_bio = NULL;

                /* Clone read bio to deal with read errors internally */
                int_bio = bio_clone_fast(bio, GFP_KERNEL, &pblk_bio_set);
                if (!int_bio) {
                        pr_err("pblk: could not clone read bio\n");
                        goto fail_end_io;
                }

                rqd->bio = int_bio;

                if (pblk_submit_io(pblk, rqd)) {
                        pr_err("pblk: read IO submission failed\n");
                        ret = NVM_IO_ERR;
                        goto fail_end_io;
                }

                return NVM_IO_OK;
        }

        /* The read bio request could be partially filled by the write buffer,
         * but there are some holes that need to be read from the drive.
         */
        return pblk_partial_read(pblk, rqd, bio, bio_init_idx, &read_bitmap);

fail_rqd_free:
        pblk_free_rqd(pblk, rqd, PBLK_READ);
        return ret;
fail_end_io:
        __pblk_end_io_read(pblk, rqd, false);
        return ret;
}

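/*
 * Build the ppa list for a multi-sector GC read. Only sectors whose L2P
 * mapping still points to the location being garbage collected are read;
 * sectors that have been overwritten in the meantime are dropped from the
 * lba/paddr lists.
 */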
static int read_ppalist_rq_gc(struct pblk *pblk, struct nvm_rq *rqd,
                              struct pblk_line *line, u64 *lba_list,
                              u64 *paddr_list_gc, unsigned int nr_secs)
{
        struct ppa_addr ppa_list_l2p[PBLK_MAX_REQ_ADDRS];
        struct ppa_addr ppa_gc;
        int valid_secs = 0;
        int i;

        pblk_lookup_l2p_rand(pblk, ppa_list_l2p, lba_list, nr_secs);

        for (i = 0; i < nr_secs; i++) {
                if (lba_list[i] == ADDR_EMPTY)
                        continue;

                ppa_gc = addr_to_gen_ppa(pblk, paddr_list_gc[i], line->id);
                if (!pblk_ppa_comp(ppa_list_l2p[i], ppa_gc)) {
                        paddr_list_gc[i] = lba_list[i] = ADDR_EMPTY;
                        continue;
                }

                rqd->ppa_list[valid_secs++] = ppa_list_l2p[i];
        }

#ifdef CONFIG_NVM_DEBUG
        atomic_long_add(valid_secs, &pblk->inflight_reads);
#endif

        return valid_secs;
}

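/*
 * Single-sector counterpart of read_ppalist_rq_gc: only issue the read if the
 * lba is still mapped to the sector being garbage collected.
 */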
static int read_rq_gc(struct pblk *pblk, struct nvm_rq *rqd,
                      struct pblk_line *line, sector_t lba,
                      u64 paddr_gc)
{
        struct ppa_addr ppa_l2p, ppa_gc;
        int valid_secs = 0;

        if (lba == ADDR_EMPTY)
                goto out;

        /* logic error: lba out-of-bounds */
        if (lba >= pblk->rl.nr_secs) {
                WARN(1, "pblk: read lba out of bounds\n");
                goto out;
        }

        spin_lock(&pblk->trans_lock);
        ppa_l2p = pblk_trans_map_get(pblk, lba);
        spin_unlock(&pblk->trans_lock);

        ppa_gc = addr_to_gen_ppa(pblk, paddr_gc, line->id);
        if (!pblk_ppa_comp(ppa_l2p, ppa_gc))
                goto out;

        rqd->ppa_addr = ppa_l2p;
        valid_secs = 1;

#ifdef CONFIG_NVM_DEBUG
        atomic_long_inc(&pblk->inflight_reads);
#endif

out:
        return valid_secs;
}

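/*
 * Read valid sectors on behalf of the garbage collector. The request is built
 * on the stack and submitted synchronously; the data lands in the buffer
 * provided by the GC request (gc_rq->data).
 */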
int pblk_submit_read_gc(struct pblk *pblk, struct pblk_gc_rq *gc_rq)
{
        struct nvm_tgt_dev *dev = pblk->dev;
        struct nvm_geo *geo = &dev->geo;
        struct bio *bio;
        struct nvm_rq rqd;
        int data_len;
        int ret = NVM_IO_OK;

        memset(&rqd, 0, sizeof(struct nvm_rq));

        rqd.meta_list = nvm_dev_dma_alloc(dev->parent, GFP_KERNEL,
                                                        &rqd.dma_meta_list);
        if (!rqd.meta_list)
                return -ENOMEM;

        if (gc_rq->nr_secs > 1) {
                rqd.ppa_list = rqd.meta_list + pblk_dma_meta_size;
                rqd.dma_ppa_list = rqd.dma_meta_list + pblk_dma_meta_size;

                gc_rq->secs_to_gc = read_ppalist_rq_gc(pblk, &rqd, gc_rq->line,
                                                        gc_rq->lba_list,
                                                        gc_rq->paddr_list,
                                                        gc_rq->nr_secs);
                if (gc_rq->secs_to_gc == 1)
                        rqd.ppa_addr = rqd.ppa_list[0];
        } else {
                gc_rq->secs_to_gc = read_rq_gc(pblk, &rqd, gc_rq->line,
                                                        gc_rq->lba_list[0],
                                                        gc_rq->paddr_list[0]);
        }

        if (!(gc_rq->secs_to_gc))
                goto out;

        data_len = (gc_rq->secs_to_gc) * geo->csecs;
        bio = pblk_bio_map_addr(pblk, gc_rq->data, gc_rq->secs_to_gc, data_len,
                                                PBLK_VMALLOC_META, GFP_KERNEL);
        if (IS_ERR(bio)) {
                pr_err("pblk: could not allocate GC bio (%lu)\n", PTR_ERR(bio));
                ret = PTR_ERR(bio);
                goto err_free_dma;
        }

        bio->bi_iter.bi_sector = 0; /* internal bio */
        bio_set_op_attrs(bio, REQ_OP_READ, 0);

        rqd.opcode = NVM_OP_PREAD;
        rqd.nr_ppas = gc_rq->secs_to_gc;
        rqd.flags = pblk_set_read_mode(pblk, PBLK_READ_RANDOM);
        rqd.bio = bio;

        if (pblk_submit_io_sync(pblk, &rqd)) {
                ret = -EIO;
                pr_err("pblk: GC read request failed\n");
                goto err_free_bio;
        }

        pblk_read_check_rand(pblk, &rqd, gc_rq->lba_list, gc_rq->nr_secs);

        atomic_dec(&pblk->inflight_io);

        if (rqd.error) {
                atomic_long_inc(&pblk->read_failed_gc);
#ifdef CONFIG_NVM_DEBUG
                pblk_print_failed_rqd(pblk, &rqd, rqd.error);
#endif
        }

#ifdef CONFIG_NVM_DEBUG
        atomic_long_add(gc_rq->secs_to_gc, &pblk->sync_reads);
        atomic_long_add(gc_rq->secs_to_gc, &pblk->recov_gc_reads);
        atomic_long_sub(gc_rq->secs_to_gc, &pblk->inflight_reads);
#endif

out:
        nvm_dev_dma_free(dev->parent, rqd.meta_list, rqd.dma_meta_list);
        return ret;

err_free_bio:
        bio_put(bio);
err_free_dma:
        nvm_dev_dma_free(dev->parent, rqd.meta_list, rqd.dma_meta_list);
        return ret;
}