linux/drivers/lightnvm/pblk-read.c
// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2016 CNEX Labs
 * Initial release: Javier Gonzalez <javier@cnexlabs.com>
 *                  Matias Bjorling <matias@cnexlabs.com>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version
 * 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * pblk-read.c - pblk's read path
 */

#include "pblk.h"

/*
 * There is no guarantee that the value read from cache has not been updated and
 * resides at another location in the cache. We do guarantee though that if the
 * value is read from the cache, it belongs to the mapped lba. In order to
 * guarantee that writes and reads are ordered, a flush must be issued.
 */
static int pblk_read_from_cache(struct pblk *pblk, struct bio *bio,
                                sector_t lba, struct ppa_addr ppa)
{
#ifdef CONFIG_NVM_PBLK_DEBUG
        /* Callers must ensure that the ppa points to a cache address */
        BUG_ON(pblk_ppa_empty(ppa));
        BUG_ON(!pblk_addr_in_cache(ppa));
#endif

        return pblk_rb_copy_to_bio(&pblk->rwb, bio, lba, ppa);
}

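/*
 * Multi-sector read. pblk_lookup_l2p_seq() resolves a run of consecutive
 * mappings that come from the same source (write buffer or device) and sets
 * *from_cache accordingly. Cache-resident sectors are copied into the bio
 * right here; device-resident sectors are left in rqd->ppa_list for the
 * device read. The returned sector count may be smaller than rqd->nr_ppas,
 * in which case the caller splits the bio.
 */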
static int pblk_read_ppalist_rq(struct pblk *pblk, struct nvm_rq *rqd,
                                 struct bio *bio, sector_t blba,
                                 bool *from_cache)
{
        void *meta_list = rqd->meta_list;
        int nr_secs, i;

retry:
        nr_secs = pblk_lookup_l2p_seq(pblk, rqd->ppa_list, blba, rqd->nr_ppas,
                                        from_cache);

        if (!*from_cache)
                goto end;

        for (i = 0; i < nr_secs; i++) {
                struct pblk_sec_meta *meta = pblk_get_meta(pblk, meta_list, i);
                sector_t lba = blba + i;

                if (pblk_ppa_empty(rqd->ppa_list[i])) {
                        __le64 addr_empty = cpu_to_le64(ADDR_EMPTY);

                        meta->lba = addr_empty;
                } else if (pblk_addr_in_cache(rqd->ppa_list[i])) {
                        /*
                         * Try to read from write buffer. The address is later
                         * checked on the write buffer to prevent retrieving
                         * overwritten data.
                         */
                        if (!pblk_read_from_cache(pblk, bio, lba,
                                                        rqd->ppa_list[i])) {
                                if (i == 0) {
                                        /*
                                         * We haven't called bio_advance()
                                         * yet, so we can just retry.
                                         */
                                        goto retry;
                                } else {
                                        /*
                                         * We have already called
                                         * bio_advance(), so we cannot retry.
                                         * Return instead, so that the caller
                                         * can split the bio at the current
                                         * sector position.
                                         */
                                        nr_secs = i;
                                        goto end;
                                }
                        }
                        meta->lba = cpu_to_le64(lba);
#ifdef CONFIG_NVM_PBLK_DEBUG
                        atomic_long_inc(&pblk->cache_reads);
#endif
                }
                bio_advance(bio, PBLK_EXPOSED_PAGE_SIZE);
        }

end:
        if (pblk_io_aligned(pblk, nr_secs))
                rqd->is_seq = 1;

#ifdef CONFIG_NVM_PBLK_DEBUG
        atomic_long_add(nr_secs, &pblk->inflight_reads);
#endif

        return nr_secs;
}

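/*
 * Sanity check a completed sequential read against the lba stored in the
 * out-of-band metadata area. A mismatch indicates that the device returned
 * data for the wrong lba.
 */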
static void pblk_read_check_seq(struct pblk *pblk, struct nvm_rq *rqd,
                                sector_t blba)
{
        void *meta_list = rqd->meta_list;
        int nr_lbas = rqd->nr_ppas;
        int i;

        if (!pblk_is_oob_meta_supported(pblk))
                return;

        for (i = 0; i < nr_lbas; i++) {
                struct pblk_sec_meta *meta = pblk_get_meta(pblk, meta_list, i);
                u64 lba = le64_to_cpu(meta->lba);

                if (lba == ADDR_EMPTY)
                        continue;

                if (lba != blba + i) {
#ifdef CONFIG_NVM_PBLK_DEBUG
                        struct ppa_addr *ppa_list = nvm_rq_to_ppa_list(rqd);

                        print_ppa(pblk, &ppa_list[i], "seq", i);
#endif
                        pblk_err(pblk, "corrupted read LBA (%llu/%llu)\n",
                                                        lba, (u64)blba + i);
                        WARN_ON(1);
                }
        }
}

/*
 * There can be holes (ADDR_EMPTY entries) in the lba list, which is why the
 * metadata index j is advanced separately from the lba index i.
 */
static void pblk_read_check_rand(struct pblk *pblk, struct nvm_rq *rqd,
                                 u64 *lba_list, int nr_lbas)
{
        void *meta_lba_list = rqd->meta_list;
        int i, j;

        if (!pblk_is_oob_meta_supported(pblk))
                return;

        for (i = 0, j = 0; i < nr_lbas; i++) {
                struct pblk_sec_meta *meta = pblk_get_meta(pblk,
                                                           meta_lba_list, j);
                u64 lba = lba_list[i];
                u64 meta_lba;

                if (lba == ADDR_EMPTY)
                        continue;

                meta_lba = le64_to_cpu(meta->lba);

                if (lba != meta_lba) {
#ifdef CONFIG_NVM_PBLK_DEBUG
                        struct ppa_addr *ppa_list = nvm_rq_to_ppa_list(rqd);

                        print_ppa(pblk, &ppa_list[j], "rnd", j);
#endif
                        pblk_err(pblk, "corrupted read LBA (%llu/%llu)\n",
                                                        meta_lba, lba);
                        WARN_ON(1);
                }

                j++;
        }

        WARN_ONCE(j != rqd->nr_ppas, "pblk: corrupted random request\n");
}

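/*
 * Complete the original user bio. NVM_RSP_WARN_HIGHECC indicates a read
 * that succeeded but required a high ECC effort; the data itself is valid,
 * so the bio still completes successfully.
 */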
static void pblk_end_user_read(struct bio *bio, int error)
{
        if (error && error != NVM_RSP_WARN_HIGHECC)
                bio_io_error(bio);
        else
                bio_endio(bio);
}

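/*
 * Common read completion: account the I/O, log device errors, verify the
 * out-of-band metadata, and release the internal bio, the line reference
 * (if requested) and the request itself.
 */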
static void __pblk_end_io_read(struct pblk *pblk, struct nvm_rq *rqd,
                               bool put_line)
{
        struct pblk_g_ctx *r_ctx = nvm_rq_to_pdu(rqd);
        struct bio *int_bio = rqd->bio;
        unsigned long start_time = r_ctx->start_time;

        bio_end_io_acct(int_bio, start_time);

        if (rqd->error)
                pblk_log_read_err(pblk, rqd);

        pblk_read_check_seq(pblk, rqd, r_ctx->lba);
        bio_put(int_bio);

        if (put_line)
                pblk_rq_to_line_put(pblk, rqd);

#ifdef CONFIG_NVM_PBLK_DEBUG
        atomic_long_add(rqd->nr_ppas, &pblk->sync_reads);
        atomic_long_sub(rqd->nr_ppas, &pblk->inflight_reads);
#endif

        pblk_free_rqd(pblk, rqd, PBLK_READ);
        atomic_dec(&pblk->inflight_io);
}

static void pblk_end_io_read(struct nvm_rq *rqd)
{
        struct pblk *pblk = rqd->private;
        struct pblk_g_ctx *r_ctx = nvm_rq_to_pdu(rqd);
        struct bio *bio = (struct bio *)r_ctx->private;

        pblk_end_user_read(bio, rqd->error);
        __pblk_end_io_read(pblk, rqd, true);
}

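/*
 * Single-sector variant of pblk_read_ppalist_rq(). A failed cache read
 * means the entry was overwritten while we were reading it, so the mapping
 * is simply looked up again and the read retried.
 */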
static void pblk_read_rq(struct pblk *pblk, struct nvm_rq *rqd, struct bio *bio,
                         sector_t lba, bool *from_cache)
{
        struct pblk_sec_meta *meta = pblk_get_meta(pblk, rqd->meta_list, 0);
        struct ppa_addr ppa;

        pblk_lookup_l2p_seq(pblk, &ppa, lba, 1, from_cache);

#ifdef CONFIG_NVM_PBLK_DEBUG
        atomic_long_inc(&pblk->inflight_reads);
#endif

retry:
        if (pblk_ppa_empty(ppa)) {
                __le64 addr_empty = cpu_to_le64(ADDR_EMPTY);

                meta->lba = addr_empty;
                return;
        }

        /* Try to read from write buffer. The address is later checked on the
         * write buffer to prevent retrieving overwritten data.
         */
        if (pblk_addr_in_cache(ppa)) {
                if (!pblk_read_from_cache(pblk, bio, lba, ppa)) {
                        pblk_lookup_l2p_seq(pblk, &ppa, lba, 1, from_cache);
                        goto retry;
                }

                meta->lba = cpu_to_le64(lba);

#ifdef CONFIG_NVM_PBLK_DEBUG
                atomic_long_inc(&pblk->cache_reads);
#endif
        } else {
                rqd->ppa_addr = ppa;
        }
}

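/*
 * Entry point for user reads, called from pblk's bio submission path. The
 * incoming bio is cloned so that cache reads can advance it and device
 * errors can be handled without touching the original. Requests that mix
 * cache and device data are split and resubmitted until each piece is
 * served from a single source. Note that bio_split() takes its size in
 * 512-byte block layer sectors; NR_PHY_IN_LOG converts pblk sectors to
 * that unit.
 */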
void pblk_submit_read(struct pblk *pblk, struct bio *bio)
{
        sector_t blba = pblk_get_lba(bio);
        unsigned int nr_secs = pblk_get_secs(bio);
        bool from_cache;
        struct pblk_g_ctx *r_ctx;
        struct nvm_rq *rqd;
        struct bio *int_bio, *split_bio;
        unsigned long start_time;

        start_time = bio_start_io_acct(bio);

        rqd = pblk_alloc_rqd(pblk, PBLK_READ);

        rqd->opcode = NVM_OP_PREAD;
        rqd->nr_ppas = nr_secs;
        rqd->private = pblk;
        rqd->end_io = pblk_end_io_read;

        r_ctx = nvm_rq_to_pdu(rqd);
        r_ctx->start_time = start_time;
        r_ctx->lba = blba;

        if (pblk_alloc_rqd_meta(pblk, rqd)) {
                bio_io_error(bio);
                pblk_free_rqd(pblk, rqd, PBLK_READ);
                return;
        }

        /* Clone the read bio to deal internally with:
         * - read errors when reading from the drive
         * - bio_advance() calls during cache reads
         */
        int_bio = bio_clone_fast(bio, GFP_KERNEL, &pblk_bio_set);

        if (nr_secs > 1)
                nr_secs = pblk_read_ppalist_rq(pblk, rqd, int_bio, blba,
                                                &from_cache);
        else
                pblk_read_rq(pblk, rqd, int_bio, blba, &from_cache);

split_retry:
        r_ctx->private = bio; /* original bio */
        rqd->bio = int_bio; /* internal bio */

        if (from_cache && nr_secs == rqd->nr_ppas) {
                /* All data was read from cache, we can complete the IO. */
                pblk_end_user_read(bio, 0);
                atomic_inc(&pblk->inflight_io);
                __pblk_end_io_read(pblk, rqd, false);
        } else if (nr_secs != rqd->nr_ppas) {
                /* The read bio request could be partially filled by the write
                 * buffer, but there are some holes that need to be read from
                 * the drive. To handle this, we use the block layer mechanism
                 * to split the request into smaller ones and chain them.
                 */
                split_bio = bio_split(bio, nr_secs * NR_PHY_IN_LOG, GFP_KERNEL,
                                        &pblk_bio_set);
                bio_chain(split_bio, bio);
                submit_bio_noacct(bio);

                /* The new bio contains the first N sectors of the previous
                 * one, so we can keep using the existing rqd, but we need to
                 * shrink the number of PPAs in it. The new bio is also
                 * guaranteed to contain only data from either the cache or
                 * the drive, never a mix of both.
                 */
                bio = split_bio;
                rqd->nr_ppas = nr_secs;
                if (rqd->nr_ppas == 1)
                        rqd->ppa_addr = rqd->ppa_list[0];

                /* Recreate int_bio - the existing one might already have had
                 * internal fields modified.
                 */
                bio_put(int_bio);
                int_bio = bio_clone_fast(bio, GFP_KERNEL, &pblk_bio_set);
                goto split_retry;
        } else if (pblk_submit_io(pblk, rqd, NULL)) {
                /* Submitting the IO to the drive failed, report an error */
                rqd->error = -ENODEV;
                pblk_end_io_read(rqd);
        }
}

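/*
 * GC reads are based on physical addresses recorded when the victim line
 * was written. Revalidate each one against the current L2P mapping: if a
 * sector has been overwritten in the meantime, the mapping no longer points
 * to this line and the sector is dropped from the GC request.
 */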
static int read_ppalist_rq_gc(struct pblk *pblk, struct nvm_rq *rqd,
                              struct pblk_line *line, u64 *lba_list,
                              u64 *paddr_list_gc, unsigned int nr_secs)
{
        struct ppa_addr ppa_list_l2p[NVM_MAX_VLBA];
        struct ppa_addr ppa_gc;
        int valid_secs = 0;
        int i;

        pblk_lookup_l2p_rand(pblk, ppa_list_l2p, lba_list, nr_secs);

        for (i = 0; i < nr_secs; i++) {
                if (lba_list[i] == ADDR_EMPTY)
                        continue;

                ppa_gc = addr_to_gen_ppa(pblk, paddr_list_gc[i], line->id);
                if (!pblk_ppa_comp(ppa_list_l2p[i], ppa_gc)) {
                        paddr_list_gc[i] = lba_list[i] = ADDR_EMPTY;
                        continue;
                }

                rqd->ppa_list[valid_secs++] = ppa_list_l2p[i];
        }

#ifdef CONFIG_NVM_PBLK_DEBUG
        atomic_long_add(valid_secs, &pblk->inflight_reads);
#endif

        return valid_secs;
}

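/*
 * Single-sector variant of read_ppalist_rq_gc(), with an additional bounds
 * check on the lba recovered from the line metadata.
 */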
static int read_rq_gc(struct pblk *pblk, struct nvm_rq *rqd,
                      struct pblk_line *line, sector_t lba,
                      u64 paddr_gc)
{
        struct ppa_addr ppa_l2p, ppa_gc;
        int valid_secs = 0;

        if (lba == ADDR_EMPTY)
                goto out;

        /* logic error: lba out-of-bounds */
        if (lba >= pblk->capacity) {
                WARN(1, "pblk: read lba out of bounds\n");
                goto out;
        }

        spin_lock(&pblk->trans_lock);
        ppa_l2p = pblk_trans_map_get(pblk, lba);
        spin_unlock(&pblk->trans_lock);

        ppa_gc = addr_to_gen_ppa(pblk, paddr_gc, line->id);
        if (!pblk_ppa_comp(ppa_l2p, ppa_gc))
                goto out;

        rqd->ppa_addr = ppa_l2p;
        valid_secs = 1;

#ifdef CONFIG_NVM_PBLK_DEBUG
        atomic_long_inc(&pblk->inflight_reads);
#endif

out:
        return valid_secs;
}

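/*
 * Synchronous read path for the garbage collector. The request lives on the
 * stack and only the sectors whose mappings still point to the victim line
 * are read; gc_rq->secs_to_gc tells the caller how many were valid.
 */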
int pblk_submit_read_gc(struct pblk *pblk, struct pblk_gc_rq *gc_rq)
{
        struct nvm_rq rqd;
        int ret = NVM_IO_OK;

        memset(&rqd, 0, sizeof(struct nvm_rq));

        ret = pblk_alloc_rqd_meta(pblk, &rqd);
        if (ret)
                return ret;

        if (gc_rq->nr_secs > 1) {
                gc_rq->secs_to_gc = read_ppalist_rq_gc(pblk, &rqd, gc_rq->line,
                                                        gc_rq->lba_list,
                                                        gc_rq->paddr_list,
                                                        gc_rq->nr_secs);
                if (gc_rq->secs_to_gc == 1)
                        rqd.ppa_addr = rqd.ppa_list[0];
        } else {
                gc_rq->secs_to_gc = read_rq_gc(pblk, &rqd, gc_rq->line,
                                                        gc_rq->lba_list[0],
                                                        gc_rq->paddr_list[0]);
        }

        if (!(gc_rq->secs_to_gc))
                goto out;

        rqd.opcode = NVM_OP_PREAD;
        rqd.nr_ppas = gc_rq->secs_to_gc;

        if (pblk_submit_io_sync(pblk, &rqd, gc_rq->data)) {
                ret = -EIO;
                goto out;
        }

        pblk_read_check_rand(pblk, &rqd, gc_rq->lba_list, gc_rq->nr_secs);

        atomic_dec(&pblk->inflight_io);

        if (rqd.error) {
                atomic_long_inc(&pblk->read_failed_gc);
#ifdef CONFIG_NVM_PBLK_DEBUG
                pblk_print_failed_rqd(pblk, &rqd, rqd.error);
#endif
        }

#ifdef CONFIG_NVM_PBLK_DEBUG
        atomic_long_add(gc_rq->secs_to_gc, &pblk->sync_reads);
        atomic_long_add(gc_rq->secs_to_gc, &pblk->recov_gc_reads);
        atomic_long_sub(gc_rq->secs_to_gc, &pblk->inflight_reads);
#endif

out:
        pblk_free_rqd_meta(pblk, &rqd);
        return ret;
}