linux/drivers/lightnvm/pblk-map.c
/*
 * Copyright (C) 2016 CNEX Labs
 * Initial release: Javier Gonzalez <javier@cnexlabs.com>
 *                  Matias Bjorling <matias@cnexlabs.com>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version
 * 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * pblk-map.c - pblk's lba-ppa mapping strategy
 *
 */

#include "pblk.h"

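/*
 * Map one flash page worth of sectors (pblk->min_write_pgs), starting at
 * write buffer entry @sentry, onto the current data line. Only the first
 * @valid_secs entries carry user data; any remainder is padding, which is
 * mapped to ADDR_EMPTY and invalidated right away.
 */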
static void pblk_map_page_data(struct pblk *pblk, unsigned int sentry,
                               struct ppa_addr *ppa_list,
                               unsigned long *lun_bitmap,
                               struct pblk_sec_meta *meta_list,
                               unsigned int valid_secs)
{
        struct pblk_line *line = pblk_line_get_data(pblk);
        struct pblk_emeta *emeta = line->emeta;
        struct pblk_w_ctx *w_ctx;
        __le64 *lba_list = emeta_to_lbas(pblk, emeta->buf);
        u64 paddr;
        int nr_secs = pblk->min_write_pgs;
        int i;

        paddr = pblk_alloc_page(pblk, line, nr_secs);

        for (i = 0; i < nr_secs; i++, paddr++) {
                /* ppa to be sent to the device */
                ppa_list[i] = addr_to_gen_ppa(pblk, paddr, line->id);

                /* Write context for target bio completion on write buffer.
                 * Note that the write buffer is protected by the sync
                 * backpointer, and that a single writer thread has access to
                 * each specific entry at a time. Thus, it is safe to modify
                 * the context for the entry we are setting up for submission
                 * without taking any lock or memory barrier.
                 */
                if (i < valid_secs) {
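                        /* Hold a reference to the line per in-flight sector
                         * so the line stays alive until the write completes.
                         */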
                        kref_get(&line->ref);
                        w_ctx = pblk_rb_w_ctx(&pblk->rwb, sentry + i);
                        w_ctx->ppa = ppa_list[i];
                        meta_list[i].lba = cpu_to_le64(w_ctx->lba);
                        lba_list[paddr] = cpu_to_le64(w_ctx->lba);
                        line->nr_valid_lbas++;
                } else {
                        __le64 addr_empty = cpu_to_le64(ADDR_EMPTY);

                        lba_list[paddr] = meta_list[i].lba = addr_empty;
                        __pblk_map_invalidate(pblk, line, paddr);
                }
        }

        if (pblk_line_is_full(line)) {
                struct pblk_line *prev_line = line;

                pblk_line_replace_data(pblk);
                pblk_line_close_meta(pblk, prev_line);
        }

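        /* Take the per-LUN write semaphores for the LUNs hit by this
         * request; lun_bitmap records which ones were taken so they can
         * be released once the write completes.
         */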
        pblk_down_rq(pblk, ppa_list, nr_secs, lun_bitmap);
}

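/*
 * Map a write request in chunks of min_write_pgs sectors, starting at
 * sector offset @off. Only the first @valid_secs sectors carry data; the
 * tail of the last partially filled page is padded by
 * pblk_map_page_data().
 */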
void pblk_map_rq(struct pblk *pblk, struct nvm_rq *rqd, unsigned int sentry,
                 unsigned long *lun_bitmap, unsigned int valid_secs,
                 unsigned int off)
{
        struct pblk_sec_meta *meta_list = rqd->meta_list;
        unsigned int map_secs;
        int min = pblk->min_write_pgs;
        int i;

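        /* e.g. with min = 8 and valid_secs = 13, the first iteration maps
         * 8 valid sectors and the second maps 13 % 8 = 5, padding the rest.
         */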
        for (i = off; i < rqd->nr_ppas; i += min) {
                map_secs = (i + min > valid_secs) ? (valid_secs % min) : min;
                pblk_map_page_data(pblk, sentry + i, &rqd->ppa_list[i],
                                        lun_bitmap, &meta_list[i], map_secs);
        }
}

/*
 * Map a write request as above, but also pick one block on the line
 * pending erase and return it in @erase_ppa, so the caller can pair the
 * erase with this write. The caller acquires the erase semaphore only if
 * @erase_ppa is set.
 */
void pblk_map_erase_rq(struct pblk *pblk, struct nvm_rq *rqd,
                       unsigned int sentry, unsigned long *lun_bitmap,
                       unsigned int valid_secs, struct ppa_addr *erase_ppa)
{
        struct nvm_tgt_dev *dev = pblk->dev;
        struct nvm_geo *geo = &dev->geo;
        struct pblk_line_meta *lm = &pblk->lm;
        struct pblk_sec_meta *meta_list = rqd->meta_list;
        struct pblk_line *e_line, *d_line;
        unsigned int map_secs;
        int min = pblk->min_write_pgs;
        int i, erase_lun;

        for (i = 0; i < rqd->nr_ppas; i += min) {
                map_secs = (i + min > valid_secs) ? (valid_secs % min) : min;
                pblk_map_page_data(pblk, sentry + i, &rqd->ppa_list[i],
                                        lun_bitmap, &meta_list[i], map_secs);

                erase_lun = pblk_ppa_to_pos(geo, rqd->ppa_list[i]);

                /* The line to erase can change after the page map. We might
                 * also be writing the last line, in which case there is no
                 * line pending erase and the rest of the request is mapped
                 * without erase pairing.
                 */
                e_line = pblk_line_get_erase(pblk);
                if (!e_line)
                        return pblk_map_rq(pblk, rqd, sentry, lun_bitmap,
                                                        valid_secs, i + min);

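                /* If this LUN on the erase line has not been claimed yet,
                 * claim its block and hand it to the caller through
                 * @erase_ppa so the erase rides along with this write.
                 */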
                spin_lock(&e_line->lock);
                if (!test_bit(erase_lun, e_line->erase_bitmap)) {
                        set_bit(erase_lun, e_line->erase_bitmap);
                        atomic_dec(&e_line->left_eblks);

                        *erase_ppa = rqd->ppa_list[i];
                        erase_ppa->g.blk = e_line->id;

                        spin_unlock(&e_line->lock);

                        /* Avoid evaluating e_line->left_eblks */
                        return pblk_map_rq(pblk, rqd, sentry, lun_bitmap,
                                                        valid_secs, i + min);
                }
                spin_unlock(&e_line->lock);
        }

        d_line = pblk_line_get_data(pblk);

        /* The line to erase can change after the page map. We might also
         * be writing the last line, in which case there is no line
         * pending erase.
         */
        e_line = pblk_line_get_erase(pblk);
        if (!e_line)
                return;

        /* Erase blocks that are bad in this line but might not be in the
         * next one.
         */
        if (unlikely(ppa_empty(*erase_ppa)) &&
                        bitmap_weight(d_line->blk_bitmap, lm->blk_per_line)) {
                int bit = -1;

retry:
                bit = find_next_bit(d_line->blk_bitmap,
                                                lm->blk_per_line, bit + 1);
                if (bit >= lm->blk_per_line)
                        return;

                spin_lock(&e_line->lock);
                if (test_bit(bit, e_line->erase_bitmap)) {
                        spin_unlock(&e_line->lock);
                        goto retry;
                }
                spin_unlock(&e_line->lock);

                set_bit(bit, e_line->erase_bitmap);
                atomic_dec(&e_line->left_eblks);
                *erase_ppa = pblk->luns[bit].bppa; /* set ch and lun */
                erase_ppa->g.blk = e_line->id;
        }
}