linux/drivers/lightnvm/pblk-write.c
// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2016 CNEX Labs
 * Initial release: Javier Gonzalez <javier@cnexlabs.com>
 *                  Matias Bjorling <matias@cnexlabs.com>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version
 * 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * pblk-write.c - pblk's write path from write buffer to media
 */

#include "pblk.h"
#include "pblk-trace.h"

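/*
 * Complete a user write: end all original bios attached to the completed
 * write buffer entries, release any padding pages, advance the buffer's
 * sync pointer and free the request. Returns the new sync position.
 */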
static unsigned long pblk_end_w_bio(struct pblk *pblk, struct nvm_rq *rqd,
				    struct pblk_c_ctx *c_ctx)
{
	struct bio *original_bio;
	struct pblk_rb *rwb = &pblk->rwb;
	unsigned long ret;
	int i;

	for (i = 0; i < c_ctx->nr_valid; i++) {
		struct pblk_w_ctx *w_ctx;
		int pos = c_ctx->sentry + i;
		int flags;

		w_ctx = pblk_rb_w_ctx(rwb, pos);
		flags = READ_ONCE(w_ctx->flags);

		if (flags & PBLK_FLUSH_ENTRY) {
			flags &= ~PBLK_FLUSH_ENTRY;
			/* Release flags on context. Protect from writes */
			smp_store_release(&w_ctx->flags, flags);

#ifdef CONFIG_NVM_PBLK_DEBUG
			atomic_dec(&rwb->inflight_flush_point);
#endif
		}

		while ((original_bio = bio_list_pop(&w_ctx->bios)))
			bio_endio(original_bio);
	}

	if (c_ctx->nr_padded)
		pblk_bio_free_pages(pblk, rqd->bio, c_ctx->nr_valid,
							c_ctx->nr_padded);

#ifdef CONFIG_NVM_PBLK_DEBUG
	atomic_long_add(rqd->nr_ppas, &pblk->sync_writes);
#endif

	ret = pblk_rb_sync_advance(&pblk->rwb, c_ctx->nr_valid);

	bio_put(rqd->bio);
	pblk_free_rqd(pblk, rqd, PBLK_WRITE);

	return ret;
}

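/* Complete a request that was held back on the completion list */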
static unsigned long pblk_end_queued_w_bio(struct pblk *pblk,
					   struct nvm_rq *rqd,
					   struct pblk_c_ctx *c_ctx)
{
	list_del(&c_ctx->list);
	return pblk_end_w_bio(pblk, rqd, c_ctx);
}

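/*
 * Write completions must be processed in write buffer order. If this
 * request is at the current sync position, complete it and then drain any
 * queued completions that have become contiguous; otherwise park it on
 * the completion list until the requests ahead of it finish.
 */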
static void pblk_complete_write(struct pblk *pblk, struct nvm_rq *rqd,
				struct pblk_c_ctx *c_ctx)
{
	struct pblk_c_ctx *c, *r;
	unsigned long flags;
	unsigned long pos;

#ifdef CONFIG_NVM_PBLK_DEBUG
	atomic_long_sub(c_ctx->nr_valid, &pblk->inflight_writes);
#endif
	pblk_up_rq(pblk, c_ctx->lun_bitmap);

	pos = pblk_rb_sync_init(&pblk->rwb, &flags);
	if (pos == c_ctx->sentry) {
		pos = pblk_end_w_bio(pblk, rqd, c_ctx);

retry:
		list_for_each_entry_safe(c, r, &pblk->compl_list, list) {
			rqd = nvm_rq_from_c_ctx(c);
			if (c->sentry == pos) {
				pos = pblk_end_queued_w_bio(pblk, rqd, c);
				goto retry;
			}
		}
	} else {
		WARN_ON(nvm_rq_from_c_ctx(c_ctx) != rqd);
		list_add_tail(&c_ctx->list, &pblk->compl_list);
	}
	pblk_rb_sync_end(&pblk->rwb, &flags);
}

/* Map remaining sectors in chunk, starting from ppa */
static void pblk_map_remaining(struct pblk *pblk, struct ppa_addr *ppa,
		int rqd_ppas)
{
	struct pblk_line *line;
	struct ppa_addr map_ppa = *ppa;
	__le64 addr_empty = cpu_to_le64(ADDR_EMPTY);
	__le64 *lba_list;
	u64 paddr;
	int done = 0;
	int n = 0;

	line = pblk_ppa_to_line(pblk, *ppa);
	lba_list = emeta_to_lbas(pblk, line->emeta->buf);

	spin_lock(&line->lock);

	while (!done) {
		paddr = pblk_dev_ppa_to_line_addr(pblk, map_ppa);

		if (!test_and_set_bit(paddr, line->map_bitmap))
			line->left_msecs--;

		if (n < rqd_ppas && lba_list[paddr] != addr_empty)
			line->nr_valid_lbas--;

		lba_list[paddr] = addr_empty;

		if (!test_and_set_bit(paddr, line->invalid_bitmap))
			le32_add_cpu(line->vsc, -1);

		done = nvm_next_ppa_in_chk(pblk->dev, &map_ppa);

		n++;
	}

	line->w_err_gc->has_write_err = 1;
	spin_unlock(&line->lock);
}

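/*
 * Return failed write buffer entries to a submittable state: invalidate
 * lbas that have been overwritten in the L2P table since submission, mark
 * the entries as writable again and drop the line references taken when
 * the entries were first mapped.
 */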
static void pblk_prepare_resubmit(struct pblk *pblk, unsigned int sentry,
				  unsigned int nr_entries)
{
	struct pblk_rb *rb = &pblk->rwb;
	struct pblk_rb_entry *entry;
	struct pblk_line *line;
	struct pblk_w_ctx *w_ctx;
	struct ppa_addr ppa_l2p;
	int flags;
	unsigned int i;

	spin_lock(&pblk->trans_lock);
	for (i = 0; i < nr_entries; i++) {
		entry = &rb->entries[pblk_rb_ptr_wrap(rb, sentry, i)];
		w_ctx = &entry->w_ctx;

		/* Check if the lba has been overwritten */
		if (w_ctx->lba != ADDR_EMPTY) {
			ppa_l2p = pblk_trans_map_get(pblk, w_ctx->lba);
			if (!pblk_ppa_comp(ppa_l2p, entry->cacheline))
				w_ctx->lba = ADDR_EMPTY;
		}

		/* Mark up the entry as submittable again */
		flags = READ_ONCE(w_ctx->flags);
		flags |= PBLK_WRITTEN_DATA;
		/* Release flags on write context. Protect from writes */
		smp_store_release(&w_ctx->flags, flags);

		/* Decrease the reference count to the line as we will
		 * re-map these entries
		 */
		line = pblk_ppa_to_line(pblk, w_ctx->ppa);
		atomic_dec(&line->sec_to_update);
		kref_put(&line->ref, pblk_line_put);
	}
	spin_unlock(&pblk->trans_lock);
}

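/* Queue a copy of the failed context so the write thread resubmits it */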
static void pblk_queue_resubmit(struct pblk *pblk, struct pblk_c_ctx *c_ctx)
{
	struct pblk_c_ctx *r_ctx;

	r_ctx = kzalloc(sizeof(struct pblk_c_ctx), GFP_KERNEL);
	if (!r_ctx)
		return;

	r_ctx->lun_bitmap = NULL;
	r_ctx->sentry = c_ctx->sentry;
	r_ctx->nr_valid = c_ctx->nr_valid;
	r_ctx->nr_padded = c_ctx->nr_padded;

	spin_lock(&pblk->resubmit_lock);
	list_add_tail(&r_ctx->list, &pblk->resubmit_list);
	spin_unlock(&pblk->resubmit_lock);

#ifdef CONFIG_NVM_PBLK_DEBUG
	atomic_long_add(c_ctx->nr_valid, &pblk->recov_writes);
#endif
}

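/*
 * Recovery work for a failed write: log the error, invalidate the
 * remaining sectors of the failed chunk, queue the entries for
 * resubmission and release the original request.
 */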
static void pblk_submit_rec(struct work_struct *work)
{
	struct pblk_rec_ctx *recovery =
			container_of(work, struct pblk_rec_ctx, ws_rec);
	struct pblk *pblk = recovery->pblk;
	struct nvm_rq *rqd = recovery->rqd;
	struct pblk_c_ctx *c_ctx = nvm_rq_to_pdu(rqd);
	struct ppa_addr *ppa_list = nvm_rq_to_ppa_list(rqd);

	pblk_log_write_err(pblk, rqd);

	pblk_map_remaining(pblk, ppa_list, rqd->nr_ppas);
	pblk_queue_resubmit(pblk, c_ctx);

	pblk_up_rq(pblk, c_ctx->lun_bitmap);
	if (c_ctx->nr_padded)
		pblk_bio_free_pages(pblk, rqd->bio, c_ctx->nr_valid,
							c_ctx->nr_padded);
	bio_put(rqd->bio);
	pblk_free_rqd(pblk, rqd, PBLK_WRITE);
	mempool_free(recovery, &pblk->rec_pool);

	atomic_dec(&pblk->inflight_io);
	pblk_write_kick(pblk);
}

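/*
 * Defer write error handling to the close workqueue. Called from the I/O
 * completion path, hence the atomic allocation.
 */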
static void pblk_end_w_fail(struct pblk *pblk, struct nvm_rq *rqd)
{
	struct pblk_rec_ctx *recovery;

	recovery = mempool_alloc(&pblk->rec_pool, GFP_ATOMIC);
	if (!recovery) {
		pblk_err(pblk, "could not allocate recovery work\n");
		return;
	}

	recovery->pblk = pblk;
	recovery->rqd = rqd;

	INIT_WORK(&recovery->ws_rec, pblk_submit_rec);
	queue_work(pblk->close_wq, &recovery->ws_rec);
}

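/* Completion path for user data writes */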
static void pblk_end_io_write(struct nvm_rq *rqd)
{
	struct pblk *pblk = rqd->private;
	struct pblk_c_ctx *c_ctx = nvm_rq_to_pdu(rqd);

	if (rqd->error) {
		pblk_end_w_fail(pblk, rqd);
		return;
	}

	if (trace_pblk_chunk_state_enabled())
		pblk_check_chunk_state_update(pblk, rqd);
#ifdef CONFIG_NVM_PBLK_DEBUG
	WARN_ONCE(rqd->bio->bi_status, "pblk: corrupted write error\n");
#endif

	pblk_complete_write(pblk, rqd, c_ctx);
	atomic_dec(&pblk->inflight_io);
}

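/*
 * Completion path for emeta writes: release the chunk semaphore and, once
 * all emeta for the line has synced, schedule the line close work.
 */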
static void pblk_end_io_write_meta(struct nvm_rq *rqd)
{
	struct pblk *pblk = rqd->private;
	struct pblk_g_ctx *m_ctx = nvm_rq_to_pdu(rqd);
	struct pblk_line *line = m_ctx->private;
	struct pblk_emeta *emeta = line->emeta;
	struct ppa_addr *ppa_list = nvm_rq_to_ppa_list(rqd);
	int sync;

	pblk_up_chunk(pblk, ppa_list[0]);

	if (rqd->error) {
		pblk_log_write_err(pblk, rqd);
		pblk_err(pblk, "metadata I/O failed. Line %d\n", line->id);
		line->w_err_gc->has_write_err = 1;
	} else {
		if (trace_pblk_chunk_state_enabled())
			pblk_check_chunk_state_update(pblk, rqd);
	}

	sync = atomic_add_return(rqd->nr_ppas, &emeta->sync);
	if (sync == emeta->nr_entries)
		pblk_gen_run_ws(pblk, line, NULL, pblk_line_close_ws,
						GFP_ATOMIC, pblk->close_wq);

	pblk_free_rqd(pblk, rqd, PBLK_WRITE_INT);

	atomic_dec(&pblk->inflight_io);
}

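/* Common request setup shared by data and metadata writes */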
static int pblk_alloc_w_rq(struct pblk *pblk, struct nvm_rq *rqd,
			   unsigned int nr_secs, nvm_end_io_fn(*end_io))
{
	/* Setup write request */
	rqd->opcode = NVM_OP_PWRITE;
	rqd->nr_ppas = nr_secs;
	rqd->is_seq = 1;
	rqd->private = pblk;
	rqd->end_io = end_io;

	return pblk_alloc_rqd_meta(pblk, rqd);
}

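/*
 * Set up a data write: allocate the per-request LUN bitmap and map the
 * write buffer entries to physical addresses, scheduling an erase on the
 * next line if blocks are still pending erase.
 */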
static int pblk_setup_w_rq(struct pblk *pblk, struct nvm_rq *rqd,
			   struct ppa_addr *erase_ppa)
{
	struct pblk_line_meta *lm = &pblk->lm;
	struct pblk_line *e_line = pblk_line_get_erase(pblk);
	struct pblk_c_ctx *c_ctx = nvm_rq_to_pdu(rqd);
	unsigned int valid = c_ctx->nr_valid;
	unsigned int padded = c_ctx->nr_padded;
	unsigned int nr_secs = valid + padded;
	unsigned long *lun_bitmap;
	int ret;

	lun_bitmap = kzalloc(lm->lun_bitmap_len, GFP_KERNEL);
	if (!lun_bitmap)
		return -ENOMEM;
	c_ctx->lun_bitmap = lun_bitmap;

	ret = pblk_alloc_w_rq(pblk, rqd, nr_secs, pblk_end_io_write);
	if (ret) {
		kfree(lun_bitmap);
		return ret;
	}

	if (likely(!e_line || !atomic_read(&e_line->left_eblks)))
		ret = pblk_map_rq(pblk, rqd, c_ctx->sentry, lun_bitmap,
							valid, 0);
	else
		ret = pblk_map_erase_rq(pblk, rqd, c_ctx->sentry, lun_bitmap,
							valid, erase_ppa);

	return ret;
}

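/* Calculate the number of sectors to sync; sanity-checked in debug builds */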
static int pblk_calc_secs_to_sync(struct pblk *pblk, unsigned int secs_avail,
				  unsigned int secs_to_flush)
{
	int secs_to_sync;

	secs_to_sync = pblk_calc_secs(pblk, secs_avail, secs_to_flush, true);

#ifdef CONFIG_NVM_PBLK_DEBUG
	if ((!secs_to_sync && secs_to_flush)
			|| (secs_to_sync < 0)
			|| (secs_to_sync > secs_avail && !secs_to_flush)) {
		pblk_err(pblk, "bad sector calculation (a:%d,s:%d,f:%d)\n",
				secs_avail, secs_to_sync, secs_to_flush);
	}
#endif

	return secs_to_sync;
}

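/*
 * Write out the next chunk of end-of-line metadata (emeta) for the line
 * being closed. Once all emeta has been submitted, the line is removed
 * from the emeta list.
 */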
int pblk_submit_meta_io(struct pblk *pblk, struct pblk_line *meta_line)
{
	struct nvm_tgt_dev *dev = pblk->dev;
	struct nvm_geo *geo = &dev->geo;
	struct pblk_line_mgmt *l_mg = &pblk->l_mg;
	struct pblk_line_meta *lm = &pblk->lm;
	struct pblk_emeta *emeta = meta_line->emeta;
	struct ppa_addr *ppa_list;
	struct pblk_g_ctx *m_ctx;
	struct bio *bio;
	struct nvm_rq *rqd;
	void *data;
	u64 paddr;
	int rq_ppas = pblk->min_write_pgs;
	int id = meta_line->id;
	int rq_len;
	int i, j;
	int ret;

	rqd = pblk_alloc_rqd(pblk, PBLK_WRITE_INT);

	m_ctx = nvm_rq_to_pdu(rqd);
	m_ctx->private = meta_line;

	rq_len = rq_ppas * geo->csecs;
	data = ((void *)emeta->buf) + emeta->mem;

	bio = pblk_bio_map_addr(pblk, data, rq_ppas, rq_len,
					l_mg->emeta_alloc_type, GFP_KERNEL);
	if (IS_ERR(bio)) {
		pblk_err(pblk, "failed to map emeta io\n");
		ret = PTR_ERR(bio);
		goto fail_free_rqd;
	}
	bio->bi_iter.bi_sector = 0; /* internal bio */
	bio_set_op_attrs(bio, REQ_OP_WRITE, 0);
	rqd->bio = bio;

	ret = pblk_alloc_w_rq(pblk, rqd, rq_ppas, pblk_end_io_write_meta);
	if (ret)
		goto fail_free_bio;

	ppa_list = nvm_rq_to_ppa_list(rqd);
	for (i = 0; i < rqd->nr_ppas; ) {
		spin_lock(&meta_line->lock);
		paddr = __pblk_alloc_page(pblk, meta_line, rq_ppas);
		spin_unlock(&meta_line->lock);
		for (j = 0; j < rq_ppas; j++, i++, paddr++)
			ppa_list[i] = addr_to_gen_ppa(pblk, paddr, id);
	}

	spin_lock(&l_mg->close_lock);
	emeta->mem += rq_len;
	if (emeta->mem >= lm->emeta_len[0])
		list_del(&meta_line->list);
	spin_unlock(&l_mg->close_lock);

	pblk_down_chunk(pblk, ppa_list[0]);

	ret = pblk_submit_io(pblk, rqd);
	if (ret) {
		pblk_err(pblk, "emeta I/O submission failed: %d\n", ret);
		goto fail_rollback;
	}

	return NVM_IO_OK;

fail_rollback:
	pblk_up_chunk(pblk, ppa_list[0]);
	spin_lock(&l_mg->close_lock);
	pblk_dealloc_page(pblk, meta_line, rq_ppas);
	/* If the line was taken off the emeta list above, put it back so
	 * the emeta write can be retried.
	 */
	if (emeta->mem >= lm->emeta_len[0])
		list_add(&meta_line->list, &l_mg->emeta_list);
	spin_unlock(&l_mg->close_lock);
fail_free_bio:
	bio_put(bio);
fail_free_rqd:
	pblk_free_rqd(pblk, rqd, PBLK_WRITE_INT);
	return ret;
}

static inline bool pblk_valid_meta_ppa(struct pblk *pblk,
				       struct pblk_line *meta_line,
				       struct nvm_rq *data_rqd)
{
	struct nvm_tgt_dev *dev = pblk->dev;
	struct nvm_geo *geo = &dev->geo;
	struct pblk_c_ctx *data_c_ctx = nvm_rq_to_pdu(data_rqd);
	struct pblk_line *data_line = pblk_line_get_data(pblk);
	struct ppa_addr ppa, ppa_opt;
	u64 paddr;
	int pos_opt;

	/* Schedule a metadata I/O that is half the distance from the data I/O
	 * with regard to the number of LUNs forming the pblk instance. This
	 * balances LUN conflicts across every I/O.
	 *
	 * When the LUN configuration changes (e.g., due to GC), this distance
	 * can align, which would result in metadata and data I/Os colliding.
	 * In this case, modify the distance so that it is no longer optimal,
	 * but move the optimal point in the right direction.
	 */
	paddr = pblk_lookup_page(pblk, meta_line);
	ppa = addr_to_gen_ppa(pblk, paddr, 0);
	ppa_opt = addr_to_gen_ppa(pblk, paddr + data_line->meta_distance, 0);
	pos_opt = pblk_ppa_to_pos(geo, ppa_opt);

	if (test_bit(pos_opt, data_c_ctx->lun_bitmap) ||
				test_bit(pos_opt, data_line->blk_bitmap))
		return true;

	if (unlikely(pblk_ppa_comp(ppa_opt, ppa)))
		data_line->meta_distance--;

	return false;
}

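/*
 * Return the line whose emeta should be written alongside this data I/O,
 * or NULL if no emeta is pending or the metadata placement would conflict
 * with the data I/O.
 */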
static struct pblk_line *pblk_should_submit_meta_io(struct pblk *pblk,
						    struct nvm_rq *data_rqd)
{
	struct pblk_line_meta *lm = &pblk->lm;
	struct pblk_line_mgmt *l_mg = &pblk->l_mg;
	struct pblk_line *meta_line;

	spin_lock(&l_mg->close_lock);
	if (list_empty(&l_mg->emeta_list)) {
		spin_unlock(&l_mg->close_lock);
		return NULL;
	}
	meta_line = list_first_entry(&l_mg->emeta_list, struct pblk_line, list);
	if (meta_line->emeta->mem >= lm->emeta_len[0]) {
		spin_unlock(&l_mg->close_lock);
		return NULL;
	}
	spin_unlock(&l_mg->close_lock);

	if (!pblk_valid_meta_ppa(pblk, meta_line, data_rqd))
		return NULL;

	return meta_line;
}

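/*
 * Submit a data write for the current line and, when applicable, an erase
 * on the next line and an emeta write for a previous line.
 */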
static int pblk_submit_io_set(struct pblk *pblk, struct nvm_rq *rqd)
{
	struct ppa_addr erase_ppa;
	struct pblk_line *meta_line;
	int err;

	pblk_ppa_set_empty(&erase_ppa);

	/* Assign lbas to ppas and populate request structure */
	err = pblk_setup_w_rq(pblk, rqd, &erase_ppa);
	if (err) {
		pblk_err(pblk, "could not setup write request: %d\n", err);
		return NVM_IO_ERR;
	}

	meta_line = pblk_should_submit_meta_io(pblk, rqd);

	/* Submit data write for current data line */
	err = pblk_submit_io(pblk, rqd);
	if (err) {
		pblk_err(pblk, "data I/O submission failed: %d\n", err);
		return NVM_IO_ERR;
	}

	if (!pblk_ppa_empty(erase_ppa)) {
		/* Submit erase for next data line */
		if (pblk_blk_erase_async(pblk, erase_ppa)) {
			struct pblk_line *e_line = pblk_line_get_erase(pblk);
			struct nvm_tgt_dev *dev = pblk->dev;
			struct nvm_geo *geo = &dev->geo;
			int bit;

			atomic_inc(&e_line->left_eblks);
			bit = pblk_ppa_to_pos(geo, erase_ppa);
			WARN_ON(!test_and_clear_bit(bit, e_line->erase_bitmap));
		}
	}

	if (meta_line) {
		/* Submit metadata write for previous data line */
		err = pblk_submit_meta_io(pblk, meta_line);
		if (err) {
			pblk_err(pblk, "metadata I/O submission failed: %d\n",
					err);
			return NVM_IO_ERR;
		}
	}

	return NVM_IO_OK;
}

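/* Free the padding pages attached to a write request's bio */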
static void pblk_free_write_rqd(struct pblk *pblk, struct nvm_rq *rqd)
{
	struct pblk_c_ctx *c_ctx = nvm_rq_to_pdu(rqd);
	struct bio *bio = rqd->bio;

	if (c_ctx->nr_padded)
		pblk_bio_free_pages(pblk, bio, c_ctx->nr_valid,
							c_ctx->nr_padded);
}

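/*
 * Form a write request from the write buffer, or from previously failed
 * entries pending resubmission, and submit it to the media. On success,
 * *secs_left is set when sectors were submitted so the caller keeps
 * iterating.
 */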
static int pblk_submit_write(struct pblk *pblk, int *secs_left)
{
	struct bio *bio;
	struct nvm_rq *rqd;
	unsigned int secs_avail, secs_to_sync, secs_to_com;
	unsigned int secs_to_flush, packed_meta_pgs;
	unsigned long pos;
	unsigned int resubmit;

	*secs_left = 0;

	spin_lock(&pblk->resubmit_lock);
	resubmit = !list_empty(&pblk->resubmit_list);
	spin_unlock(&pblk->resubmit_lock);

	/* Resubmit failed writes first */
	if (resubmit) {
		struct pblk_c_ctx *r_ctx;

		spin_lock(&pblk->resubmit_lock);
		r_ctx = list_first_entry(&pblk->resubmit_list,
					struct pblk_c_ctx, list);
		list_del(&r_ctx->list);
		spin_unlock(&pblk->resubmit_lock);

		secs_avail = r_ctx->nr_valid;
		pos = r_ctx->sentry;

		pblk_prepare_resubmit(pblk, pos, secs_avail);
		secs_to_sync = pblk_calc_secs_to_sync(pblk, secs_avail,
				secs_avail);

		kfree(r_ctx);
	} else {
		/* If there are no sectors in the cache, flushes
		 * (bios without data) will be cleared by the cache threads.
		 */
		secs_avail = pblk_rb_read_count(&pblk->rwb);
		if (!secs_avail)
			return 0;

		secs_to_flush = pblk_rb_flush_point_count(&pblk->rwb);
		if (!secs_to_flush && secs_avail < pblk->min_write_pgs_data)
			return 0;

		secs_to_sync = pblk_calc_secs_to_sync(pblk, secs_avail,
					secs_to_flush);
		if (secs_to_sync > pblk->max_write_pgs) {
			pblk_err(pblk, "bad buffer sync calculation\n");
			return 0;
		}

		secs_to_com = (secs_to_sync > secs_avail) ?
			secs_avail : secs_to_sync;
		pos = pblk_rb_read_commit(&pblk->rwb, secs_to_com);
	}

	packed_meta_pgs = (pblk->min_write_pgs - pblk->min_write_pgs_data);
	bio = bio_alloc(GFP_KERNEL, secs_to_sync + packed_meta_pgs);

	bio->bi_iter.bi_sector = 0; /* internal bio */
	bio_set_op_attrs(bio, REQ_OP_WRITE, 0);

	rqd = pblk_alloc_rqd(pblk, PBLK_WRITE);
	rqd->bio = bio;

	if (pblk_rb_read_to_bio(&pblk->rwb, rqd, pos, secs_to_sync,
								secs_avail)) {
		pblk_err(pblk, "corrupted write bio\n");
		goto fail_put_bio;
	}

	if (pblk_submit_io_set(pblk, rqd))
		goto fail_free_bio;

#ifdef CONFIG_NVM_PBLK_DEBUG
	atomic_long_add(secs_to_sync, &pblk->sub_writes);
#endif

	*secs_left = 1;
	return 0;

fail_free_bio:
	pblk_free_write_rqd(pblk, rqd);
fail_put_bio:
	bio_put(bio);
	pblk_free_rqd(pblk, rqd, PBLK_WRITE);

	return -EINTR;
}

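/*
 * Write thread: drain the write buffer until the thread is stopped. On a
 * fatal write error the thread stops issuing new writes and only sleeps,
 * leaving recovery to the error path.
 */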
int pblk_write_ts(void *data)
{
	struct pblk *pblk = data;
	int secs_left;
	int write_failure = 0;

	while (!kthread_should_stop()) {
		if (!write_failure) {
			write_failure = pblk_submit_write(pblk, &secs_left);

			if (secs_left)
				continue;
		}
		set_current_state(TASK_INTERRUPTIBLE);
		io_schedule();
	}

	return 0;
}