linux/drivers/lightnvm/pblk-write.c
/*
 * Copyright (C) 2016 CNEX Labs
 * Initial release: Javier Gonzalez <javier@cnexlabs.com>
 *                  Matias Bjorling <matias@cnexlabs.com>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version
 * 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * pblk-write.c - pblk's write path from write buffer to media
 */

#include "pblk.h"

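/* Complete entries in the write buffer for a finished write request: end the
 * original bios attached to each entry, release any padding pages, free the
 * request and advance the buffer's sync pointer.
 */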
static unsigned long pblk_end_w_bio(struct pblk *pblk, struct nvm_rq *rqd,
                                    struct pblk_c_ctx *c_ctx)
{
        struct bio *original_bio;
        struct pblk_rb *rwb = &pblk->rwb;
        unsigned long ret;
        int i;

        for (i = 0; i < c_ctx->nr_valid; i++) {
                struct pblk_w_ctx *w_ctx;
                int pos = c_ctx->sentry + i;
                int flags;

                w_ctx = pblk_rb_w_ctx(rwb, pos);
                flags = READ_ONCE(w_ctx->flags);

                if (flags & PBLK_FLUSH_ENTRY) {
                        flags &= ~PBLK_FLUSH_ENTRY;
                        /* Release flags on context. Protect from writes */
                        smp_store_release(&w_ctx->flags, flags);

#ifdef CONFIG_NVM_DEBUG
                        atomic_dec(&rwb->inflight_flush_point);
#endif
                }

                while ((original_bio = bio_list_pop(&w_ctx->bios)))
                        bio_endio(original_bio);
        }

        if (c_ctx->nr_padded)
                pblk_bio_free_pages(pblk, rqd->bio, c_ctx->nr_valid,
                                                        c_ctx->nr_padded);

#ifdef CONFIG_NVM_DEBUG
        atomic_long_add(rqd->nr_ppas, &pblk->sync_writes);
#endif

        ret = pblk_rb_sync_advance(&pblk->rwb, c_ctx->nr_valid);

        bio_put(rqd->bio);
        pblk_free_rqd(pblk, rqd, PBLK_WRITE);

        return ret;
}

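/* Complete a write request that was queued because it finished out of order
 * with respect to the write buffer's sync pointer.
 */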
static unsigned long pblk_end_queued_w_bio(struct pblk *pblk,
                                           struct nvm_rq *rqd,
                                           struct pblk_c_ctx *c_ctx)
{
        list_del(&c_ctx->list);
        return pblk_end_w_bio(pblk, rqd, c_ctx);
}

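/* Complete a user write. Requests must complete in the order in which their
 * entries sit in the write buffer; out-of-order completions are parked on
 * pblk->compl_list until the preceding requests have been synced.
 */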
static void pblk_complete_write(struct pblk *pblk, struct nvm_rq *rqd,
                                struct pblk_c_ctx *c_ctx)
{
        struct pblk_c_ctx *c, *r;
        unsigned long flags;
        unsigned long pos;

#ifdef CONFIG_NVM_DEBUG
        atomic_long_sub(c_ctx->nr_valid, &pblk->inflight_writes);
#endif

        pblk_up_rq(pblk, rqd->ppa_list, rqd->nr_ppas, c_ctx->lun_bitmap);

        pos = pblk_rb_sync_init(&pblk->rwb, &flags);
        if (pos == c_ctx->sentry) {
                pos = pblk_end_w_bio(pblk, rqd, c_ctx);

retry:
                list_for_each_entry_safe(c, r, &pblk->compl_list, list) {
                        rqd = nvm_rq_from_c_ctx(c);
                        if (c->sentry == pos) {
                                pos = pblk_end_queued_w_bio(pblk, rqd, c);
                                goto retry;
                        }
                }
        } else {
                WARN_ON(nvm_rq_from_c_ctx(c_ctx) != rqd);
                list_add_tail(&c_ctx->list, &pblk->compl_list);
        }
        pblk_rb_sync_end(&pblk->rwb, &flags);
}

/* Map remaining sectors in chunk, starting from ppa */
static void pblk_map_remaining(struct pblk *pblk, struct ppa_addr *ppa)
{
        struct nvm_tgt_dev *dev = pblk->dev;
        struct nvm_geo *geo = &dev->geo;
        struct pblk_line *line;
        struct ppa_addr map_ppa = *ppa;
        u64 paddr;
        int done = 0;

        line = &pblk->lines[pblk_ppa_to_line(*ppa)];
        spin_lock(&line->lock);

        while (!done) {
                paddr = pblk_dev_ppa_to_line_addr(pblk, map_ppa);

                if (!test_and_set_bit(paddr, line->map_bitmap))
                        line->left_msecs--;

                if (!test_and_set_bit(paddr, line->invalid_bitmap))
                        le32_add_cpu(line->vsc, -1);

                if (geo->version == NVM_OCSSD_SPEC_12) {
                        map_ppa.ppa++;
                        if (map_ppa.g.pg == geo->num_pg)
                                done = 1;
                } else {
                        map_ppa.m.sec++;
                        if (map_ppa.m.sec == geo->clba)
                                done = 1;
                }
        }

        line->w_err_gc->has_write_err = 1;
        spin_unlock(&line->lock);
}

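/* Prepare the write buffer entries of a failed request for resubmission:
 * invalidate entries whose lba has since been overwritten, mark the rest as
 * submittable again and release the line references taken for the original
 * mapping, since these entries will be re-mapped.
 */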
static void pblk_prepare_resubmit(struct pblk *pblk, unsigned int sentry,
                                  unsigned int nr_entries)
{
        struct pblk_rb *rb = &pblk->rwb;
        struct pblk_rb_entry *entry;
        struct pblk_line *line;
        struct pblk_w_ctx *w_ctx;
        struct ppa_addr ppa_l2p;
        int flags;
        unsigned int pos, i;

        spin_lock(&pblk->trans_lock);
        pos = sentry;
        for (i = 0; i < nr_entries; i++) {
                entry = &rb->entries[pos];
                w_ctx = &entry->w_ctx;

                /* Check if the lba has been overwritten */
                ppa_l2p = pblk_trans_map_get(pblk, w_ctx->lba);
                if (!pblk_ppa_comp(ppa_l2p, entry->cacheline))
                        w_ctx->lba = ADDR_EMPTY;

                /* Mark up the entry as submittable again */
                flags = READ_ONCE(w_ctx->flags);
                flags |= PBLK_WRITTEN_DATA;
                /* Release flags on write context. Protect from writes */
                smp_store_release(&w_ctx->flags, flags);

                /* Decrease the reference count to the line as we will
                 * re-map these entries
                 */
                line = &pblk->lines[pblk_ppa_to_line(w_ctx->ppa)];
                kref_put(&line->ref, pblk_line_put);

                pos = (pos + 1) & (rb->nr_entries - 1);
        }
        spin_unlock(&pblk->trans_lock);
}

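/* Queue the entries of a failed write for resubmission by the write thread.
 * Only the buffer coordinates (sentry, valid and padded counts) are copied
 * into a new context and added to pblk->resubmit_list.
 */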
static void pblk_queue_resubmit(struct pblk *pblk, struct pblk_c_ctx *c_ctx)
{
        struct pblk_c_ctx *r_ctx;

        r_ctx = kzalloc(sizeof(struct pblk_c_ctx), GFP_KERNEL);
        if (!r_ctx)
                return;

        r_ctx->lun_bitmap = NULL;
        r_ctx->sentry = c_ctx->sentry;
        r_ctx->nr_valid = c_ctx->nr_valid;
        r_ctx->nr_padded = c_ctx->nr_padded;

        spin_lock(&pblk->resubmit_lock);
        list_add_tail(&r_ctx->list, &pblk->resubmit_list);
        spin_unlock(&pblk->resubmit_lock);

#ifdef CONFIG_NVM_DEBUG
        atomic_long_add(c_ctx->nr_valid, &pblk->recov_writes);
#endif
}

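/* Write error recovery work: log the error, mark the remaining sectors of the
 * failed chunk as mapped and invalid, queue the affected buffer entries for
 * resubmission and release the resources held by the failed request.
 */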
static void pblk_submit_rec(struct work_struct *work)
{
        struct pblk_rec_ctx *recovery =
                        container_of(work, struct pblk_rec_ctx, ws_rec);
        struct pblk *pblk = recovery->pblk;
        struct nvm_rq *rqd = recovery->rqd;
        struct pblk_c_ctx *c_ctx = nvm_rq_to_pdu(rqd);
        struct ppa_addr *ppa_list;

        pblk_log_write_err(pblk, rqd);

        if (rqd->nr_ppas == 1)
                ppa_list = &rqd->ppa_addr;
        else
                ppa_list = rqd->ppa_list;

        pblk_map_remaining(pblk, ppa_list);
        pblk_queue_resubmit(pblk, c_ctx);

        pblk_up_rq(pblk, rqd->ppa_list, rqd->nr_ppas, c_ctx->lun_bitmap);
        if (c_ctx->nr_padded)
                pblk_bio_free_pages(pblk, rqd->bio, c_ctx->nr_valid,
                                                        c_ctx->nr_padded);
        bio_put(rqd->bio);
        pblk_free_rqd(pblk, rqd, PBLK_WRITE);
        mempool_free(recovery, &pblk->rec_pool);

        atomic_dec(&pblk->inflight_io);
}

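/* Handle a failed user write completion by deferring recovery to a work item;
 * the allocation is atomic since this runs from the I/O completion path.
 */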
static void pblk_end_w_fail(struct pblk *pblk, struct nvm_rq *rqd)
{
        struct pblk_rec_ctx *recovery;

        recovery = mempool_alloc(&pblk->rec_pool, GFP_ATOMIC);
        if (!recovery) {
                pr_err("pblk: could not allocate recovery work\n");
                return;
        }

        recovery->pblk = pblk;
        recovery->rqd = rqd;

        INIT_WORK(&recovery->ws_rec, pblk_submit_rec);
        queue_work(pblk->close_wq, &recovery->ws_rec);
}

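/* Completion callback for user data writes */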
static void pblk_end_io_write(struct nvm_rq *rqd)
{
        struct pblk *pblk = rqd->private;
        struct pblk_c_ctx *c_ctx = nvm_rq_to_pdu(rqd);

        if (rqd->error) {
                pblk_end_w_fail(pblk, rqd);
                return;
        }
#ifdef CONFIG_NVM_DEBUG
        else
                WARN_ONCE(rqd->bio->bi_status, "pblk: corrupted write error\n");
#endif

        pblk_complete_write(pblk, rqd, c_ctx);
        atomic_dec(&pblk->inflight_io);
}

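/* Completion callback for emeta (line metadata) writes. Once all metadata
 * sectors of the line have synced, schedule the work that closes the line.
 */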
static void pblk_end_io_write_meta(struct nvm_rq *rqd)
{
        struct pblk *pblk = rqd->private;
        struct pblk_g_ctx *m_ctx = nvm_rq_to_pdu(rqd);
        struct pblk_line *line = m_ctx->private;
        struct pblk_emeta *emeta = line->emeta;
        int sync;

        pblk_up_page(pblk, rqd->ppa_list, rqd->nr_ppas);

        if (rqd->error) {
                pblk_log_write_err(pblk, rqd);
                pr_err("pblk: metadata I/O failed. Line %d\n", line->id);
                line->w_err_gc->has_write_err = 1;
        }

        sync = atomic_add_return(rqd->nr_ppas, &emeta->sync);
        if (sync == emeta->nr_entries)
                pblk_gen_run_ws(pblk, line, NULL, pblk_line_close_ws,
                                                GFP_ATOMIC, pblk->close_wq);

        pblk_free_rqd(pblk, rqd, PBLK_WRITE_INT);

        atomic_dec(&pblk->inflight_io);
}

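/* Set up the common fields of a write request and allocate the DMA-able
 * metadata and ppa lists that accompany it.
 */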
static int pblk_alloc_w_rq(struct pblk *pblk, struct nvm_rq *rqd,
                           unsigned int nr_secs,
                           nvm_end_io_fn(*end_io))
{
        struct nvm_tgt_dev *dev = pblk->dev;

        /* Setup write request */
        rqd->opcode = NVM_OP_PWRITE;
        rqd->nr_ppas = nr_secs;
        rqd->flags = pblk_set_progr_mode(pblk, PBLK_WRITE);
        rqd->private = pblk;
        rqd->end_io = end_io;

        rqd->meta_list = nvm_dev_dma_alloc(dev->parent, GFP_KERNEL,
                                                        &rqd->dma_meta_list);
        if (!rqd->meta_list)
                return -ENOMEM;

        rqd->ppa_list = rqd->meta_list + pblk_dma_meta_size;
        rqd->dma_ppa_list = rqd->dma_meta_list + pblk_dma_meta_size;

        return 0;
}

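/* Set up a user data write request: allocate its LUN bitmap, initialize the
 * request and map the buffer entries to physical addresses, interleaving an
 * erase for the next line when one is still pending.
 */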
static int pblk_setup_w_rq(struct pblk *pblk, struct nvm_rq *rqd,
                           struct ppa_addr *erase_ppa)
{
        struct pblk_line_meta *lm = &pblk->lm;
        struct pblk_line *e_line = pblk_line_get_erase(pblk);
        struct pblk_c_ctx *c_ctx = nvm_rq_to_pdu(rqd);
        unsigned int valid = c_ctx->nr_valid;
        unsigned int padded = c_ctx->nr_padded;
        unsigned int nr_secs = valid + padded;
        unsigned long *lun_bitmap;
        int ret;

        lun_bitmap = kzalloc(lm->lun_bitmap_len, GFP_KERNEL);
        if (!lun_bitmap)
                return -ENOMEM;
        c_ctx->lun_bitmap = lun_bitmap;

        ret = pblk_alloc_w_rq(pblk, rqd, nr_secs, pblk_end_io_write);
        if (ret) {
                kfree(lun_bitmap);
                return ret;
        }

        if (likely(!e_line || !atomic_read(&e_line->left_eblks)))
                pblk_map_rq(pblk, rqd, c_ctx->sentry, lun_bitmap, valid, 0);
        else
                pblk_map_erase_rq(pblk, rqd, c_ctx->sentry, lun_bitmap,
                                                        valid, erase_ppa);

        return 0;
}

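/* Calculate how many sectors to submit for this request, with debug sanity
 * checks on the result.
 */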
static int pblk_calc_secs_to_sync(struct pblk *pblk, unsigned int secs_avail,
                                  unsigned int secs_to_flush)
{
        int secs_to_sync;

        secs_to_sync = pblk_calc_secs(pblk, secs_avail, secs_to_flush);

#ifdef CONFIG_NVM_DEBUG
        if ((!secs_to_sync && secs_to_flush)
                        || (secs_to_sync < 0)
                        || (secs_to_sync > secs_avail && !secs_to_flush)) {
                pr_err("pblk: bad sector calculation (a:%d,s:%d,f:%d)\n",
                                secs_avail, secs_to_sync, secs_to_flush);
        }
#endif

        return secs_to_sync;
}

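/* Submit a write for the next chunk of emeta (line metadata) of the given
 * line. Pages are allocated from the line under its lock, and the line is
 * removed from the emeta list once all of its metadata has been submitted.
 */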
int pblk_submit_meta_io(struct pblk *pblk, struct pblk_line *meta_line)
{
        struct nvm_tgt_dev *dev = pblk->dev;
        struct nvm_geo *geo = &dev->geo;
        struct pblk_line_mgmt *l_mg = &pblk->l_mg;
        struct pblk_line_meta *lm = &pblk->lm;
        struct pblk_emeta *emeta = meta_line->emeta;
        struct pblk_g_ctx *m_ctx;
        struct bio *bio;
        struct nvm_rq *rqd;
        void *data;
        u64 paddr;
        int rq_ppas = pblk->min_write_pgs;
        int id = meta_line->id;
        int rq_len;
        int i, j;
        int ret;

        rqd = pblk_alloc_rqd(pblk, PBLK_WRITE_INT);

        m_ctx = nvm_rq_to_pdu(rqd);
        m_ctx->private = meta_line;

        rq_len = rq_ppas * geo->csecs;
        data = ((void *)emeta->buf) + emeta->mem;

        bio = pblk_bio_map_addr(pblk, data, rq_ppas, rq_len,
                                        l_mg->emeta_alloc_type, GFP_KERNEL);
        if (IS_ERR(bio)) {
                pr_err("pblk: failed to map emeta io\n");
                ret = PTR_ERR(bio);
                goto fail_free_rqd;
        }
        bio->bi_iter.bi_sector = 0; /* internal bio */
        bio_set_op_attrs(bio, REQ_OP_WRITE, 0);
        rqd->bio = bio;

        ret = pblk_alloc_w_rq(pblk, rqd, rq_ppas, pblk_end_io_write_meta);
        if (ret)
                goto fail_free_bio;

        for (i = 0; i < rqd->nr_ppas; ) {
                spin_lock(&meta_line->lock);
                paddr = __pblk_alloc_page(pblk, meta_line, rq_ppas);
                spin_unlock(&meta_line->lock);
                for (j = 0; j < rq_ppas; j++, i++, paddr++)
                        rqd->ppa_list[i] = addr_to_gen_ppa(pblk, paddr, id);
        }

        emeta->mem += rq_len;
        if (emeta->mem >= lm->emeta_len[0]) {
                spin_lock(&l_mg->close_lock);
                list_del(&meta_line->list);
                spin_unlock(&l_mg->close_lock);
        }

        pblk_down_page(pblk, rqd->ppa_list, rqd->nr_ppas);

        ret = pblk_submit_io(pblk, rqd);
        if (ret) {
                pr_err("pblk: emeta I/O submission failed: %d\n", ret);
                goto fail_rollback;
        }

        return NVM_IO_OK;

fail_rollback:
        pblk_up_page(pblk, rqd->ppa_list, rqd->nr_ppas);
        spin_lock(&l_mg->close_lock);
        pblk_dealloc_page(pblk, meta_line, rq_ppas);
        list_add(&meta_line->list, &meta_line->list);
        spin_unlock(&l_mg->close_lock);
fail_free_bio:
        bio_put(bio);
fail_free_rqd:
        pblk_free_rqd(pblk, rqd, PBLK_WRITE_INT);
        return ret;
}

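/* Helper for pblk_should_submit_meta_io(): checks for LUN conflicts between
 * the pending metadata I/O and the data I/O about to be submitted.
 */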
static inline bool pblk_valid_meta_ppa(struct pblk *pblk,
                                       struct pblk_line *meta_line,
                                       struct nvm_rq *data_rqd)
{
        struct nvm_tgt_dev *dev = pblk->dev;
        struct nvm_geo *geo = &dev->geo;
        struct pblk_c_ctx *data_c_ctx = nvm_rq_to_pdu(data_rqd);
        struct pblk_line *data_line = pblk_line_get_data(pblk);
        struct ppa_addr ppa, ppa_opt;
        u64 paddr;
        int pos_opt;

        /* Schedule a metadata I/O that is half the distance from the data I/O
         * with regards to the number of LUNs forming the pblk instance. This
         * balances LUN conflicts across every I/O.
         *
         * When the LUN configuration changes (e.g., due to GC), this distance
         * can align, which would result in metadata and data I/Os colliding.
         * In this case, modify the distance so that it is no longer optimal,
         * but moves the optimal point in the right direction.
         */
        paddr = pblk_lookup_page(pblk, meta_line);
        ppa = addr_to_gen_ppa(pblk, paddr, 0);
        ppa_opt = addr_to_gen_ppa(pblk, paddr + data_line->meta_distance, 0);
        pos_opt = pblk_ppa_to_pos(geo, ppa_opt);

        if (test_bit(pos_opt, data_c_ctx->lun_bitmap) ||
                                test_bit(pos_opt, data_line->blk_bitmap))
                return true;

        if (unlikely(pblk_ppa_comp(ppa_opt, ppa)))
                data_line->meta_distance--;

        return false;
}

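/* Return the line whose emeta should be written alongside this data request,
 * or NULL if no metadata I/O should be issued at this point.
 */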
static struct pblk_line *pblk_should_submit_meta_io(struct pblk *pblk,
                                                    struct nvm_rq *data_rqd)
{
        struct pblk_line_meta *lm = &pblk->lm;
        struct pblk_line_mgmt *l_mg = &pblk->l_mg;
        struct pblk_line *meta_line;

        spin_lock(&l_mg->close_lock);
retry:
        if (list_empty(&l_mg->emeta_list)) {
                spin_unlock(&l_mg->close_lock);
                return NULL;
        }
        meta_line = list_first_entry(&l_mg->emeta_list, struct pblk_line, list);
        if (meta_line->emeta->mem >= lm->emeta_len[0])
                goto retry;
        spin_unlock(&l_mg->close_lock);

        if (!pblk_valid_meta_ppa(pblk, meta_line, data_rqd))
                return NULL;

        return meta_line;
}

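/* Submit one buffered write to the media: map the request, issue the data
 * I/O for the current line and, when needed, an erase for the next line and
 * a metadata write for the previous one.
 */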
static int pblk_submit_io_set(struct pblk *pblk, struct nvm_rq *rqd)
{
        struct ppa_addr erase_ppa;
        struct pblk_line *meta_line;
        int err;

        pblk_ppa_set_empty(&erase_ppa);

        /* Assign lbas to ppas and populate request structure */
        err = pblk_setup_w_rq(pblk, rqd, &erase_ppa);
        if (err) {
                pr_err("pblk: could not setup write request: %d\n", err);
                return NVM_IO_ERR;
        }

        meta_line = pblk_should_submit_meta_io(pblk, rqd);

        /* Submit data write for current data line */
        err = pblk_submit_io(pblk, rqd);
        if (err) {
                pr_err("pblk: data I/O submission failed: %d\n", err);
                return NVM_IO_ERR;
        }

        if (!pblk_ppa_empty(erase_ppa)) {
                /* Submit erase for next data line */
                if (pblk_blk_erase_async(pblk, erase_ppa)) {
                        struct pblk_line *e_line = pblk_line_get_erase(pblk);
                        struct nvm_tgt_dev *dev = pblk->dev;
                        struct nvm_geo *geo = &dev->geo;
                        int bit;

                        atomic_inc(&e_line->left_eblks);
                        bit = pblk_ppa_to_pos(geo, erase_ppa);
                        WARN_ON(!test_and_clear_bit(bit, e_line->erase_bitmap));
                }
        }

        if (meta_line) {
                /* Submit metadata write for previous data line */
                err = pblk_submit_meta_io(pblk, meta_line);
                if (err) {
                        pr_err("pblk: metadata I/O submission failed: %d\n",
                                                                        err);
                        return NVM_IO_ERR;
                }
        }

        return NVM_IO_OK;
}

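/* Release the padding pages attached to a write request that failed to be
 * submitted.
 */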
static void pblk_free_write_rqd(struct pblk *pblk, struct nvm_rq *rqd)
{
        struct pblk_c_ctx *c_ctx = nvm_rq_to_pdu(rqd);
        struct bio *bio = rqd->bio;

        if (c_ctx->nr_padded)
                pblk_bio_free_pages(pblk, bio, c_ctx->nr_valid,
                                                        c_ctx->nr_padded);
}

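/* Form and submit one write request from the write buffer. Failed writes on
 * the resubmit list take priority over new data in the buffer. Returns 0 if
 * a request was submitted and 1 if there was nothing to do (or on error), so
 * the write thread knows whether to sleep.
 */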
static int pblk_submit_write(struct pblk *pblk)
{
        struct bio *bio;
        struct nvm_rq *rqd;
        unsigned int secs_avail, secs_to_sync, secs_to_com;
        unsigned int secs_to_flush;
        unsigned long pos;
        unsigned int resubmit;

        spin_lock(&pblk->resubmit_lock);
        resubmit = !list_empty(&pblk->resubmit_list);
        spin_unlock(&pblk->resubmit_lock);

        /* Resubmit failed writes first */
        if (resubmit) {
                struct pblk_c_ctx *r_ctx;

                spin_lock(&pblk->resubmit_lock);
                r_ctx = list_first_entry(&pblk->resubmit_list,
                                        struct pblk_c_ctx, list);
                list_del(&r_ctx->list);
                spin_unlock(&pblk->resubmit_lock);

                secs_avail = r_ctx->nr_valid;
                pos = r_ctx->sentry;

                pblk_prepare_resubmit(pblk, pos, secs_avail);
                secs_to_sync = pblk_calc_secs_to_sync(pblk, secs_avail,
                                secs_avail);

                kfree(r_ctx);
        } else {
                /* If there are no sectors in the cache,
                 * flushes (bios without data) will be cleared by
                 * the cache threads
                 */
                secs_avail = pblk_rb_read_count(&pblk->rwb);
                if (!secs_avail)
                        return 1;

                secs_to_flush = pblk_rb_flush_point_count(&pblk->rwb);
                if (!secs_to_flush && secs_avail < pblk->min_write_pgs)
                        return 1;

                secs_to_sync = pblk_calc_secs_to_sync(pblk, secs_avail,
                                        secs_to_flush);
                if (secs_to_sync > pblk->max_write_pgs) {
                        pr_err("pblk: bad buffer sync calculation\n");
                        return 1;
                }

                secs_to_com = (secs_to_sync > secs_avail) ?
                        secs_avail : secs_to_sync;
                pos = pblk_rb_read_commit(&pblk->rwb, secs_to_com);
        }

        bio = bio_alloc(GFP_KERNEL, secs_to_sync);

        bio->bi_iter.bi_sector = 0; /* internal bio */
        bio_set_op_attrs(bio, REQ_OP_WRITE, 0);

        rqd = pblk_alloc_rqd(pblk, PBLK_WRITE);
        rqd->bio = bio;

        if (pblk_rb_read_to_bio(&pblk->rwb, rqd, pos, secs_to_sync,
                                                                secs_avail)) {
                pr_err("pblk: corrupted write bio\n");
                goto fail_put_bio;
        }

        if (pblk_submit_io_set(pblk, rqd))
                goto fail_free_bio;

#ifdef CONFIG_NVM_DEBUG
        atomic_long_add(secs_to_sync, &pblk->sub_writes);
#endif

        return 0;

fail_free_bio:
        pblk_free_write_rqd(pblk, rqd);
fail_put_bio:
        bio_put(bio);
        pblk_free_rqd(pblk, rqd, PBLK_WRITE);

        return 1;
}

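/* Write thread: repeatedly drain the write buffer to the media, sleeping
 * whenever there is nothing to submit.
 */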
int pblk_write_ts(void *data)
{
        struct pblk *pblk = data;

        while (!kthread_should_stop()) {
                if (!pblk_submit_write(pblk))
                        continue;
                set_current_state(TASK_INTERRUPTIBLE);
                io_schedule();
        }

        return 0;
}