linux/drivers/md/bcache/journal.c
// SPDX-License-Identifier: GPL-2.0
/*
 * bcache journalling code, for btree insertions
 *
 * Copyright 2012 Google, Inc.
 */

#include "bcache.h"
#include "btree.h"
#include "debug.h"
#include "extents.h"

#include <trace/events/bcache.h>

/*
 * Journal replay/recovery:
 *
 * This code is all driven from run_cache_set(); we first read the journal
 * entries, do some other stuff, then we mark all the keys in the journal
 * entries (same as garbage collection would), then we replay them - reinserting
 * them into the cache in precisely the same order as they appear in the
 * journal.
 *
 * We only journal keys that go in leaf nodes, which simplifies things quite a
 * bit.
 */
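/*
 * A rough sketch of the replay sequence driven from run_cache_set():
 *
 *	LIST_HEAD(journal);
 *
 *	bch_journal_read(c, &journal);		// collect jsets from each journal bucket
 *	bch_journal_mark(c, &journal);		// pin/mark buckets the journalled keys point at
 *	bch_journal_replay(c, &journal);	// re-insert the keys, oldest entries first
 *
 * See run_cache_set() in super.c for the real call sites and the work that
 * happens in between.
 */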

static void journal_read_endio(struct bio *bio)
{
        struct closure *cl = bio->bi_private;

        closure_put(cl);
}

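/*
 * Read the journal entries stored in a single journal bucket, appending any
 * valid jsets found to 'list' (kept sorted by seq).  Returns 1 if at least
 * one new entry was added, 0 if the bucket held nothing (more) useful, or
 * -ENOMEM on allocation failure.
 */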
static int journal_read_bucket(struct cache *ca, struct list_head *list,
                               unsigned int bucket_index)
{
        struct journal_device *ja = &ca->journal;
        struct bio *bio = &ja->bio;

        struct journal_replay *i;
        struct jset *j, *data = ca->set->journal.w[0].data;
        struct closure cl;
        unsigned int len, left, offset = 0;
        int ret = 0;
        sector_t bucket = bucket_to_sector(ca->set, ca->sb.d[bucket_index]);

        closure_init_stack(&cl);

        pr_debug("reading %u\n", bucket_index);

        while (offset < ca->sb.bucket_size) {
reread:         left = ca->sb.bucket_size - offset;
                len = min_t(unsigned int, left, PAGE_SECTORS << JSET_BITS);

                bio_reset(bio);
                bio->bi_iter.bi_sector  = bucket + offset;
                bio_set_dev(bio, ca->bdev);
                bio->bi_iter.bi_size    = len << 9;

                bio->bi_end_io  = journal_read_endio;
                bio->bi_private = &cl;
                bio_set_op_attrs(bio, REQ_OP_READ, 0);
                bch_bio_map(bio, data);

                closure_bio_submit(ca->set, bio, &cl);
                closure_sync(&cl);

                /* This function could be simpler now since we no longer write
                 * journal entries that overlap bucket boundaries; this means
                 * the start of a bucket will always have a valid journal entry
                 * if it has any journal entries at all.
                 */

                j = data;
                while (len) {
                        struct list_head *where;
                        size_t blocks, bytes = set_bytes(j);

                        if (j->magic != jset_magic(&ca->sb)) {
                                pr_debug("%u: bad magic\n", bucket_index);
                                return ret;
                        }

                        if (bytes > left << 9 ||
                            bytes > PAGE_SIZE << JSET_BITS) {
                                pr_info("%u: too big, %zu bytes, offset %u\n",
                                        bucket_index, bytes, offset);
                                return ret;
                        }

                        if (bytes > len << 9)
                                goto reread;

                        if (j->csum != csum_set(j)) {
                                pr_info("%u: bad csum, %zu bytes, offset %u\n",
                                        bucket_index, bytes, offset);
                                return ret;
                        }

                        blocks = set_blocks(j, block_bytes(ca));

                        /*
                         * Nodes in 'list' are kept in increasing order of
                         * i->j.seq: the node at the head has the smallest
                         * (oldest) journal seq, and the node at the tail has
                         * the largest (latest) journal seq.
                         */

                        /*
                         * Check last_seq against the oldest jsets first: if
                         * i->j.seq < j->last_seq, the jset at the head of the
                         * list has expired and is useless, so remove it.
                         * Otherwise, j is a candidate jset for the checks
                         * that follow.
                         */
                        while (!list_empty(list)) {
                                i = list_first_entry(list,
                                        struct journal_replay, list);
                                if (i->j.seq >= j->last_seq)
                                        break;
                                list_del(&i->list);
                                kfree(i);
                        }

                        /* iterate list in reverse order (from latest jset) */
                        list_for_each_entry_reverse(i, list, list) {
                                if (j->seq == i->j.seq)
                                        goto next_set;

                                /*
                                 * If j->seq is less than any i->j.last_seq in
                                 * the list, j is an expired and useless jset.
                                 */
                                if (j->seq < i->j.last_seq)
                                        goto next_set;

                                /*
                                 * 'where' points to the first jset in the
                                 * list which is older than j.
                                 */
                                if (j->seq > i->j.seq) {
                                        where = &i->list;
                                        goto add;
                                }
                        }

                        where = list;
add:
                        i = kmalloc(offsetof(struct journal_replay, j) +
                                    bytes, GFP_KERNEL);
                        if (!i)
                                return -ENOMEM;
                        memcpy(&i->j, j, bytes);
                        /* Add to the location after 'where' points to */
                        list_add(&i->list, where);
                        ret = 1;

                        if (j->seq > ja->seq[bucket_index])
                                ja->seq[bucket_index] = j->seq;
next_set:
                        offset  += blocks * ca->sb.block_size;
                        len     -= blocks * ca->sb.block_size;
                        j = ((void *) j) + blocks * block_bytes(ca);
                }
        }

        return ret;
}

int bch_journal_read(struct cache_set *c, struct list_head *list)
{
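/*
 * Helper: read journal bucket 'b', remember in 'bitmap' that it has been
 * tried, return early from bch_journal_read() on error, and otherwise
 * evaluate to nonzero iff the bucket added journal entries to 'list'.
 */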
#define read_bucket(b)                                                  \
        ({                                                              \
                ret = journal_read_bucket(ca, list, b);                 \
                __set_bit(b, bitmap);                                   \
                if (ret < 0)                                            \
                        return ret;                                     \
                ret;                                                    \
        })

        struct cache *ca = c->cache;
        int ret = 0;
        struct journal_device *ja = &ca->journal;
        DECLARE_BITMAP(bitmap, SB_JOURNAL_BUCKETS);
        unsigned int i, l, r, m;
        uint64_t seq;

        bitmap_zero(bitmap, SB_JOURNAL_BUCKETS);
        pr_debug("%u journal buckets\n", ca->sb.njournal_buckets);

        /*
         * Read journal buckets ordered by golden ratio hash to quickly
         * find a sequence of buckets with valid journal entries
         */
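        /*
         * (2654435769 below is 2^32 / golden ratio - the usual Fibonacci
         * hashing multiplier - so successive values of i map to well-spread
         * bucket indices.)
         */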
        for (i = 0; i < ca->sb.njournal_buckets; i++) {
                /*
                 * For correctness we must try index l == 0 first (i == 0
                 * gives us that): the journal buckets form a circular
                 * buffer which might have wrapped around.
                 */
                l = (i * 2654435769U) % ca->sb.njournal_buckets;

                if (test_bit(l, bitmap))
                        break;

                if (read_bucket(l))
                        goto bsearch;
        }

        /*
         * If that fails, check all the buckets we haven't checked
         * already
         */
        pr_debug("falling back to linear search\n");

        for_each_clear_bit(l, bitmap, ca->sb.njournal_buckets)
                if (read_bucket(l))
                        goto bsearch;

        /* no journal entries on this device? */
        if (l == ca->sb.njournal_buckets)
                goto out;
bsearch:
        BUG_ON(list_empty(list));

        /* Binary search */
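        /*
         * Roughly: l is a bucket known to hold journal entries and r is the
         * next bucket beyond it that we have already tried; bisect the
         * untried gap, moving l up whenever reading the midpoint m appends a
         * newer seq to the tail of 'list' (i.e. m holds newer entries than
         * anything seen so far).
         */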
        m = l;
        r = find_next_bit(bitmap, ca->sb.njournal_buckets, l + 1);
        pr_debug("starting binary search, l %u r %u\n", l, r);

        while (l + 1 < r) {
                seq = list_entry(list->prev, struct journal_replay,
                                 list)->j.seq;

                m = (l + r) >> 1;
                read_bucket(m);

                if (seq != list_entry(list->prev, struct journal_replay,
                                      list)->j.seq)
                        l = m;
                else
                        r = m;
        }

        /*
         * Read buckets in reverse order until we stop finding more
         * journal entries
         */
        pr_debug("finishing up: m %u njournal_buckets %u\n",
                 m, ca->sb.njournal_buckets);
        l = m;

        while (1) {
                if (!l--)
                        l = ca->sb.njournal_buckets - 1;

                if (l == m)
                        break;

                if (test_bit(l, bitmap))
                        continue;

                if (!read_bucket(l))
                        break;
        }

        seq = 0;

        for (i = 0; i < ca->sb.njournal_buckets; i++)
                if (ja->seq[i] > seq) {
                        seq = ja->seq[i];
                        /*
                         * When journal_reclaim() goes to allocate for
                         * the first time, it'll use the bucket after
                         * ja->cur_idx
                         */
                        ja->cur_idx = i;
                        ja->last_idx = ja->discard_idx = (i + 1) %
                                ca->sb.njournal_buckets;
                }

out:
        if (!list_empty(list))
                c->journal.seq = list_entry(list->prev,
                                            struct journal_replay,
                                            list)->j.seq;

        return 0;
#undef read_bucket
}

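/*
 * Mark the keys found in the journal so the allocator/GC accounting knows
 * about them before replay: bump the pin count on each bucket a journalled
 * key points into, and feed the key to bch_initial_mark_key().  Also sets up
 * journal.pin refcounts so replayed entries stay pinned until their keys are
 * safely back in the btree.
 */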
void bch_journal_mark(struct cache_set *c, struct list_head *list)
{
        atomic_t p = { 0 };
        struct bkey *k;
        struct journal_replay *i;
        struct journal *j = &c->journal;
        uint64_t last = j->seq;

        /*
         * journal.pin should never fill up - we never write a journal
         * entry when it would fill up. But if for some reason it does, we
         * iterate over the list in reverse order so that we can just skip that
         * refcount instead of bugging.
         */

        list_for_each_entry_reverse(i, list, list) {
                BUG_ON(last < i->j.seq);
                i->pin = NULL;

                while (last-- != i->j.seq)
                        if (fifo_free(&j->pin) > 1) {
                                fifo_push_front(&j->pin, p);
                                atomic_set(&fifo_front(&j->pin), 0);
                        }

                if (fifo_free(&j->pin) > 1) {
                        fifo_push_front(&j->pin, p);
                        i->pin = &fifo_front(&j->pin);
                        atomic_set(i->pin, 1);
                }

                for (k = i->j.start;
                     k < bset_bkey_last(&i->j);
                     k = bkey_next(k))
                        if (!__bch_extent_invalid(c, k)) {
                                unsigned int j;

                                for (j = 0; j < KEY_PTRS(k); j++)
                                        if (ptr_available(c, k, j))
                                                atomic_inc(&PTR_BUCKET(c, k, j)->pin);

                                bch_initial_mark_key(c, 0, k);
                        }
        }
}

static bool is_discard_enabled(struct cache_set *s)
{
        struct cache *ca = s->cache;

        if (ca->discard)
                return true;

        return false;
}

int bch_journal_replay(struct cache_set *s, struct list_head *list)
{
        int ret = 0, keys = 0, entries = 0;
        struct bkey *k;
        struct journal_replay *i =
                list_entry(list->prev, struct journal_replay, list);

        uint64_t start = i->j.last_seq, end = i->j.seq, n = start;
        struct keylist keylist;

        list_for_each_entry(i, list, list) {
                BUG_ON(i->pin && atomic_read(i->pin) != 1);

                if (n != i->j.seq) {
                        if (n == start && is_discard_enabled(s))
                                pr_info("journal entries %llu-%llu may be discarded! (replaying %llu-%llu)\n",
                                        n, i->j.seq - 1, start, end);
                        else {
                                pr_err("journal entries %llu-%llu missing! (replaying %llu-%llu)\n",
                                        n, i->j.seq - 1, start, end);
                                ret = -EIO;
                                goto err;
                        }
                }

                for (k = i->j.start;
                     k < bset_bkey_last(&i->j);
                     k = bkey_next(k)) {
                        trace_bcache_journal_replay_key(k);

                        bch_keylist_init_single(&keylist, k);

                        ret = bch_btree_insert(s, &keylist, i->pin, NULL);
                        if (ret)
                                goto err;

                        BUG_ON(!bch_keylist_empty(&keylist));
                        keys++;

                        cond_resched();
                }

                if (i->pin)
                        atomic_dec(i->pin);
                n = i->j.seq + 1;
                entries++;
        }

        pr_info("journal replay done, %i keys in %i entries, seq %llu\n",
                keys, entries, end);
err:
        while (!list_empty(list)) {
                i = list_first_entry(list, struct journal_replay, list);
                list_del(&i->list);
                kfree(i);
        }

        return ret;
}

/* Journalling */

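/*
 * Write out (at most BTREE_FLUSH_NR) dirty btree nodes whose oldest journal
 * reference is the entry at the front of journal.pin, so that entry's
 * refcount can drop to zero and journal_reclaim() can reuse its bucket.
 */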
static void btree_flush_write(struct cache_set *c)
{
        struct btree *b, *t, *btree_nodes[BTREE_FLUSH_NR];
        unsigned int i, nr;
        int ref_nr;
        atomic_t *fifo_front_p, *now_fifo_front_p;
        size_t mask;

        if (c->journal.btree_flushing)
                return;

        spin_lock(&c->journal.flush_write_lock);
        if (c->journal.btree_flushing) {
                spin_unlock(&c->journal.flush_write_lock);
                return;
        }
        c->journal.btree_flushing = true;
        spin_unlock(&c->journal.flush_write_lock);

        /* get the oldest journal entry and check its refcount */
        spin_lock(&c->journal.lock);
        fifo_front_p = &fifo_front(&c->journal.pin);
        ref_nr = atomic_read(fifo_front_p);
        if (ref_nr <= 0) {
                /*
                 * do nothing if no btree node references
                 * the oldest journal entry
                 */
                spin_unlock(&c->journal.lock);
                goto out;
        }
        spin_unlock(&c->journal.lock);

        mask = c->journal.pin.mask;
        nr = 0;
        atomic_long_inc(&c->flush_write);
        memset(btree_nodes, 0, sizeof(btree_nodes));

        mutex_lock(&c->bucket_lock);
        list_for_each_entry_safe_reverse(b, t, &c->btree_cache, list) {
                /*
                 * It is safe to read now_fifo_front_p without holding
                 * c->journal.lock here: we don't need an exactly accurate
                 * value, we only check whether the front pointer of
                 * c->journal.pin has changed.
                 */
                now_fifo_front_p = &fifo_front(&c->journal.pin);
                /*
                 * If the oldest journal entry gets reclaimed and the front
                 * pointer of c->journal.pin changes, there is no need to
                 * keep scanning c->btree_cache; quit the loop and flush out
                 * what we have found so far.
                 */
                if (now_fifo_front_p != fifo_front_p)
                        break;
                /*
                 * Quit this loop once all matching btree nodes have been
                 * scanned and recorded in btree_nodes[].
                 */
                ref_nr = atomic_read(fifo_front_p);
                if (nr >= ref_nr)
                        break;

                if (btree_node_journal_flush(b))
                        pr_err("BUG: flush_write bit should not be set here!\n");

                mutex_lock(&b->write_lock);

                if (!btree_node_dirty(b)) {
                        mutex_unlock(&b->write_lock);
                        continue;
                }

                if (!btree_current_write(b)->journal) {
                        mutex_unlock(&b->write_lock);
                        continue;
                }

                /*
                 * Only select btree nodes which exactly reference the
                 * oldest journal entry.
                 *
                 * If the journal entry pointed to by fifo_front_p is
                 * reclaimed in parallel, don't worry:
                 * - the list_for_each_xxx loop will quit when it checks the
                 *   next now_fifo_front_p.
                 * - if there are matching nodes already recorded in
                 *   btree_nodes[], they are clean by now (that is how the
                 *   oldest journal entry could be reclaimed); they will
                 *   simply be skipped in the following for-loop.
                 */
                if (((btree_current_write(b)->journal - fifo_front_p) &
                     mask) != 0) {
                        mutex_unlock(&b->write_lock);
                        continue;
                }

                set_btree_node_journal_flush(b);

                mutex_unlock(&b->write_lock);

                btree_nodes[nr++] = b;
                /*
                 * To avoid holding c->bucket_lock for too long, scan at most
                 * BTREE_FLUSH_NR matching btree nodes. If more btree nodes
                 * reference the oldest journal entry, they will be flushed
                 * the next time btree_flush_write() is called.
                 */
                if (nr == BTREE_FLUSH_NR)
                        break;
        }
        mutex_unlock(&c->bucket_lock);

        for (i = 0; i < nr; i++) {
                b = btree_nodes[i];
                if (!b) {
                        pr_err("BUG: btree_nodes[%d] is NULL\n", i);
                        continue;
                }

                /* safe to check without holding b->write_lock */
                if (!btree_node_journal_flush(b)) {
                        pr_err("BUG: bnode %p: journal_flush bit cleaned\n", b);
                        continue;
                }

                mutex_lock(&b->write_lock);
                if (!btree_current_write(b)->journal) {
                        clear_bit(BTREE_NODE_journal_flush, &b->flags);
                        mutex_unlock(&b->write_lock);
                        pr_debug("bnode %p: written by others\n", b);
                        continue;
                }

                if (!btree_node_dirty(b)) {
                        clear_bit(BTREE_NODE_journal_flush, &b->flags);
                        mutex_unlock(&b->write_lock);
                        pr_debug("bnode %p: dirty bit cleaned by others\n", b);
                        continue;
                }

                __bch_btree_node_write(b, NULL);
                clear_bit(BTREE_NODE_journal_flush, &b->flags);
                mutex_unlock(&b->write_lock);
        }

out:
        spin_lock(&c->journal.flush_write_lock);
        c->journal.btree_flushing = false;
        spin_unlock(&c->journal.flush_write_lock);
}

#define last_seq(j)     ((j)->seq - fifo_used(&(j)->pin) + 1)
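/*
 * Worked example: with j->seq == 110 and fifo_used(&j->pin) == 11,
 * last_seq(j) == 100, i.e. entries 100..110 are the ones still pinned
 * (their keys may not all have made it into the btree yet).
 */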

static void journal_discard_endio(struct bio *bio)
{
        struct journal_device *ja =
                container_of(bio, struct journal_device, discard_bio);
        struct cache *ca = container_of(ja, struct cache, journal);

        atomic_set(&ja->discard_in_flight, DISCARD_DONE);

        closure_wake_up(&ca->set->journal.wait);
        closure_put(&ca->set->cl);
}

static void journal_discard_work(struct work_struct *work)
{
        struct journal_device *ja =
                container_of(work, struct journal_device, discard_work);

        submit_bio(&ja->discard_bio);
}

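/*
 * Discard state machine for journal buckets: DISCARD_READY issues a discard
 * for the bucket at discard_idx (via journal_discard_work), DISCARD_IN_FLIGHT
 * waits for it to complete, and DISCARD_DONE (set in journal_discard_endio)
 * advances discard_idx and drops back to DISCARD_READY.  If discards are
 * disabled, discard_idx simply tracks last_idx.
 */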
static void do_journal_discard(struct cache *ca)
{
        struct journal_device *ja = &ca->journal;
        struct bio *bio = &ja->discard_bio;

        if (!ca->discard) {
                ja->discard_idx = ja->last_idx;
                return;
        }

        switch (atomic_read(&ja->discard_in_flight)) {
        case DISCARD_IN_FLIGHT:
                return;

        case DISCARD_DONE:
                ja->discard_idx = (ja->discard_idx + 1) %
                        ca->sb.njournal_buckets;

                atomic_set(&ja->discard_in_flight, DISCARD_READY);
                fallthrough;

        case DISCARD_READY:
                if (ja->discard_idx == ja->last_idx)
                        return;

                atomic_set(&ja->discard_in_flight, DISCARD_IN_FLIGHT);

                bio_init(bio, bio->bi_inline_vecs, 1);
                bio_set_op_attrs(bio, REQ_OP_DISCARD, 0);
                bio->bi_iter.bi_sector  = bucket_to_sector(ca->set,
                                                ca->sb.d[ja->discard_idx]);
                bio_set_dev(bio, ca->bdev);
                bio->bi_iter.bi_size    = bucket_bytes(ca);
                bio->bi_end_io          = journal_discard_endio;

                closure_get(&ca->set->cl);
                INIT_WORK(&ja->discard_work, journal_discard_work);
                queue_work(bch_journal_wq, &ja->discard_work);
        }
}

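/*
 * Free up journal space: pop pins whose refcount has dropped to zero off the
 * front of journal.pin, advance last_idx past buckets that now only hold
 * entries older than last_seq(), kick off discards, and if the current bucket
 * is full move cur_idx on to the next free bucket and reset blocks_free.
 */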
static void journal_reclaim(struct cache_set *c)
{
        struct bkey *k = &c->journal.key;
        struct cache *ca = c->cache;
        uint64_t last_seq;
        unsigned int next;
        struct journal_device *ja = &ca->journal;
        atomic_t p __maybe_unused;

        atomic_long_inc(&c->reclaim);

        while (!atomic_read(&fifo_front(&c->journal.pin)))
                fifo_pop(&c->journal.pin, p);

        last_seq = last_seq(&c->journal);

        /* Update last_idx */

        while (ja->last_idx != ja->cur_idx &&
               ja->seq[ja->last_idx] < last_seq)
                ja->last_idx = (ja->last_idx + 1) %
                        ca->sb.njournal_buckets;

        do_journal_discard(ca);

        if (c->journal.blocks_free)
                goto out;

        next = (ja->cur_idx + 1) % ca->sb.njournal_buckets;
        /* No space available on this device */
        if (next == ja->discard_idx)
                goto out;

        ja->cur_idx = next;
        k->ptr[0] = MAKE_PTR(0,
                             bucket_to_sector(c, ca->sb.d[ja->cur_idx]),
                             ca->sb.nr_this_dev);
        atomic_long_inc(&c->reclaimed_journal_buckets);

        bkey_init(k);
        SET_KEY_PTRS(k, 1);
        c->journal.blocks_free = ca->sb.bucket_size >> c->block_bits;

out:
        if (!journal_full(&c->journal))
                __closure_wake_up(&c->journal.wait);
}

void bch_journal_next(struct journal *j)
{
        atomic_t p = { 1 };

        j->cur = (j->cur == j->w)
                ? &j->w[1]
                : &j->w[0];

        /*
         * The fifo_push() needs to happen at the same time as j->seq is
         * incremented for last_seq() to be calculated correctly
         */
        BUG_ON(!fifo_push(&j->pin, p));
        atomic_set(&fifo_back(&j->pin), 1);

        j->cur->data->seq       = ++j->seq;
        j->cur->dirty           = false;
        j->cur->need_write      = false;
        j->cur->data->keys      = 0;

        if (fifo_full(&j->pin))
                pr_debug("journal_pin full (%zu)\n", fifo_used(&j->pin));
}

static void journal_write_endio(struct bio *bio)
{
        struct journal_write *w = bio->bi_private;

        cache_set_err_on(bio->bi_status, w->c, "journal io error");
        closure_put(&w->c->journal.io);
}

static void journal_write(struct closure *cl);

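/*
 * Completion for a journal write: j->cur has already been switched by
 * bch_journal_next(), so the write that just finished is the *other* w[],
 * whose waiters we can now wake before kicking off the next write.
 */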
static void journal_write_done(struct closure *cl)
{
        struct journal *j = container_of(cl, struct journal, io);
        struct journal_write *w = (j->cur == j->w)
                ? &j->w[1]
                : &j->w[0];

        __closure_wake_up(&w->wait);
        continue_at_nobarrier(cl, journal_write, bch_journal_wq);
}

static void journal_write_unlock(struct closure *cl)
        __releases(&c->journal.lock)
{
        struct cache_set *c = container_of(cl, struct cache_set, journal.io);

        c->journal.io_in_flight = 0;
        spin_unlock(&c->journal.lock);
}

static void journal_write_unlocked(struct closure *cl)
        __releases(c->journal.lock)
{
        struct cache_set *c = container_of(cl, struct cache_set, journal.io);
        struct cache *ca = c->cache;
        struct journal_write *w = c->journal.cur;
        struct bkey *k = &c->journal.key;
        unsigned int i, sectors = set_blocks(w->data, block_bytes(ca)) *
                ca->sb.block_size;

        struct bio *bio;
        struct bio_list list;

        bio_list_init(&list);

        if (!w->need_write) {
                closure_return_with_destructor(cl, journal_write_unlock);
                return;
        } else if (journal_full(&c->journal)) {
                journal_reclaim(c);
                spin_unlock(&c->journal.lock);

                btree_flush_write(c);
                continue_at(cl, journal_write, bch_journal_wq);
                return;
        }

        c->journal.blocks_free -= set_blocks(w->data, block_bytes(ca));

        w->data->btree_level = c->root->level;

        bkey_copy(&w->data->btree_root, &c->root->key);
        bkey_copy(&w->data->uuid_bucket, &c->uuid_bucket);

        w->data->prio_bucket[ca->sb.nr_this_dev] = ca->prio_buckets[0];
        w->data->magic          = jset_magic(&ca->sb);
        w->data->version        = BCACHE_JSET_VERSION;
        w->data->last_seq       = last_seq(&c->journal);
        w->data->csum           = csum_set(w->data);

        for (i = 0; i < KEY_PTRS(k); i++) {
                ca = c->cache;
                bio = &ca->journal.bio;

                atomic_long_add(sectors, &ca->meta_sectors_written);

                bio_reset(bio);
                bio->bi_iter.bi_sector  = PTR_OFFSET(k, i);
                bio_set_dev(bio, ca->bdev);
                bio->bi_iter.bi_size = sectors << 9;

                bio->bi_end_io  = journal_write_endio;
                bio->bi_private = w;
                bio_set_op_attrs(bio, REQ_OP_WRITE,
                                 REQ_SYNC|REQ_META|REQ_PREFLUSH|REQ_FUA);
                bch_bio_map(bio, w->data);

                trace_bcache_journal_write(bio, w->data->keys);
                bio_list_add(&list, bio);

                SET_PTR_OFFSET(k, i, PTR_OFFSET(k, i) + sectors);

                ca->journal.seq[ca->journal.cur_idx] = w->data->seq;
        }

        /* If KEY_PTRS(k) == 0, this jset would never be written and would simply be lost */
        BUG_ON(i == 0);

        atomic_dec_bug(&fifo_back(&c->journal.pin));
        bch_journal_next(&c->journal);
        journal_reclaim(c);

        spin_unlock(&c->journal.lock);

        while ((bio = bio_list_pop(&list)))
                closure_bio_submit(c, bio, cl);

        continue_at(cl, journal_write_done, NULL);
}

static void journal_write(struct closure *cl)
{
        struct cache_set *c = container_of(cl, struct cache_set, journal.io);

        spin_lock(&c->journal.lock);
        journal_write_unlocked(cl);
}

static void journal_try_write(struct cache_set *c)
        __releases(c->journal.lock)
{
        struct closure *cl = &c->journal.io;
        struct journal_write *w = c->journal.cur;

        w->need_write = true;

        if (!c->journal.io_in_flight) {
                c->journal.io_in_flight = 1;
                closure_call(cl, journal_write_unlocked, NULL, &c->cl);
        } else {
                spin_unlock(&c->journal.lock);
        }
}

static struct journal_write *journal_wait_for_write(struct cache_set *c,
                                                    unsigned int nkeys)
        __acquires(&c->journal.lock)
{
        size_t sectors;
        struct closure cl;
        bool wait = false;
        struct cache *ca = c->cache;

        closure_init_stack(&cl);

        spin_lock(&c->journal.lock);

        while (1) {
                struct journal_write *w = c->journal.cur;

                sectors = __set_blocks(w->data, w->data->keys + nkeys,
                                       block_bytes(ca)) * ca->sb.block_size;

                if (sectors <= min_t(size_t,
                                     c->journal.blocks_free * ca->sb.block_size,
                                     PAGE_SECTORS << JSET_BITS))
                        return w;

                if (wait)
                        closure_wait(&c->journal.wait, &cl);

                if (!journal_full(&c->journal)) {
                        if (wait)
                                trace_bcache_journal_entry_full(c);

                        /*
                         * XXX: If we were inserting so many keys that they
                         * won't fit in an _empty_ journal write, we'll
                         * deadlock. For now, handle this in
                         * bch_keylist_realloc() - but something to think about.
                         */
                        BUG_ON(!w->data->keys);

                        journal_try_write(c); /* unlocks */
                } else {
                        if (wait)
                                trace_bcache_journal_full(c);

                        journal_reclaim(c);
                        spin_unlock(&c->journal.lock);

                        btree_flush_write(c);
                }

                closure_sync(&cl);
                spin_lock(&c->journal.lock);
                wait = true;
        }
}

static void journal_write_work(struct work_struct *work)
{
        struct cache_set *c = container_of(to_delayed_work(work),
                                           struct cache_set,
                                           journal.work);
        spin_lock(&c->journal.lock);
        if (c->journal.cur->dirty)
                journal_try_write(c);
        else
                spin_unlock(&c->journal.lock);
}

/*
 * Entry point to the journalling code - bio_insert() and btree_invalidate()
 * pass bch_journal() a list of keys to be journalled, and then
 * bch_journal() hands those same keys off to btree_insert_async()
 */
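/*
 * A rough sketch of how callers use the returned pin reference (NULL means
 * nothing was journalled, e.g. the cache set is not running in sync mode):
 *
 *	atomic_t *ref = bch_journal(c, &keys, parent_cl);
 *	... insert the same keys into the btree ...
 *	if (ref)
 *		atomic_dec_bug(ref);	// drop the journal pin once the keys are in the btree
 *
 * bch_journal() itself waits for room in the current journal write, copies
 * the keys into it, and either kicks the write off immediately (if a parent
 * closure was given) or schedules it after journal_delay_ms.
 */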

atomic_t *bch_journal(struct cache_set *c,
                      struct keylist *keys,
                      struct closure *parent)
{
        struct journal_write *w;
        atomic_t *ret;

        /* No journalling if CACHE_SET_IO_DISABLE is already set */
        if (unlikely(test_bit(CACHE_SET_IO_DISABLE, &c->flags)))
                return NULL;

        if (!CACHE_SYNC(&c->cache->sb))
                return NULL;

        w = journal_wait_for_write(c, bch_keylist_nkeys(keys));

        memcpy(bset_bkey_last(w->data), keys->keys, bch_keylist_bytes(keys));
        w->data->keys += bch_keylist_nkeys(keys);

        ret = &fifo_back(&c->journal.pin);
        atomic_inc(ret);

        if (parent) {
                closure_wait(&w->wait, parent);
                journal_try_write(c);
        } else if (!w->dirty) {
                w->dirty = true;
                queue_delayed_work(bch_flush_wq, &c->journal.work,
                                   msecs_to_jiffies(c->journal_delay_ms));
                spin_unlock(&c->journal.lock);
        } else {
                spin_unlock(&c->journal.lock);
        }

        return ret;
}

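/*
 * Force a journal write with no keys in it - useful when only the metadata
 * carried by the jset itself (btree root, prio bucket pointers, etc.) needs
 * to reach disk.  The pin reference bch_journal() returns is dropped right
 * away since there are no keys to wait on.
 */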
void bch_journal_meta(struct cache_set *c, struct closure *cl)
{
        struct keylist keys;
        atomic_t *ref;

        bch_keylist_init(&keys);

        ref = bch_journal(c, &keys, cl);
        if (ref)
                atomic_dec_bug(ref);
}

void bch_journal_free(struct cache_set *c)
{
        free_pages((unsigned long) c->journal.w[1].data, JSET_BITS);
        free_pages((unsigned long) c->journal.w[0].data, JSET_BITS);
        free_fifo(&c->journal.pin);
}

int bch_journal_alloc(struct cache_set *c)
{
        struct journal *j = &c->journal;

        spin_lock_init(&j->lock);
        spin_lock_init(&j->flush_write_lock);
        INIT_DELAYED_WORK(&j->work, journal_write_work);

        c->journal_delay_ms = 100;

        j->w[0].c = c;
        j->w[1].c = c;

        if (!(init_fifo(&j->pin, JOURNAL_PIN, GFP_KERNEL)) ||
            !(j->w[0].data = (void *) __get_free_pages(GFP_KERNEL|__GFP_COMP, JSET_BITS)) ||
            !(j->w[1].data = (void *) __get_free_pages(GFP_KERNEL|__GFP_COMP, JSET_BITS)))
                return -ENOMEM;

        return 0;
}