linux/drivers/md/bcache/journal.c
// SPDX-License-Identifier: GPL-2.0
/*
 * bcache journalling code, for btree insertions
 *
 * Copyright 2012 Google, Inc.
 */

#include "bcache.h"
#include "btree.h"
#include "debug.h"
#include "extents.h"

#include <trace/events/bcache.h>

/*
 * Journal replay/recovery:
 *
 * This code is all driven from run_cache_set(); we first read the journal
 * entries, do some other stuff, then we mark all the keys in the journal
 * entries (same as garbage collection would), then we replay them - reinserting
 * them into the cache in precisely the same order as they appear in the
 * journal.
 *
 * We only journal keys that go in leaf nodes, which simplifies things quite a
 * bit.
 */

static void journal_read_endio(struct bio *bio)
{
        struct closure *cl = bio->bi_private;

        closure_put(cl);
}

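/*
 * Read a single journal bucket and parse the jsets it contains: each jset
 * with a valid magic and checksum that hasn't already expired (i.e. isn't
 * older than the last_seq of a newer entry) is copied into a freshly
 * allocated journal_replay and inserted into 'list', which is kept sorted
 * by journal sequence number. Returns 1 if at least one entry was added,
 * 0 if the bucket held nothing useful, or a negative errno on allocation
 * failure.
 */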
static int journal_read_bucket(struct cache *ca, struct list_head *list,
                               unsigned int bucket_index)
{
        struct journal_device *ja = &ca->journal;
        struct bio *bio = &ja->bio;

        struct journal_replay *i;
        struct jset *j, *data = ca->set->journal.w[0].data;
        struct closure cl;
        unsigned int len, left, offset = 0;
        int ret = 0;
        sector_t bucket = bucket_to_sector(ca->set, ca->sb.d[bucket_index]);

        closure_init_stack(&cl);

        pr_debug("reading %u", bucket_index);

        while (offset < ca->sb.bucket_size) {
reread:         left = ca->sb.bucket_size - offset;
                len = min_t(unsigned int, left, PAGE_SECTORS << JSET_BITS);

                bio_reset(bio);
                bio->bi_iter.bi_sector  = bucket + offset;
                bio_set_dev(bio, ca->bdev);
                bio->bi_iter.bi_size    = len << 9;

                bio->bi_end_io  = journal_read_endio;
                bio->bi_private = &cl;
                bio_set_op_attrs(bio, REQ_OP_READ, 0);
                bch_bio_map(bio, data);

                closure_bio_submit(ca->set, bio, &cl);
                closure_sync(&cl);

                /*
                 * This function could be simpler now since we no longer write
                 * journal entries that overlap bucket boundaries; this means
                 * the start of a bucket will always have a valid journal entry
                 * if it has any journal entries at all.
                 */

                j = data;
                while (len) {
                        struct list_head *where;
                        size_t blocks, bytes = set_bytes(j);

                        if (j->magic != jset_magic(&ca->sb)) {
                                pr_debug("%u: bad magic", bucket_index);
                                return ret;
                        }

                        if (bytes > left << 9 ||
                            bytes > PAGE_SIZE << JSET_BITS) {
                                pr_info("%u: too big, %zu bytes, offset %u",
                                        bucket_index, bytes, offset);
                                return ret;
                        }

                        if (bytes > len << 9)
                                goto reread;

                        if (j->csum != csum_set(j)) {
                                pr_info("%u: bad csum, %zu bytes, offset %u",
                                        bucket_index, bytes, offset);
                                return ret;
                        }

                        blocks = set_blocks(j, block_bytes(ca->set));

                        /*
                         * Nodes in 'list' are in linear increasing order of
                         * i->j.seq, the node on head has the smallest (oldest)
                         * journal seq, the node on tail has the biggest
                         * (latest) journal seq.
                         */

                        /*
                         * Check from the oldest jset for last_seq. If
                         * i->j.seq < j->last_seq, it means the oldest jset
                         * in the list is expired and useless, remove it from
                         * the list. Otherwise, j is a candidate jset for
                         * the following checks.
                         */
                        while (!list_empty(list)) {
                                i = list_first_entry(list,
                                        struct journal_replay, list);
                                if (i->j.seq >= j->last_seq)
                                        break;
                                list_del(&i->list);
                                kfree(i);
                        }

                        /* iterate list in reverse order (from latest jset) */
                        list_for_each_entry_reverse(i, list, list) {
                                if (j->seq == i->j.seq)
                                        goto next_set;

                                /*
                                 * if j->seq is less than any i->j.last_seq
                                 * in list, j is an expired and useless jset.
                                 */
                                if (j->seq < i->j.last_seq)
                                        goto next_set;

                                /*
                                 * 'where' points to the first jset in the
                                 * list which is older than j.
                                 */
                                if (j->seq > i->j.seq) {
                                        where = &i->list;
                                        goto add;
                                }
                        }

                        where = list;
add:
                        i = kmalloc(offsetof(struct journal_replay, j) +
                                    bytes, GFP_KERNEL);
                        if (!i)
                                return -ENOMEM;
                        memcpy(&i->j, j, bytes);
                        /* Insert the new node right after 'where' */
                        list_add(&i->list, where);
                        ret = 1;

                        if (j->seq > ja->seq[bucket_index])
                                ja->seq[bucket_index] = j->seq;
next_set:
                        offset  += blocks * ca->sb.block_size;
                        len     -= blocks * ca->sb.block_size;
                        j = ((void *) j) + blocks * block_bytes(ca);
                }
        }

        return ret;
}

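/*
 * Find and read every journal entry on each cache device: probe buckets in
 * golden ratio hash order to quickly hit one containing journal data, fall
 * back to a linear scan, then binary search for the most recently written
 * bucket and walk backwards until no more entries turn up. On return 'list'
 * holds the entries to replay and c->journal.seq is the newest sequence
 * number seen; returns 0 on success or the first error from
 * journal_read_bucket().
 */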
int bch_journal_read(struct cache_set *c, struct list_head *list)
{
#define read_bucket(b)                                                  \
        ({                                                              \
                ret = journal_read_bucket(ca, list, b);                 \
                __set_bit(b, bitmap);                                   \
                if (ret < 0)                                            \
                        return ret;                                     \
                ret;                                                    \
        })

        struct cache *ca;
        unsigned int iter;
        int ret = 0;

        for_each_cache(ca, c, iter) {
                struct journal_device *ja = &ca->journal;
                DECLARE_BITMAP(bitmap, SB_JOURNAL_BUCKETS);
                unsigned int i, l, r, m;
                uint64_t seq;

                bitmap_zero(bitmap, SB_JOURNAL_BUCKETS);
                pr_debug("%u journal buckets", ca->sb.njournal_buckets);

                /*
                 * Read journal buckets ordered by golden ratio hash to quickly
                 * find a sequence of buckets with valid journal entries
                 */
                for (i = 0; i < ca->sb.njournal_buckets; i++) {
                        /*
                         * We must try index l with ZERO first for
                         * correctness, because the journal buckets form a
                         * circular buffer which might have wrapped around.
                         */
                        l = (i * 2654435769U) % ca->sb.njournal_buckets;

                        if (test_bit(l, bitmap))
                                break;

                        if (read_bucket(l))
                                goto bsearch;
                }

                /*
                 * If that fails, check all the buckets we haven't checked
                 * already
                 */
                pr_debug("falling back to linear search");

                for (l = find_first_zero_bit(bitmap, ca->sb.njournal_buckets);
                     l < ca->sb.njournal_buckets;
                     l = find_next_zero_bit(bitmap, ca->sb.njournal_buckets,
                                            l + 1))
                        if (read_bucket(l))
                                goto bsearch;

                /* no journal entries on this device? */
                if (l == ca->sb.njournal_buckets)
                        continue;
bsearch:
                BUG_ON(list_empty(list));

                /* Binary search */
                m = l;
                r = find_next_bit(bitmap, ca->sb.njournal_buckets, l + 1);
                pr_debug("starting binary search, l %u r %u", l, r);

                while (l + 1 < r) {
                        seq = list_entry(list->prev, struct journal_replay,
                                         list)->j.seq;

                        m = (l + r) >> 1;
                        read_bucket(m);

                        if (seq != list_entry(list->prev, struct journal_replay,
                                              list)->j.seq)
                                l = m;
                        else
                                r = m;
                }

                /*
                 * Read buckets in reverse order until we stop finding more
                 * journal entries
                 */
                pr_debug("finishing up: m %u njournal_buckets %u",
                         m, ca->sb.njournal_buckets);
                l = m;

                while (1) {
                        if (!l--)
                                l = ca->sb.njournal_buckets - 1;

                        if (l == m)
                                break;

                        if (test_bit(l, bitmap))
                                continue;

                        if (!read_bucket(l))
                                break;
                }

                seq = 0;

                for (i = 0; i < ca->sb.njournal_buckets; i++)
                        if (ja->seq[i] > seq) {
                                seq = ja->seq[i];
                                /*
                                 * When journal_reclaim() goes to allocate for
                                 * the first time, it'll use the bucket after
                                 * ja->cur_idx
                                 */
                                ja->cur_idx = i;
                                ja->last_idx = ja->discard_idx = (i + 1) %
                                        ca->sb.njournal_buckets;

                        }
        }

        if (!list_empty(list))
                c->journal.seq = list_entry(list->prev,
                                            struct journal_replay,
                                            list)->j.seq;

        return 0;
#undef read_bucket
}

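/*
 * Mark the keys we just read from the journal before replaying them: rebuild
 * the journal pin fifo entries for each replay entry, bump the pin count of
 * every bucket referenced by a valid extent so those buckets won't be
 * invalidated before the keys are reinserted, and mark each key the same way
 * the initial garbage collection pass would (bch_initial_mark_key()).
 */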
void bch_journal_mark(struct cache_set *c, struct list_head *list)
{
        atomic_t p = { 0 };
        struct bkey *k;
        struct journal_replay *i;
        struct journal *j = &c->journal;
        uint64_t last = j->seq;

        /*
         * journal.pin should never fill up - we never write a journal
         * entry when it would fill up. But if for some reason it does, we
         * iterate over the list in reverse order so that we can just skip that
         * refcount instead of bugging.
         */

        list_for_each_entry_reverse(i, list, list) {
                BUG_ON(last < i->j.seq);
                i->pin = NULL;

                while (last-- != i->j.seq)
                        if (fifo_free(&j->pin) > 1) {
                                fifo_push_front(&j->pin, p);
                                atomic_set(&fifo_front(&j->pin), 0);
                        }

                if (fifo_free(&j->pin) > 1) {
                        fifo_push_front(&j->pin, p);
                        i->pin = &fifo_front(&j->pin);
                        atomic_set(i->pin, 1);
                }

                for (k = i->j.start;
                     k < bset_bkey_last(&i->j);
                     k = bkey_next(k))
                        if (!__bch_extent_invalid(c, k)) {
                                unsigned int j;

                                for (j = 0; j < KEY_PTRS(k); j++)
                                        if (ptr_available(c, k, j))
                                                atomic_inc(&PTR_BUCKET(c, k, j)->pin);

                                bch_initial_mark_key(c, 0, k);
                        }
        }
}

static bool is_discard_enabled(struct cache_set *s)
{
        struct cache *ca;
        unsigned int i;

        for_each_cache(ca, s, i)
                if (ca->discard)
                        return true;

        return false;
}

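/*
 * Reinsert every key from the journal into the btree, in exactly the order
 * the entries were journalled. A gap in the sequence numbers is tolerated
 * only when discard is enabled on some cache device (the missing entries may
 * simply have been discarded); otherwise it's treated as corruption and -EIO
 * is returned. The replay list is freed before returning.
 */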
int bch_journal_replay(struct cache_set *s, struct list_head *list)
{
        int ret = 0, keys = 0, entries = 0;
        struct bkey *k;
        struct journal_replay *i =
                list_entry(list->prev, struct journal_replay, list);

        uint64_t start = i->j.last_seq, end = i->j.seq, n = start;
        struct keylist keylist;

        list_for_each_entry(i, list, list) {
                BUG_ON(i->pin && atomic_read(i->pin) != 1);

                if (n != i->j.seq) {
                        if (n == start && is_discard_enabled(s))
                                pr_info("bcache: journal entries %llu-%llu may be discarded! (replaying %llu-%llu)",
                                        n, i->j.seq - 1, start, end);
                        else {
                                pr_err("bcache: journal entries %llu-%llu missing! (replaying %llu-%llu)",
                                        n, i->j.seq - 1, start, end);
                                ret = -EIO;
                                goto err;
                        }
                }

                for (k = i->j.start;
                     k < bset_bkey_last(&i->j);
                     k = bkey_next(k)) {
                        trace_bcache_journal_replay_key(k);

                        bch_keylist_init_single(&keylist, k);

                        ret = bch_btree_insert(s, &keylist, i->pin, NULL);
                        if (ret)
                                goto err;

                        BUG_ON(!bch_keylist_empty(&keylist));
                        keys++;

                        cond_resched();
                }

                if (i->pin)
                        atomic_dec(i->pin);
                n = i->j.seq + 1;
                entries++;
        }

        pr_info("journal replay done, %i keys in %i entries, seq %llu",
                keys, entries, end);
err:
        while (!list_empty(list)) {
                i = list_first_entry(list, struct journal_replay, list);
                list_del(&i->list);
                kfree(i);
        }

        return ret;
}

/* Journalling */

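/*
 * Write out up to BTREE_FLUSH_NR dirty btree nodes that still hold a journal
 * pin, so that the oldest journal entries can be released and their buckets
 * reclaimed. Only one flusher runs at a time; the btree_flushing flag turns
 * concurrent callers away early.
 */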
static void btree_flush_write(struct cache_set *c)
{
        struct btree *b, *t, *btree_nodes[BTREE_FLUSH_NR];
        unsigned int i, n;

        if (c->journal.btree_flushing)
                return;

        spin_lock(&c->journal.flush_write_lock);
        if (c->journal.btree_flushing) {
                spin_unlock(&c->journal.flush_write_lock);
                return;
        }
        c->journal.btree_flushing = true;
        spin_unlock(&c->journal.flush_write_lock);

        atomic_long_inc(&c->flush_write);
        memset(btree_nodes, 0, sizeof(btree_nodes));
        n = 0;

        mutex_lock(&c->bucket_lock);
        list_for_each_entry_safe_reverse(b, t, &c->btree_cache, list) {
                if (btree_node_journal_flush(b))
                        pr_err("BUG: flush_write bit should not be set here!");

                mutex_lock(&b->write_lock);

                if (!btree_node_dirty(b)) {
                        mutex_unlock(&b->write_lock);
                        continue;
                }

                if (!btree_current_write(b)->journal) {
                        mutex_unlock(&b->write_lock);
                        continue;
                }

                set_btree_node_journal_flush(b);

                mutex_unlock(&b->write_lock);

                btree_nodes[n++] = b;
                if (n == BTREE_FLUSH_NR)
                        break;
        }
        mutex_unlock(&c->bucket_lock);

        for (i = 0; i < n; i++) {
                b = btree_nodes[i];
                if (!b) {
                        pr_err("BUG: btree_nodes[%d] is NULL", i);
                        continue;
                }

                /* safe to check without holding b->write_lock */
                if (!btree_node_journal_flush(b)) {
                        pr_err("BUG: bnode %p: journal_flush bit cleared", b);
                        continue;
                }

                mutex_lock(&b->write_lock);
                if (!btree_current_write(b)->journal) {
                        clear_bit(BTREE_NODE_journal_flush, &b->flags);
                        mutex_unlock(&b->write_lock);
                        pr_debug("bnode %p: written by others", b);
                        continue;
                }

                if (!btree_node_dirty(b)) {
                        clear_bit(BTREE_NODE_journal_flush, &b->flags);
                        mutex_unlock(&b->write_lock);
                        pr_debug("bnode %p: dirty bit cleared by others", b);
                        continue;
                }

                __bch_btree_node_write(b, NULL);
                clear_bit(BTREE_NODE_journal_flush, &b->flags);
                mutex_unlock(&b->write_lock);
        }

        spin_lock(&c->journal.flush_write_lock);
        c->journal.btree_flushing = false;
        spin_unlock(&c->journal.flush_write_lock);
}

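/* Sequence number of the oldest journal entry that still has pins held */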
#define last_seq(j)     ((j)->seq - fifo_used(&(j)->pin) + 1)

static void journal_discard_endio(struct bio *bio)
{
        struct journal_device *ja =
                container_of(bio, struct journal_device, discard_bio);
        struct cache *ca = container_of(ja, struct cache, journal);

        atomic_set(&ja->discard_in_flight, DISCARD_DONE);

        closure_wake_up(&ca->set->journal.wait);
        closure_put(&ca->set->cl);
}

static void journal_discard_work(struct work_struct *work)
{
        struct journal_device *ja =
                container_of(work, struct journal_device, discard_work);

        submit_bio(&ja->discard_bio);
}

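/*
 * Per-device discard state machine: DISCARD_READY -> DISCARD_IN_FLIGHT while
 * the discard bio is submitted from a workqueue, DISCARD_DONE once it
 * completes, at which point discard_idx is advanced and the next freed
 * journal bucket can be discarded. If the device doesn't do discards,
 * discard_idx simply follows last_idx.
 */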
static void do_journal_discard(struct cache *ca)
{
        struct journal_device *ja = &ca->journal;
        struct bio *bio = &ja->discard_bio;

        if (!ca->discard) {
                ja->discard_idx = ja->last_idx;
                return;
        }

        switch (atomic_read(&ja->discard_in_flight)) {
        case DISCARD_IN_FLIGHT:
                return;

        case DISCARD_DONE:
                ja->discard_idx = (ja->discard_idx + 1) %
                        ca->sb.njournal_buckets;

                atomic_set(&ja->discard_in_flight, DISCARD_READY);
                /* fallthrough */

        case DISCARD_READY:
                if (ja->discard_idx == ja->last_idx)
                        return;

                atomic_set(&ja->discard_in_flight, DISCARD_IN_FLIGHT);

                bio_init(bio, bio->bi_inline_vecs, 1);
                bio_set_op_attrs(bio, REQ_OP_DISCARD, 0);
                bio->bi_iter.bi_sector  = bucket_to_sector(ca->set,
                                                ca->sb.d[ja->discard_idx]);
                bio_set_dev(bio, ca->bdev);
                bio->bi_iter.bi_size    = bucket_bytes(ca);
                bio->bi_end_io          = journal_discard_endio;

                closure_get(&ca->set->cl);
                INIT_WORK(&ja->discard_work, journal_discard_work);
                queue_work(bch_journal_wq, &ja->discard_work);
        }
}

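/*
 * Free up journal space: drop pins that have reached zero, advance each
 * device's last_idx past buckets that only hold entries older than last_seq,
 * kick off discards for the freed buckets, and if the current journal bucket
 * is used up allocate the next bucket on each device (recording the new
 * pointers in c->journal.key). Finally wake up anyone waiting for journal
 * space, unless the journal is still full.
 */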
static void journal_reclaim(struct cache_set *c)
{
        struct bkey *k = &c->journal.key;
        struct cache *ca;
        uint64_t last_seq;
        unsigned int iter, n = 0;
        atomic_t p __maybe_unused;

        atomic_long_inc(&c->reclaim);

        while (!atomic_read(&fifo_front(&c->journal.pin)))
                fifo_pop(&c->journal.pin, p);

        last_seq = last_seq(&c->journal);

        /* Update last_idx */

        for_each_cache(ca, c, iter) {
                struct journal_device *ja = &ca->journal;

                while (ja->last_idx != ja->cur_idx &&
                       ja->seq[ja->last_idx] < last_seq)
                        ja->last_idx = (ja->last_idx + 1) %
                                ca->sb.njournal_buckets;
        }

        for_each_cache(ca, c, iter)
                do_journal_discard(ca);

        if (c->journal.blocks_free)
                goto out;

        /*
         * Allocate:
         * XXX: Sort by free journal space
         */

        for_each_cache(ca, c, iter) {
                struct journal_device *ja = &ca->journal;
                unsigned int next = (ja->cur_idx + 1) % ca->sb.njournal_buckets;

                /* No space available on this device */
                if (next == ja->discard_idx)
                        continue;

                ja->cur_idx = next;
                k->ptr[n++] = MAKE_PTR(0,
                                  bucket_to_sector(c, ca->sb.d[ja->cur_idx]),
                                  ca->sb.nr_this_dev);
                atomic_long_inc(&c->reclaimed_journal_buckets);
        }

        if (n) {
                bkey_init(k);
                SET_KEY_PTRS(k, n);
                c->journal.blocks_free = c->sb.bucket_size >> c->block_bits;
        }
out:
        if (!journal_full(&c->journal))
                __closure_wake_up(&c->journal.wait);
}

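/*
 * Start a new journal entry: flip j->cur to the other journal_write buffer,
 * push a fresh pin (initialized to 1) and bump the sequence number.
 */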
void bch_journal_next(struct journal *j)
{
        atomic_t p = { 1 };

        j->cur = (j->cur == j->w)
                ? &j->w[1]
                : &j->w[0];

        /*
         * The fifo_push() needs to happen at the same time as j->seq is
         * incremented for last_seq() to be calculated correctly
         */
        BUG_ON(!fifo_push(&j->pin, p));
        atomic_set(&fifo_back(&j->pin), 1);

        j->cur->data->seq       = ++j->seq;
        j->cur->dirty           = false;
        j->cur->need_write      = false;
        j->cur->data->keys      = 0;

        if (fifo_full(&j->pin))
                pr_debug("journal_pin full (%zu)", fifo_used(&j->pin));
}

static void journal_write_endio(struct bio *bio)
{
        struct journal_write *w = bio->bi_private;

        cache_set_err_on(bio->bi_status, w->c, "journal io error");
        closure_put(&w->c->journal.io);
}

static void journal_write(struct closure *cl);

static void journal_write_done(struct closure *cl)
{
        struct journal *j = container_of(cl, struct journal, io);
        struct journal_write *w = (j->cur == j->w)
                ? &j->w[1]
                : &j->w[0];

        __closure_wake_up(&w->wait);
        continue_at_nobarrier(cl, journal_write, bch_journal_wq);
}

static void journal_write_unlock(struct closure *cl)
        __releases(&c->journal.lock)
{
        struct cache_set *c = container_of(cl, struct cache_set, journal.io);

        c->journal.io_in_flight = 0;
        spin_unlock(&c->journal.lock);
}

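/*
 * Do the actual journal write: if the current entry has nothing to write,
 * just drop the lock; if the journal is full, reclaim and flush btree nodes
 * before retrying. Otherwise fill in the jset header (btree root, uuid and
 * prio bucket pointers, last_seq, checksum) and submit one bio per cache
 * device pointed to by c->journal.key, then move on to the next entry.
 * Called with c->journal.lock held, which it releases.
 */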
static void journal_write_unlocked(struct closure *cl)
        __releases(c->journal.lock)
{
        struct cache_set *c = container_of(cl, struct cache_set, journal.io);
        struct cache *ca;
        struct journal_write *w = c->journal.cur;
        struct bkey *k = &c->journal.key;
        unsigned int i, sectors = set_blocks(w->data, block_bytes(c)) *
                c->sb.block_size;

        struct bio *bio;
        struct bio_list list;

        bio_list_init(&list);

        if (!w->need_write) {
                closure_return_with_destructor(cl, journal_write_unlock);
                return;
        } else if (journal_full(&c->journal)) {
                journal_reclaim(c);
                spin_unlock(&c->journal.lock);

                btree_flush_write(c);
                continue_at(cl, journal_write, bch_journal_wq);
                return;
        }

        c->journal.blocks_free -= set_blocks(w->data, block_bytes(c));

        w->data->btree_level = c->root->level;

        bkey_copy(&w->data->btree_root, &c->root->key);
        bkey_copy(&w->data->uuid_bucket, &c->uuid_bucket);

        for_each_cache(ca, c, i)
                w->data->prio_bucket[ca->sb.nr_this_dev] = ca->prio_buckets[0];

        w->data->magic          = jset_magic(&c->sb);
        w->data->version        = BCACHE_JSET_VERSION;
        w->data->last_seq       = last_seq(&c->journal);
        w->data->csum           = csum_set(w->data);

        for (i = 0; i < KEY_PTRS(k); i++) {
                ca = PTR_CACHE(c, k, i);
                bio = &ca->journal.bio;

                atomic_long_add(sectors, &ca->meta_sectors_written);

                bio_reset(bio);
                bio->bi_iter.bi_sector  = PTR_OFFSET(k, i);
                bio_set_dev(bio, ca->bdev);
                bio->bi_iter.bi_size = sectors << 9;

                bio->bi_end_io  = journal_write_endio;
                bio->bi_private = w;
                bio_set_op_attrs(bio, REQ_OP_WRITE,
                                 REQ_SYNC|REQ_META|REQ_PREFLUSH|REQ_FUA);
                bch_bio_map(bio, w->data);

                trace_bcache_journal_write(bio, w->data->keys);
                bio_list_add(&list, bio);

                SET_PTR_OFFSET(k, i, PTR_OFFSET(k, i) + sectors);

                ca->journal.seq[ca->journal.cur_idx] = w->data->seq;
        }

        /* If KEY_PTRS(k) == 0, this jset would be lost without ever being written */
        BUG_ON(i == 0);

        atomic_dec_bug(&fifo_back(&c->journal.pin));
        bch_journal_next(&c->journal);
        journal_reclaim(c);

        spin_unlock(&c->journal.lock);

        while ((bio = bio_list_pop(&list)))
                closure_bio_submit(c, bio, cl);

        continue_at(cl, journal_write_done, NULL);
}

static void journal_write(struct closure *cl)
{
        struct cache_set *c = container_of(cl, struct cache_set, journal.io);

        spin_lock(&c->journal.lock);
        journal_write_unlocked(cl);
}

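/*
 * Mark the current journal entry as needing to be written and, unless a
 * journal write is already in flight, kick one off. Called with
 * c->journal.lock held; the lock is released here or by the write path.
 */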
static void journal_try_write(struct cache_set *c)
        __releases(c->journal.lock)
{
        struct closure *cl = &c->journal.io;
        struct journal_write *w = c->journal.cur;

        w->need_write = true;

        if (!c->journal.io_in_flight) {
                c->journal.io_in_flight = 1;
                closure_call(cl, journal_write_unlocked, NULL, &c->cl);
        } else {
                spin_unlock(&c->journal.lock);
        }
}

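/*
 * Wait until the current journal entry has room for another 'nkeys' keys,
 * flushing the entry (or reclaiming and flushing btree nodes when the
 * journal is full) as needed. Returns the journal_write the keys should be
 * added to, with c->journal.lock held.
 */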
static struct journal_write *journal_wait_for_write(struct cache_set *c,
                                                    unsigned int nkeys)
        __acquires(&c->journal.lock)
{
        size_t sectors;
        struct closure cl;
        bool wait = false;

        closure_init_stack(&cl);

        spin_lock(&c->journal.lock);

        while (1) {
                struct journal_write *w = c->journal.cur;

                sectors = __set_blocks(w->data, w->data->keys + nkeys,
                                       block_bytes(c)) * c->sb.block_size;

                if (sectors <= min_t(size_t,
                                     c->journal.blocks_free * c->sb.block_size,
                                     PAGE_SECTORS << JSET_BITS))
                        return w;

                if (wait)
                        closure_wait(&c->journal.wait, &cl);

                if (!journal_full(&c->journal)) {
                        if (wait)
                                trace_bcache_journal_entry_full(c);

                        /*
                         * XXX: If we were inserting so many keys that they
                         * won't fit in an _empty_ journal write, we'll
                         * deadlock. For now, handle this in
                         * bch_keylist_realloc() - but something to think about.
                         */
                        BUG_ON(!w->data->keys);

                        journal_try_write(c); /* unlocks */
                } else {
                        if (wait)
                                trace_bcache_journal_full(c);

                        journal_reclaim(c);
                        spin_unlock(&c->journal.lock);

                        btree_flush_write(c);
                }

                closure_sync(&cl);
                spin_lock(&c->journal.lock);
                wait = true;
        }
}

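/*
 * Delayed work that flushes the current journal entry if it's still dirty,
 * so journalled keys are persisted within roughly journal_delay_ms even when
 * nobody is explicitly waiting on the write.
 */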
static void journal_write_work(struct work_struct *work)
{
        struct cache_set *c = container_of(to_delayed_work(work),
                                           struct cache_set,
                                           journal.work);
        spin_lock(&c->journal.lock);
        if (c->journal.cur->dirty)
                journal_try_write(c);
        else
                spin_unlock(&c->journal.lock);
}

/*
 * Entry point to the journalling code - bio_insert() and btree_invalidate()
 * pass bch_journal() a list of keys to be journalled, and then
 * bch_journal() hands those same keys off to btree_insert_async()
 */
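/*
 * The return value is a pointer to the refcount (journal pin) of the entry
 * the keys were added to, or NULL when journalling is disabled (cache set
 * not in synchronous mode, or CACHE_SET_IO_DISABLE set). The caller drops
 * that reference once it no longer needs the entry pinned; see
 * bch_journal_meta() below.
 */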

atomic_t *bch_journal(struct cache_set *c,
                      struct keylist *keys,
                      struct closure *parent)
{
        struct journal_write *w;
        atomic_t *ret;

        /* No journaling if CACHE_SET_IO_DISABLE set already */
        if (unlikely(test_bit(CACHE_SET_IO_DISABLE, &c->flags)))
                return NULL;

        if (!CACHE_SYNC(&c->sb))
                return NULL;

        w = journal_wait_for_write(c, bch_keylist_nkeys(keys));

        memcpy(bset_bkey_last(w->data), keys->keys, bch_keylist_bytes(keys));
        w->data->keys += bch_keylist_nkeys(keys);

        ret = &fifo_back(&c->journal.pin);
        atomic_inc(ret);

        if (parent) {
                closure_wait(&w->wait, parent);
                journal_try_write(c);
        } else if (!w->dirty) {
                w->dirty = true;
                schedule_delayed_work(&c->journal.work,
                                      msecs_to_jiffies(c->journal_delay_ms));
                spin_unlock(&c->journal.lock);
        } else {
                spin_unlock(&c->journal.lock);
        }

        return ret;
}

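/*
 * Journal an empty keylist: this forces a journal write, whose header
 * records the current btree root and uuid bucket, and gives 'cl' something
 * to wait on. The pin reference returned by bch_journal() is dropped right
 * away since there are no keys that need to stay pinned.
 */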
void bch_journal_meta(struct cache_set *c, struct closure *cl)
{
        struct keylist keys;
        atomic_t *ref;

        bch_keylist_init(&keys);

        ref = bch_journal(c, &keys, cl);
        if (ref)
                atomic_dec_bug(ref);
}

void bch_journal_free(struct cache_set *c)
{
        free_pages((unsigned long) c->journal.w[1].data, JSET_BITS);
        free_pages((unsigned long) c->journal.w[0].data, JSET_BITS);
        free_fifo(&c->journal.pin);
}

int bch_journal_alloc(struct cache_set *c)
{
        struct journal *j = &c->journal;

        spin_lock_init(&j->lock);
        spin_lock_init(&j->flush_write_lock);
        INIT_DELAYED_WORK(&j->work, journal_write_work);

        c->journal_delay_ms = 100;

        j->w[0].c = c;
        j->w[1].c = c;

        if (!(init_fifo(&j->pin, JOURNAL_PIN, GFP_KERNEL)) ||
            !(j->w[0].data = (void *) __get_free_pages(GFP_KERNEL, JSET_BITS)) ||
            !(j->w[1].data = (void *) __get_free_pages(GFP_KERNEL, JSET_BITS)))
                return -ENOMEM;

        return 0;
}