linux/drivers/md/bcache/bset.c
/*
 * Code for working with individual keys, and sorted sets of keys within a
 * btree node
 *
 * Copyright 2012 Google, Inc.
 */

#include "bcache.h"
#include "btree.h"
#include "debug.h"

#include <linux/random.h>
#include <linux/prefetch.h>

/* Keylists */

void bch_keylist_copy(struct keylist *dest, struct keylist *src)
{
        *dest = *src;

        if (src->list == src->d) {
                size_t n = (uint64_t *) src->top - src->d;
                dest->top = (struct bkey *) &dest->d[n];
                dest->list = dest->d;
        }
}

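/*
 * Grow @l to fit one more key with @nptrs pointers. (Comment added for
 * clarity; the 2 below appears to be the two header u64s of a struct bkey,
 * so 2 + nptrs is the size of the new key in u64s.)
 */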
int bch_keylist_realloc(struct keylist *l, int nptrs, struct cache_set *c)
{
        unsigned oldsize = (uint64_t *) l->top - l->list;
        unsigned newsize = oldsize + 2 + nptrs;
        uint64_t *new;

        /* The journalling code doesn't handle the case where the set of keys
         * to insert is bigger than an empty write: if we just return -ENOMEM
         * here, bio_insert() and bio_invalidate() will insert the keys
         * created so far and finish the rest when the keylist is empty.
         */
        if (newsize * sizeof(uint64_t) > block_bytes(c) - sizeof(struct jset))
                return -ENOMEM;

        newsize = roundup_pow_of_two(newsize);

        if (newsize <= KEYLIST_INLINE ||
            roundup_pow_of_two(oldsize) == newsize)
                return 0;

        new = krealloc(l->list == l->d ? NULL : l->list,
                       sizeof(uint64_t) * newsize, GFP_NOIO);

        if (!new)
                return -ENOMEM;

        if (l->list == l->d)
                memcpy(new, l->list, sizeof(uint64_t) * KEYLIST_INLINE);

        l->list = new;
        l->top = (struct bkey *) (&l->list[oldsize]);

        return 0;
}

struct bkey *bch_keylist_pop(struct keylist *l)
{
        struct bkey *k = l->bottom;

        if (k == l->top)
                return NULL;

        while (bkey_next(k) != l->top)
                k = bkey_next(k);

        return l->top = k;
}

/* Pointer validation */

bool __bch_ptr_invalid(struct cache_set *c, int level, const struct bkey *k)
{
        unsigned i;
        char buf[80];

        if (level && (!KEY_PTRS(k) || !KEY_SIZE(k) || KEY_DIRTY(k)))
                goto bad;

        if (!level && KEY_SIZE(k) > KEY_OFFSET(k))
                goto bad;

        if (!KEY_SIZE(k))
                return true;

        for (i = 0; i < KEY_PTRS(k); i++)
                if (ptr_available(c, k, i)) {
                        struct cache *ca = PTR_CACHE(c, k, i);
                        size_t bucket = PTR_BUCKET_NR(c, k, i);
                        size_t r = bucket_remainder(c, PTR_OFFSET(k, i));

                        if (KEY_SIZE(k) + r > c->sb.bucket_size ||
                            bucket <  ca->sb.first_bucket ||
                            bucket >= ca->sb.nbuckets)
                                goto bad;
                }

        return false;
bad:
        bch_bkey_to_text(buf, sizeof(buf), k);
        cache_bug(c, "spotted bad key %s: %s", buf, bch_ptr_status(c, k));
        return true;
}

bool bch_ptr_bad(struct btree *b, const struct bkey *k)
{
        struct bucket *g;
        unsigned i, stale;

        if (!bkey_cmp(k, &ZERO_KEY) ||
            !KEY_PTRS(k) ||
            bch_ptr_invalid(b, k))
                return true;

        if (KEY_PTRS(k) && PTR_DEV(k, 0) == PTR_CHECK_DEV)
                return true;

        for (i = 0; i < KEY_PTRS(k); i++)
                if (ptr_available(b->c, k, i)) {
                        g = PTR_BUCKET(b->c, k, i);
                        stale = ptr_stale(b->c, k, i);

                        btree_bug_on(stale > 96, b,
                                     "key too stale: %i, need_gc %u",
                                     stale, b->c->need_gc);

                        btree_bug_on(stale && KEY_DIRTY(k) && KEY_SIZE(k),
                                     b, "stale dirty pointer");

                        if (stale)
                                return true;

#ifdef CONFIG_BCACHE_EDEBUG
                        if (!mutex_trylock(&b->c->bucket_lock))
                                continue;

                        if (b->level) {
                                if (KEY_DIRTY(k) ||
                                    g->prio != BTREE_PRIO ||
                                    (b->c->gc_mark_valid &&
                                     GC_MARK(g) != GC_MARK_METADATA))
                                        goto bug;

                        } else {
                                if (g->prio == BTREE_PRIO)
                                        goto bug;

                                if (KEY_DIRTY(k) &&
                                    b->c->gc_mark_valid &&
                                    GC_MARK(g) != GC_MARK_DIRTY)
                                        goto bug;
                        }
                        mutex_unlock(&b->c->bucket_lock);
#endif
                }

        return false;
#ifdef CONFIG_BCACHE_EDEBUG
bug:
        mutex_unlock(&b->c->bucket_lock);

        {
                char buf[80];

                bch_bkey_to_text(buf, sizeof(buf), k);
                btree_bug(b,
"inconsistent pointer %s: bucket %zu pin %i prio %i gen %i last_gc %i mark %llu gc_gen %i",
                          buf, PTR_BUCKET_NR(b->c, k, i), atomic_read(&g->pin),
                          g->prio, g->gen, g->last_gc, GC_MARK(g), g->gc_gen);
        }
        return true;
#endif
}

/* Key/pointer manipulation */

void bch_bkey_copy_single_ptr(struct bkey *dest, const struct bkey *src,
                              unsigned i)
{
        BUG_ON(i >= KEY_PTRS(src));

        /* Only copy the header, key, and one pointer. */
        memcpy(dest, src, 2 * sizeof(uint64_t));
        dest->ptr[0] = src->ptr[i];
        SET_KEY_PTRS(dest, 1);
        /* We didn't copy the checksum so clear that bit. */
        SET_KEY_CSUM(dest, 0);
}

bool __bch_cut_front(const struct bkey *where, struct bkey *k)
{
        unsigned i, len = 0;

        if (bkey_cmp(where, &START_KEY(k)) <= 0)
                return false;

        if (bkey_cmp(where, k) < 0)
                len = KEY_OFFSET(k) - KEY_OFFSET(where);
        else
                bkey_copy_key(k, where);

        for (i = 0; i < KEY_PTRS(k); i++)
                SET_PTR_OFFSET(k, i, PTR_OFFSET(k, i) + KEY_SIZE(k) - len);

        BUG_ON(len > KEY_SIZE(k));
        SET_KEY_SIZE(k, len);
        return true;
}

bool __bch_cut_back(const struct bkey *where, struct bkey *k)
{
        unsigned len = 0;

        if (bkey_cmp(where, k) >= 0)
                return false;

        BUG_ON(KEY_INODE(where) != KEY_INODE(k));

        if (bkey_cmp(where, &START_KEY(k)) > 0)
                len = KEY_OFFSET(where) - KEY_START(k);

        bkey_copy_key(k, where);

        BUG_ON(len > KEY_SIZE(k));
        SET_KEY_SIZE(k, len);
        return true;
}

static uint64_t merge_chksums(struct bkey *l, struct bkey *r)
{
        return (l->ptr[KEY_PTRS(l)] + r->ptr[KEY_PTRS(r)]) &
                ~((uint64_t)1 << 63);
}

/* Tries to merge l and r: l should be lower than r
 * Returns true if we were able to merge. If we did merge, l will be the merged
 * key, r will be untouched.
 */
bool bch_bkey_try_merge(struct btree *b, struct bkey *l, struct bkey *r)
{
        unsigned i;

        if (key_merging_disabled(b->c))
                return false;

        if (KEY_PTRS(l) != KEY_PTRS(r) ||
            KEY_DIRTY(l) != KEY_DIRTY(r) ||
            bkey_cmp(l, &START_KEY(r)))
                return false;

        for (i = 0; i < KEY_PTRS(l); i++)
                if (l->ptr[i] + PTR(0, KEY_SIZE(l), 0) != r->ptr[i] ||
                    PTR_BUCKET_NR(b->c, l, i) != PTR_BUCKET_NR(b->c, r, i))
                        return false;

        /* Keys with no pointers aren't restricted to one bucket and could
         * overflow KEY_SIZE
         */
        if (KEY_SIZE(l) + KEY_SIZE(r) > USHRT_MAX) {
                SET_KEY_OFFSET(l, KEY_OFFSET(l) + USHRT_MAX - KEY_SIZE(l));
                SET_KEY_SIZE(l, USHRT_MAX);

                bch_cut_front(l, r);
                return false;
        }

        if (KEY_CSUM(l)) {
                if (KEY_CSUM(r))
                        l->ptr[KEY_PTRS(l)] = merge_chksums(l, r);
                else
                        SET_KEY_CSUM(l, 0);
        }

        SET_KEY_OFFSET(l, KEY_OFFSET(l) + KEY_SIZE(r));
        SET_KEY_SIZE(l, KEY_SIZE(l) + KEY_SIZE(r));

        return true;
}
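
/*
 * Worked example of the above (added for illustration; recall KEY_OFFSET()
 * is where an extent *ends*): l = offset 8, size 8 covers sectors [0, 8)
 * and r = offset 16, size 8 covers [8, 16), so bkey_cmp(l, &START_KEY(r))
 * == 0. If the pointers line up too, l becomes offset 16, size 16 -
 * covering [0, 16) - and r is left untouched.
 */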

/* Binary tree stuff for auxiliary search trees */

static unsigned inorder_next(unsigned j, unsigned size)
{
        if (j * 2 + 1 < size) {
                j = j * 2 + 1;

                while (j * 2 < size)
                        j *= 2;
        } else
                j >>= ffz(j) + 1;

        return j;
}

static unsigned inorder_prev(unsigned j, unsigned size)
{
        if (j * 2 < size) {
                j = j * 2;

                while (j * 2 + 1 < size)
                        j = j * 2 + 1;
        } else
                j >>= ffs(j);

        return j;
}

/* I have no idea why this code works... and I'm the one who wrote it
 *
 * However, I do know what it does:
 * Given a binary tree constructed in an array (i.e. how you normally implement
 * a heap), it converts a node in the tree - referenced by array index - to the
 * index it would have if you did an inorder traversal.
 *
 * Also tested for every j, for all sizes up to around 6 million.
 *
 * The binary tree starts at array index 1, not 0.
 * extra is a function of size:
 *   extra = (size - rounddown_pow_of_two(size - 1)) << 1;
 */
static unsigned __to_inorder(unsigned j, unsigned size, unsigned extra)
{
        unsigned b = fls(j);
        unsigned shift = fls(size - 1) - b;

        j  ^= 1U << (b - 1);
        j <<= 1;
        j  |= 1;
        j <<= shift;

        if (j > extra)
                j -= (j - extra) >> 1;

        return j;
}

static unsigned to_inorder(unsigned j, struct bset_tree *t)
{
        return __to_inorder(j, t->size, t->extra);
}

static unsigned __inorder_to_tree(unsigned j, unsigned size, unsigned extra)
{
        unsigned shift;

        if (j > extra)
                j += j - extra;

        shift = ffs(j);

        j >>= shift;
        j  |= roundup_pow_of_two(size) >> shift;

        return j;
}

static unsigned inorder_to_tree(unsigned j, struct bset_tree *t)
{
        return __inorder_to_tree(j, t->size, t->extra);
}
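
/*
 * A worked example (added for illustration), for size = 6 - i.e. nodes
 * j = 1..5, extra = (6 - rounddown_pow_of_two(5)) << 1 = 4:
 *
 *                1
 *              /   \
 *             2     3
 *            / \
 *           4   5
 *
 * An inorder traversal visits 4 2 5 1 3, and indeed
 *   __to_inorder(4, 6, 4) == 1    __to_inorder(2, 6, 4) == 2
 *   __to_inorder(5, 6, 4) == 3    __to_inorder(1, 6, 4) == 4
 *   __to_inorder(3, 6, 4) == 5
 * with __inorder_to_tree(i, 6, 4) inverting each of these.
 */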

#if 0
void inorder_test(void)
{
        unsigned long done = 0;
        ktime_t start = ktime_get();

        for (unsigned size = 2;
             size < 65536000;
             size++) {
                unsigned extra = (size - rounddown_pow_of_two(size - 1)) << 1;
                unsigned i = 1, j = rounddown_pow_of_two(size - 1);

                if (!(size % 4096))
                        printk(KERN_NOTICE "loop %u, %llu per us\n", size,
                               done / ktime_us_delta(ktime_get(), start));

                while (1) {
                        if (__inorder_to_tree(i, size, extra) != j)
                                panic("size %10u j %10u i %10u", size, j, i);

                        if (__to_inorder(j, size, extra) != i)
                                panic("size %10u j %10u i %10u", size, j, i);

                        if (j == rounddown_pow_of_two(size) - 1)
                                break;

                        BUG_ON(inorder_prev(inorder_next(j, size), size) != j);

                        j = inorder_next(j, size);
                        i++;
                }

                done += size - 1;
        }
}
#endif

/*
 * Cacheline/offset <-> bkey pointer arithmetic:
 *
 * t->tree is a binary search tree in an array; each node corresponds to a key
 * in one cacheline in t->set (BSET_CACHELINE bytes).
 *
 * This means we don't have to store the full index of the key that a node in
 * the binary tree points to; to_inorder() gives us the cacheline, and then
 * bkey_float->m gives us the offset within that cacheline, in units of 8 bytes.
 *
 * cacheline_to_bkey() and friends abstract out all the pointer arithmetic to
 * make this work.
 *
 * To construct the bfloat for an arbitrary key we need to know what the key
 * immediately preceding it is: we have to check if the two keys differ in the
 * bits we're going to store in bkey_float->mantissa. t->prev[j] stores the size
 * of the previous key so we can walk backwards to it from t->tree[j]'s key.
 */

static struct bkey *cacheline_to_bkey(struct bset_tree *t, unsigned cacheline,
                                      unsigned offset)
{
        return ((void *) t->data) + cacheline * BSET_CACHELINE + offset * 8;
}

static unsigned bkey_to_cacheline(struct bset_tree *t, struct bkey *k)
{
        return ((void *) k - (void *) t->data) / BSET_CACHELINE;
}

static unsigned bkey_to_cacheline_offset(struct bkey *k)
{
        return ((size_t) k & (BSET_CACHELINE - 1)) / sizeof(uint64_t);
}

static struct bkey *tree_to_bkey(struct bset_tree *t, unsigned j)
{
        return cacheline_to_bkey(t, to_inorder(j, t), t->tree[j].m);
}

static struct bkey *tree_to_prev_bkey(struct bset_tree *t, unsigned j)
{
        return (void *) (((uint64_t *) tree_to_bkey(t, j)) - t->prev[j]);
}

/*
 * For the write set - the one we're currently inserting keys into - we don't
 * maintain a full search tree, we just keep a simple lookup table in t->prev.
 */
static struct bkey *table_to_bkey(struct bset_tree *t, unsigned cacheline)
{
        return cacheline_to_bkey(t, cacheline, t->prev[cacheline]);
}

static inline uint64_t shrd128(uint64_t high, uint64_t low, uint8_t shift)
{
#ifdef CONFIG_X86_64
        asm("shrd %[shift],%[high],%[low]"
            : [low] "+Rm" (low)
            : [high] "R" (high),
            [shift] "ci" (shift)
            : "cc");
#else
        low >>= shift;
        low  |= (high << 1) << (63U - shift);
#endif
        return low;
}
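
/*
 * i.e. shrd128() returns the low 64 bits of the 128 bit quantity
 * (high:low) >> shift, for 0 <= shift <= 63; for example,
 * shrd128(0x1, 0x0, 4) == 1ULL << 60. In the portable fallback,
 * (high << 1) << (63U - shift) avoids an undefined 64-bit shift when
 * shift == 0.
 */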

static inline unsigned bfloat_mantissa(const struct bkey *k,
                                       struct bkey_float *f)
{
        const uint64_t *p = &k->low - (f->exponent >> 6);
        return shrd128(p[-1], p[0], f->exponent & 63) & BKEY_MANTISSA_MASK;
}

static void make_bfloat(struct bset_tree *t, unsigned j)
{
        struct bkey_float *f = &t->tree[j];
        struct bkey *m = tree_to_bkey(t, j);
        struct bkey *p = tree_to_prev_bkey(t, j);

        struct bkey *l = is_power_of_2(j)
                ? t->data->start
                : tree_to_prev_bkey(t, j >> ffs(j));

        struct bkey *r = is_power_of_2(j + 1)
                ? node(t->data, t->data->keys - bkey_u64s(&t->end))
                : tree_to_bkey(t, j >> (ffz(j) + 1));

        BUG_ON(m < l || m > r);
        BUG_ON(bkey_next(p) != m);

        if (KEY_INODE(l) != KEY_INODE(r))
                f->exponent = fls64(KEY_INODE(r) ^ KEY_INODE(l)) + 64;
        else
                f->exponent = fls64(r->low ^ l->low);

        f->exponent = max_t(int, f->exponent - BKEY_MANTISSA_BITS, 0);

        /*
         * Setting f->exponent = 127 flags this node as failed, and causes the
         * lookup code to fall back to comparing against the original key.
         */

        if (bfloat_mantissa(m, f) != bfloat_mantissa(p, f))
                f->mantissa = bfloat_mantissa(m, f) - 1;
        else
                f->exponent = 127;
}
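
/*
 * Rough intuition (comment added for clarity): f->exponent is the bit
 * position where the keys bracketing this node first differ, shifted down
 * so that a BKEY_MANTISSA_BITS wide window ending around that bit fits in
 * f->mantissa. If even that window can't distinguish m from the key before
 * it, the node is useless and gets flagged as failed (exponent = 127)
 * instead.
 */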

static void bset_alloc_tree(struct btree *b, struct bset_tree *t)
{
        if (t != b->sets) {
                unsigned j = roundup(t[-1].size,
                                     64 / sizeof(struct bkey_float));

                t->tree = t[-1].tree + j;
                t->prev = t[-1].prev + j;
        }

        while (t < b->sets + MAX_BSETS)
                t++->size = 0;
}

static void bset_build_unwritten_tree(struct btree *b)
{
        struct bset_tree *t = b->sets + b->nsets;

        bset_alloc_tree(b, t);

        if (t->tree != b->sets->tree + bset_tree_space(b)) {
                t->prev[0] = bkey_to_cacheline_offset(t->data->start);
                t->size = 1;
        }
}

static void bset_build_written_tree(struct btree *b)
{
        struct bset_tree *t = b->sets + b->nsets;
        struct bkey *k = t->data->start;
        unsigned j, cacheline = 1;

        bset_alloc_tree(b, t);

        t->size = min_t(unsigned,
                        bkey_to_cacheline(t, end(t->data)),
                        b->sets->tree + bset_tree_space(b) - t->tree);

        if (t->size < 2) {
                t->size = 0;
                return;
        }

        t->extra = (t->size - rounddown_pow_of_two(t->size - 1)) << 1;

        /* First we figure out where the first key in each cacheline is */
        for (j = inorder_next(0, t->size);
             j;
             j = inorder_next(j, t->size)) {
                while (bkey_to_cacheline(t, k) != cacheline)
                        k = bkey_next(k);

                t->prev[j] = bkey_u64s(k);
                k = bkey_next(k);
                cacheline++;
                t->tree[j].m = bkey_to_cacheline_offset(k);
        }

        while (bkey_next(k) != end(t->data))
                k = bkey_next(k);

        t->end = *k;

        /* Then we build the tree */
        for (j = inorder_next(0, t->size);
             j;
             j = inorder_next(j, t->size))
                make_bfloat(t, j);
}

void bch_bset_fix_invalidated_key(struct btree *b, struct bkey *k)
{
        struct bset_tree *t;
        unsigned inorder, j = 1;

        for (t = b->sets; t <= &b->sets[b->nsets]; t++)
                if (k < end(t->data))
                        goto found_set;

        BUG();
found_set:
        if (!t->size || !bset_written(b, t))
                return;

        inorder = bkey_to_cacheline(t, k);

        if (k == t->data->start)
                goto fix_left;

        if (bkey_next(k) == end(t->data)) {
                t->end = *k;
                goto fix_right;
        }

        j = inorder_to_tree(inorder, t);

        if (j &&
            j < t->size &&
            k == tree_to_bkey(t, j))
fix_left:       do {
                        make_bfloat(t, j);
                        j = j * 2;
                } while (j < t->size);

        j = inorder_to_tree(inorder + 1, t);

        if (j &&
            j < t->size &&
            k == tree_to_prev_bkey(t, j))
fix_right:      do {
                        make_bfloat(t, j);
                        j = j * 2 + 1;
                } while (j < t->size);
}

void bch_bset_fix_lookup_table(struct btree *b, struct bkey *k)
{
        struct bset_tree *t = &b->sets[b->nsets];
        unsigned shift = bkey_u64s(k);
        unsigned j = bkey_to_cacheline(t, k);

        /* We're getting called from btree_split() or btree_gc, just bail out */
        if (!t->size)
                return;

        /* k is the key we just inserted; we need to find the entry in the
         * lookup table for the first key that is strictly greater than k:
         * it's either k's cacheline or the next one
         */
        if (j < t->size &&
            table_to_bkey(t, j) <= k)
                j++;

        /* Adjust all the lookup table entries, and find a new key for any that
         * have gotten too big
         */
        for (; j < t->size; j++) {
                t->prev[j] += shift;

                if (t->prev[j] > 7) {
                        k = table_to_bkey(t, j - 1);

                        while (k < cacheline_to_bkey(t, j, 0))
                                k = bkey_next(k);

                        t->prev[j] = bkey_to_cacheline_offset(k);
                }
        }

        if (t->size == b->sets->tree + bset_tree_space(b) - t->tree)
                return;

        /* Possibly add a new entry to the end of the lookup table */

        for (k = table_to_bkey(t, t->size - 1);
             k != end(t->data);
             k = bkey_next(k))
                if (t->size == bkey_to_cacheline(t, k)) {
                        t->prev[t->size] = bkey_to_cacheline_offset(k);
                        t->size++;
                }
}

void bch_bset_init_next(struct btree *b)
{
        struct bset *i = write_block(b);

        if (i != b->sets[0].data) {
                b->sets[++b->nsets].data = i;
                i->seq = b->sets[0].data->seq;
        } else
                get_random_bytes(&i->seq, sizeof(uint64_t));

        i->magic        = bset_magic(b->c);
        i->version      = 0;
        i->keys         = 0;

        bset_build_unwritten_tree(b);
}

struct bset_search_iter {
        struct bkey *l, *r;
};

static struct bset_search_iter bset_search_write_set(struct btree *b,
                                                     struct bset_tree *t,
                                                     const struct bkey *search)
{
        unsigned li = 0, ri = t->size;

        BUG_ON(!b->nsets &&
               t->size < bkey_to_cacheline(t, end(t->data)));

        while (li + 1 != ri) {
                unsigned m = (li + ri) >> 1;

                if (bkey_cmp(table_to_bkey(t, m), search) > 0)
                        ri = m;
                else
                        li = m;
        }

        return (struct bset_search_iter) {
                table_to_bkey(t, li),
                ri < t->size ? table_to_bkey(t, ri) : end(t->data)
        };
}

static struct bset_search_iter bset_search_tree(struct btree *b,
                                                struct bset_tree *t,
                                                const struct bkey *search)
{
        struct bkey *l, *r;
        struct bkey_float *f;
        unsigned inorder, j, n = 1;

        do {
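                /*
                 * Branchless prefetch of the node ~4 levels down
                 * (comment added for clarity): the mask below is all
                 * ones when p < t->size and zero otherwise, so an out
                 * of range index collapses to 0 - a harmless prefetch
                 * of the start of the array - instead of reading past
                 * the end of t->tree.
                 */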
                unsigned p = n << 4;
                p &= ((int) (p - t->size)) >> 31;

                prefetch(&t->tree[p]);

                j = n;
                f = &t->tree[j];

                /*
                 * n = (f->mantissa > bfloat_mantissa())
                 *      ? j * 2
                 *      : j * 2 + 1;
                 *
                 * We need to subtract 1 from f->mantissa for the sign bit trick
                 * to work - that's done in make_bfloat()
                 */
                if (likely(f->exponent != 127))
                        n = j * 2 + (((unsigned)
                                      (f->mantissa -
                                       bfloat_mantissa(search, f))) >> 31);
                else
                        n = (bkey_cmp(tree_to_bkey(t, j), search) > 0)
                                ? j * 2
                                : j * 2 + 1;
        } while (n < t->size);

        inorder = to_inorder(j, t);

        /*
         * n would have been the node we recursed to - the low bit tells us if
         * we recursed left or recursed right.
         */
        if (n & 1) {
                l = cacheline_to_bkey(t, inorder, f->m);

                if (++inorder != t->size) {
                        f = &t->tree[inorder_next(j, t->size)];
                        r = cacheline_to_bkey(t, inorder, f->m);
                } else
                        r = end(t->data);
        } else {
                r = cacheline_to_bkey(t, inorder, f->m);

                if (--inorder) {
                        f = &t->tree[inorder_prev(j, t->size)];
                        l = cacheline_to_bkey(t, inorder, f->m);
                } else
                        l = t->data->start;
        }

        return (struct bset_search_iter) {l, r};
}

struct bkey *__bch_bset_search(struct btree *b, struct bset_tree *t,
                               const struct bkey *search)
{
        struct bset_search_iter i;

        /*
         * First we search for the right cacheline, then we do a linear search
         * within that cacheline.
         *
         * To find the cacheline, there are three possibilities:
         *  * The set is too small to have a search tree, so we just do a linear
         *    search over the whole set.
         *  * The set is the one we're currently inserting into; keeping a full
         *    auxiliary search tree up to date would be too expensive, so we
         *    use a much simpler lookup table to do a binary search -
         *    bset_search_write_set().
         *  * Or we use the auxiliary search tree we constructed earlier -
         *    bset_search_tree()
         */

        if (unlikely(!t->size)) {
                i.l = t->data->start;
                i.r = end(t->data);
        } else if (bset_written(b, t)) {
                /*
                 * Each node in the auxiliary search tree covers a certain range
                 * of bits, and keys above and below the set it covers might
                 * differ outside those bits - so we have to special case the
                 * start and end - handle that here:
                 */

                if (unlikely(bkey_cmp(search, &t->end) >= 0))
                        return end(t->data);

                if (unlikely(bkey_cmp(search, t->data->start) < 0))
                        return t->data->start;

                i = bset_search_tree(b, t, search);
        } else
                i = bset_search_write_set(b, t, search);

#ifdef CONFIG_BCACHE_EDEBUG
        BUG_ON(bset_written(b, t) &&
               i.l != t->data->start &&
               bkey_cmp(tree_to_prev_bkey(t,
                  inorder_to_tree(bkey_to_cacheline(t, i.l), t)),
                        search) > 0);

        BUG_ON(i.r != end(t->data) &&
               bkey_cmp(i.r, search) <= 0);
#endif

        while (likely(i.l != i.r) &&
               bkey_cmp(i.l, search) <= 0)
                i.l = bkey_next(i.l);

        return i.l;
}

/* Btree iterator */

static inline bool btree_iter_cmp(struct btree_iter_set l,
                                  struct btree_iter_set r)
{
        int64_t c = bkey_cmp(&START_KEY(l.k), &START_KEY(r.k));

        return c ? c > 0 : l.k < r.k;
}

static inline bool btree_iter_end(struct btree_iter *iter)
{
        return !iter->used;
}

void bch_btree_iter_push(struct btree_iter *iter, struct bkey *k,
                         struct bkey *end)
{
        if (k != end)
                BUG_ON(!heap_add(iter,
                                 ((struct btree_iter_set) { k, end }),
                                 btree_iter_cmp));
}

struct bkey *__bch_btree_iter_init(struct btree *b, struct btree_iter *iter,
                                   struct bkey *search, struct bset_tree *start)
{
        struct bkey *ret = NULL;

        iter->size = ARRAY_SIZE(iter->data);
        iter->used = 0;

        for (; start <= &b->sets[b->nsets]; start++) {
                ret = bch_bset_search(b, start, search);
                bch_btree_iter_push(iter, ret, end(start->data));
        }

        return ret;
}

struct bkey *bch_btree_iter_next(struct btree_iter *iter)
{
        struct btree_iter_set unused;
        struct bkey *ret = NULL;

        if (!btree_iter_end(iter)) {
                ret = iter->data->k;
                iter->data->k = bkey_next(iter->data->k);

                if (iter->data->k > iter->data->end) {
                        WARN_ONCE(1, "bset was corrupt!\n");
                        iter->data->k = iter->data->end;
                }

                if (iter->data->k == iter->data->end)
                        heap_pop(iter, unused, btree_iter_cmp);
                else
                        heap_sift(iter, 0, btree_iter_cmp);
        }

        return ret;
}

struct bkey *bch_btree_iter_next_filter(struct btree_iter *iter,
                                        struct btree *b, ptr_filter_fn fn)
{
        struct bkey *ret;

        do {
                ret = bch_btree_iter_next(iter);
        } while (ret && fn(b, ret));

        return ret;
}

struct bkey *bch_next_recurse_key(struct btree *b, struct bkey *search)
{
        struct btree_iter iter;

        bch_btree_iter_init(b, &iter, search);
        return bch_btree_iter_next_filter(&iter, b, bch_ptr_bad);
}

/* Mergesort */

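/*
 * Used when sorting a leaf node (comment added for clarity): extents in
 * different bsets may overlap, so before the top key of the heap is consumed
 * we trim away the overlaps - cutting the front off keys in the other sets
 * when the top key takes precedence, or the back off the top key otherwise.
 * Precedence is by pointer comparison; the key at the higher address appears
 * to be the more recently inserted one.
 */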
static void btree_sort_fixup(struct btree_iter *iter)
{
        while (iter->used > 1) {
                struct btree_iter_set *top = iter->data, *i = top + 1;
                struct bkey *k;

                if (iter->used > 2 &&
                    btree_iter_cmp(i[0], i[1]))
                        i++;

                for (k = i->k;
                     k != i->end && bkey_cmp(top->k, &START_KEY(k)) > 0;
                     k = bkey_next(k))
                        if (top->k > i->k)
                                __bch_cut_front(top->k, k);
                        else if (KEY_SIZE(k))
                                bch_cut_back(&START_KEY(k), top->k);

                if (top->k < i->k || k == i->k)
                        break;

                heap_sift(iter, i - top, btree_iter_cmp);
        }
}

static void btree_mergesort(struct btree *b, struct bset *out,
                            struct btree_iter *iter,
                            bool fixup, bool remove_stale)
{
        struct bkey *k, *last = NULL;
        bool (*bad)(struct btree *, const struct bkey *) = remove_stale
                ? bch_ptr_bad
                : bch_ptr_invalid;

        while (!btree_iter_end(iter)) {
                if (fixup && !b->level)
                        btree_sort_fixup(iter);

                k = bch_btree_iter_next(iter);
                if (bad(b, k))
                        continue;

                if (!last) {
                        last = out->start;
                        bkey_copy(last, k);
                } else if (b->level ||
                           !bch_bkey_try_merge(b, last, k)) {
                        last = bkey_next(last);
                        bkey_copy(last, k);
                }
        }

        out->keys = last ? (uint64_t *) bkey_next(last) - out->d : 0;

        pr_debug("sorted %i keys", out->keys);
        bch_check_key_order(b, out);
}

static void __btree_sort(struct btree *b, struct btree_iter *iter,
                         unsigned start, unsigned order, bool fixup)
{
        uint64_t start_time;
        bool remove_stale = !b->written;
        struct bset *out = (void *) __get_free_pages(__GFP_NOWARN|GFP_NOIO,
                                                     order);
        if (!out) {
                mutex_lock(&b->c->sort_lock);
                out = b->c->sort;
                order = ilog2(bucket_pages(b->c));
        }

        start_time = local_clock();

        btree_mergesort(b, out, iter, fixup, remove_stale);
        b->nsets = start;

        if (!fixup && !start && b->written)
                bch_btree_verify(b, out);

        if (!start && order == b->page_order) {
                /*
                 * Our temporary buffer is the same size as the btree node's
                 * buffer, we can just swap buffers instead of doing a big
                 * memcpy()
                 */

                out->magic      = bset_magic(b->c);
                out->seq        = b->sets[0].data->seq;
                out->version    = b->sets[0].data->version;
                swap(out, b->sets[0].data);

                if (b->c->sort == b->sets[0].data)
                        b->c->sort = out;
        } else {
                b->sets[start].data->keys = out->keys;
                memcpy(b->sets[start].data->start, out->start,
                       (void *) end(out) - (void *) out->start);
        }

        if (out == b->c->sort)
                mutex_unlock(&b->c->sort_lock);
        else
                free_pages((unsigned long) out, order);

        if (b->written)
                bset_build_written_tree(b);

        if (!start) {
                spin_lock(&b->c->sort_time_lock);
                bch_time_stats_update(&b->c->sort_time, start_time);
                spin_unlock(&b->c->sort_time_lock);
        }
}

void bch_btree_sort_partial(struct btree *b, unsigned start)
{
        size_t oldsize = 0, order = b->page_order, keys = 0;
        struct btree_iter iter;

        __bch_btree_iter_init(b, &iter, NULL, &b->sets[start]);

        BUG_ON(b->sets[b->nsets].data == write_block(b) &&
               (b->sets[b->nsets].size || b->nsets));

        if (b->written)
                oldsize = bch_count_data(b);

        if (start) {
                unsigned i;

                for (i = start; i <= b->nsets; i++)
                        keys += b->sets[i].data->keys;

                order = roundup_pow_of_two(__set_bytes(b->sets->data,
                                                       keys)) / PAGE_SIZE;
                if (order)
                        order = ilog2(order);
        }

        __btree_sort(b, &iter, start, order, false);

        EBUG_ON(b->written && bch_count_data(b) != oldsize);
}

void bch_btree_sort_and_fix_extents(struct btree *b, struct btree_iter *iter)
{
        BUG_ON(!b->written);
        __btree_sort(b, iter, 0, b->page_order, true);
}

void bch_btree_sort_into(struct btree *b, struct btree *new)
{
        uint64_t start_time = local_clock();
        struct btree_iter iter;

        bch_btree_iter_init(b, &iter, NULL);

        btree_mergesort(b, new->sets->data, &iter, false, true);

        spin_lock(&b->c->sort_time_lock);
        bch_time_stats_update(&b->c->sort_time, start_time);
        spin_unlock(&b->c->sort_time_lock);

        bkey_copy_key(&new->key, &b->key);
        new->sets->size = 0;
}

#define SORT_CRIT       (4096 / sizeof(uint64_t))

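/*
 * Lazy sorting heuristic (comment added for clarity): walk the sets from
 * newest to oldest with an exponentially growing size threshold (crit,
 * scaled by sort_crit_factor at each step) and resort from the first set
 * that's small enough for the merge to be cheap. Internal nodes are always
 * fully sorted, and so is a node that would otherwise overflow MAX_BSETS.
 */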
void bch_btree_sort_lazy(struct btree *b)
{
        unsigned crit = SORT_CRIT;
        int i;

        /* Don't sort if nothing to do */
        if (!b->nsets)
                goto out;

        /* If not a leaf node, always sort */
        if (b->level) {
                bch_btree_sort(b);
                return;
        }

        for (i = b->nsets - 1; i >= 0; --i) {
                crit *= b->c->sort_crit_factor;

                if (b->sets[i].data->keys < crit) {
                        bch_btree_sort_partial(b, i);
                        return;
                }
        }

        /* Sort if we'd overflow */
        if (b->nsets + 1 == MAX_BSETS) {
                bch_btree_sort(b);
                return;
        }

out:
        bset_build_written_tree(b);
}

/* Sysfs stuff */

struct bset_stats {
        size_t nodes;
        size_t sets_written, sets_unwritten;
        size_t bytes_written, bytes_unwritten;
        size_t floats, failed;
};

static int bch_btree_bset_stats(struct btree *b, struct btree_op *op,
                                struct bset_stats *stats)
{
        struct bkey *k;
        unsigned i;

        stats->nodes++;

        for (i = 0; i <= b->nsets; i++) {
                struct bset_tree *t = &b->sets[i];
                size_t bytes = t->data->keys * sizeof(uint64_t);
                size_t j;

                if (bset_written(b, t)) {
                        stats->sets_written++;
                        stats->bytes_written += bytes;

                        stats->floats += t->size - 1;

                        for (j = 1; j < t->size; j++)
                                if (t->tree[j].exponent == 127)
                                        stats->failed++;
                } else {
                        stats->sets_unwritten++;
                        stats->bytes_unwritten += bytes;
                }
        }

        if (b->level) {
                struct btree_iter iter;

                for_each_key_filter(b, k, &iter, bch_ptr_bad) {
                        int ret = btree(bset_stats, k, b, op, stats);
                        if (ret)
                                return ret;
                }
        }

        return 0;
}

int bch_bset_print_stats(struct cache_set *c, char *buf)
{
        struct btree_op op;
        struct bset_stats t;
        int ret;

        bch_btree_op_init_stack(&op);
        memset(&t, 0, sizeof(struct bset_stats));

        ret = btree_root(bset_stats, c, &op, &t);
        if (ret)
                return ret;

        return snprintf(buf, PAGE_SIZE,
                        "btree nodes:           %zu\n"
                        "written sets:          %zu\n"
                        "unwritten sets:        %zu\n"
                        "written key bytes:     %zu\n"
                        "unwritten key bytes:   %zu\n"
                        "floats:                %zu\n"
                        "failed:                %zu\n",
                        t.nodes,
                        t.sets_written, t.sets_unwritten,
                        t.bytes_written, t.bytes_unwritten,
                        t.floats, t.failed);
}