linux/drivers/md/bcache/bset.c
// SPDX-License-Identifier: GPL-2.0
/*
 * Code for working with individual keys, and sorted sets of keys within a
 * btree node
 *
 * Copyright 2012 Google, Inc.
 */

#define pr_fmt(fmt) "bcache: %s() " fmt "\n", __func__

#include "util.h"
#include "bset.h"

#include <linux/console.h>
#include <linux/sched/clock.h>
#include <linux/random.h>
#include <linux/prefetch.h>

#ifdef CONFIG_BCACHE_DEBUG

void bch_dump_bset(struct btree_keys *b, struct bset *i, unsigned set)
{
        struct bkey *k, *next;

        for (k = i->start; k < bset_bkey_last(i); k = next) {
                next = bkey_next(k);

                printk(KERN_ERR "block %u key %u/%u: ", set,
                       (unsigned) ((u64 *) k - i->d), i->keys);

                if (b->ops->key_dump)
                        b->ops->key_dump(b, k);
                else
                        printk("%llu:%llu\n", KEY_INODE(k), KEY_OFFSET(k));

                if (next < bset_bkey_last(i) &&
                    bkey_cmp(k, b->ops->is_extents ?
                             &START_KEY(next) : next) > 0)
                        printk(KERN_ERR "Key skipped backwards\n");
        }
}

void bch_dump_bucket(struct btree_keys *b)
{
        unsigned i;

        console_lock();
        for (i = 0; i <= b->nsets; i++)
                bch_dump_bset(b, b->set[i].data,
                              bset_sector_offset(b, b->set[i].data));
        console_unlock();
}

int __bch_count_data(struct btree_keys *b)
{
        unsigned ret = 0;
        struct btree_iter iter;
        struct bkey *k;

        if (b->ops->is_extents)
                for_each_key(b, k, &iter)
                        ret += KEY_SIZE(k);
        return ret;
}

void __bch_check_keys(struct btree_keys *b, const char *fmt, ...)
{
        va_list args;
        struct bkey *k, *p = NULL;
        struct btree_iter iter;
        const char *err;

        for_each_key(b, k, &iter) {
                if (b->ops->is_extents) {
                        err = "Keys out of order";
                        if (p && bkey_cmp(&START_KEY(p), &START_KEY(k)) > 0)
                                goto bug;

                        if (bch_ptr_invalid(b, k))
                                continue;

                        err = "Overlapping keys";
                        if (p && bkey_cmp(p, &START_KEY(k)) > 0)
                                goto bug;
                } else {
                        if (bch_ptr_bad(b, k))
                                continue;

                        err = "Duplicate keys";
                        if (p && !bkey_cmp(p, k))
                                goto bug;
                }
                p = k;
        }
#if 0
        err = "Key larger than btree node key";
        if (p && bkey_cmp(p, &b->key) > 0)
                goto bug;
#endif
        return;
bug:
        bch_dump_bucket(b);

        va_start(args, fmt);
        vprintk(fmt, args);
        va_end(args);

        panic("bch_check_keys error:  %s:\n", err);
}

static void bch_btree_iter_next_check(struct btree_iter *iter)
{
        struct bkey *k = iter->data->k, *next = bkey_next(k);

        if (next < iter->data->end &&
            bkey_cmp(k, iter->b->ops->is_extents ?
                     &START_KEY(next) : next) > 0) {
                bch_dump_bucket(iter->b);
                panic("Key skipped backwards\n");
        }
}

#else

static inline void bch_btree_iter_next_check(struct btree_iter *iter) {}

#endif

/* Keylists */

int __bch_keylist_realloc(struct keylist *l, unsigned u64s)
{
        size_t oldsize = bch_keylist_nkeys(l);
        size_t newsize = oldsize + u64s;
        uint64_t *old_keys = l->keys_p == l->inline_keys ? NULL : l->keys_p;
        uint64_t *new_keys;

        newsize = roundup_pow_of_two(newsize);

        if (newsize <= KEYLIST_INLINE ||
            roundup_pow_of_two(oldsize) == newsize)
                return 0;

        new_keys = krealloc(old_keys, sizeof(uint64_t) * newsize, GFP_NOIO);

        if (!new_keys)
                return -ENOMEM;

        if (!old_keys)
                memcpy(new_keys, l->inline_keys, sizeof(uint64_t) * oldsize);

        l->keys_p = new_keys;
        l->top_p = new_keys + oldsize;

        return 0;
}

struct bkey *bch_keylist_pop(struct keylist *l)
{
        struct bkey *k = l->keys;

        if (k == l->top)
                return NULL;

        while (bkey_next(k) != l->top)
                k = bkey_next(k);

        return l->top = k;
}

void bch_keylist_pop_front(struct keylist *l)
{
        l->top_p -= bkey_u64s(l->keys);

        memmove(l->keys,
                bkey_next(l->keys),
                bch_keylist_bytes(l));
}

/* Key/pointer manipulation */

void bch_bkey_copy_single_ptr(struct bkey *dest, const struct bkey *src,
                              unsigned i)
{
        BUG_ON(i > KEY_PTRS(src));

        /* Only copy the header, key, and one pointer. */
        memcpy(dest, src, 2 * sizeof(uint64_t));
        dest->ptr[0] = src->ptr[i];
        SET_KEY_PTRS(dest, 1);
        /* We didn't copy the checksum so clear that bit. */
        SET_KEY_CSUM(dest, 0);
}

bool __bch_cut_front(const struct bkey *where, struct bkey *k)
{
        unsigned i, len = 0;

        if (bkey_cmp(where, &START_KEY(k)) <= 0)
                return false;

        if (bkey_cmp(where, k) < 0)
                len = KEY_OFFSET(k) - KEY_OFFSET(where);
        else
                bkey_copy_key(k, where);

        for (i = 0; i < KEY_PTRS(k); i++)
                SET_PTR_OFFSET(k, i, PTR_OFFSET(k, i) + KEY_SIZE(k) - len);

        BUG_ON(len > KEY_SIZE(k));
        SET_KEY_SIZE(k, len);
        return true;
}
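
/*
 * Worked example: for an extent k covering sectors [80, 100) (KEY_OFFSET 100,
 * KEY_SIZE 20), cutting at where with offset 90 computes len = 100 - 90 = 10,
 * advances each PTR_OFFSET by KEY_SIZE(k) - len = 10 to skip the trimmed
 * sectors, and leaves k covering [90, 100).
 */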

bool __bch_cut_back(const struct bkey *where, struct bkey *k)
{
        unsigned len = 0;

        if (bkey_cmp(where, k) >= 0)
                return false;

        BUG_ON(KEY_INODE(where) != KEY_INODE(k));

        if (bkey_cmp(where, &START_KEY(k)) > 0)
                len = KEY_OFFSET(where) - KEY_START(k);

        bkey_copy_key(k, where);

        BUG_ON(len > KEY_SIZE(k));
        SET_KEY_SIZE(k, len);
        return true;
}
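
/*
 * Worked example: for the same k covering [80, 100), cutting the back at
 * where with offset 90 computes len = 90 - 80 = 10 and copies where's key
 * into k, leaving k covering [80, 90); the pointers still address the start
 * of the extent, so no PTR_OFFSET adjustment is needed.
 */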

/* Auxiliary search trees */

/* 32 bits total: */
#define BKEY_MID_BITS           3
#define BKEY_EXPONENT_BITS      7
#define BKEY_MANTISSA_BITS      (32 - BKEY_MID_BITS - BKEY_EXPONENT_BITS)
#define BKEY_MANTISSA_MASK      ((1 << BKEY_MANTISSA_BITS) - 1)

struct bkey_float {
        unsigned        exponent:BKEY_EXPONENT_BITS;
        unsigned        m:BKEY_MID_BITS;
        unsigned        mantissa:BKEY_MANTISSA_BITS;
} __packed;
/*
 * BSET_CACHELINE was originally intended to match the hardware cacheline size -
 * it used to be 64, but I realized the lookup code would touch slightly less
 * memory if it was 128.
 *
 * It defines the number of bytes (in struct bset) per struct bkey_float in
 * the auxiliary search tree - when we're done searching the bkey_float tree we
 * have this many bytes left that we do a linear search over.
 *
 * Since (after level 5) every level of the bset_tree is on a new cacheline,
 * we're touching one fewer cacheline in the bset tree in exchange for one more
 * cacheline in the linear search - but the linear search might stop before it
 * gets to the second cacheline.
 */

#define BSET_CACHELINE          128

/* Space required for the btree node keys */
static inline size_t btree_keys_bytes(struct btree_keys *b)
{
        return PAGE_SIZE << b->page_order;
}

static inline size_t btree_keys_cachelines(struct btree_keys *b)
{
        return btree_keys_bytes(b) / BSET_CACHELINE;
}

/* Space required for the auxiliary search trees */
static inline size_t bset_tree_bytes(struct btree_keys *b)
{
        return btree_keys_cachelines(b) * sizeof(struct bkey_float);
}

/* Space required for the prev pointers */
static inline size_t bset_prev_bytes(struct btree_keys *b)
{
        return btree_keys_cachelines(b) * sizeof(uint8_t);
}
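
/*
 * Worked example: with 4 KiB pages and page_order == 2, a btree node buffer
 * is 16 KiB, so btree_keys_cachelines() == 16384 / 128 == 128; the auxiliary
 * search tree then needs 128 * sizeof(struct bkey_float) == 512 bytes and the
 * prev table 128 bytes.
 */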

/* Memory allocation */

void bch_btree_keys_free(struct btree_keys *b)
{
        struct bset_tree *t = b->set;

        if (bset_prev_bytes(b) < PAGE_SIZE)
                kfree(t->prev);
        else
                free_pages((unsigned long) t->prev,
                           get_order(bset_prev_bytes(b)));

        if (bset_tree_bytes(b) < PAGE_SIZE)
                kfree(t->tree);
        else
                free_pages((unsigned long) t->tree,
                           get_order(bset_tree_bytes(b)));

        free_pages((unsigned long) t->data, b->page_order);

        t->prev = NULL;
        t->tree = NULL;
        t->data = NULL;
}
EXPORT_SYMBOL(bch_btree_keys_free);

int bch_btree_keys_alloc(struct btree_keys *b, unsigned page_order, gfp_t gfp)
{
        struct bset_tree *t = b->set;

        BUG_ON(t->data);

        b->page_order = page_order;

        t->data = (void *) __get_free_pages(gfp, b->page_order);
        if (!t->data)
                goto err;

        t->tree = bset_tree_bytes(b) < PAGE_SIZE
                ? kmalloc(bset_tree_bytes(b), gfp)
                : (void *) __get_free_pages(gfp, get_order(bset_tree_bytes(b)));
        if (!t->tree)
                goto err;

        t->prev = bset_prev_bytes(b) < PAGE_SIZE
                ? kmalloc(bset_prev_bytes(b), gfp)
                : (void *) __get_free_pages(gfp, get_order(bset_prev_bytes(b)));
        if (!t->prev)
                goto err;

        return 0;
err:
        bch_btree_keys_free(b);
        return -ENOMEM;
}
EXPORT_SYMBOL(bch_btree_keys_alloc);

void bch_btree_keys_init(struct btree_keys *b, const struct btree_keys_ops *ops,
                         bool *expensive_debug_checks)
{
        unsigned i;

        b->ops = ops;
        b->expensive_debug_checks = expensive_debug_checks;
        b->nsets = 0;
        b->last_set_unwritten = 0;

        /* XXX: shouldn't be needed */
        for (i = 0; i < MAX_BSETS; i++)
                b->set[i].size = 0;
        /*
         * Second loop starts at 1 because b->set[0].data is the memory we
         * allocated
         */
        for (i = 1; i < MAX_BSETS; i++)
                b->set[i].data = NULL;
}
EXPORT_SYMBOL(bch_btree_keys_init);

/* Binary tree stuff for auxiliary search trees */

static unsigned inorder_next(unsigned j, unsigned size)
{
        if (j * 2 + 1 < size) {
                j = j * 2 + 1;

                while (j * 2 < size)
                        j *= 2;
        } else
                j >>= ffz(j) + 1;

        return j;
}

static unsigned inorder_prev(unsigned j, unsigned size)
{
        if (j * 2 < size) {
                j = j * 2;

                while (j * 2 + 1 < size)
                        j = j * 2 + 1;
        } else
                j >>= ffs(j);

        return j;
}

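/*
 * Worked example: for size == 6 the tree occupies array indices 1..5;
 * starting from inorder_next(0, 6) the walk visits 4, 2, 5, 1, 3 and then
 * returns 0, and inorder_prev() steps through the same sequence in reverse.
 */
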
/* I have no idea why this code works... and I'm the one who wrote it
 *
 * However, I do know what it does:
 * Given a binary tree constructed in an array (i.e. how you normally implement
 * a heap), it converts a node in the tree - referenced by array index - to the
 * index it would have if you did an inorder traversal.
 *
 * Also tested for every j and size up to somewhere around 6 million.
 *
 * The binary tree starts at array index 1, not 0
 * extra is a function of size:
 *   extra = (size - rounddown_pow_of_two(size - 1)) << 1;
 */
static unsigned __to_inorder(unsigned j, unsigned size, unsigned extra)
{
        unsigned b = fls(j);
        unsigned shift = fls(size - 1) - b;

        j  ^= 1U << (b - 1);
        j <<= 1;
        j  |= 1;
        j <<= shift;

        if (j > extra)
                j -= (j - extra) >> 1;

        return j;
}
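
/*
 * Worked example: for size == 6, extra == (6 - 4) << 1 == 4, and
 * __to_inorder() maps j = 1, 2, 3, 4, 5 to 4, 2, 5, 1, 3 - exactly the
 * positions those nodes have in the inorder walk above. __inorder_to_tree()
 * below computes the inverse mapping.
 */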

static unsigned to_inorder(unsigned j, struct bset_tree *t)
{
        return __to_inorder(j, t->size, t->extra);
}

static unsigned __inorder_to_tree(unsigned j, unsigned size, unsigned extra)
{
        unsigned shift;

        if (j > extra)
                j += j - extra;

        shift = ffs(j);

        j >>= shift;
        j  |= roundup_pow_of_two(size) >> shift;

        return j;
}

static unsigned inorder_to_tree(unsigned j, struct bset_tree *t)
{
        return __inorder_to_tree(j, t->size, t->extra);
}

#if 0
void inorder_test(void)
{
        unsigned long done = 0;
        ktime_t start = ktime_get();

        for (unsigned size = 2;
             size < 65536000;
             size++) {
                unsigned extra = (size - rounddown_pow_of_two(size - 1)) << 1;
                unsigned i = 1, j = rounddown_pow_of_two(size - 1);

                if (!(size % 4096))
                        printk(KERN_NOTICE "loop %u, %llu per us\n", size,
                               done / ktime_us_delta(ktime_get(), start));

                while (1) {
                        if (__inorder_to_tree(i, size, extra) != j)
                                panic("size %10u j %10u i %10u", size, j, i);

                        if (__to_inorder(j, size, extra) != i)
                                panic("size %10u j %10u i %10u", size, j, i);

                        if (j == rounddown_pow_of_two(size) - 1)
                                break;

                        BUG_ON(inorder_prev(inorder_next(j, size), size) != j);

                        j = inorder_next(j, size);
                        i++;
                }

                done += size - 1;
        }
}
#endif

/*
 * Cacheline/offset <-> bkey pointer arithmetic:
 *
 * t->tree is a binary search tree in an array; each node corresponds to a key
 * in one cacheline in t->data (BSET_CACHELINE bytes).
 *
 * This means we don't have to store the full index of the key that a node in
 * the binary tree points to; to_inorder() gives us the cacheline, and then
 * bkey_float->m gives us the offset within that cacheline, in units of 8 bytes.
 *
 * cacheline_to_bkey() and friends abstract out all the pointer arithmetic to
 * make this work.
 *
 * To construct the bfloat for an arbitrary key we need to know what the key
 * immediately preceding it is: we have to check if the two keys differ in the
 * bits we're going to store in bkey_float->mantissa. t->prev[j] stores the size
 * of the previous key so we can walk backwards to it from t->tree[j]'s key.
 */

static struct bkey *cacheline_to_bkey(struct bset_tree *t, unsigned cacheline,
                                      unsigned offset)
{
        return ((void *) t->data) + cacheline * BSET_CACHELINE + offset * 8;
}

static unsigned bkey_to_cacheline(struct bset_tree *t, struct bkey *k)
{
        return ((void *) k - (void *) t->data) / BSET_CACHELINE;
}

static unsigned bkey_to_cacheline_offset(struct bset_tree *t,
                                         unsigned cacheline,
                                         struct bkey *k)
{
        return (u64 *) k - (u64 *) cacheline_to_bkey(t, cacheline, 0);
}

static struct bkey *tree_to_bkey(struct bset_tree *t, unsigned j)
{
        return cacheline_to_bkey(t, to_inorder(j, t), t->tree[j].m);
}

static struct bkey *tree_to_prev_bkey(struct bset_tree *t, unsigned j)
{
        return (void *) (((uint64_t *) tree_to_bkey(t, j)) - t->prev[j]);
}
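
/*
 * Illustration only (hypothetical helper, not part of the original file):
 * round-tripping the arithmetic above for a written set and a valid tree
 * index j recovers the cacheline and offset stored in the bkey_float.
 */
#if 0
static void cacheline_arith_example(struct bset_tree *t, unsigned j)
{
        struct bkey *m = tree_to_bkey(t, j);

        /* to_inorder() recovers the cacheline the key lives in... */
        BUG_ON(bkey_to_cacheline(t, m) != to_inorder(j, t));

        /* ...and bkey_to_cacheline_offset() its 8-byte offset, i.e. m */
        BUG_ON(bkey_to_cacheline_offset(t, to_inorder(j, t), m) !=
               t->tree[j].m);
}
#endif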

/*
 * For the write set - the one we're currently inserting keys into - we don't
 * maintain a full search tree; instead we keep a simple lookup table in
 * t->prev.
 */
static struct bkey *table_to_bkey(struct bset_tree *t, unsigned cacheline)
{
        return cacheline_to_bkey(t, cacheline, t->prev[cacheline]);
}

static inline uint64_t shrd128(uint64_t high, uint64_t low, uint8_t shift)
{
        low >>= shift;
        low  |= (high << 1) << (63U - shift);
        return low;
}
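
/*
 * e.g. shrd128(0x1, 0x0, 60) == 0x10: the 128-bit value high:low is 2^64, and
 * shifting it right by 60 leaves 2^4. Splitting the high-word shift into
 * (high << 1) << (63 - shift) keeps every shift count below 64, so shift == 0
 * stays well defined (callers mask shift to 0..63).
 */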

static inline unsigned bfloat_mantissa(const struct bkey *k,
                                       struct bkey_float *f)
{
        const uint64_t *p = &k->low - (f->exponent >> 6);
        return shrd128(p[-1], p[0], f->exponent & 63) & BKEY_MANTISSA_MASK;
}

static void make_bfloat(struct bset_tree *t, unsigned j)
{
        struct bkey_float *f = &t->tree[j];
        struct bkey *m = tree_to_bkey(t, j);
        struct bkey *p = tree_to_prev_bkey(t, j);

        struct bkey *l = is_power_of_2(j)
                ? t->data->start
                : tree_to_prev_bkey(t, j >> ffs(j));

        struct bkey *r = is_power_of_2(j + 1)
                ? bset_bkey_idx(t->data, t->data->keys - bkey_u64s(&t->end))
                : tree_to_bkey(t, j >> (ffz(j) + 1));

        BUG_ON(m < l || m > r);
        BUG_ON(bkey_next(p) != m);

        if (KEY_INODE(l) != KEY_INODE(r))
                f->exponent = fls64(KEY_INODE(r) ^ KEY_INODE(l)) + 64;
        else
                f->exponent = fls64(r->low ^ l->low);

        f->exponent = max_t(int, f->exponent - BKEY_MANTISSA_BITS, 0);

        /*
         * Setting f->exponent = 127 flags this node as failed, and causes the
         * lookup code to fall back to comparing against the original key.
         */

        if (bfloat_mantissa(m, f) != bfloat_mantissa(p, f))
                f->mantissa = bfloat_mantissa(m, f) - 1;
        else
                f->exponent = 127;
}

static void bset_alloc_tree(struct btree_keys *b, struct bset_tree *t)
{
        if (t != b->set) {
                unsigned j = roundup(t[-1].size,
                                     64 / sizeof(struct bkey_float));

                t->tree = t[-1].tree + j;
                t->prev = t[-1].prev + j;
        }

        while (t < b->set + MAX_BSETS)
                t++->size = 0;
}

static void bch_bset_build_unwritten_tree(struct btree_keys *b)
{
        struct bset_tree *t = bset_tree_last(b);

        BUG_ON(b->last_set_unwritten);
        b->last_set_unwritten = 1;

        bset_alloc_tree(b, t);

        if (t->tree != b->set->tree + btree_keys_cachelines(b)) {
                t->prev[0] = bkey_to_cacheline_offset(t, 0, t->data->start);
                t->size = 1;
        }
}

void bch_bset_init_next(struct btree_keys *b, struct bset *i, uint64_t magic)
{
        if (i != b->set->data) {
                b->set[++b->nsets].data = i;
                i->seq = b->set->data->seq;
        } else
                get_random_bytes(&i->seq, sizeof(uint64_t));

        i->magic        = magic;
        i->version      = 0;
        i->keys         = 0;

        bch_bset_build_unwritten_tree(b);
}
EXPORT_SYMBOL(bch_bset_init_next);

void bch_bset_build_written_tree(struct btree_keys *b)
{
        struct bset_tree *t = bset_tree_last(b);
        struct bkey *prev = NULL, *k = t->data->start;
        unsigned j, cacheline = 1;

        b->last_set_unwritten = 0;

        bset_alloc_tree(b, t);

        t->size = min_t(unsigned,
                        bkey_to_cacheline(t, bset_bkey_last(t->data)),
                        b->set->tree + btree_keys_cachelines(b) - t->tree);

        if (t->size < 2) {
                t->size = 0;
                return;
        }

        t->extra = (t->size - rounddown_pow_of_two(t->size - 1)) << 1;

        /* First we figure out where the first key in each cacheline is */
        for (j = inorder_next(0, t->size);
             j;
             j = inorder_next(j, t->size)) {
                while (bkey_to_cacheline(t, k) < cacheline)
                        prev = k, k = bkey_next(k);

                t->prev[j] = bkey_u64s(prev);
                t->tree[j].m = bkey_to_cacheline_offset(t, cacheline++, k);
        }

        while (bkey_next(k) != bset_bkey_last(t->data))
                k = bkey_next(k);

        t->end = *k;

        /* Then we build the tree */
        for (j = inorder_next(0, t->size);
             j;
             j = inorder_next(j, t->size))
                make_bfloat(t, j);
}
EXPORT_SYMBOL(bch_bset_build_written_tree);

/* Insert */

void bch_bset_fix_invalidated_key(struct btree_keys *b, struct bkey *k)
{
        struct bset_tree *t;
        unsigned inorder, j = 1;

        for (t = b->set; t <= bset_tree_last(b); t++)
                if (k < bset_bkey_last(t->data))
                        goto found_set;

        BUG();
found_set:
        if (!t->size || !bset_written(b, t))
                return;

        inorder = bkey_to_cacheline(t, k);

        if (k == t->data->start)
                goto fix_left;

        if (bkey_next(k) == bset_bkey_last(t->data)) {
                t->end = *k;
                goto fix_right;
        }

        j = inorder_to_tree(inorder, t);

        if (j &&
            j < t->size &&
            k == tree_to_bkey(t, j))
fix_left:       do {
                        make_bfloat(t, j);
                        j = j * 2;
                } while (j < t->size);

        j = inorder_to_tree(inorder + 1, t);

        if (j &&
            j < t->size &&
            k == tree_to_prev_bkey(t, j))
fix_right:      do {
                        make_bfloat(t, j);
                        j = j * 2 + 1;
                } while (j < t->size);
}
EXPORT_SYMBOL(bch_bset_fix_invalidated_key);

static void bch_bset_fix_lookup_table(struct btree_keys *b,
                                      struct bset_tree *t,
                                      struct bkey *k)
{
        unsigned shift = bkey_u64s(k);
        unsigned j = bkey_to_cacheline(t, k);

        /* We're getting called from btree_split() or btree_gc, just bail out */
        if (!t->size)
                return;

        /*
         * k is the key we just inserted; we need to find the entry in the
         * lookup table for the first key that is strictly greater than k:
         * it's either k's cacheline or the next one
         */
        while (j < t->size &&
               table_to_bkey(t, j) <= k)
                j++;

        /*
         * Adjust all the lookup table entries, and find a new key for any that
         * have gotten too big
         */
        for (; j < t->size; j++) {
                t->prev[j] += shift;

                if (t->prev[j] > 7) {
                        k = table_to_bkey(t, j - 1);

                        while (k < cacheline_to_bkey(t, j, 0))
                                k = bkey_next(k);

                        t->prev[j] = bkey_to_cacheline_offset(t, j, k);
                }
        }

        if (t->size == b->set->tree + btree_keys_cachelines(b) - t->tree)
                return;

        /* Possibly add a new entry to the end of the lookup table */

        for (k = table_to_bkey(t, t->size - 1);
             k != bset_bkey_last(t->data);
             k = bkey_next(k))
                if (t->size == bkey_to_cacheline(t, k)) {
                        t->prev[t->size] =
                                bkey_to_cacheline_offset(t, t->size, k);
                        t->size++;
                }
}

/*
 * Tries to merge l and r: l should be lower than r
 * Returns true if we were able to merge. If we did merge, l will be the merged
 * key, r will be untouched.
 * For extents this means, e.g., that keys covering sectors [8, 16) and
 * [16, 24) of the same inode can become a single key covering [8, 24).
 */
bool bch_bkey_try_merge(struct btree_keys *b, struct bkey *l, struct bkey *r)
{
        if (!b->ops->key_merge)
                return false;

        /*
         * Generic header checks
         * Assumes left and right are in order
         * Left and right must be exactly aligned
         */
        if (!bch_bkey_equal_header(l, r) ||
             bkey_cmp(l, &START_KEY(r)))
                return false;

        return b->ops->key_merge(b, l, r);
}
EXPORT_SYMBOL(bch_bkey_try_merge);

void bch_bset_insert(struct btree_keys *b, struct bkey *where,
                     struct bkey *insert)
{
        struct bset_tree *t = bset_tree_last(b);

        BUG_ON(!b->last_set_unwritten);
        BUG_ON(bset_byte_offset(b, t->data) +
               __set_bytes(t->data, t->data->keys + bkey_u64s(insert)) >
               PAGE_SIZE << b->page_order);

        memmove((uint64_t *) where + bkey_u64s(insert),
                where,
                (void *) bset_bkey_last(t->data) - (void *) where);

        t->data->keys += bkey_u64s(insert);
        bkey_copy(where, insert);
        bch_bset_fix_lookup_table(b, t, where);
}
EXPORT_SYMBOL(bch_bset_insert);

unsigned bch_btree_insert_key(struct btree_keys *b, struct bkey *k,
                              struct bkey *replace_key)
{
        unsigned status = BTREE_INSERT_STATUS_NO_INSERT;
        struct bset *i = bset_tree_last(b)->data;
        struct bkey *m, *prev = NULL;
        struct btree_iter iter;

        BUG_ON(b->ops->is_extents && !KEY_SIZE(k));

        m = bch_btree_iter_init(b, &iter, b->ops->is_extents
                                ? PRECEDING_KEY(&START_KEY(k))
                                : PRECEDING_KEY(k));

        if (b->ops->insert_fixup(b, k, &iter, replace_key))
                return status;

        status = BTREE_INSERT_STATUS_INSERT;

        while (m != bset_bkey_last(i) &&
               bkey_cmp(k, b->ops->is_extents ? &START_KEY(m) : m) > 0)
                prev = m, m = bkey_next(m);

        /* prev is in the tree, if we merge we're done */
        status = BTREE_INSERT_STATUS_BACK_MERGE;
        if (prev &&
            bch_bkey_try_merge(b, prev, k))
                goto merged;
#if 0
        status = BTREE_INSERT_STATUS_OVERWROTE;
        if (m != bset_bkey_last(i) &&
            KEY_PTRS(m) == KEY_PTRS(k) && !KEY_SIZE(m))
                goto copy;
#endif
        status = BTREE_INSERT_STATUS_FRONT_MERGE;
        if (m != bset_bkey_last(i) &&
            bch_bkey_try_merge(b, k, m))
                goto copy;

        bch_bset_insert(b, m, k);
copy:   bkey_copy(m, k);
merged:
        return status;
}
EXPORT_SYMBOL(bch_btree_insert_key);

/* Lookup */

struct bset_search_iter {
        struct bkey *l, *r;
};

static struct bset_search_iter bset_search_write_set(struct bset_tree *t,
                                                     const struct bkey *search)
{
        unsigned li = 0, ri = t->size;

        while (li + 1 != ri) {
                unsigned m = (li + ri) >> 1;

                if (bkey_cmp(table_to_bkey(t, m), search) > 0)
                        ri = m;
                else
                        li = m;
        }

        return (struct bset_search_iter) {
                table_to_bkey(t, li),
                ri < t->size ? table_to_bkey(t, ri) : bset_bkey_last(t->data)
        };
}

static struct bset_search_iter bset_search_tree(struct bset_tree *t,
                                                const struct bkey *search)
{
        struct bkey *l, *r;
        struct bkey_float *f;
        unsigned inorder, j, n = 1;

        do {
                unsigned p = n << 4;
                p &= ((int) (p - t->size)) >> 31;

                prefetch(&t->tree[p]);

                j = n;
                f = &t->tree[j];

                /*
                 * n = (f->mantissa > bfloat_mantissa())
                 *      ? j * 2
                 *      : j * 2 + 1;
                 *
                 * We need to subtract 1 from f->mantissa for the sign bit
                 * trick to work - that's done in make_bfloat()
                 */
                if (likely(f->exponent != 127))
                        n = j * 2 + (((unsigned)
                                      (f->mantissa -
                                       bfloat_mantissa(search, f))) >> 31);
                else
                        n = (bkey_cmp(tree_to_bkey(t, j), search) > 0)
                                ? j * 2
                                : j * 2 + 1;
        } while (n < t->size);

        inorder = to_inorder(j, t);

        /*
         * n would have been the node we recursed to - the low bit tells us if
         * we recursed left or recursed right.
         */
        if (n & 1) {
                l = cacheline_to_bkey(t, inorder, f->m);

                if (++inorder != t->size) {
                        f = &t->tree[inorder_next(j, t->size)];
                        r = cacheline_to_bkey(t, inorder, f->m);
                } else
                        r = bset_bkey_last(t->data);
        } else {
                r = cacheline_to_bkey(t, inorder, f->m);

                if (--inorder) {
                        f = &t->tree[inorder_prev(j, t->size)];
                        l = cacheline_to_bkey(t, inorder, f->m);
                } else
                        l = t->data->start;
        }

        return (struct bset_search_iter) {l, r};
}

struct bkey *__bch_bset_search(struct btree_keys *b, struct bset_tree *t,
                               const struct bkey *search)
{
        struct bset_search_iter i;

        /*
         * First we search for a cacheline, then we do a linear search within
         * that cacheline.
         *
         * To search for the cacheline, there are three possibilities:
         *  * The set is too small to have a search tree, so we just do a linear
         *    search over the whole set.
         *  * The set is the one we're currently inserting into; keeping a full
         *    auxiliary search tree up to date would be too expensive, so we
         *    use a much simpler lookup table to do a binary search -
         *    bset_search_write_set().
         *  * Or we use the auxiliary search tree we constructed earlier -
         *    bset_search_tree()
         */

        if (unlikely(!t->size)) {
                i.l = t->data->start;
                i.r = bset_bkey_last(t->data);
        } else if (bset_written(b, t)) {
                /*
                 * Each node in the auxiliary search tree covers a certain range
                 * of bits, and keys above and below the set it covers might
                 * differ outside those bits - so we have to special case the
                 * start and end - handle that here:
                 */

                if (unlikely(bkey_cmp(search, &t->end) >= 0))
                        return bset_bkey_last(t->data);

                if (unlikely(bkey_cmp(search, t->data->start) < 0))
                        return t->data->start;

                i = bset_search_tree(t, search);
        } else {
                BUG_ON(!b->nsets &&
                       t->size < bkey_to_cacheline(t, bset_bkey_last(t->data)));

                i = bset_search_write_set(t, search);
        }

        if (btree_keys_expensive_checks(b)) {
                BUG_ON(bset_written(b, t) &&
                       i.l != t->data->start &&
                       bkey_cmp(tree_to_prev_bkey(t,
                          inorder_to_tree(bkey_to_cacheline(t, i.l), t)),
                                search) > 0);

                BUG_ON(i.r != bset_bkey_last(t->data) &&
                       bkey_cmp(i.r, search) <= 0);
        }

        while (likely(i.l != i.r) &&
               bkey_cmp(i.l, search) <= 0)
                i.l = bkey_next(i.l);

        return i.l;
}
EXPORT_SYMBOL(__bch_bset_search);

/* Btree iterator */

typedef bool (btree_iter_cmp_fn)(struct btree_iter_set,
                                 struct btree_iter_set);

static inline bool btree_iter_cmp(struct btree_iter_set l,
                                  struct btree_iter_set r)
{
        return bkey_cmp(l.k, r.k) > 0;
}

static inline bool btree_iter_end(struct btree_iter *iter)
{
        return !iter->used;
}

void bch_btree_iter_push(struct btree_iter *iter, struct bkey *k,
                         struct bkey *end)
{
        if (k != end)
                BUG_ON(!heap_add(iter,
                                 ((struct btree_iter_set) { k, end }),
                                 btree_iter_cmp));
}

static struct bkey *__bch_btree_iter_init(struct btree_keys *b,
                                          struct btree_iter *iter,
                                          struct bkey *search,
                                          struct bset_tree *start)
{
        struct bkey *ret = NULL;

        iter->size = ARRAY_SIZE(iter->data);
        iter->used = 0;

#ifdef CONFIG_BCACHE_DEBUG
        iter->b = b;
#endif

        for (; start <= bset_tree_last(b); start++) {
                ret = bch_bset_search(b, start, search);
                bch_btree_iter_push(iter, ret, bset_bkey_last(start->data));
        }

        return ret;
}

struct bkey *bch_btree_iter_init(struct btree_keys *b,
                                 struct btree_iter *iter,
                                 struct bkey *search)
{
        return __bch_btree_iter_init(b, iter, search, b->set);
}
EXPORT_SYMBOL(bch_btree_iter_init);

static inline struct bkey *__bch_btree_iter_next(struct btree_iter *iter,
                                                 btree_iter_cmp_fn *cmp)
{
        struct btree_iter_set b __maybe_unused;
        struct bkey *ret = NULL;

        if (!btree_iter_end(iter)) {
                bch_btree_iter_next_check(iter);

                ret = iter->data->k;
                iter->data->k = bkey_next(iter->data->k);

                if (iter->data->k > iter->data->end) {
                        WARN_ONCE(1, "bset was corrupt!\n");
                        iter->data->k = iter->data->end;
                }

                if (iter->data->k == iter->data->end)
                        heap_pop(iter, b, cmp);
                else
                        heap_sift(iter, 0, cmp);
        }

        return ret;
}

struct bkey *bch_btree_iter_next(struct btree_iter *iter)
{
        return __bch_btree_iter_next(iter, btree_iter_cmp);
}
EXPORT_SYMBOL(bch_btree_iter_next);

struct bkey *bch_btree_iter_next_filter(struct btree_iter *iter,
                                        struct btree_keys *b, ptr_filter_fn fn)
{
        struct bkey *ret;

        do {
                ret = bch_btree_iter_next(iter);
        } while (ret && fn(b, ret));

        return ret;
}
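
/*
 * Illustration only (hypothetical function, not part of the original file):
 * the iterator merge-sorts across every bset in the node, so keys come back
 * in sorted order no matter which set they live in - this is roughly what
 * the for_each_key()/for_each_key_filter() macros used above expand to.
 */
#if 0
static void iterate_all_keys_example(struct btree_keys *b)
{
        struct btree_iter iter;
        struct bkey *k;

        bch_btree_iter_init(b, &iter, NULL);
        while ((k = bch_btree_iter_next(&iter)))
                pr_debug("%llu:%llu", KEY_INODE(k), KEY_OFFSET(k));
}
#endif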

/* Mergesort */

void bch_bset_sort_state_free(struct bset_sort_state *state)
{
        if (state->pool)
                mempool_destroy(state->pool);
}

int bch_bset_sort_state_init(struct bset_sort_state *state, unsigned page_order)
{
        spin_lock_init(&state->time.lock);

        state->page_order = page_order;
        state->crit_factor = int_sqrt(1 << page_order);

        state->pool = mempool_create_page_pool(1, page_order);
        if (!state->pool)
                return -ENOMEM;

        return 0;
}
EXPORT_SYMBOL(bch_bset_sort_state_init);
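
/*
 * Illustration only (hypothetical function, not part of the original file):
 * a plausible lifecycle for a sort state; bcache proper typically keeps one
 * bset_sort_state around and reuses it across sorts rather than
 * re-initializing it every time.
 */
#if 0
static int sort_state_example(struct btree_keys *b)
{
        struct bset_sort_state state;
        int ret;

        ret = bch_bset_sort_state_init(&state, b->page_order);
        if (ret)
                return ret;

        /* resort the node, compacting sets that have grown past SORT_CRIT */
        bch_btree_sort_lazy(b, &state);

        bch_bset_sort_state_free(&state);
        return 0;
}
#endif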

static void btree_mergesort(struct btree_keys *b, struct bset *out,
                            struct btree_iter *iter,
                            bool fixup, bool remove_stale)
{
        int i;
        struct bkey *k, *last = NULL;
        BKEY_PADDED(k) tmp;
        bool (*bad)(struct btree_keys *, const struct bkey *) = remove_stale
                ? bch_ptr_bad
                : bch_ptr_invalid;

        /* Heapify the iterator, using our comparison function */
        for (i = iter->used / 2 - 1; i >= 0; --i)
                heap_sift(iter, i, b->ops->sort_cmp);

        while (!btree_iter_end(iter)) {
                if (b->ops->sort_fixup && fixup)
                        k = b->ops->sort_fixup(iter, &tmp.k);
                else
                        k = NULL;

                if (!k)
                        k = __bch_btree_iter_next(iter, b->ops->sort_cmp);

                if (bad(b, k))
                        continue;

                if (!last) {
                        last = out->start;
                        bkey_copy(last, k);
                } else if (!bch_bkey_try_merge(b, last, k)) {
                        last = bkey_next(last);
                        bkey_copy(last, k);
                }
        }

        out->keys = last ? (uint64_t *) bkey_next(last) - out->d : 0;

        pr_debug("sorted %i keys", out->keys);
}

static void __btree_sort(struct btree_keys *b, struct btree_iter *iter,
                         unsigned start, unsigned order, bool fixup,
                         struct bset_sort_state *state)
{
        uint64_t start_time;
        bool used_mempool = false;
        struct bset *out = (void *) __get_free_pages(__GFP_NOWARN|GFP_NOWAIT,
                                                     order);
        if (!out) {
                struct page *outp;

                BUG_ON(order > state->page_order);

                outp = mempool_alloc(state->pool, GFP_NOIO);
                out = page_address(outp);
                used_mempool = true;
                order = state->page_order;
        }

        start_time = local_clock();

        btree_mergesort(b, out, iter, fixup, false);
        b->nsets = start;

        if (!start && order == b->page_order) {
                /*
                 * Our temporary buffer is the same size as the btree node's
                 * buffer, so we can just swap buffers instead of doing a big
                 * memcpy()
                 */

                out->magic      = b->set->data->magic;
                out->seq        = b->set->data->seq;
                out->version    = b->set->data->version;
                swap(out, b->set->data);
        } else {
                b->set[start].data->keys = out->keys;
                memcpy(b->set[start].data->start, out->start,
                       (void *) bset_bkey_last(out) - (void *) out->start);
        }

        if (used_mempool)
                mempool_free(virt_to_page(out), state->pool);
        else
                free_pages((unsigned long) out, order);

        bch_bset_build_written_tree(b);

        if (!start)
                bch_time_stats_update(&state->time, start_time);
}

void bch_btree_sort_partial(struct btree_keys *b, unsigned start,
                            struct bset_sort_state *state)
{
        size_t order = b->page_order, keys = 0;
        struct btree_iter iter;
        int oldsize = bch_count_data(b);

        __bch_btree_iter_init(b, &iter, NULL, &b->set[start]);

        if (start) {
                unsigned i;

                for (i = start; i <= b->nsets; i++)
                        keys += b->set[i].data->keys;

                order = get_order(__set_bytes(b->set->data, keys));
        }

        __btree_sort(b, &iter, start, order, false, state);

        EBUG_ON(oldsize >= 0 && bch_count_data(b) != oldsize);
}
EXPORT_SYMBOL(bch_btree_sort_partial);

void bch_btree_sort_and_fix_extents(struct btree_keys *b,
                                    struct btree_iter *iter,
                                    struct bset_sort_state *state)
{
        __btree_sort(b, iter, 0, b->page_order, true, state);
}

void bch_btree_sort_into(struct btree_keys *b, struct btree_keys *new,
                         struct bset_sort_state *state)
{
        uint64_t start_time = local_clock();
        struct btree_iter iter;

        bch_btree_iter_init(b, &iter, NULL);

        btree_mergesort(b, new->set->data, &iter, false, true);

        bch_time_stats_update(&state->time, start_time);

        new->set->size = 0; /* XXX: why? */
}

#define SORT_CRIT       (4096 / sizeof(uint64_t))

void bch_btree_sort_lazy(struct btree_keys *b, struct bset_sort_state *state)
{
        unsigned crit = SORT_CRIT;
        int i;

        /* Don't sort if nothing to do */
        if (!b->nsets)
                goto out;

        for (i = b->nsets - 1; i >= 0; --i) {
                crit *= state->crit_factor;

                if (b->set[i].data->keys < crit) {
                        bch_btree_sort_partial(b, i, state);
                        return;
                }
        }

        /* Sort if we'd overflow */
        if (b->nsets + 1 == MAX_BSETS) {
                bch_btree_sort(b, state);
                return;
        }

out:
        bch_bset_build_written_tree(b);
}
EXPORT_SYMBOL(bch_btree_sort_lazy);

void bch_btree_keys_stats(struct btree_keys *b, struct bset_stats *stats)
{
        unsigned i;

        for (i = 0; i <= b->nsets; i++) {
                struct bset_tree *t = &b->set[i];
                size_t bytes = t->data->keys * sizeof(uint64_t);
                size_t j;

                if (bset_written(b, t)) {
                        stats->sets_written++;
                        stats->bytes_written += bytes;

                        stats->floats += t->size - 1;

                        for (j = 1; j < t->size; j++)
                                if (t->tree[j].exponent == 127)
                                        stats->failed++;
                } else {
                        stats->sets_unwritten++;
                        stats->bytes_unwritten += bytes;
                }
        }
}