linux/drivers/md/bcache/bset.c
// SPDX-License-Identifier: GPL-2.0
/*
 * Code for working with individual keys, and sorted sets of keys within a
 * btree node
 *
 * Copyright 2012 Google, Inc.
 */

#define pr_fmt(fmt) "bcache: %s() " fmt, __func__

#include "util.h"
#include "bset.h"

#include <linux/console.h>
#include <linux/sched/clock.h>
#include <linux/random.h>
#include <linux/prefetch.h>

#ifdef CONFIG_BCACHE_DEBUG

void bch_dump_bset(struct btree_keys *b, struct bset *i, unsigned int set)
{
        struct bkey *k, *next;

        for (k = i->start; k < bset_bkey_last(i); k = next) {
                next = bkey_next(k);

                pr_err("block %u key %u/%u: ", set,
                       (unsigned int) ((u64 *) k - i->d), i->keys);

                if (b->ops->key_dump)
                        b->ops->key_dump(b, k);
                else
                        pr_cont("%llu:%llu\n", KEY_INODE(k), KEY_OFFSET(k));

                if (next < bset_bkey_last(i) &&
                    bkey_cmp(k, b->ops->is_extents ?
                             &START_KEY(next) : next) > 0)
                        pr_err("Key skipped backwards\n");
        }
}

void bch_dump_bucket(struct btree_keys *b)
{
        unsigned int i;

        console_lock();
        for (i = 0; i <= b->nsets; i++)
                bch_dump_bset(b, b->set[i].data,
                              bset_sector_offset(b, b->set[i].data));
        console_unlock();
}

int __bch_count_data(struct btree_keys *b)
{
        unsigned int ret = 0;
        struct btree_iter iter;
        struct bkey *k;

        if (b->ops->is_extents)
                for_each_key(b, k, &iter)
                        ret += KEY_SIZE(k);
        return ret;
}

void __bch_check_keys(struct btree_keys *b, const char *fmt, ...)
{
        va_list args;
        struct bkey *k, *p = NULL;
        struct btree_iter iter;
        const char *err;

        for_each_key(b, k, &iter) {
                if (b->ops->is_extents) {
                        err = "Keys out of order";
                        if (p && bkey_cmp(&START_KEY(p), &START_KEY(k)) > 0)
                                goto bug;

                        if (bch_ptr_invalid(b, k))
                                continue;

                        err = "Overlapping keys";
                        if (p && bkey_cmp(p, &START_KEY(k)) > 0)
                                goto bug;
                } else {
                        if (bch_ptr_bad(b, k))
                                continue;

                        err = "Duplicate keys";
                        if (p && !bkey_cmp(p, k))
                                goto bug;
                }
                p = k;
        }
#if 0
        err = "Key larger than btree node key";
        if (p && bkey_cmp(p, &b->key) > 0)
                goto bug;
#endif
        return;
bug:
        bch_dump_bucket(b);

        va_start(args, fmt);
        vprintk(fmt, args);
        va_end(args);

        panic("bch_check_keys error:  %s:\n", err);
}

static void bch_btree_iter_next_check(struct btree_iter *iter)
{
        struct bkey *k = iter->data->k, *next = bkey_next(k);

        if (next < iter->data->end &&
            bkey_cmp(k, iter->b->ops->is_extents ?
                     &START_KEY(next) : next) > 0) {
                bch_dump_bucket(iter->b);
                panic("Key skipped backwards\n");
        }
}

#else

static inline void bch_btree_iter_next_check(struct btree_iter *iter) {}

#endif

/* Keylists */

int __bch_keylist_realloc(struct keylist *l, unsigned int u64s)
{
        size_t oldsize = bch_keylist_nkeys(l);
        size_t newsize = oldsize + u64s;
        uint64_t *old_keys = l->keys_p == l->inline_keys ? NULL : l->keys_p;
        uint64_t *new_keys;

        newsize = roundup_pow_of_two(newsize);

        if (newsize <= KEYLIST_INLINE ||
            roundup_pow_of_two(oldsize) == newsize)
                return 0;

        new_keys = krealloc(old_keys, sizeof(uint64_t) * newsize, GFP_NOIO);

        if (!new_keys)
                return -ENOMEM;

        if (!old_keys)
                memcpy(new_keys, l->inline_keys, sizeof(uint64_t) * oldsize);

        l->keys_p = new_keys;
        l->top_p = new_keys + oldsize;

        return 0;
}
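
/*
 * A minimal usage sketch (not compiled in): reserve room on a keylist
 * before appending a key.  bch_keylist_init() and bch_keylist_add()
 * are the inline helpers from bset.h.
 */
#if 0
static int keylist_add_example(struct keylist *l, struct bkey *k)
{
        bch_keylist_init(l);            /* start on the inline buffer */

        /* grow keys_p first if the current buffer can't hold k */
        if (__bch_keylist_realloc(l, bkey_u64s(k)))
                return -ENOMEM;         /* old buffer kept, nothing leaked */

        bch_keylist_add(l, k);          /* copies k and advances l->top */
        return 0;
}
#endif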

/* Pop the top key of keylist by pointing l->top to its previous key */
struct bkey *bch_keylist_pop(struct keylist *l)
{
        struct bkey *k = l->keys;

        if (k == l->top)
                return NULL;

        while (bkey_next(k) != l->top)
                k = bkey_next(k);

        return l->top = k;
}

/* Pop the bottom key of keylist and update l->top_p */
void bch_keylist_pop_front(struct keylist *l)
{
        l->top_p -= bkey_u64s(l->keys);

        memmove(l->keys,
                bkey_next(l->keys),
                bch_keylist_bytes(l));
}

/* Key/pointer manipulation */

void bch_bkey_copy_single_ptr(struct bkey *dest, const struct bkey *src,
                              unsigned int i)
{
        BUG_ON(i > KEY_PTRS(src));

        /* Only copy the header, key, and one pointer. */
        memcpy(dest, src, 2 * sizeof(uint64_t));
        dest->ptr[0] = src->ptr[i];
        SET_KEY_PTRS(dest, 1);
        /* We didn't copy the checksum so clear that bit. */
        SET_KEY_CSUM(dest, 0);
}

bool __bch_cut_front(const struct bkey *where, struct bkey *k)
{
        unsigned int i, len = 0;

        if (bkey_cmp(where, &START_KEY(k)) <= 0)
                return false;

        if (bkey_cmp(where, k) < 0)
                len = KEY_OFFSET(k) - KEY_OFFSET(where);
        else
                bkey_copy_key(k, where);

        for (i = 0; i < KEY_PTRS(k); i++)
                SET_PTR_OFFSET(k, i, PTR_OFFSET(k, i) + KEY_SIZE(k) - len);

        BUG_ON(len > KEY_SIZE(k));
        SET_KEY_SIZE(k, len);
        return true;
}

bool __bch_cut_back(const struct bkey *where, struct bkey *k)
{
        unsigned int len = 0;

        if (bkey_cmp(where, k) >= 0)
                return false;

        BUG_ON(KEY_INODE(where) != KEY_INODE(k));

        if (bkey_cmp(where, &START_KEY(k)) > 0)
                len = KEY_OFFSET(where) - KEY_START(k);

        bkey_copy_key(k, where);

        BUG_ON(len > KEY_SIZE(k));
        SET_KEY_SIZE(k, len);
        return true;
}
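
/*
 * Worked example (a sketch, not normative): take an extent key k with
 * KEY_OFFSET(k) = 30 and KEY_SIZE(k) = 10, i.e. covering [20, 30).
 * __bch_cut_front(where = 25, k) keeps the back half: KEY_OFFSET()
 * stays 30, KEY_SIZE() becomes 5, and each PTR_OFFSET() is bumped by 5
 * so the pointers still point at the data that remains.
 * __bch_cut_back(where = 25, k) keeps the front half instead:
 * KEY_OFFSET() becomes 25, KEY_SIZE() becomes 5, pointers untouched.
 */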

/* Auxiliary search trees */

/* 32 bits total: */
#define BKEY_MID_BITS           3
#define BKEY_EXPONENT_BITS      7
#define BKEY_MANTISSA_BITS      (32 - BKEY_MID_BITS - BKEY_EXPONENT_BITS)
#define BKEY_MANTISSA_MASK      ((1 << BKEY_MANTISSA_BITS) - 1)

struct bkey_float {
        unsigned int    exponent:BKEY_EXPONENT_BITS;
        unsigned int    m:BKEY_MID_BITS;
        unsigned int    mantissa:BKEY_MANTISSA_BITS;
} __packed;
/*
 * BSET_CACHELINE was originally intended to match the hardware cacheline size -
 * it used to be 64, but I realized the lookup code would touch slightly less
 * memory if it was 128.
 *
 * It defines the number of bytes (in struct bset) per struct bkey_float in
 * the auxiliary search tree - when we're done searching the bset_float tree we
 * have this many bytes left that we do a linear search over.
 *
 * Since (after level 5) every level of the bset_tree is on a new cacheline,
 * we're touching one fewer cacheline in the bset tree in exchange for one more
 * cacheline in the linear search - but the linear search might stop before it
 * gets to the second cacheline.
 */

#define BSET_CACHELINE          128

/* Space required for the btree node keys */
static inline size_t btree_keys_bytes(struct btree_keys *b)
{
        return PAGE_SIZE << b->page_order;
}

static inline size_t btree_keys_cachelines(struct btree_keys *b)
{
        return btree_keys_bytes(b) / BSET_CACHELINE;
}

/* Space required for the auxiliary search trees */
static inline size_t bset_tree_bytes(struct btree_keys *b)
{
        return btree_keys_cachelines(b) * sizeof(struct bkey_float);
}

/* Space required for the prev pointers */
static inline size_t bset_prev_bytes(struct btree_keys *b)
{
        return btree_keys_cachelines(b) * sizeof(uint8_t);
}
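
/*
 * A worked sizing sketch (hypothetical numbers, not compiled in): with
 * PAGE_SIZE = 4096 and page_order = 2, btree_keys_bytes() is 16384, so
 * btree_keys_cachelines() is 16384 / 128 = 128.  The auxiliary tree
 * then needs 128 * sizeof(struct bkey_float) = 512 bytes and the prev
 * table 128 bytes - both under PAGE_SIZE, so bch_btree_keys_alloc()
 * below takes the kmalloc() path for both of them.
 */
#if 0
static void space_budget_example(struct btree_keys *b)
{
        b->page_order = 2;

        BUG_ON(btree_keys_bytes(b)      != 16384);
        BUG_ON(btree_keys_cachelines(b) != 128);
        BUG_ON(bset_tree_bytes(b)       != 512);
        BUG_ON(bset_prev_bytes(b)       != 128);
}
#endif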

/* Memory allocation */

void bch_btree_keys_free(struct btree_keys *b)
{
        struct bset_tree *t = b->set;

        if (bset_prev_bytes(b) < PAGE_SIZE)
                kfree(t->prev);
        else
                free_pages((unsigned long) t->prev,
                           get_order(bset_prev_bytes(b)));

        if (bset_tree_bytes(b) < PAGE_SIZE)
                kfree(t->tree);
        else
                free_pages((unsigned long) t->tree,
                           get_order(bset_tree_bytes(b)));

        free_pages((unsigned long) t->data, b->page_order);

        t->prev = NULL;
        t->tree = NULL;
        t->data = NULL;
}

int bch_btree_keys_alloc(struct btree_keys *b,
                         unsigned int page_order,
                         gfp_t gfp)
{
        struct bset_tree *t = b->set;

        BUG_ON(t->data);

        b->page_order = page_order;

        t->data = (void *) __get_free_pages(__GFP_COMP|gfp, b->page_order);
        if (!t->data)
                goto err;

        t->tree = bset_tree_bytes(b) < PAGE_SIZE
                ? kmalloc(bset_tree_bytes(b), gfp)
                : (void *) __get_free_pages(gfp, get_order(bset_tree_bytes(b)));
        if (!t->tree)
                goto err;

        t->prev = bset_prev_bytes(b) < PAGE_SIZE
                ? kmalloc(bset_prev_bytes(b), gfp)
                : (void *) __get_free_pages(gfp, get_order(bset_prev_bytes(b)));
        if (!t->prev)
                goto err;

        return 0;
err:
        bch_btree_keys_free(b);
        return -ENOMEM;
}

void bch_btree_keys_init(struct btree_keys *b, const struct btree_keys_ops *ops,
                         bool *expensive_debug_checks)
{
        b->ops = ops;
        b->expensive_debug_checks = expensive_debug_checks;
        b->nsets = 0;
        b->last_set_unwritten = 0;

        /*
         * struct btree_keys is embedded in struct btree, and struct
         * bset_tree is embedded in struct btree_keys. They are all
         * zeroed by kzalloc() in mca_bucket_alloc(), and
         * b->set[0].data is allocated in bch_btree_keys_alloc(), so we
         * don't have to initialize b->set[].size and b->set[].data
         * here any more.
         */
}

/* Binary tree stuff for auxiliary search trees */

/*
 * Return the array index that follows j in an in-order traversal of a
 * binary tree stored in a linear array
 */
static unsigned int inorder_next(unsigned int j, unsigned int size)
{
        if (j * 2 + 1 < size) {
                j = j * 2 + 1;

                while (j * 2 < size)
                        j *= 2;
        } else
                j >>= ffz(j) + 1;

        return j;
}

/*
 * Return the array index that precedes j in an in-order traversal of a
 * binary tree stored in a linear array
 */
static unsigned int inorder_prev(unsigned int j, unsigned int size)
{
        if (j * 2 < size) {
                j = j * 2;

                while (j * 2 + 1 < size)
                        j = j * 2 + 1;
        } else
                j >>= ffs(j);

        return j;
}

/*
 * I have no idea why this code works... and I'm the one who wrote it
 *
 * However, I do know what it does:
 * Given a binary tree constructed in an array (i.e. how you normally implement
 * a heap), it converts a node in the tree - referenced by array index - to the
 * index it would have if you did an inorder traversal.
 *
 * Also tested for every j, for all sizes up to somewhere around 6 million.
 *
 * The binary tree starts at array index 1, not 0
 * extra is a function of size:
 *   extra = (size - rounddown_pow_of_two(size - 1)) << 1;
 */
static unsigned int __to_inorder(unsigned int j,
                                  unsigned int size,
                                  unsigned int extra)
{
        unsigned int b = fls(j);
        unsigned int shift = fls(size - 1) - b;

        j  ^= 1U << (b - 1);
        j <<= 1;
        j  |= 1;
        j <<= shift;

        if (j > extra)
                j -= (j - extra) >> 1;

        return j;
}

/*
 * Return the cacheline index in bset_tree->data, where j is the index
 * of a node in the linear array that stores the auxiliary binary tree
 */
static unsigned int to_inorder(unsigned int j, struct bset_tree *t)
{
        return __to_inorder(j, t->size, t->extra);
}

static unsigned int __inorder_to_tree(unsigned int j,
                                      unsigned int size,
                                      unsigned int extra)
{
        unsigned int shift;

        if (j > extra)
                j += j - extra;

        shift = ffs(j);

        j >>= shift;
        j  |= roundup_pow_of_two(size) >> shift;

        return j;
}

/*
 * Return the node index in the linear array that stores the auxiliary
 * binary tree; j is a cacheline index into t->data.
 */
static unsigned int inorder_to_tree(unsigned int j, struct bset_tree *t)
{
        return __inorder_to_tree(j, t->size, t->extra);
}
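
/*
 * A small worked example (sketch only): for size = 8 the tree occupies
 * array indices 1..7 and extra = (8 - rounddown_pow_of_two(7)) << 1 = 8.
 * An in-order walk visits the array indices 4, 2, 5, 1, 6, 3, 7, so
 * __to_inorder() maps j = 4 to 1, j = 2 to 2, j = 5 to 3, j = 1 to 4,
 * and so on; __inorder_to_tree() is the inverse mapping.
 */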

#if 0
void inorder_test(void)
{
        unsigned long done = 0;
        ktime_t start = ktime_get();

        for (unsigned int size = 2;
             size < 65536000;
             size++) {
                unsigned int extra =
                        (size - rounddown_pow_of_two(size - 1)) << 1;
                unsigned int i = 1, j = rounddown_pow_of_two(size - 1);

                if (!(size % 4096))
                        pr_notice("loop %u, %llu per us\n", size,
                               done / ktime_us_delta(ktime_get(), start));

                while (1) {
                        if (__inorder_to_tree(i, size, extra) != j)
                                panic("size %10u j %10u i %10u", size, j, i);

                        if (__to_inorder(j, size, extra) != i)
                                panic("size %10u j %10u i %10u", size, j, i);

                        if (j == rounddown_pow_of_two(size) - 1)
                                break;

                        BUG_ON(inorder_prev(inorder_next(j, size), size) != j);

                        j = inorder_next(j, size);
                        i++;
                }

                done += size - 1;
        }
}
#endif

/*
 * Cacheline/offset <-> bkey pointer arithmetic:
 *
 * t->tree is a binary search tree in an array; each node corresponds to a key
 * in one cacheline in t->data (BSET_CACHELINE bytes).
 *
 * This means we don't have to store the full index of the key that a node in
 * the binary tree points to; to_inorder() gives us the cacheline, and then
 * bkey_float->m gives us the offset within that cacheline, in units of 8 bytes.
 *
 * cacheline_to_bkey() and friends abstract out all the pointer arithmetic to
 * make this work.
 *
 * To construct the bfloat for an arbitrary key we need to know what the key
 * immediately preceding it is: we have to check if the two keys differ in the
 * bits we're going to store in bkey_float->mantissa. t->prev[j] stores the size
 * of the previous key so we can walk backwards to it from t->tree[j]'s key.
 */
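
/*
 * Worked example (sketch): with BSET_CACHELINE = 128, cacheline 3 and
 * offset 5 name the key starting 3 * 128 + 5 * 8 = 424 bytes into
 * t->data; bkey_to_cacheline() and bkey_to_cacheline_offset() recover
 * the two components from such a pointer.
 */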

static struct bkey *cacheline_to_bkey(struct bset_tree *t,
                                      unsigned int cacheline,
                                      unsigned int offset)
{
        return ((void *) t->data) + cacheline * BSET_CACHELINE + offset * 8;
}

static unsigned int bkey_to_cacheline(struct bset_tree *t, struct bkey *k)
{
        return ((void *) k - (void *) t->data) / BSET_CACHELINE;
}

static unsigned int bkey_to_cacheline_offset(struct bset_tree *t,
                                         unsigned int cacheline,
                                         struct bkey *k)
{
        return (u64 *) k - (u64 *) cacheline_to_bkey(t, cacheline, 0);
}

static struct bkey *tree_to_bkey(struct bset_tree *t, unsigned int j)
{
        return cacheline_to_bkey(t, to_inorder(j, t), t->tree[j].m);
}

static struct bkey *tree_to_prev_bkey(struct bset_tree *t, unsigned int j)
{
        return (void *) (((uint64_t *) tree_to_bkey(t, j)) - t->prev[j]);
}

/*
 * For the write set - the one we're currently inserting keys into - we don't
 * maintain a full search tree, we just keep a simple lookup table in t->prev.
 */
static struct bkey *table_to_bkey(struct bset_tree *t, unsigned int cacheline)
{
        return cacheline_to_bkey(t, cacheline, t->prev[cacheline]);
}

static inline uint64_t shrd128(uint64_t high, uint64_t low, uint8_t shift)
{
        low >>= shift;
        low  |= (high << 1) << (63U - shift);
        return low;
}
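
/*
 * Example (sketch): shrd128() behaves like the x86 SHRD instruction -
 * shift the 128-bit value high:low right by 'shift' and return the low
 * 64 bits, e.g. shrd128(0x1, 0x0, 4) = 0x1000000000000000.  Splitting
 * the shift as (high << 1) << (63U - shift) avoids the undefined
 * 64-bit shift that high << (64 - shift) would hit when shift is 0.
 */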

/*
 * Calculate the mantissa value for struct bkey_float.
 * If the most significant bit of f->exponent is not set, then
 *  - f->exponent >> 6 is 0
 *  - p[0] points to bkey->low
 *  - p[-1] borrows bits from KEY_INODE() of bkey->high
 * If the most significant bit of f->exponent is set, then
 *  - f->exponent >> 6 is 1
 *  - p[0] points to bits from KEY_INODE() of bkey->high
 *  - p[-1] points to other bits from KEY_INODE() of
 *    bkey->high too.
 * See make_bfloat() for when the most significant bit of f->exponent
 * is set or not.
 */
static inline unsigned int bfloat_mantissa(const struct bkey *k,
                                       struct bkey_float *f)
{
        const uint64_t *p = &k->low - (f->exponent >> 6);

        return shrd128(p[-1], p[0], f->exponent & 63) & BKEY_MANTISSA_MASK;
}

static void make_bfloat(struct bset_tree *t, unsigned int j)
{
        struct bkey_float *f = &t->tree[j];
        struct bkey *m = tree_to_bkey(t, j);
        struct bkey *p = tree_to_prev_bkey(t, j);

        struct bkey *l = is_power_of_2(j)
                ? t->data->start
                : tree_to_prev_bkey(t, j >> ffs(j));

        struct bkey *r = is_power_of_2(j + 1)
                ? bset_bkey_idx(t->data, t->data->keys - bkey_u64s(&t->end))
                : tree_to_bkey(t, j >> (ffz(j) + 1));

        BUG_ON(m < l || m > r);
        BUG_ON(bkey_next(p) != m);

        /*
         * If l and r have different KEY_INODE values (different backing
         * devices), f->exponent records the position of the most
         * significant differing bit in the KEY_INODE values and sets the
         * most significant bit of f->exponent to 1 (by adding 64).
         * If l and r have the same KEY_INODE value, f->exponent records
         * the position of the most significant differing bit in bkey->low.
         * See bfloat_mantissa() for how the most significant bit of
         * f->exponent is used to calculate the bfloat mantissa value.
         */
        if (KEY_INODE(l) != KEY_INODE(r))
                f->exponent = fls64(KEY_INODE(r) ^ KEY_INODE(l)) + 64;
        else
                f->exponent = fls64(r->low ^ l->low);

        f->exponent = max_t(int, f->exponent - BKEY_MANTISSA_BITS, 0);

        /*
         * Setting f->exponent = 127 flags this node as failed, and causes the
         * lookup code to fall back to comparing against the original key.
         */

        if (bfloat_mantissa(m, f) != bfloat_mantissa(p, f))
                f->mantissa = bfloat_mantissa(m, f) - 1;
        else
                f->exponent = 127;
}
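
/*
 * Worked example (sketch, using BKEY_MANTISSA_BITS = 22): if l and r
 * share KEY_INODE() and their ->low values first differ at bit 40,
 * fls64(r->low ^ l->low) = 41, so f->exponent = 41 - 22 = 19 and the
 * mantissa is taken from bits 19..40 of bkey->low - exactly the bits
 * that can distinguish keys within this subtree.
 */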

static void bset_alloc_tree(struct btree_keys *b, struct bset_tree *t)
{
        if (t != b->set) {
                unsigned int j = roundup(t[-1].size,
                                     64 / sizeof(struct bkey_float));

                t->tree = t[-1].tree + j;
                t->prev = t[-1].prev + j;
        }

        while (t < b->set + MAX_BSETS)
                t++->size = 0;
}

static void bch_bset_build_unwritten_tree(struct btree_keys *b)
{
        struct bset_tree *t = bset_tree_last(b);

        BUG_ON(b->last_set_unwritten);
        b->last_set_unwritten = 1;

        bset_alloc_tree(b, t);

        if (t->tree != b->set->tree + btree_keys_cachelines(b)) {
                t->prev[0] = bkey_to_cacheline_offset(t, 0, t->data->start);
                t->size = 1;
        }
}

void bch_bset_init_next(struct btree_keys *b, struct bset *i, uint64_t magic)
{
        if (i != b->set->data) {
                b->set[++b->nsets].data = i;
                i->seq = b->set->data->seq;
        } else
                get_random_bytes(&i->seq, sizeof(uint64_t));

        i->magic        = magic;
        i->version      = 0;
        i->keys         = 0;

        bch_bset_build_unwritten_tree(b);
}

/*
 * Build the auxiliary binary tree 'struct bset_tree *t'. This tree is
 * used to accelerate bkey searches in a btree node (pointed to by
 * bset_tree->data in memory). After searching the auxiliary tree with
 * bset_search_tree(), a struct bset_search_iter is returned which
 * indicates the range [l, r] of bset_tree->data that the bkey being
 * searched for might be in. A linear search over that range then does
 * the exact lookup; see __bch_bset_search() for how the auxiliary tree
 * is used.
 */
void bch_bset_build_written_tree(struct btree_keys *b)
{
        struct bset_tree *t = bset_tree_last(b);
        struct bkey *prev = NULL, *k = t->data->start;
        unsigned int j, cacheline = 1;

        b->last_set_unwritten = 0;

        bset_alloc_tree(b, t);

        t->size = min_t(unsigned int,
                        bkey_to_cacheline(t, bset_bkey_last(t->data)),
                        b->set->tree + btree_keys_cachelines(b) - t->tree);

        if (t->size < 2) {
                t->size = 0;
                return;
        }

        t->extra = (t->size - rounddown_pow_of_two(t->size - 1)) << 1;

        /* First we figure out where the first key in each cacheline is */
        for (j = inorder_next(0, t->size);
             j;
             j = inorder_next(j, t->size)) {
                while (bkey_to_cacheline(t, k) < cacheline) {
                        prev = k;
                        k = bkey_next(k);
                }

                t->prev[j] = bkey_u64s(prev);
                t->tree[j].m = bkey_to_cacheline_offset(t, cacheline++, k);
        }

        while (bkey_next(k) != bset_bkey_last(t->data))
                k = bkey_next(k);

        t->end = *k;

        /* Then we build the tree */
        for (j = inorder_next(0, t->size);
             j;
             j = inorder_next(j, t->size))
                make_bfloat(t, j);
}
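
/*
 * Worked example (sketch): a bset whose keys span 20 cachelines gets
 * t->size = 20, so the tree uses array indices 1..19 and t->extra =
 * (20 - rounddown_pow_of_two(19)) << 1 = 8; 'extra' accounts for the
 * incomplete bottom row of the tree when mapping between array index
 * and in-order (cacheline) index.
 */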

/* Insert */

void bch_bset_fix_invalidated_key(struct btree_keys *b, struct bkey *k)
{
        struct bset_tree *t;
        unsigned int inorder, j = 1;

        for (t = b->set; t <= bset_tree_last(b); t++)
                if (k < bset_bkey_last(t->data))
                        goto found_set;

        BUG();
found_set:
        if (!t->size || !bset_written(b, t))
                return;

        inorder = bkey_to_cacheline(t, k);

        if (k == t->data->start)
                goto fix_left;

        if (bkey_next(k) == bset_bkey_last(t->data)) {
                t->end = *k;
                goto fix_right;
        }

        j = inorder_to_tree(inorder, t);

        if (j &&
            j < t->size &&
            k == tree_to_bkey(t, j))
fix_left:       do {
                        make_bfloat(t, j);
                        j = j * 2;
                } while (j < t->size);

        j = inorder_to_tree(inorder + 1, t);

        if (j &&
            j < t->size &&
            k == tree_to_prev_bkey(t, j))
fix_right:      do {
                        make_bfloat(t, j);
                        j = j * 2 + 1;
                } while (j < t->size);
}

static void bch_bset_fix_lookup_table(struct btree_keys *b,
                                      struct bset_tree *t,
                                      struct bkey *k)
{
        unsigned int shift = bkey_u64s(k);
        unsigned int j = bkey_to_cacheline(t, k);

        /* We're getting called from btree_split() or btree_gc, just bail out */
        if (!t->size)
                return;

        /*
         * k is the key we just inserted; we need to find the entry in the
         * lookup table for the first key that is strictly greater than k:
         * it's either k's cacheline or the next one
         */
        while (j < t->size &&
               table_to_bkey(t, j) <= k)
                j++;

        /*
         * Adjust all the lookup table entries, and find a new key for any that
         * have gotten too big
         */
        for (; j < t->size; j++) {
                t->prev[j] += shift;

                if (t->prev[j] > 7) {
                        k = table_to_bkey(t, j - 1);

                        while (k < cacheline_to_bkey(t, j, 0))
                                k = bkey_next(k);

                        t->prev[j] = bkey_to_cacheline_offset(t, j, k);
                }
        }

        if (t->size == b->set->tree + btree_keys_cachelines(b) - t->tree)
                return;

        /* Possibly add a new entry to the end of the lookup table */

        for (k = table_to_bkey(t, t->size - 1);
             k != bset_bkey_last(t->data);
             k = bkey_next(k))
                if (t->size == bkey_to_cacheline(t, k)) {
                        t->prev[t->size] =
                                bkey_to_cacheline_offset(t, t->size, k);
                        t->size++;
                }
}

/*
 * Tries to merge l and r: l should be lower than r
 * Returns true if we were able to merge. If we did merge, l will be the merged
 * key, r will be untouched.
 */
bool bch_bkey_try_merge(struct btree_keys *b, struct bkey *l, struct bkey *r)
{
        if (!b->ops->key_merge)
                return false;

        /*
         * Generic header checks
         * Assumes left and right are in order
         * Left and right must be exactly aligned
         */
        if (!bch_bkey_equal_header(l, r) ||
             bkey_cmp(l, &START_KEY(r)))
                return false;

        return b->ops->key_merge(b, l, r);
}

void bch_bset_insert(struct btree_keys *b, struct bkey *where,
                     struct bkey *insert)
{
        struct bset_tree *t = bset_tree_last(b);

        BUG_ON(!b->last_set_unwritten);
        BUG_ON(bset_byte_offset(b, t->data) +
               __set_bytes(t->data, t->data->keys + bkey_u64s(insert)) >
               PAGE_SIZE << b->page_order);

        memmove((uint64_t *) where + bkey_u64s(insert),
                where,
                (void *) bset_bkey_last(t->data) - (void *) where);

        t->data->keys += bkey_u64s(insert);
        bkey_copy(where, insert);
        bch_bset_fix_lookup_table(b, t, where);
}

unsigned int bch_btree_insert_key(struct btree_keys *b, struct bkey *k,
                              struct bkey *replace_key)
{
        unsigned int status = BTREE_INSERT_STATUS_NO_INSERT;
        struct bset *i = bset_tree_last(b)->data;
        struct bkey *m, *prev = NULL;
        struct btree_iter iter;
        struct bkey preceding_key_on_stack = ZERO_KEY;
        struct bkey *preceding_key_p = &preceding_key_on_stack;

        BUG_ON(b->ops->is_extents && !KEY_SIZE(k));

        /*
         * If k has a preceding key, preceding_key_p will be set to the
         * address of k's preceding key; otherwise preceding_key_p will
         * be set to NULL inside preceding_key().
         */
        if (b->ops->is_extents)
                preceding_key(&START_KEY(k), &preceding_key_p);
        else
                preceding_key(k, &preceding_key_p);

        m = bch_btree_iter_init(b, &iter, preceding_key_p);

        if (b->ops->insert_fixup(b, k, &iter, replace_key))
                return status;

        status = BTREE_INSERT_STATUS_INSERT;

        while (m != bset_bkey_last(i) &&
               bkey_cmp(k, b->ops->is_extents ? &START_KEY(m) : m) > 0) {
                prev = m;
                m = bkey_next(m);
        }

        /* prev is in the tree, if we merge we're done */
        status = BTREE_INSERT_STATUS_BACK_MERGE;
        if (prev &&
            bch_bkey_try_merge(b, prev, k))
                goto merged;
#if 0
        status = BTREE_INSERT_STATUS_OVERWROTE;
        if (m != bset_bkey_last(i) &&
            KEY_PTRS(m) == KEY_PTRS(k) && !KEY_SIZE(m))
                goto copy;
#endif
        status = BTREE_INSERT_STATUS_FRONT_MERGE;
        if (m != bset_bkey_last(i) &&
            bch_bkey_try_merge(b, k, m))
                goto copy;

        bch_bset_insert(b, m, k);
copy:   bkey_copy(m, k);
merged:
        return status;
}
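
/*
 * Example (a sketch of the flow, not normative): when the new extent's
 * start abuts the end of the key before it, the key_merge hook
 * (bch_extent_merge() for extents) can extend that previous key in
 * place and the function returns BTREE_INSERT_STATUS_BACK_MERGE
 * without growing the bset; merging with the following key instead
 * yields BTREE_INSERT_STATUS_FRONT_MERGE, and the fallback path makes
 * room for k with the memmove() in bch_bset_insert().
 */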

/* Lookup */

struct bset_search_iter {
        struct bkey *l, *r;
};

static struct bset_search_iter bset_search_write_set(struct bset_tree *t,
                                                     const struct bkey *search)
{
        unsigned int li = 0, ri = t->size;

        while (li + 1 != ri) {
                unsigned int m = (li + ri) >> 1;

                if (bkey_cmp(table_to_bkey(t, m), search) > 0)
                        ri = m;
                else
                        li = m;
        }

        return (struct bset_search_iter) {
                table_to_bkey(t, li),
                ri < t->size ? table_to_bkey(t, ri) : bset_bkey_last(t->data)
        };
}

static struct bset_search_iter bset_search_tree(struct bset_tree *t,
                                                const struct bkey *search)
{
        struct bkey *l, *r;
        struct bkey_float *f;
        unsigned int inorder, j, n = 1;

        do {
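                /*
                 * Prefetch the subtree four levels below n (n << 4), so
                 * that by the time the search descends there the
                 * cacheline holding those nodes is already in flight.
                 */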
                unsigned int p = n << 4;

                if (p < t->size)
                        prefetch(&t->tree[p]);

                j = n;
                f = &t->tree[j];

                if (likely(f->exponent != 127)) {
                        if (f->mantissa >= bfloat_mantissa(search, f))
                                n = j * 2;
                        else
                                n = j * 2 + 1;
                } else {
                        if (bkey_cmp(tree_to_bkey(t, j), search) > 0)
                                n = j * 2;
                        else
                                n = j * 2 + 1;
                }
        } while (n < t->size);

        inorder = to_inorder(j, t);

        /*
         * n would have been the node we recursed to - the low bit tells us if
         * we recursed left or recursed right.
         */
        if (n & 1) {
                l = cacheline_to_bkey(t, inorder, f->m);

                if (++inorder != t->size) {
                        f = &t->tree[inorder_next(j, t->size)];
                        r = cacheline_to_bkey(t, inorder, f->m);
                } else
                        r = bset_bkey_last(t->data);
        } else {
                r = cacheline_to_bkey(t, inorder, f->m);

                if (--inorder) {
                        f = &t->tree[inorder_prev(j, t->size)];
                        l = cacheline_to_bkey(t, inorder, f->m);
                } else
                        l = t->data->start;
        }

        return (struct bset_search_iter) {l, r};
}

struct bkey *__bch_bset_search(struct btree_keys *b, struct bset_tree *t,
                               const struct bkey *search)
{
        struct bset_search_iter i;

        /*
         * First we search for the right cacheline, then we finish with a
         * linear search within that cacheline.
         *
         * To search for the cacheline, there are three different possibilities:
         *  * The set is too small to have a search tree, so we just do a linear
         *    search over the whole set.
         *  * The set is the one we're currently inserting into; keeping a full
         *    auxiliary search tree up to date would be too expensive, so we
         *    use a much simpler lookup table to do a binary search -
         *    bset_search_write_set().
         *  * Or we use the auxiliary search tree we constructed earlier -
         *    bset_search_tree()
         */

        if (unlikely(!t->size)) {
                i.l = t->data->start;
                i.r = bset_bkey_last(t->data);
        } else if (bset_written(b, t)) {
                /*
                 * Each node in the auxiliary search tree covers a certain range
                 * of bits, and keys above and below the set it covers might
                 * differ outside those bits - so we have to special case the
                 * start and end - handle that here:
                 */

                if (unlikely(bkey_cmp(search, &t->end) >= 0))
                        return bset_bkey_last(t->data);

                if (unlikely(bkey_cmp(search, t->data->start) < 0))
                        return t->data->start;

                i = bset_search_tree(t, search);
        } else {
                BUG_ON(!b->nsets &&
                       t->size < bkey_to_cacheline(t, bset_bkey_last(t->data)));

                i = bset_search_write_set(t, search);
        }

        if (btree_keys_expensive_checks(b)) {
                BUG_ON(bset_written(b, t) &&
                       i.l != t->data->start &&
                       bkey_cmp(tree_to_prev_bkey(t,
                          inorder_to_tree(bkey_to_cacheline(t, i.l), t)),
                                search) > 0);

                BUG_ON(i.r != bset_bkey_last(t->data) &&
                       bkey_cmp(i.r, search) <= 0);
        }

        while (likely(i.l != i.r) &&
               bkey_cmp(i.l, search) <= 0)
                i.l = bkey_next(i.l);

        return i.l;
}

/* Btree iterator */

typedef bool (btree_iter_cmp_fn)(struct btree_iter_set,
                                 struct btree_iter_set);

static inline bool btree_iter_cmp(struct btree_iter_set l,
                                  struct btree_iter_set r)
{
        return bkey_cmp(l.k, r.k) > 0;
}

static inline bool btree_iter_end(struct btree_iter *iter)
{
        return !iter->used;
}

void bch_btree_iter_push(struct btree_iter *iter, struct bkey *k,
                         struct bkey *end)
{
        if (k != end)
                BUG_ON(!heap_add(iter,
                                 ((struct btree_iter_set) { k, end }),
                                 btree_iter_cmp));
}

static struct bkey *__bch_btree_iter_init(struct btree_keys *b,
                                          struct btree_iter *iter,
                                          struct bkey *search,
                                          struct bset_tree *start)
{
        struct bkey *ret = NULL;

        iter->size = ARRAY_SIZE(iter->data);
        iter->used = 0;

#ifdef CONFIG_BCACHE_DEBUG
        iter->b = b;
#endif

        for (; start <= bset_tree_last(b); start++) {
                ret = bch_bset_search(b, start, search);
                bch_btree_iter_push(iter, ret, bset_bkey_last(start->data));
        }

        return ret;
}

struct bkey *bch_btree_iter_init(struct btree_keys *b,
                                 struct btree_iter *iter,
                                 struct bkey *search)
{
        return __bch_btree_iter_init(b, iter, search, b->set);
}

static inline struct bkey *__bch_btree_iter_next(struct btree_iter *iter,
                                                 btree_iter_cmp_fn *cmp)
{
        struct btree_iter_set b __maybe_unused;
        struct bkey *ret = NULL;

        if (!btree_iter_end(iter)) {
                bch_btree_iter_next_check(iter);

                ret = iter->data->k;
                iter->data->k = bkey_next(iter->data->k);

                if (iter->data->k > iter->data->end) {
                        WARN_ONCE(1, "bset was corrupt!\n");
                        iter->data->k = iter->data->end;
                }

                if (iter->data->k == iter->data->end)
                        heap_pop(iter, b, cmp);
                else
                        heap_sift(iter, 0, cmp);
        }

        return ret;
}

struct bkey *bch_btree_iter_next(struct btree_iter *iter)
{
        return __bch_btree_iter_next(iter, btree_iter_cmp);
}

struct bkey *bch_btree_iter_next_filter(struct btree_iter *iter,
                                        struct btree_keys *b, ptr_filter_fn fn)
{
        struct bkey *ret;

        do {
                ret = bch_btree_iter_next(iter);
        } while (ret && fn(b, ret));

        return ret;
}
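
/*
 * Usage sketch (not compiled in): visit every non-bad key in a node in
 * sorted order, merged across all bsets, via the for_each_key_filter()
 * helper from bset.h, which wraps bch_btree_iter_init() and
 * bch_btree_iter_next_filter().
 */
#if 0
static void iter_example(struct btree_keys *b)
{
        struct btree_iter iter;
        struct bkey *k;

        for_each_key_filter(b, k, &iter, bch_ptr_bad)
                pr_info("%llu:%llu\n", KEY_INODE(k), KEY_OFFSET(k));
}
#endif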

/* Mergesort */

void bch_bset_sort_state_free(struct bset_sort_state *state)
{
        mempool_exit(&state->pool);
}

int bch_bset_sort_state_init(struct bset_sort_state *state,
                             unsigned int page_order)
{
        spin_lock_init(&state->time.lock);

        state->page_order = page_order;
        state->crit_factor = int_sqrt(1 << page_order);

        return mempool_init_page_pool(&state->pool, 1, page_order);
}

static void btree_mergesort(struct btree_keys *b, struct bset *out,
                            struct btree_iter *iter,
                            bool fixup, bool remove_stale)
{
        int i;
        struct bkey *k, *last = NULL;
        BKEY_PADDED(k) tmp;
        bool (*bad)(struct btree_keys *, const struct bkey *) = remove_stale
                ? bch_ptr_bad
                : bch_ptr_invalid;

        /* Heapify the iterator, using our comparison function */
        for (i = iter->used / 2 - 1; i >= 0; --i)
                heap_sift(iter, i, b->ops->sort_cmp);

        while (!btree_iter_end(iter)) {
                if (b->ops->sort_fixup && fixup)
                        k = b->ops->sort_fixup(iter, &tmp.k);
                else
                        k = NULL;

                if (!k)
                        k = __bch_btree_iter_next(iter, b->ops->sort_cmp);

                if (bad(b, k))
                        continue;

                if (!last) {
                        last = out->start;
                        bkey_copy(last, k);
                } else if (!bch_bkey_try_merge(b, last, k)) {
                        last = bkey_next(last);
                        bkey_copy(last, k);
                }
        }

        out->keys = last ? (uint64_t *) bkey_next(last) - out->d : 0;

        pr_debug("sorted %i keys\n", out->keys);
}
static void __btree_sort(struct btree_keys *b, struct btree_iter *iter,
                         unsigned int start, unsigned int order, bool fixup,
                         struct bset_sort_state *state)
{
        uint64_t start_time;
        bool used_mempool = false;
        struct bset *out = (void *) __get_free_pages(__GFP_NOWARN|GFP_NOWAIT,
                                                     order);
        if (!out) {
                struct page *outp;

                BUG_ON(order > state->page_order);

                outp = mempool_alloc(&state->pool, GFP_NOIO);
                out = page_address(outp);
                used_mempool = true;
                order = state->page_order;
        }

        start_time = local_clock();

        btree_mergesort(b, out, iter, fixup, false);
        b->nsets = start;

        if (!start && order == b->page_order) {
                /*
                 * Our temporary buffer is the same size as the btree node's
                 * buffer, we can just swap buffers instead of doing a big
                 * memcpy()
                 *
                 * Even if 'out' was allocated from the mempool, it can still
                 * be swapped here, because state->pool is a page pool created
                 * by mempool_init_page_pool(), which allocates pages with
                 * alloc_pages() anyway.
                 */

                out->magic      = b->set->data->magic;
                out->seq        = b->set->data->seq;
                out->version    = b->set->data->version;
                swap(out, b->set->data);
        } else {
                b->set[start].data->keys = out->keys;
                memcpy(b->set[start].data->start, out->start,
                       (void *) bset_bkey_last(out) - (void *) out->start);
        }

        if (used_mempool)
                mempool_free(virt_to_page(out), &state->pool);
        else
                free_pages((unsigned long) out, order);

        bch_bset_build_written_tree(b);

        if (!start)
                bch_time_stats_update(&state->time, start_time);
}

void bch_btree_sort_partial(struct btree_keys *b, unsigned int start,
                            struct bset_sort_state *state)
{
        size_t order = b->page_order, keys = 0;
        struct btree_iter iter;
        int oldsize = bch_count_data(b);

        __bch_btree_iter_init(b, &iter, NULL, &b->set[start]);

        if (start) {
                unsigned int i;

                for (i = start; i <= b->nsets; i++)
                        keys += b->set[i].data->keys;

                order = get_order(__set_bytes(b->set->data, keys));
        }

        __btree_sort(b, &iter, start, order, false, state);

        EBUG_ON(oldsize >= 0 && bch_count_data(b) != oldsize);
}

void bch_btree_sort_and_fix_extents(struct btree_keys *b,
                                    struct btree_iter *iter,
                                    struct bset_sort_state *state)
{
        __btree_sort(b, iter, 0, b->page_order, true, state);
}

void bch_btree_sort_into(struct btree_keys *b, struct btree_keys *new,
                         struct bset_sort_state *state)
{
        uint64_t start_time = local_clock();
        struct btree_iter iter;

        bch_btree_iter_init(b, &iter, NULL);

        btree_mergesort(b, new->set->data, &iter, false, true);

        bch_time_stats_update(&state->time, start_time);

        new->set->size = 0; // XXX: why?
}

#define SORT_CRIT       (4096 / sizeof(uint64_t))

void bch_btree_sort_lazy(struct btree_keys *b, struct bset_sort_state *state)
{
        unsigned int crit = SORT_CRIT;
        int i;

        /* Don't sort if nothing to do */
        if (!b->nsets)
                goto out;

        for (i = b->nsets - 1; i >= 0; --i) {
                crit *= state->crit_factor;

                if (b->set[i].data->keys < crit) {
                        bch_btree_sort_partial(b, i, state);
                        return;
                }
        }

        /* Sort if we'd overflow */
        if (b->nsets + 1 == MAX_BSETS) {
                bch_btree_sort(b, state);
                return;
        }

out:
        bch_bset_build_written_tree(b);
}
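
/*
 * Worked example (sketch): with page_order = 3, crit_factor =
 * int_sqrt(8) = 2 and SORT_CRIT = 512, so for a node with nsets = 2
 * the loop above resorts from set 1 if set[1] holds fewer than
 * 512 * 2 = 1024 keys, or from set 0 if set[0] holds fewer than 2048 -
 * smaller, newer sets are merged more eagerly than the big first set.
 */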

void bch_btree_keys_stats(struct btree_keys *b, struct bset_stats *stats)
{
        unsigned int i;

        for (i = 0; i <= b->nsets; i++) {
                struct bset_tree *t = &b->set[i];
                size_t bytes = t->data->keys * sizeof(uint64_t);
                size_t j;

                if (bset_written(b, t)) {
                        stats->sets_written++;
                        stats->bytes_written += bytes;

                        stats->floats += t->size - 1;

                        for (j = 1; j < t->size; j++)
                                if (t->tree[j].exponent == 127)
                                        stats->failed++;
                } else {
                        stats->sets_unwritten++;
                        stats->bytes_unwritten += bytes;
                }
        }
}