linux/drivers/md/bcache/bset.h
#ifndef _BCACHE_BSET_H
#define _BCACHE_BSET_H

#include <linux/slab.h>

/*
 * BKEYS:
 *
 * A bkey contains a key, a size field, a variable number of pointers, and some
 * ancillary flag bits.
 *
 * We use two different functions for validating bkeys, bch_ptr_invalid() and
 * bch_ptr_bad().
 *
 * bch_ptr_invalid() primarily filters out keys and pointers that would be
 * invalid due to some sort of bug, whereas bch_ptr_bad() filters out keys and
 * pointers that occur in normal practice but don't point to real data.
 *
 * The one exception to the rule that ptr_invalid() filters out invalid keys is
 * that it also filters out keys of size 0 - these are keys that have been
 * completely overwritten. It'd be safe to delete these in memory while leaving
 * them on disk, just unnecessary work - so we filter them out when resorting
 * instead.
 *
 * We can't filter out stale keys when we're resorting, because garbage
 * collection needs to find them to ensure bucket gens don't wrap around -
 * unless we're rewriting the btree node those stale keys still exist on disk.
 *
 * We also implement functions here for removing some number of sectors from the
 * front or the back of a bkey - this is mainly used for fixing overlapping
 * extents, by removing the overlapping sectors from the older key.
 *
 * BSETS:
 *
 * A bset is an array of bkeys laid out contiguously in memory in sorted order,
 * along with a header. A btree node is made up of a number of these, written at
 * different times.
 *
 * There could be many of them on disk, but we never allow there to be more than
 * 4 in memory - we lazily resort as needed.
 *
 * We implement code here for creating and maintaining auxiliary search trees
 * (described below) for searching an individual bset, and on top of that we
 * implement a btree iterator.
 *
 * BTREE ITERATOR:
 *
 * Most of the code in bcache doesn't care about an individual bset - it needs
 * to search entire btree nodes and iterate over them in sorted order.
 *
 * The btree iterator code serves both functions; it iterates through the keys
 * in a btree node in sorted order, starting from either keys after a specific
 * point (if you pass it a search key) or the start of the btree node.
 *
 * AUXILIARY SEARCH TREES:
 *
 * Since keys are variable length, we can't use a binary search on a bset - we
 * wouldn't be able to find the start of the next key. But binary searches are
 * slow anyways, due to terrible cache behaviour; bcache originally used binary
 * searches and that code topped out at under 50k lookups/second.
 *
 * So we need to construct some sort of lookup table. Since we only insert keys
 * into the last (unwritten) set, most of the keys within a given btree node are
 * usually in sets that are mostly constant. We use two different types of
 * lookup tables to take advantage of this.
 *
 * Both lookup tables share in common that they don't index every key in the
 * set; they index one key every BSET_CACHELINE bytes, and then a linear search
 * is used for the rest.
 *
 * For sets that have been written to disk and are no longer being inserted
 * into, we construct a binary search tree in an array - traversing a binary
 * search tree in an array gives excellent locality of reference and is very
 * fast, since both children of any node are adjacent to each other in memory
 * (and their grandchildren, and great grandchildren...) - this means
 * prefetching can be used to great effect.
 *
 * It's quite useful performance-wise to keep these nodes small - not just
 * because they're more likely to be in L2, but also because we can prefetch
 * more nodes on a single cacheline and thus prefetch more iterations in advance
 * when traversing this tree.
 *
 * Nodes in the auxiliary search tree must contain both a key to compare against
 * (we don't want to fetch the key from the set, that would defeat the purpose),
 * and a pointer to the key. We use a few tricks to compress both of these.
 *
 * To compress the pointer, we take advantage of the fact that one node in the
 * search tree corresponds to precisely BSET_CACHELINE bytes in the set. We have
 * a function (to_inorder()) that takes the index of a node in a binary tree and
 * returns what its index would be in an inorder traversal, so we only have to
 * store the low bits of the offset.
 *
 * The key is 84 bits (KEY_DEV + key->key, the offset on the device). To
 * compress that, we take advantage of the fact that when we're traversing the
 * search tree at every iteration we know that both our search key and the key
 * we're looking for lie within some range - bounded by our previous
 * comparisons. (We special case the start of a search so that this is true even
 * at the root of the tree).
 *
 * So if we know the key we're looking for is between a and b, and a and b
 * don't differ above bit 50, we don't need to check anything higher than bit
 * 50.
 *
 * We don't usually need the rest of the bits, either; we only need enough bits
 * to partition the key range we're currently checking.  Consider key n - the
 * key our auxiliary search tree node corresponds to, and key p, the key
 * immediately preceding n.  The lowest bit we need to store in the auxiliary
 * search tree is the highest bit that differs between n and p.
 *
 * Note that this could be bit 0 - we might sometimes need all 80 bits to do the
 * comparison. But we'd really like our nodes in the auxiliary search tree to be
 * of fixed size.
 *
 * The solution is to make them fixed size, and when we're constructing a node
 * check if p and n differed in the bits we needed them to. If they didn't, we
 * flag that node, and when doing lookups we fall back to comparing against the
 * real key. As long as this doesn't happen too often (and it seems to reliably
 * happen a bit less than 1% of the time), we win - even on failures, that key
 * is then more likely to be in cache than if we were doing binary searches all
 * the way, since we're touching so much less memory.
 *
 * The keys in the auxiliary search tree are stored in (software) floating
 * point, with an exponent and a mantissa. The exponent needs to be big enough
 * to address all the bits in the original key, but the number of bits in the
 * mantissa is somewhat arbitrary; more bits just gets us fewer failures.
 *
 * We need 7 bits for the exponent and 3 bits for the key's offset (since keys
 * are 8 byte aligned); using 22 bits for the mantissa means a node is 4 bytes.
 * We need one node per 128 bytes in the btree node, which means the auxiliary
 * search trees take up 3% as much memory as the btree itself.
 *
 * Constructing these auxiliary search trees is moderately expensive, and we
 * don't want to be constantly rebuilding the search tree for the last set
 * whenever we insert another key into it. For the unwritten set, we use a much
 * simpler lookup table - it's just a flat array, so index i in the lookup table
 * corresponds to the i-th range of BSET_CACHELINE bytes in the set. Indexing
 * within each byte range works the same as with the auxiliary search trees.
 *
 * These are much easier to keep up to date when we insert a key - we do it
 * somewhat lazily; when we shift a key up we usually just increment the pointer
 * to it, and only when it would overflow do we go to the trouble of finding the
 * first key in that range of bytes again.
 */
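
/*
 * A minimal sketch of the array layout described above, not bcache's
 * actual lookup code (see bset_search_tree() in bset.c for that): the
 * tree is stored breadth-first with the root at index 1, so node j's
 * children live at 2*j and 2*j + 1 and the first few levels of the walk
 * all land on the same handful of cachelines.  With t a struct bset_tree
 * (defined below) and cmp_to_node() standing in for the bkey_float
 * comparison:
 *
 *      unsigned j = 1;
 *
 *      while (j < t->size) {
 *              prefetch(&t->tree[j << 4]);     // descendants a few levels
 *                                              // down (real code clamps
 *                                              // this index to the array)
 *              j = (j << 1) | (cmp_to_node(search, j) > 0);
 *      }
 *
 * When the loop exits, j (mapped through to_inorder()) names the
 * cacheline of the bset to finish with a linear search.
 */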

/* Btree key comparison/iteration */

#define MAX_BSETS               4U

struct btree_iter {
        size_t size, used;
#ifdef CONFIG_BCACHE_DEBUG
        struct btree *b;
#endif
        struct btree_iter_set {
                struct bkey *k, *end;
        } data[MAX_BSETS];
};

struct bset_tree {
        /*
         * We construct a binary tree in an array as if the array
         * started at 1, so that things line up on the same cachelines
         * better: see comments in bset.c at cacheline_to_bkey() for
         * details
         */

        /* size of the binary tree and prev array */
        unsigned        size;

        /* function of size - precalculated for to_inorder() */
        unsigned        extra;

        /* copy of the last key in the set */
        struct bkey     end;
        struct bkey_float *tree;

        /*
         * The nodes in the bset tree point to specific keys - this
         * array holds the size of the previous key.
         *
         * Conceptually it's a member of struct bkey_float, but we want
         * to keep bkey_float to 4 bytes and prev isn't used in the fast
         * path.
         */
        uint8_t         *prev;

        /* The actual btree node, with pointers to each sorted set */
        struct bset     *data;
};

static __always_inline int64_t bkey_cmp(const struct bkey *l,
                                        const struct bkey *r)
{
        return unlikely(KEY_INODE(l) != KEY_INODE(r))
                ? (int64_t) KEY_INODE(l) - (int64_t) KEY_INODE(r)
                : (int64_t) KEY_OFFSET(l) - (int64_t) KEY_OFFSET(r);
}
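
/*
 * Illustrative only, assuming the KEY() initializer from bcache.h:
 * bkey_cmp() orders keys by inode first and end offset second, so
 *
 *      bkey_cmp(&KEY(1, 100, 0), &KEY(1, 200, 0)) < 0
 *      bkey_cmp(&KEY(2,  10, 0), &KEY(1, 200, 0)) > 0
 *      bkey_cmp(&KEY(1, 100, 0), &KEY(1, 100, 8)) == 0
 *
 * Note that the size field doesn't participate in the comparison; two
 * extents with the same end offset compare equal.
 */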

/* Keylists */

struct keylist {
        union {
                struct bkey             *keys;
                uint64_t                *keys_p;
        };
        union {
                struct bkey             *top;
                uint64_t                *top_p;
        };

        /* Enough room for btree_split's keys without realloc */
#define KEYLIST_INLINE          16
        uint64_t                inline_keys[KEYLIST_INLINE];
};

static inline void bch_keylist_init(struct keylist *l)
{
        l->top_p = l->keys_p = l->inline_keys;
}

static inline void bch_keylist_push(struct keylist *l)
{
        l->top = bkey_next(l->top);
}

static inline void bch_keylist_add(struct keylist *l, struct bkey *k)
{
        bkey_copy(l->top, k);
        bch_keylist_push(l);
}

static inline bool bch_keylist_empty(struct keylist *l)
{
        return l->top == l->keys;
}

static inline void bch_keylist_reset(struct keylist *l)
{
        l->top = l->keys;
}

static inline void bch_keylist_free(struct keylist *l)
{
        if (l->keys_p != l->inline_keys)
                kfree(l->keys_p);
}

static inline size_t bch_keylist_nkeys(struct keylist *l)
{
        return l->top_p - l->keys_p;
}

static inline size_t bch_keylist_bytes(struct keylist *l)
{
        return bch_keylist_nkeys(l) * sizeof(uint64_t);
}

struct bkey *bch_keylist_pop(struct keylist *);
void bch_keylist_pop_front(struct keylist *);
int bch_keylist_realloc(struct keylist *, int, struct cache_set *);
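
/*
 * Hypothetical usage sketch, not lifted from the bcache code: a keylist
 * starts out using its inline buffer, keys are appended at top, and the
 * list is drained front to back.  bch_keylist_free() only frees memory
 * if bch_keylist_realloc() had to move off the inline buffer.
 *
 *      struct keylist keys;
 *      struct bkey *k;
 *
 *      bch_keylist_init(&keys);
 *      bch_keylist_add(&keys, &some_key);      // some_key: caller-provided bkey
 *
 *      while (!bch_keylist_empty(&keys)) {
 *              k = keys.keys;                  // oldest key still queued
 *              // ... use k ...
 *              bch_keylist_pop_front(&keys);
 *      }
 *
 *      bch_keylist_free(&keys);
 */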

void bch_bkey_copy_single_ptr(struct bkey *, const struct bkey *,
                              unsigned);
bool __bch_cut_front(const struct bkey *, struct bkey *);
bool __bch_cut_back(const struct bkey *, struct bkey *);

static inline bool bch_cut_front(const struct bkey *where, struct bkey *k)
{
        BUG_ON(bkey_cmp(where, k) > 0);
        return __bch_cut_front(where, k);
}

static inline bool bch_cut_back(const struct bkey *where, struct bkey *k)
{
        BUG_ON(bkey_cmp(where, &START_KEY(k)) < 0);
        return __bch_cut_back(where, k);
}
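
/*
 * Rough illustration of how overlap fixing uses these helpers; the key
 * values are made up.  In bcache an extent's KEY_OFFSET() is the offset
 * of its *end*, so a key k covering sectors [100, 200) on inode 1 has
 * offset 200 and size 100.  Then:
 *
 *      bch_cut_front(&KEY(1, 150, 0), k);      // k now covers [150, 200)
 *
 * or, starting again from the original k:
 *
 *      bch_cut_back(&KEY(1, 150, 0), k);       // k now covers [100, 150)
 *
 * Both helpers return true when they actually modified the key and
 * false when there was nothing to trim.
 */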

const char *bch_ptr_status(struct cache_set *, const struct bkey *);
bool bch_btree_ptr_invalid(struct cache_set *, const struct bkey *);
bool bch_extent_ptr_invalid(struct cache_set *, const struct bkey *);

bool bch_ptr_bad(struct btree *, const struct bkey *);

static inline uint8_t gen_after(uint8_t a, uint8_t b)
{
        uint8_t r = a - b;
        return r > 128U ? 0 : r;
}
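
/*
 * Worked example (values chosen for illustration): bucket generations
 * are 8 bit counters that wrap, so the difference is taken modulo 256
 * and anything that looks more than 128 generations "ahead" is treated
 * as not ahead at all.
 *
 *      gen_after(5, 3)   == 2          // a is 2 gens ahead of b
 *      gen_after(1, 250) == 7          // still ahead, across the wrap
 *      gen_after(3, 5)   == 0          // 3 - 5 wraps to 254 > 128
 *
 * ptr_stale() below is just gen_after(bucket gen, pointer gen): a
 * nonzero result means the bucket has been reused since the pointer was
 * created, so the pointer no longer refers to valid data.
 */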

static inline uint8_t ptr_stale(struct cache_set *c, const struct bkey *k,
                                unsigned i)
{
        return gen_after(PTR_BUCKET(c, k, i)->gen, PTR_GEN(k, i));
}

static inline bool ptr_available(struct cache_set *c, const struct bkey *k,
                                 unsigned i)
{
        return (PTR_DEV(k, i) < MAX_CACHES_PER_SET) && PTR_CACHE(c, k, i);
}

typedef bool (*ptr_filter_fn)(struct btree *, const struct bkey *);

struct bkey *bch_btree_iter_next(struct btree_iter *);
struct bkey *bch_btree_iter_next_filter(struct btree_iter *,
                                        struct btree *, ptr_filter_fn);

void bch_btree_iter_push(struct btree_iter *, struct bkey *, struct bkey *);
struct bkey *__bch_btree_iter_init(struct btree *, struct btree_iter *,
                                   struct bkey *, struct bset_tree *);
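
/*
 * Iteration sketch, assuming a struct btree *b whose sorted sets live in
 * b->sets[] (an assumption - see btree.h); illustrative, not a verbatim
 * caller:
 *
 *      struct btree_iter iter;
 *      struct bkey *k;
 *
 *      __bch_btree_iter_init(b, &iter, NULL, b->sets);
 *
 *      while ((k = bch_btree_iter_next_filter(&iter, b, bch_ptr_bad)))
 *              ;       // k walks every good key in the node in sorted order
 *
 * Passing a search key instead of NULL starts the walk at the keys after
 * that point; using bch_btree_iter_next() instead of the filtered
 * variant also returns the keys that bch_ptr_bad() would skip.
 */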

/* 32 bits total: */
#define BKEY_MID_BITS           3
#define BKEY_EXPONENT_BITS      7
#define BKEY_MANTISSA_BITS      22
#define BKEY_MANTISSA_MASK      ((1 << BKEY_MANTISSA_BITS) - 1)

struct bkey_float {
        unsigned        exponent:BKEY_EXPONENT_BITS;
        unsigned        m:BKEY_MID_BITS;
        unsigned        mantissa:BKEY_MANTISSA_BITS;
} __packed;
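
/*
 * Size check, spelled out: 7 exponent bits + 3 "m" bits + 22 mantissa
 * bits = 32 bits, so a packed bkey_float is exactly 4 bytes.  With one
 * bkey_float per BSET_CACHELINE (128) bytes of bset, the tree costs
 * 4/128 of the node - the ~3% quoted in the comment at the top of this
 * file - and the prev[] array adds another byte per cacheline on top.
 */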

/*
 * BSET_CACHELINE was originally intended to match the hardware cacheline size -
 * it used to be 64, but I realized the lookup code would touch slightly less
 * memory if it was 128.
 *
 * It defines the number of bytes (in struct bset) per struct bkey_float in
 * the auxiliary search tree - when we're done searching the bset_float tree we
 * have this many bytes left that we do a linear search over.
 *
 * Since (after level 5) every level of the bset_tree is on a new cacheline,
 * we're touching one fewer cacheline in the bset tree in exchange for one more
 * cacheline in the linear search - but the linear search might stop before it
 * gets to the second cacheline.
 */

#define BSET_CACHELINE          128
#define bset_tree_space(b)      (btree_data_space(b) / BSET_CACHELINE)

#define bset_tree_bytes(b)      (bset_tree_space(b) * sizeof(struct bkey_float))
#define bset_prev_bytes(b)      (bset_tree_space(b) * sizeof(uint8_t))

void bch_bset_init_next(struct btree *);

void bch_bset_fix_invalidated_key(struct btree *, struct bkey *);
void bch_bset_fix_lookup_table(struct btree *, struct bkey *);

struct bkey *__bch_bset_search(struct btree *, struct bset_tree *,
                               const struct bkey *);

/*
 * Returns the first key that is strictly greater than search
 */
static inline struct bkey *bch_bset_search(struct btree *b, struct bset_tree *t,
                                           const struct bkey *search)
{
        return search ? __bch_bset_search(b, t, search) : t->data->start;
}

#define PRECEDING_KEY(_k)                                       \
({                                                              \
        struct bkey *_ret = NULL;                               \
                                                                \
        if (KEY_INODE(_k) || KEY_OFFSET(_k)) {                  \
                _ret = &KEY(KEY_INODE(_k), KEY_OFFSET(_k), 0);  \
                                                                \
                if (!_ret->low)                                 \
                        _ret->high--;                           \
                _ret->low--;                                    \
        }                                                       \
                                                                \
        _ret;                                                   \
})
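
/*
 * Illustrative note: since bch_bset_search() returns the first key
 * strictly greater than its argument, searching for PRECEDING_KEY(k)
 * is how a caller gets "first key >= k".  For example,
 * PRECEDING_KEY(&KEY(5, 100, 0)) yields a key for inode 5, offset 99,
 * while PRECEDING_KEY() of the zero key is NULL (nothing precedes it,
 * so the search simply starts at the beginning of the set).
 */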

bool bch_bkey_try_merge(struct btree *, struct bkey *, struct bkey *);
void bch_btree_sort_lazy(struct btree *);
void bch_btree_sort_into(struct btree *, struct btree *);
void bch_btree_sort_and_fix_extents(struct btree *, struct btree_iter *);
void bch_btree_sort_partial(struct btree *, unsigned);

static inline void bch_btree_sort(struct btree *b)
{
        bch_btree_sort_partial(b, 0);
}

int bch_bset_print_stats(struct cache_set *, char *);

#endif