linux/drivers/md/bcache/bset.h
#ifndef _BCACHE_BSET_H
#define _BCACHE_BSET_H

/*
 * BKEYS:
 *
 * A bkey contains a key, a size field, a variable number of pointers, and some
 * ancillary flag bits.
 *
 * We use two different functions for validating bkeys, bch_ptr_invalid() and
 * bch_ptr_bad().
 *
 * bch_ptr_invalid() primarily filters out keys and pointers that would be
 * invalid due to some sort of bug, whereas bch_ptr_bad() filters out keys and
 * pointers that occur in normal practice but don't point to real data.
 *
 * The one exception to the rule that bch_ptr_invalid() filters out invalid
 * keys is that it also filters out keys of size 0 - these are keys that have
 * been completely overwritten. It'd be safe to delete these in memory while
 * leaving them on disk, just unnecessary work - so we filter them out when
 * resorting instead.
 *
 * We can't filter out stale keys when we're resorting, because garbage
 * collection needs to find them to ensure bucket gens don't wrap around -
 * unless we're rewriting the btree node those stale keys still exist on disk.
 *
 * We also implement functions here for removing some number of sectors from
 * the front or the back of a bkey - this is mainly used for fixing
 * overlapping extents, by removing the overlapping sectors from the older
 * key.
 *
 * BSETS:
 *
 * A bset is an array of bkeys laid out contiguously in memory in sorted
 * order, along with a header. A btree node is made up of a number of these,
 * written at different times.
 *
 * There could be many of them on disk, but we never allow there to be more
 * than 4 in memory - we lazily resort as needed.
 *
 * We implement code here for creating and maintaining auxiliary search trees
 * (described below) for searching an individual bset, and on top of that we
 * implement a btree iterator.
 *
 * BTREE ITERATOR:
 *
 * Most of the code in bcache doesn't care about an individual bset - it needs
 * to search entire btree nodes and iterate over them in sorted order.
 *
 * The btree iterator code serves both functions; it iterates through the keys
 * in a btree node in sorted order, starting from either keys after a specific
 * point (if you pass it a search key) or the start of the btree node.
 *
 * AUXILIARY SEARCH TREES:
 *
 * Since keys are variable length, we can't use a binary search on a bset - we
 * wouldn't be able to find the start of the next key. But binary searches are
 * slow anyways, due to terrible cache behaviour; bcache originally used
 * binary searches and that code topped out at under 50k lookups/second.
 *
 * So we need to construct some sort of lookup table. Since we only insert
 * keys into the last (unwritten) set, most of the keys within a given btree
 * node are usually in sets that are mostly constant. We use two different
 * types of lookup tables to take advantage of this.
 *
 * What both lookup tables have in common is that they don't index every key
 * in the set; they index one key every BSET_CACHELINE bytes, and then a
 * linear search is used for the rest.
 *
 * For sets that have been written to disk and are no longer being inserted
 * into, we construct a binary search tree in an array - traversing a binary
 * search tree in an array gives excellent locality of reference and is very
 * fast, since both children of any node are adjacent to each other in memory
 * (and their grandchildren, and great grandchildren...) - this means
 * prefetching can be used to great effect.
 *
 * It's quite useful performance-wise to keep these nodes small - not just
 * because they're more likely to be in L2, but also because we can prefetch
 * more nodes on a single cacheline and thus prefetch more iterations in
 * advance when traversing this tree.
 *
 * Nodes in the auxiliary search tree must contain both a key to compare
 * against (we don't want to fetch the key from the set, that would defeat
 * the purpose), and a pointer to the key. We use a few tricks to compress
 * both of these.
 *
 * To compress the pointer, we take advantage of the fact that one node in the
 * search tree corresponds to precisely BSET_CACHELINE bytes in the set. We
 * have a function (to_inorder()) that takes the index of a node in a binary
 * tree and returns what its index would be in an inorder traversal, so we
 * only have to store the low bits of the offset.
 *
 * The key is 84 bits (KEY_DEV + key->key, the offset on the device). To
 * compress that, we take advantage of the fact that when we're traversing
 * the search tree at every iteration we know that both our search key and
 * the key we're looking for lie within some range - bounded by our previous
 * comparisons. (We special case the start of a search so that this is true
 * even at the root of the tree).
 *
 * So if we know the key we're looking for is between a and b, and a and b
 * don't differ above bit 50, we don't need to compare anything above bit 50.
 *
 * We don't usually need the rest of the bits, either; we only need enough
 * bits to partition the key range we're currently checking. Consider key n -
 * the key our auxiliary search tree node corresponds to, and key p, the key
 * immediately preceding n. The lowest bit we need to store in the auxiliary
 * search tree is the highest bit that differs between n and p.
 *
 * Note that this could be bit 0 - we might sometimes need all 84 bits to do
 * the comparison. But we'd really like our nodes in the auxiliary search
 * tree to be of fixed size.
 *
 * The solution is to make them fixed size, and when we're constructing a
 * node check if p and n differed in the bits we needed them to. If they
 * don't, we flag that node, and when doing lookups we fall back to comparing
 * against the real key. As long as this doesn't happen too often (and it
 * seems to reliably happen a bit less than 1% of the time), we win - even on
 * failures, that key is then more likely to be in cache than if we were
 * doing binary searches all the way, since we're touching so much less
 * memory.
 *
 * The keys in the auxiliary search tree are stored in (software) floating
 * point, with an exponent and a mantissa. The exponent needs to be big
 * enough to address all the bits in the original key, but the number of bits
 * in the mantissa is somewhat arbitrary; more bits just gets us fewer
 * failures.
 *
 * We need 7 bits for the exponent and 3 bits for the key's offset (since
 * keys are 8 byte aligned); using 22 bits for the mantissa means a node is
 * 4 bytes (7 + 3 + 22 = 32 bits). We need one such node per BSET_CACHELINE
 * (128) bytes in the btree node, which means the auxiliary search trees take
 * up 4/128, i.e. about 3%, as much memory as the btree itself.
 *
 * Constructing these auxiliary search trees is moderately expensive, and we
 * don't want to be constantly rebuilding the search tree for the last set
 * whenever we insert another key into it. For the unwritten set, we use a
 * much simpler lookup table - it's just a flat array, so index i in the
 * lookup table corresponds to the i'th range of BSET_CACHELINE bytes in the
 * set. Indexing within each byte range works the same as with the auxiliary
 * search trees.
 *
 * These are much easier to keep up to date when we insert a key - we do it
 * somewhat lazily; when we shift a key up we usually just increment the
 * pointer to it, only when it would overflow do we go to the trouble of
 * finding the first key in that range of bytes again.
 */
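
/*
 * To make the two-level scheme above concrete: a lookup first narrows the
 * search to one BSET_CACHELINE-sized range of the set (via the binary search
 * tree for a written set, or a binary search over the flat table for the
 * unwritten set), then linearly scans within that range. A minimal sketch,
 * with bounds checks omitted - the two example_* helpers are hypothetical
 * stand-ins for the real lookup code in bset.c:
 *
 *	struct bkey *example_search(struct bset_tree *t,
 *				    const struct bkey *search)
 *	{
 *		unsigned cacheline = example_find_cacheline(t, search);
 *		struct bkey *k = example_cacheline_to_bkey(t, cacheline);
 *
 *		while (bkey_cmp(k, search) < 0)
 *			k = bkey_next(k);
 *
 *		return k;
 *	}
 */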

/* Btree key comparison/iteration */

struct btree_iter {
	size_t size, used;
	struct btree_iter_set {
		struct bkey *k, *end;
	} data[MAX_BSETS];
};
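
/*
 * A sketch of typical iterator usage (assuming struct btree's array of
 * sorted sets is b->sets, as defined in btree.h; do_something_with() is a
 * hypothetical consumer): initialize with a search key, or NULL to start
 * from the beginning, then pull keys in sorted order until the iterator is
 * exhausted.
 *
 *	struct btree_iter iter;
 *	struct bkey *k;
 *
 *	__bch_btree_iter_init(b, &iter, NULL, b->sets);
 *	while ((k = bch_btree_iter_next(&iter)))
 *		do_something_with(k);
 */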

struct bset_tree {
	/*
	 * We construct a binary tree in an array as if the array
	 * started at 1, so that things line up on the same cachelines
	 * better: see comments in bset.c at cacheline_to_bkey() for
	 * details
	 */

	/* size of the binary tree and prev array */
	unsigned	size;

	/* function of size - precalculated for to_inorder() */
	unsigned	extra;

	/* copy of the last key in the set */
	struct bkey	end;
	struct bkey_float *tree;

	/*
	 * The nodes in the bset tree point to specific keys - this
	 * array holds the size of the key preceding each of those
	 * keys.
	 *
	 * Conceptually it's a member of struct bkey_float, but we want
	 * to keep bkey_float to 4 bytes and prev isn't used in the fast
	 * path.
	 */
	uint8_t		*prev;

	/* The actual btree node, with pointers to each sorted set */
	struct bset	*data;
};
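
/*
 * Because the tree array is treated as starting at index 1, the children of
 * node j are at 2*j and 2*j + 1, so descending the tree is just shifts and
 * adds. A simplified sketch of a descent (the real search in bset.c also
 * prefetches descendants and falls back to comparing the original key when
 * a node's mantissa couldn't distinguish it from its neighbour;
 * example_bfloat_less() is a hypothetical stand-in for the floating point
 * comparison described in the comment at the top of this file):
 *
 *	unsigned j = 1;
 *
 *	while (j < t->size)
 *		j = j * 2 + example_bfloat_less(t, j, search);
 */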

static __always_inline int64_t bkey_cmp(const struct bkey *l,
					const struct bkey *r)
{
	return unlikely(KEY_INODE(l) != KEY_INODE(r))
		? (int64_t) KEY_INODE(l) - (int64_t) KEY_INODE(r)
		: (int64_t) KEY_OFFSET(l) - (int64_t) KEY_OFFSET(r);
}

static inline size_t bkey_u64s(const struct bkey *k)
{
	BUG_ON(KEY_CSUM(k) > 1);
	return 2 + KEY_PTRS(k) + (KEY_CSUM(k) ? 1 : 0);
}

static inline size_t bkey_bytes(const struct bkey *k)
{
	return bkey_u64s(k) * sizeof(uint64_t);
}
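
/*
 * For example, an extent key carrying two pointers and no checksum occupies
 * bkey_u64s() == 2 + 2 + 0 = 4 u64s - the two words of the key itself plus
 * one word per pointer - so bkey_bytes() == 32.
 */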

static inline void bkey_copy(struct bkey *dest, const struct bkey *src)
{
	memcpy(dest, src, bkey_bytes(src));
}

static inline void bkey_copy_key(struct bkey *dest, const struct bkey *src)
{
	if (!src)
		src = &KEY(0, 0, 0);

	SET_KEY_INODE(dest, KEY_INODE(src));
	SET_KEY_OFFSET(dest, KEY_OFFSET(src));
}

static inline struct bkey *bkey_next(const struct bkey *k)
{
	uint64_t *d = (void *) k;
	return (struct bkey *) (d + bkey_u64s(k));
}
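
/*
 * Since keys are variable length, bkey_next() is how code steps through a
 * bset. A sketch of walking every key in one bset, assuming struct bset's
 * "keys" field counts u64s and "d" is the flat key array (as defined in
 * bcache.h); do_something_with() is hypothetical:
 *
 *	struct bkey *k = i->start;
 *	struct bkey *end = (struct bkey *) &i->d[i->keys];
 *
 *	while (k < end) {
 *		do_something_with(k);
 *		k = bkey_next(k);
 *	}
 */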

/* Keylists */

struct keylist {
	struct bkey		*top;
	union {
		uint64_t		*list;
		struct bkey		*bottom;
	};

	/* Enough room for btree_split's keys without realloc */
#define KEYLIST_INLINE		16
	uint64_t		d[KEYLIST_INLINE];
};

static inline void bch_keylist_init(struct keylist *l)
{
	l->top = (void *) (l->list = l->d);
}

static inline void bch_keylist_push(struct keylist *l)
{
	l->top = bkey_next(l->top);
}

static inline void bch_keylist_add(struct keylist *l, struct bkey *k)
{
	bkey_copy(l->top, k);
	bch_keylist_push(l);
}

static inline bool bch_keylist_empty(struct keylist *l)
{
	return l->top == (void *) l->list;
}

static inline void bch_keylist_free(struct keylist *l)
{
	if (l->list != l->d)
		kfree(l->list);
}

void bch_keylist_copy(struct keylist *, struct keylist *);
struct bkey *bch_keylist_pop(struct keylist *);
int bch_keylist_realloc(struct keylist *, int, struct cache_set *);
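
/*
 * A keylist batches up keys, typically for insertion into the btree. A
 * minimal sketch of the usual pattern - build the list up, drain it, free
 * it (the keys and do_insert() here are hypothetical):
 *
 *	struct keylist keys;
 *
 *	bch_keylist_init(&keys);
 *	bch_keylist_add(&keys, &some_key);
 *	bch_keylist_add(&keys, &another_key);
 *
 *	while (!bch_keylist_empty(&keys))
 *		do_insert(bch_keylist_pop(&keys));
 *
 *	bch_keylist_free(&keys);
 *
 * Note that bch_keylist_free() is a no-op unless bch_keylist_realloc() had
 * to move the list off the inline d[] array.
 */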

void bch_bkey_copy_single_ptr(struct bkey *, const struct bkey *,
			      unsigned);
bool __bch_cut_front(const struct bkey *, struct bkey *);
bool __bch_cut_back(const struct bkey *, struct bkey *);

static inline bool bch_cut_front(const struct bkey *where, struct bkey *k)
{
	BUG_ON(bkey_cmp(where, k) > 0);
	return __bch_cut_front(where, k);
}

static inline bool bch_cut_back(const struct bkey *where, struct bkey *k)
{
	BUG_ON(bkey_cmp(where, &START_KEY(k)) < 0);
	return __bch_cut_back(where, k);
}

const char *bch_ptr_status(struct cache_set *, const struct bkey *);
bool __bch_ptr_invalid(struct cache_set *, int level, const struct bkey *);
bool bch_ptr_bad(struct btree *, const struct bkey *);

static inline uint8_t gen_after(uint8_t a, uint8_t b)
{
	uint8_t r = a - b;
	return r > 128U ? 0 : r;
}
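
/*
 * Bucket gens are 8 bit counters that are allowed to wrap: gen_after()
 * returns how far a is ahead of b, or 0 if a isn't ahead at all. E.g.
 * gen_after(3, 250) == 9 (the counter wrapped), while gen_after(250, 3)
 * == 0.
 */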

static inline uint8_t ptr_stale(struct cache_set *c, const struct bkey *k,
				unsigned i)
{
	return gen_after(PTR_BUCKET(c, k, i)->gen, PTR_GEN(k, i));
}

static inline bool ptr_available(struct cache_set *c, const struct bkey *k,
				 unsigned i)
{
	return (PTR_DEV(k, i) < MAX_CACHES_PER_SET) && PTR_CACHE(c, k, i);
}

typedef bool (*ptr_filter_fn)(struct btree *, const struct bkey *);

struct bkey *bch_next_recurse_key(struct btree *, struct bkey *);
struct bkey *bch_btree_iter_next(struct btree_iter *);
struct bkey *bch_btree_iter_next_filter(struct btree_iter *,
					struct btree *, ptr_filter_fn);

void bch_btree_iter_push(struct btree_iter *, struct bkey *, struct bkey *);
struct bkey *__bch_btree_iter_init(struct btree *, struct btree_iter *,
				   struct bkey *, struct bset_tree *);

/* 32 bits total: */
#define BKEY_MID_BITS		3
#define BKEY_EXPONENT_BITS	7
#define BKEY_MANTISSA_BITS	22
#define BKEY_MANTISSA_MASK	((1 << BKEY_MANTISSA_BITS) - 1)

struct bkey_float {
	unsigned	exponent:BKEY_EXPONENT_BITS;
	unsigned	m:BKEY_MID_BITS;
	unsigned	mantissa:BKEY_MANTISSA_BITS;
} __packed;
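
/*
 * During a lookup, the search key is reduced to the same floating point
 * form and compared against the stored mantissa, rather than comparing full
 * keys. Roughly (a simplified sketch of the mantissa extraction in bset.c -
 * the real code also lets the exponent reach past the offset into the high
 * word of the key):
 *
 *	unsigned mantissa = (KEY_OFFSET(search) >> f->exponent) &
 *		BKEY_MANTISSA_MASK;
 *
 *	if (mantissa < f->mantissa)
 *		... the search key sorts before this node's key ...
 */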

/*
 * BSET_CACHELINE was originally intended to match the hardware cacheline
 * size - it used to be 64, but I realized the lookup code would touch
 * slightly less memory if it was 128.
 *
 * It defines the number of bytes (in struct bset) per struct bkey_float in
 * the auxiliary search tree - when we're done searching the bkey_float tree
 * we have this many bytes left that we do a linear search over.
 *
 * Since (after level 5) every level of the bset_tree is on a new cacheline,
 * we're touching one fewer cacheline in the bset tree in exchange for one
 * more cacheline in the linear search - but the linear search might stop
 * before it gets to the second cacheline.
 */

#define BSET_CACHELINE		128
#define bset_tree_space(b)	(btree_data_space(b) / BSET_CACHELINE)

#define bset_tree_bytes(b)	(bset_tree_space(b) * sizeof(struct bkey_float))
#define bset_prev_bytes(b)	(bset_tree_space(b) * sizeof(uint8_t))
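
/*
 * For example, if btree_data_space(b) came to 8192 bytes, bset_tree_space(b)
 * would be 8192 / 128 = 64 entries, so the auxiliary search tree would cost
 * 64 * sizeof(struct bkey_float) = 256 bytes (the ~3% figure from the
 * comment at the top of this file) plus 64 bytes for the prev array.
 */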

void bch_bset_init_next(struct btree *);

void bch_bset_fix_invalidated_key(struct btree *, struct bkey *);
void bch_bset_fix_lookup_table(struct btree *, struct bkey *);

struct bkey *__bch_bset_search(struct btree *, struct bset_tree *,
			       const struct bkey *);

static inline struct bkey *bch_bset_search(struct btree *b, struct bset_tree *t,
					   const struct bkey *search)
{
	return search ? __bch_bset_search(b, t, search) : t->data->start;
}

bool bch_bkey_try_merge(struct btree *, struct bkey *, struct bkey *);
void bch_btree_sort_lazy(struct btree *);
void bch_btree_sort_into(struct btree *, struct btree *);
void bch_btree_sort_and_fix_extents(struct btree *, struct btree_iter *);
void bch_btree_sort_partial(struct btree *, unsigned);

static inline void bch_btree_sort(struct btree *b)
{
	bch_btree_sort_partial(b, 0);
}

int bch_bset_print_stats(struct cache_set *, char *);

#endif