#ifndef _BCACHE_H
#define _BCACHE_H

/*
 * SOME HIGH LEVEL CODE DOCUMENTATION:
 *
 * Bcache mostly works with cache sets, cache devices, and backing devices.
 *
 * Support for multiple cache devices hasn't quite been finished off yet, but
 * it's about 95% plumbed through. A cache set and its cache devices are sort
 * of like an md raid array and its component devices. Most of the code doesn't
 * care about individual cache devices; the main abstraction is the cache set.
 *
 * Multiple cache devices are intended to give us the ability to mirror dirty
 * cached data and metadata, without mirroring clean cached data.
 *
 * Backing devices are different, in that they have a lifetime independent of a
 * cache set. When you register a newly formatted backing device it'll come up
 * in passthrough mode, and then you can attach and detach a backing device from
 * a cache set at runtime - while it's mounted and in use. Detaching implicitly
 * invalidates any cached data for that backing device.
 *
 * A cache set can have multiple (many) backing devices attached to it.
 *
 * There are also flash only volumes - these are the reason for the distinction
 * between struct cached_dev and struct bcache_device. A flash only volume
 * works much like a bcache device that has a backing device, except the
 * "cached" data is always dirty. The end result is that we get thin
 * provisioning with very little additional code.
 *
 * Flash only volumes work, but they're not production ready because the moving
 * garbage collector needs more work. More on that later.
 *
 * BUCKETS/ALLOCATION:
 *
 * Bcache is primarily designed for caching, which means that in normal
 * operation all of our available space will be allocated. Thus, we need an
 * efficient way of deleting things from the cache so we can write new things
 * to it.
 *
 * To do this, we first divide the cache device up into buckets. A bucket is
 * the unit of allocation; buckets are typically around 1 mb - anywhere from
 * 128k to 2M+ works efficiently.
 *
 * Each bucket has a 16 bit priority, and an 8 bit generation associated with
 * it. The gens and priorities for all the buckets are stored contiguously and
 * packed on disk (in a linked list of buckets - aside from the superblock, all
 * of bcache's metadata is stored in buckets).
 *
 * The priority is used to implement an LRU. We reset a bucket's priority when
 * we allocate it or on a cache hit, and every so often we decrement the
 * priority of each bucket. It could be used to implement something more
 * sophisticated, if anyone ever gets around to it.
 *
 * The generation is used for invalidating buckets. Each pointer also has an 8
 * bit generation embedded in it; for a pointer to be considered valid, its gen
 * must match the gen of the bucket it points into. Thus, to reuse a bucket all
 * we have to do is increment its gen (and write its new gen to disk; we batch
 * this up).
 *
 * Bcache is entirely COW - we never write twice to a bucket, even buckets that
 * contain metadata (including btree nodes).
 *
 * THE BTREE:
 *
 * Bcache is in large part designed around the btree.
 *
 * At a high level, the btree is just an index of key -> ptr tuples.
 *
 * Keys represent extents, and thus have a size field. Keys also have a variable
 * number of pointers attached to them (potentially zero, which is handy for
 * invalidating the cache).
 *
 * The key itself is an inode:offset pair. The inode number corresponds to a
 * backing device or a flash only volume. The offset is the ending offset of the
 * extent within the inode - not the starting offset; this makes lookups
 * slightly more convenient.
 *
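 * For example: an extent for inode 5 covering sectors 100-107 would be keyed
 * as inode 5, offset 108 (its end), size 8; a lookup for sector 103 finds it
 * by searching for the first key whose offset is greater than 103.
 *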
 * Pointers contain the cache device id, the offset on that device, and an 8 bit
 * generation number. More on the gen later.
 *
 * Index lookups are not fully abstracted - cache lookups in particular are
 * still somewhat mixed in with the btree code, but things are headed in that
 * direction.
 *
 * Updates are fairly well abstracted, though. There are two different ways of
 * updating the btree: insert and replace.
 *
 * BTREE_INSERT will just take a list of keys and insert them into the btree -
 * overwriting (possibly only partially) any extents they overlap with. This is
 * used to update the index after a write.
 *
 * BTREE_REPLACE is really cmpxchg(); it inserts a key into the btree iff it is
 * overwriting a key that matches another given key. This is used for inserting
 * data into the cache after a cache miss, for background writeback, and for
 * the moving garbage collector.
 *
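 * As a sketch (not the literal interface), a replace behaves like:
 *
 *      if (the key currently in the index == the key we read earlier)
 *              insert the new key;
 *      else
 *              drop the insert;
 *
 * done atomically with respect to other btree updates, so if a foreground
 * write races with a cache miss fill or with writeback, the write wins and
 * the now stale insert is dropped.
 *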
 * There is no "delete" operation; deleting things from the index is
 * accomplished either by invalidating pointers (by incrementing a bucket's
 * gen) or by inserting a key with 0 pointers - which will overwrite anything
 * previously present at that location in the index.
 *
 * This means that there are always stale/invalid keys in the btree. They're
 * filtered out by the code that iterates through a btree node, and removed when
 * a btree node is rewritten.
 *
 * BTREE NODES:
 *
 * Our unit of allocation is a bucket, and we can't arbitrarily allocate and
 * free anything smaller than a bucket - so, that's how big our btree nodes are.
 *
 * (If buckets are really big we'll only use part of the bucket for a btree node
 * - no less than 1/4th - but a bucket still contains no more than a single
 * btree node. I'd actually like to change this, but for now we rely on the
 * bucket's gen for deleting btree nodes when we rewrite/split a node.)
 *
 * Anyways, btree nodes are big - big enough to be inefficient with a textbook
 * btree implementation.
 *
 * The way this is solved is that btree nodes are internally log structured; we
 * can append new keys to an existing btree node without rewriting it. This
 * means each set of keys we write is sorted, but the node is not.
 *
 * We maintain this log structure in memory - keeping 1Mb of keys sorted would
 * be expensive, and we have to distinguish between the keys we have written and
 * the keys we haven't. So to do a lookup in a btree node, we have to search
 * each sorted set. But we do merge written sets together lazily, so the cost of
 * these extra searches is quite low (normally most of the keys in a btree node
 * will be in one big set, and then there'll be one or two sets that are much
 * smaller).
 *
 * This log structure makes bcache's btree more of a hybrid between a
 * conventional btree and a compacting data structure, with some of the
 * advantages of both.
 *
 * GARBAGE COLLECTION:
 *
 * We can't just invalidate any bucket - it might contain dirty data or
 * metadata. If it once contained dirty data, other writes might overwrite it
 * later, leaving no valid pointers into that bucket in the index.
 *
 * Thus, the primary purpose of garbage collection is to find buckets to reuse.
 * It also counts how much valid data each bucket currently contains, so that
 * allocation can reuse buckets sooner when they've been mostly overwritten.
 *
 * It also does some things that are really internal to the btree
 * implementation. If a btree node contains pointers that are stale by more than
 * some threshold, it rewrites the btree node to avoid the bucket's generation
 * wrapping around. It also merges adjacent btree nodes if they're empty enough.
 *
 * THE JOURNAL:
 *
 * Bcache's journal is not necessary for consistency; we always strictly
 * order metadata writes so that the btree and everything else is consistent on
 * disk in the event of an unclean shutdown, and in fact bcache had writeback
 * caching (with recovery from unclean shutdown) before journalling was
 * implemented.
 *
 * Rather, the journal is purely a performance optimization; we can't complete a
 * write until we've updated the index on disk; otherwise the cache would be
 * inconsistent in the event of an unclean shutdown. This means that without the
 * journal, on random write workloads we constantly have to update all the leaf
 * nodes in the btree, and those writes will be mostly empty (appending at most
 * a few keys each) - highly inefficient in terms of the amount of metadata
 * written, and it puts more strain on the various btree resorting/compacting
 * code.
 *
 * The journal is just a log of keys we've inserted; on startup we reinsert
 * all the keys in the open journal entries. That means that when we're updating
 * a node in the btree, we can wait until a 4k block of keys fills up before
 * writing them out.
 *
 * For simplicity, we only journal updates to leaf nodes; updates to parent
 * nodes are rare enough (since our leaf nodes are huge) that it wasn't worth
 * the complexity to deal with journalling them (in particular, journal replay)
 * - updates to non leaf nodes just happen synchronously (see btree_split()).
 */

#define pr_fmt(fmt) "bcache: %s() " fmt "\n", __func__

#include <linux/bcache.h>
#include <linux/bio.h>
#include <linux/kobject.h>
#include <linux/list.h>
#include <linux/mutex.h>
#include <linux/rbtree.h>
#include <linux/rwsem.h>
#include <linux/types.h>
#include <linux/workqueue.h>

#include "bset.h"
#include "util.h"
#include "closure.h"

struct bucket {
        atomic_t        pin;
        uint16_t        prio;
        uint8_t         gen;
        uint8_t         last_gc; /* Most out of date gen in the btree */
        uint16_t        gc_mark; /* Bitfield used by GC. See below for field layout */
};

/*
 * I'd use bitfields for these, but I don't trust the compiler not to screw me
 * as multiple threads touch struct bucket without locking
 */

BITMASK(GC_MARK,         struct bucket, gc_mark, 0, 2);
#define GC_MARK_RECLAIMABLE     1
#define GC_MARK_DIRTY           2
#define GC_MARK_METADATA        3
#define GC_SECTORS_USED_SIZE    13
#define MAX_GC_SECTORS_USED     (~(~0ULL << GC_SECTORS_USED_SIZE))
BITMASK(GC_SECTORS_USED, struct bucket, gc_mark, 2, GC_SECTORS_USED_SIZE);
BITMASK(GC_MOVE, struct bucket, gc_mark, 15, 1);
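
/*
 * A minimal usage sketch (the example_* helpers are illustrative only, not
 * part of bcache): BITMASK() generates GC_MARK()/SET_GC_MARK() style
 * accessors over the packed gc_mark field.
 */
static inline bool example_bucket_is_dirty(const struct bucket *b)
{
        /* the low 2 bits of gc_mark hold the mark */
        return GC_MARK(b) == GC_MARK_DIRTY;
}

static inline void example_mark_metadata(struct bucket *b, uint64_t sectors)
{
        SET_GC_MARK(b, GC_MARK_METADATA);
        /* the next 13 bits count valid sectors; clamp to what fits */
        if (sectors > MAX_GC_SECTORS_USED)
                sectors = MAX_GC_SECTORS_USED;
        SET_GC_SECTORS_USED(b, sectors);
}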

#include "journal.h"
#include "stats.h"
struct search;
struct btree;
struct keybuf;

struct keybuf_key {
        struct rb_node          node;
        BKEY_PADDED(key);
        void                    *private;
};

struct keybuf {
        struct bkey             last_scanned;
        spinlock_t              lock;

        /*
         * Beginning and end of range in rb tree - so that we can skip taking
         * the lock and checking the rb tree when we need to check for
         * overlapping keys.
         */
        struct bkey             start;
        struct bkey             end;

        struct rb_root          keys;

#define KEYBUF_NR               500
        DECLARE_ARRAY_ALLOCATOR(struct keybuf_key, freelist, KEYBUF_NR);
};

struct bcache_device {
        struct closure          cl;

        struct kobject          kobj;

        struct cache_set        *c;
        unsigned                id;
#define BCACHEDEVNAME_SIZE      12
        char                    name[BCACHEDEVNAME_SIZE];

        struct gendisk          *disk;

        unsigned long           flags;
#define BCACHE_DEV_CLOSING      0
#define BCACHE_DEV_DETACHING    1
#define BCACHE_DEV_UNLINK_DONE  2

        unsigned                nr_stripes;
        unsigned                stripe_size;
        atomic_t                *stripe_sectors_dirty;
        unsigned long           *full_dirty_stripes;

        unsigned long           sectors_dirty_last;
        long                    sectors_dirty_derivative;

        struct bio_set          *bio_split;

        unsigned                data_csum:1;

        int (*cache_miss)(struct btree *, struct search *,
                          struct bio *, unsigned);
        int (*ioctl) (struct bcache_device *, fmode_t, unsigned, unsigned long);
};

struct io {
        /* Used to track sequential IO so it can be skipped */
        struct hlist_node       hash;
        struct list_head        lru;

        unsigned long           jiffies;
        unsigned                sequential;
        sector_t                last;
};

struct cached_dev {
        struct list_head        list;
        struct bcache_device    disk;
        struct block_device     *bdev;

        struct cache_sb         sb;
        struct bio              sb_bio;
        struct bio_vec          sb_bv[1];
        struct closure          sb_write;
        struct semaphore        sb_write_mutex;

        /* Refcount on the cache set. Always nonzero when we're caching. */
        atomic_t                count;
        struct work_struct      detach;

        /*
         * Device might not be running if it's dirty and the cache set hasn't
         * shown up yet.
         */
        atomic_t                running;

        /*
         * Writes take a shared lock from start to finish; scanning for dirty
         * data to refill the rb tree requires an exclusive lock.
         */
        struct rw_semaphore     writeback_lock;

        /*
         * Nonzero, and writeback has a refcount (d->count), iff there is dirty
         * data in the cache. Protected by writeback_lock; must have a shared
         * lock to set and an exclusive lock to clear.
         */
        atomic_t                has_dirty;

        struct bch_ratelimit    writeback_rate;
        struct delayed_work     writeback_rate_update;

        /*
         * Internal to the writeback code, so read_dirty() can keep track of
         * where it's at.
         */
        sector_t                last_read;

        /* Limit number of writeback bios in flight */
        struct semaphore        in_flight;
        struct task_struct      *writeback_thread;

        struct keybuf           writeback_keys;

        /* For tracking sequential IO */
#define RECENT_IO_BITS  7
#define RECENT_IO       (1 << RECENT_IO_BITS)
        struct io               io[RECENT_IO];
        struct hlist_head       io_hash[RECENT_IO + 1];
        struct list_head        io_lru;
        spinlock_t              io_lock;

        struct cache_accounting accounting;

        /* The rest of this all shows up in sysfs */
        unsigned                sequential_cutoff;
        unsigned                readahead;

        unsigned                verify:1;
        unsigned                bypass_torture_test:1;

        unsigned                partial_stripes_expensive:1;
        unsigned                writeback_metadata:1;
        unsigned                writeback_running:1;
        unsigned char           writeback_percent;
        unsigned                writeback_delay;

        uint64_t                writeback_rate_target;
        int64_t                 writeback_rate_proportional;
        int64_t                 writeback_rate_derivative;
        int64_t                 writeback_rate_change;

        unsigned                writeback_rate_update_seconds;
        unsigned                writeback_rate_d_term;
        unsigned                writeback_rate_p_term_inverse;
};

enum alloc_reserve {
        RESERVE_BTREE,
        RESERVE_PRIO,
        RESERVE_MOVINGGC,
        RESERVE_NONE,
        RESERVE_NR,
};

struct cache {
        struct cache_set        *set;
        struct cache_sb         sb;
        struct bio              sb_bio;
        struct bio_vec          sb_bv[1];

        struct kobject          kobj;
        struct block_device     *bdev;

        struct task_struct      *alloc_thread;

        struct closure          prio;
        struct prio_set         *disk_buckets;

        /*
         * When allocating new buckets, prio_write() gets first dibs - since we
         * may not be able to allocate at all without writing priorities and
         * gens. prio_buckets[] contains the buckets allocated for the next
         * prio write; prio_last_buckets[] contains the buckets the priorities
         * were last written to (so gc can mark them as metadata).
         */
        uint64_t                *prio_buckets;
        uint64_t                *prio_last_buckets;

        /*
         * free: Buckets that are ready to be used
         *
         * free_inc: Incoming buckets - these are buckets that currently have
         * cached data in them, and we can't reuse them until after we write
         * their new gen to disk. After prio_write() finishes writing the new
         * gens/prios, they'll be moved to the free list (and possibly discarded
         * in the process)
         */
        DECLARE_FIFO(long, free)[RESERVE_NR];
        DECLARE_FIFO(long, free_inc);

        size_t                  fifo_last_bucket;

        /* Allocation stuff: */
        struct bucket           *buckets;

        DECLARE_HEAP(struct bucket *, heap);

        /*
         * If nonzero, we know we aren't going to find any buckets to invalidate
         * until a gc finishes - otherwise we could pointlessly burn a ton of
         * cpu
         */
        unsigned                invalidate_needs_gc:1;

        bool                    discard; /* Get rid of? */

        struct journal_device   journal;

        /* The rest of this all shows up in sysfs */
#define IO_ERROR_SHIFT          20
        atomic_t                io_errors;
        atomic_t                io_count;

        atomic_long_t           meta_sectors_written;
        atomic_long_t           btree_sectors_written;
        atomic_long_t           sectors_written;
};

struct gc_stat {
        size_t                  nodes;
        size_t                  key_bytes;

        size_t                  nkeys;
        uint64_t                data;   /* sectors */
        unsigned                in_use; /* percent */
};

/*
 * Flag bits, for how the cache set is shutting down, and what phase it's at:
 *
 * CACHE_SET_UNREGISTERING means we're not just shutting down, we're detaching
 * all the backing devices first (their cached data gets invalidated, and they
 * won't automatically reattach).
 *
 * CACHE_SET_STOPPING always gets set first when we're closing down a cache set;
 * we'll continue to run normally for a while with CACHE_SET_STOPPING set (i.e.
 * flushing dirty data).
 *
 * CACHE_SET_RUNNING means all cache devices have been registered and journal
 * replay is complete.
 */
#define CACHE_SET_UNREGISTERING         0
#define CACHE_SET_STOPPING              1
#define CACHE_SET_RUNNING               2

struct cache_set {
        struct closure          cl;

        struct list_head        list;
        struct kobject          kobj;
        struct kobject          internal;
        struct dentry           *debug;
        struct cache_accounting accounting;

        unsigned long           flags;

        struct cache_sb         sb;

        struct cache            *cache[MAX_CACHES_PER_SET];
        struct cache            *cache_by_alloc[MAX_CACHES_PER_SET];
        int                     caches_loaded;

        struct bcache_device    **devices;
        struct list_head        cached_devs;
        uint64_t                cached_dev_sectors;
        struct closure          caching;

        struct closure          sb_write;
        struct semaphore        sb_write_mutex;

        mempool_t               *search;
        mempool_t               *bio_meta;
        struct bio_set          *bio_split;

        /* For the btree cache */
        struct shrinker         shrink;

        /* For the btree cache and anything allocation related */
        struct mutex            bucket_lock;

        /* log2(bucket_size), in sectors */
        unsigned short          bucket_bits;

        /* log2(block_size), in sectors */
        unsigned short          block_bits;

        /*
         * Default number of pages for a new btree node - may be less than a
         * full bucket
         */
        unsigned                btree_pages;

        /*
         * Lists of struct btrees; btree_cache is the lru list of structs that
         * have memory allocated for an actual btree node, btree_cache_freed is
         * for structs that do not.
         *
         * We never free a struct btree, except on shutdown - we just put it on
         * the btree_cache_freed list and reuse it later. This simplifies the
         * code, and it doesn't cost us much memory as the memory usage is
         * dominated by buffers that hold the actual btree node data and those
         * can be freed - and the number of struct btrees allocated is
         * effectively bounded.
         *
         * btree_cache_freeable effectively is a small cache - we use it because
         * high order page allocations can be rather expensive, and it's quite
         * common to delete and allocate btree nodes in quick succession. It
         * should never grow past ~2-3 nodes in practice.
         */
        struct list_head        btree_cache;
        struct list_head        btree_cache_freeable;
        struct list_head        btree_cache_freed;

        /* Number of elements in btree_cache + btree_cache_freeable lists */
        unsigned                btree_cache_used;

        /*
         * If we need to allocate memory for a new btree node and that
         * allocation fails, we can cannibalize another node in the btree cache
         * to satisfy the allocation - lock to guarantee only one thread does
         * this at a time:
         */
        wait_queue_head_t       btree_cache_wait;
        struct task_struct      *btree_cache_alloc_lock;

        /*
         * When we free a btree node, we increment the gen of the bucket the
         * node is in - but we can't rewrite the prios and gens until we've
         * finished whatever it is we were doing, otherwise after a crash the
         * btree node would be freed but, for say a split, we might not have
         * the pointers to the new nodes inserted into the btree yet.
         *
         * This is a refcount that blocks prio_write() until the new keys are
         * written.
         */
        atomic_t                prio_blocked;
        wait_queue_head_t       bucket_wait;

        /*
         * For any bio we don't skip, we subtract the number of sectors from
         * rescale; when it hits 0 we rescale all the bucket priorities.
         */
        atomic_t                rescale;
        /*
         * When we invalidate buckets, we use both the priority and the amount
         * of good data to determine which buckets to reuse first - to weight
         * those together consistently we keep track of the smallest nonzero
         * priority of any bucket.
         */
        uint16_t                min_prio;

        /*
         * max(gen - last_gc) for all buckets. When it gets too big we have to
         * gc to keep gens from wrapping around.
         */
        uint8_t                 need_gc;
        struct gc_stat          gc_stats;
        size_t                  nbuckets;

        struct task_struct      *gc_thread;
        /* Where in the btree gc currently is */
        struct bkey             gc_done;

        /*
         * The allocation code needs gc_mark in struct bucket to be correct, but
         * it's not while a gc is in progress. Protected by bucket_lock.
         */
        int                     gc_mark_valid;

        /* Counts how many sectors bio_insert has added to the cache */
        atomic_t                sectors_to_gc;

        wait_queue_head_t       moving_gc_wait;
        struct keybuf           moving_gc_keys;
        /* Number of moving GC bios in flight */
        struct semaphore        moving_in_flight;

        struct workqueue_struct *moving_gc_wq;

        struct btree            *root;

#ifdef CONFIG_BCACHE_DEBUG
        struct btree            *verify_data;
        struct bset             *verify_ondisk;
        struct mutex            verify_lock;
#endif

        unsigned                nr_uuids;
        struct uuid_entry       *uuids;
        BKEY_PADDED(uuid_bucket);
        struct closure          uuid_write;
        struct semaphore        uuid_write_mutex;

        /*
         * A btree node on disk could have too many bsets for an iterator to fit
         * on the stack - have to dynamically allocate them
         */
        mempool_t               *fill_iter;

        struct bset_sort_state  sort;

        /* List of buckets we're currently writing data to */
        struct list_head        data_buckets;
        spinlock_t              data_bucket_lock;

        struct journal          journal;

#define CONGESTED_MAX           1024
        unsigned                congested_last_us;
        atomic_t                congested;

        /* The rest of this all shows up in sysfs */
        unsigned                congested_read_threshold_us;
        unsigned                congested_write_threshold_us;

        struct time_stats       btree_gc_time;
        struct time_stats       btree_split_time;
        struct time_stats       btree_read_time;

        atomic_long_t           cache_read_races;
        atomic_long_t           writeback_keys_done;
        atomic_long_t           writeback_keys_failed;

        enum                    {
                ON_ERROR_UNREGISTER,
                ON_ERROR_PANIC,
        }                       on_error;
        unsigned                error_limit;
        unsigned                error_decay;

        unsigned short          journal_delay_ms;
        bool                    expensive_debug_checks;
        unsigned                verify:1;
        unsigned                key_merging_disabled:1;
        unsigned                gc_always_rewrite:1;
        unsigned                shrinker_disabled:1;
        unsigned                copy_gc_enabled:1;

#define BUCKET_HASH_BITS        12
        struct hlist_head       bucket_hash[1 << BUCKET_HASH_BITS];
};

struct bbio {
        unsigned                submit_time_us;
        union {
                struct bkey     key;
                uint64_t        _pad[3];
                /*
                 * We only need pad = 3 here because we only ever carry around a
                 * single pointer - i.e. the pointer we're doing io to/from.
                 */
        };
        struct bio              bio;
};

#define BTREE_PRIO              USHRT_MAX
#define INITIAL_PRIO            32768U

#define btree_bytes(c)          ((c)->btree_pages * PAGE_SIZE)
#define btree_blocks(b)                                                 \
        ((unsigned) (KEY_SIZE(&b->key) >> (b)->c->block_bits))

#define btree_default_blocks(c)                                         \
        ((unsigned) ((PAGE_SECTORS * (c)->btree_pages) >> (c)->block_bits))

#define bucket_pages(c)         ((c)->sb.bucket_size / PAGE_SECTORS)
#define bucket_bytes(c)         ((c)->sb.bucket_size << 9)
#define block_bytes(c)          ((c)->sb.block_size << 9)

#define prios_per_bucket(c)                             \
        ((bucket_bytes(c) - sizeof(struct prio_set)) /  \
         sizeof(struct bucket_disk))
#define prio_buckets(c)                                 \
        DIV_ROUND_UP((size_t) (c)->sb.nbuckets, prios_per_bucket(c))

static inline size_t sector_to_bucket(struct cache_set *c, sector_t s)
{
        return s >> c->bucket_bits;
}

static inline sector_t bucket_to_sector(struct cache_set *c, size_t b)
{
        return ((sector_t) b) << c->bucket_bits;
}

static inline sector_t bucket_remainder(struct cache_set *c, sector_t s)
{
        return s & (c->sb.bucket_size - 1);
}
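
/*
 * Illustrative only (example_* is not part of bcache): the helpers above
 * decompose a cache device sector address into a bucket number and an
 * offset within that bucket.
 */
static inline void example_split_sector(struct cache_set *c, sector_t s,
                                        size_t *bucket, sector_t *offset)
{
        *bucket = sector_to_bucket(c, s);       /* s / bucket size */
        *offset = bucket_remainder(c, s);       /* s % bucket size */
}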

static inline struct cache *PTR_CACHE(struct cache_set *c,
                                      const struct bkey *k,
                                      unsigned ptr)
{
        return c->cache[PTR_DEV(k, ptr)];
}

static inline size_t PTR_BUCKET_NR(struct cache_set *c,
                                   const struct bkey *k,
                                   unsigned ptr)
{
        return sector_to_bucket(c, PTR_OFFSET(k, ptr));
}

static inline struct bucket *PTR_BUCKET(struct cache_set *c,
                                        const struct bkey *k,
                                        unsigned ptr)
{
        return PTR_CACHE(c, k, ptr)->buckets + PTR_BUCKET_NR(c, k, ptr);
}

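/*
 * Gens are 8 bit and wrap around: gen_after() compares them modulo 256,
 * returning how far a is ahead of b and clamping distances over 128 to 0
 * (i.e. treating a as not after b).
 */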
static inline uint8_t gen_after(uint8_t a, uint8_t b)
{
        uint8_t r = a - b;
        return r > 128U ? 0 : r;
}

static inline uint8_t ptr_stale(struct cache_set *c, const struct bkey *k,
                                unsigned i)
{
        return gen_after(PTR_BUCKET(c, k, i)->gen, PTR_GEN(k, i));
}

static inline bool ptr_available(struct cache_set *c, const struct bkey *k,
                                 unsigned i)
{
        return (PTR_DEV(k, i) < MAX_CACHES_PER_SET) && PTR_CACHE(c, k, i);
}
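
/*
 * A hedged sketch (example_* is illustrative, not part of bcache) of the
 * validity rule from the comment at the top of this file: a pointer is
 * usable iff it names a cache device that exists and its embedded gen still
 * matches the gen of the bucket it points into.
 */
static inline bool example_ptr_usable(struct cache_set *c,
                                      const struct bkey *k, unsigned i)
{
        return ptr_available(c, k, i) && !ptr_stale(c, k, i);
}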

/* Btree key macros */

/*
 * This is used for various on disk data structures - cache_sb, prio_set, bset,
 * jset: the checksum is _always_ the first 8 bytes of these structs
 */
#define csum_set(i)                                                     \
        bch_crc64(((void *) (i)) + sizeof(uint64_t),                    \
                  ((void *) bset_bkey_last(i)) -                        \
                  (((void *) (i)) + sizeof(uint64_t)))
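
/*
 * Typical use (sketch): after reading one of these structs from disk,
 *
 *      if (i->csum != csum_set(i))
 *              goto bad;
 */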

/* Error handling macros */

#define btree_bug(b, ...)                                               \
do {                                                                    \
        if (bch_cache_set_error((b)->c, __VA_ARGS__))                   \
                dump_stack();                                           \
} while (0)

#define cache_bug(c, ...)                                               \
do {                                                                    \
        if (bch_cache_set_error(c, __VA_ARGS__))                        \
                dump_stack();                                           \
} while (0)

#define btree_bug_on(cond, b, ...)                                      \
do {                                                                    \
        if (cond)                                                       \
                btree_bug(b, __VA_ARGS__);                              \
} while (0)

#define cache_bug_on(cond, c, ...)                                      \
do {                                                                    \
        if (cond)                                                       \
                cache_bug(c, __VA_ARGS__);                              \
} while (0)

#define cache_set_err_on(cond, c, ...)                                  \
do {                                                                    \
        if (cond)                                                       \
                bch_cache_set_error(c, __VA_ARGS__);                    \
} while (0)

/* Looping macros */

#define for_each_cache(ca, cs, iter)                                    \
        for (iter = 0; ca = cs->cache[iter], iter < (cs)->sb.nr_in_set; iter++)

#define for_each_bucket(b, ca)                                          \
        for (b = (ca)->buckets + (ca)->sb.first_bucket;                 \
             b < (ca)->buckets + (ca)->sb.nbuckets; b++)

static inline void cached_dev_put(struct cached_dev *dc)
{
        if (atomic_dec_and_test(&dc->count))
                schedule_work(&dc->detach);
}

static inline bool cached_dev_get(struct cached_dev *dc)
{
        if (!atomic_inc_not_zero(&dc->count))
                return false;

        /* Paired with the mb in cached_dev_attach */
        smp_mb__after_atomic();
        return true;
}

/*
 * bucket_gc_gen() returns the difference between the bucket's current gen and
 * the oldest gen of any pointer into that bucket in the btree (last_gc).
 */

static inline uint8_t bucket_gc_gen(struct bucket *b)
{
        return b->gen - b->last_gc;
}

#define BUCKET_GC_GEN_MAX       96U
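/*
 * 96 rather than 128: presumably headroom, so bucket_gc_gen() stays well
 * below the point where the 8 bit wraparound comparison in gen_after()
 * becomes ambiguous.
 */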

#define kobj_attribute_write(n, fn)                                     \
        static struct kobj_attribute ksysfs_##n = __ATTR(n, S_IWUSR, NULL, fn)

#define kobj_attribute_rw(n, show, store)                               \
        static struct kobj_attribute ksysfs_##n =                       \
                __ATTR(n, S_IWUSR|S_IRUSR, show, store)

static inline void wake_up_allocators(struct cache_set *c)
{
        struct cache *ca;
        unsigned i;

        for_each_cache(ca, c, i)
                wake_up_process(ca->alloc_thread);
}

/* Forward declarations */

void bch_count_io_errors(struct cache *, int, const char *);
void bch_bbio_count_io_errors(struct cache_set *, struct bio *,
                              int, const char *);
void bch_bbio_endio(struct cache_set *, struct bio *, int, const char *);
void bch_bbio_free(struct bio *, struct cache_set *);
struct bio *bch_bbio_alloc(struct cache_set *);

void __bch_submit_bbio(struct bio *, struct cache_set *);
void bch_submit_bbio(struct bio *, struct cache_set *, struct bkey *, unsigned);

uint8_t bch_inc_gen(struct cache *, struct bucket *);
void bch_rescale_priorities(struct cache_set *, int);

bool bch_can_invalidate_bucket(struct cache *, struct bucket *);
void __bch_invalidate_one_bucket(struct cache *, struct bucket *);

void __bch_bucket_free(struct cache *, struct bucket *);
void bch_bucket_free(struct cache_set *, struct bkey *);

long bch_bucket_alloc(struct cache *, unsigned, bool);
int __bch_bucket_alloc_set(struct cache_set *, unsigned,
                           struct bkey *, int, bool);
int bch_bucket_alloc_set(struct cache_set *, unsigned,
                         struct bkey *, int, bool);
bool bch_alloc_sectors(struct cache_set *, struct bkey *, unsigned,
                       unsigned, unsigned, bool);

__printf(2, 3)
bool bch_cache_set_error(struct cache_set *, const char *, ...);

void bch_prio_write(struct cache *);
void bch_write_bdev_super(struct cached_dev *, struct closure *);

extern struct workqueue_struct *bcache_wq;
extern const char * const bch_cache_modes[];
extern struct mutex bch_register_lock;
extern struct list_head bch_cache_sets;

extern struct kobj_type bch_cached_dev_ktype;
extern struct kobj_type bch_flash_dev_ktype;
extern struct kobj_type bch_cache_set_ktype;
extern struct kobj_type bch_cache_set_internal_ktype;
extern struct kobj_type bch_cache_ktype;

void bch_cached_dev_release(struct kobject *);
void bch_flash_dev_release(struct kobject *);
void bch_cache_set_release(struct kobject *);
void bch_cache_release(struct kobject *);

int bch_uuid_write(struct cache_set *);
void bcache_write_super(struct cache_set *);

int bch_flash_dev_create(struct cache_set *c, uint64_t size);

int bch_cached_dev_attach(struct cached_dev *, struct cache_set *);
void bch_cached_dev_detach(struct cached_dev *);
void bch_cached_dev_run(struct cached_dev *);
void bcache_device_stop(struct bcache_device *);

void bch_cache_set_unregister(struct cache_set *);
void bch_cache_set_stop(struct cache_set *);

struct cache_set *bch_cache_set_alloc(struct cache_sb *);
void bch_btree_cache_free(struct cache_set *);
int bch_btree_cache_alloc(struct cache_set *);
void bch_moving_init_cache_set(struct cache_set *);
int bch_open_buckets_alloc(struct cache_set *);
void bch_open_buckets_free(struct cache_set *);

int bch_cache_allocator_start(struct cache *ca);

void bch_debug_exit(void);
int bch_debug_init(struct kobject *);
void bch_request_exit(void);
int bch_request_init(void);

#endif /* _BCACHE_H */