linux/drivers/md/bcache/bcache.h
   1/* SPDX-License-Identifier: GPL-2.0 */
   2#ifndef _BCACHE_H
   3#define _BCACHE_H
   4
   5/*
   6 * SOME HIGH LEVEL CODE DOCUMENTATION:
   7 *
   8 * Bcache mostly works with cache sets, cache devices, and backing devices.
   9 *
  10 * Support for multiple cache devices hasn't quite been finished off yet, but
  11 * it's about 95% plumbed through. A cache set and its cache devices are sort of
  12 * like an md raid array and its component devices. Most of the code doesn't care
  13 * about individual cache devices; the main abstraction is the cache set.
  14 *
  15 * Multiple cache devices are intended to give us the ability to mirror dirty
  16 * cached data and metadata, without mirroring clean cached data.
  17 *
  18 * Backing devices are different, in that they have a lifetime independent of a
  19 * cache set. When you register a newly formatted backing device it'll come up
  20 * in passthrough mode, and then you can attach and detach a backing device from
  21 * a cache set at runtime - while it's mounted and in use. Detaching implicitly
  22 * invalidates any cached data for that backing device.
  23 *
  24 * A cache set can have multiple (many) backing devices attached to it.
  25 *
  26 * There are also flash only volumes - this is the reason for the distinction
  27 * between struct cached_dev and struct bcache_device. A flash only volume
  28 * works much like a bcache device that has a backing device, except the
  29 * "cached" data is always dirty. The end result is that we get thin
  30 * provisioning with very little additional code.
  31 *
  32 * Flash only volumes work but they're not production ready because the moving
  33 * garbage collector needs more work. More on that later.
  34 *
  35 * BUCKETS/ALLOCATION:
  36 *
  37 * Bcache is primarily designed for caching, which means that in normal
  38 * operation all of our available space will be allocated. Thus, we need an
  39 * efficient way of deleting things from the cache so we can write new things to
  40 * it.
  41 *
  42 * To do this, we first divide the cache device up into buckets. A bucket is the
  43 * unit of allocation; they're typically around 1 MB - anywhere from 128 kB to
  44 * 2 MB or more works efficiently.
  45 *
  46 * Each bucket has a 16 bit priority, and an 8 bit generation associated with
  47 * it. The gens and priorities for all the buckets are stored contiguously and
  48 * packed on disk (in a linked list of buckets - aside from the superblock, all
  49 * of bcache's metadata is stored in buckets).
  50 *
  51 * The priority is used to implement an LRU. We reset a bucket's priority when
  52 * we allocate it or on a cache hit, and every so often we decrement the priority
  53 * of each bucket. It could be used to implement something more sophisticated,
  54 * if anyone ever gets around to it.
  55 *
  56 * The generation is used for invalidating buckets. Each pointer also has an 8
  57 * bit generation embedded in it; for a pointer to be considered valid, its gen
  58 * must match the gen of the bucket it points into.  Thus, to reuse a bucket all
  59 * we have to do is increment its gen (and write its new gen to disk; we batch
  60 * this up).
  61 *
  62 * Bcache is entirely COW - we never write twice to a bucket, even buckets that
  63 * contain metadata (including btree nodes).
  64 *
  65 * THE BTREE:
  66 *
  67 * Bcache is in large part designed around the btree.
  68 *
  69 * At a high level, the btree is just an index of key -> ptr tuples.
  70 *
  71 * Keys represent extents, and thus have a size field. Keys also have a variable
  72 * number of pointers attached to them (potentially zero, which is handy for
  73 * invalidating the cache).
  74 *
  75 * The key itself is an inode:offset pair. The inode number corresponds to a
  76 * backing device or a flash only volume. The offset is the ending offset of the
  77 * extent within the inode - not the starting offset; this makes lookups
  78 * slightly more convenient. (See the short sketch right after this comment.)
  79 *
  80 * Pointers contain the cache device id, the offset on that device, and an 8 bit
  81 * generation number. More on the gen later.
  82 *
  83 * Index lookups are not fully abstracted - cache lookups in particular are
  84 * still somewhat mixed in with the btree code, but things are headed in that
  85 * direction.
  86 *
  87 * Updates are fairly well abstracted, though. There are two different ways of
  88 * updating the btree; insert and replace.
  89 *
  90 * BTREE_INSERT will just take a list of keys and insert them into the btree -
  91 * overwriting (possibly only partially) any extents they overlap with. This is
  92 * used to update the index after a write.
  93 *
  94 * BTREE_REPLACE is really cmpxchg(); it inserts a key into the btree iff it is
  95 * overwriting a key that matches another given key. This is used for inserting
  96 * data into the cache after a cache miss, and for background writeback, and for
  97 * the moving garbage collector.
  98 *
  99 * There is no "delete" operation; deleting things from the index is
 100 * accomplished either by invalidating pointers (by incrementing a bucket's
 101 * gen) or by inserting a key with 0 pointers - which will overwrite anything
 102 * previously present at that location in the index.
 103 *
 104 * This means that there are always stale/invalid keys in the btree. They're
 105 * filtered out by the code that iterates through a btree node, and removed when
 106 * a btree node is rewritten.
 107 *
 108 * BTREE NODES:
 109 *
 110 * Our unit of allocation is a bucket, and we can't arbitrarily allocate and
 111 * free anything smaller than a bucket - so that's how big our btree nodes are.
 112 *
 113 * (If buckets are really big we'll only use part of the bucket for a btree node
 114 * - no less than 1/4th - but a bucket still contains no more than a single
 115 * btree node. I'd actually like to change this, but for now we rely on the
 116 * bucket's gen for deleting btree nodes when we rewrite/split a node.)
 117 *
 118 * Anyway, btree nodes are big - big enough to be inefficient with a textbook
 119 * btree implementation.
 120 *
 121 * The way this is solved is that btree nodes are internally log structured; we
 122 * can append new keys to an existing btree node without rewriting it. This
 123 * means each set of keys we write is sorted, but the node is not.
 124 *
 125 * We maintain this log structure in memory - keeping 1 MB of keys sorted would
 126 * be expensive, and we have to distinguish between the keys we have written and
 127 * the keys we haven't. So to do a lookup in a btree node, we have to search
 128 * each sorted set. But we do merge written sets together lazily, so the cost of
 129 * these extra searches is quite low (normally most of the keys in a btree node
 130 * will be in one big set, and then there'll be one or two sets that are much
 131 * smaller).
 132 *
 133 * This log structure makes bcache's btree more of a hybrid between a
 134 * conventional btree and a compacting data structure, with some of the
 135 * advantages of both.
 136 *
 137 * GARBAGE COLLECTION:
 138 *
 139 * We can't just invalidate any bucket - it might contain dirty data or
 140 * metadata. If it once contained dirty data, other writes might overwrite it
 141 * later, leaving no valid pointers into that bucket in the index.
 142 *
 143 * Thus, the primary purpose of garbage collection is to find buckets to reuse.
 144 * It also counts how much valid data each bucket currently contains, so that
 145 * allocation can reuse buckets sooner when they've been mostly overwritten.
 146 *
 147 * It also does some things that are really internal to the btree
 148 * implementation. If a btree node contains pointers that are stale by more than
 149 * some threshold, it rewrites the btree node to avoid the bucket's generation
 150 * wrapping around. It also merges adjacent btree nodes if they're empty enough.
 151 *
 152 * THE JOURNAL:
 153 *
 154 * Bcache's journal is not necessary for consistency; we always strictly
 155 * order metadata writes so that the btree and everything else is consistent on
 156 * disk in the event of an unclean shutdown, and in fact bcache had writeback
 157 * caching (with recovery from unclean shutdown) before journalling was
 158 * implemented.
 159 *
 160 * Rather, the journal is purely a performance optimization; we can't complete a
 161 * write until we've updated the index on disk, otherwise the cache would be
 162 * inconsistent in the event of an unclean shutdown. This means that without the
 163 * journal, on random write workloads we constantly have to update all the leaf
 164 * nodes in the btree, and those writes will be mostly empty (appending at most
 165 * a few keys each) - highly inefficient in the amount of metadata written,
 166 * and it puts more strain on the various btree resorting/compacting code.
 167 *
 168 * The journal is just a log of keys we've inserted; on startup we just reinsert
 169 * all the keys in the open journal entries. That means that when we're updating
 170 * a node in the btree, we can wait until a 4k block of keys fills up before
 171 * writing them out.
 172 *
 173 * For simplicity, we only journal updates to leaf nodes; updates to parent
 174 * nodes are rare enough (since our leaf nodes are huge) that it wasn't worth
 175 * the complexity to deal with journalling them (in particular, journal replay)
 176 * - updates to non leaf nodes just happen synchronously (see btree_split()).
 177 */
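
/*
 * A short illustrative sketch of the extent keys described in THE BTREE
 * section above (a reading aid, not driver code; it assumes the KEY(),
 * KEY_OFFSET(), KEY_SIZE(), KEY_START(), KEY_PTRS() and PTR_GEN() macros
 * from <linux/bcache.h>):
 *
 *	struct bkey k = KEY(1, 1024, 8);
 *
 * Here inode 1 names a backing device or flash only volume, the extent *ends*
 * at sector 1024 (KEY_OFFSET), and it is 8 sectors long (KEY_SIZE), so it
 * starts at sector 1016 (KEY_START). A key with KEY_PTRS() == 0 simply
 * invalidates that range of the index; a key with pointers is only usable
 * while each pointer's PTR_GEN() still matches the gen of the bucket it
 * points into.
 */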
 178
 179#define pr_fmt(fmt) "bcache: %s() " fmt "\n", __func__
 180
 181#include <linux/bcache.h>
 182#include <linux/bio.h>
 183#include <linux/kobject.h>
 184#include <linux/list.h>
 185#include <linux/mutex.h>
 186#include <linux/rbtree.h>
 187#include <linux/rwsem.h>
 188#include <linux/refcount.h>
 189#include <linux/types.h>
 190#include <linux/workqueue.h>
 191#include <linux/kthread.h>
 192
 193#include "bset.h"
 194#include "util.h"
 195#include "closure.h"
 196
 197struct bucket {
 198        atomic_t        pin;
 199        uint16_t        prio;
 200        uint8_t         gen;
 201        uint8_t         last_gc; /* Most out of date gen in the btree */
 202        uint16_t        gc_mark; /* Bitfield used by GC. See below for field */
 203};
 204
 205/*
 206 * I'd use bitfields for these, but I don't trust the compiler not to screw me
 207 * as multiple threads touch struct bucket without locking
 208 */
 209
 210BITMASK(GC_MARK,         struct bucket, gc_mark, 0, 2);
 211#define GC_MARK_RECLAIMABLE     1
 212#define GC_MARK_DIRTY           2
 213#define GC_MARK_METADATA        3
 214#define GC_SECTORS_USED_SIZE    13
 215#define MAX_GC_SECTORS_USED     (~(~0ULL << GC_SECTORS_USED_SIZE))
 216BITMASK(GC_SECTORS_USED, struct bucket, gc_mark, 2, GC_SECTORS_USED_SIZE);
 217BITMASK(GC_MOVE, struct bucket, gc_mark, 15, 1);
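
/*
 * Illustrative sketch only (assuming BITMASK() generates GC_MARK()/
 * SET_GC_MARK() style getter/setter pairs, as the uapi bcache header does):
 * roughly how the mark and the used-sector count are meant to be read and
 * updated, e.g. when GC accounts a key's sectors to a bucket:
 *
 *	if (GC_MARK(b) == GC_MARK_METADATA)
 *		return false;		(metadata buckets are never invalidated)
 *
 *	SET_GC_SECTORS_USED(b, min_t(unsigned int,
 *				     GC_SECTORS_USED(b) + KEY_SIZE(k),
 *				     MAX_GC_SECTORS_USED));
 */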
 218
 219#include "journal.h"
 220#include "stats.h"
 221struct search;
 222struct btree;
 223struct keybuf;
 224
 225struct keybuf_key {
 226        struct rb_node          node;
 227        BKEY_PADDED(key);
 228        void                    *private;
 229};
 230
 231struct keybuf {
 232        struct bkey             last_scanned;
 233        spinlock_t              lock;
 234
 235        /*
 236         * Beginning and end of range in rb tree - so that we can skip taking
 237         * the lock and checking the rb tree when we need to check for overlapping
 238         * keys.
 239         */
 240        struct bkey             start;
 241        struct bkey             end;
 242
 243        struct rb_root          keys;
 244
 245#define KEYBUF_NR               500
 246        DECLARE_ARRAY_ALLOCATOR(struct keybuf_key, freelist, KEYBUF_NR);
 247};
 248
 249struct bcache_device {
 250        struct closure          cl;
 251
 252        struct kobject          kobj;
 253
 254        struct cache_set        *c;
 255        unsigned int            id;
 256#define BCACHEDEVNAME_SIZE      12
 257        char                    name[BCACHEDEVNAME_SIZE];
 258
 259        struct gendisk          *disk;
 260
 261        unsigned long           flags;
 262#define BCACHE_DEV_CLOSING              0
 263#define BCACHE_DEV_DETACHING            1
 264#define BCACHE_DEV_UNLINK_DONE          2
 265#define BCACHE_DEV_WB_RUNNING           3
 266#define BCACHE_DEV_RATE_DW_RUNNING      4
 267        unsigned int            nr_stripes;
 268        unsigned int            stripe_size;
 269        atomic_t                *stripe_sectors_dirty;
 270        unsigned long           *full_dirty_stripes;
 271
 272        struct bio_set          bio_split;
 273
 274        unsigned int            data_csum:1;
 275
 276        int (*cache_miss)(struct btree *b, struct search *s,
 277                          struct bio *bio, unsigned int sectors);
 278        int (*ioctl)(struct bcache_device *d, fmode_t mode,
 279                     unsigned int cmd, unsigned long arg);
 280};
 281
 282struct io {
 283        /* Used to track sequential IO so it can be skipped */
 284        struct hlist_node       hash;
 285        struct list_head        lru;
 286
 287        unsigned long           jiffies;
 288        unsigned int            sequential;
 289        sector_t                last;
 290};
 291
 292enum stop_on_failure {
 293        BCH_CACHED_DEV_STOP_AUTO = 0,
 294        BCH_CACHED_DEV_STOP_ALWAYS,
 295        BCH_CACHED_DEV_STOP_MODE_MAX,
 296};
 297
 298struct cached_dev {
 299        struct list_head        list;
 300        struct bcache_device    disk;
 301        struct block_device     *bdev;
 302
 303        struct cache_sb         sb;
 304        struct bio              sb_bio;
 305        struct bio_vec          sb_bv[1];
 306        struct closure          sb_write;
 307        struct semaphore        sb_write_mutex;
 308
 309        /* Refcount on the cache set. Always nonzero when we're caching. */
 310        refcount_t              count;
 311        struct work_struct      detach;
 312
 313        /*
 314         * Device might not be running if it's dirty and the cache set hasn't
 315         * shown up yet.
 316         */
 317        atomic_t                running;
 318
 319        /*
 320         * Writes take a shared lock from start to finish; scanning for dirty
 321         * data to refill the rb tree requires an exclusive lock.
 322         */
 323        struct rw_semaphore     writeback_lock;
 324
 325        /*
 326         * Nonzero, and writeback has a refcount (d->count), iff there is dirty
 327         * data in the cache. Protected by writeback_lock; must have a
 328         * shared lock to set and exclusive lock to clear.
 329         */
 330        atomic_t                has_dirty;
 331
 332        struct bch_ratelimit    writeback_rate;
 333        struct delayed_work     writeback_rate_update;
 334
 335        /* Limit number of writeback bios in flight */
 336        struct semaphore        in_flight;
 337        struct task_struct      *writeback_thread;
 338        struct workqueue_struct *writeback_write_wq;
 339
 340        struct keybuf           writeback_keys;
 341
 342        struct task_struct      *status_update_thread;
 343        /*
 344         * Order the write-half of writeback operations strongly in dispatch
 345         * order.  (Maintain LBA order; don't allow reads completing out of
 346         * order to re-order the writes...)
 347         */
 348        struct closure_waitlist writeback_ordering_wait;
 349        atomic_t                writeback_sequence_next;
 350
 351        /* For tracking sequential IO */
 352#define RECENT_IO_BITS  7
 353#define RECENT_IO       (1 << RECENT_IO_BITS)
 354        struct io               io[RECENT_IO];
 355        struct hlist_head       io_hash[RECENT_IO + 1];
 356        struct list_head        io_lru;
 357        spinlock_t              io_lock;
 358
 359        struct cache_accounting accounting;
 360
 361        /* The rest of this all shows up in sysfs */
 362        unsigned int            sequential_cutoff;
 363        unsigned int            readahead;
 364
 365        unsigned int            io_disable:1;
 366        unsigned int            verify:1;
 367        unsigned int            bypass_torture_test:1;
 368
 369        unsigned int            partial_stripes_expensive:1;
 370        unsigned int            writeback_metadata:1;
 371        unsigned int            writeback_running:1;
 372        unsigned char           writeback_percent;
 373        unsigned int            writeback_delay;
 374
 375        uint64_t                writeback_rate_target;
 376        int64_t                 writeback_rate_proportional;
 377        int64_t                 writeback_rate_integral;
 378        int64_t                 writeback_rate_integral_scaled;
 379        int32_t                 writeback_rate_change;
 380
 381        unsigned int            writeback_rate_update_seconds;
 382        unsigned int            writeback_rate_i_term_inverse;
 383        unsigned int            writeback_rate_p_term_inverse;
 384        unsigned int            writeback_rate_minimum;
 385
 386        enum stop_on_failure    stop_when_cache_set_failed;
 387#define DEFAULT_CACHED_DEV_ERROR_LIMIT  64
 388        atomic_t                io_errors;
 389        unsigned int            error_limit;
 390        unsigned int            offline_seconds;
 391
 392        char                    backing_dev_name[BDEVNAME_SIZE];
 393};
 394
 395enum alloc_reserve {
 396        RESERVE_BTREE,
 397        RESERVE_PRIO,
 398        RESERVE_MOVINGGC,
 399        RESERVE_NONE,
 400        RESERVE_NR,
 401};
 402
 403struct cache {
 404        struct cache_set        *set;
 405        struct cache_sb         sb;
 406        struct bio              sb_bio;
 407        struct bio_vec          sb_bv[1];
 408
 409        struct kobject          kobj;
 410        struct block_device     *bdev;
 411
 412        struct task_struct      *alloc_thread;
 413
 414        struct closure          prio;
 415        struct prio_set         *disk_buckets;
 416
 417        /*
 418         * When allocating new buckets, prio_write() gets first dibs - since we
 419         * may not be able to allocate at all without writing priorities and gens.
 420         * prio_last_buckets[] contains the last buckets we wrote priorities to
 421         * (so gc can mark them as metadata), prio_buckets[] contains the
 422         * buckets allocated for the next prio write.
 423         */
 424        uint64_t                *prio_buckets;
 425        uint64_t                *prio_last_buckets;
 426
 427        /*
 428         * free: Buckets that are ready to be used
 429         *
 430         * free_inc: Incoming buckets - these are buckets that currently have
 431         * cached data in them, and we can't reuse them until after we write
 432         * their new gen to disk. After prio_write() finishes writing the new
 433         * gens/prios, they'll be moved to the free list (and possibly discarded
 434         * in the process)
 435         */
 436        DECLARE_FIFO(long, free)[RESERVE_NR];
 437        DECLARE_FIFO(long, free_inc);
 438
 439        size_t                  fifo_last_bucket;
 440
 441        /* Allocation stuff: */
 442        struct bucket           *buckets;
 443
 444        DECLARE_HEAP(struct bucket *, heap);
 445
 446        /*
 447         * If nonzero, we know we aren't going to find any buckets to invalidate
 448         * until a gc finishes - otherwise we could pointlessly burn a ton of
 449         * cpu
 450         */
 451        unsigned int            invalidate_needs_gc;
 452
 453        bool                    discard; /* Get rid of? */
 454
 455        struct journal_device   journal;
 456
 457        /* The rest of this all shows up in sysfs */
 458#define IO_ERROR_SHIFT          20
 459        atomic_t                io_errors;
 460        atomic_t                io_count;
 461
 462        atomic_long_t           meta_sectors_written;
 463        atomic_long_t           btree_sectors_written;
 464        atomic_long_t           sectors_written;
 465
 466        char                    cache_dev_name[BDEVNAME_SIZE];
 467};
 468
 469struct gc_stat {
 470        size_t                  nodes;
 471        size_t                  nodes_pre;
 472        size_t                  key_bytes;
 473
 474        size_t                  nkeys;
 475        uint64_t                data;   /* sectors */
 476        unsigned int            in_use; /* percent */
 477};
 478
 479/*
 480 * Flag bits, for how the cache set is shutting down, and what phase it's at:
 481 *
 482 * CACHE_SET_UNREGISTERING means we're not just shutting down, we're detaching
 483 * all the backing devices first (their cached data gets invalidated, and they
 484 * won't automatically reattach).
 485 *
 486 * CACHE_SET_STOPPING always gets set first when we're closing down a cache set;
 487 * we'll continue to run normally for a while with CACHE_SET_STOPPING set (i.e.
 488 * flushing dirty data).
 489 *
 490 * CACHE_SET_RUNNING means all cache devices have been registered and journal
 491 * replay is complete.
 492 *
 493 * CACHE_SET_IO_DISABLE is set when bcache is stopping the whole cache set; all
 494 * external and internal I/O should be denied when this flag is set.
 495 *
 496 */
 497#define CACHE_SET_UNREGISTERING         0
 498#define CACHE_SET_STOPPING              1
 499#define CACHE_SET_RUNNING               2
 500#define CACHE_SET_IO_DISABLE            3
 501
 502struct cache_set {
 503        struct closure          cl;
 504
 505        struct list_head        list;
 506        struct kobject          kobj;
 507        struct kobject          internal;
 508        struct dentry           *debug;
 509        struct cache_accounting accounting;
 510
 511        unsigned long           flags;
 512        atomic_t                idle_counter;
 513        atomic_t                at_max_writeback_rate;
 514
 515        struct cache_sb         sb;
 516
 517        struct cache            *cache[MAX_CACHES_PER_SET];
 518        struct cache            *cache_by_alloc[MAX_CACHES_PER_SET];
 519        int                     caches_loaded;
 520
 521        struct bcache_device    **devices;
 522        unsigned int            devices_max_used;
 523        atomic_t                attached_dev_nr;
 524        struct list_head        cached_devs;
 525        uint64_t                cached_dev_sectors;
 526        atomic_long_t           flash_dev_dirty_sectors;
 527        struct closure          caching;
 528
 529        struct closure          sb_write;
 530        struct semaphore        sb_write_mutex;
 531
 532        mempool_t               search;
 533        mempool_t               bio_meta;
 534        struct bio_set          bio_split;
 535
 536        /* For the btree cache */
 537        struct shrinker         shrink;
 538
 539        /* For the btree cache and anything allocation related */
 540        struct mutex            bucket_lock;
 541
 542        /* log2(bucket_size), in sectors */
 543        unsigned short          bucket_bits;
 544
 545        /* log2(block_size), in sectors */
 546        unsigned short          block_bits;
 547
 548        /*
 549         * Default number of pages for a new btree node - may be less than a
 550         * full bucket
 551         */
 552        unsigned int            btree_pages;
 553
 554        /*
 555         * Lists of struct btrees; lru is the list for structs that have memory
 556         * allocated for an actual btree node; freed is for structs that do not.
 557         *
 558         * We never free a struct btree, except on shutdown - we just put it on
 559         * the btree_cache_freed list and reuse it later. This simplifies the
 560         * code, and it doesn't cost us much memory as the memory usage is
 561         * dominated by buffers that hold the actual btree node data and those
 562         * can be freed - and the number of struct btrees allocated is
 563         * effectively bounded.
 564         *
 565         * btree_cache_freeable effectively is a small cache - we use it because
 566         * high order page allocations can be rather expensive, and it's quite
 567         * common to delete and allocate btree nodes in quick succession. It
 568         * should never grow past ~2-3 nodes in practice.
 569         */
 570        struct list_head        btree_cache;
 571        struct list_head        btree_cache_freeable;
 572        struct list_head        btree_cache_freed;
 573
 574        /* Number of elements in btree_cache + btree_cache_freeable lists */
 575        unsigned int            btree_cache_used;
 576
 577        /*
 578         * If we need to allocate memory for a new btree node and that
 579         * allocation fails, we can cannibalize another node in the btree cache
 580         * to satisfy the allocation - lock to guarantee only one thread does
 581         * this at a time:
 582         */
 583        wait_queue_head_t       btree_cache_wait;
 584        struct task_struct      *btree_cache_alloc_lock;
 585
 586        /*
 587         * When we free a btree node, we increment the gen of the bucket the
 588         * node is in - but we can't rewrite the prios and gens until we've
 589         * finished whatever it is we were doing; otherwise, after a crash, the
 590         * btree node would be freed but (for, say, a split) we might not yet
 591         * have inserted the pointers to the new nodes into the btree.
 592         *
 593         * This is a refcount that blocks prio_write() until the new keys are
 594         * written.
 595         */
 596        atomic_t                prio_blocked;
 597        wait_queue_head_t       bucket_wait;
 598
 599        /*
 600         * For any bio we don't skip, we subtract the number of sectors from
 601         * rescale; when it hits 0 we rescale all the bucket priorities.
 602         */
 603        atomic_t                rescale;
 604        /*
 605         * Used by GC to tell whether any front-end I/O is in flight
 606         */
 607        atomic_t                search_inflight;
 608        /*
 609         * When we invalidate buckets, we use both the priority and the amount
 610         * of good data to determine which buckets to reuse first - to weight
 611         * those together consistently we keep track of the smallest nonzero
 612         * priority of any bucket.
 613         */
 614        uint16_t                min_prio;
 615
 616        /*
 617         * max(gen - last_gc) for all buckets. When it gets too big we have to
 618         * gc to keep gens from wrapping around.
 619         */
 620        uint8_t                 need_gc;
 621        struct gc_stat          gc_stats;
 622        size_t                  nbuckets;
 623        size_t                  avail_nbuckets;
 624
 625        struct task_struct      *gc_thread;
 626        /* Where in the btree gc currently is */
 627        struct bkey             gc_done;
 628
 629        /*
 630         * For automatic garbage collection after writeback completes, this
 631         * variable is used as a bit field:
 632         * - 0000 0001b (BCH_ENABLE_AUTO_GC): enable gc after writeback
 633         * - 0000 0010b (BCH_DO_AUTO_GC):     do gc after writeback
 634         * This is an optimization for write requests that arrive after writeback
 635         * finishes, but the read hit rate may drop because clean data in the
 636         * cache gets discarded. Unless the user explicitly enables it via
 637         * sysfs, it stays off.
 638         */
 639#define BCH_ENABLE_AUTO_GC      1
 640#define BCH_DO_AUTO_GC          2
 641        uint8_t                 gc_after_writeback;
 642
 643        /*
 644         * The allocation code needs gc_mark in struct bucket to be correct, but
 645         * it's not while a gc is in progress. Protected by bucket_lock.
 646         */
 647        int                     gc_mark_valid;
 648
 649        /* Counts how many sectors bio_insert has added to the cache */
 650        atomic_t                sectors_to_gc;
 651        wait_queue_head_t       gc_wait;
 652
 653        struct keybuf           moving_gc_keys;
 654        /* Number of moving GC bios in flight */
 655        struct semaphore        moving_in_flight;
 656
 657        struct workqueue_struct *moving_gc_wq;
 658
 659        struct btree            *root;
 660
 661#ifdef CONFIG_BCACHE_DEBUG
 662        struct btree            *verify_data;
 663        struct bset             *verify_ondisk;
 664        struct mutex            verify_lock;
 665#endif
 666
 667        unsigned int            nr_uuids;
 668        struct uuid_entry       *uuids;
 669        BKEY_PADDED(uuid_bucket);
 670        struct closure          uuid_write;
 671        struct semaphore        uuid_write_mutex;
 672
 673        /*
 674         * A btree node on disk could have too many bsets for an iterator to fit
 675         * on the stack - so we have to dynamically allocate them.
 676         * bch_cache_set_alloc() makes sure the pool can allocate iterators with
 677         * enough room to hold
 678         *     (sb.bucket_size / sb.block_size)
 679         * btree_iter_sets, which is more than the static MAX_BSETS.
 680         */
 681        mempool_t               fill_iter;
 682
 683        struct bset_sort_state  sort;
 684
 685        /* List of buckets we're currently writing data to */
 686        struct list_head        data_buckets;
 687        spinlock_t              data_bucket_lock;
 688
 689        struct journal          journal;
 690
 691#define CONGESTED_MAX           1024
 692        unsigned int            congested_last_us;
 693        atomic_t                congested;
 694
 695        /* The rest of this all shows up in sysfs */
 696        unsigned int            congested_read_threshold_us;
 697        unsigned int            congested_write_threshold_us;
 698
 699        struct time_stats       btree_gc_time;
 700        struct time_stats       btree_split_time;
 701        struct time_stats       btree_read_time;
 702
 703        atomic_long_t           cache_read_races;
 704        atomic_long_t           writeback_keys_done;
 705        atomic_long_t           writeback_keys_failed;
 706
 707        atomic_long_t           reclaim;
 708        atomic_long_t           reclaimed_journal_buckets;
 709        atomic_long_t           flush_write;
 710
 711        enum                    {
 712                ON_ERROR_UNREGISTER,
 713                ON_ERROR_PANIC,
 714        }                       on_error;
 715#define DEFAULT_IO_ERROR_LIMIT 8
 716        unsigned int            error_limit;
 717        unsigned int            error_decay;
 718
 719        unsigned short          journal_delay_ms;
 720        bool                    expensive_debug_checks;
 721        unsigned int            verify:1;
 722        unsigned int            key_merging_disabled:1;
 723        unsigned int            gc_always_rewrite:1;
 724        unsigned int            shrinker_disabled:1;
 725        unsigned int            copy_gc_enabled:1;
 726
 727#define BUCKET_HASH_BITS        12
 728        struct hlist_head       bucket_hash[1 << BUCKET_HASH_BITS];
 729};
 730
 731struct bbio {
 732        unsigned int            submit_time_us;
 733        union {
 734                struct bkey     key;
 735                uint64_t        _pad[3];
 736                /*
 737                 * We only need pad = 3 here because we only ever carry around a
 738                 * single pointer - i.e. the pointer we're doing io to/from.
 739                 */
 740        };
 741        struct bio              bio;
 742};
 743
 744#define BTREE_PRIO              USHRT_MAX
 745#define INITIAL_PRIO            32768U
 746
 747#define btree_bytes(c)          ((c)->btree_pages * PAGE_SIZE)
 748#define btree_blocks(b)                                                 \
 749        ((unsigned int) (KEY_SIZE(&b->key) >> (b)->c->block_bits))
 750
 751#define btree_default_blocks(c)                                         \
 752        ((unsigned int) ((PAGE_SECTORS * (c)->btree_pages) >> (c)->block_bits))
 753
 754#define bucket_pages(c)         ((c)->sb.bucket_size / PAGE_SECTORS)
 755#define bucket_bytes(c)         ((c)->sb.bucket_size << 9)
 756#define block_bytes(c)          ((c)->sb.block_size << 9)
 757
 758#define prios_per_bucket(c)                             \
 759        ((bucket_bytes(c) - sizeof(struct prio_set)) /  \
 760         sizeof(struct bucket_disk))
 761#define prio_buckets(c)                                 \
 762        DIV_ROUND_UP((size_t) (c)->sb.nbuckets, prios_per_bucket(c))
 763
 764static inline size_t sector_to_bucket(struct cache_set *c, sector_t s)
 765{
 766        return s >> c->bucket_bits;
 767}
 768
 769static inline sector_t bucket_to_sector(struct cache_set *c, size_t b)
 770{
 771        return ((sector_t) b) << c->bucket_bits;
 772}
 773
 774static inline sector_t bucket_remainder(struct cache_set *c, sector_t s)
 775{
 776        return s & (c->sb.bucket_size - 1);
 777}
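
/*
 * Worked example of the conversions above (illustrative numbers): bucket_bits
 * is log2 of the bucket size in sectors, so with 1024-sector (512 KiB)
 * buckets, bucket_bits == 10 and:
 *
 *	sector_to_bucket(c, 5000)  == 5000 >> 10  == 4
 *	bucket_to_sector(c, 4)     == 4 << 10     == 4096
 *	bucket_remainder(c, 5000)  == 5000 & 1023 == 904
 */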
 778
 779static inline struct cache *PTR_CACHE(struct cache_set *c,
 780                                      const struct bkey *k,
 781                                      unsigned int ptr)
 782{
 783        return c->cache[PTR_DEV(k, ptr)];
 784}
 785
 786static inline size_t PTR_BUCKET_NR(struct cache_set *c,
 787                                   const struct bkey *k,
 788                                   unsigned int ptr)
 789{
 790        return sector_to_bucket(c, PTR_OFFSET(k, ptr));
 791}
 792
 793static inline struct bucket *PTR_BUCKET(struct cache_set *c,
 794                                        const struct bkey *k,
 795                                        unsigned int ptr)
 796{
 797        return PTR_CACHE(c, k, ptr)->buckets + PTR_BUCKET_NR(c, k, ptr);
 798}
 799
 800static inline uint8_t gen_after(uint8_t a, uint8_t b)
 801{
 802        uint8_t r = a - b;
 803
 804        return r > 128U ? 0 : r;
 805}
 806
 807static inline uint8_t ptr_stale(struct cache_set *c, const struct bkey *k,
 808                                unsigned int i)
 809{
 810        return gen_after(PTR_BUCKET(c, k, i)->gen, PTR_GEN(k, i));
 811}
 812
 813static inline bool ptr_available(struct cache_set *c, const struct bkey *k,
 814                                 unsigned int i)
 815{
 816        return (PTR_DEV(k, i) < MAX_CACHES_PER_SET) && PTR_CACHE(c, k, i);
 817}
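
/*
 * Typical shape of a pointer-validity check using the helpers above (a
 * sketch, not copied from the driver): a pointer is only trustworthy if its
 * cache device is present and its gen still matches the bucket it points at.
 *
 *	unsigned int i;
 *
 *	for (i = 0; i < KEY_PTRS(k); i++)
 *		if (ptr_available(c, k, i) && !ptr_stale(c, k, i))
 *			return true;	(at least one live copy of the data)
 */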
 818
 819/* Btree key macros */
 820
 821/*
 822 * This is used for various on disk data structures - cache_sb, prio_set, bset,
 823 * jset: The checksum is _always_ the first 8 bytes of these structs
 824 */
 825#define csum_set(i)                                                     \
 826        bch_crc64(((void *) (i)) + sizeof(uint64_t),                    \
 827                  ((void *) bset_bkey_last(i)) -                        \
 828                  (((void *) (i)) + sizeof(uint64_t)))
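
/*
 * Typical use (illustrative): after reading one of those structures from
 * disk, compare its embedded checksum field against csum_set(), e.g. for a
 * bset or jset i:
 *
 *	if (i->csum != csum_set(i))
 *		goto bad_checksum;
 */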
 829
 830/* Error handling macros */
 831
 832#define btree_bug(b, ...)                                               \
 833do {                                                                    \
 834        if (bch_cache_set_error((b)->c, __VA_ARGS__))                   \
 835                dump_stack();                                           \
 836} while (0)
 837
 838#define cache_bug(c, ...)                                               \
 839do {                                                                    \
 840        if (bch_cache_set_error(c, __VA_ARGS__))                        \
 841                dump_stack();                                           \
 842} while (0)
 843
 844#define btree_bug_on(cond, b, ...)                                      \
 845do {                                                                    \
 846        if (cond)                                                       \
 847                btree_bug(b, __VA_ARGS__);                              \
 848} while (0)
 849
 850#define cache_bug_on(cond, c, ...)                                      \
 851do {                                                                    \
 852        if (cond)                                                       \
 853                cache_bug(c, __VA_ARGS__);                              \
 854} while (0)
 855
 856#define cache_set_err_on(cond, c, ...)                                  \
 857do {                                                                    \
 858        if (cond)                                                       \
 859                bch_cache_set_error(c, __VA_ARGS__);                    \
 860} while (0)
 861
 862/* Looping macros */
 863
 864#define for_each_cache(ca, cs, iter)                                    \
 865        for (iter = 0; ca = cs->cache[iter], iter < (cs)->sb.nr_in_set; iter++)
 866
 867#define for_each_bucket(b, ca)                                          \
 868        for (b = (ca)->buckets + (ca)->sb.first_bucket;                 \
 869             b < (ca)->buckets + (ca)->sb.nbuckets; b++)
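
/*
 * Usage sketch for for_each_bucket() (hypothetical counter, not driver code):
 * walk every bucket on one cache device and count how many GC has marked as
 * reclaimable.
 *
 *	struct bucket *b;
 *	size_t reclaimable = 0;
 *
 *	for_each_bucket(b, ca)
 *		if (GC_MARK(b) == GC_MARK_RECLAIMABLE)
 *			reclaimable++;
 */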
 870
 871static inline void cached_dev_put(struct cached_dev *dc)
 872{
 873        if (refcount_dec_and_test(&dc->count))
 874                schedule_work(&dc->detach);
 875}
 876
 877static inline bool cached_dev_get(struct cached_dev *dc)
 878{
 879        if (!refcount_inc_not_zero(&dc->count))
 880                return false;
 881
 882        /* Paired with the mb in cached_dev_attach */
 883        smp_mb__after_atomic();
 884        return true;
 885}
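
/*
 * The refcount pairs up roughly like this (sketch only): any I/O path that
 * wants to use the attached cache set must hold a reference for the duration
 * of the request.
 *
 *	if (!cached_dev_get(dc))
 *		return;			(not attached, or detaching; go uncached)
 *
 *	... issue the cached request ...
 *
 *	cached_dev_put(dc);		(last put schedules the detach work)
 */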
 886
 887/*
 888 * bucket_gc_gen() returns the difference between the bucket's current gen and
 889 * the oldest gen of any pointer into that bucket in the btree (last_gc).
 890 */
 891
 892static inline uint8_t bucket_gc_gen(struct bucket *b)
 893{
 894        return b->gen - b->last_gc;
 895}
 896
 897#define BUCKET_GC_GEN_MAX       96U
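
/*
 * Sketch of how this limit is meant to be applied (an assumption based on the
 * comment above, not a quote from the allocator): a bucket whose gen has run
 * too far ahead of last_gc can't be invalidated again until gc catches up,
 * otherwise the 8 bit gen could wrap and make stale pointers look valid.
 *
 *	if (bucket_gc_gen(b) >= BUCKET_GC_GEN_MAX)
 *		return false;		(wait for gc before reusing this bucket)
 */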
 898
 899#define kobj_attribute_write(n, fn)                                     \
 900        static struct kobj_attribute ksysfs_##n = __ATTR(n, 0200, NULL, fn)
 901
 902#define kobj_attribute_rw(n, show, store)                               \
 903        static struct kobj_attribute ksysfs_##n =                       \
 904                __ATTR(n, 0600, show, store)
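
/*
 * Usage sketch (hypothetical attribute and handler names): these wrappers
 * just declare a static struct kobj_attribute with the given mode and
 * show/store callbacks.
 *
 *	kobj_attribute_write(trigger_gc, store_trigger_gc);
 *	kobj_attribute_rw(verbose, show_verbose, store_verbose);
 */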
 905
 906static inline void wake_up_allocators(struct cache_set *c)
 907{
 908        struct cache *ca;
 909        unsigned int i;
 910
 911        for_each_cache(ca, c, i)
 912                wake_up_process(ca->alloc_thread);
 913}
 914
 915static inline void closure_bio_submit(struct cache_set *c,
 916                                      struct bio *bio,
 917                                      struct closure *cl)
 918{
 919        closure_get(cl);
 920        if (unlikely(test_bit(CACHE_SET_IO_DISABLE, &c->flags))) {
 921                bio->bi_status = BLK_STS_IOERR;
 922                bio_endio(bio);
 923                return;
 924        }
 925        generic_make_request(bio);
 926}
 927
 928/*
 929 * Prevent a kthread from exiting on its own, so that when kthread_stop()
 930 * is called to stop it, the kthread is still alive. If a kthread might
 931 * stop itself because the CACHE_SET_IO_DISABLE bit is set, it must call
 932 * wait_for_kthread_stop() before returning.
 933 */
 934static inline void wait_for_kthread_stop(void)
 935{
 936        while (!kthread_should_stop()) {
 937                set_current_state(TASK_INTERRUPTIBLE);
 938                schedule();
 939        }
 940}
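
/*
 * A kthread that may stop itself when CACHE_SET_IO_DISABLE is set would use
 * it roughly like this (sketch only; c is the cache_set passed in via arg):
 *
 *	static int example_thread(void *arg)
 *	{
 *		while (!kthread_should_stop()) {
 *			if (unlikely(test_bit(CACHE_SET_IO_DISABLE, &c->flags)))
 *				break;
 *			... do one unit of work or sleep ...
 *		}
 *
 *		wait_for_kthread_stop();	(don't return before kthread_stop())
 *		return 0;
 *	}
 */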
 941
 942/* Forward declarations */
 943
 944void bch_count_backing_io_errors(struct cached_dev *dc, struct bio *bio);
 945void bch_count_io_errors(struct cache *ca, blk_status_t error,
 946                         int is_read, const char *m);
 947void bch_bbio_count_io_errors(struct cache_set *c, struct bio *bio,
 948                              blk_status_t error, const char *m);
 949void bch_bbio_endio(struct cache_set *c, struct bio *bio,
 950                    blk_status_t error, const char *m);
 951void bch_bbio_free(struct bio *bio, struct cache_set *c);
 952struct bio *bch_bbio_alloc(struct cache_set *c);
 953
 954void __bch_submit_bbio(struct bio *bio, struct cache_set *c);
 955void bch_submit_bbio(struct bio *bio, struct cache_set *c,
 956                     struct bkey *k, unsigned int ptr);
 957
 958uint8_t bch_inc_gen(struct cache *ca, struct bucket *b);
 959void bch_rescale_priorities(struct cache_set *c, int sectors);
 960
 961bool bch_can_invalidate_bucket(struct cache *ca, struct bucket *b);
 962void __bch_invalidate_one_bucket(struct cache *ca, struct bucket *b);
 963
 964void __bch_bucket_free(struct cache *ca, struct bucket *b);
 965void bch_bucket_free(struct cache_set *c, struct bkey *k);
 966
 967long bch_bucket_alloc(struct cache *ca, unsigned int reserve, bool wait);
 968int __bch_bucket_alloc_set(struct cache_set *c, unsigned int reserve,
 969                           struct bkey *k, int n, bool wait);
 970int bch_bucket_alloc_set(struct cache_set *c, unsigned int reserve,
 971                         struct bkey *k, int n, bool wait);
 972bool bch_alloc_sectors(struct cache_set *c, struct bkey *k,
 973                       unsigned int sectors, unsigned int write_point,
 974                       unsigned int write_prio, bool wait);
 975bool bch_cached_dev_error(struct cached_dev *dc);
 976
 977__printf(2, 3)
 978bool bch_cache_set_error(struct cache_set *c, const char *fmt, ...);
 979
 980void bch_prio_write(struct cache *ca);
 981void bch_write_bdev_super(struct cached_dev *dc, struct closure *parent);
 982
 983extern struct workqueue_struct *bcache_wq;
 984extern struct workqueue_struct *bch_journal_wq;
 985extern struct mutex bch_register_lock;
 986extern struct list_head bch_cache_sets;
 987
 988extern struct kobj_type bch_cached_dev_ktype;
 989extern struct kobj_type bch_flash_dev_ktype;
 990extern struct kobj_type bch_cache_set_ktype;
 991extern struct kobj_type bch_cache_set_internal_ktype;
 992extern struct kobj_type bch_cache_ktype;
 993
 994void bch_cached_dev_release(struct kobject *kobj);
 995void bch_flash_dev_release(struct kobject *kobj);
 996void bch_cache_set_release(struct kobject *kobj);
 997void bch_cache_release(struct kobject *kobj);
 998
 999int bch_uuid_write(struct cache_set *c);
1000void bcache_write_super(struct cache_set *c);
1001
1002int bch_flash_dev_create(struct cache_set *c, uint64_t size);
1003
1004int bch_cached_dev_attach(struct cached_dev *dc, struct cache_set *c,
1005                          uint8_t *set_uuid);
1006void bch_cached_dev_detach(struct cached_dev *dc);
1007int bch_cached_dev_run(struct cached_dev *dc);
1008void bcache_device_stop(struct bcache_device *d);
1009
1010void bch_cache_set_unregister(struct cache_set *c);
1011void bch_cache_set_stop(struct cache_set *c);
1012
1013struct cache_set *bch_cache_set_alloc(struct cache_sb *sb);
1014void bch_btree_cache_free(struct cache_set *c);
1015int bch_btree_cache_alloc(struct cache_set *c);
1016void bch_moving_init_cache_set(struct cache_set *c);
1017int bch_open_buckets_alloc(struct cache_set *c);
1018void bch_open_buckets_free(struct cache_set *c);
1019
1020int bch_cache_allocator_start(struct cache *ca);
1021
1022void bch_debug_exit(void);
1023void bch_debug_init(void);
1024void bch_request_exit(void);
1025int bch_request_init(void);
1026
1027#endif /* _BCACHE_H */
1028