linux/drivers/md/bcache/alloc.c
/*
 * Primary bucket allocation code
 *
 * Copyright 2012 Google, Inc.
 *
 * Allocation in bcache is done in terms of buckets:
 *
 * Each bucket has an associated 8 bit gen; this gen corresponds to the gen in
 * btree pointers - they must match for the pointer to be considered valid.
 *
 * Thus (assuming a bucket has no dirty data or metadata in it) we can reuse a
 * bucket simply by incrementing its gen.
 *
 * The gens (along with the priorities; it's really the gens that are
 * important, but the code is named as if it's the priorities) are written in
 * an arbitrary list of buckets on disk, with a pointer to them in the journal
 * header.
 *
 * When we invalidate a bucket, we have to write its new gen to disk and wait
 * for that write to complete before we use it - otherwise after a crash we
 * could have pointers that appeared to be good but pointed to data that had
 * been overwritten.
 *
 * Since the gens and priorities are all stored contiguously on disk, we can
 * batch this up: We fill up the free_inc list with freshly invalidated buckets,
 * call prio_write(), and when prio_write() finishes we pull buckets off the
 * free_inc list and optionally discard them.
 *
 * free_inc isn't the only freelist - if it was, we'd often have to sleep while
 * priorities and gens were being written before we could allocate. c->free is a
 * smaller freelist, and buckets on that list are always ready to be used.
 *
 * If we've got discards enabled, that happens when a bucket moves from the
 * free_inc list to the free list.
 *
 * There is another freelist, because sometimes we have buckets that we know
 * have nothing pointing into them - these we can reuse without waiting for
 * priorities to be rewritten. These come from freed btree nodes and buckets
 * that garbage collection discovered no longer had valid keys pointing into
 * them (because they were overwritten). That's the unused list - buckets on the
 * unused list move to the free list, optionally being discarded in the process.
 *
 * It's also important to ensure that gens don't wrap around - with respect to
 * either the oldest gen in the btree or the gen on disk. This is quite
 * difficult to do in practice, but we explicitly guard against it anyway - if
 * a bucket is in danger of wrapping around we simply skip invalidating it that
 * time around, and we garbage collect or rewrite the priorities sooner than we
 * would have otherwise.
 *
 * bch_bucket_alloc() allocates a single bucket from a specific cache.
 *
 * bch_bucket_alloc_set() allocates one or more buckets from different caches
 * out of a cache set.
 *
 * The allocator thread (bch_allocator_thread()) drives all the processes
 * described above. It's woken up from bch_bucket_alloc() and a few other
 * places that need to make sure free buckets are ready.
 *
 * invalidate_buckets_(lru|fifo|random)() find buckets that are available to
 * be invalidated, invalidate them, and stick them on the free_inc list - in
 * lru, fifo or random order.
 */
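
/*
 * Illustrative sketch (not part of the driver): the gen match described
 * above. The real check is ptr_stale() in bcache.h, which also has to cope
 * with gen wraparound; the name below is hypothetical.
 */
#if 0
static inline bool example_ptr_is_stale(uint8_t bucket_gen, uint8_t ptr_gen)
{
        /*
         * Once a bucket's gen has been incremented past the gen stored in a
         * btree pointer, that pointer no longer refers to live data.
         */
        return bucket_gen != ptr_gen;
}
#endif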

#include "bcache.h"
#include "btree.h"

#include <linux/freezer.h>
#include <linux/kthread.h>
#include <linux/random.h>
#include <trace/events/bcache.h>

#define MAX_IN_FLIGHT_DISCARDS          8U

/* Bucket heap / gen */

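/*
 * Bump a bucket's gen, invalidating any btree pointers into it. Also tracks
 * how far gens have run ahead of the last garbage collection (need_gc) and,
 * on synchronous cache sets, ahead of the gens last written to disk
 * (need_save_prio), so the wraparound guard in can_inc_bucket_gen() has
 * something to compare against.
 */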
uint8_t bch_inc_gen(struct cache *ca, struct bucket *b)
{
        uint8_t ret = ++b->gen;

        ca->set->need_gc = max(ca->set->need_gc, bucket_gc_gen(b));
        WARN_ON_ONCE(ca->set->need_gc > BUCKET_GC_GEN_MAX);

        if (CACHE_SYNC(&ca->set->sb)) {
                ca->need_save_prio = max(ca->need_save_prio,
                                         bucket_disk_gen(b));
                WARN_ON_ONCE(ca->need_save_prio > BUCKET_DISK_GEN_MAX);
        }

        return ret;
}

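/*
 * Age bucket priorities as IO flows through the cache: for roughly every
 * 1/1024th of the cache's capacity worth of IO, decrement the priority of
 * every unpinned, non-btree bucket that still has one, and recompute
 * c->min_prio, which bucket_prio() below uses as its baseline. Recently
 * untouched buckets thus drift towards prio 0 and become preferred victims
 * for invalidate_buckets_lru().
 */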
void bch_rescale_priorities(struct cache_set *c, int sectors)
{
        struct cache *ca;
        struct bucket *b;
        unsigned next = c->nbuckets * c->sb.bucket_size / 1024;
        unsigned i;
        int r;

        atomic_sub(sectors, &c->rescale);

        do {
                r = atomic_read(&c->rescale);

                if (r >= 0)
                        return;
        } while (atomic_cmpxchg(&c->rescale, r, r + next) != r);

        mutex_lock(&c->bucket_lock);

        c->min_prio = USHRT_MAX;

        for_each_cache(ca, c, i)
                for_each_bucket(b, ca)
                        if (b->prio &&
                            b->prio != BTREE_PRIO &&
                            !atomic_read(&b->pin)) {
                                b->prio--;
                                c->min_prio = min(c->min_prio, b->prio);
                        }

        mutex_unlock(&c->bucket_lock);
}

/* Discard/TRIM */

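/*
 * Discards are issued from a small preallocated pool of these:
 * bch_cache_allocator_init() allocates MAX_IN_FLIGHT_DISCARDS of them, and
 * idle ones sit on ca->discards, protected by bucket_lock. Embedding the bio
 * and biovec means issuing a discard never has to allocate memory.
 */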
struct discard {
        struct list_head        list;
        struct work_struct      work;
        struct cache            *ca;
        long                    bucket;

        struct bio              bio;
        struct bio_vec          bv;
};

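/*
 * Runs from a workqueue rather than directly from discard_endio(), because
 * completion needs bucket_lock (a mutex): the discarded bucket goes onto the
 * free list and the discard object back onto the pool. A discard error
 * disables discards for this cache entirely.
 */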
static void discard_finish(struct work_struct *w)
{
        struct discard *d = container_of(w, struct discard, work);
        struct cache *ca = d->ca;
        char buf[BDEVNAME_SIZE];

        if (!test_bit(BIO_UPTODATE, &d->bio.bi_flags)) {
                pr_notice("discard error on %s, disabling",
                         bdevname(ca->bdev, buf));
                d->ca->discard = 0;
        }

        mutex_lock(&ca->set->bucket_lock);

        fifo_push(&ca->free, d->bucket);
        list_add(&d->list, &ca->discards);
        atomic_dec(&ca->discards_in_flight);

        mutex_unlock(&ca->set->bucket_lock);

        closure_wake_up(&ca->set->bucket_wait);
        wake_up_process(ca->alloc_thread);

        closure_put(&ca->set->cl);
}

static void discard_endio(struct bio *bio, int error)
{
        struct discard *d = container_of(bio, struct discard, bio);
        schedule_work(&d->work);
}

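/*
 * Called with bucket_lock held and ca->discards known to be non-empty (the
 * allocator thread waits for both before calling): take an idle discard
 * object off the pool and issue an idle-priority REQ_DISCARD bio covering
 * the whole bucket. The bucket only reaches the free list once the discard
 * completes, in discard_finish().
 */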
static void do_discard(struct cache *ca, long bucket)
{
        struct discard *d = list_first_entry(&ca->discards,
                                             struct discard, list);

        list_del(&d->list);
        d->bucket = bucket;

        atomic_inc(&ca->discards_in_flight);
        closure_get(&ca->set->cl);

        bio_init(&d->bio);

        d->bio.bi_sector        = bucket_to_sector(ca->set, d->bucket);
        d->bio.bi_bdev          = ca->bdev;
        d->bio.bi_rw            = REQ_WRITE|REQ_DISCARD;
        d->bio.bi_max_vecs      = 1;
        d->bio.bi_io_vec        = d->bio.bi_inline_vecs;
        d->bio.bi_size          = bucket_bytes(ca);
        d->bio.bi_end_io        = discard_endio;
        bio_set_prio(&d->bio, IOPRIO_PRIO_VALUE(IOPRIO_CLASS_IDLE, 0));

        submit_bio(0, &d->bio);
}

/* Allocation */

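/*
 * The wraparound guard described at the top of the file: refuse to bump a
 * bucket's gen once it has run too far ahead of the last garbage collection
 * or the last prio write.
 */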
static inline bool can_inc_bucket_gen(struct bucket *b)
{
        return bucket_gc_gen(b) < BUCKET_GC_GEN_MAX &&
                bucket_disk_gen(b) < BUCKET_DISK_GEN_MAX;
}

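/*
 * Try to stash a completely empty bucket (no GC mark, no sectors in use) on
 * the unused list, from which it can be handed out again without waiting for
 * a prio write; the bucket is pinned on success. Skipped under FIFO
 * replacement while the free list is above the moving-GC watermark,
 * presumably to keep reuse in FIFO order.
 */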
bool bch_bucket_add_unused(struct cache *ca, struct bucket *b)
{
        BUG_ON(GC_MARK(b) || GC_SECTORS_USED(b));

        if (fifo_used(&ca->free) > ca->watermark[WATERMARK_MOVINGGC] &&
            CACHE_REPLACEMENT(&ca->sb) == CACHE_REPLACEMENT_FIFO)
                return false;

        b->prio = 0;

        if (can_inc_bucket_gen(b) &&
            fifo_push(&ca->unused, b - ca->buckets)) {
                atomic_inc(&b->pin);
                return true;
        }

        return false;
}

static bool can_invalidate_bucket(struct cache *ca, struct bucket *b)
{
        return GC_MARK(b) == GC_MARK_RECLAIMABLE &&
                !atomic_read(&b->pin) &&
                can_inc_bucket_gen(b);
}

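/*
 * The caller has already checked can_invalidate_bucket(): bump the gen to
 * invalidate any pointers still referencing the bucket, reset its priority,
 * and pin it so it can't be invalidated again before bch_bucket_alloc()
 * hands it out.
 */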
static void invalidate_one_bucket(struct cache *ca, struct bucket *b)
{
        bch_inc_gen(ca, b);
        b->prio = INITIAL_PRIO;
        atomic_inc(&b->pin);
        fifo_push(&ca->free_inc, b - ca->buckets);
}

#define bucket_prio(b)                          \
        (((unsigned) (b->prio - ca->set->min_prio)) * GC_SECTORS_USED(b))

#define bucket_max_cmp(l, r)    (bucket_prio(l) < bucket_prio(r))
#define bucket_min_cmp(l, r)    (bucket_prio(l) > bucket_prio(r))

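/*
 * Classic bounded top-k selection: the first pass keeps the heap.size best
 * victims (lowest bucket_prio) in a max-heap, so the worst candidate sits at
 * the root where it can be cheaply replaced; the second pass re-heapifies
 * with the inverted comparator so heap_pop() yields the cheapest buckets to
 * evict first.
 */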
static void invalidate_buckets_lru(struct cache *ca)
{
        struct bucket *b;
        ssize_t i;

        ca->heap.used = 0;

        for_each_bucket(b, ca) {
                /*
                 * If the unused list is full, and we then return before
                 * adding anything to the free_inc list, we'll skip writing
                 * prios/gens and just go back to allocating from the unused
                 * list:
                 */
                if (fifo_full(&ca->unused))
                        return;

                if (!can_invalidate_bucket(ca, b))
                        continue;

                if (!GC_SECTORS_USED(b) &&
                    bch_bucket_add_unused(ca, b))
                        continue;

                if (!heap_full(&ca->heap))
                        heap_add(&ca->heap, b, bucket_max_cmp);
                else if (bucket_max_cmp(b, heap_peek(&ca->heap))) {
                        ca->heap.data[0] = b;
                        heap_sift(&ca->heap, 0, bucket_max_cmp);
                }
        }

        for (i = ca->heap.used / 2 - 1; i >= 0; --i)
                heap_sift(&ca->heap, i, bucket_min_cmp);

        while (!fifo_full(&ca->free_inc)) {
                if (!heap_pop(&ca->heap, b, bucket_min_cmp)) {
                        /*
                         * We don't want to be calling invalidate_buckets()
                         * multiple times when it can't do anything
                         */
                        ca->invalidate_needs_gc = 1;
                        bch_queue_gc(ca->set);
                        return;
                }

                invalidate_one_bucket(ca, b);
        }
}

static void invalidate_buckets_fifo(struct cache *ca)
{
        struct bucket *b;
        size_t checked = 0;

        while (!fifo_full(&ca->free_inc)) {
                if (ca->fifo_last_bucket <  ca->sb.first_bucket ||
                    ca->fifo_last_bucket >= ca->sb.nbuckets)
                        ca->fifo_last_bucket = ca->sb.first_bucket;

                b = ca->buckets + ca->fifo_last_bucket++;

                if (can_invalidate_bucket(ca, b))
                        invalidate_one_bucket(ca, b);

                if (++checked >= ca->sb.nbuckets) {
                        ca->invalidate_needs_gc = 1;
                        bch_queue_gc(ca->set);
                        return;
                }
        }
}

static void invalidate_buckets_random(struct cache *ca)
{
        struct bucket *b;
        size_t checked = 0;

        while (!fifo_full(&ca->free_inc)) {
                size_t n;
                get_random_bytes(&n, sizeof(n));

                n %= (size_t) (ca->sb.nbuckets - ca->sb.first_bucket);
                n += ca->sb.first_bucket;

                b = ca->buckets + n;

                if (can_invalidate_bucket(ca, b))
                        invalidate_one_bucket(ca, b);

                if (++checked >= ca->sb.nbuckets / 2) {
                        ca->invalidate_needs_gc = 1;
                        bch_queue_gc(ca->set);
                        return;
                }
        }
}

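/*
 * Top up free_inc according to the configured replacement policy. If a
 * previous pass found nothing to invalidate, invalidate_needs_gc is set and
 * we do nothing more until garbage collection has run.
 */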
static void invalidate_buckets(struct cache *ca)
{
        if (ca->invalidate_needs_gc)
                return;

        switch (CACHE_REPLACEMENT(&ca->sb)) {
        case CACHE_REPLACEMENT_LRU:
                invalidate_buckets_lru(ca);
                break;
        case CACHE_REPLACEMENT_FIFO:
                invalidate_buckets_fifo(ca);
                break;
        case CACHE_REPLACEMENT_RANDOM:
                invalidate_buckets_random(ca);
                break;
        }

        trace_bcache_alloc_invalidate(ca);
}

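/*
 * Sleep until cond is true. bucket_lock is dropped while sleeping and
 * reacquired before cond is rechecked, so cond is always evaluated with the
 * lock held. Returns 0 from the enclosing function (this is only used by
 * bch_allocator_thread()) if the kthread is being stopped.
 */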
#define allocator_wait(ca, cond)                                        \
do {                                                                    \
        while (1) {                                                     \
                set_current_state(TASK_INTERRUPTIBLE);                  \
                if (cond)                                               \
                        break;                                          \
                                                                        \
                mutex_unlock(&(ca)->set->bucket_lock);                  \
                if (kthread_should_stop())                              \
                        return 0;                                       \
                                                                        \
                try_to_freeze();                                        \
                schedule();                                             \
                mutex_lock(&(ca)->set->bucket_lock);                    \
        }                                                               \
        __set_current_state(TASK_RUNNING);                              \
} while (0)

static int bch_allocator_thread(void *arg)
{
        struct cache *ca = arg;

        mutex_lock(&ca->set->bucket_lock);

        while (1) {
                /*
                 * First, we pull buckets off of the unused and free_inc
                 * lists, possibly issue discards to them, then add them to
                 * the free list:
                 */
                while (1) {
                        long bucket;

                        if ((!atomic_read(&ca->set->prio_blocked) ||
                             !CACHE_SYNC(&ca->set->sb)) &&
                            !fifo_empty(&ca->unused))
                                fifo_pop(&ca->unused, bucket);
                        else if (!fifo_empty(&ca->free_inc))
                                fifo_pop(&ca->free_inc, bucket);
                        else
                                break;

                        allocator_wait(ca, (int) fifo_free(&ca->free) >
                                       atomic_read(&ca->discards_in_flight));

                        if (ca->discard) {
                                allocator_wait(ca, !list_empty(&ca->discards));
                                do_discard(ca, bucket);
                        } else {
                                fifo_push(&ca->free, bucket);
                                closure_wake_up(&ca->set->bucket_wait);
                        }
                }

                /*
                 * We've run out of free buckets, we need to find some buckets
                 * we can invalidate. First, invalidate them in memory and add
                 * them to the free_inc list:
                 */

                allocator_wait(ca, ca->set->gc_mark_valid &&
                               (ca->need_save_prio > 64 ||
                                !ca->invalidate_needs_gc));
                invalidate_buckets(ca);

                /*
                 * Now, we write their new gens to disk so we can start writing
                 * new stuff to them:
                 */
                allocator_wait(ca, !atomic_read(&ca->set->prio_blocked));
                if (CACHE_SYNC(&ca->set->sb) &&
                    (!fifo_empty(&ca->free_inc) ||
                     ca->need_save_prio > 64))
                        bch_prio_write(ca);
        }
}

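/*
 * Pop a bucket off the free list, but only while the list stays above the
 * caller's watermark, so that higher-priority allocations (prio writes,
 * btree nodes) always find a reserve. On failure, if a closure is supplied,
 * the caller is parked on bucket_wait; a blocking closure retries here once
 * the allocator thread has made progress.
 */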
long bch_bucket_alloc(struct cache *ca, unsigned watermark, struct closure *cl)
{
        long r = -1;
again:
        wake_up_process(ca->alloc_thread);

        if (fifo_used(&ca->free) > ca->watermark[watermark] &&
            fifo_pop(&ca->free, r)) {
                struct bucket *b = ca->buckets + r;
#ifdef CONFIG_BCACHE_EDEBUG
                size_t iter;
                long i;

                for (iter = 0; iter < prio_buckets(ca) * 2; iter++)
                        BUG_ON(ca->prio_buckets[iter] == (uint64_t) r);

                fifo_for_each(i, &ca->free, iter)
                        BUG_ON(i == r);
                fifo_for_each(i, &ca->free_inc, iter)
                        BUG_ON(i == r);
                fifo_for_each(i, &ca->unused, iter)
                        BUG_ON(i == r);
#endif
                BUG_ON(atomic_read(&b->pin) != 1);

                SET_GC_SECTORS_USED(b, ca->sb.bucket_size);

                if (watermark <= WATERMARK_METADATA) {
                        SET_GC_MARK(b, GC_MARK_METADATA);
                        b->prio = BTREE_PRIO;
                } else {
                        SET_GC_MARK(b, GC_MARK_RECLAIMABLE);
                        b->prio = INITIAL_PRIO;
                }

                return r;
        }

        trace_bcache_alloc_fail(ca);

        if (cl) {
                closure_wait(&ca->set->bucket_wait, cl);

                if (closure_blocking(cl)) {
                        mutex_unlock(&ca->set->bucket_lock);
                        closure_sync(cl);
                        mutex_lock(&ca->set->bucket_lock);
                        goto again;
                }
        }

        return -1;
}

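/*
 * Mark every bucket the key points into as empty and reclaimable, and try to
 * stash each one on its cache's unused list for quick reuse.
 */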
void bch_bucket_free(struct cache_set *c, struct bkey *k)
{
        unsigned i;

        for (i = 0; i < KEY_PTRS(k); i++) {
                struct bucket *b = PTR_BUCKET(c, k, i);

                SET_GC_MARK(b, GC_MARK_RECLAIMABLE);
                SET_GC_SECTORS_USED(b, 0);
                bch_bucket_add_unused(PTR_CACHE(c, k, i), b);
        }
}

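/*
 * Allocate n buckets, one from each of the first n caches in cache_by_alloc
 * order, and build a key carrying one pointer per bucket (one per replica).
 * On failure, everything allocated so far is freed and the key dropped.
 */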
int __bch_bucket_alloc_set(struct cache_set *c, unsigned watermark,
                           struct bkey *k, int n, struct closure *cl)
{
        int i;

        lockdep_assert_held(&c->bucket_lock);
        BUG_ON(!n || n > c->caches_loaded || n > 8);

        bkey_init(k);

        /* sort by free space/prio of oldest data in caches */

        for (i = 0; i < n; i++) {
                struct cache *ca = c->cache_by_alloc[i];
                long b = bch_bucket_alloc(ca, watermark, cl);

                if (b == -1)
                        goto err;

                k->ptr[i] = PTR(ca->buckets[b].gen,
                                bucket_to_sector(c, b),
                                ca->sb.nr_this_dev);

                SET_KEY_PTRS(k, i + 1);
        }

        return 0;
err:
        bch_bucket_free(c, k);
        __bkey_put(c, k);
        return -1;
}

int bch_bucket_alloc_set(struct cache_set *c, unsigned watermark,
                         struct bkey *k, int n, struct closure *cl)
{
        int ret;

        mutex_lock(&c->bucket_lock);
        ret = __bch_bucket_alloc_set(c, watermark, k, n, cl);
        mutex_unlock(&c->bucket_lock);
        return ret;
}

/* Init */

int bch_cache_allocator_start(struct cache *ca)
{
        struct task_struct *k = kthread_run(bch_allocator_thread,
                                            ca, "bcache_allocator");
        if (IS_ERR(k))
                return PTR_ERR(k);

        ca->alloc_thread = k;
        return 0;
}

void bch_cache_allocator_exit(struct cache *ca)
{
        struct discard *d;

        while (!list_empty(&ca->discards)) {
                d = list_first_entry(&ca->discards, struct discard, list);
                cancel_work_sync(&d->work);
                list_del(&d->list);
                kfree(d);
        }
}

 570
 571int bch_cache_allocator_init(struct cache *ca)
 572{
 573        unsigned i;
 574
 575        /*
 576         * Reserve:
 577         * Prio/gen writes first
 578         * Then 8 for btree allocations
 579         * Then half for the moving garbage collector
 580         */
 581
 582        ca->watermark[WATERMARK_PRIO] = 0;
 583
 584        ca->watermark[WATERMARK_METADATA] = prio_buckets(ca);
 585
 586        ca->watermark[WATERMARK_MOVINGGC] = 8 +
 587                ca->watermark[WATERMARK_METADATA];
 588
 589        ca->watermark[WATERMARK_NONE] = ca->free.size / 2 +
 590                ca->watermark[WATERMARK_MOVINGGC];
 591
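        /*
         * With the defaults above this gives the ordering
         *
         *   PRIO (0) < METADATA (prio_buckets)
         *            < MOVINGGC (+8) < NONE (+free.size / 2)
         *
         * and since bch_bucket_alloc() only succeeds while the free list is
         * above the caller's watermark, ordinary allocations fail first,
         * moving GC next, and prio/gen writes can always allocate.
         */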
        for (i = 0; i < MAX_IN_FLIGHT_DISCARDS; i++) {
                struct discard *d = kzalloc(sizeof(*d), GFP_KERNEL);
                if (!d)
                        return -ENOMEM;

                d->ca = ca;
                INIT_WORK(&d->work, discard_finish);
                list_add(&d->list, &ca->discards);
        }

        return 0;
}