linux/drivers/md/bcache/alloc.c
/*
 * Primary bucket allocation code
 *
 * Copyright 2012 Google, Inc.
 *
 * Allocation in bcache is done in terms of buckets:
 *
 * Each bucket has an associated 8 bit gen; this gen corresponds to the gen in
 * btree pointers - they must match for the pointer to be considered valid.
 *
 * Thus (assuming a bucket has no dirty data or metadata in it) we can reuse a
 * bucket simply by incrementing its gen.
 *
 * The gens (along with the priorities; it's really the gens that are important
 * but the code is named as if it's the priorities) are written in an arbitrary
 * list of buckets on disk, with a pointer to them in the journal header.
 *
 * When we invalidate a bucket, we have to write its new gen to disk and wait
 * for that write to complete before we use it - otherwise after a crash we
 * could have pointers that appeared to be good but pointed to data that had
 * been overwritten.
 *
 * Since the gens and priorities are all stored contiguously on disk, we can
 * batch this up: We fill up the free_inc list with freshly invalidated buckets,
 * call prio_write(), and when prio_write() finishes we pull buckets off the
 * free_inc list and optionally discard them.
 *
 * free_inc isn't the only freelist - if it was, we'd often have to sleep while
 * priorities and gens were being written before we could allocate. c->free is a
 * smaller freelist, and buckets on that list are always ready to be used.
 *
 * If we've got discards enabled, that happens when a bucket moves from the
 * free_inc list to the free list.
 *
 * There is another freelist, because sometimes we have buckets that we know
 * have nothing pointing into them - these we can reuse without waiting for
 * priorities to be rewritten. These come from freed btree nodes and buckets
 * that garbage collection discovered no longer had valid keys pointing into
 * them (because they were overwritten). That's the unused list - buckets on the
 * unused list move to the free list, optionally being discarded in the process.
 *
 * It's also important to ensure that gens don't wrap around - with respect to
 * either the oldest gen in the btree or the gen on disk. This is quite
 * difficult to do in practice, but we explicitly guard against it anyway - if
 * a bucket is in danger of wrapping around we simply skip invalidating it that
 * time around, and we garbage collect or rewrite the priorities sooner than we
 * would have otherwise.
 *
 * bch_bucket_alloc() allocates a single bucket from a specific cache.
 *
 * bch_bucket_alloc_set() allocates one or more buckets from different caches
 * out of a cache set.
 *
 * The allocator thread (bch_allocator_thread()) drives all the processes
 * described above. bch_bucket_alloc() and a few other places that need to make
 * sure free buckets are ready wake it up.
 *
 * invalidate_buckets_(lru|fifo|random)() find buckets that are available to be
 * invalidated, invalidate them, and stick them on the free_inc list - in lru,
 * fifo or random order, respectively.
 */

#include "bcache.h"
#include "btree.h"

#include <linux/blkdev.h>
#include <linux/kthread.h>
#include <linux/random.h>
#include <trace/events/bcache.h>

/* Bucket heap / gen */

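/*
 * Bump a bucket's gen, invalidating any existing pointers into it, and update
 * the cache set's need_gc to track how far gens have advanced since the last
 * garbage collection pass.
 */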
uint8_t bch_inc_gen(struct cache *ca, struct bucket *b)
{
        uint8_t ret = ++b->gen;

        ca->set->need_gc = max(ca->set->need_gc, bucket_gc_gen(b));
        WARN_ON_ONCE(ca->set->need_gc > BUCKET_GC_GEN_MAX);

        return ret;
}

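/*
 * Called as writes happen: each time roughly 1/1024th of the cache's capacity
 * has been written, walk every bucket and decrement the priority of unpinned
 * data buckets, recomputing c->min_prio, so that bucket_prio() keeps
 * approximating least-recently-used order.
 */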
void bch_rescale_priorities(struct cache_set *c, int sectors)
{
        struct cache *ca;
        struct bucket *b;
        unsigned next = c->nbuckets * c->sb.bucket_size / 1024;
        unsigned i;
        int r;

        atomic_sub(sectors, &c->rescale);

        do {
                r = atomic_read(&c->rescale);

                if (r >= 0)
                        return;
        } while (atomic_cmpxchg(&c->rescale, r, r + next) != r);

        mutex_lock(&c->bucket_lock);

        c->min_prio = USHRT_MAX;

        for_each_cache(ca, c, i)
                for_each_bucket(b, ca)
                        if (b->prio &&
                            b->prio != BTREE_PRIO &&
                            !atomic_read(&b->pin)) {
                                b->prio--;
                                c->min_prio = min(c->min_prio, b->prio);
                        }

        mutex_unlock(&c->bucket_lock);
}

/*
 * Background allocation thread: scans for buckets to be invalidated,
 * invalidates them, rewrites prios/gens (marking them as invalidated on disk),
 * then optionally issues discard commands to the newly free buckets, then puts
 * them on the various freelists.
 */

static inline bool can_inc_bucket_gen(struct bucket *b)
{
        return bucket_gc_gen(b) < BUCKET_GC_GEN_MAX;
}

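/*
 * A bucket can be invalidated if garbage collection saw nothing live in it
 * (unmarked or marked reclaimable), nothing currently holds a pin on it, and
 * incrementing its gen wouldn't risk wrapping relative to the last gc pass.
 */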
bool bch_can_invalidate_bucket(struct cache *ca, struct bucket *b)
{
        BUG_ON(!ca->set->gc_mark_valid);

        return (!GC_MARK(b) ||
                GC_MARK(b) == GC_MARK_RECLAIMABLE) &&
                !atomic_read(&b->pin) &&
                can_inc_bucket_gen(b);
}

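/*
 * Invalidate a bucket in memory: bump its gen so existing pointers no longer
 * match, reset its priority, and take a pin so it can't be invalidated again
 * before the allocation that uses it completes.
 */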
void __bch_invalidate_one_bucket(struct cache *ca, struct bucket *b)
{
        lockdep_assert_held(&ca->set->bucket_lock);
        BUG_ON(GC_MARK(b) && GC_MARK(b) != GC_MARK_RECLAIMABLE);

        if (GC_SECTORS_USED(b))
                trace_bcache_invalidate(ca, b - ca->buckets);

        bch_inc_gen(ca, b);
        b->prio = INITIAL_PRIO;
        atomic_inc(&b->pin);
}

static void bch_invalidate_one_bucket(struct cache *ca, struct bucket *b)
{
        __bch_invalidate_one_bucket(ca, b);

        fifo_push(&ca->free_inc, b - ca->buckets);
}

/*
 * Determines the order in which we reuse buckets, smallest bucket_prio()
 * first: we also take into account the number of sectors of live data in the
 * bucket, and for that multiplication to make sense we have to scale the
 * bucket priorities.
 *
 * Thus, we scale the bucket priorities so that the bucket with the smallest
 * prio is worth 1/8th of what INITIAL_PRIO is worth.
 */

#define bucket_prio(b)                                                  \
({                                                                      \
        unsigned min_prio = (INITIAL_PRIO - ca->set->min_prio) / 8;     \
                                                                        \
        (b->prio - ca->set->min_prio + min_prio) * GC_SECTORS_USED(b);  \
})

#define bucket_max_cmp(l, r)    (bucket_prio(l) < bucket_prio(r))
#define bucket_min_cmp(l, r)    (bucket_prio(l) > bucket_prio(r))

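/*
 * Collect the invalidatable buckets with the smallest bucket_prio() into the
 * heap and invalidate them, smallest first, until free_inc is full; if we run
 * out of candidates before then, ask for a gc run so more buckets become
 * reclaimable.
 */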
static void invalidate_buckets_lru(struct cache *ca)
{
        struct bucket *b;
        ssize_t i;

        ca->heap.used = 0;

        for_each_bucket(b, ca) {
                if (!bch_can_invalidate_bucket(ca, b))
                        continue;

                if (!heap_full(&ca->heap))
                        heap_add(&ca->heap, b, bucket_max_cmp);
                else if (bucket_max_cmp(b, heap_peek(&ca->heap))) {
                        ca->heap.data[0] = b;
                        heap_sift(&ca->heap, 0, bucket_max_cmp);
                }
        }

        for (i = ca->heap.used / 2 - 1; i >= 0; --i)
                heap_sift(&ca->heap, i, bucket_min_cmp);

        while (!fifo_full(&ca->free_inc)) {
                if (!heap_pop(&ca->heap, b, bucket_min_cmp)) {
                        /*
                         * We don't want to be calling invalidate_buckets()
                         * multiple times when it can't do anything
                         */
                        ca->invalidate_needs_gc = 1;
                        wake_up_gc(ca->set);
                        return;
                }

                bch_invalidate_one_bucket(ca, b);
        }
}

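/*
 * FIFO replacement: sweep the buckets in order, wrapping around at the end of
 * the device, invalidating whatever can be invalidated until free_inc fills
 * up or we've checked every bucket.
 */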
static void invalidate_buckets_fifo(struct cache *ca)
{
        struct bucket *b;
        size_t checked = 0;

        while (!fifo_full(&ca->free_inc)) {
                if (ca->fifo_last_bucket <  ca->sb.first_bucket ||
                    ca->fifo_last_bucket >= ca->sb.nbuckets)
                        ca->fifo_last_bucket = ca->sb.first_bucket;

                b = ca->buckets + ca->fifo_last_bucket++;

                if (bch_can_invalidate_bucket(ca, b))
                        bch_invalidate_one_bucket(ca, b);

                if (++checked >= ca->sb.nbuckets) {
                        ca->invalidate_needs_gc = 1;
                        wake_up_gc(ca->set);
                        return;
                }
        }
}

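/*
 * Random replacement: probe buckets at random, giving up and asking for a gc
 * run after checking about half the device without filling free_inc.
 */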
static void invalidate_buckets_random(struct cache *ca)
{
        struct bucket *b;
        size_t checked = 0;

        while (!fifo_full(&ca->free_inc)) {
                size_t n;
                get_random_bytes(&n, sizeof(n));

                n %= (size_t) (ca->sb.nbuckets - ca->sb.first_bucket);
                n += ca->sb.first_bucket;

                b = ca->buckets + n;

                if (bch_can_invalidate_bucket(ca, b))
                        bch_invalidate_one_bucket(ca, b);

                if (++checked >= ca->sb.nbuckets / 2) {
                        ca->invalidate_needs_gc = 1;
                        wake_up_gc(ca->set);
                        return;
                }
        }
}

static void invalidate_buckets(struct cache *ca)
{
        BUG_ON(ca->invalidate_needs_gc);

        switch (CACHE_REPLACEMENT(&ca->sb)) {
        case CACHE_REPLACEMENT_LRU:
                invalidate_buckets_lru(ca);
                break;
        case CACHE_REPLACEMENT_FIFO:
                invalidate_buckets_fifo(ca);
                break;
        case CACHE_REPLACEMENT_RANDOM:
                invalidate_buckets_random(ca);
                break;
        }
}

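/*
 * Sleep until @cond is true, dropping bucket_lock while asleep. Note that this
 * returns 0 from the enclosing function if the kthread is told to stop, so it
 * may only be used from the allocator thread itself.
 */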
#define allocator_wait(ca, cond)                                        \
do {                                                                    \
        while (1) {                                                     \
                set_current_state(TASK_INTERRUPTIBLE);                  \
                if (cond)                                               \
                        break;                                          \
                                                                        \
                mutex_unlock(&(ca)->set->bucket_lock);                  \
                if (kthread_should_stop())                              \
                        return 0;                                       \
                                                                        \
                schedule();                                             \
                mutex_lock(&(ca)->set->bucket_lock);                    \
        }                                                               \
        __set_current_state(TASK_RUNNING);                              \
} while (0)

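/*
 * Hand a freshly invalidated bucket to one of the reserves: the prio/gen
 * reserve gets first refusal, then whichever other reserve has room. Returns
 * false if every reserve is currently full.
 */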
static bool bch_allocator_push(struct cache *ca, long bucket)
{
        unsigned i;

        /* Prios/gens are actually the most important reserve */
        if (fifo_push(&ca->free[RESERVE_PRIO], bucket))
                return true;

        for (i = 0; i < RESERVE_NR; i++)
                if (fifo_push(&ca->free[i], bucket))
                        return true;

        return false;
}

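/*
 * Main loop of the per-cache allocator thread: drain free_inc into the
 * reserves (issuing discards if enabled), then invalidate more buckets and,
 * on a synchronous cache, write the new prios/gens to disk before the buckets
 * are handed out.
 */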
static int bch_allocator_thread(void *arg)
{
        struct cache *ca = arg;

        mutex_lock(&ca->set->bucket_lock);

        while (1) {
                /*
                 * First, we pull buckets off of the free_inc list, possibly
                 * issue discards for them, then we add each bucket to the
                 * free lists:
                 */
                while (!fifo_empty(&ca->free_inc)) {
                        long bucket;

                        fifo_pop(&ca->free_inc, bucket);

                        if (ca->discard) {
                                mutex_unlock(&ca->set->bucket_lock);
                                blkdev_issue_discard(ca->bdev,
                                        bucket_to_sector(ca->set, bucket),
                                        ca->sb.bucket_size, GFP_KERNEL, 0);
                                mutex_lock(&ca->set->bucket_lock);
                        }

                        allocator_wait(ca, bch_allocator_push(ca, bucket));
                        wake_up(&ca->set->btree_cache_wait);
                        wake_up(&ca->set->bucket_wait);
                }

                /*
                 * We've run out of free buckets, we need to find some buckets
                 * we can invalidate. First, invalidate them in memory and add
                 * them to the free_inc list:
                 */

retry_invalidate:
                allocator_wait(ca, ca->set->gc_mark_valid &&
                               !ca->invalidate_needs_gc);
                invalidate_buckets(ca);

                /*
                 * Now, we write their new gens to disk so we can start writing
                 * new stuff to them:
                 */
                allocator_wait(ca, !atomic_read(&ca->set->prio_blocked));
                if (CACHE_SYNC(&ca->set->sb)) {
                        /*
                         * This could deadlock if an allocation with a btree
                         * node locked ever blocked - having the btree node
                         * locked would block garbage collection, but here we're
                         * waiting on garbage collection before we invalidate
                         * and free anything.
                         *
                         * But this should be safe since the btree code always
                         * uses btree_check_reserve() before allocating now, and
                         * if it fails it blocks without btree nodes locked.
                         */
                        if (!fifo_full(&ca->free_inc))
                                goto retry_invalidate;

                        bch_prio_write(ca);
                }
        }
}

/* Allocation */

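/*
 * Allocate a single bucket from @ca, taking from the general-purpose reserve
 * first and falling back to @reserve; if both are empty and @wait is set,
 * sleep on bucket_wait until the allocator thread refills them. Returns the
 * bucket index, or -1 on failure.
 */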
long bch_bucket_alloc(struct cache *ca, unsigned reserve, bool wait)
{
        DEFINE_WAIT(w);
        struct bucket *b;
        long r;

        /* fastpath */
        if (fifo_pop(&ca->free[RESERVE_NONE], r) ||
            fifo_pop(&ca->free[reserve], r))
                goto out;

        if (!wait) {
                trace_bcache_alloc_fail(ca, reserve);
                return -1;
        }

        do {
                prepare_to_wait(&ca->set->bucket_wait, &w,
                                TASK_UNINTERRUPTIBLE);

                mutex_unlock(&ca->set->bucket_lock);
                schedule();
                mutex_lock(&ca->set->bucket_lock);
        } while (!fifo_pop(&ca->free[RESERVE_NONE], r) &&
                 !fifo_pop(&ca->free[reserve], r));

        finish_wait(&ca->set->bucket_wait, &w);
out:
        wake_up_process(ca->alloc_thread);

        trace_bcache_alloc(ca, reserve);

        if (expensive_debug_checks(ca->set)) {
                size_t iter;
                long i;
                unsigned j;

                for (iter = 0; iter < prio_buckets(ca) * 2; iter++)
                        BUG_ON(ca->prio_buckets[iter] == (uint64_t) r);

                for (j = 0; j < RESERVE_NR; j++)
                        fifo_for_each(i, &ca->free[j], iter)
                                BUG_ON(i == r);
                fifo_for_each(i, &ca->free_inc, iter)
                        BUG_ON(i == r);
        }

        b = ca->buckets + r;

        BUG_ON(atomic_read(&b->pin) != 1);

        SET_GC_SECTORS_USED(b, ca->sb.bucket_size);

        if (reserve <= RESERVE_PRIO) {
                SET_GC_MARK(b, GC_MARK_METADATA);
                SET_GC_MOVE(b, 0);
                b->prio = BTREE_PRIO;
        } else {
                SET_GC_MARK(b, GC_MARK_RECLAIMABLE);
                SET_GC_MOVE(b, 0);
                b->prio = INITIAL_PRIO;
        }

        return r;
}

void __bch_bucket_free(struct cache *ca, struct bucket *b)
{
        SET_GC_MARK(b, 0);
        SET_GC_SECTORS_USED(b, 0);
}

void bch_bucket_free(struct cache_set *c, struct bkey *k)
{
        unsigned i;

        for (i = 0; i < KEY_PTRS(k); i++)
                __bch_bucket_free(PTR_CACHE(c, k, i),
                                  PTR_BUCKET(c, k, i));
}

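/*
 * Allocate @n buckets, one from each of the first @n caches in allocation
 * order, and build @k up to point at all of them; if any allocation fails,
 * the buckets already allocated are freed again.
 */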
int __bch_bucket_alloc_set(struct cache_set *c, unsigned reserve,
                           struct bkey *k, int n, bool wait)
{
        int i;

        lockdep_assert_held(&c->bucket_lock);
        BUG_ON(!n || n > c->caches_loaded || n > 8);

        bkey_init(k);

        /* sort by free space/prio of oldest data in caches */

        for (i = 0; i < n; i++) {
                struct cache *ca = c->cache_by_alloc[i];
                long b = bch_bucket_alloc(ca, reserve, wait);

                if (b == -1)
                        goto err;

                k->ptr[i] = PTR(ca->buckets[b].gen,
                                bucket_to_sector(c, b),
                                ca->sb.nr_this_dev);

                SET_KEY_PTRS(k, i + 1);
        }

        return 0;
err:
        bch_bucket_free(c, k);
        bkey_put(c, k);
        return -1;
}

int bch_bucket_alloc_set(struct cache_set *c, unsigned reserve,
                         struct bkey *k, int n, bool wait)
{
        int ret;
        mutex_lock(&c->bucket_lock);
        ret = __bch_bucket_alloc_set(c, reserve, k, n, wait);
        mutex_unlock(&c->bucket_lock);
        return ret;
}

/* Sector allocator */

struct open_bucket {
        struct list_head        list;
        unsigned                last_write_point;
        unsigned                sectors_free;
        BKEY_PADDED(key);
};

/*
 * We keep multiple buckets open for writes, and try to segregate different
 * write streams for better cache utilization: first we look for a bucket where
 * the last write to it was sequential with the current write, and failing that
 * we look for a bucket that was last used by the same task.
 *
 * The idea is that if you've got multiple tasks pulling data into the cache at
 * the same time, you'll get better cache utilization if you try to segregate
 * their data and preserve locality.
 *
 * For example, say you start Firefox at the same time you're copying a bunch
 * of files. Firefox will likely end up being fairly hot and stay in the cache
 * awhile, but the data you copied might not be; if you wrote all that data to
 * the same buckets it'd get invalidated at the same time.
 *
 * Both of those tasks will be doing fairly random IO so we can't rely on
 * detecting sequential IO to segregate their data, but going off of the task
 * should be a sane heuristic.
 */
static struct open_bucket *pick_data_bucket(struct cache_set *c,
                                            const struct bkey *search,
                                            unsigned write_point,
                                            struct bkey *alloc)
{
        struct open_bucket *ret, *ret_task = NULL;

        list_for_each_entry_reverse(ret, &c->data_buckets, list)
                if (!bkey_cmp(&ret->key, search))
                        goto found;
                else if (ret->last_write_point == write_point)
                        ret_task = ret;

        ret = ret_task ?: list_first_entry(&c->data_buckets,
                                           struct open_bucket, list);
found:
        if (!ret->sectors_free && KEY_PTRS(alloc)) {
                ret->sectors_free = c->sb.bucket_size;
                bkey_copy(&ret->key, alloc);
                bkey_init(alloc);
        }

        if (!ret->sectors_free)
                ret = NULL;

        return ret;
}

/*
 * Allocates some space in the cache to write to, sets k to point to the newly
 * allocated space, and updates KEY_SIZE(k) and KEY_OFFSET(k) (to point to the
 * end of the newly allocated space).
 *
 * May allocate fewer sectors than @sectors; KEY_SIZE(k) indicates how many
 * sectors were actually allocated.
 *
 * If @wait is true, will not fail.
 */
bool bch_alloc_sectors(struct cache_set *c, struct bkey *k, unsigned sectors,
                       unsigned write_point, unsigned write_prio, bool wait)
{
        struct open_bucket *b;
        BKEY_PADDED(key) alloc;
        unsigned i;

        /*
         * We might have to allocate a new bucket, which we can't do with a
         * spinlock held. So if we have to allocate, we drop the lock, allocate
         * and then retry. KEY_PTRS() indicates whether alloc points to
         * allocated bucket(s).
         */

        bkey_init(&alloc.key);
        spin_lock(&c->data_bucket_lock);

        while (!(b = pick_data_bucket(c, k, write_point, &alloc.key))) {
                unsigned watermark = write_prio
                        ? RESERVE_MOVINGGC
                        : RESERVE_NONE;

                spin_unlock(&c->data_bucket_lock);

                if (bch_bucket_alloc_set(c, watermark, &alloc.key, 1, wait))
                        return false;

                spin_lock(&c->data_bucket_lock);
        }

        /*
         * If we had to allocate, we might race and not need to allocate the
         * second time we call pick_data_bucket(). If we allocated a bucket but
         * didn't use it, drop the refcount bch_bucket_alloc_set() took:
         */
        if (KEY_PTRS(&alloc.key))
                bkey_put(c, &alloc.key);

        for (i = 0; i < KEY_PTRS(&b->key); i++)
                EBUG_ON(ptr_stale(c, &b->key, i));

        /* Set up the pointer to the space we're allocating: */

        for (i = 0; i < KEY_PTRS(&b->key); i++)
                k->ptr[i] = b->key.ptr[i];

        sectors = min(sectors, b->sectors_free);

        SET_KEY_OFFSET(k, KEY_OFFSET(k) + sectors);
        SET_KEY_SIZE(k, sectors);
        SET_KEY_PTRS(k, KEY_PTRS(&b->key));

        /*
         * Move b to the end of the lru, and keep track of what this bucket was
         * last used for:
         */
        list_move_tail(&b->list, &c->data_buckets);
        bkey_copy_key(&b->key, k);
        b->last_write_point = write_point;

        b->sectors_free -= sectors;

        for (i = 0; i < KEY_PTRS(&b->key); i++) {
                SET_PTR_OFFSET(&b->key, i, PTR_OFFSET(&b->key, i) + sectors);

                atomic_long_add(sectors,
                                &PTR_CACHE(c, &b->key, i)->sectors_written);
        }

        if (b->sectors_free < c->sb.block_size)
                b->sectors_free = 0;

        /*
         * k takes refcounts on the buckets it points to until it's inserted
         * into the btree, but if we're done with this bucket we just transfer
         * the refcount that was taken when the bucket was allocated.
         */
        if (b->sectors_free)
                for (i = 0; i < KEY_PTRS(&b->key); i++)
                        atomic_inc(&PTR_BUCKET(c, &b->key, i)->pin);

        spin_unlock(&c->data_bucket_lock);
        return true;
}

/* Init */

void bch_open_buckets_free(struct cache_set *c)
{
        struct open_bucket *b;

        while (!list_empty(&c->data_buckets)) {
                b = list_first_entry(&c->data_buckets,
                                     struct open_bucket, list);
                list_del(&b->list);
                kfree(b);
        }
}

int bch_open_buckets_alloc(struct cache_set *c)
{
        int i;

        spin_lock_init(&c->data_bucket_lock);

        for (i = 0; i < 6; i++) {
                struct open_bucket *b = kzalloc(sizeof(*b), GFP_KERNEL);
                if (!b)
                        return -ENOMEM;

                list_add(&b->list, &c->data_buckets);
        }

        return 0;
}

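/* Start the per-cache allocator thread. */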
int bch_cache_allocator_start(struct cache *ca)
{
        struct task_struct *k = kthread_run(bch_allocator_thread,
                                            ca, "bcache_allocator");
        if (IS_ERR(k))
                return PTR_ERR(k);

        ca->alloc_thread = k;
        return 0;
}