1#ifndef _BCACHE_H
2#define _BCACHE_H
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178#define pr_fmt(fmt) "bcache: %s() " fmt "\n", __func__
179
180#include <linux/bio.h>
181#include <linux/kobject.h>
182#include <linux/list.h>
183#include <linux/mutex.h>
184#include <linux/rbtree.h>
185#include <linux/rwsem.h>
186#include <linux/types.h>
187#include <linux/workqueue.h>
188
189#include "util.h"
190#include "closure.h"
191
/*
 * Per-bucket in-memory state. A bucket's generation (gen) is incremented
 * when the bucket is invalidated; pointers into a bucket are only valid
 * while their gen matches the bucket's.
 */
struct bucket {
	atomic_t	pin;		/* held nonzero while IO to this bucket is outstanding */
	uint16_t	prio;		/* replacement priority, rescaled over time */
	uint8_t		gen;		/* current generation */
	uint8_t		disk_gen;	/* gen as last written out in the prios */
	uint8_t		last_gc;	/* oldest gen of any pointer into this bucket seen by gc -- NOTE(review): confirm */
	uint8_t		gc_gen;		/* gen snapshot used by the in-progress gc -- NOTE(review): confirm */
	uint16_t	gc_mark;	/* bitfields: GC_MARK, GC_SECTORS_USED (below) */
};
201
202
203
204
205
206
207BITMASK(GC_MARK, struct bucket, gc_mark, 0, 2);
208#define GC_MARK_RECLAIMABLE 0
209#define GC_MARK_DIRTY 1
210#define GC_MARK_METADATA 2
211BITMASK(GC_SECTORS_USED, struct bucket, gc_mark, 2, 14);
212
/*
 * Btree key: a 128-bit header (high/low) followed by zero or more packed
 * 64-bit pointers. Field accessors are generated below via KEY_FIELD()
 * and PTR_FIELD().
 */
struct bkey {
	uint64_t	high;	/* packed bitfields: ptr count, size, inode, flags */
	uint64_t	low;	/* key offset in sectors (see KEY_OFFSET()) */
	uint64_t	ptr[];	/* packed dev/offset/gen pointers (see PTR_FIELD() users) */
};
218
219
220#define BKEY_PAD 8
221
222#define BKEY_PADDED(key) \
223 union { struct bkey key; uint64_t key ## _pad[BKEY_PAD]; }
224
225
226
227
228
229
230
231#define BCACHE_SB_VERSION_CDEV 0
232#define BCACHE_SB_VERSION_BDEV 1
233#define BCACHE_SB_VERSION_CDEV_WITH_UUID 3
234#define BCACHE_SB_VERSION_BDEV_WITH_OFFSET 4
235#define BCACHE_SB_MAX_VERSION 4
236
237#define SB_SECTOR 8
238#define SB_SIZE 4096
239#define SB_LABEL_SIZE 32
240#define SB_JOURNAL_BUCKETS 256U
241
242#define MAX_CACHES_PER_SET 8
243
244#define BDEV_DATA_START_DEFAULT 16
245
/*
 * On-disk superblock, shared by cache devices and backing devices; which
 * half of the union is valid depends on version (see SB_IS_BDEV()).
 */
struct cache_sb {
	uint64_t		csum;		/* checksum of the rest of the superblock */
	uint64_t		offset;		/* sector where this sb was written */
	uint64_t		version;	/* one of BCACHE_SB_VERSION_* */

	uint8_t			magic[16];	/* bcache on-disk magic */

	uint8_t			uuid[16];	/* identifies this particular device */
	union {
		uint8_t		set_uuid[16];	/* cache set this device belongs to */
		uint64_t	set_magic;	/* salts jset/pset/bset magics (see *_magic() below) */
	};
	uint8_t			label[SB_LABEL_SIZE];

	uint64_t		flags;		/* CACHE_* and BDEV_* bitmasks below */
	uint64_t		seq;		/* incremented on each superblock write */
	uint64_t		pad[8];

	union {
	struct {
		/* Cache devices */
		uint64_t	nbuckets;	/* device size, in buckets */

		uint16_t	block_size;	/* sectors */
		uint16_t	bucket_size;	/* sectors */

		uint16_t	nr_in_set;	/* number of caches in this set */
		uint16_t	nr_this_dev;	/* index of this cache within the set */
	};
	struct {
		/* Backing devices */
		uint64_t	data_offset;	/* sector where cached data starts */
	};
	};

	uint32_t		last_mount;	/* time_t */

	uint16_t		first_bucket;	/* index of the first usable bucket */
	union {
		uint16_t	njournal_buckets;	/* cache devices: entries used in d[] */
		uint16_t	keys;			/* backing devices */
	};
	uint64_t		d[SB_JOURNAL_BUCKETS];	/* journal bucket indices */
};
296
297BITMASK(CACHE_SYNC, struct cache_sb, flags, 0, 1);
298BITMASK(CACHE_DISCARD, struct cache_sb, flags, 1, 1);
299BITMASK(CACHE_REPLACEMENT, struct cache_sb, flags, 2, 3);
300#define CACHE_REPLACEMENT_LRU 0U
301#define CACHE_REPLACEMENT_FIFO 1U
302#define CACHE_REPLACEMENT_RANDOM 2U
303
304BITMASK(BDEV_CACHE_MODE, struct cache_sb, flags, 0, 4);
305#define CACHE_MODE_WRITETHROUGH 0U
306#define CACHE_MODE_WRITEBACK 1U
307#define CACHE_MODE_WRITEAROUND 2U
308#define CACHE_MODE_NONE 3U
309BITMASK(BDEV_STATE, struct cache_sb, flags, 61, 2);
310#define BDEV_STATE_NONE 0U
311#define BDEV_STATE_CLEAN 1U
312#define BDEV_STATE_DIRTY 2U
313#define BDEV_STATE_STALE 3U
314
315
316
317#define BCACHE_BSET_VERSION 1
318
319
320
321
322
/*
 * On-disk btree node contents: one sorted set of keys. magic is
 * bset_magic(), salted per cache set so stale data from another set is
 * never mistaken for a valid bset.
 */
struct bset {
	uint64_t		csum;		/* of everything after the csum field (csum_set()) */
	uint64_t		magic;
	uint64_t		seq;		/* ties bsets to their btree node -- NOTE(review): confirm */
	uint32_t		version;	/* BCACHE_BSET_VERSION */
	uint32_t		keys;		/* number of u64s of key data in d[] (see set_bytes()) */

	union {
		struct bkey	start[0];	/* the keys, viewed as bkeys... */
		uint64_t	d[0];		/* ...or as raw u64s */
	};
};
335
336
337
338
339
/*
 * On-disk format for bucket priorities and generations: each prio_set
 * fills one bucket and links to the next via next_bucket.
 */
struct prio_set {
	uint64_t		csum;
	uint64_t		magic;		/* pset_magic() */
	uint64_t		seq;
	uint32_t		version;
	uint32_t		pad;

	uint64_t		next_bucket;	/* bucket holding the next prio_set */

	struct bucket_disk {
		uint16_t	prio;
		uint8_t		gen;
	} __attribute((packed)) data[];	/* one entry per bucket; prios_per_bucket() fit here */
};
354
/*
 * One entry in the uuid mapping: associates a backing device or
 * flash-only volume uuid with its inode (array index) and metadata.
 * Padded to a fixed 128 bytes on disk.
 */
struct uuid_entry {
	union {
		struct {
			uint8_t		uuid[16];
			uint8_t		label[32];
			uint32_t	first_reg;	/* registration timestamps -- presumably time_t; verify */
			uint32_t	last_reg;
			uint32_t	invalidated;

			uint32_t	flags;		/* UUID_FLASH_ONLY (below) */
			/* Size of flash-only volumes */
			uint64_t	sectors;
		};

		uint8_t	pad[128];	/* forces the fixed on-disk entry size */
	};
};
372
373BITMASK(UUID_FLASH_ONLY, struct uuid_entry, flags, 0, 1);
374
375#include "journal.h"
376#include "stats.h"
377struct search;
378struct btree;
379struct keybuf;
380
/* One key held in a keybuf, kept in the buf's rbtree in key order. */
struct keybuf_key {
	struct rb_node		node;
	BKEY_PADDED(key);
	void			*private;	/* owner's per-key state (e.g. writeback/moving gc) */
};
386
387typedef bool (keybuf_pred_fn)(struct keybuf *, struct bkey *);
388
/*
 * A buffer of keys scanned out of the btree (used by writeback and
 * moving gc). Refills resume from last_scanned and are bounded by
 * [start, end]; keys are kept sorted in an rbtree.
 */
struct keybuf {
	struct bkey		last_scanned;	/* where the next refill picks up */
	spinlock_t		lock;

	/*
	 * Beginning and end of the range we're interested in; keys
	 * outside [start, end] are never added to the buffer.
	 */
	struct bkey		start;
	struct bkey		end;

	struct rb_root		keys;		/* struct keybuf_key, sorted by key */

#define KEYBUF_NR		100
	DECLARE_ARRAY_ALLOCATOR(struct keybuf_key, freelist, KEYBUF_NR);
};
406
/* Per-device pools used when a bio must be split at device boundaries. */
struct bio_split_pool {
	struct bio_set		*bio_split;
	mempool_t		*bio_split_hook;
};
411
/*
 * Bookkeeping for one split bio: saves the original completion so it can
 * be restored once all the fragments finish.
 */
struct bio_split_hook {
	struct closure		cl;
	struct bio_split_pool	*p;
	struct bio		*bio;		/* the original, unsplit bio */
	bio_end_io_t		*bi_end_io;	/* saved from the original bio */
	void			*bi_private;	/* saved from the original bio */
};
419
/*
 * Generic block device state, shared by cached (backing) devices and
 * flash-only volumes; embedded in struct cached_dev.
 */
struct bcache_device {
	struct closure		cl;

	struct kobject		kobj;

	struct cache_set	*c;		/* set we're attached to, if any */
	unsigned		id;		/* index into c->devices / uuid inode */
#define BCACHEDEVNAME_SIZE	12
	char			name[BCACHEDEVNAME_SIZE];

	struct gendisk		*disk;

	/* set when the device is being torn down */
	atomic_t		closing;

	/* set while we're detaching from our cache set */
	atomic_t		detaching;
	int			flush_done;

	/* Dirty-sector accounting, bucketed by stripe */
	uint64_t		nr_stripes;
	unsigned		stripe_size_bits;	/* log2 of stripe size -- NOTE(review): units unclear, verify */
	atomic_t		*stripe_sectors_dirty;

	unsigned long		sectors_dirty_last;
	long			sectors_dirty_derivative;

	mempool_t		*unaligned_bvec;
	struct bio_set		*bio_split;

	unsigned		data_csum:1;

	/* hooks filled in by the cached_dev / flash_dev implementations */
	int (*cache_miss)(struct btree *, struct search *,
			  struct bio *, unsigned);
	int (*ioctl) (struct bcache_device *, fmode_t, unsigned, unsigned long);

	struct bio_split_pool	bio_split_hook;
};
457
/* Tracks one stream of IO, for sequential-IO detection. */
struct io {
	/* Used to track sequential IO so it can be skipped */
	struct hlist_node	hash;
	struct list_head	lru;

	unsigned long		jiffies;	/* when this stream was last seen */
	unsigned		sequential;	/* amount seen sequentially so far */
	sector_t		last;		/* sector right after the previous IO */
};
467
/* A backing device, optionally attached to a cache set. */
struct cached_dev {
	struct list_head	list;
	struct bcache_device	disk;
	struct block_device	*bdev;

	struct cache_sb		sb;
	struct bio		sb_bio;		/* preallocated for superblock writes */
	struct bio_vec		sb_bv[1];
	struct closure_with_waitlist sb_write;

	/* Refcount on the cache set. Always nonzero while we're caching. */
	atomic_t		count;
	struct work_struct	detach;

	/*
	 * Device might not be running if it's dirty and the cache set
	 * hasn't showed up yet.
	 */
	atomic_t		running;

	/*
	 * Writes take a shared lock; anything that has to change
	 * BDEV_STATE (register/detach/unregister) takes it exclusive.
	 */
	struct rw_semaphore	writeback_lock;

	/*
	 * Nonzero iff there is dirty data for this device in the cache
	 * (and writeback then holds a refcount on count).
	 */
	atomic_t		has_dirty;

	struct ratelimit	writeback_rate;
	struct delayed_work	writeback_rate_update;

	/*
	 * Internal to the writeback code, so read_dirty() can keep track
	 * of where it's at.
	 */
	sector_t		last_read;

	/* Number of writeback bios in flight */
	atomic_t		in_flight;
	struct closure_with_timer writeback;
	struct closure_waitlist	writeback_wait;

	struct keybuf		writeback_keys;

	/* For tracking sequential IO */
#define RECENT_IO_BITS	7
#define RECENT_IO	(1 << RECENT_IO_BITS)
	struct io		io[RECENT_IO];
	struct hlist_head	io_hash[RECENT_IO + 1];	/* extra slot -- presumably a catch-all; verify */
	struct list_head	io_lru;
	spinlock_t		io_lock;

	struct cache_accounting	accounting;

	/* The rest of this all shows up in sysfs */
	unsigned		sequential_cutoff;
	unsigned		readahead;

	unsigned		sequential_merge:1;
	unsigned		verify:1;

	unsigned		partial_stripes_expensive:1;
	unsigned		writeback_metadata:1;
	unsigned		writeback_running:1;
	unsigned char		writeback_percent;
	unsigned		writeback_delay;

	int			writeback_rate_change;
	int64_t			writeback_rate_derivative;
	uint64_t		writeback_rate_target;

	/* Tunables for the writeback-rate PD controller */
	unsigned		writeback_rate_update_seconds;
	unsigned		writeback_rate_d_term;
	unsigned		writeback_rate_p_term_inverse;
	unsigned		writeback_rate_d_smooth;
};
549
/* Allocation reserves, in decreasing order of priority. */
enum alloc_watermarks {
	WATERMARK_PRIO,		/* prio/gen writes must never be starved */
	WATERMARK_METADATA,
	WATERMARK_MOVINGGC,
	WATERMARK_NONE,		/* ordinary data */
	WATERMARK_MAX
};
557
/* Per cache device (i.e. per member of a cache set) state. */
struct cache {
	struct cache_set	*set;
	struct cache_sb		sb;
	struct bio		sb_bio;		/* preallocated for superblock writes */
	struct bio_vec		sb_bv[1];

	struct kobject		kobj;
	struct block_device	*bdev;

	unsigned		watermark[WATERMARK_MAX];

	struct task_struct	*alloc_thread;

	struct closure		prio;
	struct prio_set		*disk_buckets;	/* staging buffer for prio writes */

	/*
	 * prio_buckets holds the buckets the priorities were last written
	 * to (so gc can mark them as metadata); prio_last_buckets the
	 * previous generation of those -- NOTE(review): confirm which is
	 * which against bch_prio_write().
	 */
	uint64_t		*prio_buckets;
	uint64_t		*prio_last_buckets;

	/*
	 * free: buckets that are ready to be used.
	 *
	 * free_inc: buckets the allocator thread is going to invalidate.
	 *
	 * unused: buckets that gc discovered hold no good data and can be
	 * reused without invalidating.
	 */
	DECLARE_FIFO(long, free);
	DECLARE_FIFO(long, free_inc);
	DECLARE_FIFO(long, unused);

	size_t			fifo_last_bucket;

	/* Allocation stuff: */
	struct bucket		*buckets;

	DECLARE_HEAP(struct bucket *, heap);

	uint8_t			need_save_prio;	/* forces a prio/gen write -- NOTE(review): confirm trigger */
	unsigned		gc_move_threshold;	/* used-sector cutoff for moving gc -- NOTE(review): confirm */

	/*
	 * If nonzero, we know we aren't going to find any buckets to
	 * invalidate until a gc finishes - otherwise we'd pointlessly
	 * burn a ton of cpu looking.
	 */
	unsigned		invalidate_needs_gc:1;

	bool			discard;	/* issue discards to freed buckets */

	/*
	 * Preallocated structs for issuing discards to buckets; kept on
	 * this list when not in use.
	 */
	atomic_t		discards_in_flight;
	struct list_head	discards;

	struct journal_device	journal;

	/* The rest of this all shows up in sysfs */
#define IO_ERROR_SHIFT		20
	atomic_t		io_errors;	/* decaying error count, scaled by IO_ERROR_SHIFT */
	atomic_t		io_count;

	atomic_long_t		meta_sectors_written;
	atomic_long_t		btree_sectors_written;
	atomic_long_t		sectors_written;

	struct bio_split_pool	bio_split_hook;
};
646
/* Statistics gathered by the most recent garbage collection pass. */
struct gc_stat {
	size_t			nodes;		/* btree nodes seen */
	size_t			key_bytes;

	size_t			nkeys;
	uint64_t		data;		/* sectors */
	uint64_t		dirty;		/* sectors */
	unsigned		in_use;		/* percent */
};
656
657
658
659
660
661
662
663
664
665
666
667
668#define CACHE_SET_UNREGISTERING 0
669#define CACHE_SET_STOPPING 1
670
/*
 * A cache set: one or more cache devices plus the attached backing
 * devices, the btree, the journal and the runtime state tying them
 * together. flags holds the CACHE_SET_* bits defined above.
 */
struct cache_set {
	struct closure		cl;

	struct list_head	list;		/* entry on bch_cache_sets */
	struct kobject		kobj;
	struct kobject		internal;
	struct dentry		*debug;
	struct cache_accounting accounting;

	unsigned long		flags;		/* CACHE_SET_UNREGISTERING / CACHE_SET_STOPPING */

	struct cache_sb		sb;

	struct cache		*cache[MAX_CACHES_PER_SET];
	struct cache		*cache_by_alloc[MAX_CACHES_PER_SET];
	int			caches_loaded;

	struct bcache_device	**devices;	/* indexed by uuid inode */
	struct list_head	cached_devs;
	uint64_t		cached_dev_sectors;
	struct closure		caching;

	struct closure_with_waitlist sb_write;

	mempool_t		*search;	/* struct search, for request handling */
	mempool_t		*bio_meta;
	struct bio_set		*bio_split;

	/* For the btree cache */
	struct shrinker		shrink;

	/* For the btree cache and anything allocation related */
	struct mutex		bucket_lock;

	/* log2(bucket_size), in sectors */
	unsigned short		bucket_bits;

	/* log2(block_size), in sectors */
	unsigned short		block_bits;

	/*
	 * Default number of pages for a new btree node - may be less than
	 * a full bucket.
	 */
	unsigned		btree_pages;

	/*
	 * btree_cache: btree nodes currently in memory.
	 * btree_cache_freeable: nodes whose memory can be reclaimed.
	 * btree_cache_freed: structs with no node memory attached.
	 * NOTE(review): exact semantics of the three lists should be
	 * confirmed against the btree cache code.
	 */
	struct list_head	btree_cache;
	struct list_head	btree_cache_freeable;
	struct list_head	btree_cache_freed;

	/* Number of elements on btree_cache + btree_cache_freeable */
	unsigned		bucket_cache_used;

	/*
	 * If allocating memory for a new btree node fails, we can
	 * cannibalize another cached node - but only one thread may do so
	 * at a time; try_harder/try_wait provide that mutual exclusion.
	 */
	struct closure		*try_harder;
	struct closure_waitlist	try_wait;
	uint64_t		try_harder_start;

	/*
	 * When we free a btree node we increment the gen of its bucket,
	 * but we can't rewrite the prios and gens until the operation
	 * that freed it completes - prio_blocked tracks how many such
	 * operations are holding off the prio write.
	 */
	atomic_t		prio_blocked;
	struct closure_waitlist	bucket_wait;

	/*
	 * Counts down as sectors are cached; when it hits 0 all bucket
	 * priorities are rescaled (bch_rescale_priorities()).
	 */
	atomic_t		rescale;

	/*
	 * Smallest nonzero priority of any bucket - used when weighing
	 * priority against good data to pick buckets to invalidate.
	 */
	uint16_t		min_prio;

	/*
	 * max(gen - last_gc) over all buckets: when it gets too big we
	 * must gc to keep the gens from wrapping.
	 */
	uint8_t			need_gc;
	struct gc_stat		gc_stats;
	size_t			nbuckets;

	struct closure_with_waitlist gc;
	/* Where in the btree gc currently is */
	struct bkey		gc_done;

	/*
	 * The allocation code needs gc_mark in struct bucket to be
	 * correct, but it's not while a gc is in progress. Protected by
	 * bucket_lock.
	 */
	int			gc_mark_valid;

	/* Counts how many sectors have been added to the cache */
	atomic_t		sectors_to_gc;

	struct closure		moving_gc;
	struct closure_waitlist	moving_gc_wait;
	struct keybuf		moving_gc_keys;

	/* Number of moving gc bios in flight */
	atomic_t		in_flight;

	struct btree		*root;

#ifdef CONFIG_BCACHE_DEBUG
	struct btree		*verify_data;
	struct mutex		verify_lock;
#endif

	unsigned		nr_uuids;
	struct uuid_entry	*uuids;
	BKEY_PADDED(uuid_bucket);
	struct closure_with_waitlist uuid_write;

	/*
	 * A btree node on disk could have too many bsets for an iterator
	 * to fit on the stack - single element mempool for that.
	 */
	mempool_t		*fill_iter;

	/*
	 * btree_sort() is a merge sort and needs temporary space -
	 * single element, guarded by sort_lock.
	 */
	struct mutex		sort_lock;
	struct bset		*sort;
	unsigned		sort_crit_factor;

	/* List of buckets we're currently writing data to */
	struct list_head	data_buckets;
	spinlock_t		data_bucket_lock;

	struct journal		journal;

#define CONGESTED_MAX		1024
	unsigned		congested_last_us;
	atomic_t		congested;

	/* The rest of this all shows up in sysfs */
	unsigned		congested_read_threshold_us;
	unsigned		congested_write_threshold_us;

	spinlock_t		sort_time_lock;
	struct time_stats	sort_time;
	struct time_stats	btree_gc_time;
	struct time_stats	btree_split_time;
	spinlock_t		btree_read_time_lock;
	struct time_stats	btree_read_time;
	struct time_stats	try_harder_time;

	atomic_long_t		cache_read_races;
	atomic_long_t		writeback_keys_done;
	atomic_long_t		writeback_keys_failed;
	unsigned		error_limit;	/* io error threshold before erroring the whole set */
	unsigned		error_decay;
	unsigned short		journal_delay_ms;
	unsigned		verify:1;
	unsigned		key_merging_disabled:1;
	unsigned		gc_always_rewrite:1;
	unsigned		shrinker_disabled:1;
	unsigned		copy_gc_enabled:1;

#define BUCKET_HASH_BITS	12
	struct hlist_head	bucket_hash[1 << BUCKET_HASH_BITS];
};
868
/*
 * Whether btree key merging has been disabled (a debug knob exposed via
 * the key_merging_disabled bitfield); compiled out to "always enabled"
 * in non-debug builds.
 */
static inline bool key_merging_disabled(struct cache_set *c)
{
#ifdef CONFIG_BCACHE_DEBUG
	return c->key_merging_disabled;
#else
	return false;	/* bool literal rather than 0 for a bool return */
#endif
}
877
878static inline bool SB_IS_BDEV(const struct cache_sb *sb)
879{
880 return sb->version == BCACHE_SB_VERSION_BDEV
881 || sb->version == BCACHE_SB_VERSION_BDEV_WITH_OFFSET;
882}
883
/* Wrapper around struct bio carrying the key the IO is to/from. */
struct bbio {
	unsigned		submit_time_us;
	union {
		struct bkey	key;
		uint64_t	_pad[3];
		/*
		 * Pad of 3 u64s suffices because a bbio only ever
		 * carries a single pointer - the one it's doing IO
		 * to/from.
		 */
	};
	struct bio		bio;	/* must come last */
};
896
/* Local clock in (approximate) microseconds: ns >> 10 is close enough. */
static inline unsigned local_clock_us(void)
{
	return local_clock() >> 10;
}
901
902#define BTREE_PRIO USHRT_MAX
903#define INITIAL_PRIO 32768
904
905#define btree_bytes(c) ((c)->btree_pages * PAGE_SIZE)
906#define btree_blocks(b) \
907 ((unsigned) (KEY_SIZE(&b->key) >> (b)->c->block_bits))
908
909#define btree_default_blocks(c) \
910 ((unsigned) ((PAGE_SECTORS * (c)->btree_pages) >> (c)->block_bits))
911
912#define bucket_pages(c) ((c)->sb.bucket_size / PAGE_SECTORS)
913#define bucket_bytes(c) ((c)->sb.bucket_size << 9)
914#define block_bytes(c) ((c)->sb.block_size << 9)
915
916#define __set_bytes(i, k) (sizeof(*(i)) + (k) * sizeof(uint64_t))
917#define set_bytes(i) __set_bytes(i, i->keys)
918
919#define __set_blocks(i, k, c) DIV_ROUND_UP(__set_bytes(i, k), block_bytes(c))
920#define set_blocks(i, c) __set_blocks(i, (i)->keys, c)
921
922#define node(i, j) ((struct bkey *) ((i)->d + (j)))
923#define end(i) node(i, (i)->keys)
924
925#define index(i, b) \
926 ((size_t) (((void *) i - (void *) (b)->sets[0].data) / \
927 block_bytes(b->c)))
928
929#define btree_data_space(b) (PAGE_SIZE << (b)->page_order)
930
931#define prios_per_bucket(c) \
932 ((bucket_bytes(c) - sizeof(struct prio_set)) / \
933 sizeof(struct bucket_disk))
934#define prio_buckets(c) \
935 DIV_ROUND_UP((size_t) (c)->sb.nbuckets, prios_per_bucket(c))
936
937#define JSET_MAGIC 0x245235c1a3625032ULL
938#define PSET_MAGIC 0x6750e15f87337f91ULL
939#define BSET_MAGIC 0x90135c78b99e07f5ULL
940
941#define jset_magic(c) ((c)->sb.set_magic ^ JSET_MAGIC)
942#define pset_magic(c) ((c)->sb.set_magic ^ PSET_MAGIC)
943#define bset_magic(c) ((c)->sb.set_magic ^ BSET_MAGIC)
944
945
946
/* Generates accessors for a bitfield packed into bkey->high or ->low. */
#define KEY_FIELD(name, field, offset, size)				\
	BITMASK(name, struct bkey, field, offset, size)

/*
 * Generates accessors for a bitfield within one of a bkey's pointers.
 * NOTE(review): SET_##name does not mask v, so a value wider than size
 * bits corrupts neighbouring fields - callers must pass in-range values.
 */
#define PTR_FIELD(name, offset, size)					\
	static inline uint64_t name(const struct bkey *k, unsigned i)	\
	{ return (k->ptr[i] >> offset) & ~(((uint64_t) ~0) << size); }	\
									\
	static inline void SET_##name(struct bkey *k, unsigned i, uint64_t v)\
	{								\
		k->ptr[i] &= ~(~((uint64_t) ~0 << size) << offset);	\
		k->ptr[i] |= v << offset;				\
	}
959
960KEY_FIELD(KEY_PTRS, high, 60, 3)
961KEY_FIELD(HEADER_SIZE, high, 58, 2)
962KEY_FIELD(KEY_CSUM, high, 56, 2)
963KEY_FIELD(KEY_PINNED, high, 55, 1)
964KEY_FIELD(KEY_DIRTY, high, 36, 1)
965
966KEY_FIELD(KEY_SIZE, high, 20, 16)
967KEY_FIELD(KEY_INODE, high, 0, 20)
968
969
970
/* Key offset in sectors; stored unpacked in ->low, unlike the KEY_FIELDs. */
static inline uint64_t KEY_OFFSET(const struct bkey *k)
{
	return k->low;
}
975
/* Sets the key offset; counterpart of KEY_OFFSET(). */
static inline void SET_KEY_OFFSET(struct bkey *k, uint64_t v)
{
	k->low = v;
}
980
981PTR_FIELD(PTR_DEV, 51, 12)
982PTR_FIELD(PTR_OFFSET, 8, 43)
983PTR_FIELD(PTR_GEN, 0, 8)
984
985#define PTR_CHECK_DEV ((1 << 12) - 1)
986
987#define PTR(gen, offset, dev) \
988 ((((uint64_t) dev) << 51) | ((uint64_t) offset) << 8 | gen)
989
/* Bucket index containing sector s (buckets are a power of two sectors). */
static inline size_t sector_to_bucket(struct cache_set *c, sector_t s)
{
	return s >> c->bucket_bits;
}
994
/* First sector of bucket b; inverse of sector_to_bucket(). */
static inline sector_t bucket_to_sector(struct cache_set *c, size_t b)
{
	return ((sector_t) b) << c->bucket_bits;
}
999
/* Offset of sector s within its bucket, in sectors. */
static inline sector_t bucket_remainder(struct cache_set *c, sector_t s)
{
	return s & (c->sb.bucket_size - 1);
}
1004
/* Cache device that key k's ptr'th pointer points into. */
static inline struct cache *PTR_CACHE(struct cache_set *c,
				      const struct bkey *k,
				      unsigned ptr)
{
	return c->cache[PTR_DEV(k, ptr)];
}
1011
/* Bucket index that key k's ptr'th pointer points into. */
static inline size_t PTR_BUCKET_NR(struct cache_set *c,
				   const struct bkey *k,
				   unsigned ptr)
{
	return sector_to_bucket(c, PTR_OFFSET(k, ptr));
}
1018
/* In-memory bucket struct for the bucket key k's ptr'th pointer is in. */
static inline struct bucket *PTR_BUCKET(struct cache_set *c,
					const struct bkey *k,
					unsigned ptr)
{
	return PTR_CACHE(c, k, ptr)->buckets + PTR_BUCKET_NR(c, k, ptr);
}
1025
1026
1027
1028
1029
1030
1031
1032
1033#define KEY(dev, sector, len) \
1034((struct bkey) { \
1035 .high = (1ULL << 63) | ((uint64_t) (len) << 20) | (dev), \
1036 .low = (sector) \
1037})
1038
/* Initializes a key to ZERO_KEY: no ptrs/size/inode, header bit set. */
static inline void bkey_init(struct bkey *k)
{
	*k = KEY(0, 0, 0);
}
1043
1044#define KEY_START(k) (KEY_OFFSET(k) - KEY_SIZE(k))
1045#define START_KEY(k) KEY(KEY_INODE(k), KEY_START(k), 0)
1046#define MAX_KEY KEY(~(~0 << 20), ((uint64_t) ~0) >> 1, 0)
1047#define ZERO_KEY KEY(0, 0, 0)
1048
1049
1050
1051
1052
1053#define csum_set(i) \
1054 bch_crc64(((void *) (i)) + sizeof(uint64_t), \
1055 ((void *) end(i)) - (((void *) (i)) + sizeof(uint64_t)))
1056
1057
1058
1059#define btree_bug(b, ...) \
1060do { \
1061 if (bch_cache_set_error((b)->c, __VA_ARGS__)) \
1062 dump_stack(); \
1063} while (0)
1064
1065#define cache_bug(c, ...) \
1066do { \
1067 if (bch_cache_set_error(c, __VA_ARGS__)) \
1068 dump_stack(); \
1069} while (0)
1070
1071#define btree_bug_on(cond, b, ...) \
1072do { \
1073 if (cond) \
1074 btree_bug(b, __VA_ARGS__); \
1075} while (0)
1076
1077#define cache_bug_on(cond, c, ...) \
1078do { \
1079 if (cond) \
1080 cache_bug(c, __VA_ARGS__); \
1081} while (0)
1082
1083#define cache_set_err_on(cond, c, ...) \
1084do { \
1085 if (cond) \
1086 bch_cache_set_error(c, __VA_ARGS__); \
1087} while (0)
1088
1089
1090
/*
 * Iterates over the caches in a set. The comma operator loads ca before
 * the bounds test, so cs->cache[iter] is read once more at
 * iter == nr_in_set. NOTE(review): if nr_in_set == MAX_CACHES_PER_SET
 * that final read is one past the array - confirm nr_in_set is bounded.
 */
#define for_each_cache(ca, cs, iter)					\
	for (iter = 0; ca = cs->cache[iter], iter < (cs)->sb.nr_in_set; iter++)

/* Iterates over all usable buckets of one cache device. */
#define for_each_bucket(b, ca)						\
	for (b = (ca)->buckets + (ca)->sb.first_bucket;			\
	     b < (ca)->buckets + (ca)->sb.nbuckets; b++)
1097
/* Drops the allocation pin on each bucket key k points into. */
static inline void __bkey_put(struct cache_set *c, struct bkey *k)
{
	unsigned i;

	for (i = 0; i < KEY_PTRS(k); i++)
		atomic_dec_bug(&PTR_BUCKET(c, k, i)->pin);
}
1105
/* Drops a ref on the cached device; the last ref schedules detach work. */
static inline void cached_dev_put(struct cached_dev *dc)
{
	if (atomic_dec_and_test(&dc->count))
		schedule_work(&dc->detach);
}
1111
1112static inline bool cached_dev_get(struct cached_dev *dc)
1113{
1114 if (!atomic_inc_not_zero(&dc->count))
1115 return false;
1116
1117
1118 smp_mb__after_atomic_inc();
1119 return true;
1120}
1121
1122
1123
1124
1125
1126
1127
1128
1129
/*
 * How far the bucket's gen has advanced past the gen gc last recorded
 * (last_gc); kept below BUCKET_GC_GEN_MAX so 8-bit gens can't wrap --
 * NOTE(review): confirm enforcement lives in the allocator.
 */
static inline uint8_t bucket_gc_gen(struct bucket *b)
{
	return b->gen - b->last_gc;
}
1134
/*
 * How far the bucket's gen has advanced past the gen last written to
 * disk; kept below BUCKET_DISK_GEN_MAX -- NOTE(review): confirm
 * enforcement lives in the allocator.
 */
static inline uint8_t bucket_disk_gen(struct bucket *b)
{
	return b->gen - b->disk_gen;
}
1139
1140#define BUCKET_GC_GEN_MAX 96U
1141#define BUCKET_DISK_GEN_MAX 64U
1142
1143#define kobj_attribute_write(n, fn) \
1144 static struct kobj_attribute ksysfs_##n = __ATTR(n, S_IWUSR, NULL, fn)
1145
1146#define kobj_attribute_rw(n, show, store) \
1147 static struct kobj_attribute ksysfs_##n = \
1148 __ATTR(n, S_IWUSR|S_IRUSR, show, store)
1149
/* Wakes the allocator thread of every cache in the set. */
static inline void wake_up_allocators(struct cache_set *c)
{
	struct cache *ca;
	unsigned i;

	for_each_cache(ca, c, i)
		wake_up_process(ca->alloc_thread);
}
1158
1159
1160
1161void bch_count_io_errors(struct cache *, int, const char *);
1162void bch_bbio_count_io_errors(struct cache_set *, struct bio *,
1163 int, const char *);
1164void bch_bbio_endio(struct cache_set *, struct bio *, int, const char *);
1165void bch_bbio_free(struct bio *, struct cache_set *);
1166struct bio *bch_bbio_alloc(struct cache_set *);
1167
1168struct bio *bch_bio_split(struct bio *, int, gfp_t, struct bio_set *);
1169void bch_generic_make_request(struct bio *, struct bio_split_pool *);
1170void __bch_submit_bbio(struct bio *, struct cache_set *);
1171void bch_submit_bbio(struct bio *, struct cache_set *, struct bkey *, unsigned);
1172
1173uint8_t bch_inc_gen(struct cache *, struct bucket *);
1174void bch_rescale_priorities(struct cache_set *, int);
1175bool bch_bucket_add_unused(struct cache *, struct bucket *);
1176
1177long bch_bucket_alloc(struct cache *, unsigned, struct closure *);
1178void bch_bucket_free(struct cache_set *, struct bkey *);
1179
1180int __bch_bucket_alloc_set(struct cache_set *, unsigned,
1181 struct bkey *, int, struct closure *);
1182int bch_bucket_alloc_set(struct cache_set *, unsigned,
1183 struct bkey *, int, struct closure *);
1184
1185__printf(2, 3)
1186bool bch_cache_set_error(struct cache_set *, const char *, ...);
1187
1188void bch_prio_write(struct cache *);
1189void bch_write_bdev_super(struct cached_dev *, struct closure *);
1190
1191extern struct workqueue_struct *bcache_wq, *bch_gc_wq;
1192extern const char * const bch_cache_modes[];
1193extern struct mutex bch_register_lock;
1194extern struct list_head bch_cache_sets;
1195
1196extern struct kobj_type bch_cached_dev_ktype;
1197extern struct kobj_type bch_flash_dev_ktype;
1198extern struct kobj_type bch_cache_set_ktype;
1199extern struct kobj_type bch_cache_set_internal_ktype;
1200extern struct kobj_type bch_cache_ktype;
1201
1202void bch_cached_dev_release(struct kobject *);
1203void bch_flash_dev_release(struct kobject *);
1204void bch_cache_set_release(struct kobject *);
1205void bch_cache_release(struct kobject *);
1206
1207int bch_uuid_write(struct cache_set *);
1208void bcache_write_super(struct cache_set *);
1209
1210int bch_flash_dev_create(struct cache_set *c, uint64_t size);
1211
1212int bch_cached_dev_attach(struct cached_dev *, struct cache_set *);
1213void bch_cached_dev_detach(struct cached_dev *);
1214void bch_cached_dev_run(struct cached_dev *);
1215void bcache_device_stop(struct bcache_device *);
1216
1217void bch_cache_set_unregister(struct cache_set *);
1218void bch_cache_set_stop(struct cache_set *);
1219
1220struct cache_set *bch_cache_set_alloc(struct cache_sb *);
1221void bch_btree_cache_free(struct cache_set *);
1222int bch_btree_cache_alloc(struct cache_set *);
1223void bch_moving_init_cache_set(struct cache_set *);
1224
1225int bch_cache_allocator_start(struct cache *ca);
1226void bch_cache_allocator_exit(struct cache *ca);
1227int bch_cache_allocator_init(struct cache *ca);
1228
1229void bch_debug_exit(void);
1230int bch_debug_init(struct kobject *);
1231void bch_writeback_exit(void);
1232int bch_writeback_init(void);
1233void bch_request_exit(void);
1234int bch_request_init(void);
1235void bch_btree_exit(void);
1236int bch_btree_init(void);
1237
1238#endif
1239