1#ifndef _BCACHE_H
2#define _BCACHE_H
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
/* Prefix all pr_*() output from this driver with "bcache: <function>() ". */
#define pr_fmt(fmt) "bcache: %s() " fmt "\n", __func__
179
180#include <linux/bio.h>
181#include <linux/blktrace_api.h>
182#include <linux/kobject.h>
183#include <linux/list.h>
184#include <linux/mutex.h>
185#include <linux/rbtree.h>
186#include <linux/rwsem.h>
187#include <linux/types.h>
188#include <linux/workqueue.h>
189
190#include "util.h"
191#include "closure.h"
192
/*
 * Per-bucket in-memory state for a cache device.  Buckets are the unit of
 * allocation; the generation counters let stale pointers be detected (a
 * pointer whose gen doesn't match the bucket's current gen is invalid)
 * without having to find and rewrite the pointers themselves.
 */
struct bucket {
	atomic_t pin;		/* nonzero while in-flight ops reference this bucket */
	uint16_t prio;		/* allocation priority; see INITIAL_PRIO/BTREE_PRIO below */
	uint8_t gen;		/* current generation; incremented to invalidate pointers */
	uint8_t disk_gen;	/* gen as last written to disk in the prio/gen area */
	uint8_t last_gc;	/* gen at the time of the last garbage collection pass */
	uint8_t gc_gen;		/* gen being used by the in-progress gc pass */
	uint16_t gc_mark;	/* bitfield: GC_MARK + GC_SECTORS_USED, see below */
};
202
203
204
205
206
207
/*
 * Accessors for the gc_mark bitfield: the low 2 bits classify the bucket,
 * the remaining 14 bits count the sectors of live data gc found in it.
 */
BITMASK(GC_MARK, struct bucket, gc_mark, 0, 2);
#define GC_MARK_RECLAIMABLE 0
#define GC_MARK_DIRTY 1
#define GC_MARK_METADATA 2
BITMASK(GC_SECTORS_USED, struct bucket, gc_mark, 2, 14);
213
/*
 * On-disk/in-memory btree key: two 64 bit words of packed fields (see the
 * KEY_* accessors below) followed by a variable number of pointers into
 * cache devices (one per replica; see the PTR_* accessors).
 */
struct bkey {
	uint64_t high;		/* packed: ptr count, csum, dirty, size, inode, ... */
	uint64_t low;		/* the key's offset (end of the extent), in sectors */
	uint64_t ptr[];		/* KEY_PTRS(k) packed device/offset/gen pointers */
};

/* Max total u64s a padded key can occupy: header (2) + up to 6 pointers. */
#define BKEY_PAD 8

/* Declare a bkey embedded in a struct with room for BKEY_PAD u64s total. */
#define BKEY_PADDED(key) \
	union { struct bkey key; uint64_t key ## _pad[BKEY_PAD]; }
225
226
227
228
229
230
231
/*
 * Superblock version numbers: CDEV versions mark cache devices, BDEV
 * versions mark backing devices (see SB_IS_BDEV() below).
 */
#define BCACHE_SB_VERSION_CDEV 0
#define BCACHE_SB_VERSION_BDEV 1
#define BCACHE_SB_VERSION_CDEV_WITH_UUID 3
#define BCACHE_SB_VERSION_BDEV_WITH_OFFSET 4
#define BCACHE_SB_MAX_VERSION 4

#define SB_SECTOR 8		/* superblock lives at sector 8 of the device */
#define SB_SIZE 4096
#define SB_LABEL_SIZE 32
#define SB_JOURNAL_BUCKETS 256U

#define MAX_CACHES_PER_SET 8

/* Default start of cached data on a backing device, in sectors. */
#define BDEV_DATA_START_DEFAULT 16
246
/*
 * On-disk superblock, shared between cache devices and backing devices;
 * the union below holds the fields specific to each kind (distinguished
 * by version, see SB_IS_BDEV()).
 */
struct cache_sb {
	uint64_t csum;
	uint64_t offset;	/* sector where this sb was read from */
	uint64_t version;	/* one of BCACHE_SB_VERSION_* */

	uint8_t magic[16];

	uint8_t uuid[16];	/* identifies this particular device */
	union {
		uint8_t set_uuid[16];	/* identifies the cache set we belong to */
		uint64_t set_magic;
	};
	uint8_t label[SB_LABEL_SIZE];

	uint64_t flags;		/* bitfields: CACHE_* / BDEV_* accessors below */
	uint64_t seq;
	uint64_t pad[8];

	union {
	struct {
		/* Cache devices */
		uint64_t nbuckets;	/* device size, in buckets */

		uint16_t block_size;	/* sectors */
		uint16_t bucket_size;	/* sectors */

		uint16_t nr_in_set;
		uint16_t nr_this_dev;
	};
	struct {
		/* Backing devices */
		uint64_t data_offset;	/* sector where cached data starts */

		/*
		 * block_size from the cache-device struct above is shared
		 * by backing devices too (same union offset).
		 */
	};
	};

	uint32_t last_mount;	/* time_t */

	uint16_t first_bucket;	/* index of first usable bucket */
	union {
		uint16_t njournal_buckets;
		uint16_t keys;
	};
	uint64_t d[SB_JOURNAL_BUCKETS];	/* journal bucket numbers */
};
297
/* Flag accessors for cache-device superblocks. */
BITMASK(CACHE_SYNC, struct cache_sb, flags, 0, 1);
BITMASK(CACHE_DISCARD, struct cache_sb, flags, 1, 1);
BITMASK(CACHE_REPLACEMENT, struct cache_sb, flags, 2, 3);
#define CACHE_REPLACEMENT_LRU 0U
#define CACHE_REPLACEMENT_FIFO 1U
#define CACHE_REPLACEMENT_RANDOM 2U

/* Flag accessors for backing-device superblocks (same flags word). */
BITMASK(BDEV_CACHE_MODE, struct cache_sb, flags, 0, 4);
#define CACHE_MODE_WRITETHROUGH 0U
#define CACHE_MODE_WRITEBACK 1U
#define CACHE_MODE_WRITEAROUND 2U
#define CACHE_MODE_NONE 3U
BITMASK(BDEV_STATE, struct cache_sb, flags, 61, 2);
#define BDEV_STATE_NONE 0U
#define BDEV_STATE_CLEAN 1U
#define BDEV_STATE_DIRTY 2U
#define BDEV_STATE_STALE 3U
315
316
317
/* Version written in struct bset below. */
#define BCACHE_BSET_VERSION 1

/*
 * A sorted set of btree keys as laid out on disk and in memory: a header
 * followed by a packed array of bkeys (accessed via node()/end() below).
 * Journal entries (struct jset) and bucket priorities (struct prio_set)
 * share this header layout.
 */
struct bset {
	uint64_t csum;
	uint64_t magic;		/* bset_magic(c): per-set magic, see below */
	uint64_t seq;
	uint32_t version;
	uint32_t keys;		/* number of u64s of key data in d[] */

	union {
		struct bkey start[0];
		uint64_t d[0];
	};
};
336
337
338
339
340
/*
 * On-disk layout of the buckets that hold per-bucket priorities and
 * generations; pages are chained via next_bucket.
 */
struct prio_set {
	uint64_t csum;
	uint64_t magic;		/* pset_magic(c), see below */
	uint64_t seq;
	uint32_t version;
	uint32_t pad;

	uint64_t next_bucket;	/* bucket holding the next page of prios */

	struct bucket_disk {
		uint16_t prio;
		uint8_t gen;
	} __attribute((packed)) data[];	/* prios_per_bucket(c) entries */
};
355
/*
 * One entry in the on-disk UUID index mapping backing devices (and flash
 * only volumes) to the inode numbers used in btree keys; padded to a
 * fixed 128 bytes.
 */
struct uuid_entry {
	union {
		struct {
			uint8_t uuid[16];
			uint8_t label[32];
			uint32_t first_reg;	/* registration timestamps */
			uint32_t last_reg;
			uint32_t invalidated;

			uint32_t flags;		/* UUID_FLASH_ONLY accessor below */

			uint64_t sectors;	/* for flash only volumes */
		};

		uint8_t pad[128];
	};
};

BITMASK(UUID_FLASH_ONLY, struct uuid_entry, flags, 0, 1);
375
376#include "journal.h"
377#include "stats.h"
378struct search;
379struct btree;
380struct keybuf;
381
/* One key held in a struct keybuf, kept in an rbtree sorted by key. */
struct keybuf_key {
	struct rb_node node;
	BKEY_PADDED(key);
	void *private;		/* owned by the keybuf's user */
};
387
/* Predicate deciding whether a scanned key should be added to the buffer. */
typedef bool (keybuf_pred_fn)(struct keybuf *, struct bkey *);

/*
 * A buffer of keys matching key_predicate, refilled by scanning the btree
 * from last_scanned; used by writeback and moving gc to batch up work.
 */
struct keybuf {
	keybuf_pred_fn *key_predicate;

	struct bkey last_scanned;	/* where the next refill scan resumes */
	spinlock_t lock;

	/*
	 * Beginning and end of the range we're interested in; scans wrap
	 * back to start when they hit end.
	 */
	struct bkey start;
	struct bkey end;

	struct rb_root keys;

#define KEYBUF_NR 100
	DECLARE_ARRAY_ALLOCATOR(struct keybuf_key, freelist, KEYBUF_NR);
};
409
/* Per-device pools used when a bio must be split before submission. */
struct bio_split_pool {
	struct bio_set *bio_split;
	mempool_t *bio_split_hook;
};

/* State saved while a split bio is in flight, restored on completion. */
struct bio_split_hook {
	struct closure cl;
	struct bio_split_pool *p;
	struct bio *bio;
	bio_end_io_t *bi_end_io;	/* original completion, restored later */
	void *bi_private;
};
422
/*
 * Generic block device exported by bcache; embedded in struct cached_dev
 * for backing devices and used directly for flash only volumes.
 */
struct bcache_device {
	struct closure cl;

	struct kobject kobj;

	struct cache_set *c;	/* cache set we're attached to, if any */
	unsigned id;		/* index into c->devices[]; the btree inode number */
#define BCACHEDEVNAME_SIZE 12
	char name[BCACHEDEVNAME_SIZE];

	struct gendisk *disk;

	/* Set when the device is being shut down */
	atomic_t closing;

	/* Set when the device is being detached from its cache set */
	atomic_t detaching;
	int flush_done;

	atomic_long_t sectors_dirty;
	unsigned long sectors_dirty_gc;
	unsigned long sectors_dirty_last;
	long sectors_dirty_derivative;

	mempool_t *unaligned_bvec;
	struct bio_set *bio_split;

	unsigned data_csum:1;

	/* Called on a cache miss; per-device-type behavior. */
	int (*cache_miss)(struct btree *, struct search *,
			  struct bio *, unsigned);
	int (*ioctl) (struct bcache_device *, fmode_t, unsigned, unsigned long);

	struct bio_split_pool bio_split_hook;
};
458
/*
 * Tracks a recently seen IO stream so sequential IO can be detected
 * (see cached_dev's io/io_hash/io_lru below).
 */
struct io {
	/* Used to track sequential IO so it can be skipped */
	struct hlist_node hash;
	struct list_head lru;

	unsigned long jiffies;		/* when this stream was last seen */
	unsigned sequential;		/* accumulated sequential bytes */
	sector_t last;			/* sector right after the last IO seen */
};
468
/* A backing device being cached, plus its writeback state. */
struct cached_dev {
	struct list_head list;
	struct bcache_device disk;
	struct block_device *bdev;

	struct cache_sb sb;
	struct bio sb_bio;
	struct bio_vec sb_bv[1];
	struct closure_with_waitlist sb_write;

	/* Refcount on the cache set; detach work runs when it hits 0. */
	atomic_t count;
	struct work_struct detach;

	/* Nonzero once the device has been brought up (disk added). */
	atomic_t running;

	/*
	 * Protects the btree keys for dirty data against concurrent
	 * writeback; writers take it for reading.
	 */
	struct rw_semaphore writeback_lock;

	/*
	 * Nonzero while there may be dirty data in the cache for this
	 * backing device.
	 */
	atomic_t has_dirty;

	struct bch_ratelimit writeback_rate;
	struct delayed_work writeback_rate_update;

	/*
	 * Internal to the writeback code, so read_dirty() can keep track
	 * of where it's at.
	 */
	sector_t last_read;

	/* Limits the number of writeback bios in flight. */
	struct semaphore in_flight;
	struct closure_with_timer writeback;

	struct keybuf writeback_keys;

	/* For tracking sequential IO; see struct io above. */
#define RECENT_IO_BITS 7
#define RECENT_IO (1 << RECENT_IO_BITS)
	struct io io[RECENT_IO];
	struct hlist_head io_hash[RECENT_IO + 1];
	struct list_head io_lru;
	spinlock_t io_lock;

	struct cache_accounting accounting;

	/* The rest of this all shows up in sysfs */
	unsigned sequential_cutoff;
	unsigned readahead;

	unsigned sequential_merge:1;
	unsigned verify:1;

	unsigned writeback_metadata:1;
	unsigned writeback_running:1;
	unsigned char writeback_percent;
	unsigned writeback_delay;

	int writeback_rate_change;
	int64_t writeback_rate_derivative;
	uint64_t writeback_rate_target;

	/* PD-controller tunables for the writeback rate. */
	unsigned writeback_rate_update_seconds;
	unsigned writeback_rate_d_term;
	unsigned writeback_rate_p_term_inverse;
	unsigned writeback_rate_d_smooth;
};
548
/*
 * Reserves for bucket allocation, in decreasing order of priority;
 * indexes struct cache's watermark[] array.
 */
enum alloc_watermarks {
	WATERMARK_PRIO,		/* prio/gen writes */
	WATERMARK_METADATA,
	WATERMARK_MOVINGGC,
	WATERMARK_NONE,		/* normal data writes */
	WATERMARK_MAX
};
556
/* Per cache device (member of a cache set) state. */
struct cache {
	struct cache_set *set;
	struct cache_sb sb;
	struct bio sb_bio;
	struct bio_vec sb_bv[1];

	struct kobject kobj;
	struct block_device *bdev;

	/* Per-reserve allocation watermarks, indexed by enum alloc_watermarks. */
	unsigned watermark[WATERMARK_MAX];

	struct closure alloc;
	struct workqueue_struct *alloc_workqueue;

	struct closure prio;
	struct prio_set *disk_buckets;	/* buffer for reading/writing prios */

	/*
	 * When allocating new buckets, prio_write() gets first dibs - since
	 * the prios must be written before the new buckets can be used.
	 * prio_buckets[] holds the buckets currently containing prios;
	 * prio_last_buckets[] the ones that were used last time, so the old
	 * prios can still be read until the new ones are safely on disk.
	 */
	uint64_t *prio_buckets;
	uint64_t *prio_last_buckets;

	/*
	 * free: buckets ready to be handed out for allocation
	 * free_inc: buckets being invalidated on their way to free
	 * unused: buckets gc found completely empty, reusable without
	 * incrementing gen
	 */
	DECLARE_FIFO(long, free);
	DECLARE_FIFO(long, free_inc);
	DECLARE_FIFO(long, unused);

	size_t fifo_last_bucket;	/* cursor for FIFO bucket invalidation */

	/* Allocation stuff: */
	struct bucket *buckets;

	/* Heap of buckets ordered for invalidation (LRU replacement). */
	DECLARE_HEAP(struct bucket *, heap);

	/*
	 * NOTE(review): presumably set when gens have advanced far enough
	 * that prios must be rewritten - confirm against alloc.c.
	 */
	uint8_t need_save_prio;
	unsigned gc_move_threshold;

	/*
	 * If nonzero, we know we aren't going to find any buckets to
	 * invalidate until a gc finishes.
	 */
	unsigned invalidate_needs_gc:1;

	bool discard;		/* issue discards for reclaimed buckets */

	/* Discards are issued asynchronously from a worklist. */
	atomic_t discards_in_flight;
	struct list_head discards;

	struct journal_device journal;

	/* IO error accounting (decaying count, see bch_count_io_errors()). */
#define IO_ERROR_SHIFT 20
	atomic_t io_errors;
	atomic_t io_count;

	/* Write statistics, in sectors. */
	atomic_long_t meta_sectors_written;
	atomic_long_t btree_sectors_written;
	atomic_long_t sectors_written;

	struct bio_split_pool bio_split_hook;
};
646
/* Statistics gathered by a garbage collection pass. */
struct gc_stat {
	size_t nodes;		/* btree nodes seen */
	size_t key_bytes;

	size_t nkeys;
	uint64_t data;		/* sectors of live data */
	uint64_t dirty;		/* sectors of dirty data */
	unsigned in_use;	/* percent */
};
656
657
658
659
660
661
662
663
664
665
666
667
668
669
670
/*
 * Bits in cache_set->flags controlling shutdown: UNREGISTERING also
 * invalidates on-disk state, STOPPING/STOPPING_2 sequence a clean stop.
 */
#define CACHE_SET_UNREGISTERING 0
#define CACHE_SET_STOPPING 1
#define CACHE_SET_STOPPING_2 2

/*
 * A cache set: one or more cache devices plus the btree, journal and
 * allocation state shared between them, and the backing devices attached
 * to it.
 */
struct cache_set {
	struct closure cl;

	struct list_head list;		/* on bch_cache_sets */
	struct kobject kobj;
	struct kobject internal;
	struct dentry *debug;
	struct cache_accounting accounting;

	unsigned long flags;		/* CACHE_SET_* bits above */

	struct cache_sb sb;

	struct cache *cache[MAX_CACHES_PER_SET];
	struct cache *cache_by_alloc[MAX_CACHES_PER_SET];
	int caches_loaded;

	struct bcache_device **devices;	/* indexed by bcache_device->id */
	struct list_head cached_devs;
	uint64_t cached_dev_sectors;
	struct closure caching;

	struct closure_with_waitlist sb_write;

	mempool_t *search;		/* struct search allocation */
	mempool_t *bio_meta;
	struct bio_set *bio_split;

	/* For the btree node cache. */
	struct shrinker shrink;

	/* Waiters on bucket allocation. */
	wait_queue_head_t alloc_wait;

	/* Protects bucket allocation state and bucket gens/prios. */
	struct mutex bucket_lock;

	/* log2(bucket_size), in sectors */
	unsigned short bucket_bits;

	/* log2(block_size), in sectors */
	unsigned short block_bits;

	/* Size of a btree node, in pages; see btree_bytes() below. */
	unsigned btree_pages;

	/*
	 * Btree node cache: btree_cache holds nodes in use,
	 * btree_cache_freeable holds nodes whose data can be reused, and
	 * btree_cache_freed holds empty struct btrees kept around so
	 * allocation can't fail at awkward times.
	 */
	struct list_head btree_cache;
	struct list_head btree_cache_freeable;
	struct list_head btree_cache_freed;

	/* Number of cached btree nodes (for the shrinker). */
	unsigned bucket_cache_used;

	/*
	 * Serializes last-resort btree node allocation: only one closure
	 * may be reclaiming nodes at a time; others wait on try_wait.
	 */
	struct closure *try_harder;
	struct closure_waitlist try_wait;
	uint64_t try_harder_start;

	/*
	 * Waiting on prio writes before new buckets can be used; see
	 * bucket_wait for waiters on free buckets.
	 */
	atomic_t prio_blocked;
	struct closure_waitlist bucket_wait;

	/*
	 * Countdown of sectors until bucket priorities are rescaled;
	 * see bch_rescale_priorities().
	 */
	atomic_t rescale;

	/* Smallest prio of any bucket; used when rescaling. */
	uint16_t min_prio;

	/*
	 * Garbage collection state: need_gc is the max of all buckets'
	 * bucket_gc_gen(); gc runs before it can overflow.
	 */
	uint8_t need_gc;
	struct gc_stat gc_stats;
	size_t nbuckets;	/* total across all cache devices */

	struct closure_with_waitlist gc;
	/* Key up to which gc has completed in the current pass. */
	struct bkey gc_done;

	/*
	 * Nonzero when the gc_mark fields in struct bucket are valid
	 * (i.e. a gc pass has completed since they were zeroed).
	 */
	int gc_mark_valid;

	/* Counts down; gc is triggered when it goes negative. */
	atomic_t sectors_to_gc;

	struct closure moving_gc;
	struct closure_waitlist moving_gc_wait;
	struct keybuf moving_gc_keys;
	/* Number of moving gc bios in flight. */
	atomic_t in_flight;

	struct btree *root;

#ifdef CONFIG_BCACHE_DEBUG
	struct btree *verify_data;	/* shadow btree for verify mode */
	struct mutex verify_lock;
#endif

	unsigned nr_uuids;
	struct uuid_entry *uuids;
	BKEY_PADDED(uuid_bucket);	/* key of the bucket holding uuids */
	struct closure_with_waitlist uuid_write;

	/*
	 * A btree node on disk could have too many bsets for an iterator
	 * to fit on the stack - this is a single-element mempool for
	 * btree_read_work().
	 */
	struct mutex fill_lock;
	struct btree_iter *fill_iter;

	/*
	 * btree_sort() is a merge sort and requires temporary space -
	 * single-element mempool.
	 */
	struct mutex sort_lock;
	struct bset *sort;

	/* List of buckets we're currently writing data to. */
	struct list_head data_buckets;
	spinlock_t data_bucket_lock;

	struct journal journal;

#define CONGESTED_MAX 1024
	unsigned congested_last_us;
	atomic_t congested;

	/* The rest of this all shows up in sysfs */
	unsigned congested_read_threshold_us;
	unsigned congested_write_threshold_us;

	spinlock_t sort_time_lock;
	struct time_stats sort_time;
	struct time_stats btree_gc_time;
	struct time_stats btree_split_time;
	spinlock_t btree_read_time_lock;
	struct time_stats btree_read_time;
	struct time_stats try_harder_time;

	atomic_long_t cache_read_races;
	atomic_long_t writeback_keys_done;
	atomic_long_t writeback_keys_failed;
	unsigned error_limit;
	unsigned error_decay;
	unsigned short journal_delay_ms;
	unsigned verify:1;
	unsigned key_merging_disabled:1;	/* debug knob, see below */
	unsigned gc_always_rewrite:1;
	unsigned shrinker_disabled:1;
	unsigned copy_gc_enabled:1;

	/* Hash table to look up buckets by number (for the journal). */
#define BUCKET_HASH_BITS 12
	struct hlist_head bucket_hash[1 << BUCKET_HASH_BITS];
};
875
876static inline bool key_merging_disabled(struct cache_set *c)
877{
878#ifdef CONFIG_BCACHE_DEBUG
879 return c->key_merging_disabled;
880#else
881 return 0;
882#endif
883}
884
885static inline bool SB_IS_BDEV(const struct cache_sb *sb)
886{
887 return sb->version == BCACHE_SB_VERSION_BDEV
888 || sb->version == BCACHE_SB_VERSION_BDEV_WITH_OFFSET;
889}
890
/*
 * Wrapper around struct bio for IO to cache devices; carries the key the
 * bio was generated from so completion can do error accounting and
 * stale-pointer checks.  Must be allocated via bch_bbio_alloc().
 */
struct bbio {
	unsigned submit_time_us;	/* for latency/congestion accounting */
	union {
		struct bkey key;
		/* Only ever have a single pointer (the one being submitted). */
		uint64_t _pad[3];
	};
	struct bio bio;		/* must be last: bio is variable-size */
};
903
/* Local clock in (approximate) microseconds: ns >> 10 rather than /1000. */
static inline unsigned local_clock_us(void)
{
	unsigned long long ns = local_clock();

	return ns >> 10;
}
908
/* Max bsets per btree node before it must be sorted/rewritten. */
#define MAX_BSETS 4U

/* Bucket priorities: btree buckets never get reclaimed by LRU. */
#define BTREE_PRIO USHRT_MAX
#define INITIAL_PRIO 32768

/* Size of a btree node's data, in bytes/blocks. */
#define btree_bytes(c) ((c)->btree_pages * PAGE_SIZE)
#define btree_blocks(b) \
	((unsigned) (KEY_SIZE(&b->key) >> (b)->c->block_bits))

#define btree_default_blocks(c) \
	((unsigned) ((PAGE_SECTORS * (c)->btree_pages) >> (c)->block_bits))

/* Unit conversions derived from the superblock geometry. */
#define bucket_pages(c) ((c)->sb.bucket_size / PAGE_SECTORS)
#define bucket_bytes(c) ((c)->sb.bucket_size << 9)
#define block_bytes(c) ((c)->sb.block_size << 9)

/* Bytes occupied by a bset header plus k u64s of key data. */
#define __set_bytes(i, k) (sizeof(*(i)) + (k) * sizeof(uint64_t))
#define set_bytes(i) __set_bytes(i, i->keys)

#define __set_blocks(i, k, c) DIV_ROUND_UP(__set_bytes(i, k), block_bytes(c))
#define set_blocks(i, c) __set_blocks(i, (i)->keys, c)

/* j'th u64 of a bset's key data, as a bkey pointer / one past the last key. */
#define node(i, j) ((struct bkey *) ((i)->d + (j)))
#define end(i) node(i, (i)->keys)

/* Block index of bset i within btree node b. */
#define index(i, b) \
	((size_t) (((void *) i - (void *) (b)->sets[0].data) / \
		   block_bytes(b->c)))

#define btree_data_space(b) (PAGE_SIZE << (b)->page_order)

/* How many bucket_disk entries fit in one prio bucket. */
#define prios_per_bucket(c) \
	((bucket_bytes(c) - sizeof(struct prio_set)) / \
	 sizeof(struct bucket_disk))
#define prio_buckets(c) \
	DIV_ROUND_UP((size_t) (c)->sb.nbuckets, prios_per_bucket(c))

/*
 * Magic numbers for journal/prio/btree data, xored with the set's magic
 * so data from a different cache set is never mistaken for ours.
 */
#define JSET_MAGIC 0x245235c1a3625032ULL
#define PSET_MAGIC 0x6750e15f87337f91ULL
#define BSET_MAGIC 0x90135c78b99e07f5ULL

#define jset_magic(c) ((c)->sb.set_magic ^ JSET_MAGIC)
#define pset_magic(c) ((c)->sb.set_magic ^ PSET_MAGIC)
#define bset_magic(c) ((c)->sb.set_magic ^ BSET_MAGIC)
953
954
955
/* Generate get/set accessors for a bitfield packed into struct bkey. */
#define KEY_FIELD(name, field, offset, size) \
	BITMASK(name, struct bkey, field, offset, size)

/*
 * Generate get/set accessors for a bitfield packed into a bkey's i'th
 * pointer word.  Note the setter does not mask v; callers must pass a
 * value that fits in size bits.
 */
#define PTR_FIELD(name, offset, size) \
	static inline uint64_t name(const struct bkey *k, unsigned i) \
	{ return (k->ptr[i] >> offset) & ~(((uint64_t) ~0) << size); } \
	\
	static inline void SET_##name(struct bkey *k, unsigned i, uint64_t v)\
	{ \
		k->ptr[i] &= ~(~((uint64_t) ~0 << size) << offset); \
		k->ptr[i] |= v << offset; \
	}
968
/* Fields packed into bkey->high. */
KEY_FIELD(KEY_PTRS, high, 60, 3)	/* number of pointers in ptr[] */
KEY_FIELD(HEADER_SIZE, high, 58, 2)
KEY_FIELD(KEY_CSUM, high, 56, 2)
KEY_FIELD(KEY_PINNED, high, 55, 1)
KEY_FIELD(KEY_DIRTY, high, 36, 1)	/* dirty data, not yet written back */

KEY_FIELD(KEY_SIZE, high, 20, 16)	/* extent size, in sectors */
KEY_FIELD(KEY_INODE, high, 0, 20)	/* owning device's id */

/* Next time I change the on-disk format, KEY_OFFSET() won't be 64 bits */

/* The key's offset: the *end* of the extent, in sectors (see KEY_START). */
static inline uint64_t KEY_OFFSET(const struct bkey *k)
{
	return k->low;
}

static inline void SET_KEY_OFFSET(struct bkey *k, uint64_t v)
{
	k->low = v;
}

/* Fields packed into each pointer word. */
PTR_FIELD(PTR_DEV, 51, 12)	/* cache device id within the set */
PTR_FIELD(PTR_OFFSET, 8, 43)	/* sector on that device */
PTR_FIELD(PTR_GEN, 0, 8)	/* bucket gen the pointer was created with */

/* PTR_DEV value reserved for check keys (no real device). */
#define PTR_CHECK_DEV ((1 << 12) - 1)

/* Construct a packed pointer word. */
#define PTR(gen, offset, dev) \
	((((uint64_t) dev) << 51) | ((uint64_t) offset) << 8 | gen)
998
/* Convert a sector number to a bucket number (buckets are 2^bucket_bits sectors). */
static inline size_t sector_to_bucket(struct cache_set *c, sector_t s)
{
	return s >> c->bucket_bits;
}
1003
/* Convert a bucket number to its first sector. */
static inline sector_t bucket_to_sector(struct cache_set *c, size_t b)
{
	return ((sector_t) b) << c->bucket_bits;
}
1008
/* Offset of sector s within its bucket; relies on bucket_size being a power of 2. */
static inline sector_t bucket_remainder(struct cache_set *c, sector_t s)
{
	return s & (c->sb.bucket_size - 1);
}
1013
/* The cache device that key k's ptr'th pointer refers to. */
static inline struct cache *PTR_CACHE(struct cache_set *c,
				      const struct bkey *k,
				      unsigned ptr)
{
	return c->cache[PTR_DEV(k, ptr)];
}
1020
/* Bucket number (on its cache device) that the ptr'th pointer lands in. */
static inline size_t PTR_BUCKET_NR(struct cache_set *c,
				   const struct bkey *k,
				   unsigned ptr)
{
	return sector_to_bucket(c, PTR_OFFSET(k, ptr));
}
1027
/* In-memory struct bucket for the ptr'th pointer of key k. */
static inline struct bucket *PTR_BUCKET(struct cache_set *c,
					const struct bkey *k,
					unsigned ptr)
{
	return PTR_CACHE(c, k, ptr)->buckets + PTR_BUCKET_NR(c, k, ptr);
}
1034
1035
1036
1037
1038
1039
1040
1041
/*
 * Construct a bkey (no pointers).  Bit 63 of high is always set on live
 * keys; len goes in the KEY_SIZE field, dev in KEY_INODE.
 */
#define KEY(dev, sector, len) \
((struct bkey) { \
	.high = (1ULL << 63) | ((uint64_t) (len) << 20) | (dev), \
	.low = (sector) \
})

static inline void bkey_init(struct bkey *k)
{
	*k = KEY(0, 0, 0);
}

/* A key's offset is the END of its extent; START is offset - size. */
#define KEY_START(k) (KEY_OFFSET(k) - KEY_SIZE(k))
#define START_KEY(k) KEY(KEY_INODE(k), KEY_START(k), 0)
/*
 * NOTE(review): ~(~0 << 20) left-shifts a negative int (~0), which is
 * technically undefined behavior; ~(~0ULL << 20) would be cleaner.
 * Left as-is to avoid changing the constant's type for callers - confirm.
 */
#define MAX_KEY KEY(~(~0 << 20), ((uint64_t) ~0) >> 1, 0)
#define ZERO_KEY KEY(0, 0, 0)
1057
1058
1059
1060
1061
/*
 * Checksum a bset/jset/pset: crc64 over everything after the csum field
 * itself, up to the end of the keys.
 */
#define csum_set(i) \
	bch_crc64(((void *) (i)) + sizeof(uint64_t), \
	      ((void *) end(i)) - (((void *) (i)) + sizeof(uint64_t)))

/* Error-reporting macros: report via bch_cache_set_error(), which decides
 * whether to shut the cache set down; dump a stack trace if it did. */
#define btree_bug(b, ...) \
do { \
	if (bch_cache_set_error((b)->c, __VA_ARGS__)) \
		dump_stack(); \
} while (0)

#define cache_bug(c, ...) \
do { \
	if (bch_cache_set_error(c, __VA_ARGS__)) \
		dump_stack(); \
} while (0)

#define btree_bug_on(cond, b, ...) \
do { \
	if (cond) \
		btree_bug(b, __VA_ARGS__); \
} while (0)

#define cache_bug_on(cond, c, ...) \
do { \
	if (cond) \
		cache_bug(c, __VA_ARGS__); \
} while (0)

#define cache_set_err_on(cond, c, ...) \
do { \
	if (cond) \
		bch_cache_set_error(c, __VA_ARGS__); \
} while (0)
1097
1098
1099
/*
 * Iterate over the cache devices / buckets of a set.  NOTE(review):
 * for_each_cache evaluates cs->cache[iter] before checking the bound, so
 * on the final iteration it reads one slot past nr_in_set - safe only
 * while nr_in_set < MAX_CACHES_PER_SET; confirm.
 */
#define for_each_cache(ca, cs, iter) \
	for (iter = 0; ca = cs->cache[iter], iter < (cs)->sb.nr_in_set; iter++)

#define for_each_bucket(b, ca) \
	for (b = (ca)->buckets + (ca)->sb.first_bucket; \
	     b < (ca)->buckets + (ca)->sb.nbuckets; b++)

/* Drop the bucket pins taken for each pointer of key k. */
static inline void __bkey_put(struct cache_set *c, struct bkey *k)
{
	unsigned i;

	for (i = 0; i < KEY_PTRS(k); i++)
		atomic_dec_bug(&PTR_BUCKET(c, k, i)->pin);
}
1114
1115
1116
/* Emit a blktrace message on c's request queue, if it has one. */
#define blktrace_msg(c, fmt, ...) \
do { \
	struct request_queue *q = bdev_get_queue(c->bdev); \
	if (q) \
		blk_add_trace_msg(q, fmt, ##__VA_ARGS__); \
} while (0)

/* Emit a blktrace message on every cache device in set s. */
#define blktrace_msg_all(s, fmt, ...) \
do { \
	struct cache *_c; \
	unsigned i; \
	for_each_cache(_c, (s), i) \
		blktrace_msg(_c, fmt, ##__VA_ARGS__); \
} while (0)
1131
/* Drop a reference on dc; the last put schedules the detach work. */
static inline void cached_dev_put(struct cached_dev *dc)
{
	if (atomic_dec_and_test(&dc->count))
		schedule_work(&dc->detach);
}
1137
/*
 * Try to take a reference on dc; fails (returns false) if the count has
 * already dropped to zero, i.e. the device is going away.
 */
static inline bool cached_dev_get(struct cached_dev *dc)
{
	if (!atomic_inc_not_zero(&dc->count))
		return false;

	/* Pairs with the mb in cached_dev_attach handling; see callers. */
	smp_mb__after_atomic_inc();
	return true;
}
1147
1148
1149
1150
1151
1152
1153
1154
1155
/*
 * How many generations the bucket has advanced since the last gc pass;
 * uint8_t arithmetic wraps, which is intentional.
 */
static inline uint8_t bucket_gc_gen(struct bucket *b)
{
	return b->gen - b->last_gc;
}
1160
/* How many generations the bucket is ahead of what's on disk (wrapping). */
static inline uint8_t bucket_disk_gen(struct bucket *b)
{
	return b->gen - b->disk_gen;
}
1165
/* Bounds on the gen deltas above; gc / prio writes run before overflow. */
#define BUCKET_GC_GEN_MAX 96U
#define BUCKET_DISK_GEN_MAX 64U

/* Helpers for declaring sysfs attributes. */
#define kobj_attribute_write(n, fn) \
	static struct kobj_attribute ksysfs_##n = __ATTR(n, S_IWUSR, NULL, fn)

#define kobj_attribute_rw(n, show, store) \
	static struct kobj_attribute ksysfs_##n = \
		__ATTR(n, S_IWUSR|S_IRUSR, show, store)
1175
1176
1177
/* Forward declarations, grouped by subsystem (presumably matching the
 * implementing .c files - writeback, io, alloc, error handling, super,
 * debug; confirm against the source tree). */

/* Writeback */
void bch_writeback_queue(struct cached_dev *);
void bch_writeback_add(struct cached_dev *, unsigned);

/* IO and error accounting */
void bch_count_io_errors(struct cache *, int, const char *);
void bch_bbio_count_io_errors(struct cache_set *, struct bio *,
			      int, const char *);
void bch_bbio_endio(struct cache_set *, struct bio *, int, const char *);
void bch_bbio_free(struct bio *, struct cache_set *);
struct bio *bch_bbio_alloc(struct cache_set *);

struct bio *bch_bio_split(struct bio *, int, gfp_t, struct bio_set *);
void bch_generic_make_request(struct bio *, struct bio_split_pool *);
void __bch_submit_bbio(struct bio *, struct cache_set *);
void bch_submit_bbio(struct bio *, struct cache_set *, struct bkey *, unsigned);

/* Bucket allocation */
uint8_t bch_inc_gen(struct cache *, struct bucket *);
void bch_rescale_priorities(struct cache_set *, int);
bool bch_bucket_add_unused(struct cache *, struct bucket *);
void bch_allocator_thread(struct closure *);

long bch_bucket_alloc(struct cache *, unsigned, struct closure *);
void bch_bucket_free(struct cache_set *, struct bkey *);

int __bch_bucket_alloc_set(struct cache_set *, unsigned,
			   struct bkey *, int, struct closure *);
int bch_bucket_alloc_set(struct cache_set *, unsigned,
			 struct bkey *, int, struct closure *);

/* Error handling; returns true if the cache set is being shut down. */
__printf(2, 3)
bool bch_cache_set_error(struct cache_set *, const char *, ...);

void bch_prio_write(struct cache *);
void bch_write_bdev_super(struct cached_dev *, struct closure *);

/* Globals defined in super.c */
extern struct workqueue_struct *bcache_wq, *bch_gc_wq;
extern const char * const bch_cache_modes[];
extern struct mutex bch_register_lock;
extern struct list_head bch_cache_sets;

extern struct kobj_type bch_cached_dev_ktype;
extern struct kobj_type bch_flash_dev_ktype;
extern struct kobj_type bch_cache_set_ktype;
extern struct kobj_type bch_cache_set_internal_ktype;
extern struct kobj_type bch_cache_ktype;

void bch_cached_dev_release(struct kobject *);
void bch_flash_dev_release(struct kobject *);
void bch_cache_set_release(struct kobject *);
void bch_cache_release(struct kobject *);

int bch_uuid_write(struct cache_set *);
void bcache_write_super(struct cache_set *);

int bch_flash_dev_create(struct cache_set *c, uint64_t size);

/* Device lifecycle */
int bch_cached_dev_attach(struct cached_dev *, struct cache_set *);
void bch_cached_dev_detach(struct cached_dev *);
void bch_cached_dev_run(struct cached_dev *);
void bcache_device_stop(struct bcache_device *);

void bch_cache_set_unregister(struct cache_set *);
void bch_cache_set_stop(struct cache_set *);

struct cache_set *bch_cache_set_alloc(struct cache_sb *);
void bch_btree_cache_free(struct cache_set *);
int bch_btree_cache_alloc(struct cache_set *);
void bch_cached_dev_writeback_init(struct cached_dev *);
void bch_moving_init_cache_set(struct cache_set *);

void bch_cache_allocator_exit(struct cache *ca);
int bch_cache_allocator_init(struct cache *ca);

/* Module init/exit for each subsystem */
void bch_debug_exit(void);
int bch_debug_init(struct kobject *);
void bch_writeback_exit(void);
int bch_writeback_init(void);
void bch_request_exit(void);
int bch_request_init(void);
void bch_btree_exit(void);
int bch_btree_init(void);
1258
1259#endif
1260