/*
 * bcache sysfs interfaces
 *
 * Copyright 2010, 2011 Kent Overstreet <kent.overstreet@gmail.com>
 * Copyright 2012 Google, Inc.
 */

#include "bcache.h"
#include "sysfs.h"
#include "btree.h"
#include "request.h"
#include "writeback.h"

#include <linux/blkdev.h>
#include <linux/sort.h>

static const char * const cache_replacement_policies[] = {
	"lru",
	"fifo",
	"random",
	NULL
};

write_attribute(attach);
write_attribute(detach);
write_attribute(unregister);
write_attribute(stop);
write_attribute(clear_stats);
write_attribute(trigger_gc);
write_attribute(prune_cache);
write_attribute(flash_vol_create);

read_attribute(bucket_size);
read_attribute(block_size);
read_attribute(nbuckets);
read_attribute(tree_depth);
read_attribute(root_usage_percent);
read_attribute(priority_stats);
read_attribute(btree_cache_size);
read_attribute(btree_cache_max_chain);
read_attribute(cache_available_percent);
read_attribute(written);
read_attribute(btree_written);
read_attribute(metadata_written);
read_attribute(active_journal_entries);

sysfs_time_stats_attribute(btree_gc, sec, ms);
sysfs_time_stats_attribute(btree_split, sec, us);
sysfs_time_stats_attribute(btree_sort, ms, us);
sysfs_time_stats_attribute(btree_read, ms, us);
sysfs_time_stats_attribute(try_harder, ms, us);

read_attribute(btree_nodes);
read_attribute(btree_used_percent);
read_attribute(average_key_size);
read_attribute(dirty_data);
read_attribute(bset_tree_stats);

read_attribute(state);
read_attribute(cache_read_races);
read_attribute(writeback_keys_done);
read_attribute(writeback_keys_failed);
read_attribute(io_errors);
read_attribute(congested);
rw_attribute(congested_read_threshold_us);
rw_attribute(congested_write_threshold_us);

rw_attribute(sequential_cutoff);
rw_attribute(sequential_merge);
rw_attribute(data_csum);
rw_attribute(cache_mode);
rw_attribute(writeback_metadata);
rw_attribute(writeback_running);
rw_attribute(writeback_percent);
rw_attribute(writeback_delay);
rw_attribute(writeback_rate);

rw_attribute(writeback_rate_update_seconds);
rw_attribute(writeback_rate_d_term);
rw_attribute(writeback_rate_p_term_inverse);
rw_attribute(writeback_rate_d_smooth);
read_attribute(writeback_rate_debug);

read_attribute(stripe_size);
read_attribute(partial_stripes_expensive);

rw_attribute(synchronous);
rw_attribute(journal_delay_ms);
rw_attribute(discard);
rw_attribute(running);
rw_attribute(label);
rw_attribute(readahead);
rw_attribute(io_error_limit);
rw_attribute(io_error_halflife);
rw_attribute(verify);
rw_attribute(key_merging_disabled);
rw_attribute(gc_always_rewrite);
rw_attribute(freelist_percent);
rw_attribute(cache_replacement_policy);
rw_attribute(btree_shrinker_disabled);
rw_attribute(copy_gc_enabled);
rw_attribute(size);

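/*
 * SHOW() and STORE() are defined in sysfs.h and expand to the kobject
 * show/store methods, roughly (a sketch; see sysfs.h for the real macros):
 *
 *	static ssize_t bch_cached_dev_show(struct kobject *kobj,
 *					   struct attribute *attr, char *buf)
 *
 * The sysfs_print()/sysfs_strtoul() macro family compares @attr against the
 * corresponding sysfs_<name> attribute and returns early on a match, so each
 * handler body below is effectively one big attribute dispatch.
 */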
SHOW(__bch_cached_dev)
{
	struct cached_dev *dc = container_of(kobj, struct cached_dev,
					     disk.kobj);
	const char *states[] = { "no cache", "clean", "dirty", "inconsistent" };

#define var(stat) (dc->stat)

	if (attr == &sysfs_cache_mode)
		return bch_snprint_string_list(buf, PAGE_SIZE,
					       bch_cache_modes + 1,
					       BDEV_CACHE_MODE(&dc->sb));

	sysfs_printf(data_csum, "%i", dc->disk.data_csum);
	var_printf(verify, "%i");
	var_printf(writeback_metadata, "%i");
	var_printf(writeback_running, "%i");
	var_print(writeback_delay);
	var_print(writeback_percent);
	sysfs_print(writeback_rate, dc->writeback_rate.rate);

	var_print(writeback_rate_update_seconds);
	var_print(writeback_rate_d_term);
	var_print(writeback_rate_p_term_inverse);
	var_print(writeback_rate_d_smooth);

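	/*
	 * writeback_rate_debug aggregates the state of the writeback rate
	 * PD controller; dirty/derivative/target are tracked in sectors
	 * internally, so they are shifted left by 9 to print as bytes.
	 */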
	if (attr == &sysfs_writeback_rate_debug) {
		char dirty[20];
		char derivative[20];
		char target[20];
		bch_hprint(dirty,
			   bcache_dev_sectors_dirty(&dc->disk) << 9);
		bch_hprint(derivative, dc->writeback_rate_derivative << 9);
		bch_hprint(target, dc->writeback_rate_target << 9);

		return sprintf(buf,
			       "rate:\t\t%u\n"
			       "change:\t\t%i\n"
			       "dirty:\t\t%s\n"
			       "derivative:\t%s\n"
			       "target:\t\t%s\n",
			       dc->writeback_rate.rate,
			       dc->writeback_rate_change,
			       dirty, derivative, target);
	}

	sysfs_hprint(dirty_data,
		     bcache_dev_sectors_dirty(&dc->disk) << 9);

	sysfs_hprint(stripe_size, (1 << dc->disk.stripe_size_bits) << 9);
	var_printf(partial_stripes_expensive, "%u");

	var_printf(sequential_merge, "%i");
	var_hprint(sequential_cutoff);
	var_hprint(readahead);

	sysfs_print(running, atomic_read(&dc->running));
	sysfs_print(state, states[BDEV_STATE(&dc->sb)]);

	if (attr == &sysfs_label) {
		memcpy(buf, dc->sb.label, SB_LABEL_SIZE);
		buf[SB_LABEL_SIZE] = '\0';
		strcat(buf, "\n");
		return strlen(buf);
	}

#undef var
	return 0;
}
SHOW_LOCKED(bch_cached_dev)

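/*
 * The store path: __cached_dev_store() does the actual parsing and is called
 * with bch_register_lock held by the STORE(bch_cached_dev) wrapper further
 * down.
 */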
STORE(__cached_dev)
{
	struct cached_dev *dc = container_of(kobj, struct cached_dev,
					     disk.kobj);
	ssize_t v = size;
	struct cache_set *c;
	struct kobj_uevent_env *env;

#define d_strtoul(var) sysfs_strtoul(var, dc->var)
#define d_strtoi_h(var) sysfs_hatoi(var, dc->var)

	sysfs_strtoul(data_csum, dc->disk.data_csum);
	d_strtoul(verify);
	d_strtoul(writeback_metadata);
	d_strtoul(writeback_running);
	d_strtoul(writeback_delay);
	sysfs_strtoul_clamp(writeback_rate,
			    dc->writeback_rate.rate, 1, 1000000);
	sysfs_strtoul_clamp(writeback_percent, dc->writeback_percent, 0, 40);

	d_strtoul(writeback_rate_update_seconds);
	d_strtoul(writeback_rate_d_term);
	/* must be clamped, so don't use the plain d_strtoul() here */
	sysfs_strtoul_clamp(writeback_rate_p_term_inverse,
			    dc->writeback_rate_p_term_inverse, 1, INT_MAX);
	d_strtoul(writeback_rate_d_smooth);

	d_strtoul(sequential_merge);
	d_strtoi_h(sequential_cutoff);
	d_strtoi_h(readahead);
205
206 if (attr == &sysfs_clear_stats)
207 bch_cache_accounting_clear(&dc->accounting);
208
209 if (attr == &sysfs_running &&
210 strtoul_or_return(buf))
211 bch_cached_dev_run(dc);
212
213 if (attr == &sysfs_cache_mode) {
214 ssize_t v = bch_read_string_list(buf, bch_cache_modes + 1);
215
216 if (v < 0)
217 return v;
218
219 if ((unsigned) v != BDEV_CACHE_MODE(&dc->sb)) {
220 SET_BDEV_CACHE_MODE(&dc->sb, v);
221 bch_write_bdev_super(dc, NULL);
222 }
223 }
224
225 if (attr == &sysfs_label) {
226 if (size > SB_LABEL_SIZE)
227 return -EINVAL;
228 memcpy(dc->sb.label, buf, size);
229 if (size < SB_LABEL_SIZE)
230 dc->sb.label[size] = '\0';
231 if (size && dc->sb.label[size - 1] == '\n')
232 dc->sb.label[size - 1] = '\0';
233 bch_write_bdev_super(dc, NULL);
234 if (dc->disk.c) {
235 memcpy(dc->disk.c->uuids[dc->disk.id].label,
236 buf, SB_LABEL_SIZE);
237 bch_uuid_write(dc->disk.c);
238 }
239 env = kzalloc(sizeof(struct kobj_uevent_env), GFP_KERNEL);
240 if (!env)
241 return -ENOMEM;
242 add_uevent_var(env, "DRIVER=bcache");
		add_uevent_var(env, "CACHED_UUID=%pU", dc->sb.uuid);
		add_uevent_var(env, "CACHED_LABEL=%s", buf);
		kobject_uevent_env(
			&disk_to_dev(dc->disk.disk)->kobj, KOBJ_CHANGE, env->envp);
		kfree(env);
	}

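	/*
	 * Attaching is done by cache set UUID: parse the UUID written to the
	 * attach file, then try every registered cache set until one accepts
	 * the device.
	 */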
	if (attr == &sysfs_attach) {
		if (bch_parse_uuid(buf, dc->sb.set_uuid) < 16)
			return -EINVAL;

		list_for_each_entry(c, &bch_cache_sets, list) {
			v = bch_cached_dev_attach(dc, c);
			if (!v)
				return size;
		}

		pr_err("Can't attach %s: cache set not found", buf);
		size = v;
	}

	if (attr == &sysfs_detach && dc->disk.c)
		bch_cached_dev_detach(dc);

	if (attr == &sysfs_stop)
		bcache_device_stop(&dc->disk);

	return size;
}

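/*
 * The locked wrapper also kicks the writeback machinery after a successful
 * store, since enabling writeback or changing the target percentage may
 * require waking the writeback thread or rescheduling the rate update work.
 */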
STORE(bch_cached_dev)
{
	struct cached_dev *dc = container_of(kobj, struct cached_dev,
					     disk.kobj);

	mutex_lock(&bch_register_lock);
	size = __cached_dev_store(kobj, attr, buf, size);

	if (attr == &sysfs_writeback_running)
		bch_writeback_queue(dc);

	if (attr == &sysfs_writeback_percent)
		schedule_delayed_work(&dc->writeback_rate_update,
				      dc->writeback_rate_update_seconds * HZ);

	mutex_unlock(&bch_register_lock);
	return size;
}

static struct attribute *bch_cached_dev_files[] = {
	&sysfs_attach,
	&sysfs_detach,
	&sysfs_stop,
#if 0
	&sysfs_data_csum,
#endif
	&sysfs_cache_mode,
	&sysfs_writeback_metadata,
	&sysfs_writeback_running,
	&sysfs_writeback_delay,
	&sysfs_writeback_percent,
	&sysfs_writeback_rate,
	&sysfs_writeback_rate_update_seconds,
	&sysfs_writeback_rate_d_term,
	&sysfs_writeback_rate_p_term_inverse,
	&sysfs_writeback_rate_d_smooth,
	&sysfs_writeback_rate_debug,
	&sysfs_dirty_data,
	&sysfs_stripe_size,
	&sysfs_partial_stripes_expensive,
	&sysfs_sequential_cutoff,
	&sysfs_sequential_merge,
	&sysfs_clear_stats,
	&sysfs_running,
	&sysfs_state,
	&sysfs_label,
	&sysfs_readahead,
#ifdef CONFIG_BCACHE_DEBUG
	&sysfs_verify,
#endif
	NULL
};
KTYPE(bch_cached_dev);
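
/*
 * These attributes typically show up under /sys/block/<bdev>/bcache/ once a
 * backing device is registered, e.g. (exact device names depend on the
 * setup):
 *
 *	echo writeback > /sys/block/bcache0/bcache/cache_mode
 *	echo 10 > /sys/block/bcache0/bcache/writeback_percent
 */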

SHOW(bch_flash_dev)
{
	struct bcache_device *d = container_of(kobj, struct bcache_device,
					       kobj);
	struct uuid_entry *u = &d->c->uuids[d->id];

	sysfs_printf(data_csum, "%i", d->data_csum);
	sysfs_hprint(size, u->sectors << 9);

	if (attr == &sysfs_label) {
		memcpy(buf, u->label, SB_LABEL_SIZE);
		buf[SB_LABEL_SIZE] = '\0';
		strcat(buf, "\n");
		return strlen(buf);
	}

	return 0;
}

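/*
 * Flash-only volumes have no backing device; their size lives in the uuid
 * entry, so resizing via the size attribute just updates that entry and the
 * gendisk capacity.
 */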
STORE(__bch_flash_dev)
{
	struct bcache_device *d = container_of(kobj, struct bcache_device,
					       kobj);
	struct uuid_entry *u = &d->c->uuids[d->id];

	sysfs_strtoul(data_csum, d->data_csum);

	if (attr == &sysfs_size) {
		uint64_t v;
		strtoi_h_or_return(buf, v);

		u->sectors = v >> 9;
		bch_uuid_write(d->c);
		set_capacity(d->disk, u->sectors);
	}

	if (attr == &sysfs_label) {
		memcpy(u->label, buf, SB_LABEL_SIZE);
		bch_uuid_write(d->c);
	}

	if (attr == &sysfs_unregister) {
		atomic_set(&d->detaching, 1);
		bcache_device_stop(d);
	}

	return size;
}
STORE_LOCKED(bch_flash_dev)

static struct attribute *bch_flash_dev_files[] = {
	&sysfs_unregister,
#if 0
	&sysfs_data_csum,
#endif
	&sysfs_label,
	&sysfs_size,
	NULL
};
KTYPE(bch_flash_dev);

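/*
 * The cache set show path computes several statistics on the fly; the
 * helpers below are nested functions (a GCC extension), each taking
 * whatever locks it needs.
 */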
SHOW(__bch_cache_set)
{
	unsigned root_usage(struct cache_set *c)
	{
		unsigned bytes = 0;
		struct bkey *k;
		struct btree *b;
		struct btree_iter iter;

		goto lock_root;

		do {
			rw_unlock(false, b);
lock_root:
			b = c->root;
			rw_lock(false, b, b->level);
		} while (b != c->root);

		for_each_key_filter(b, k, &iter, bch_ptr_bad)
			bytes += bkey_bytes(k);

		rw_unlock(false, b);

		return (bytes * 100) / btree_bytes(c);
	}

	size_t cache_size(struct cache_set *c)
	{
		size_t ret = 0;
		struct btree *b;

		mutex_lock(&c->bucket_lock);
		list_for_each_entry(b, &c->btree_cache, list)
			ret += 1 << (b->page_order + PAGE_SHIFT);

		mutex_unlock(&c->bucket_lock);
		return ret;
	}

	unsigned cache_max_chain(struct cache_set *c)
	{
		unsigned ret = 0;
		struct hlist_head *h;

		mutex_lock(&c->bucket_lock);

		for (h = c->bucket_hash;
		     h < c->bucket_hash + (1 << BUCKET_HASH_BITS);
		     h++) {
			unsigned i = 0;
			struct hlist_node *p;

			hlist_for_each(p, h)
				i++;

			ret = max(ret, i);
		}

		mutex_unlock(&c->bucket_lock);
		return ret;
	}

	unsigned btree_used(struct cache_set *c)
	{
		return div64_u64(c->gc_stats.key_bytes * 100,
				 (c->gc_stats.nodes ?: 1) * btree_bytes(c));
	}

	unsigned average_key_size(struct cache_set *c)
	{
		return c->gc_stats.nkeys
			? div64_u64(c->gc_stats.data, c->gc_stats.nkeys)
			: 0;
	}

	struct cache_set *c = container_of(kobj, struct cache_set, kobj);

	sysfs_print(synchronous, CACHE_SYNC(&c->sb));
	sysfs_print(journal_delay_ms, c->journal_delay_ms);
	sysfs_hprint(bucket_size, bucket_bytes(c));
	sysfs_hprint(block_size, block_bytes(c));
	sysfs_print(tree_depth, c->root->level);
	sysfs_print(root_usage_percent, root_usage(c));

	sysfs_hprint(btree_cache_size, cache_size(c));
	sysfs_print(btree_cache_max_chain, cache_max_chain(c));
	sysfs_print(cache_available_percent, 100 - c->gc_stats.in_use);

	sysfs_print_time_stats(&c->btree_gc_time, btree_gc, sec, ms);
	sysfs_print_time_stats(&c->btree_split_time, btree_split, sec, us);
	sysfs_print_time_stats(&c->sort_time, btree_sort, ms, us);
	sysfs_print_time_stats(&c->btree_read_time, btree_read, ms, us);
	sysfs_print_time_stats(&c->try_harder_time, try_harder, ms, us);

	sysfs_print(btree_used_percent, btree_used(c));
	sysfs_print(btree_nodes, c->gc_stats.nodes);
	sysfs_hprint(dirty_data, c->gc_stats.dirty);
	sysfs_hprint(average_key_size, average_key_size(c));

	sysfs_print(cache_read_races,
		    atomic_long_read(&c->cache_read_races));

	sysfs_print(writeback_keys_done,
		    atomic_long_read(&c->writeback_keys_done));
	sysfs_print(writeback_keys_failed,
		    atomic_long_read(&c->writeback_keys_failed));

	sysfs_print(io_error_halflife, c->error_decay * 88);
	sysfs_print(io_error_limit, c->error_limit >> IO_ERROR_SHIFT);

	sysfs_hprint(congested,
		     ((uint64_t) bch_get_congested(c)) << 9);
	sysfs_print(congested_read_threshold_us,
		    c->congested_read_threshold_us);
	sysfs_print(congested_write_threshold_us,
		    c->congested_write_threshold_us);

	sysfs_print(active_journal_entries, fifo_used(&c->journal.pin));
	sysfs_printf(verify, "%i", c->verify);
	sysfs_printf(key_merging_disabled, "%i", c->key_merging_disabled);
	sysfs_printf(gc_always_rewrite, "%i", c->gc_always_rewrite);
	sysfs_printf(btree_shrinker_disabled, "%i", c->shrinker_disabled);
	sysfs_printf(copy_gc_enabled, "%i", c->copy_gc_enabled);

	if (attr == &sysfs_bset_tree_stats)
		return bch_bset_print_stats(c, buf);

	return 0;
}
SHOW_LOCKED(bch_cache_set)

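/*
 * Cache set store: most writes either flip a flag in the superblock (and
 * rewrite it), clear statistics, or poke a background operation such as gc
 * or the btree cache shrinker.
 */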
STORE(__bch_cache_set)
{
	struct cache_set *c = container_of(kobj, struct cache_set, kobj);

	if (attr == &sysfs_unregister)
		bch_cache_set_unregister(c);

	if (attr == &sysfs_stop)
		bch_cache_set_stop(c);

	if (attr == &sysfs_synchronous) {
		bool sync = strtoul_or_return(buf);

		if (sync != CACHE_SYNC(&c->sb)) {
			SET_CACHE_SYNC(&c->sb, sync);
			bcache_write_super(c);
		}
	}

	if (attr == &sysfs_flash_vol_create) {
		int r;
		uint64_t v;
		strtoi_h_or_return(buf, v);

		r = bch_flash_dev_create(c, v);
		if (r)
			return r;
	}

	if (attr == &sysfs_clear_stats) {
		atomic_long_set(&c->writeback_keys_done, 0);
		atomic_long_set(&c->writeback_keys_failed, 0);

		memset(&c->gc_stats, 0, sizeof(struct gc_stat));
		bch_cache_accounting_clear(&c->accounting);
	}

	if (attr == &sysfs_trigger_gc)
		bch_queue_gc(c);

	if (attr == &sysfs_prune_cache) {
		struct shrink_control sc;
		sc.gfp_mask = GFP_KERNEL;
		sc.nr_to_scan = strtoul_or_return(buf);
		c->shrink.scan_objects(&c->shrink, &sc);
	}

	sysfs_strtoul(congested_read_threshold_us,
		      c->congested_read_threshold_us);
	sysfs_strtoul(congested_write_threshold_us,
		      c->congested_write_threshold_us);

	if (attr == &sysfs_io_error_limit)
		c->error_limit = strtoul_or_return(buf) << IO_ERROR_SHIFT;

	/* see bch_count_io_errors() for where error_decay is used */
	if (attr == &sysfs_io_error_halflife)
		c->error_decay = strtoul_or_return(buf) / 88;

	sysfs_strtoul(journal_delay_ms, c->journal_delay_ms);
	sysfs_strtoul(verify, c->verify);
	sysfs_strtoul(key_merging_disabled, c->key_merging_disabled);
	sysfs_strtoul(gc_always_rewrite, c->gc_always_rewrite);
	sysfs_strtoul(btree_shrinker_disabled, c->shrinker_disabled);
	sysfs_strtoul(copy_gc_enabled, c->copy_gc_enabled);

	return size;
}
STORE_LOCKED(bch_cache_set)
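
/*
 * The "internal" kobject is a subdirectory of the cache set's sysfs dir for
 * the more obscure, debugging-oriented attributes; its show/store just
 * forward to the cache set's handlers with the parent kobject.
 */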

SHOW(bch_cache_set_internal)
{
	struct cache_set *c = container_of(kobj, struct cache_set, internal);
	return bch_cache_set_show(&c->kobj, attr, buf);
}

STORE(bch_cache_set_internal)
{
	struct cache_set *c = container_of(kobj, struct cache_set, internal);
	return bch_cache_set_store(&c->kobj, attr, buf, size);
}

static void bch_cache_set_internal_release(struct kobject *k)
{
}

static struct attribute *bch_cache_set_files[] = {
	&sysfs_unregister,
	&sysfs_stop,
	&sysfs_synchronous,
	&sysfs_journal_delay_ms,
	&sysfs_flash_vol_create,

	&sysfs_bucket_size,
	&sysfs_block_size,
	&sysfs_tree_depth,
	&sysfs_root_usage_percent,
	&sysfs_btree_cache_size,
	&sysfs_cache_available_percent,

	&sysfs_average_key_size,
	&sysfs_dirty_data,

	&sysfs_io_error_limit,
	&sysfs_io_error_halflife,
	&sysfs_congested,
	&sysfs_congested_read_threshold_us,
	&sysfs_congested_write_threshold_us,
	&sysfs_clear_stats,
	NULL
};
KTYPE(bch_cache_set);

static struct attribute *bch_cache_set_internal_files[] = {
	&sysfs_active_journal_entries,

	sysfs_time_stats_attribute_list(btree_gc, sec, ms)
	sysfs_time_stats_attribute_list(btree_split, sec, us)
	sysfs_time_stats_attribute_list(btree_sort, ms, us)
	sysfs_time_stats_attribute_list(btree_read, ms, us)
	sysfs_time_stats_attribute_list(try_harder, ms, us)

	&sysfs_btree_nodes,
	&sysfs_btree_used_percent,
	&sysfs_btree_cache_max_chain,

	&sysfs_bset_tree_stats,
	&sysfs_cache_read_races,
	&sysfs_writeback_keys_done,
	&sysfs_writeback_keys_failed,

	&sysfs_trigger_gc,
	&sysfs_prune_cache,
#ifdef CONFIG_BCACHE_DEBUG
	&sysfs_verify,
	&sysfs_key_merging_disabled,
#endif
	&sysfs_gc_always_rewrite,
	&sysfs_btree_shrinker_disabled,
	&sysfs_copy_gc_enabled,
	NULL
};
KTYPE(bch_cache_set_internal);

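/*
 * Per-cache-device (struct cache) statistics: sector counts are shifted
 * left by 9 to report bytes.
 */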
SHOW(__bch_cache)
{
	struct cache *ca = container_of(kobj, struct cache, kobj);

	sysfs_hprint(bucket_size, bucket_bytes(ca));
	sysfs_hprint(block_size, block_bytes(ca));
	sysfs_print(nbuckets, ca->sb.nbuckets);
	sysfs_print(discard, ca->discard);
	sysfs_hprint(written, atomic_long_read(&ca->sectors_written) << 9);
	sysfs_hprint(btree_written,
		     atomic_long_read(&ca->btree_sectors_written) << 9);
	sysfs_hprint(metadata_written,
		     (atomic_long_read(&ca->meta_sectors_written) +
		      atomic_long_read(&ca->btree_sectors_written)) << 9);

	sysfs_print(io_errors,
		    atomic_read(&ca->io_errors) >> IO_ERROR_SHIFT);

	sysfs_print(freelist_percent, ca->free.size * 100 /
		    ((size_t) ca->sb.nbuckets));

	if (attr == &sysfs_cache_replacement_policy)
		return bch_snprint_string_list(buf, PAGE_SIZE,
					       cache_replacement_policies,
					       CACHE_REPLACEMENT(&ca->sb));

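	/*
	 * priority_stats: snapshot every bucket's priority, sort descending,
	 * then strip unused buckets (prio 0) off the tail and btree buckets
	 * (BTREE_PRIO) off the head. What's left are cached-data buckets;
	 * print the average "age" (INITIAL_PRIO - prio) and 31 quantiles of
	 * the distribution.
	 */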
	if (attr == &sysfs_priority_stats) {
		int cmp(const void *l, const void *r)
		{ return *((uint16_t *) r) - *((uint16_t *) l); }

		size_t n = ca->sb.nbuckets, i, unused, btree;
		uint64_t sum = 0;

		uint16_t q[31], *p, *cached;
		ssize_t ret;

		cached = p = vmalloc(ca->sb.nbuckets * sizeof(uint16_t));
		if (!p)
			return -ENOMEM;

		mutex_lock(&ca->set->bucket_lock);
		for (i = ca->sb.first_bucket; i < n; i++)
			p[i] = ca->buckets[i].prio;
		mutex_unlock(&ca->set->bucket_lock);

		sort(p, n, sizeof(uint16_t), cmp, NULL);

		while (n &&
		       !cached[n - 1])
			--n;

		unused = ca->sb.nbuckets - n;

		while (cached < p + n &&
		       *cached == BTREE_PRIO)
			cached++;

		btree = cached - p;
		n -= btree;

		for (i = 0; i < n; i++)
			sum += INITIAL_PRIO - cached[i];

		if (n)
			do_div(sum, n);

		for (i = 0; i < ARRAY_SIZE(q); i++)
			q[i] = INITIAL_PRIO - cached[n * (i + 1) /
				(ARRAY_SIZE(q) + 1)];

		vfree(p);

		ret = scnprintf(buf, PAGE_SIZE,
				"Unused: %zu%%\n"
				"Metadata: %zu%%\n"
				"Average: %llu\n"
				"Sectors per Q: %zu\n"
				"Quantiles: [",
				unused * 100 / (size_t) ca->sb.nbuckets,
				btree * 100 / (size_t) ca->sb.nbuckets, sum,
				n * ca->sb.bucket_size / (ARRAY_SIZE(q) + 1));

		for (i = 0; i < ARRAY_SIZE(q); i++)
			ret += scnprintf(buf + ret, PAGE_SIZE - ret,
					 "%u ", q[i]);
		ret--;

		ret += scnprintf(buf + ret, PAGE_SIZE - ret, "]\n");

		return ret;
	}

	return 0;
}
SHOW_LOCKED(bch_cache)

STORE(__bch_cache)
{
	struct cache *ca = container_of(kobj, struct cache, kobj);

	if (attr == &sysfs_discard) {
		bool v = strtoul_or_return(buf);

		if (blk_queue_discard(bdev_get_queue(ca->bdev)))
			ca->discard = v;

		if (v != CACHE_DISCARD(&ca->sb)) {
			SET_CACHE_DISCARD(&ca->sb, v);
			bcache_write_super(ca->set);
		}
	}

	if (attr == &sysfs_cache_replacement_policy) {
		ssize_t v = bch_read_string_list(buf, cache_replacement_policies);

		if (v < 0)
			return v;

		if ((unsigned) v != CACHE_REPLACEMENT(&ca->sb)) {
			mutex_lock(&ca->set->bucket_lock);
			SET_CACHE_REPLACEMENT(&ca->sb, v);
			mutex_unlock(&ca->set->bucket_lock);

			bcache_write_super(ca->set);
		}
	}

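	/*
	 * freelist_percent resizes the free-bucket fifo: build a new fifo of
	 * the requested size, move as many entries across as fit, swap it in
	 * under bucket_lock, then drop the pin on any buckets left stranded
	 * in the old fifo.
	 */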
	if (attr == &sysfs_freelist_percent) {
		DECLARE_FIFO(long, free);
		long i;
		size_t p = strtoul_or_return(buf);

		p = clamp_t(size_t,
			    ((size_t) ca->sb.nbuckets * p) / 100,
			    roundup_pow_of_two(ca->sb.nbuckets) >> 9,
			    ca->sb.nbuckets / 2);

		if (!init_fifo_exact(&free, p, GFP_KERNEL))
			return -ENOMEM;

		mutex_lock(&ca->set->bucket_lock);

		fifo_move(&free, &ca->free);
		fifo_swap(&free, &ca->free);

		mutex_unlock(&ca->set->bucket_lock);

		while (fifo_pop(&free, i))
			atomic_dec(&ca->buckets[i].pin);

		free_fifo(&free);
	}

	if (attr == &sysfs_clear_stats) {
		atomic_long_set(&ca->sectors_written, 0);
		atomic_long_set(&ca->btree_sectors_written, 0);
		atomic_long_set(&ca->meta_sectors_written, 0);
		atomic_set(&ca->io_count, 0);
		atomic_set(&ca->io_errors, 0);
	}

	return size;
}
STORE_LOCKED(bch_cache)

static struct attribute *bch_cache_files[] = {
	&sysfs_bucket_size,
	&sysfs_block_size,
	&sysfs_nbuckets,
	&sysfs_priority_stats,
	&sysfs_discard,
	&sysfs_written,
	&sysfs_btree_written,
	&sysfs_metadata_written,
	&sysfs_io_errors,
	&sysfs_clear_stats,
	&sysfs_freelist_percent,
	&sysfs_cache_replacement_policy,
	NULL
};
KTYPE(bch_cache);