linux/drivers/md/bcache/sysfs.c
/*
 * bcache sysfs interfaces
 *
 * Copyright 2010, 2011 Kent Overstreet <kent.overstreet@gmail.com>
 * Copyright 2012 Google, Inc.
 */

#include "bcache.h"
#include "sysfs.h"
#include "btree.h"
#include "request.h"
#include "writeback.h"

#include <linux/blkdev.h>
#include <linux/sort.h>

static const char * const cache_replacement_policies[] = {
        "lru",
        "fifo",
        "random",
        NULL
};

static const char * const error_actions[] = {
        "unregister",
        "panic",
        NULL
};

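/*
 * Attribute declarations: these helpers from sysfs.h declare
 * "static struct attribute sysfs_<name>" objects with write-only,
 * read-only or read-write permissions respectively.
 */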
write_attribute(attach);
write_attribute(detach);
write_attribute(unregister);
write_attribute(stop);
write_attribute(clear_stats);
write_attribute(trigger_gc);
write_attribute(prune_cache);
write_attribute(flash_vol_create);

read_attribute(bucket_size);
read_attribute(block_size);
read_attribute(nbuckets);
read_attribute(tree_depth);
read_attribute(root_usage_percent);
read_attribute(priority_stats);
read_attribute(btree_cache_size);
read_attribute(btree_cache_max_chain);
read_attribute(cache_available_percent);
read_attribute(written);
read_attribute(btree_written);
read_attribute(metadata_written);
read_attribute(active_journal_entries);

sysfs_time_stats_attribute(btree_gc,    sec, ms);
sysfs_time_stats_attribute(btree_split, sec, us);
sysfs_time_stats_attribute(btree_sort,  ms,  us);
sysfs_time_stats_attribute(btree_read,  ms,  us);

read_attribute(btree_nodes);
read_attribute(btree_used_percent);
read_attribute(average_key_size);
read_attribute(dirty_data);
read_attribute(bset_tree_stats);

read_attribute(state);
read_attribute(cache_read_races);
read_attribute(writeback_keys_done);
read_attribute(writeback_keys_failed);
read_attribute(io_errors);
read_attribute(congested);
rw_attribute(congested_read_threshold_us);
rw_attribute(congested_write_threshold_us);

rw_attribute(sequential_cutoff);
rw_attribute(data_csum);
rw_attribute(cache_mode);
rw_attribute(writeback_metadata);
rw_attribute(writeback_running);
rw_attribute(writeback_percent);
rw_attribute(writeback_delay);
rw_attribute(writeback_rate);

rw_attribute(writeback_rate_update_seconds);
rw_attribute(writeback_rate_d_term);
rw_attribute(writeback_rate_p_term_inverse);
read_attribute(writeback_rate_debug);

read_attribute(stripe_size);
read_attribute(partial_stripes_expensive);

rw_attribute(synchronous);
rw_attribute(journal_delay_ms);
rw_attribute(discard);
rw_attribute(running);
rw_attribute(label);
rw_attribute(readahead);
rw_attribute(errors);
rw_attribute(io_error_limit);
rw_attribute(io_error_halflife);
rw_attribute(verify);
rw_attribute(bypass_torture_test);
rw_attribute(key_merging_disabled);
rw_attribute(gc_always_rewrite);
rw_attribute(expensive_debug_checks);
rw_attribute(cache_replacement_policy);
rw_attribute(btree_shrinker_disabled);
rw_attribute(copy_gc_enabled);
rw_attribute(size);

SHOW(__bch_cached_dev)
{
        struct cached_dev *dc = container_of(kobj, struct cached_dev,
                                             disk.kobj);
        const char *states[] = { "no cache", "clean", "dirty", "inconsistent" };

#define var(stat)               (dc->stat)

        if (attr == &sysfs_cache_mode)
                return bch_snprint_string_list(buf, PAGE_SIZE,
                                               bch_cache_modes + 1,
                                               BDEV_CACHE_MODE(&dc->sb));

        sysfs_printf(data_csum,         "%i", dc->disk.data_csum);
        var_printf(verify,              "%i");
        var_printf(bypass_torture_test, "%i");
        var_printf(writeback_metadata,  "%i");
        var_printf(writeback_running,   "%i");
        var_print(writeback_delay);
        var_print(writeback_percent);
        sysfs_hprint(writeback_rate,    dc->writeback_rate.rate << 9);

        var_print(writeback_rate_update_seconds);
        var_print(writeback_rate_d_term);
        var_print(writeback_rate_p_term_inverse);

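        /*
         * writeback_rate_debug dumps the state of the writeback PD
         * controller.  Rates and amounts are tracked in 512-byte
         * sectors, hence the << 9 when formatting them as bytes, and
         * "next io" is the time in ms until the rate limiter allows
         * the next writeback I/O (negative means it is overdue).
         */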
        if (attr == &sysfs_writeback_rate_debug) {
                char rate[20];
                char dirty[20];
                char target[20];
                char proportional[20];
                char derivative[20];
                char change[20];
                s64 next_io;

                bch_hprint(rate,        dc->writeback_rate.rate << 9);
                bch_hprint(dirty,       bcache_dev_sectors_dirty(&dc->disk) << 9);
                bch_hprint(target,      dc->writeback_rate_target << 9);
                bch_hprint(proportional, dc->writeback_rate_proportional << 9);
                bch_hprint(derivative,  dc->writeback_rate_derivative << 9);
                bch_hprint(change,      dc->writeback_rate_change << 9);

                next_io = div64_s64(dc->writeback_rate.next - local_clock(),
                                    NSEC_PER_MSEC);

                return sprintf(buf,
                               "rate:\t\t%s/sec\n"
                               "dirty:\t\t%s\n"
                               "target:\t\t%s\n"
                               "proportional:\t%s\n"
                               "derivative:\t%s\n"
                               "change:\t\t%s/sec\n"
                               "next io:\t%llims\n",
                               rate, dirty, target, proportional,
                               derivative, change, next_io);
        }

        sysfs_hprint(dirty_data,
                     bcache_dev_sectors_dirty(&dc->disk) << 9);

        sysfs_hprint(stripe_size,       dc->disk.stripe_size << 9);
        var_printf(partial_stripes_expensive,   "%u");

        var_hprint(sequential_cutoff);
        var_hprint(readahead);

        sysfs_print(running,            atomic_read(&dc->running));
        sysfs_print(state,              states[BDEV_STATE(&dc->sb)]);

        if (attr == &sysfs_label) {
                memcpy(buf, dc->sb.label, SB_LABEL_SIZE);
                buf[SB_LABEL_SIZE] = '\0';
                strcat(buf, "\n");
                return strlen(buf);
        }

#undef var
        return 0;
}
SHOW_LOCKED(bch_cached_dev)

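/*
 * These show/store hooks back the per-device files that appear under
 * /sys/block/<bdev>/bcache/ (typically also linked from
 * /sys/block/bcache<N>/bcache/), e.g.
 * "echo writeback > /sys/block/bcache0/bcache/cache_mode"
 * (illustrative path; the exact device names depend on the setup).
 */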
STORE(__cached_dev)
{
        struct cached_dev *dc = container_of(kobj, struct cached_dev,
                                             disk.kobj);
        unsigned v = size;
        struct cache_set *c;
        struct kobj_uevent_env *env;

#define d_strtoul(var)          sysfs_strtoul(var, dc->var)
#define d_strtoul_nonzero(var)  sysfs_strtoul_clamp(var, dc->var, 1, INT_MAX)
#define d_strtoi_h(var)         sysfs_hatoi(var, dc->var)

        sysfs_strtoul(data_csum,        dc->disk.data_csum);
        d_strtoul(verify);
        d_strtoul(bypass_torture_test);
        d_strtoul(writeback_metadata);
        d_strtoul(writeback_running);
        d_strtoul(writeback_delay);

        sysfs_strtoul_clamp(writeback_percent, dc->writeback_percent, 0, 40);

        sysfs_strtoul_clamp(writeback_rate,
                            dc->writeback_rate.rate, 1, INT_MAX);

        d_strtoul_nonzero(writeback_rate_update_seconds);
        d_strtoul(writeback_rate_d_term);
        d_strtoul_nonzero(writeback_rate_p_term_inverse);

        d_strtoi_h(sequential_cutoff);
        d_strtoi_h(readahead);

        if (attr == &sysfs_clear_stats)
                bch_cache_accounting_clear(&dc->accounting);

        if (attr == &sysfs_running &&
            strtoul_or_return(buf))
                bch_cached_dev_run(dc);

        if (attr == &sysfs_cache_mode) {
                ssize_t v = bch_read_string_list(buf, bch_cache_modes + 1);

                if (v < 0)
                        return v;

                if ((unsigned) v != BDEV_CACHE_MODE(&dc->sb)) {
                        SET_BDEV_CACHE_MODE(&dc->sb, v);
                        bch_write_bdev_super(dc, NULL);
                }
        }

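        /*
         * Changing the label updates the backing device superblock and,
         * if the device is attached, the label in the cache set's uuid
         * entry; a uevent is emitted afterwards so userspace (e.g. udev
         * rules) can react to the new label.
         */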
        if (attr == &sysfs_label) {
                if (size > SB_LABEL_SIZE)
                        return -EINVAL;
                memcpy(dc->sb.label, buf, size);
                if (size < SB_LABEL_SIZE)
                        dc->sb.label[size] = '\0';
                if (size && dc->sb.label[size - 1] == '\n')
                        dc->sb.label[size - 1] = '\0';
                bch_write_bdev_super(dc, NULL);
                if (dc->disk.c) {
                        memcpy(dc->disk.c->uuids[dc->disk.id].label,
                               buf, SB_LABEL_SIZE);
                        bch_uuid_write(dc->disk.c);
                }
                env = kzalloc(sizeof(struct kobj_uevent_env), GFP_KERNEL);
                if (!env)
                        return -ENOMEM;
                add_uevent_var(env, "DRIVER=bcache");
                add_uevent_var(env, "CACHED_UUID=%pU", dc->sb.uuid);
                add_uevent_var(env, "CACHED_LABEL=%s", buf);
                kobject_uevent_env(
                        &disk_to_dev(dc->disk.disk)->kobj, KOBJ_CHANGE, env->envp);
                kfree(env);
        }

        if (attr == &sysfs_attach) {
                if (bch_parse_uuid(buf, dc->sb.set_uuid) < 16)
                        return -EINVAL;

                list_for_each_entry(c, &bch_cache_sets, list) {
                        v = bch_cached_dev_attach(dc, c);
                        if (!v)
                                return size;
                }

                pr_err("Can't attach %s: cache set not found", buf);
                size = v;
        }

        if (attr == &sysfs_detach && dc->disk.c)
                bch_cached_dev_detach(dc);

        if (attr == &sysfs_stop)
                bcache_device_stop(&dc->disk);

        return size;
}

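/*
 * The locked wrapper around __cached_dev_store(): it serializes stores
 * against registration/unregistration via bch_register_lock and, after
 * the store, wakes the writeback thread or reschedules the rate update
 * worker when the corresponding knobs were written.
 */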
STORE(bch_cached_dev)
{
        struct cached_dev *dc = container_of(kobj, struct cached_dev,
                                             disk.kobj);

        mutex_lock(&bch_register_lock);
        size = __cached_dev_store(kobj, attr, buf, size);

        if (attr == &sysfs_writeback_running)
                bch_writeback_queue(dc);

        if (attr == &sysfs_writeback_percent)
                schedule_delayed_work(&dc->writeback_rate_update,
                                      dc->writeback_rate_update_seconds * HZ);

        mutex_unlock(&bch_register_lock);
        return size;
}

static struct attribute *bch_cached_dev_files[] = {
        &sysfs_attach,
        &sysfs_detach,
        &sysfs_stop,
#if 0
        &sysfs_data_csum,
#endif
        &sysfs_cache_mode,
        &sysfs_writeback_metadata,
        &sysfs_writeback_running,
        &sysfs_writeback_delay,
        &sysfs_writeback_percent,
        &sysfs_writeback_rate,
        &sysfs_writeback_rate_update_seconds,
        &sysfs_writeback_rate_d_term,
        &sysfs_writeback_rate_p_term_inverse,
        &sysfs_writeback_rate_debug,
        &sysfs_dirty_data,
        &sysfs_stripe_size,
        &sysfs_partial_stripes_expensive,
        &sysfs_sequential_cutoff,
        &sysfs_clear_stats,
        &sysfs_running,
        &sysfs_state,
        &sysfs_label,
        &sysfs_readahead,
#ifdef CONFIG_BCACHE_DEBUG
        &sysfs_verify,
        &sysfs_bypass_torture_test,
#endif
        NULL
};
KTYPE(bch_cached_dev);

SHOW(bch_flash_dev)
{
        struct bcache_device *d = container_of(kobj, struct bcache_device,
                                               kobj);
        struct uuid_entry *u = &d->c->uuids[d->id];

        sysfs_printf(data_csum, "%i", d->data_csum);
        sysfs_hprint(size,      u->sectors << 9);

        if (attr == &sysfs_label) {
                memcpy(buf, u->label, SB_LABEL_SIZE);
                buf[SB_LABEL_SIZE] = '\0';
                strcat(buf, "\n");
                return strlen(buf);
        }

        return 0;
}

STORE(__bch_flash_dev)
{
        struct bcache_device *d = container_of(kobj, struct bcache_device,
                                               kobj);
        struct uuid_entry *u = &d->c->uuids[d->id];

        sysfs_strtoul(data_csum,        d->data_csum);

        if (attr == &sysfs_size) {
                uint64_t v;
                strtoi_h_or_return(buf, v);

                u->sectors = v >> 9;
                bch_uuid_write(d->c);
                set_capacity(d->disk, u->sectors);
        }

        if (attr == &sysfs_label) {
                memcpy(u->label, buf, SB_LABEL_SIZE);
                bch_uuid_write(d->c);
        }

        if (attr == &sysfs_unregister) {
                set_bit(BCACHE_DEV_DETACHING, &d->flags);
                bcache_device_stop(d);
        }

        return size;
}
STORE_LOCKED(bch_flash_dev)

static struct attribute *bch_flash_dev_files[] = {
        &sysfs_unregister,
#if 0
        &sysfs_data_csum,
#endif
        &sysfs_label,
        &sysfs_size,
        NULL
};
KTYPE(bch_flash_dev);

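/*
 * bset_tree_stats: walk every node in the btree with
 * bch_btree_map_nodes(), accumulating per-node bset statistics, and
 * format the totals for sysfs.
 */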
struct bset_stats_op {
        struct btree_op op;
        size_t nodes;
        struct bset_stats stats;
};

static int bch_btree_bset_stats(struct btree_op *b_op, struct btree *b)
{
        struct bset_stats_op *op = container_of(b_op, struct bset_stats_op, op);

        op->nodes++;
        bch_btree_keys_stats(&b->keys, &op->stats);

        return MAP_CONTINUE;
}

static int bch_bset_print_stats(struct cache_set *c, char *buf)
{
        struct bset_stats_op op;
        int ret;

        memset(&op, 0, sizeof(op));
        bch_btree_op_init(&op.op, -1);

        ret = bch_btree_map_nodes(&op.op, c, &ZERO_KEY, bch_btree_bset_stats);
        if (ret < 0)
                return ret;

        return snprintf(buf, PAGE_SIZE,
                        "btree nodes:           %zu\n"
                        "written sets:          %zu\n"
                        "unwritten sets:                %zu\n"
                        "written key bytes:     %zu\n"
                        "unwritten key bytes:   %zu\n"
                        "floats:                        %zu\n"
                        "failed:                        %zu\n",
                        op.nodes,
                        op.stats.sets_written, op.stats.sets_unwritten,
                        op.stats.bytes_written, op.stats.bytes_unwritten,
                        op.stats.floats, op.stats.failed);
}

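/*
 * Report how full the root btree node is, as a percentage of a btree
 * node's capacity.  The root can be freed and replaced while we sleep
 * on its lock, so take the read lock, re-check that it is still the
 * root, and retry if it is not.
 */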
static unsigned bch_root_usage(struct cache_set *c)
{
        unsigned bytes = 0;
        struct bkey *k;
        struct btree *b;
        struct btree_iter iter;

        goto lock_root;

        do {
                rw_unlock(false, b);
lock_root:
                b = c->root;
                rw_lock(false, b, b->level);
        } while (b != c->root);

        for_each_key_filter(&b->keys, k, &iter, bch_ptr_bad)
                bytes += bkey_bytes(k);

        rw_unlock(false, b);

        return (bytes * 100) / btree_bytes(c);
}

static size_t bch_cache_size(struct cache_set *c)
{
        size_t ret = 0;
        struct btree *b;

        mutex_lock(&c->bucket_lock);
        list_for_each_entry(b, &c->btree_cache, list)
                ret += 1 << (b->keys.page_order + PAGE_SHIFT);

        mutex_unlock(&c->bucket_lock);
        return ret;
}

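/*
 * Longest collision chain in the in-memory btree node hash table; a
 * rough indicator of how well bucket_hash is distributed.
 */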
static unsigned bch_cache_max_chain(struct cache_set *c)
{
        unsigned ret = 0;
        struct hlist_head *h;

        mutex_lock(&c->bucket_lock);

        for (h = c->bucket_hash;
             h < c->bucket_hash + (1 << BUCKET_HASH_BITS);
             h++) {
                unsigned i = 0;
                struct hlist_node *p;

                hlist_for_each(p, h)
                        i++;

                ret = max(ret, i);
        }

        mutex_unlock(&c->bucket_lock);
        return ret;
}

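/*
 * Both of the following are derived from the statistics gathered by
 * the most recent garbage collection pass (c->gc_stats).
 */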
static unsigned bch_btree_used(struct cache_set *c)
{
        return div64_u64(c->gc_stats.key_bytes * 100,
                         (c->gc_stats.nodes ?: 1) * btree_bytes(c));
}

static unsigned bch_average_key_size(struct cache_set *c)
{
        return c->gc_stats.nkeys
                ? div64_u64(c->gc_stats.data, c->gc_stats.nkeys)
                : 0;
}

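/*
 * Cache set attributes; these files typically appear under
 * /sys/fs/bcache/<set-uuid>/, with the more internal knobs below
 * exposed in the "internal" subdirectory.
 */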
SHOW(__bch_cache_set)
{
        struct cache_set *c = container_of(kobj, struct cache_set, kobj);

        sysfs_print(synchronous,                CACHE_SYNC(&c->sb));
        sysfs_print(journal_delay_ms,           c->journal_delay_ms);
        sysfs_hprint(bucket_size,               bucket_bytes(c));
        sysfs_hprint(block_size,                block_bytes(c));
        sysfs_print(tree_depth,                 c->root->level);
        sysfs_print(root_usage_percent,         bch_root_usage(c));

        sysfs_hprint(btree_cache_size,          bch_cache_size(c));
        sysfs_print(btree_cache_max_chain,      bch_cache_max_chain(c));
        sysfs_print(cache_available_percent,    100 - c->gc_stats.in_use);

        sysfs_print_time_stats(&c->btree_gc_time,       btree_gc, sec, ms);
        sysfs_print_time_stats(&c->btree_split_time,    btree_split, sec, us);
        sysfs_print_time_stats(&c->sort.time,           btree_sort, ms, us);
        sysfs_print_time_stats(&c->btree_read_time,     btree_read, ms, us);

        sysfs_print(btree_used_percent, bch_btree_used(c));
        sysfs_print(btree_nodes,        c->gc_stats.nodes);
        sysfs_hprint(average_key_size,  bch_average_key_size(c));

        sysfs_print(cache_read_races,
                    atomic_long_read(&c->cache_read_races));

        sysfs_print(writeback_keys_done,
                    atomic_long_read(&c->writeback_keys_done));
        sysfs_print(writeback_keys_failed,
                    atomic_long_read(&c->writeback_keys_failed));

        if (attr == &sysfs_errors)
                return bch_snprint_string_list(buf, PAGE_SIZE, error_actions,
                                               c->on_error);

        /* See count_io_errors for why 88 */
        sysfs_print(io_error_halflife,  c->error_decay * 88);
        sysfs_print(io_error_limit,     c->error_limit >> IO_ERROR_SHIFT);

        sysfs_hprint(congested,
                     ((uint64_t) bch_get_congested(c)) << 9);
        sysfs_print(congested_read_threshold_us,
                    c->congested_read_threshold_us);
        sysfs_print(congested_write_threshold_us,
                    c->congested_write_threshold_us);

        sysfs_print(active_journal_entries,     fifo_used(&c->journal.pin));
        sysfs_printf(verify,                    "%i", c->verify);
        sysfs_printf(key_merging_disabled,      "%i", c->key_merging_disabled);
        sysfs_printf(expensive_debug_checks,
                     "%i", c->expensive_debug_checks);
        sysfs_printf(gc_always_rewrite,         "%i", c->gc_always_rewrite);
        sysfs_printf(btree_shrinker_disabled,   "%i", c->shrinker_disabled);
        sysfs_printf(copy_gc_enabled,           "%i", c->copy_gc_enabled);

        if (attr == &sysfs_bset_tree_stats)
                return bch_bset_print_stats(c, buf);

        return 0;
}
SHOW_LOCKED(bch_cache_set)

STORE(__bch_cache_set)
{
        struct cache_set *c = container_of(kobj, struct cache_set, kobj);

        if (attr == &sysfs_unregister)
                bch_cache_set_unregister(c);

        if (attr == &sysfs_stop)
                bch_cache_set_stop(c);

        if (attr == &sysfs_synchronous) {
                bool sync = strtoul_or_return(buf);

                if (sync != CACHE_SYNC(&c->sb)) {
                        SET_CACHE_SYNC(&c->sb, sync);
                        bcache_write_super(c);
                }
        }

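        /*
         * Writing a human-readable size to flash_vol_create creates a
         * flash-only volume backed by this cache set, e.g.
         * "echo 100G > /sys/fs/bcache/<set-uuid>/flash_vol_create"
         * (illustrative; the path depends on the set's UUID).
         */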
        if (attr == &sysfs_flash_vol_create) {
                int r;
                uint64_t v;
                strtoi_h_or_return(buf, v);

                r = bch_flash_dev_create(c, v);
                if (r)
                        return r;
        }

        if (attr == &sysfs_clear_stats) {
                atomic_long_set(&c->writeback_keys_done,        0);
                atomic_long_set(&c->writeback_keys_failed,      0);

                memset(&c->gc_stats, 0, sizeof(struct gc_stat));
                bch_cache_accounting_clear(&c->accounting);
        }

        if (attr == &sysfs_trigger_gc)
                wake_up_gc(c);

        if (attr == &sysfs_prune_cache) {
                struct shrink_control sc;
                sc.gfp_mask = GFP_KERNEL;
                sc.nr_to_scan = strtoul_or_return(buf);
                c->shrink.scan_objects(&c->shrink, &sc);
        }

        sysfs_strtoul(congested_read_threshold_us,
                      c->congested_read_threshold_us);
        sysfs_strtoul(congested_write_threshold_us,
                      c->congested_write_threshold_us);

        if (attr == &sysfs_errors) {
                ssize_t v = bch_read_string_list(buf, error_actions);

                if (v < 0)
                        return v;

                c->on_error = v;
        }

        if (attr == &sysfs_io_error_limit)
                c->error_limit = strtoul_or_return(buf) << IO_ERROR_SHIFT;

        /* See count_io_errors() for why 88 */
        if (attr == &sysfs_io_error_halflife)
                c->error_decay = strtoul_or_return(buf) / 88;

        sysfs_strtoul(journal_delay_ms,         c->journal_delay_ms);
        sysfs_strtoul(verify,                   c->verify);
        sysfs_strtoul(key_merging_disabled,     c->key_merging_disabled);
        sysfs_strtoul(expensive_debug_checks,   c->expensive_debug_checks);
        sysfs_strtoul(gc_always_rewrite,        c->gc_always_rewrite);
        sysfs_strtoul(btree_shrinker_disabled,  c->shrinker_disabled);
        sysfs_strtoul(copy_gc_enabled,          c->copy_gc_enabled);

        return size;
}
STORE_LOCKED(bch_cache_set)

SHOW(bch_cache_set_internal)
{
        struct cache_set *c = container_of(kobj, struct cache_set, internal);
        return bch_cache_set_show(&c->kobj, attr, buf);
}

STORE(bch_cache_set_internal)
{
        struct cache_set *c = container_of(kobj, struct cache_set, internal);
        return bch_cache_set_store(&c->kobj, attr, buf, size);
}

static void bch_cache_set_internal_release(struct kobject *k)
{
}

static struct attribute *bch_cache_set_files[] = {
        &sysfs_unregister,
        &sysfs_stop,
        &sysfs_synchronous,
        &sysfs_journal_delay_ms,
        &sysfs_flash_vol_create,

        &sysfs_bucket_size,
        &sysfs_block_size,
        &sysfs_tree_depth,
        &sysfs_root_usage_percent,
        &sysfs_btree_cache_size,
        &sysfs_cache_available_percent,

        &sysfs_average_key_size,

        &sysfs_errors,
        &sysfs_io_error_limit,
        &sysfs_io_error_halflife,
        &sysfs_congested,
        &sysfs_congested_read_threshold_us,
        &sysfs_congested_write_threshold_us,
        &sysfs_clear_stats,
        NULL
};
KTYPE(bch_cache_set);

static struct attribute *bch_cache_set_internal_files[] = {
        &sysfs_active_journal_entries,

        sysfs_time_stats_attribute_list(btree_gc, sec, ms)
        sysfs_time_stats_attribute_list(btree_split, sec, us)
        sysfs_time_stats_attribute_list(btree_sort, ms, us)
        sysfs_time_stats_attribute_list(btree_read, ms, us)

        &sysfs_btree_nodes,
        &sysfs_btree_used_percent,
        &sysfs_btree_cache_max_chain,

        &sysfs_bset_tree_stats,
        &sysfs_cache_read_races,
        &sysfs_writeback_keys_done,
        &sysfs_writeback_keys_failed,

        &sysfs_trigger_gc,
        &sysfs_prune_cache,
#ifdef CONFIG_BCACHE_DEBUG
        &sysfs_verify,
        &sysfs_key_merging_disabled,
        &sysfs_expensive_debug_checks,
#endif
        &sysfs_gc_always_rewrite,
        &sysfs_btree_shrinker_disabled,
        &sysfs_copy_gc_enabled,
        NULL
};
KTYPE(bch_cache_set_internal);

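/*
 * Cache device attributes; the "bcache" directory under the cache
 * device's block device, typically also reachable as cache<N> under
 * the owning set's /sys/fs/bcache/<set-uuid>/ directory, e.g.
 * "echo 1 > /sys/fs/bcache/<set-uuid>/cache0/discard" (illustrative).
 */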
SHOW(__bch_cache)
{
        struct cache *ca = container_of(kobj, struct cache, kobj);

        sysfs_hprint(bucket_size,       bucket_bytes(ca));
        sysfs_hprint(block_size,        block_bytes(ca));
        sysfs_print(nbuckets,           ca->sb.nbuckets);
        sysfs_print(discard,            ca->discard);
        sysfs_hprint(written, atomic_long_read(&ca->sectors_written) << 9);
        sysfs_hprint(btree_written,
                     atomic_long_read(&ca->btree_sectors_written) << 9);
        sysfs_hprint(metadata_written,
                     (atomic_long_read(&ca->meta_sectors_written) +
                      atomic_long_read(&ca->btree_sectors_written)) << 9);

        sysfs_print(io_errors,
                    atomic_read(&ca->io_errors) >> IO_ERROR_SHIFT);

        if (attr == &sysfs_cache_replacement_policy)
                return bch_snprint_string_list(buf, PAGE_SIZE,
                                               cache_replacement_policies,
                                               CACHE_REPLACEMENT(&ca->sb));

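        /*
         * priority_stats: snapshot every bucket's priority, sort the
         * copy in descending order, strip out unused buckets and
         * buckets holding btree metadata (BTREE_PRIO), then report
         * bucket usage percentages, the average "age"
         * (INITIAL_PRIO - prio) of cached buckets, and 31 evenly
         * spaced quantiles of that age.
         */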
        if (attr == &sysfs_priority_stats) {
                int cmp(const void *l, const void *r)
                {       return *((uint16_t *) r) - *((uint16_t *) l); }

                struct bucket *b;
                size_t n = ca->sb.nbuckets, i;
                size_t unused = 0, available = 0, dirty = 0, meta = 0;
                uint64_t sum = 0;
                /* Compute 31 quantiles */
                uint16_t q[31], *p, *cached;
                ssize_t ret;

                cached = p = vmalloc(ca->sb.nbuckets * sizeof(uint16_t));
                if (!p)
                        return -ENOMEM;

                mutex_lock(&ca->set->bucket_lock);
                for_each_bucket(b, ca) {
                        if (!GC_SECTORS_USED(b))
                                unused++;
                        if (GC_MARK(b) == GC_MARK_RECLAIMABLE)
                                available++;
                        if (GC_MARK(b) == GC_MARK_DIRTY)
                                dirty++;
                        if (GC_MARK(b) == GC_MARK_METADATA)
                                meta++;
                }

                for (i = ca->sb.first_bucket; i < n; i++)
                        p[i] = ca->buckets[i].prio;
                mutex_unlock(&ca->set->bucket_lock);

                sort(p, n, sizeof(uint16_t), cmp, NULL);

                while (n &&
                       !cached[n - 1])
                        --n;

                unused = ca->sb.nbuckets - n;

                while (cached < p + n &&
                       *cached == BTREE_PRIO)
                        cached++, n--;

                for (i = 0; i < n; i++)
                        sum += INITIAL_PRIO - cached[i];

                if (n)
                        do_div(sum, n);

                for (i = 0; i < ARRAY_SIZE(q); i++)
                        q[i] = INITIAL_PRIO - cached[n * (i + 1) /
                                (ARRAY_SIZE(q) + 1)];

                vfree(p);

                ret = scnprintf(buf, PAGE_SIZE,
                                "Unused:                %zu%%\n"
                                "Clean:         %zu%%\n"
                                "Dirty:         %zu%%\n"
                                "Metadata:      %zu%%\n"
                                "Average:       %llu\n"
                                "Sectors per Q: %zu\n"
                                "Quantiles:     [",
                                unused * 100 / (size_t) ca->sb.nbuckets,
                                available * 100 / (size_t) ca->sb.nbuckets,
                                dirty * 100 / (size_t) ca->sb.nbuckets,
                                meta * 100 / (size_t) ca->sb.nbuckets, sum,
                                n * ca->sb.bucket_size / (ARRAY_SIZE(q) + 1));

                for (i = 0; i < ARRAY_SIZE(q); i++)
                        ret += scnprintf(buf + ret, PAGE_SIZE - ret,
                                         "%u ", q[i]);
                ret--;

                ret += scnprintf(buf + ret, PAGE_SIZE - ret, "]\n");

                return ret;
        }

        return 0;
}
SHOW_LOCKED(bch_cache)

STORE(__bch_cache)
{
        struct cache *ca = container_of(kobj, struct cache, kobj);

        if (attr == &sysfs_discard) {
                bool v = strtoul_or_return(buf);

                if (blk_queue_discard(bdev_get_queue(ca->bdev)))
                        ca->discard = v;

                if (v != CACHE_DISCARD(&ca->sb)) {
                        SET_CACHE_DISCARD(&ca->sb, v);
                        bcache_write_super(ca->set);
                }
        }

        if (attr == &sysfs_cache_replacement_policy) {
                ssize_t v = bch_read_string_list(buf, cache_replacement_policies);

                if (v < 0)
                        return v;

                if ((unsigned) v != CACHE_REPLACEMENT(&ca->sb)) {
                        mutex_lock(&ca->set->bucket_lock);
                        SET_CACHE_REPLACEMENT(&ca->sb, v);
                        mutex_unlock(&ca->set->bucket_lock);

                        bcache_write_super(ca->set);
                }
        }

        if (attr == &sysfs_clear_stats) {
                atomic_long_set(&ca->sectors_written, 0);
                atomic_long_set(&ca->btree_sectors_written, 0);
                atomic_long_set(&ca->meta_sectors_written, 0);
                atomic_set(&ca->io_count, 0);
                atomic_set(&ca->io_errors, 0);
        }

        return size;
}
STORE_LOCKED(bch_cache)

static struct attribute *bch_cache_files[] = {
        &sysfs_bucket_size,
        &sysfs_block_size,
        &sysfs_nbuckets,
        &sysfs_priority_stats,
        &sysfs_discard,
        &sysfs_written,
        &sysfs_btree_written,
        &sysfs_metadata_written,
        &sysfs_io_errors,
        &sysfs_clear_stats,
        &sysfs_cache_replacement_policy,
        NULL
};
KTYPE(bch_cache);