linux/drivers/md/bcache/sysfs.c
// SPDX-License-Identifier: GPL-2.0
/*
 * bcache sysfs interfaces
 *
 * Copyright 2010, 2011 Kent Overstreet <kent.overstreet@gmail.com>
 * Copyright 2012 Google, Inc.
 */

#include "bcache.h"
#include "sysfs.h"
#include "btree.h"
#include "request.h"
#include "writeback.h"

#include <linux/blkdev.h>
#include <linux/sort.h>
#include <linux/sched/clock.h>

extern bool bcache_is_reboot;

/* Default is 0 ("writethrough") */
static const char * const bch_cache_modes[] = {
        "writethrough",
        "writeback",
        "writearound",
        "none",
        NULL
};

static const char * const bch_reada_cache_policies[] = {
        "all",
        "meta-only",
        NULL
};

/* Default is 0 ("auto") */
static const char * const bch_stop_on_failure_modes[] = {
        "auto",
        "always",
        NULL
};

static const char * const cache_replacement_policies[] = {
        "lru",
        "fifo",
        "random",
        NULL
};

static const char * const error_actions[] = {
        "unregister",
        "panic",
        NULL
};

write_attribute(attach);
write_attribute(detach);
write_attribute(unregister);
write_attribute(stop);
write_attribute(clear_stats);
write_attribute(trigger_gc);
write_attribute(prune_cache);
write_attribute(flash_vol_create);

read_attribute(bucket_size);
read_attribute(block_size);
read_attribute(nbuckets);
read_attribute(tree_depth);
read_attribute(root_usage_percent);
read_attribute(priority_stats);
read_attribute(btree_cache_size);
read_attribute(btree_cache_max_chain);
read_attribute(cache_available_percent);
read_attribute(written);
read_attribute(btree_written);
read_attribute(metadata_written);
read_attribute(active_journal_entries);
read_attribute(backing_dev_name);
read_attribute(backing_dev_uuid);

sysfs_time_stats_attribute(btree_gc,    sec, ms);
sysfs_time_stats_attribute(btree_split, sec, us);
sysfs_time_stats_attribute(btree_sort,  ms,  us);
sysfs_time_stats_attribute(btree_read,  ms,  us);

read_attribute(btree_nodes);
read_attribute(btree_used_percent);
read_attribute(average_key_size);
read_attribute(dirty_data);
read_attribute(bset_tree_stats);

read_attribute(state);
read_attribute(cache_read_races);
read_attribute(reclaim);
read_attribute(reclaimed_journal_buckets);
read_attribute(flush_write);
read_attribute(writeback_keys_done);
read_attribute(writeback_keys_failed);
read_attribute(io_errors);
read_attribute(congested);
read_attribute(cutoff_writeback);
read_attribute(cutoff_writeback_sync);
rw_attribute(congested_read_threshold_us);
rw_attribute(congested_write_threshold_us);

rw_attribute(sequential_cutoff);
rw_attribute(data_csum);
rw_attribute(cache_mode);
rw_attribute(readahead_cache_policy);
rw_attribute(stop_when_cache_set_failed);
rw_attribute(writeback_metadata);
rw_attribute(writeback_running);
rw_attribute(writeback_percent);
rw_attribute(writeback_delay);
rw_attribute(writeback_rate);

rw_attribute(writeback_rate_update_seconds);
rw_attribute(writeback_rate_i_term_inverse);
rw_attribute(writeback_rate_p_term_inverse);
rw_attribute(writeback_rate_minimum);
read_attribute(writeback_rate_debug);

read_attribute(stripe_size);
read_attribute(partial_stripes_expensive);

rw_attribute(synchronous);
rw_attribute(journal_delay_ms);
rw_attribute(io_disable);
rw_attribute(discard);
rw_attribute(running);
rw_attribute(label);
rw_attribute(readahead);
rw_attribute(errors);
rw_attribute(io_error_limit);
rw_attribute(io_error_halflife);
rw_attribute(verify);
rw_attribute(bypass_torture_test);
rw_attribute(key_merging_disabled);
rw_attribute(gc_always_rewrite);
rw_attribute(expensive_debug_checks);
rw_attribute(cache_replacement_policy);
rw_attribute(btree_shrinker_disabled);
rw_attribute(copy_gc_enabled);
rw_attribute(idle_max_writeback_rate);
rw_attribute(gc_after_writeback);
rw_attribute(size);

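/*
 * Print a NULL-terminated array of strings into buf, bracketing the
 * selected entry (e.g. "writethrough [writeback] writearound none"),
 * and replace the trailing space with a newline.
 */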
static ssize_t bch_snprint_string_list(char *buf,
                                       size_t size,
                                       const char * const list[],
                                       size_t selected)
{
        char *out = buf;
        size_t i;

        for (i = 0; list[i]; i++)
                out += scnprintf(out, buf + size - out,
                                i == selected ? "[%s] " : "%s ", list[i]);

        out[-1] = '\n';
        return out - buf;
}

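/*
 * SHOW()/STORE() (defined in sysfs.h) expand into the sysfs ->show()
 * and ->store() callbacks; each sysfs_*()/var_*() macro below compares
 * attr against its attribute and returns early on a match, so these
 * function bodies read as one big dispatch table.
 */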
SHOW(__bch_cached_dev)
{
        struct cached_dev *dc = container_of(kobj, struct cached_dev,
                                             disk.kobj);
        char const *states[] = { "no cache", "clean", "dirty", "inconsistent" };
        int wb = dc->writeback_running;

#define var(stat)               (dc->stat)

        if (attr == &sysfs_cache_mode)
                return bch_snprint_string_list(buf, PAGE_SIZE,
                                               bch_cache_modes,
                                               BDEV_CACHE_MODE(&dc->sb));

        if (attr == &sysfs_readahead_cache_policy)
                return bch_snprint_string_list(buf, PAGE_SIZE,
                                               bch_reada_cache_policies,
                                               dc->cache_readahead_policy);

        if (attr == &sysfs_stop_when_cache_set_failed)
                return bch_snprint_string_list(buf, PAGE_SIZE,
                                               bch_stop_on_failure_modes,
                                               dc->stop_when_cache_set_failed);

        sysfs_printf(data_csum,         "%i", dc->disk.data_csum);
        var_printf(verify,              "%i");
        var_printf(bypass_torture_test, "%i");
        var_printf(writeback_metadata,  "%i");
        var_printf(writeback_running,   "%i");
        var_print(writeback_delay);
        var_print(writeback_percent);
        sysfs_hprint(writeback_rate,
                     wb ? atomic_long_read(&dc->writeback_rate.rate) << 9 : 0);
        sysfs_printf(io_errors,         "%i", atomic_read(&dc->io_errors));
        sysfs_printf(io_error_limit,    "%i", dc->error_limit);
        sysfs_printf(io_disable,        "%i", dc->io_disable);
        var_print(writeback_rate_update_seconds);
        var_print(writeback_rate_i_term_inverse);
        var_print(writeback_rate_p_term_inverse);
        var_print(writeback_rate_minimum);

        if (attr == &sysfs_writeback_rate_debug) {
                char rate[20];
                char dirty[20];
                char target[20];
                char proportional[20];
                char integral[20];
                char change[20];
                s64 next_io;

                /*
                 * Except for dirty and target, other values should
                 * be 0 if writeback is not running.
                 */
                bch_hprint(rate,
                           wb ? atomic_long_read(&dc->writeback_rate.rate) << 9
                              : 0);
                bch_hprint(dirty, bcache_dev_sectors_dirty(&dc->disk) << 9);
                bch_hprint(target, dc->writeback_rate_target << 9);
                bch_hprint(proportional,
                           wb ? dc->writeback_rate_proportional << 9 : 0);
                bch_hprint(integral,
                           wb ? dc->writeback_rate_integral_scaled << 9 : 0);
                bch_hprint(change, wb ? dc->writeback_rate_change << 9 : 0);
                next_io = wb ? div64_s64(dc->writeback_rate.next - local_clock(),
                                         NSEC_PER_MSEC) : 0;

                return sprintf(buf,
                               "rate:\t\t%s/sec\n"
                               "dirty:\t\t%s\n"
                               "target:\t\t%s\n"
                               "proportional:\t%s\n"
                               "integral:\t%s\n"
                               "change:\t\t%s/sec\n"
                               "next io:\t%llims\n",
                               rate, dirty, target, proportional,
                               integral, change, next_io);
        }

        sysfs_hprint(dirty_data,
                     bcache_dev_sectors_dirty(&dc->disk) << 9);

        sysfs_hprint(stripe_size,        ((uint64_t)dc->disk.stripe_size) << 9);
        var_printf(partial_stripes_expensive,   "%u");

        var_hprint(sequential_cutoff);
        var_hprint(readahead);

        sysfs_print(running,            atomic_read(&dc->running));
        sysfs_print(state,              states[BDEV_STATE(&dc->sb)]);

        if (attr == &sysfs_label) {
                memcpy(buf, dc->sb.label, SB_LABEL_SIZE);
                buf[SB_LABEL_SIZE] = '\0';
                strcat(buf, "\n");
                return strlen(buf);
        }

        if (attr == &sysfs_backing_dev_name) {
                snprintf(buf, BDEVNAME_SIZE + 1, "%s", dc->backing_dev_name);
                strcat(buf, "\n");
                return strlen(buf);
        }

        if (attr == &sysfs_backing_dev_uuid) {
                /* convert binary uuid into 36-byte string plus '\0' */
                snprintf(buf, 36+1, "%pU", dc->sb.uuid);
                strcat(buf, "\n");
                return strlen(buf);
        }

#undef var
        return 0;
}
SHOW_LOCKED(bch_cached_dev)

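/*
 * ->store() path for a cached device.  Attributes that change on-disk
 * state (e.g. cache_mode, label) also rewrite the backing device
 * superblock; the d_strtoul*() wrappers parse and clamp numeric input
 * into the corresponding cached_dev fields.
 */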
STORE(__cached_dev)
{
        struct cached_dev *dc = container_of(kobj, struct cached_dev,
                                             disk.kobj);
        ssize_t v;
        struct cache_set *c;
        struct kobj_uevent_env *env;

        /* no user space access if system is rebooting */
        if (bcache_is_reboot)
                return -EBUSY;

#define d_strtoul(var)          sysfs_strtoul(var, dc->var)
#define d_strtoul_nonzero(var)  sysfs_strtoul_clamp(var, dc->var, 1, INT_MAX)
#define d_strtoi_h(var)         sysfs_hatoi(var, dc->var)

        sysfs_strtoul(data_csum,        dc->disk.data_csum);
        d_strtoul(verify);
        sysfs_strtoul_bool(bypass_torture_test, dc->bypass_torture_test);
        sysfs_strtoul_bool(writeback_metadata, dc->writeback_metadata);
        sysfs_strtoul_bool(writeback_running, dc->writeback_running);
        sysfs_strtoul_clamp(writeback_delay, dc->writeback_delay, 0, UINT_MAX);

        sysfs_strtoul_clamp(writeback_percent, dc->writeback_percent,
                            0, bch_cutoff_writeback);

        if (attr == &sysfs_writeback_rate) {
                ssize_t ret;
                long int v = atomic_long_read(&dc->writeback_rate.rate);

                ret = strtoul_safe_clamp(buf, v, 1, INT_MAX);

                if (!ret) {
                        atomic_long_set(&dc->writeback_rate.rate, v);
                        ret = size;
                }

                return ret;
        }

        sysfs_strtoul_clamp(writeback_rate_update_seconds,
                            dc->writeback_rate_update_seconds,
                            1, WRITEBACK_RATE_UPDATE_SECS_MAX);
        sysfs_strtoul_clamp(writeback_rate_i_term_inverse,
                            dc->writeback_rate_i_term_inverse,
                            1, UINT_MAX);
        sysfs_strtoul_clamp(writeback_rate_p_term_inverse,
                            dc->writeback_rate_p_term_inverse,
                            1, UINT_MAX);
        sysfs_strtoul_clamp(writeback_rate_minimum,
                            dc->writeback_rate_minimum,
                            1, UINT_MAX);

        sysfs_strtoul_clamp(io_error_limit, dc->error_limit, 0, INT_MAX);

        if (attr == &sysfs_io_disable) {
                int v = strtoul_or_return(buf);

                dc->io_disable = v ? 1 : 0;
        }

        sysfs_strtoul_clamp(sequential_cutoff,
                            dc->sequential_cutoff,
                            0, UINT_MAX);
        d_strtoi_h(readahead);

        if (attr == &sysfs_clear_stats)
                bch_cache_accounting_clear(&dc->accounting);

        if (attr == &sysfs_running &&
            strtoul_or_return(buf)) {
                v = bch_cached_dev_run(dc);
                if (v)
                        return v;
        }

        if (attr == &sysfs_cache_mode) {
                v = __sysfs_match_string(bch_cache_modes, -1, buf);
                if (v < 0)
                        return v;

                if ((unsigned int) v != BDEV_CACHE_MODE(&dc->sb)) {
                        SET_BDEV_CACHE_MODE(&dc->sb, v);
                        bch_write_bdev_super(dc, NULL);
                }
        }

        if (attr == &sysfs_readahead_cache_policy) {
                v = __sysfs_match_string(bch_reada_cache_policies, -1, buf);
                if (v < 0)
                        return v;

                if ((unsigned int) v != dc->cache_readahead_policy)
                        dc->cache_readahead_policy = v;
        }

        if (attr == &sysfs_stop_when_cache_set_failed) {
                v = __sysfs_match_string(bch_stop_on_failure_modes, -1, buf);
                if (v < 0)
                        return v;

                dc->stop_when_cache_set_failed = v;
        }

        if (attr == &sysfs_label) {
                if (size > SB_LABEL_SIZE)
                        return -EINVAL;
                memcpy(dc->sb.label, buf, size);
                if (size < SB_LABEL_SIZE)
                        dc->sb.label[size] = '\0';
                if (size && dc->sb.label[size - 1] == '\n')
                        dc->sb.label[size - 1] = '\0';
                bch_write_bdev_super(dc, NULL);
                if (dc->disk.c) {
                        memcpy(dc->disk.c->uuids[dc->disk.id].label,
                               buf, SB_LABEL_SIZE);
                        bch_uuid_write(dc->disk.c);
                }
                env = kzalloc(sizeof(struct kobj_uevent_env), GFP_KERNEL);
                if (!env)
                        return -ENOMEM;
                add_uevent_var(env, "DRIVER=bcache");
                add_uevent_var(env, "CACHED_UUID=%pU", dc->sb.uuid);
                add_uevent_var(env, "CACHED_LABEL=%s", buf);
                kobject_uevent_env(&disk_to_dev(dc->disk.disk)->kobj,
                                   KOBJ_CHANGE,
                                   env->envp);
                kfree(env);
        }

        if (attr == &sysfs_attach) {
                uint8_t         set_uuid[16];

                if (bch_parse_uuid(buf, set_uuid) < 16)
                        return -EINVAL;

                v = -ENOENT;
                list_for_each_entry(c, &bch_cache_sets, list) {
                        v = bch_cached_dev_attach(dc, c, set_uuid);
                        if (!v)
                                return size;
                }
                if (v == -ENOENT)
                        pr_err("Can't attach %s: cache set not found\n", buf);
                return v;
        }

        if (attr == &sysfs_detach && dc->disk.c)
                bch_cached_dev_detach(dc);

        if (attr == &sysfs_stop)
                bcache_device_stop(&dc->disk);

        return size;
}

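/*
 * Locked wrapper around __cached_dev_store(): takes bch_register_lock,
 * then does the follow-up work that depends on the freshly written
 * values: waking the writeback thread and scheduling the writeback
 * rate update worker.
 */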
STORE(bch_cached_dev)
{
        struct cached_dev *dc = container_of(kobj, struct cached_dev,
                                             disk.kobj);

        /* no user space access if system is rebooting */
        if (bcache_is_reboot)
                return -EBUSY;

        mutex_lock(&bch_register_lock);
        size = __cached_dev_store(kobj, attr, buf, size);

        if (attr == &sysfs_writeback_running) {
                /* dc->writeback_running changed in __cached_dev_store() */
                if (IS_ERR_OR_NULL(dc->writeback_thread)) {
                        /*
                         * Reject setting it to 1 via sysfs if the
                         * writeback kthread has not been created yet.
                         */
                        if (dc->writeback_running) {
                                dc->writeback_running = false;
                                pr_err("%s: failed to run non-existent writeback thread\n",
                                                dc->disk.disk->disk_name);
                        }
                } else
                        /*
                         * The writeback kthread itself checks whether
                         * dc->writeback_running is true or false.
                         */
                        bch_writeback_queue(dc);
        }

        /*
         * Only set BCACHE_DEV_WB_RUNNING when the cached device is
         * attached to a cache set; otherwise it makes no sense.
         */
        if (attr == &sysfs_writeback_percent)
                if ((dc->disk.c != NULL) &&
                    (!test_and_set_bit(BCACHE_DEV_WB_RUNNING, &dc->disk.flags)))
                        schedule_delayed_work(&dc->writeback_rate_update,
                                      dc->writeback_rate_update_seconds * HZ);

        mutex_unlock(&bch_register_lock);
        return size;
}

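/*
 * Attributes exported for a cached device; these typically appear as
 * /sys/block/bcache<N>/bcache/<attribute> (and under the backing
 * device's bcache/ directory).
 */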
static struct attribute *bch_cached_dev_files[] = {
        &sysfs_attach,
        &sysfs_detach,
        &sysfs_stop,
#if 0
        &sysfs_data_csum,
#endif
        &sysfs_cache_mode,
        &sysfs_readahead_cache_policy,
        &sysfs_stop_when_cache_set_failed,
        &sysfs_writeback_metadata,
        &sysfs_writeback_running,
        &sysfs_writeback_delay,
        &sysfs_writeback_percent,
        &sysfs_writeback_rate,
        &sysfs_writeback_rate_update_seconds,
        &sysfs_writeback_rate_i_term_inverse,
        &sysfs_writeback_rate_p_term_inverse,
        &sysfs_writeback_rate_minimum,
        &sysfs_writeback_rate_debug,
        &sysfs_io_errors,
        &sysfs_io_error_limit,
        &sysfs_io_disable,
        &sysfs_dirty_data,
        &sysfs_stripe_size,
        &sysfs_partial_stripes_expensive,
        &sysfs_sequential_cutoff,
        &sysfs_clear_stats,
        &sysfs_running,
        &sysfs_state,
        &sysfs_label,
        &sysfs_readahead,
#ifdef CONFIG_BCACHE_DEBUG
        &sysfs_verify,
        &sysfs_bypass_torture_test,
#endif
        &sysfs_backing_dev_name,
        &sysfs_backing_dev_uuid,
        NULL
};
KTYPE(bch_cached_dev);

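/*
 * Flash-only volumes: bcache devices carved directly out of cache set
 * space, with no backing device.  Their size and label live in the
 * cache set's uuid entry.
 */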
SHOW(bch_flash_dev)
{
        struct bcache_device *d = container_of(kobj, struct bcache_device,
                                               kobj);
        struct uuid_entry *u = &d->c->uuids[d->id];

        sysfs_printf(data_csum, "%i", d->data_csum);
        sysfs_hprint(size,      u->sectors << 9);

        if (attr == &sysfs_label) {
                memcpy(buf, u->label, SB_LABEL_SIZE);
                buf[SB_LABEL_SIZE] = '\0';
                strcat(buf, "\n");
                return strlen(buf);
        }

        return 0;
}

STORE(__bch_flash_dev)
{
        struct bcache_device *d = container_of(kobj, struct bcache_device,
                                               kobj);
        struct uuid_entry *u = &d->c->uuids[d->id];

        /* no user space access if system is rebooting */
        if (bcache_is_reboot)
                return -EBUSY;

        sysfs_strtoul(data_csum,        d->data_csum);

        if (attr == &sysfs_size) {
                uint64_t v;

                strtoi_h_or_return(buf, v);

                u->sectors = v >> 9;
                bch_uuid_write(d->c);
                set_capacity(d->disk, u->sectors);
        }

        if (attr == &sysfs_label) {
                memcpy(u->label, buf, SB_LABEL_SIZE);
                bch_uuid_write(d->c);
        }

        if (attr == &sysfs_unregister) {
                set_bit(BCACHE_DEV_DETACHING, &d->flags);
                bcache_device_stop(d);
        }

        return size;
}
STORE_LOCKED(bch_flash_dev)

static struct attribute *bch_flash_dev_files[] = {
        &sysfs_unregister,
#if 0
        &sysfs_data_csum,
#endif
        &sysfs_label,
        &sysfs_size,
        NULL
};
KTYPE(bch_flash_dev);

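/*
 * bset_tree_stats: walk every btree node with bch_btree_map_nodes()
 * and accumulate per-node bset statistics into a single report.
 */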
struct bset_stats_op {
        struct btree_op op;
        size_t nodes;
        struct bset_stats stats;
};

static int bch_btree_bset_stats(struct btree_op *b_op, struct btree *b)
{
        struct bset_stats_op *op = container_of(b_op, struct bset_stats_op, op);

        op->nodes++;
        bch_btree_keys_stats(&b->keys, &op->stats);

        return MAP_CONTINUE;
}

static int bch_bset_print_stats(struct cache_set *c, char *buf)
{
        struct bset_stats_op op;
        int ret;

        memset(&op, 0, sizeof(op));
        bch_btree_op_init(&op.op, -1);

        ret = bch_btree_map_nodes(&op.op, c, &ZERO_KEY, bch_btree_bset_stats);
        if (ret < 0)
                return ret;

        return snprintf(buf, PAGE_SIZE,
                        "btree nodes:           %zu\n"
                        "written sets:          %zu\n"
                        "unwritten sets:                %zu\n"
                        "written key bytes:     %zu\n"
                        "unwritten key bytes:   %zu\n"
                        "floats:                        %zu\n"
                        "failed:                        %zu\n",
                        op.nodes,
                        op.stats.sets_written, op.stats.sets_unwritten,
                        op.stats.bytes_written, op.stats.bytes_unwritten,
                        op.stats.floats, op.stats.failed);
}

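/*
 * Sum the bytes of all good keys in the root node and report them as
 * a percentage of the btree node size.  The lock/retry loop is needed
 * because the root may be replaced while we wait for the read lock.
 */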
static unsigned int bch_root_usage(struct cache_set *c)
{
        unsigned int bytes = 0;
        struct bkey *k;
        struct btree *b;
        struct btree_iter iter;

        goto lock_root;

        do {
                rw_unlock(false, b);
lock_root:
                b = c->root;
                rw_lock(false, b, b->level);
        } while (b != c->root);

        for_each_key_filter(&b->keys, k, &iter, bch_ptr_bad)
                bytes += bkey_bytes(k);

        rw_unlock(false, b);

        return (bytes * 100) / btree_bytes(c);
}

static size_t bch_cache_size(struct cache_set *c)
{
        size_t ret = 0;
        struct btree *b;

        mutex_lock(&c->bucket_lock);
        list_for_each_entry(b, &c->btree_cache, list)
                ret += 1 << (b->keys.page_order + PAGE_SHIFT);

        mutex_unlock(&c->bucket_lock);
        return ret;
}

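/*
 * Length of the longest collision chain in the bucket hash table;
 * mostly useful as a debugging hint that the hash is overloaded.
 */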
static unsigned int bch_cache_max_chain(struct cache_set *c)
{
        unsigned int ret = 0;
        struct hlist_head *h;

        mutex_lock(&c->bucket_lock);

        for (h = c->bucket_hash;
             h < c->bucket_hash + (1 << BUCKET_HASH_BITS);
             h++) {
                unsigned int i = 0;
                struct hlist_node *p;

                hlist_for_each(p, h)
                        i++;

                ret = max(ret, i);
        }

        mutex_unlock(&c->bucket_lock);
        return ret;
}

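/*
 * The two helpers below are derived from the statistics gathered by
 * the last garbage collection pass: btree utilization as a percentage
 * of total node space, and the mean data bytes per key.
 */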
static unsigned int bch_btree_used(struct cache_set *c)
{
        return div64_u64(c->gc_stats.key_bytes * 100,
                         (c->gc_stats.nodes ?: 1) * btree_bytes(c));
}

static unsigned int bch_average_key_size(struct cache_set *c)
{
        return c->gc_stats.nkeys
                ? div64_u64(c->gc_stats.data, c->gc_stats.nkeys)
                : 0;
}

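/*
 * ->show() for a cache set, typically reachable via
 * /sys/fs/bcache/<set-uuid>/.  The "internal" kobject further down
 * re-dispatches to these same show/store callbacks.
 */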
SHOW(__bch_cache_set)
{
        struct cache_set *c = container_of(kobj, struct cache_set, kobj);

        sysfs_print(synchronous,                CACHE_SYNC(&c->sb));
        sysfs_print(journal_delay_ms,           c->journal_delay_ms);
        sysfs_hprint(bucket_size,               bucket_bytes(c));
        sysfs_hprint(block_size,                block_bytes(c));
        sysfs_print(tree_depth,                 c->root->level);
        sysfs_print(root_usage_percent,         bch_root_usage(c));

        sysfs_hprint(btree_cache_size,          bch_cache_size(c));
        sysfs_print(btree_cache_max_chain,      bch_cache_max_chain(c));
        sysfs_print(cache_available_percent,    100 - c->gc_stats.in_use);

        sysfs_print_time_stats(&c->btree_gc_time,       btree_gc, sec, ms);
        sysfs_print_time_stats(&c->btree_split_time,    btree_split, sec, us);
        sysfs_print_time_stats(&c->sort.time,           btree_sort, ms, us);
        sysfs_print_time_stats(&c->btree_read_time,     btree_read, ms, us);

        sysfs_print(btree_used_percent, bch_btree_used(c));
        sysfs_print(btree_nodes,        c->gc_stats.nodes);
        sysfs_hprint(average_key_size,  bch_average_key_size(c));

        sysfs_print(cache_read_races,
                    atomic_long_read(&c->cache_read_races));

        sysfs_print(reclaim,
                    atomic_long_read(&c->reclaim));

        sysfs_print(reclaimed_journal_buckets,
                    atomic_long_read(&c->reclaimed_journal_buckets));

        sysfs_print(flush_write,
                    atomic_long_read(&c->flush_write));

        sysfs_print(writeback_keys_done,
                    atomic_long_read(&c->writeback_keys_done));
        sysfs_print(writeback_keys_failed,
                    atomic_long_read(&c->writeback_keys_failed));

        if (attr == &sysfs_errors)
                return bch_snprint_string_list(buf, PAGE_SIZE, error_actions,
                                               c->on_error);

        /* See count_io_errors() for why 88 */
        sysfs_print(io_error_halflife,  c->error_decay * 88);
        sysfs_print(io_error_limit,     c->error_limit);

        sysfs_hprint(congested,
                     ((uint64_t) bch_get_congested(c)) << 9);
        sysfs_print(congested_read_threshold_us,
                    c->congested_read_threshold_us);
        sysfs_print(congested_write_threshold_us,
                    c->congested_write_threshold_us);

        sysfs_print(cutoff_writeback, bch_cutoff_writeback);
        sysfs_print(cutoff_writeback_sync, bch_cutoff_writeback_sync);

        sysfs_print(active_journal_entries,     fifo_used(&c->journal.pin));
        sysfs_printf(verify,                    "%i", c->verify);
        sysfs_printf(key_merging_disabled,      "%i", c->key_merging_disabled);
        sysfs_printf(expensive_debug_checks,
                     "%i", c->expensive_debug_checks);
        sysfs_printf(gc_always_rewrite,         "%i", c->gc_always_rewrite);
        sysfs_printf(btree_shrinker_disabled,   "%i", c->shrinker_disabled);
        sysfs_printf(copy_gc_enabled,           "%i", c->copy_gc_enabled);
        sysfs_printf(idle_max_writeback_rate,   "%i",
                     c->idle_max_writeback_rate_enabled);
        sysfs_printf(gc_after_writeback,        "%i", c->gc_after_writeback);
        sysfs_printf(io_disable,                "%i",
                     test_bit(CACHE_SET_IO_DISABLE, &c->flags));

        if (attr == &sysfs_bset_tree_stats)
                return bch_bset_print_stats(c, buf);

        return 0;
}
SHOW_LOCKED(bch_cache_set)

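/*
 * ->store() for a cache set: destructive controls (unregister, stop,
 * flash_vol_create) plus runtime tunables.  STORE_LOCKED() below wraps
 * this under bch_register_lock.
 */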
STORE(__bch_cache_set)
{
        struct cache_set *c = container_of(kobj, struct cache_set, kobj);
        ssize_t v;

        /* no user space access if system is rebooting */
        if (bcache_is_reboot)
                return -EBUSY;

        if (attr == &sysfs_unregister)
                bch_cache_set_unregister(c);

        if (attr == &sysfs_stop)
                bch_cache_set_stop(c);

        if (attr == &sysfs_synchronous) {
                bool sync = strtoul_or_return(buf);

                if (sync != CACHE_SYNC(&c->sb)) {
                        SET_CACHE_SYNC(&c->sb, sync);
                        bcache_write_super(c);
                }
        }

        if (attr == &sysfs_flash_vol_create) {
                int r;
                uint64_t v;

                strtoi_h_or_return(buf, v);

                r = bch_flash_dev_create(c, v);
                if (r)
                        return r;
        }

        if (attr == &sysfs_clear_stats) {
                atomic_long_set(&c->writeback_keys_done,        0);
                atomic_long_set(&c->writeback_keys_failed,      0);

                memset(&c->gc_stats, 0, sizeof(struct gc_stat));
                bch_cache_accounting_clear(&c->accounting);
        }

        if (attr == &sysfs_trigger_gc)
                force_wake_up_gc(c);

        if (attr == &sysfs_prune_cache) {
                struct shrink_control sc;

                sc.gfp_mask = GFP_KERNEL;
                sc.nr_to_scan = strtoul_or_return(buf);
                c->shrink.scan_objects(&c->shrink, &sc);
        }

        sysfs_strtoul_clamp(congested_read_threshold_us,
                            c->congested_read_threshold_us,
                            0, UINT_MAX);
        sysfs_strtoul_clamp(congested_write_threshold_us,
                            c->congested_write_threshold_us,
                            0, UINT_MAX);

        if (attr == &sysfs_errors) {
                v = __sysfs_match_string(error_actions, -1, buf);
                if (v < 0)
                        return v;

                c->on_error = v;
        }

        sysfs_strtoul_clamp(io_error_limit, c->error_limit, 0, UINT_MAX);

        /* See count_io_errors() for why 88 */
        if (attr == &sysfs_io_error_halflife) {
                unsigned long v = 0;
                ssize_t ret;

                ret = strtoul_safe_clamp(buf, v, 0, UINT_MAX);
                if (!ret) {
                        c->error_decay = v / 88;
                        return size;
                }
                return ret;
        }

        if (attr == &sysfs_io_disable) {
                v = strtoul_or_return(buf);
                if (v) {
                        if (test_and_set_bit(CACHE_SET_IO_DISABLE,
                                             &c->flags))
                                pr_warn("CACHE_SET_IO_DISABLE already set\n");
                } else {
                        if (!test_and_clear_bit(CACHE_SET_IO_DISABLE,
                                                &c->flags))
                                pr_warn("CACHE_SET_IO_DISABLE already cleared\n");
                }
        }

        sysfs_strtoul_clamp(journal_delay_ms,
                            c->journal_delay_ms,
                            0, USHRT_MAX);
        sysfs_strtoul_bool(verify,              c->verify);
        sysfs_strtoul_bool(key_merging_disabled, c->key_merging_disabled);
        sysfs_strtoul(expensive_debug_checks,   c->expensive_debug_checks);
        sysfs_strtoul_bool(gc_always_rewrite,   c->gc_always_rewrite);
        sysfs_strtoul_bool(btree_shrinker_disabled, c->shrinker_disabled);
        sysfs_strtoul_bool(copy_gc_enabled,     c->copy_gc_enabled);
        sysfs_strtoul_bool(idle_max_writeback_rate,
                           c->idle_max_writeback_rate_enabled);

        /*
         * Writing gc_after_writeback here may clear an already-set
         * BCH_DO_AUTO_GC flag; that is harmless, since the flag will
         * simply be set again on the next writeback rate update.
         */
        sysfs_strtoul_clamp(gc_after_writeback, c->gc_after_writeback, 0, 1);

        return size;
}
STORE_LOCKED(bch_cache_set)

SHOW(bch_cache_set_internal)
{
        struct cache_set *c = container_of(kobj, struct cache_set, internal);

        return bch_cache_set_show(&c->kobj, attr, buf);
}

STORE(bch_cache_set_internal)
{
        struct cache_set *c = container_of(kobj, struct cache_set, internal);

        /* no user space access if system is rebooting */
        if (bcache_is_reboot)
                return -EBUSY;

        return bch_cache_set_store(&c->kobj, attr, buf, size);
}

static void bch_cache_set_internal_release(struct kobject *k)
{
}

static struct attribute *bch_cache_set_files[] = {
        &sysfs_unregister,
        &sysfs_stop,
        &sysfs_synchronous,
        &sysfs_journal_delay_ms,
        &sysfs_flash_vol_create,

        &sysfs_bucket_size,
        &sysfs_block_size,
        &sysfs_tree_depth,
        &sysfs_root_usage_percent,
        &sysfs_btree_cache_size,
        &sysfs_cache_available_percent,

        &sysfs_average_key_size,

        &sysfs_errors,
        &sysfs_io_error_limit,
        &sysfs_io_error_halflife,
        &sysfs_congested,
        &sysfs_congested_read_threshold_us,
        &sysfs_congested_write_threshold_us,
        &sysfs_clear_stats,
        NULL
};
KTYPE(bch_cache_set);

static struct attribute *bch_cache_set_internal_files[] = {
        &sysfs_active_journal_entries,

        sysfs_time_stats_attribute_list(btree_gc, sec, ms)
        sysfs_time_stats_attribute_list(btree_split, sec, us)
        sysfs_time_stats_attribute_list(btree_sort, ms, us)
        sysfs_time_stats_attribute_list(btree_read, ms, us)

        &sysfs_btree_nodes,
        &sysfs_btree_used_percent,
        &sysfs_btree_cache_max_chain,

        &sysfs_bset_tree_stats,
        &sysfs_cache_read_races,
        &sysfs_reclaim,
        &sysfs_reclaimed_journal_buckets,
        &sysfs_flush_write,
        &sysfs_writeback_keys_done,
        &sysfs_writeback_keys_failed,

        &sysfs_trigger_gc,
        &sysfs_prune_cache,
#ifdef CONFIG_BCACHE_DEBUG
        &sysfs_verify,
        &sysfs_key_merging_disabled,
        &sysfs_expensive_debug_checks,
#endif
        &sysfs_gc_always_rewrite,
        &sysfs_btree_shrinker_disabled,
        &sysfs_copy_gc_enabled,
        &sysfs_idle_max_writeback_rate,
        &sysfs_gc_after_writeback,
        &sysfs_io_disable,
        &sysfs_cutoff_writeback,
        &sysfs_cutoff_writeback_sync,
        NULL
};
KTYPE(bch_cache_set_internal);

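/*
 * sort() comparator: orders bucket priorities in descending order.
 * The cond_resched() keeps sorting a large priority array preemptible.
 */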
static int __bch_cache_cmp(const void *l, const void *r)
{
        cond_resched();
        return *((uint16_t *)r) - *((uint16_t *)l);
}

SHOW(__bch_cache)
{
        struct cache *ca = container_of(kobj, struct cache, kobj);

        sysfs_hprint(bucket_size,       bucket_bytes(ca));
        sysfs_hprint(block_size,        block_bytes(ca));
        sysfs_print(nbuckets,           ca->sb.nbuckets);
        sysfs_print(discard,            ca->discard);
        sysfs_hprint(written, atomic_long_read(&ca->sectors_written) << 9);
        sysfs_hprint(btree_written,
                     atomic_long_read(&ca->btree_sectors_written) << 9);
        sysfs_hprint(metadata_written,
                     (atomic_long_read(&ca->meta_sectors_written) +
                      atomic_long_read(&ca->btree_sectors_written)) << 9);

        sysfs_print(io_errors,
                    atomic_read(&ca->io_errors) >> IO_ERROR_SHIFT);

        if (attr == &sysfs_cache_replacement_policy)
                return bch_snprint_string_list(buf, PAGE_SIZE,
                                               cache_replacement_policies,
                                               CACHE_REPLACEMENT(&ca->sb));

        if (attr == &sysfs_priority_stats) {
                struct bucket *b;
                size_t n = ca->sb.nbuckets, i;
                size_t unused = 0, available = 0, dirty = 0, meta = 0;
                uint64_t sum = 0;
                /* Compute 31 quantiles */
                uint16_t q[31], *p, *cached;
                ssize_t ret;

                cached = p = vmalloc(array_size(sizeof(uint16_t),
                                                ca->sb.nbuckets));
                if (!p)
                        return -ENOMEM;

                mutex_lock(&ca->set->bucket_lock);
                for_each_bucket(b, ca) {
                        if (!GC_SECTORS_USED(b))
                                unused++;
                        if (GC_MARK(b) == GC_MARK_RECLAIMABLE)
                                available++;
                        if (GC_MARK(b) == GC_MARK_DIRTY)
                                dirty++;
                        if (GC_MARK(b) == GC_MARK_METADATA)
                                meta++;
                }

                for (i = ca->sb.first_bucket; i < n; i++)
                        p[i] = ca->buckets[i].prio;
                mutex_unlock(&ca->set->bucket_lock);

                sort(p, n, sizeof(uint16_t), __bch_cache_cmp, NULL);

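                /*
                 * Priorities are now sorted in descending order: drop
                 * trailing zero (unused) entries, then skip leading
                 * BTREE_PRIO (metadata) entries, so that only cached
                 * data buckets feed the average and the quantiles.
                 */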
                while (n &&
                       !cached[n - 1])
                        --n;

                while (cached < p + n &&
                       *cached == BTREE_PRIO)
                        cached++, n--;

                for (i = 0; i < n; i++)
                        sum += INITIAL_PRIO - cached[i];

                if (n)
                        do_div(sum, n);

                for (i = 0; i < ARRAY_SIZE(q); i++)
                        q[i] = INITIAL_PRIO - cached[n * (i + 1) /
                                (ARRAY_SIZE(q) + 1)];

                vfree(p);

                ret = scnprintf(buf, PAGE_SIZE,
                                "Unused:                %zu%%\n"
                                "Clean:         %zu%%\n"
                                "Dirty:         %zu%%\n"
                                "Metadata:      %zu%%\n"
                                "Average:       %llu\n"
                                "Sectors per Q: %zu\n"
                                "Quantiles:     [",
                                unused * 100 / (size_t) ca->sb.nbuckets,
                                available * 100 / (size_t) ca->sb.nbuckets,
                                dirty * 100 / (size_t) ca->sb.nbuckets,
                                meta * 100 / (size_t) ca->sb.nbuckets, sum,
                                n * ca->sb.bucket_size / (ARRAY_SIZE(q) + 1));

                for (i = 0; i < ARRAY_SIZE(q); i++)
                        ret += scnprintf(buf + ret, PAGE_SIZE - ret,
                                         "%u ", q[i]);
                ret--;

                ret += scnprintf(buf + ret, PAGE_SIZE - ret, "]\n");

                return ret;
        }

        return 0;
}
SHOW_LOCKED(bch_cache)

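/*
 * ->store() for an individual cache device.  Changes to discard and
 * the replacement policy are made persistent by rewriting the cache
 * set superblock.
 */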
STORE(__bch_cache)
{
        struct cache *ca = container_of(kobj, struct cache, kobj);
        ssize_t v;

        /* no user space access if system is rebooting */
        if (bcache_is_reboot)
                return -EBUSY;

        if (attr == &sysfs_discard) {
                bool v = strtoul_or_return(buf);

                if (blk_queue_discard(bdev_get_queue(ca->bdev)))
                        ca->discard = v;

                if (v != CACHE_DISCARD(&ca->sb)) {
                        SET_CACHE_DISCARD(&ca->sb, v);
                        bcache_write_super(ca->set);
                }
        }

        if (attr == &sysfs_cache_replacement_policy) {
                v = __sysfs_match_string(cache_replacement_policies, -1, buf);
                if (v < 0)
                        return v;

                if ((unsigned int) v != CACHE_REPLACEMENT(&ca->sb)) {
                        mutex_lock(&ca->set->bucket_lock);
                        SET_CACHE_REPLACEMENT(&ca->sb, v);
                        mutex_unlock(&ca->set->bucket_lock);

                        bcache_write_super(ca->set);
                }
        }

        if (attr == &sysfs_clear_stats) {
                atomic_long_set(&ca->sectors_written, 0);
                atomic_long_set(&ca->btree_sectors_written, 0);
                atomic_long_set(&ca->meta_sectors_written, 0);
                atomic_set(&ca->io_count, 0);
                atomic_set(&ca->io_errors, 0);
        }

        return size;
}
STORE_LOCKED(bch_cache)

static struct attribute *bch_cache_files[] = {
        &sysfs_bucket_size,
        &sysfs_block_size,
        &sysfs_nbuckets,
        &sysfs_priority_stats,
        &sysfs_discard,
        &sysfs_written,
        &sysfs_btree_written,
        &sysfs_metadata_written,
        &sysfs_io_errors,
        &sysfs_clear_stats,
        &sysfs_cache_replacement_policy,
        NULL
};
KTYPE(bch_cache);