linux/drivers/md/bcache/debug.c
/*
 * Assorted bcache debug code
 *
 * Copyright 2010, 2011 Kent Overstreet <kent.overstreet@gmail.com>
 * Copyright 2012 Google, Inc.
 */

#include "bcache.h"
#include "btree.h"
#include "debug.h"
#include "request.h"

#include <linux/console.h>
#include <linux/debugfs.h>
#include <linux/module.h>
#include <linux/random.h>
#include <linux/seq_file.h>

static struct dentry *debug;

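/*
 * Check each pointer in @k against cache set @c and return a short status
 * string; "" means the key looks valid.
 */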
const char *bch_ptr_status(struct cache_set *c, const struct bkey *k)
{
	unsigned i;

	for (i = 0; i < KEY_PTRS(k); i++)
		if (ptr_available(c, k, i)) {
			struct cache *ca = PTR_CACHE(c, k, i);
			size_t bucket = PTR_BUCKET_NR(c, k, i);
			size_t r = bucket_remainder(c, PTR_OFFSET(k, i));

			if (KEY_SIZE(k) + r > c->sb.bucket_size)
				return "bad, length too big";
			if (bucket <  ca->sb.first_bucket)
				return "bad, short offset";
			if (bucket >= ca->sb.nbuckets)
				return "bad, offset past end of device";
			if (ptr_stale(c, k, i))
				return "stale";
		}

	if (!bkey_cmp(k, &ZERO_KEY))
		return "bad, null key";
	if (!KEY_PTRS(k))
		return "bad, no pointers";
	if (!KEY_SIZE(k))
		return "zeroed key";
	return "";
}

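/*
 * Format @k into a keyprint_hack buffer: inode:offset and length, each
 * pointer as dev:offset with its generation, and the dirty/csum flags if set.
 */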
struct keyprint_hack bch_pkey(const struct bkey *k)
{
	unsigned i = 0;
	struct keyprint_hack r;
	char *out = r.s, *end = r.s + KEYHACK_SIZE;

#define p(...)	(out += scnprintf(out, end - out, __VA_ARGS__))

	p("%llu:%llu len %llu -> [", KEY_INODE(k), KEY_OFFSET(k), KEY_SIZE(k));

	if (KEY_PTRS(k))
		while (1) {
			p("%llu:%llu gen %llu",
			  PTR_DEV(k, i), PTR_OFFSET(k, i), PTR_GEN(k, i));

			if (++i == KEY_PTRS(k))
				break;

			p(", ");
		}

	p("]");

	if (KEY_DIRTY(k))
		p(" dirty");
	if (KEY_CSUM(k))
		p(" cs%llu %llx", KEY_CSUM(k), k->ptr[1]);
#undef p
	return r;
}

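/* Format btree node @b as "<bucket nr> level <level>/<root level>". */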
struct keyprint_hack bch_pbtree(const struct btree *b)
{
	struct keyprint_hack r;

	snprintf(r.s, 40, "%zu level %i/%i", PTR_BUCKET_NR(b->c, &b->key, 0),
		 b->level, b->c->root ? b->c->root->level : -1);
	return r;
}

#if defined(CONFIG_BCACHE_DEBUG) || defined(CONFIG_BCACHE_EDEBUG)

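/*
 * Return true if @k sorts after the key that follows it in the bset (for
 * leaf nodes, after the start of the following extent), i.e. the node is
 * out of order.
 */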
static bool skipped_backwards(struct btree *b, struct bkey *k)
{
	return bkey_cmp(k, (!b->level)
			? &START_KEY(bkey_next(k))
			: bkey_next(k)) > 0;
}

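/*
 * Print every key in @i to the console, along with the bucket each pointer
 * references (and its priority, when in range) and the key's
 * bch_ptr_status(), flagging keys that are out of order.
 */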
static void dump_bset(struct btree *b, struct bset *i)
{
	struct bkey *k;
	unsigned j;

	for (k = i->start; k < end(i); k = bkey_next(k)) {
		printk(KERN_ERR "block %zu key %zi/%u: %s", index(i, b),
		       (uint64_t *) k - i->d, i->keys, pkey(k));

		for (j = 0; j < KEY_PTRS(k); j++) {
			size_t n = PTR_BUCKET_NR(b->c, k, j);
			printk(" bucket %zu", n);

			if (n >= b->c->sb.first_bucket && n < b->c->sb.nbuckets)
				printk(" prio %i",
				       PTR_BUCKET(b->c, k, j)->prio);
		}

		printk(" %s\n", bch_ptr_status(b->c, k));

		if (bkey_next(k) < end(i) &&
		    skipped_backwards(b, k))
			printk(KERN_ERR "Key skipped backwards\n");
	}
}

#endif

#ifdef CONFIG_BCACHE_DEBUG

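/*
 * Re-read btree node @b from disk into the cache set's verify_data node and
 * compare it against the sorted in-memory contents in @new; on a mismatch,
 * dump both versions to the console and panic.
 */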
void bch_btree_verify(struct btree *b, struct bset *new)
{
	struct btree *v = b->c->verify_data;
	struct closure cl;
	closure_init_stack(&cl);

	if (!b->c->verify)
		return;

	closure_wait_event(&b->io.wait, &cl,
			   atomic_read(&b->io.cl.remaining) == -1);

	mutex_lock(&b->c->verify_lock);

	bkey_copy(&v->key, &b->key);
	v->written = 0;
	v->level = b->level;

	bch_btree_read(v);
	closure_wait_event(&v->io.wait, &cl,
			   atomic_read(&v->io.cl.remaining) == -1);

	if (new->keys != v->sets[0].data->keys ||
	    memcmp(new->start,
		   v->sets[0].data->start,
		   (void *) end(new) - (void *) new->start)) {
		unsigned i, j;

		console_lock();

		printk(KERN_ERR "*** original memory node:\n");
		for (i = 0; i <= b->nsets; i++)
			dump_bset(b, b->sets[i].data);

		printk(KERN_ERR "*** sorted memory node:\n");
		dump_bset(b, new);

		printk(KERN_ERR "*** on disk node:\n");
		dump_bset(v, v->sets[0].data);

		for (j = 0; j < new->keys; j++)
			if (new->d[j] != v->sets[0].data->d[j])
				break;

		console_unlock();
		panic("verify failed at %u\n", j);
	}

	mutex_unlock(&b->c->verify_lock);
}

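/* Completion for the verify read: drop the reference on the waiting closure. */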
static void data_verify_endio(struct bio *bio, int error)
{
	struct closure *cl = bio->bi_private;
	closure_put(cl);
}

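/*
 * Clone the original read bio, re-submit it, and compare the returned data
 * page by page against what the first read produced; log the backing device
 * name and sector on any mismatch.
 */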
void bch_data_verify(struct search *s)
{
	char name[BDEVNAME_SIZE];
	struct cached_dev *dc = container_of(s->d, struct cached_dev, disk);
	struct closure *cl = &s->cl;
	struct bio *check;
	struct bio_vec *bv;
	int i;

	if (!s->unaligned_bvec)
		bio_for_each_segment(bv, s->orig_bio, i)
			bv->bv_offset = 0, bv->bv_len = PAGE_SIZE;

	check = bio_clone(s->orig_bio, GFP_NOIO);
	if (!check)
		return;

	if (bch_bio_alloc_pages(check, GFP_NOIO))
		goto out_put;

	check->bi_rw		= READ_SYNC;
	check->bi_private	= cl;
	check->bi_end_io	= data_verify_endio;

	closure_bio_submit(check, cl, &dc->disk);
	closure_sync(cl);

	bio_for_each_segment(bv, s->orig_bio, i) {
		void *p1 = kmap(bv->bv_page);
		void *p2 = kmap(check->bi_io_vec[i].bv_page);

		if (memcmp(p1 + bv->bv_offset,
			   p2 + bv->bv_offset,
			   bv->bv_len))
			printk(KERN_ERR
			       "bcache (%s): verify failed at sector %llu\n",
			       bdevname(dc->bdev, name),
			       (uint64_t) s->orig_bio->bi_sector);

		kunmap(bv->bv_page);
		kunmap(check->bi_io_vec[i].bv_page);
	}

	__bio_for_each_segment(bv, check, i, 0)
		__free_page(bv->bv_page);
out_put:
	bio_put(check);
}

#endif

#ifdef CONFIG_BCACHE_EDEBUG

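/* Sum KEY_SIZE() over every key in @b; returns 0 for non-leaf nodes. */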
unsigned bch_count_data(struct btree *b)
{
	unsigned ret = 0;
	struct btree_iter iter;
	struct bkey *k;

	if (!b->level)
		for_each_key(b, k, &iter)
			ret += KEY_SIZE(k);
	return ret;
}

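/* Dump every bset in @b to the console, print the caller's message, and panic. */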
static void vdump_bucket_and_panic(struct btree *b, const char *fmt,
				   va_list args)
{
	unsigned i;

	console_lock();

	for (i = 0; i <= b->nsets; i++)
		dump_bset(b, b->sets[i].data);

	vprintk(fmt, args);

	console_unlock();

	panic("at %s\n", pbtree(b));
}

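/*
 * Verify that the keys in @i are in sorted order; if any key skips
 * backwards, dump the node and panic with the caller-supplied message.
 */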
void bch_check_key_order_msg(struct btree *b, struct bset *i,
			     const char *fmt, ...)
{
	struct bkey *k;

	if (!i->keys)
		return;

	for (k = i->start; bkey_next(k) < end(i); k = bkey_next(k))
		if (skipped_backwards(b, k)) {
			va_list args;
			va_start(args, fmt);

			vdump_bucket_and_panic(b, fmt, args);
			va_end(args);
		}
}

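/*
 * Walk all keys in leaf node @b looking for out-of-order or overlapping
 * extents; dump the node and panic if either is found.
 */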
void bch_check_keys(struct btree *b, const char *fmt, ...)
{
	va_list args;
	struct bkey *k, *p = NULL;
	struct btree_iter iter;

	if (b->level)
		return;

	for_each_key(b, k, &iter) {
		if (p && bkey_cmp(&START_KEY(p), &START_KEY(k)) > 0) {
			printk(KERN_ERR "Keys out of order:\n");
			goto bug;
		}

		if (bch_ptr_invalid(b, k))
			continue;

		if (p && bkey_cmp(p, &START_KEY(k)) > 0) {
			printk(KERN_ERR "Overlapping keys:\n");
			goto bug;
		}
		p = k;
	}
	return;
bug:
	va_start(args, fmt);
	vdump_bucket_and_panic(b, fmt, args);
	va_end(args);
}

#endif

#ifdef CONFIG_DEBUG_FS

/* XXX: cache set refcounting */

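/*
 * Per-open state for the debugfs key dump: a buffer of formatted keys plus
 * a keybuf used to scan the cache set's keys.
 */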
struct dump_iterator {
	char			buf[PAGE_SIZE];
	size_t			bytes;
	struct cache_set	*c;
	struct keybuf		keys;
};

static bool dump_pred(struct keybuf *buf, struct bkey *k)
{
	return true;
}

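/*
 * debugfs read(): drain buffered text to userspace, then refill the buffer
 * by formatting the next key found by rescanning the btree.
 */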
static ssize_t bch_dump_read(struct file *file, char __user *buf,
			     size_t size, loff_t *ppos)
{
	struct dump_iterator *i = file->private_data;
	ssize_t ret = 0;

	while (size) {
		struct keybuf_key *w;
		unsigned bytes = min(i->bytes, size);

		int err = copy_to_user(buf, i->buf, bytes);
		if (err)
			return err;

		ret	 += bytes;
		buf	 += bytes;
		size	 -= bytes;
		i->bytes -= bytes;
		memmove(i->buf, i->buf + bytes, i->bytes);

		if (i->bytes)
			break;

		w = bch_keybuf_next_rescan(i->c, &i->keys, &MAX_KEY);
		if (!w)
			break;

		i->bytes = snprintf(i->buf, PAGE_SIZE, "%s\n", pkey(&w->key));
		bch_keybuf_del(&i->keys, w);
	}

	return ret;
}

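/* debugfs open(): allocate a dump_iterator and start the scan at KEY(0, 0, 0). */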
static int bch_dump_open(struct inode *inode, struct file *file)
{
	struct cache_set *c = inode->i_private;
	struct dump_iterator *i;

	i = kzalloc(sizeof(struct dump_iterator), GFP_KERNEL);
	if (!i)
		return -ENOMEM;

	file->private_data = i;
	i->c = c;
	bch_keybuf_init(&i->keys, dump_pred);
	i->keys.last_scanned = KEY(0, 0, 0);

	return 0;
}

static int bch_dump_release(struct inode *inode, struct file *file)
{
	kfree(file->private_data);
	return 0;
}

static const struct file_operations cache_set_debug_ops = {
	.owner		= THIS_MODULE,
	.open		= bch_dump_open,
	.read		= bch_dump_read,
	.release	= bch_dump_release
};

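/*
 * Create the per-cache-set debugfs file ("bcache-<set uuid>") under the
 * top-level bcache directory, if that directory exists.
 */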
void bch_debug_init_cache_set(struct cache_set *c)
{
	if (!IS_ERR_OR_NULL(debug)) {
		char name[50];
		snprintf(name, 50, "bcache-%pU", c->sb.set_uuid);

		c->debug = debugfs_create_file(name, 0400, debug, c,
					       &cache_set_debug_ops);
	}
}

#endif

/* Fuzz tester has rotted: */
#if 0

static ssize_t btree_fuzz(struct kobject *k, struct kobj_attribute *a,
			  const char *buffer, size_t size)
{
	void dump(struct btree *b)
	{
		struct bset *i;

		for (i = b->sets[0].data;
		     index(i, b) < btree_blocks(b) &&
		     i->seq == b->sets[0].data->seq;
		     i = ((void *) i) + set_blocks(i, b->c) * block_bytes(b->c))
			dump_bset(b, i);
	}

	struct cache_sb *sb;
	struct cache_set *c;
	struct btree *all[3], *b, *fill, *orig;
	int j;

	struct btree_op op;
	bch_btree_op_init_stack(&op);

	sb = kzalloc(sizeof(struct cache_sb), GFP_KERNEL);
	if (!sb)
		return -ENOMEM;

	sb->bucket_size = 128;
	sb->block_size = 4;

	c = bch_cache_set_alloc(sb);
	if (!c)
		return -ENOMEM;

	for (j = 0; j < 3; j++) {
		BUG_ON(list_empty(&c->btree_cache));
		all[j] = list_first_entry(&c->btree_cache, struct btree, list);
		list_del_init(&all[j]->list);

		all[j]->key = KEY(0, 0, c->sb.bucket_size);
		bkey_copy_key(&all[j]->key, &MAX_KEY);
	}

	b = all[0];
	fill = all[1];
	orig = all[2];

	while (1) {
		for (j = 0; j < 3; j++)
			all[j]->written = all[j]->nsets = 0;

		bch_bset_init_next(b);

		while (1) {
			struct bset *i = write_block(b);
			struct bkey *k = op.keys.top;
			unsigned rand;

			bkey_init(k);
			rand = get_random_int();

			op.type = rand & 1
				? BTREE_INSERT
				: BTREE_REPLACE;
			rand >>= 1;

			SET_KEY_SIZE(k, bucket_remainder(c, rand));
			rand >>= c->bucket_bits;
			rand &= 1024 * 512 - 1;
			rand += c->sb.bucket_size;
			SET_KEY_OFFSET(k, rand);
#if 0
			SET_KEY_PTRS(k, 1);
#endif
			bch_keylist_push(&op.keys);
			bch_btree_insert_keys(b, &op);

			if (should_split(b) ||
			    set_blocks(i, b->c) !=
			    __set_blocks(i, i->keys + 15, b->c)) {
				i->csum = csum_set(i);

				memcpy(write_block(fill),
				       i, set_bytes(i));

				b->written += set_blocks(i, b->c);
				fill->written = b->written;
				if (b->written == btree_blocks(b))
					break;

				bch_btree_sort_lazy(b);
				bch_bset_init_next(b);
			}
		}

		memcpy(orig->sets[0].data,
		       fill->sets[0].data,
		       btree_bytes(c));

		bch_btree_sort(b);
		fill->written = 0;
		bch_btree_read_done(&fill->io.cl);

		if (b->sets[0].data->keys != fill->sets[0].data->keys ||
		    memcmp(b->sets[0].data->start,
			   fill->sets[0].data->start,
			   b->sets[0].data->keys * sizeof(uint64_t))) {
			struct bset *i = b->sets[0].data;
			struct bkey *k, *l;

			for (k = i->start,
			     l = fill->sets[0].data->start;
			     k < end(i);
			     k = bkey_next(k), l = bkey_next(l))
				if (bkey_cmp(k, l) ||
				    KEY_SIZE(k) != KEY_SIZE(l))
					pr_err("key %zi differs: %s != %s",
					       (uint64_t *) k - i->d,
					       pkey(k), pkey(l));

			for (j = 0; j < 3; j++) {
				pr_err("**** Set %i ****", j);
				dump(all[j]);
			}
			panic("\n");
		}

		pr_info("fuzz complete: %i keys", b->sets[0].data->keys);
	}
}

kobj_attribute_write(fuzz, btree_fuzz);
#endif

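/* Remove the debugfs tree created by bch_debug_init(). */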
void bch_debug_exit(void)
{
	if (!IS_ERR_OR_NULL(debug))
		debugfs_remove_recursive(debug);
}

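/* Create the top-level "bcache" debugfs directory at module init. */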
int __init bch_debug_init(struct kobject *kobj)
{
	int ret = 0;
#if 0
	ret = sysfs_create_file(kobj, &ksysfs_fuzz.attr);
	if (ret)
		return ret;
#endif

	debug = debugfs_create_dir("bcache", NULL);
	return ret;
}