linux/include/trace/events/bcache.h
#undef TRACE_SYSTEM
#define TRACE_SYSTEM bcache

#if !defined(_TRACE_BCACHE_H) || defined(TRACE_HEADER_MULTI_READ)
#define _TRACE_BCACHE_H

#include <linux/tracepoint.h>

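/*
 * Each DECLARE_EVENT_CLASS() below defines a reusable event template
 * (entry layout, assignment logic, and format string); DEFINE_EVENT()
 * then stamps out concrete tracepoints from a class without repeating
 * that boilerplate. TRACE_EVENT() is shorthand for a class with a
 * single event.
 */
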
DECLARE_EVENT_CLASS(bcache_request,
        TP_PROTO(struct bcache_device *d, struct bio *bio),
        TP_ARGS(d, bio),

        TP_STRUCT__entry(
                __field(dev_t,          dev                     )
                __field(unsigned int,   orig_major              )
                __field(unsigned int,   orig_minor              )
                __field(sector_t,       sector                  )
                __field(sector_t,       orig_sector             )
                __field(unsigned int,   nr_sector               )
                __array(char,           rwbs,   6               )
        ),

        TP_fast_assign(
                __entry->dev            = bio->bi_bdev->bd_dev;
                __entry->orig_major     = d->disk->major;
                __entry->orig_minor     = d->disk->first_minor;
                __entry->sector         = bio->bi_iter.bi_sector;
                /* undo the default 16-sector bcache data offset on the backing device */
                __entry->orig_sector    = bio->bi_iter.bi_sector - 16;
                __entry->nr_sector      = bio->bi_iter.bi_size >> 9;
                blk_fill_rwbs(__entry->rwbs, bio->bi_rw, bio->bi_iter.bi_size);
        ),

        TP_printk("%d,%d %s %llu + %u (from %d,%d @ %llu)",
                  MAJOR(__entry->dev), MINOR(__entry->dev),
                  __entry->rwbs, (unsigned long long)__entry->sector,
                  __entry->nr_sector, __entry->orig_major, __entry->orig_minor,
                  (unsigned long long)__entry->orig_sector)
);
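
/*
 * Usage sketch (the callers live in drivers/md/bcache, not in this
 * header): the DEFINE_EVENT() instances of this class are emitted
 * around request processing, e.g.
 *
 *      trace_bcache_request_start(d, bio);
 *      ...submit and complete the request...
 *      trace_bcache_request_end(d, bio);
 */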

DECLARE_EVENT_CLASS(bkey,
        TP_PROTO(struct bkey *k),
        TP_ARGS(k),

        TP_STRUCT__entry(
                __field(u32,    size                            )
                __field(u32,    inode                           )
                __field(u64,    offset                          )
                __field(bool,   dirty                           )
        ),

        TP_fast_assign(
                __entry->inode  = KEY_INODE(k);
                __entry->offset = KEY_OFFSET(k);
                __entry->size   = KEY_SIZE(k);
                __entry->dirty  = KEY_DIRTY(k);
        ),

        TP_printk("%u:%llu len %u dirty %u", __entry->inode,
                  __entry->offset, __entry->size, __entry->dirty)
);
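
/*
 * KEY_INODE()/KEY_OFFSET()/KEY_SIZE()/KEY_DIRTY() are the bkey field
 * accessors from bcache's private headers (drivers/md/bcache): a key
 * addresses inode:offset with a length and a dirty bit, which is
 * exactly what the format string above prints.
 */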

DECLARE_EVENT_CLASS(btree_node,
        TP_PROTO(struct btree *b),
        TP_ARGS(b),

        TP_STRUCT__entry(
                __field(size_t,         bucket                  )
        ),

        TP_fast_assign(
                __entry->bucket = PTR_BUCKET_NR(b->c, &b->key, 0);
        ),

        TP_printk("bucket %zu", __entry->bucket)
);
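
/*
 * PTR_BUCKET_NR() resolves the first pointer of the node's key to its
 * bucket index on the cache device, so btree nodes are identified in
 * traces by the bucket they live in.
 */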

/* request.c */

DEFINE_EVENT(bcache_request, bcache_request_start,
        TP_PROTO(struct bcache_device *d, struct bio *bio),
        TP_ARGS(d, bio)
);

DEFINE_EVENT(bcache_request, bcache_request_end,
        TP_PROTO(struct bcache_device *d, struct bio *bio),
        TP_ARGS(d, bio)
);

DECLARE_EVENT_CLASS(bcache_bio,
        TP_PROTO(struct bio *bio),
        TP_ARGS(bio),

        TP_STRUCT__entry(
                __field(dev_t,          dev                     )
                __field(sector_t,       sector                  )
                __field(unsigned int,   nr_sector               )
                __array(char,           rwbs,   6               )
        ),

        TP_fast_assign(
                __entry->dev            = bio->bi_bdev->bd_dev;
                __entry->sector         = bio->bi_iter.bi_sector;
                __entry->nr_sector      = bio->bi_iter.bi_size >> 9;
                blk_fill_rwbs(__entry->rwbs, bio->bi_rw, bio->bi_iter.bi_size);
        ),

        TP_printk("%d,%d  %s %llu + %u",
                  MAJOR(__entry->dev), MINOR(__entry->dev), __entry->rwbs,
                  (unsigned long long)__entry->sector, __entry->nr_sector)
);
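
/*
 * A rendered bcache_bio line looks like, for example:
 *
 *      8,16  R 1024 + 8
 *
 * i.e. major,minor of the device, the rwbs flags from blk_fill_rwbs(),
 * the starting sector, and the size in 512-byte sectors.
 */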

DEFINE_EVENT(bcache_bio, bcache_bypass_sequential,
        TP_PROTO(struct bio *bio),
        TP_ARGS(bio)
);

DEFINE_EVENT(bcache_bio, bcache_bypass_congested,
        TP_PROTO(struct bio *bio),
        TP_ARGS(bio)
);

TRACE_EVENT(bcache_read,
        TP_PROTO(struct bio *bio, bool hit, bool bypass),
        TP_ARGS(bio, hit, bypass),

        TP_STRUCT__entry(
                __field(dev_t,          dev                     )
                __field(sector_t,       sector                  )
                __field(unsigned int,   nr_sector               )
                __array(char,           rwbs,   6               )
                __field(bool,           cache_hit               )
                __field(bool,           bypass                  )
        ),

        TP_fast_assign(
                __entry->dev            = bio->bi_bdev->bd_dev;
                __entry->sector         = bio->bi_iter.bi_sector;
                __entry->nr_sector      = bio->bi_iter.bi_size >> 9;
                blk_fill_rwbs(__entry->rwbs, bio->bi_rw, bio->bi_iter.bi_size);
                __entry->cache_hit = hit;
                __entry->bypass = bypass;
        ),

        TP_printk("%d,%d  %s %llu + %u hit %u bypass %u",
                  MAJOR(__entry->dev), MINOR(__entry->dev),
                  __entry->rwbs, (unsigned long long)__entry->sector,
                  __entry->nr_sector, __entry->cache_hit, __entry->bypass)
);

TRACE_EVENT(bcache_write,
        TP_PROTO(struct cache_set *c, u64 inode, struct bio *bio,
                bool writeback, bool bypass),
        TP_ARGS(c, inode, bio, writeback, bypass),

        TP_STRUCT__entry(
                __array(char,           uuid,   16              )
                __field(u64,            inode                   )
                __field(sector_t,       sector                  )
                __field(unsigned int,   nr_sector               )
                __array(char,           rwbs,   6               )
                __field(bool,           writeback               )
                __field(bool,           bypass                  )
        ),

        TP_fast_assign(
                memcpy(__entry->uuid, c->sb.set_uuid, 16);
                __entry->inode          = inode;
                __entry->sector         = bio->bi_iter.bi_sector;
                __entry->nr_sector      = bio->bi_iter.bi_size >> 9;
                blk_fill_rwbs(__entry->rwbs, bio->bi_rw, bio->bi_iter.bi_size);
                __entry->writeback = writeback;
                __entry->bypass = bypass;
        ),
        TP_printk("%pU inode %llu  %s %llu + %u writeback %u bypass %u",
                  __entry->uuid, __entry->inode,
                  __entry->rwbs, (unsigned long long)__entry->sector,
                  __entry->nr_sector, __entry->writeback, __entry->bypass)
);
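
/*
 * %pU is the kernel printf extension that formats the raw 16-byte UUID
 * array as a standard xxxxxxxx-xxxx-... string; it is used here and by
 * the cache_set class below.
 */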

DEFINE_EVENT(bcache_bio, bcache_read_retry,
        TP_PROTO(struct bio *bio),
        TP_ARGS(bio)
);

DEFINE_EVENT(bkey, bcache_cache_insert,
        TP_PROTO(struct bkey *k),
        TP_ARGS(k)
);

/* Journal */

DECLARE_EVENT_CLASS(cache_set,
        TP_PROTO(struct cache_set *c),
        TP_ARGS(c),

        TP_STRUCT__entry(
                __array(char,           uuid,   16              )
        ),

        TP_fast_assign(
                memcpy(__entry->uuid, c->sb.set_uuid, 16);
        ),

        TP_printk("%pU", __entry->uuid)
);

DEFINE_EVENT(bkey, bcache_journal_replay_key,
        TP_PROTO(struct bkey *k),
        TP_ARGS(k)
);

DEFINE_EVENT(cache_set, bcache_journal_full,
        TP_PROTO(struct cache_set *c),
        TP_ARGS(c)
);

DEFINE_EVENT(cache_set, bcache_journal_entry_full,
        TP_PROTO(struct cache_set *c),
        TP_ARGS(c)
);

DEFINE_EVENT(bcache_bio, bcache_journal_write,
        TP_PROTO(struct bio *bio),
        TP_ARGS(bio)
);

/* Btree */

DEFINE_EVENT(cache_set, bcache_btree_cache_cannibalize,
        TP_PROTO(struct cache_set *c),
        TP_ARGS(c)
);

DEFINE_EVENT(btree_node, bcache_btree_read,
        TP_PROTO(struct btree *b),
        TP_ARGS(b)
);

TRACE_EVENT(bcache_btree_write,
        TP_PROTO(struct btree *b),
        TP_ARGS(b),

        TP_STRUCT__entry(
                __field(size_t,         bucket                  )
                __field(unsigned,       block                   )
                __field(unsigned,       keys                    )
        ),

        TP_fast_assign(
                __entry->bucket = PTR_BUCKET_NR(b->c, &b->key, 0);
                __entry->block  = b->written;
                __entry->keys   = b->keys.set[b->keys.nsets].data->keys;
        ),
        TP_printk("bucket %zu block %u keys %u",
                  __entry->bucket, __entry->block, __entry->keys)
);

DEFINE_EVENT(btree_node, bcache_btree_node_alloc,
        TP_PROTO(struct btree *b),
        TP_ARGS(b)
);

DEFINE_EVENT(cache_set, bcache_btree_node_alloc_fail,
        TP_PROTO(struct cache_set *c),
        TP_ARGS(c)
);

DEFINE_EVENT(btree_node, bcache_btree_node_free,
        TP_PROTO(struct btree *b),
        TP_ARGS(b)
);

TRACE_EVENT(bcache_btree_gc_coalesce,
        TP_PROTO(unsigned nodes),
        TP_ARGS(nodes),

        TP_STRUCT__entry(
                __field(unsigned,       nodes                   )
        ),

        TP_fast_assign(
                __entry->nodes  = nodes;
        ),

        TP_printk("coalesced %u nodes", __entry->nodes)
);

DEFINE_EVENT(cache_set, bcache_gc_start,
        TP_PROTO(struct cache_set *c),
        TP_ARGS(c)
);

DEFINE_EVENT(cache_set, bcache_gc_end,
        TP_PROTO(struct cache_set *c),
        TP_ARGS(c)
);

DEFINE_EVENT(bkey, bcache_gc_copy,
        TP_PROTO(struct bkey *k),
        TP_ARGS(k)
);

DEFINE_EVENT(bkey, bcache_gc_copy_collision,
        TP_PROTO(struct bkey *k),
        TP_ARGS(k)
);

TRACE_EVENT(bcache_btree_insert_key,
        TP_PROTO(struct btree *b, struct bkey *k, unsigned op, unsigned status),
        TP_ARGS(b, k, op, status),

        TP_STRUCT__entry(
                __field(u64,    btree_node                      )
                __field(u32,    btree_level                     )
                __field(u32,    inode                           )
                __field(u64,    offset                          )
                __field(u32,    size                            )
                __field(u8,     dirty                           )
                __field(u8,     op                              )
                __field(u8,     status                          )
        ),

        TP_fast_assign(
                __entry->btree_node = PTR_BUCKET_NR(b->c, &b->key, 0);
                __entry->btree_level = b->level;
                __entry->inode  = KEY_INODE(k);
                __entry->offset = KEY_OFFSET(k);
                __entry->size   = KEY_SIZE(k);
                __entry->dirty  = KEY_DIRTY(k);
                __entry->op = op;
                __entry->status = status;
        ),

        TP_printk("%u for %u at %llu(%u): %u:%llu len %u dirty %u",
                  __entry->status, __entry->op,
                  __entry->btree_node, __entry->btree_level,
                  __entry->inode, __entry->offset,
                  __entry->size, __entry->dirty)
);

DECLARE_EVENT_CLASS(btree_split,
        TP_PROTO(struct btree *b, unsigned keys),
        TP_ARGS(b, keys),

        TP_STRUCT__entry(
                __field(size_t,         bucket                  )
                __field(unsigned,       keys                    )
        ),

        TP_fast_assign(
                __entry->bucket = PTR_BUCKET_NR(b->c, &b->key, 0);
                __entry->keys   = keys;
        ),

        TP_printk("bucket %zu keys %u", __entry->bucket, __entry->keys)
);

DEFINE_EVENT(btree_split, bcache_btree_node_split,
        TP_PROTO(struct btree *b, unsigned keys),
        TP_ARGS(b, keys)
);

DEFINE_EVENT(btree_split, bcache_btree_node_compact,
        TP_PROTO(struct btree *b, unsigned keys),
        TP_ARGS(b, keys)
);

DEFINE_EVENT(btree_node, bcache_btree_set_root,
        TP_PROTO(struct btree *b),
        TP_ARGS(b)
);

TRACE_EVENT(bcache_keyscan,
        TP_PROTO(unsigned nr_found,
                 unsigned start_inode, uint64_t start_offset,
                 unsigned end_inode, uint64_t end_offset),
        TP_ARGS(nr_found,
                start_inode, start_offset,
                end_inode, end_offset),

        TP_STRUCT__entry(
                __field(__u32,  nr_found                        )
                __field(__u32,  start_inode                     )
                __field(__u64,  start_offset                    )
                __field(__u32,  end_inode                       )
                __field(__u64,  end_offset                      )
        ),

        TP_fast_assign(
                __entry->nr_found       = nr_found;
                __entry->start_inode    = start_inode;
                __entry->start_offset   = start_offset;
                __entry->end_inode      = end_inode;
                __entry->end_offset     = end_offset;
        ),

        TP_printk("found %u keys from %u:%llu to %u:%llu", __entry->nr_found,
                  __entry->start_inode, __entry->start_offset,
                  __entry->end_inode, __entry->end_offset)
);

/* Allocator */

TRACE_EVENT(bcache_invalidate,
        TP_PROTO(struct cache *ca, size_t bucket),
        TP_ARGS(ca, bucket),

        TP_STRUCT__entry(
                __field(unsigned,       sectors                 )
                __field(dev_t,          dev                     )
                __field(__u64,          offset                  )
        ),

        TP_fast_assign(
                __entry->dev            = ca->bdev->bd_dev;
                __entry->offset         = bucket << ca->set->bucket_bits;
                __entry->sectors        = GC_SECTORS_USED(&ca->buckets[bucket]);
        ),

        TP_printk("invalidated %u sectors at %d,%d sector=%llu",
                  __entry->sectors, MAJOR(__entry->dev),
                  MINOR(__entry->dev), __entry->offset)
);

TRACE_EVENT(bcache_alloc,
        TP_PROTO(struct cache *ca, size_t bucket),
        TP_ARGS(ca, bucket),

        TP_STRUCT__entry(
                __field(dev_t,          dev                     )
                __field(__u64,          offset                  )
        ),

        TP_fast_assign(
                __entry->dev            = ca->bdev->bd_dev;
                __entry->offset         = bucket << ca->set->bucket_bits;
        ),

        TP_printk("allocated %d,%d sector=%llu", MAJOR(__entry->dev),
                  MINOR(__entry->dev), __entry->offset)
);

TRACE_EVENT(bcache_alloc_fail,
        TP_PROTO(struct cache *ca, unsigned reserve),
        TP_ARGS(ca, reserve),

        TP_STRUCT__entry(
                __field(dev_t,          dev                     )
                __field(unsigned,       free                    )
                __field(unsigned,       free_inc                )
                __field(unsigned,       blocked                 )
        ),

        TP_fast_assign(
                __entry->dev            = ca->bdev->bd_dev;
                __entry->free           = fifo_used(&ca->free[reserve]);
                __entry->free_inc       = fifo_used(&ca->free_inc);
                __entry->blocked        = atomic_read(&ca->set->prio_blocked);
        ),

        TP_printk("alloc fail %d,%d free %u free_inc %u blocked %u",
                  MAJOR(__entry->dev), MINOR(__entry->dev), __entry->free,
                  __entry->free_inc, __entry->blocked)
);

/* Background writeback */

DEFINE_EVENT(bkey, bcache_writeback,
        TP_PROTO(struct bkey *k),
        TP_ARGS(k)
);

DEFINE_EVENT(bkey, bcache_writeback_collision,
        TP_PROTO(struct bkey *k),
        TP_ARGS(k)
);

#endif /* _TRACE_BCACHE_H */

/* This part must be outside protection */
#include <trace/define_trace.h>
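
/*
 * Runtime usage sketch (assuming the standard tracefs mount point):
 *
 *      # echo 1 > /sys/kernel/tracing/events/bcache/enable
 *      # cat /sys/kernel/tracing/trace_pipe
 *
 * TRACE_SYSTEM is "bcache", so all of the events above appear under
 * events/bcache/ and can also be enabled individually, e.g.
 * events/bcache/bcache_read/enable.
 */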