linux/include/trace/events/writeback.h
#undef TRACE_SYSTEM
#define TRACE_SYSTEM writeback

#if !defined(_TRACE_WRITEBACK_H) || defined(TRACE_HEADER_MULTI_READ)
#define _TRACE_WRITEBACK_H

#include <linux/tracepoint.h>
#include <linux/backing-dev.h>
#include <linux/writeback.h>

#define show_inode_state(state)                                 \
        __print_flags(state, "|",                               \
                {I_DIRTY_SYNC,          "I_DIRTY_SYNC"},        \
                {I_DIRTY_DATASYNC,      "I_DIRTY_DATASYNC"},    \
                {I_DIRTY_PAGES,         "I_DIRTY_PAGES"},       \
                {I_NEW,                 "I_NEW"},               \
                {I_WILL_FREE,           "I_WILL_FREE"},         \
                {I_FREEING,             "I_FREEING"},           \
                {I_CLEAR,               "I_CLEAR"},             \
                {I_SYNC,                "I_SYNC"},              \
                {I_DIRTY_TIME,          "I_DIRTY_TIME"},        \
                {I_DIRTY_TIME_EXPIRED,  "I_DIRTY_TIME_EXPIRED"}, \
                {I_REFERENCED,          "I_REFERENCED"}         \
        )
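
/*
 * Example rendering (illustrative, not captured from a real trace): an
 * inode that is dirty and currently under writeback would show up as
 * state=I_DIRTY_PAGES|I_SYNC in the trace output of the events below.
 */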

/* enums need to be exported to user space */
#undef EM
#undef EMe
#define EM(a,b)         TRACE_DEFINE_ENUM(a);
#define EMe(a,b)        TRACE_DEFINE_ENUM(a);

#define WB_WORK_REASON                                                  \
        EM( WB_REASON_BACKGROUND,               "background")           \
        EM( WB_REASON_TRY_TO_FREE_PAGES,        "try_to_free_pages")    \
        EM( WB_REASON_SYNC,                     "sync")                 \
        EM( WB_REASON_PERIODIC,                 "periodic")             \
        EM( WB_REASON_LAPTOP_TIMER,             "laptop_timer")         \
        EM( WB_REASON_FREE_MORE_MEM,            "free_more_memory")     \
        EM( WB_REASON_FS_FREE_SPACE,            "fs_free_space")        \
        EMe(WB_REASON_FORKER_THREAD,            "forker_thread")

WB_WORK_REASON

/*
 * Now redefine the EM() and EMe() macros to map the enums to the strings
 * that will be printed in the output.
 */
#undef EM
#undef EMe
#define EM(a,b)         { a, b },
#define EMe(a,b)        { a, b }
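
/*
 * The EM()/EMe() redefinition above is a two-pass expansion of
 * WB_WORK_REASON: the first pass emits TRACE_DEFINE_ENUM() so the
 * WB_REASON_* values are exported to user space, and after the
 * redefinition the same list expands to the { value, "string" } pairs
 * consumed by __print_symbolic() in the TP_printk() strings below.
 */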

struct wb_writeback_work;

TRACE_EVENT(writeback_dirty_page,

        TP_PROTO(struct page *page, struct address_space *mapping),

        TP_ARGS(page, mapping),

        TP_STRUCT__entry (
                __array(char, name, 32)
                __field(unsigned long, ino)
                __field(pgoff_t, index)
        ),

        TP_fast_assign(
                strncpy(__entry->name,
                        mapping ? dev_name(inode_to_bdi(mapping->host)->dev) : "(unknown)", 32);
                __entry->ino = mapping ? mapping->host->i_ino : 0;
                __entry->index = page->index;
        ),

        TP_printk("bdi %s: ino=%lu index=%lu",
                __entry->name,
                __entry->ino,
                __entry->index
        )
);
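
/*
 * The TRACE_EVENT() above generates a trace_writeback_dirty_page()
 * tracepoint.  A minimal usage sketch (the call site is expected to be
 * the page-dirtying accounting path in mm/page-writeback.c):
 *
 *        trace_writeback_dirty_page(page, mapping);
 *
 * The call compiles to a static-key no-op unless the event is enabled.
 */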

DECLARE_EVENT_CLASS(writeback_dirty_inode_template,

        TP_PROTO(struct inode *inode, int flags),

        TP_ARGS(inode, flags),

        TP_STRUCT__entry (
                __array(char, name, 32)
                __field(unsigned long, ino)
                __field(unsigned long, state)
                __field(unsigned long, flags)
        ),

        TP_fast_assign(
                struct backing_dev_info *bdi = inode_to_bdi(inode);

                /* may be called for files on pseudo FSes w/ unregistered bdi */
                strncpy(__entry->name,
                        bdi->dev ? dev_name(bdi->dev) : "(unknown)", 32);
                __entry->ino            = inode->i_ino;
                __entry->state          = inode->i_state;
                __entry->flags          = flags;
        ),

        TP_printk("bdi %s: ino=%lu state=%s flags=%s",
                __entry->name,
                __entry->ino,
                show_inode_state(__entry->state),
                show_inode_state(__entry->flags)
        )
);

DEFINE_EVENT(writeback_dirty_inode_template, writeback_mark_inode_dirty,

        TP_PROTO(struct inode *inode, int flags),

        TP_ARGS(inode, flags)
);

DEFINE_EVENT(writeback_dirty_inode_template, writeback_dirty_inode_start,

        TP_PROTO(struct inode *inode, int flags),

        TP_ARGS(inode, flags)
);

DEFINE_EVENT(writeback_dirty_inode_template, writeback_dirty_inode,

        TP_PROTO(struct inode *inode, int flags),

        TP_ARGS(inode, flags)
);
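
/*
 * The three events above reuse writeback_dirty_inode_template and differ
 * only in name and attach point; they are expected to fire from the inode
 * dirtying path (__mark_inode_dirty() in fs/fs-writeback.c), e.g.
 *
 *        trace_writeback_dirty_inode_start(inode, flags);
 *        trace_writeback_dirty_inode(inode, flags);
 */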

#ifdef CREATE_TRACE_POINTS
#ifdef CONFIG_CGROUP_WRITEBACK

static inline unsigned int __trace_wb_assign_cgroup(struct bdi_writeback *wb)
{
        return wb->memcg_css->cgroup->kn->ino;
}

static inline unsigned int __trace_wbc_assign_cgroup(struct writeback_control *wbc)
{
        if (wbc->wb)
                return __trace_wb_assign_cgroup(wbc->wb);
        else
                return -1U;
}
#else   /* CONFIG_CGROUP_WRITEBACK */

static inline unsigned int __trace_wb_assign_cgroup(struct bdi_writeback *wb)
{
        return -1U;
}

static inline unsigned int __trace_wbc_assign_cgroup(struct writeback_control *wbc)
{
        return -1U;
}

#endif  /* CONFIG_CGROUP_WRITEBACK */
#endif  /* CREATE_TRACE_POINTS */
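
/*
 * The helpers above are ordinary static inline functions, so they are only
 * needed (and only defined) in the translation unit that defines
 * CREATE_TRACE_POINTS and expands the event bodies.  With
 * CONFIG_CGROUP_WRITEBACK=n they return -1U, so every event below simply
 * reports cgroup_ino=4294967295.
 */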

DECLARE_EVENT_CLASS(writeback_write_inode_template,

        TP_PROTO(struct inode *inode, struct writeback_control *wbc),

        TP_ARGS(inode, wbc),

        TP_STRUCT__entry (
                __array(char, name, 32)
                __field(unsigned long, ino)
                __field(int, sync_mode)
                __field(unsigned int, cgroup_ino)
        ),

        TP_fast_assign(
                strncpy(__entry->name,
                        dev_name(inode_to_bdi(inode)->dev), 32);
                __entry->ino            = inode->i_ino;
                __entry->sync_mode      = wbc->sync_mode;
                __entry->cgroup_ino     = __trace_wbc_assign_cgroup(wbc);
        ),

        TP_printk("bdi %s: ino=%lu sync_mode=%d cgroup_ino=%u",
                __entry->name,
                __entry->ino,
                __entry->sync_mode,
                __entry->cgroup_ino
        )
);

DEFINE_EVENT(writeback_write_inode_template, writeback_write_inode_start,

        TP_PROTO(struct inode *inode, struct writeback_control *wbc),

        TP_ARGS(inode, wbc)
);

DEFINE_EVENT(writeback_write_inode_template, writeback_write_inode,

        TP_PROTO(struct inode *inode, struct writeback_control *wbc),

        TP_ARGS(inode, wbc)
);

DECLARE_EVENT_CLASS(writeback_work_class,
        TP_PROTO(struct bdi_writeback *wb, struct wb_writeback_work *work),
        TP_ARGS(wb, work),
        TP_STRUCT__entry(
                __array(char, name, 32)
                __field(long, nr_pages)
                __field(dev_t, sb_dev)
                __field(int, sync_mode)
                __field(int, for_kupdate)
                __field(int, range_cyclic)
                __field(int, for_background)
                __field(int, reason)
                __field(unsigned int, cgroup_ino)
        ),
        TP_fast_assign(
                strncpy(__entry->name,
                        wb->bdi->dev ? dev_name(wb->bdi->dev) : "(unknown)", 32);
                __entry->nr_pages = work->nr_pages;
                __entry->sb_dev = work->sb ? work->sb->s_dev : 0;
                __entry->sync_mode = work->sync_mode;
                __entry->for_kupdate = work->for_kupdate;
                __entry->range_cyclic = work->range_cyclic;
                __entry->for_background = work->for_background;
                __entry->reason = work->reason;
                __entry->cgroup_ino = __trace_wb_assign_cgroup(wb);
        ),
        TP_printk("bdi %s: sb_dev %d:%d nr_pages=%ld sync_mode=%d "
                  "kupdate=%d range_cyclic=%d background=%d reason=%s cgroup_ino=%u",
                  __entry->name,
                  MAJOR(__entry->sb_dev), MINOR(__entry->sb_dev),
                  __entry->nr_pages,
                  __entry->sync_mode,
                  __entry->for_kupdate,
                  __entry->range_cyclic,
                  __entry->for_background,
                  __print_symbolic(__entry->reason, WB_WORK_REASON),
                  __entry->cgroup_ino
        )
);
#define DEFINE_WRITEBACK_WORK_EVENT(name) \
DEFINE_EVENT(writeback_work_class, name, \
        TP_PROTO(struct bdi_writeback *wb, struct wb_writeback_work *work), \
        TP_ARGS(wb, work))
DEFINE_WRITEBACK_WORK_EVENT(writeback_queue);
DEFINE_WRITEBACK_WORK_EVENT(writeback_exec);
DEFINE_WRITEBACK_WORK_EVENT(writeback_start);
DEFINE_WRITEBACK_WORK_EVENT(writeback_written);
DEFINE_WRITEBACK_WORK_EVENT(writeback_wait);
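
/*
 * All five events above share the writeback_work_class record format and
 * differ only in where they fire, following a wb_writeback_work item from
 * queueing to completion, e.g. (sketch of a call site in fs/fs-writeback.c):
 *
 *        trace_writeback_queue(wb, work);
 */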

TRACE_EVENT(writeback_pages_written,
        TP_PROTO(long pages_written),
        TP_ARGS(pages_written),
        TP_STRUCT__entry(
                __field(long,           pages)
        ),
        TP_fast_assign(
                __entry->pages          = pages_written;
        ),
        TP_printk("%ld", __entry->pages)
);

DECLARE_EVENT_CLASS(writeback_class,
        TP_PROTO(struct bdi_writeback *wb),
        TP_ARGS(wb),
        TP_STRUCT__entry(
                __array(char, name, 32)
                __field(unsigned int, cgroup_ino)
        ),
        TP_fast_assign(
                strncpy(__entry->name, dev_name(wb->bdi->dev), 32);
                __entry->cgroup_ino = __trace_wb_assign_cgroup(wb);
        ),
        TP_printk("bdi %s: cgroup_ino=%u",
                  __entry->name,
                  __entry->cgroup_ino
        )
);
#define DEFINE_WRITEBACK_EVENT(name) \
DEFINE_EVENT(writeback_class, name, \
        TP_PROTO(struct bdi_writeback *wb), \
        TP_ARGS(wb))

DEFINE_WRITEBACK_EVENT(writeback_nowork);
DEFINE_WRITEBACK_EVENT(writeback_wake_background);

TRACE_EVENT(writeback_bdi_register,
        TP_PROTO(struct backing_dev_info *bdi),
        TP_ARGS(bdi),
        TP_STRUCT__entry(
                __array(char, name, 32)
        ),
        TP_fast_assign(
                strncpy(__entry->name, dev_name(bdi->dev), 32);
        ),
        TP_printk("bdi %s",
                __entry->name
        )
);

DECLARE_EVENT_CLASS(wbc_class,
        TP_PROTO(struct writeback_control *wbc, struct backing_dev_info *bdi),
        TP_ARGS(wbc, bdi),
        TP_STRUCT__entry(
                __array(char, name, 32)
                __field(long, nr_to_write)
                __field(long, pages_skipped)
                __field(int, sync_mode)
                __field(int, for_kupdate)
                __field(int, for_background)
                __field(int, for_reclaim)
                __field(int, range_cyclic)
                __field(long, range_start)
                __field(long, range_end)
                __field(unsigned int, cgroup_ino)
        ),

        TP_fast_assign(
                strncpy(__entry->name, dev_name(bdi->dev), 32);
                __entry->nr_to_write    = wbc->nr_to_write;
                __entry->pages_skipped  = wbc->pages_skipped;
                __entry->sync_mode      = wbc->sync_mode;
                __entry->for_kupdate    = wbc->for_kupdate;
                __entry->for_background = wbc->for_background;
                __entry->for_reclaim    = wbc->for_reclaim;
                __entry->range_cyclic   = wbc->range_cyclic;
                __entry->range_start    = (long)wbc->range_start;
                __entry->range_end      = (long)wbc->range_end;
                __entry->cgroup_ino     = __trace_wbc_assign_cgroup(wbc);
        ),

        TP_printk("bdi %s: towrt=%ld skip=%ld mode=%d kupd=%d "
                "bgrd=%d reclm=%d cyclic=%d "
                "start=0x%lx end=0x%lx cgroup_ino=%u",
                __entry->name,
                __entry->nr_to_write,
                __entry->pages_skipped,
                __entry->sync_mode,
                __entry->for_kupdate,
                __entry->for_background,
                __entry->for_reclaim,
                __entry->range_cyclic,
                __entry->range_start,
                __entry->range_end,
                __entry->cgroup_ino
        )
)

#define DEFINE_WBC_EVENT(name) \
DEFINE_EVENT(wbc_class, name, \
        TP_PROTO(struct writeback_control *wbc, struct backing_dev_info *bdi), \
        TP_ARGS(wbc, bdi))
DEFINE_WBC_EVENT(wbc_writepage);

TRACE_EVENT(writeback_queue_io,
        TP_PROTO(struct bdi_writeback *wb,
                 struct wb_writeback_work *work,
                 int moved),
        TP_ARGS(wb, work, moved),
        TP_STRUCT__entry(
                __array(char,           name, 32)
                __field(unsigned long,  older)
                __field(long,           age)
                __field(int,            moved)
                __field(int,            reason)
                __field(unsigned int,   cgroup_ino)
        ),
        TP_fast_assign(
                unsigned long *older_than_this = work->older_than_this;
                strncpy(__entry->name, dev_name(wb->bdi->dev), 32);
                __entry->older  = older_than_this ?  *older_than_this : 0;
                __entry->age    = older_than_this ?
                                  (jiffies - *older_than_this) * 1000 / HZ : -1;
                __entry->moved  = moved;
                __entry->reason = work->reason;
                __entry->cgroup_ino     = __trace_wb_assign_cgroup(wb);
        ),
        TP_printk("bdi %s: older=%lu age=%ld enqueue=%d reason=%s cgroup_ino=%u",
                __entry->name,
                __entry->older, /* older_than_this in jiffies */
                __entry->age,   /* older_than_this in relative milliseconds */
                __entry->moved,
                __print_symbolic(__entry->reason, WB_WORK_REASON),
                __entry->cgroup_ino
        )
);

TRACE_EVENT(global_dirty_state,

        TP_PROTO(unsigned long background_thresh,
                 unsigned long dirty_thresh
        ),

        TP_ARGS(background_thresh,
                dirty_thresh
        ),

        TP_STRUCT__entry(
                __field(unsigned long,  nr_dirty)
                __field(unsigned long,  nr_writeback)
                __field(unsigned long,  nr_unstable)
                __field(unsigned long,  background_thresh)
                __field(unsigned long,  dirty_thresh)
                __field(unsigned long,  dirty_limit)
                __field(unsigned long,  nr_dirtied)
                __field(unsigned long,  nr_written)
        ),

        TP_fast_assign(
                __entry->nr_dirty       = global_page_state(NR_FILE_DIRTY);
                __entry->nr_writeback   = global_page_state(NR_WRITEBACK);
                __entry->nr_unstable    = global_page_state(NR_UNSTABLE_NFS);
                __entry->nr_dirtied     = global_page_state(NR_DIRTIED);
                __entry->nr_written     = global_page_state(NR_WRITTEN);
                __entry->background_thresh = background_thresh;
                __entry->dirty_thresh   = dirty_thresh;
                __entry->dirty_limit    = global_wb_domain.dirty_limit;
        ),

        TP_printk("dirty=%lu writeback=%lu unstable=%lu "
                  "bg_thresh=%lu thresh=%lu limit=%lu "
                  "dirtied=%lu written=%lu",
                  __entry->nr_dirty,
                  __entry->nr_writeback,
                  __entry->nr_unstable,
                  __entry->background_thresh,
                  __entry->dirty_thresh,
                  __entry->dirty_limit,
                  __entry->nr_dirtied,
                  __entry->nr_written
        )
);

#define KBps(x)                 ((x) << (PAGE_SHIFT - 10))
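
/*
 * KBps() converts a page count (or pages/s rate) to KB: a page is
 * 2^PAGE_SHIFT bytes and a KB is 2^10 bytes, hence the shift by
 * PAGE_SHIFT - 10 (e.g. with 4KB pages, 3 pages << 2 = 12 KB).
 */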

TRACE_EVENT(bdi_dirty_ratelimit,

        TP_PROTO(struct bdi_writeback *wb,
                 unsigned long dirty_rate,
                 unsigned long task_ratelimit),

        TP_ARGS(wb, dirty_rate, task_ratelimit),

        TP_STRUCT__entry(
                __array(char,           bdi, 32)
                __field(unsigned long,  write_bw)
                __field(unsigned long,  avg_write_bw)
                __field(unsigned long,  dirty_rate)
                __field(unsigned long,  dirty_ratelimit)
                __field(unsigned long,  task_ratelimit)
                __field(unsigned long,  balanced_dirty_ratelimit)
                __field(unsigned int,   cgroup_ino)
        ),

        TP_fast_assign(
                strlcpy(__entry->bdi, dev_name(wb->bdi->dev), 32);
                __entry->write_bw       = KBps(wb->write_bandwidth);
                __entry->avg_write_bw   = KBps(wb->avg_write_bandwidth);
                __entry->dirty_rate     = KBps(dirty_rate);
                __entry->dirty_ratelimit = KBps(wb->dirty_ratelimit);
                __entry->task_ratelimit = KBps(task_ratelimit);
                __entry->balanced_dirty_ratelimit =
                                        KBps(wb->balanced_dirty_ratelimit);
                __entry->cgroup_ino     = __trace_wb_assign_cgroup(wb);
        ),

        TP_printk("bdi %s: "
                  "write_bw=%lu awrite_bw=%lu dirty_rate=%lu "
                  "dirty_ratelimit=%lu task_ratelimit=%lu "
                  "balanced_dirty_ratelimit=%lu cgroup_ino=%u",
                  __entry->bdi,
                  __entry->write_bw,            /* write bandwidth */
                  __entry->avg_write_bw,        /* avg write bandwidth */
                  __entry->dirty_rate,          /* bdi dirty rate */
                  __entry->dirty_ratelimit,     /* base ratelimit */
                  __entry->task_ratelimit, /* ratelimit with position control */
                  __entry->balanced_dirty_ratelimit, /* the balanced ratelimit */
                  __entry->cgroup_ino
        )
);

TRACE_EVENT(balance_dirty_pages,

        TP_PROTO(struct bdi_writeback *wb,
                 unsigned long thresh,
                 unsigned long bg_thresh,
                 unsigned long dirty,
                 unsigned long bdi_thresh,
                 unsigned long bdi_dirty,
                 unsigned long dirty_ratelimit,
                 unsigned long task_ratelimit,
                 unsigned long dirtied,
                 unsigned long period,
                 long pause,
                 unsigned long start_time),

        TP_ARGS(wb, thresh, bg_thresh, dirty, bdi_thresh, bdi_dirty,
                dirty_ratelimit, task_ratelimit,
                dirtied, period, pause, start_time),

        TP_STRUCT__entry(
                __array(         char,  bdi, 32)
                __field(unsigned long,  limit)
                __field(unsigned long,  setpoint)
                __field(unsigned long,  dirty)
                __field(unsigned long,  bdi_setpoint)
                __field(unsigned long,  bdi_dirty)
                __field(unsigned long,  dirty_ratelimit)
                __field(unsigned long,  task_ratelimit)
                __field(unsigned int,   dirtied)
                __field(unsigned int,   dirtied_pause)
                __field(unsigned long,  paused)
                __field(         long,  pause)
                __field(unsigned long,  period)
                __field(         long,  think)
                __field(unsigned int,   cgroup_ino)
        ),

        TP_fast_assign(
                unsigned long freerun = (thresh + bg_thresh) / 2;
                strlcpy(__entry->bdi, dev_name(wb->bdi->dev), 32);

                __entry->limit          = global_wb_domain.dirty_limit;
                __entry->setpoint       = (global_wb_domain.dirty_limit +
                                                freerun) / 2;
                __entry->dirty          = dirty;
                __entry->bdi_setpoint   = __entry->setpoint *
                                                bdi_thresh / (thresh + 1);
                __entry->bdi_dirty      = bdi_dirty;
                __entry->dirty_ratelimit = KBps(dirty_ratelimit);
                __entry->task_ratelimit = KBps(task_ratelimit);
                __entry->dirtied        = dirtied;
                __entry->dirtied_pause  = current->nr_dirtied_pause;
                __entry->think          = current->dirty_paused_when == 0 ? 0 :
                         (long)(jiffies - current->dirty_paused_when) * 1000/HZ;
                __entry->period         = period * 1000 / HZ;
                __entry->pause          = pause * 1000 / HZ;
                __entry->paused         = (jiffies - start_time) * 1000 / HZ;
                __entry->cgroup_ino     = __trace_wb_assign_cgroup(wb);
        ),


        TP_printk("bdi %s: "
                  "limit=%lu setpoint=%lu dirty=%lu "
                  "bdi_setpoint=%lu bdi_dirty=%lu "
                  "dirty_ratelimit=%lu task_ratelimit=%lu "
                  "dirtied=%u dirtied_pause=%u "
                  "paused=%lu pause=%ld period=%lu think=%ld cgroup_ino=%u",
                  __entry->bdi,
                  __entry->limit,
                  __entry->setpoint,
                  __entry->dirty,
                  __entry->bdi_setpoint,
                  __entry->bdi_dirty,
                  __entry->dirty_ratelimit,
                  __entry->task_ratelimit,
                  __entry->dirtied,
                  __entry->dirtied_pause,
                  __entry->paused,      /* ms */
                  __entry->pause,       /* ms */
                  __entry->period,      /* ms */
                  __entry->think,       /* ms */
                  __entry->cgroup_ino
          )
);
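
/*
 * Worked example of the setpoint arithmetic in TP_fast_assign() above,
 * with made-up numbers: thresh=1000 and bg_thresh=500 give freerun=750;
 * with a global dirty_limit of 1000 the reported setpoint is
 * (1000 + 750) / 2 = 875, and bdi_thresh=100 yields
 * bdi_setpoint = 875 * 100 / 1001 = 87 (integer division).
 */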

TRACE_EVENT(writeback_sb_inodes_requeue,

        TP_PROTO(struct inode *inode),
        TP_ARGS(inode),

        TP_STRUCT__entry(
                __array(char, name, 32)
                __field(unsigned long, ino)
                __field(unsigned long, state)
                __field(unsigned long, dirtied_when)
                __field(unsigned int, cgroup_ino)
        ),

        TP_fast_assign(
                strncpy(__entry->name,
                        dev_name(inode_to_bdi(inode)->dev), 32);
                __entry->ino            = inode->i_ino;
                __entry->state          = inode->i_state;
                __entry->dirtied_when   = inode->dirtied_when;
                __entry->cgroup_ino     = __trace_wb_assign_cgroup(inode_to_wb(inode));
        ),

        TP_printk("bdi %s: ino=%lu state=%s dirtied_when=%lu age=%lu cgroup_ino=%u",
                  __entry->name,
                  __entry->ino,
                  show_inode_state(__entry->state),
                  __entry->dirtied_when,
                  (jiffies - __entry->dirtied_when) / HZ,
                  __entry->cgroup_ino
        )
);

DECLARE_EVENT_CLASS(writeback_congest_waited_template,

        TP_PROTO(unsigned int usec_timeout, unsigned int usec_delayed),

        TP_ARGS(usec_timeout, usec_delayed),

        TP_STRUCT__entry(
                __field(        unsigned int,   usec_timeout    )
                __field(        unsigned int,   usec_delayed    )
        ),

        TP_fast_assign(
                __entry->usec_timeout   = usec_timeout;
                __entry->usec_delayed   = usec_delayed;
        ),

        TP_printk("usec_timeout=%u usec_delayed=%u",
                        __entry->usec_timeout,
                        __entry->usec_delayed)
);

DEFINE_EVENT(writeback_congest_waited_template, writeback_congestion_wait,

        TP_PROTO(unsigned int usec_timeout, unsigned int usec_delayed),

        TP_ARGS(usec_timeout, usec_delayed)
);

DEFINE_EVENT(writeback_congest_waited_template, writeback_wait_iff_congested,

        TP_PROTO(unsigned int usec_timeout, unsigned int usec_delayed),

        TP_ARGS(usec_timeout, usec_delayed)
);

DECLARE_EVENT_CLASS(writeback_single_inode_template,

        TP_PROTO(struct inode *inode,
                 struct writeback_control *wbc,
                 unsigned long nr_to_write
        ),

        TP_ARGS(inode, wbc, nr_to_write),

        TP_STRUCT__entry(
                __array(char, name, 32)
                __field(unsigned long, ino)
                __field(unsigned long, state)
                __field(unsigned long, dirtied_when)
                __field(unsigned long, writeback_index)
                __field(long, nr_to_write)
                __field(unsigned long, wrote)
                __field(unsigned int, cgroup_ino)
        ),

        TP_fast_assign(
                strncpy(__entry->name,
                        dev_name(inode_to_bdi(inode)->dev), 32);
                __entry->ino            = inode->i_ino;
                __entry->state          = inode->i_state;
                __entry->dirtied_when   = inode->dirtied_when;
                __entry->writeback_index = inode->i_mapping->writeback_index;
                __entry->nr_to_write    = nr_to_write;
                __entry->wrote          = nr_to_write - wbc->nr_to_write;
                __entry->cgroup_ino     = __trace_wbc_assign_cgroup(wbc);
        ),

        TP_printk("bdi %s: ino=%lu state=%s dirtied_when=%lu age=%lu "
                  "index=%lu to_write=%ld wrote=%lu cgroup_ino=%u",
                  __entry->name,
                  __entry->ino,
                  show_inode_state(__entry->state),
                  __entry->dirtied_when,
                  (jiffies - __entry->dirtied_when) / HZ,
                  __entry->writeback_index,
                  __entry->nr_to_write,
                  __entry->wrote,
                  __entry->cgroup_ino
        )
);
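
/*
 * In the template above, wrote = nr_to_write - wbc->nr_to_write: the
 * caller passes in the original page budget, and the difference from the
 * budget still remaining in the wbc is the number of pages actually
 * written for this inode.
 */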

DEFINE_EVENT(writeback_single_inode_template, writeback_single_inode_start,
        TP_PROTO(struct inode *inode,
                 struct writeback_control *wbc,
                 unsigned long nr_to_write),
        TP_ARGS(inode, wbc, nr_to_write)
);

DEFINE_EVENT(writeback_single_inode_template, writeback_single_inode,
        TP_PROTO(struct inode *inode,
                 struct writeback_control *wbc,
                 unsigned long nr_to_write),
        TP_ARGS(inode, wbc, nr_to_write)
);

DECLARE_EVENT_CLASS(writeback_lazytime_template,
        TP_PROTO(struct inode *inode),

        TP_ARGS(inode),

        TP_STRUCT__entry(
                __field(        dev_t,  dev                     )
                __field(unsigned long,  ino                     )
                __field(unsigned long,  state                   )
                __field(        __u16, mode                     )
                __field(unsigned long, dirtied_when             )
        ),

        TP_fast_assign(
                __entry->dev    = inode->i_sb->s_dev;
                __entry->ino    = inode->i_ino;
                __entry->state  = inode->i_state;
                __entry->mode   = inode->i_mode;
                __entry->dirtied_when = inode->dirtied_when;
        ),

        TP_printk("dev %d,%d ino %lu dirtied %lu state %s mode 0%o",
                  MAJOR(__entry->dev), MINOR(__entry->dev),
                  __entry->ino, __entry->dirtied_when,
                  show_inode_state(__entry->state), __entry->mode)
);

DEFINE_EVENT(writeback_lazytime_template, writeback_lazytime,
        TP_PROTO(struct inode *inode),

        TP_ARGS(inode)
);

DEFINE_EVENT(writeback_lazytime_template, writeback_lazytime_iput,
        TP_PROTO(struct inode *inode),

        TP_ARGS(inode)
);

DEFINE_EVENT(writeback_lazytime_template, writeback_dirty_inode_enqueue,

        TP_PROTO(struct inode *inode),

        TP_ARGS(inode)
);

#endif /* _TRACE_WRITEBACK_H */
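
/*
 * define_trace.h re-reads this header (TRACE_HEADER_MULTI_READ lets it past
 * the guard above) when CREATE_TRACE_POINTS is defined, turning the event
 * descriptions into the actual tracepoint definitions; that is why the
 * include below must stay outside the normal include-guard protection.
 */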

/* This part must be outside protection */
#include <trace/define_trace.h>