1
#undef TRACE_SYSTEM
#define TRACE_SYSTEM writeback

/*
 * Trace event headers are deliberately included more than once by the
 * tracing machinery; TRACE_HEADER_MULTI_READ lets define_trace.h re-read
 * the body with different macro definitions on each pass.
 */
#if !defined(_TRACE_WRITEBACK_H) || defined(TRACE_HEADER_MULTI_READ)
#define _TRACE_WRITEBACK_H

#include <linux/tracepoint.h>
#include <linux/backing-dev.h>
#include <linux/writeback.h>
11
/*
 * Decode an inode state/flags bitmask into a human-readable "A|B|C"
 * string for TP_printk().  Keep the entries in sync with the I_* flag
 * definitions this header pulls in.
 */
#define show_inode_state(state)					\
	__print_flags(state, "|",				\
		{I_DIRTY_SYNC,		"I_DIRTY_SYNC"},	\
		{I_DIRTY_DATASYNC,	"I_DIRTY_DATASYNC"},	\
		{I_DIRTY_PAGES,		"I_DIRTY_PAGES"},	\
		{I_NEW,			"I_NEW"},		\
		{I_WILL_FREE,		"I_WILL_FREE"},		\
		{I_FREEING,		"I_FREEING"},		\
		{I_CLEAR,		"I_CLEAR"},		\
		{I_SYNC,		"I_SYNC"},		\
		{I_DIRTY_TIME,		"I_DIRTY_TIME"},	\
		{I_DIRTY_TIME_EXPIRED,	"I_DIRTY_TIME_EXPIRED"}, \
		{I_REFERENCED,		"I_REFERENCED"}		\
	)
26
27
/*
 * The WB_WORK_REASON table is expanded twice.  First pass (here): each
 * entry becomes a TRACE_DEFINE_ENUM() so user space can resolve the
 * WB_REASON_* enum values in the format files.  Second pass (below):
 * each entry becomes a { value, "name" } pair for __print_symbolic().
 * EMe() marks the last entry, which must not emit a trailing comma in
 * the second expansion.
 */
#undef EM
#undef EMe
#define EM(a,b)		TRACE_DEFINE_ENUM(a);
#define EMe(a,b)	TRACE_DEFINE_ENUM(a);

#define WB_WORK_REASON							\
	EM( WB_REASON_BACKGROUND,		"background")		\
	EM( WB_REASON_VMSCAN,			"vmscan")		\
	EM( WB_REASON_SYNC,			"sync")			\
	EM( WB_REASON_PERIODIC,			"periodic")		\
	EM( WB_REASON_LAPTOP_TIMER,		"laptop_timer")		\
	EM( WB_REASON_FREE_MORE_MEM,		"free_more_memory")	\
	EM( WB_REASON_FS_FREE_SPACE,		"fs_free_space")	\
	EMe(WB_REASON_FORKER_THREAD,		"forker_thread")

/* First expansion: emit the TRACE_DEFINE_ENUM() records. */
WB_WORK_REASON
44
45
46
47
48
/*
 * Second expansion: turn WB_WORK_REASON into the { value, "name" }
 * initializer list consumed by __print_symbolic() in TP_printk().
 */
#undef EM
#undef EMe
#define EM(a,b)		{ a, b },
#define EMe(a,b)	{ a, b }

/* Opaque here; the struct is defined in the writeback implementation. */
struct wb_writeback_work;
55
56DECLARE_EVENT_CLASS(writeback_page_template,
57
58 TP_PROTO(struct page *page, struct address_space *mapping),
59
60 TP_ARGS(page, mapping),
61
62 TP_STRUCT__entry (
63 __array(char, name, 32)
64 __field(unsigned long, ino)
65 __field(pgoff_t, index)
66 ),
67
68 TP_fast_assign(
69 strncpy(__entry->name,
70 mapping ? dev_name(inode_to_bdi(mapping->host)->dev) : "(unknown)", 32);
71 __entry->ino = mapping ? mapping->host->i_ino : 0;
72 __entry->index = page->index;
73 ),
74
75 TP_printk("bdi %s: ino=%lu index=%lu",
76 __entry->name,
77 __entry->ino,
78 __entry->index
79 )
80);
81
/* A page was dirtied against @mapping. */
DEFINE_EVENT(writeback_page_template, writeback_dirty_page,

	TP_PROTO(struct page *page, struct address_space *mapping),

	TP_ARGS(page, mapping)
);
88
/* A task is waiting for writeback on this page to complete. */
DEFINE_EVENT(writeback_page_template, wait_on_page_writeback,

	TP_PROTO(struct page *page, struct address_space *mapping),

	TP_ARGS(page, mapping)
);
95
96DECLARE_EVENT_CLASS(writeback_dirty_inode_template,
97
98 TP_PROTO(struct inode *inode, int flags),
99
100 TP_ARGS(inode, flags),
101
102 TP_STRUCT__entry (
103 __array(char, name, 32)
104 __field(unsigned long, ino)
105 __field(unsigned long, state)
106 __field(unsigned long, flags)
107 ),
108
109 TP_fast_assign(
110 struct backing_dev_info *bdi = inode_to_bdi(inode);
111
112
113 strncpy(__entry->name,
114 bdi->dev ? dev_name(bdi->dev) : "(unknown)", 32);
115 __entry->ino = inode->i_ino;
116 __entry->state = inode->i_state;
117 __entry->flags = flags;
118 ),
119
120 TP_printk("bdi %s: ino=%lu state=%s flags=%s",
121 __entry->name,
122 __entry->ino,
123 show_inode_state(__entry->state),
124 show_inode_state(__entry->flags)
125 )
126);
127
/* Entry into the mark-inode-dirty path. */
DEFINE_EVENT(writeback_dirty_inode_template, writeback_mark_inode_dirty,

	TP_PROTO(struct inode *inode, int flags),

	TP_ARGS(inode, flags)
);
134
/* Start of dirtying an inode. */
DEFINE_EVENT(writeback_dirty_inode_template, writeback_dirty_inode_start,

	TP_PROTO(struct inode *inode, int flags),

	TP_ARGS(inode, flags)
);
141
/* Inode was dirtied. */
DEFINE_EVENT(writeback_dirty_inode_template, writeback_dirty_inode,

	TP_PROTO(struct inode *inode, int flags),

	TP_ARGS(inode, flags)
);
148
/*
 * Helpers used by TP_fast_assign() below to fill the cgroup_ino field.
 * Only needed (and only compilable) in the CREATE_TRACE_POINTS pass.
 * Without cgroup writeback support the field is reported as -1U.
 */
#ifdef CREATE_TRACE_POINTS
#ifdef CONFIG_CGROUP_WRITEBACK

/* Inode number of the kernfs node backing @wb's memcg cgroup. */
static inline unsigned int __trace_wb_assign_cgroup(struct bdi_writeback *wb)
{
	return wb->memcg_css->cgroup->kn->id.ino;
}

/* Same, via a writeback_control; -1U when no wb is attached. */
static inline unsigned int __trace_wbc_assign_cgroup(struct writeback_control *wbc)
{
	return wbc->wb ? __trace_wb_assign_cgroup(wbc->wb) : -1U;
}

#else	/* !CONFIG_CGROUP_WRITEBACK */

static inline unsigned int __trace_wb_assign_cgroup(struct bdi_writeback *wb)
{
	return -1U;
}

static inline unsigned int __trace_wbc_assign_cgroup(struct writeback_control *wbc)
{
	return -1U;
}

#endif	/* CONFIG_CGROUP_WRITEBACK */
#endif	/* CREATE_TRACE_POINTS */
178
179DECLARE_EVENT_CLASS(writeback_write_inode_template,
180
181 TP_PROTO(struct inode *inode, struct writeback_control *wbc),
182
183 TP_ARGS(inode, wbc),
184
185 TP_STRUCT__entry (
186 __array(char, name, 32)
187 __field(unsigned long, ino)
188 __field(int, sync_mode)
189 __field(unsigned int, cgroup_ino)
190 ),
191
192 TP_fast_assign(
193 strncpy(__entry->name,
194 dev_name(inode_to_bdi(inode)->dev), 32);
195 __entry->ino = inode->i_ino;
196 __entry->sync_mode = wbc->sync_mode;
197 __entry->cgroup_ino = __trace_wbc_assign_cgroup(wbc);
198 ),
199
200 TP_printk("bdi %s: ino=%lu sync_mode=%d cgroup_ino=%u",
201 __entry->name,
202 __entry->ino,
203 __entry->sync_mode,
204 __entry->cgroup_ino
205 )
206);
207
/* About to write an inode back. */
DEFINE_EVENT(writeback_write_inode_template, writeback_write_inode_start,

	TP_PROTO(struct inode *inode, struct writeback_control *wbc),

	TP_ARGS(inode, wbc)
);
214
/* Inode write-back completed (paired with writeback_write_inode_start). */
DEFINE_EVENT(writeback_write_inode_template, writeback_write_inode,

	TP_PROTO(struct inode *inode, struct writeback_control *wbc),

	TP_ARGS(inode, wbc)
);
221
222DECLARE_EVENT_CLASS(writeback_work_class,
223 TP_PROTO(struct bdi_writeback *wb, struct wb_writeback_work *work),
224 TP_ARGS(wb, work),
225 TP_STRUCT__entry(
226 __array(char, name, 32)
227 __field(long, nr_pages)
228 __field(dev_t, sb_dev)
229 __field(int, sync_mode)
230 __field(int, for_kupdate)
231 __field(int, range_cyclic)
232 __field(int, for_background)
233 __field(int, reason)
234 __field(unsigned int, cgroup_ino)
235 ),
236 TP_fast_assign(
237 strncpy(__entry->name,
238 wb->bdi->dev ? dev_name(wb->bdi->dev) : "(unknown)", 32);
239 __entry->nr_pages = work->nr_pages;
240 __entry->sb_dev = work->sb ? work->sb->s_dev : 0;
241 __entry->sync_mode = work->sync_mode;
242 __entry->for_kupdate = work->for_kupdate;
243 __entry->range_cyclic = work->range_cyclic;
244 __entry->for_background = work->for_background;
245 __entry->reason = work->reason;
246 __entry->cgroup_ino = __trace_wb_assign_cgroup(wb);
247 ),
248 TP_printk("bdi %s: sb_dev %d:%d nr_pages=%ld sync_mode=%d "
249 "kupdate=%d range_cyclic=%d background=%d reason=%s cgroup_ino=%u",
250 __entry->name,
251 MAJOR(__entry->sb_dev), MINOR(__entry->sb_dev),
252 __entry->nr_pages,
253 __entry->sync_mode,
254 __entry->for_kupdate,
255 __entry->range_cyclic,
256 __entry->for_background,
257 __print_symbolic(__entry->reason, WB_WORK_REASON),
258 __entry->cgroup_ino
259 )
260);
/*
 * One event per stage of a work item's life cycle, all sharing
 * writeback_work_class above: queued, picked up for execution, started,
 * finished, waited on.
 */
#define DEFINE_WRITEBACK_WORK_EVENT(name) \
DEFINE_EVENT(writeback_work_class, name, \
	TP_PROTO(struct bdi_writeback *wb, struct wb_writeback_work *work), \
	TP_ARGS(wb, work))
DEFINE_WRITEBACK_WORK_EVENT(writeback_queue);
DEFINE_WRITEBACK_WORK_EVENT(writeback_exec);
DEFINE_WRITEBACK_WORK_EVENT(writeback_start);
DEFINE_WRITEBACK_WORK_EVENT(writeback_written);
DEFINE_WRITEBACK_WORK_EVENT(writeback_wait);
270
/* Records a page count reported by the writeback path. */
TRACE_EVENT(writeback_pages_written,
	TP_PROTO(long pages_written),
	TP_ARGS(pages_written),
	TP_STRUCT__entry(
		__field(long, pages)
	),
	TP_fast_assign(
		__entry->pages = pages_written;
	),
	TP_printk("%ld", __entry->pages)
);
282
283DECLARE_EVENT_CLASS(writeback_class,
284 TP_PROTO(struct bdi_writeback *wb),
285 TP_ARGS(wb),
286 TP_STRUCT__entry(
287 __array(char, name, 32)
288 __field(unsigned int, cgroup_ino)
289 ),
290 TP_fast_assign(
291 strncpy(__entry->name, dev_name(wb->bdi->dev), 32);
292 __entry->cgroup_ino = __trace_wb_assign_cgroup(wb);
293 ),
294 TP_printk("bdi %s: cgroup_ino=%u",
295 __entry->name,
296 __entry->cgroup_ino
297 )
298);
/* Shorthand for events that only need the writeback_class payload. */
#define DEFINE_WRITEBACK_EVENT(name) \
DEFINE_EVENT(writeback_class, name, \
	TP_PROTO(struct bdi_writeback *wb), \
	TP_ARGS(wb))

/* Background writeback is being woken for this wb. */
DEFINE_WRITEBACK_EVENT(writeback_wake_background);
305
306TRACE_EVENT(writeback_bdi_register,
307 TP_PROTO(struct backing_dev_info *bdi),
308 TP_ARGS(bdi),
309 TP_STRUCT__entry(
310 __array(char, name, 32)
311 ),
312 TP_fast_assign(
313 strncpy(__entry->name, dev_name(bdi->dev), 32);
314 ),
315 TP_printk("bdi %s",
316 __entry->name
317 )
318);
319
320DECLARE_EVENT_CLASS(wbc_class,
321 TP_PROTO(struct writeback_control *wbc, struct backing_dev_info *bdi),
322 TP_ARGS(wbc, bdi),
323 TP_STRUCT__entry(
324 __array(char, name, 32)
325 __field(long, nr_to_write)
326 __field(long, pages_skipped)
327 __field(int, sync_mode)
328 __field(int, for_kupdate)
329 __field(int, for_background)
330 __field(int, for_reclaim)
331 __field(int, range_cyclic)
332 __field(long, range_start)
333 __field(long, range_end)
334 __field(unsigned int, cgroup_ino)
335 ),
336
337 TP_fast_assign(
338 strncpy(__entry->name, dev_name(bdi->dev), 32);
339 __entry->nr_to_write = wbc->nr_to_write;
340 __entry->pages_skipped = wbc->pages_skipped;
341 __entry->sync_mode = wbc->sync_mode;
342 __entry->for_kupdate = wbc->for_kupdate;
343 __entry->for_background = wbc->for_background;
344 __entry->for_reclaim = wbc->for_reclaim;
345 __entry->range_cyclic = wbc->range_cyclic;
346 __entry->range_start = (long)wbc->range_start;
347 __entry->range_end = (long)wbc->range_end;
348 __entry->cgroup_ino = __trace_wbc_assign_cgroup(wbc);
349 ),
350
351 TP_printk("bdi %s: towrt=%ld skip=%ld mode=%d kupd=%d "
352 "bgrd=%d reclm=%d cyclic=%d "
353 "start=0x%lx end=0x%lx cgroup_ino=%u",
354 __entry->name,
355 __entry->nr_to_write,
356 __entry->pages_skipped,
357 __entry->sync_mode,
358 __entry->for_kupdate,
359 __entry->for_background,
360 __entry->for_reclaim,
361 __entry->range_cyclic,
362 __entry->range_start,
363 __entry->range_end,
364 __entry->cgroup_ino
365 )
366)
367
/* Shorthand for events carrying the wbc_class payload. */
#define DEFINE_WBC_EVENT(name) \
DEFINE_EVENT(wbc_class, name, \
	TP_PROTO(struct writeback_control *wbc, struct backing_dev_info *bdi), \
	TP_ARGS(wbc, bdi))

/* A single page is being handed to ->writepage(). */
DEFINE_WBC_EVENT(wbc_writepage);
373
374TRACE_EVENT(writeback_queue_io,
375 TP_PROTO(struct bdi_writeback *wb,
376 struct wb_writeback_work *work,
377 int moved),
378 TP_ARGS(wb, work, moved),
379 TP_STRUCT__entry(
380 __array(char, name, 32)
381 __field(unsigned long, older)
382 __field(long, age)
383 __field(int, moved)
384 __field(int, reason)
385 __field(unsigned int, cgroup_ino)
386 ),
387 TP_fast_assign(
388 unsigned long *older_than_this = work->older_than_this;
389 strncpy(__entry->name, dev_name(wb->bdi->dev), 32);
390 __entry->older = older_than_this ? *older_than_this : 0;
391 __entry->age = older_than_this ?
392 (jiffies - *older_than_this) * 1000 / HZ : -1;
393 __entry->moved = moved;
394 __entry->reason = work->reason;
395 __entry->cgroup_ino = __trace_wb_assign_cgroup(wb);
396 ),
397 TP_printk("bdi %s: older=%lu age=%ld enqueue=%d reason=%s cgroup_ino=%u",
398 __entry->name,
399 __entry->older,
400 __entry->age,
401 __entry->moved,
402 __print_symbolic(__entry->reason, WB_WORK_REASON),
403 __entry->cgroup_ino
404 )
405);
406
/*
 * Snapshot of the system-wide dirty page accounting: the global node
 * page-state counters plus the thresholds computed by the caller and
 * the current global dirty limit.
 */
TRACE_EVENT(global_dirty_state,

	TP_PROTO(unsigned long background_thresh,
		 unsigned long dirty_thresh
	),

	TP_ARGS(background_thresh,
		dirty_thresh
	),

	TP_STRUCT__entry(
		__field(unsigned long,	nr_dirty)
		__field(unsigned long,	nr_writeback)
		__field(unsigned long,	nr_unstable)
		__field(unsigned long,	background_thresh)
		__field(unsigned long,	dirty_thresh)
		__field(unsigned long,	dirty_limit)
		__field(unsigned long,	nr_dirtied)
		__field(unsigned long,	nr_written)
	),

	TP_fast_assign(
		__entry->nr_dirty	= global_node_page_state(NR_FILE_DIRTY);
		__entry->nr_writeback	= global_node_page_state(NR_WRITEBACK);
		__entry->nr_unstable	= global_node_page_state(NR_UNSTABLE_NFS);
		__entry->nr_dirtied	= global_node_page_state(NR_DIRTIED);
		__entry->nr_written	= global_node_page_state(NR_WRITTEN);
		__entry->background_thresh = background_thresh;
		__entry->dirty_thresh	= dirty_thresh;
		__entry->dirty_limit	= global_wb_domain.dirty_limit;
	),

	TP_printk("dirty=%lu writeback=%lu unstable=%lu "
		  "bg_thresh=%lu thresh=%lu limit=%lu "
		  "dirtied=%lu written=%lu",
		  __entry->nr_dirty,
		  __entry->nr_writeback,
		  __entry->nr_unstable,
		  __entry->background_thresh,
		  __entry->dirty_thresh,
		  __entry->dirty_limit,
		  __entry->nr_dirtied,
		  __entry->nr_written
	)
);
452
/* Convert a page count to KB/s-style units (pages -> kilobytes). */
#define KBps(x)			((x) << (PAGE_SHIFT - 10))

/*
 * Dirty-ratelimit update for a wb: bandwidth estimates and the various
 * ratelimits, all converted to KB(/s) via KBps() for readability.
 */
TRACE_EVENT(bdi_dirty_ratelimit,

	TP_PROTO(struct bdi_writeback *wb,
		 unsigned long dirty_rate,
		 unsigned long task_ratelimit),

	TP_ARGS(wb, dirty_rate, task_ratelimit),

	TP_STRUCT__entry(
		__array(char,		bdi, 32)
		__field(unsigned long,	write_bw)
		__field(unsigned long,	avg_write_bw)
		__field(unsigned long,	dirty_rate)
		__field(unsigned long,	dirty_ratelimit)
		__field(unsigned long,	task_ratelimit)
		__field(unsigned long,	balanced_dirty_ratelimit)
		__field(unsigned int,	cgroup_ino)
	),

	TP_fast_assign(
		strlcpy(__entry->bdi, dev_name(wb->bdi->dev), 32);
		__entry->write_bw	= KBps(wb->write_bandwidth);
		__entry->avg_write_bw	= KBps(wb->avg_write_bandwidth);
		__entry->dirty_rate	= KBps(dirty_rate);
		__entry->dirty_ratelimit = KBps(wb->dirty_ratelimit);
		__entry->task_ratelimit	= KBps(task_ratelimit);
		__entry->balanced_dirty_ratelimit =
					KBps(wb->balanced_dirty_ratelimit);
		__entry->cgroup_ino	= __trace_wb_assign_cgroup(wb);
	),

	TP_printk("bdi %s: "
		  "write_bw=%lu awrite_bw=%lu dirty_rate=%lu "
		  "dirty_ratelimit=%lu task_ratelimit=%lu "
		  "balanced_dirty_ratelimit=%lu cgroup_ino=%u",
		  __entry->bdi,
		  __entry->write_bw,		/* write bandwidth */
		  __entry->avg_write_bw,	/* avg write bandwidth */
		  __entry->dirty_rate,		/* bdi dirty rate */
		  __entry->dirty_ratelimit,	/* base ratelimit */
		  __entry->task_ratelimit,	/* ratelimit with position control */
		  __entry->balanced_dirty_ratelimit, /* the unlimited ratelimit */
		  __entry->cgroup_ino
	)
);
500
/*
 * Per-throttle-round snapshot from the dirty-page balancing loop.
 * Jiffies-based durations (period, pause, paused, think) are converted
 * to milliseconds at assign time; ratelimits are converted via KBps().
 */
TRACE_EVENT(balance_dirty_pages,

	TP_PROTO(struct bdi_writeback *wb,
		 unsigned long thresh,
		 unsigned long bg_thresh,
		 unsigned long dirty,
		 unsigned long bdi_thresh,
		 unsigned long bdi_dirty,
		 unsigned long dirty_ratelimit,
		 unsigned long task_ratelimit,
		 unsigned long dirtied,
		 unsigned long period,
		 long pause,
		 unsigned long start_time),

	TP_ARGS(wb, thresh, bg_thresh, dirty, bdi_thresh, bdi_dirty,
		dirty_ratelimit, task_ratelimit,
		dirtied, period, pause, start_time),

	TP_STRUCT__entry(
		__array(	 char,	bdi, 32)
		__field(unsigned long,	limit)
		__field(unsigned long,	setpoint)
		__field(unsigned long,	dirty)
		__field(unsigned long,	bdi_setpoint)
		__field(unsigned long,	bdi_dirty)
		__field(unsigned long,	dirty_ratelimit)
		__field(unsigned long,	task_ratelimit)
		__field(unsigned int,	dirtied)
		__field(unsigned int,	dirtied_pause)
		__field(unsigned long,	paused)
		__field(	 long,	pause)
		__field(unsigned long,	period)
		__field(	 long,	think)
		__field(unsigned int,	cgroup_ino)
	),

	TP_fast_assign(
		/* midpoint of the freerun region below bg/dirty thresholds */
		unsigned long freerun = (thresh + bg_thresh) / 2;
		strlcpy(__entry->bdi, dev_name(wb->bdi->dev), 32);

		__entry->limit		= global_wb_domain.dirty_limit;
		/* global setpoint: halfway between freerun and the limit */
		__entry->setpoint	= (global_wb_domain.dirty_limit +
						freerun) / 2;
		__entry->dirty		= dirty;
		/* per-bdi setpoint, scaled by this bdi's share of thresh */
		__entry->bdi_setpoint	= __entry->setpoint *
						bdi_thresh / (thresh + 1);
		__entry->bdi_dirty	= bdi_dirty;
		__entry->dirty_ratelimit = KBps(dirty_ratelimit);
		__entry->task_ratelimit	= KBps(task_ratelimit);
		__entry->dirtied	= dirtied;
		__entry->dirtied_pause	= current->nr_dirtied_pause;
		/* ms the task spent "thinking" since it last paused */
		__entry->think		= current->dirty_paused_when == 0 ? 0 :
			 (long)(jiffies - current->dirty_paused_when) * 1000/HZ;
		__entry->period		= period * 1000 / HZ;
		__entry->pause		= pause * 1000 / HZ;
		__entry->paused		= (jiffies - start_time) * 1000 / HZ;
		__entry->cgroup_ino	= __trace_wb_assign_cgroup(wb);
	),


	TP_printk("bdi %s: "
		  "limit=%lu setpoint=%lu dirty=%lu "
		  "bdi_setpoint=%lu bdi_dirty=%lu "
		  "dirty_ratelimit=%lu task_ratelimit=%lu "
		  "dirtied=%u dirtied_pause=%u "
		  "paused=%lu pause=%ld period=%lu think=%ld cgroup_ino=%u",
		  __entry->bdi,
		  __entry->limit,
		  __entry->setpoint,
		  __entry->dirty,
		  __entry->bdi_setpoint,
		  __entry->bdi_dirty,
		  __entry->dirty_ratelimit,
		  __entry->task_ratelimit,
		  __entry->dirtied,
		  __entry->dirtied_pause,
		  __entry->paused,	/* ms */
		  __entry->pause,	/* ms */
		  __entry->period,	/* ms */
		  __entry->think,	/* ms */
		  __entry->cgroup_ino
	)
);
585
586TRACE_EVENT(writeback_sb_inodes_requeue,
587
588 TP_PROTO(struct inode *inode),
589 TP_ARGS(inode),
590
591 TP_STRUCT__entry(
592 __array(char, name, 32)
593 __field(unsigned long, ino)
594 __field(unsigned long, state)
595 __field(unsigned long, dirtied_when)
596 __field(unsigned int, cgroup_ino)
597 ),
598
599 TP_fast_assign(
600 strncpy(__entry->name,
601 dev_name(inode_to_bdi(inode)->dev), 32);
602 __entry->ino = inode->i_ino;
603 __entry->state = inode->i_state;
604 __entry->dirtied_when = inode->dirtied_when;
605 __entry->cgroup_ino = __trace_wb_assign_cgroup(inode_to_wb(inode));
606 ),
607
608 TP_printk("bdi %s: ino=%lu state=%s dirtied_when=%lu age=%lu cgroup_ino=%u",
609 __entry->name,
610 __entry->ino,
611 show_inode_state(__entry->state),
612 __entry->dirtied_when,
613 (jiffies - __entry->dirtied_when) / HZ,
614 __entry->cgroup_ino
615 )
616);
617
/*
 * Congestion wait events: the timeout that was requested and the delay
 * actually experienced, both in microseconds.
 */
DECLARE_EVENT_CLASS(writeback_congest_waited_template,

	TP_PROTO(unsigned int usec_timeout, unsigned int usec_delayed),

	TP_ARGS(usec_timeout, usec_delayed),

	TP_STRUCT__entry(
		__field(	unsigned int,	usec_timeout	)
		__field(	unsigned int,	usec_delayed	)
	),

	TP_fast_assign(
		__entry->usec_timeout	= usec_timeout;
		__entry->usec_delayed	= usec_delayed;
	),

	TP_printk("usec_timeout=%u usec_delayed=%u",
			__entry->usec_timeout,
			__entry->usec_delayed)
);
638
/* A task waited for congestion to clear. */
DEFINE_EVENT(writeback_congest_waited_template, writeback_congestion_wait,

	TP_PROTO(unsigned int usec_timeout, unsigned int usec_delayed),

	TP_ARGS(usec_timeout, usec_delayed)
);
645
/* A task waited conditionally on congestion. */
DEFINE_EVENT(writeback_congest_waited_template, writeback_wait_iff_congested,

	TP_PROTO(unsigned int usec_timeout, unsigned int usec_delayed),

	TP_ARGS(usec_timeout, usec_delayed)
);
652
653DECLARE_EVENT_CLASS(writeback_single_inode_template,
654
655 TP_PROTO(struct inode *inode,
656 struct writeback_control *wbc,
657 unsigned long nr_to_write
658 ),
659
660 TP_ARGS(inode, wbc, nr_to_write),
661
662 TP_STRUCT__entry(
663 __array(char, name, 32)
664 __field(unsigned long, ino)
665 __field(unsigned long, state)
666 __field(unsigned long, dirtied_when)
667 __field(unsigned long, writeback_index)
668 __field(long, nr_to_write)
669 __field(unsigned long, wrote)
670 __field(unsigned int, cgroup_ino)
671 ),
672
673 TP_fast_assign(
674 strncpy(__entry->name,
675 dev_name(inode_to_bdi(inode)->dev), 32);
676 __entry->ino = inode->i_ino;
677 __entry->state = inode->i_state;
678 __entry->dirtied_when = inode->dirtied_when;
679 __entry->writeback_index = inode->i_mapping->writeback_index;
680 __entry->nr_to_write = nr_to_write;
681 __entry->wrote = nr_to_write - wbc->nr_to_write;
682 __entry->cgroup_ino = __trace_wbc_assign_cgroup(wbc);
683 ),
684
685 TP_printk("bdi %s: ino=%lu state=%s dirtied_when=%lu age=%lu "
686 "index=%lu to_write=%ld wrote=%lu cgroup_ino=%u",
687 __entry->name,
688 __entry->ino,
689 show_inode_state(__entry->state),
690 __entry->dirtied_when,
691 (jiffies - __entry->dirtied_when) / HZ,
692 __entry->writeback_index,
693 __entry->nr_to_write,
694 __entry->wrote,
695 __entry->cgroup_ino
696 )
697);
698
/* About to write back a single inode. */
DEFINE_EVENT(writeback_single_inode_template, writeback_single_inode_start,
	TP_PROTO(struct inode *inode,
		 struct writeback_control *wbc,
		 unsigned long nr_to_write),
	TP_ARGS(inode, wbc, nr_to_write)
);
705
/* Finished writing back a single inode. */
DEFINE_EVENT(writeback_single_inode_template, writeback_single_inode,
	TP_PROTO(struct inode *inode,
		 struct writeback_control *wbc,
		 unsigned long nr_to_write),
	TP_ARGS(inode, wbc, nr_to_write)
);
712
/*
 * Generic inode events keyed by superblock device: identity (dev, ino),
 * state, mode and when the inode was dirtied.
 */
DECLARE_EVENT_CLASS(writeback_inode_template,
	TP_PROTO(struct inode *inode),

	TP_ARGS(inode),

	TP_STRUCT__entry(
		__field(	dev_t,	dev			)
		__field(unsigned long,	ino			)
		__field(unsigned long,	state			)
		__field(	__u16, mode			)
		__field(unsigned long,	dirtied_when		)
	),

	TP_fast_assign(
		__entry->dev	= inode->i_sb->s_dev;
		__entry->ino	= inode->i_ino;
		__entry->state	= inode->i_state;
		__entry->mode	= inode->i_mode;
		__entry->dirtied_when = inode->dirtied_when;
	),

	TP_printk("dev %d,%d ino %lu dirtied %lu state %s mode 0%o",
		  MAJOR(__entry->dev), MINOR(__entry->dev),
		  __entry->ino, __entry->dirtied_when,
		  show_inode_state(__entry->state), __entry->mode)
);
739
/* Lazytime timestamp flush for this inode. */
DEFINE_EVENT(writeback_inode_template, writeback_lazytime,
	TP_PROTO(struct inode *inode),

	TP_ARGS(inode)
);
745
/* Lazytime flush triggered from the iput path. */
DEFINE_EVENT(writeback_inode_template, writeback_lazytime_iput,
	TP_PROTO(struct inode *inode),

	TP_ARGS(inode)
);
751
/* Dirty inode enqueued onto a writeback list. */
DEFINE_EVENT(writeback_inode_template, writeback_dirty_inode_enqueue,

	TP_PROTO(struct inode *inode),

	TP_ARGS(inode)
);
758
759
760
761
762
/* Inode added to its superblock's under-writeback tracking. */
DEFINE_EVENT(writeback_inode_template, sb_mark_inode_writeback,
	TP_PROTO(struct inode *inode),
	TP_ARGS(inode)
);
767
/* Inode removed from its superblock's under-writeback tracking. */
DEFINE_EVENT(writeback_inode_template, sb_clear_inode_writeback,
	TP_PROTO(struct inode *inode),
	TP_ARGS(inode)
);
772
773#endif
774
775
776#include <trace/define_trace.h>
777