1#undef TRACE_SYSTEM
2#define TRACE_SYSTEM writeback
3
4#if !defined(_TRACE_WRITEBACK_H) || defined(TRACE_HEADER_MULTI_READ)
5#define _TRACE_WRITEBACK_H
6
7#include <linux/backing-dev.h>
8#include <linux/writeback.h>
9
/*
 * Render an inode ->i_state bitmask as a '|'-separated list of I_* flag
 * names (e.g. "I_DIRTY_SYNC|I_SYNC") for use in TP_printk() output.
 */
#define show_inode_state(state)					\
	__print_flags(state, "|",				\
		{I_DIRTY_SYNC,		"I_DIRTY_SYNC"},	\
		{I_DIRTY_DATASYNC,	"I_DIRTY_DATASYNC"},	\
		{I_DIRTY_PAGES,		"I_DIRTY_PAGES"},	\
		{I_NEW,			"I_NEW"},		\
		{I_WILL_FREE,		"I_WILL_FREE"},		\
		{I_FREEING,		"I_FREEING"},		\
		{I_CLEAR,		"I_CLEAR"},		\
		{I_SYNC,		"I_SYNC"},		\
		{I_REFERENCED,		"I_REFERENCED"}		\
	)
22
/*
 * WB_REASON_* -> string table for __print_symbolic(); used by the
 * writeback_work_class and writeback_queue_io events below.
 */
#define WB_WORK_REASON							\
		{WB_REASON_BACKGROUND,		"background"},		\
		{WB_REASON_TRY_TO_FREE_PAGES,	"try_to_free_pages"},	\
		{WB_REASON_SYNC,		"sync"},		\
		{WB_REASON_PERIODIC,		"periodic"},		\
		{WB_REASON_LAPTOP_TIMER,	"laptop_timer"},	\
		{WB_REASON_FREE_MORE_MEM,	"free_more_memory"},	\
		{WB_REASON_FS_FREE_SPACE,	"fs_free_space"},	\
		{WB_REASON_FORKER_THREAD,	"forker_thread"}
32
33struct wb_writeback_work;
34
35TRACE_EVENT(writeback_dirty_page,
36
37 TP_PROTO(struct page *page, struct address_space *mapping),
38
39 TP_ARGS(page, mapping),
40
41 TP_STRUCT__entry (
42 __array(char, name, 32)
43 __field(unsigned long, ino)
44 __field(pgoff_t, index)
45 ),
46
47 TP_fast_assign(
48 strncpy(__entry->name,
49 mapping ? dev_name(mapping->backing_dev_info->dev) : "(unknown)", 32);
50 __entry->ino = mapping ? mapping->host->i_ino : 0;
51 __entry->index = page->index;
52 ),
53
54 TP_printk("bdi %s: ino=%lu index=%lu",
55 __entry->name,
56 __entry->ino,
57 __entry->index
58 )
59);
60
61DECLARE_EVENT_CLASS(writeback_dirty_inode_template,
62
63 TP_PROTO(struct inode *inode, int flags),
64
65 TP_ARGS(inode, flags),
66
67 TP_STRUCT__entry (
68 __array(char, name, 32)
69 __field(unsigned long, ino)
70 __field(unsigned long, flags)
71 ),
72
73 TP_fast_assign(
74 struct backing_dev_info *bdi = inode->i_mapping->backing_dev_info;
75
76
77 strncpy(__entry->name,
78 bdi->dev ? dev_name(bdi->dev) : "(unknown)", 32);
79 __entry->ino = inode->i_ino;
80 __entry->flags = flags;
81 ),
82
83 TP_printk("bdi %s: ino=%lu flags=%s",
84 __entry->name,
85 __entry->ino,
86 show_inode_state(__entry->flags)
87 )
88);
89
/*
 * Instances of writeback_dirty_inode_template.  NOTE(review): the _start
 * variant presumably fires at entry to the dirtying path and the plain
 * variant once the state is updated -- confirm against the call sites.
 */
DEFINE_EVENT(writeback_dirty_inode_template, writeback_dirty_inode_start,

	TP_PROTO(struct inode *inode, int flags),

	TP_ARGS(inode, flags)
);

DEFINE_EVENT(writeback_dirty_inode_template, writeback_dirty_inode,

	TP_PROTO(struct inode *inode, int flags),

	TP_ARGS(inode, flags)
);
103
104DECLARE_EVENT_CLASS(writeback_write_inode_template,
105
106 TP_PROTO(struct inode *inode, struct writeback_control *wbc),
107
108 TP_ARGS(inode, wbc),
109
110 TP_STRUCT__entry (
111 __array(char, name, 32)
112 __field(unsigned long, ino)
113 __field(int, sync_mode)
114 ),
115
116 TP_fast_assign(
117 strncpy(__entry->name,
118 dev_name(inode->i_mapping->backing_dev_info->dev), 32);
119 __entry->ino = inode->i_ino;
120 __entry->sync_mode = wbc->sync_mode;
121 ),
122
123 TP_printk("bdi %s: ino=%lu sync_mode=%d",
124 __entry->name,
125 __entry->ino,
126 __entry->sync_mode
127 )
128);
129
/*
 * Instances of writeback_write_inode_template bracketing the
 * ->write_inode() call (start / completion) -- confirm at call sites.
 */
DEFINE_EVENT(writeback_write_inode_template, writeback_write_inode_start,

	TP_PROTO(struct inode *inode, struct writeback_control *wbc),

	TP_ARGS(inode, wbc)
);

DEFINE_EVENT(writeback_write_inode_template, writeback_write_inode,

	TP_PROTO(struct inode *inode, struct writeback_control *wbc),

	TP_ARGS(inode, wbc)
);
143
144DECLARE_EVENT_CLASS(writeback_work_class,
145 TP_PROTO(struct backing_dev_info *bdi, struct wb_writeback_work *work),
146 TP_ARGS(bdi, work),
147 TP_STRUCT__entry(
148 __array(char, name, 32)
149 __field(long, nr_pages)
150 __field(dev_t, sb_dev)
151 __field(int, sync_mode)
152 __field(int, for_kupdate)
153 __field(int, range_cyclic)
154 __field(int, for_background)
155 __field(int, reason)
156 ),
157 TP_fast_assign(
158 struct device *dev = bdi->dev;
159 if (!dev)
160 dev = default_backing_dev_info.dev;
161 strncpy(__entry->name, dev_name(dev), 32);
162 __entry->nr_pages = work->nr_pages;
163 __entry->sb_dev = work->sb ? work->sb->s_dev : 0;
164 __entry->sync_mode = work->sync_mode;
165 __entry->for_kupdate = work->for_kupdate;
166 __entry->range_cyclic = work->range_cyclic;
167 __entry->for_background = work->for_background;
168 __entry->reason = work->reason;
169 ),
170 TP_printk("bdi %s: sb_dev %d:%d nr_pages=%ld sync_mode=%d "
171 "kupdate=%d range_cyclic=%d background=%d reason=%s",
172 __entry->name,
173 MAJOR(__entry->sb_dev), MINOR(__entry->sb_dev),
174 __entry->nr_pages,
175 __entry->sync_mode,
176 __entry->for_kupdate,
177 __entry->range_cyclic,
178 __entry->for_background,
179 __print_symbolic(__entry->reason, WB_WORK_REASON)
180 )
181);
/* Stamp out the concrete events that share writeback_work_class. */
#define DEFINE_WRITEBACK_WORK_EVENT(name) \
DEFINE_EVENT(writeback_work_class, name, \
	TP_PROTO(struct backing_dev_info *bdi, struct wb_writeback_work *work), \
	TP_ARGS(bdi, work))
DEFINE_WRITEBACK_WORK_EVENT(writeback_nothread);
DEFINE_WRITEBACK_WORK_EVENT(writeback_queue);
DEFINE_WRITEBACK_WORK_EVENT(writeback_exec);
DEFINE_WRITEBACK_WORK_EVENT(writeback_start);
DEFINE_WRITEBACK_WORK_EVENT(writeback_written);
DEFINE_WRITEBACK_WORK_EVENT(writeback_wait);
192
/* Records the page count written by one flusher pass. */
TRACE_EVENT(writeback_pages_written,
	TP_PROTO(long pages_written),
	TP_ARGS(pages_written),
	TP_STRUCT__entry(
		__field(long,		pages)
	),
	TP_fast_assign(
		__entry->pages		= pages_written;
	),
	TP_printk("%ld", __entry->pages)
);
204
205DECLARE_EVENT_CLASS(writeback_class,
206 TP_PROTO(struct backing_dev_info *bdi),
207 TP_ARGS(bdi),
208 TP_STRUCT__entry(
209 __array(char, name, 32)
210 ),
211 TP_fast_assign(
212 strncpy(__entry->name, dev_name(bdi->dev), 32);
213 ),
214 TP_printk("bdi %s",
215 __entry->name
216 )
217);
/* Stamp out bdi lifecycle / flusher-thread events sharing writeback_class. */
#define DEFINE_WRITEBACK_EVENT(name) \
DEFINE_EVENT(writeback_class, name, \
	TP_PROTO(struct backing_dev_info *bdi), \
	TP_ARGS(bdi))

DEFINE_WRITEBACK_EVENT(writeback_nowork);
DEFINE_WRITEBACK_EVENT(writeback_wake_background);
DEFINE_WRITEBACK_EVENT(writeback_wake_thread);
DEFINE_WRITEBACK_EVENT(writeback_wake_forker_thread);
DEFINE_WRITEBACK_EVENT(writeback_bdi_register);
DEFINE_WRITEBACK_EVENT(writeback_bdi_unregister);
DEFINE_WRITEBACK_EVENT(writeback_thread_start);
DEFINE_WRITEBACK_EVENT(writeback_thread_stop);
231
232DECLARE_EVENT_CLASS(wbc_class,
233 TP_PROTO(struct writeback_control *wbc, struct backing_dev_info *bdi),
234 TP_ARGS(wbc, bdi),
235 TP_STRUCT__entry(
236 __array(char, name, 32)
237 __field(long, nr_to_write)
238 __field(long, pages_skipped)
239 __field(int, sync_mode)
240 __field(int, for_kupdate)
241 __field(int, for_background)
242 __field(int, for_reclaim)
243 __field(int, range_cyclic)
244 __field(long, range_start)
245 __field(long, range_end)
246 ),
247
248 TP_fast_assign(
249 strncpy(__entry->name, dev_name(bdi->dev), 32);
250 __entry->nr_to_write = wbc->nr_to_write;
251 __entry->pages_skipped = wbc->pages_skipped;
252 __entry->sync_mode = wbc->sync_mode;
253 __entry->for_kupdate = wbc->for_kupdate;
254 __entry->for_background = wbc->for_background;
255 __entry->for_reclaim = wbc->for_reclaim;
256 __entry->range_cyclic = wbc->range_cyclic;
257 __entry->range_start = (long)wbc->range_start;
258 __entry->range_end = (long)wbc->range_end;
259 ),
260
261 TP_printk("bdi %s: towrt=%ld skip=%ld mode=%d kupd=%d "
262 "bgrd=%d reclm=%d cyclic=%d "
263 "start=0x%lx end=0x%lx",
264 __entry->name,
265 __entry->nr_to_write,
266 __entry->pages_skipped,
267 __entry->sync_mode,
268 __entry->for_kupdate,
269 __entry->for_background,
270 __entry->for_reclaim,
271 __entry->range_cyclic,
272 __entry->range_start,
273 __entry->range_end)
274)
275
/* Stamp out events sharing wbc_class. */
#define DEFINE_WBC_EVENT(name) \
DEFINE_EVENT(wbc_class, name, \
	TP_PROTO(struct writeback_control *wbc, struct backing_dev_info *bdi), \
	TP_ARGS(wbc, bdi))
DEFINE_WBC_EVENT(wbc_writepage);
281
282TRACE_EVENT(writeback_queue_io,
283 TP_PROTO(struct bdi_writeback *wb,
284 struct wb_writeback_work *work,
285 int moved),
286 TP_ARGS(wb, work, moved),
287 TP_STRUCT__entry(
288 __array(char, name, 32)
289 __field(unsigned long, older)
290 __field(long, age)
291 __field(int, moved)
292 __field(int, reason)
293 ),
294 TP_fast_assign(
295 unsigned long *older_than_this = work->older_than_this;
296 strncpy(__entry->name, dev_name(wb->bdi->dev), 32);
297 __entry->older = older_than_this ? *older_than_this : 0;
298 __entry->age = older_than_this ?
299 (jiffies - *older_than_this) * 1000 / HZ : -1;
300 __entry->moved = moved;
301 __entry->reason = work->reason;
302 ),
303 TP_printk("bdi %s: older=%lu age=%ld enqueue=%d reason=%s",
304 __entry->name,
305 __entry->older,
306 __entry->age,
307 __entry->moved,
308 __print_symbolic(__entry->reason, WB_WORK_REASON)
309 )
310);
311
/*
 * Snapshot of global dirty-page accounting at the time the dirty
 * thresholds are computed: counters sampled via global_page_state(),
 * thresholds passed in by the caller, limit from global_dirty_limit.
 */
TRACE_EVENT(global_dirty_state,

	TP_PROTO(unsigned long background_thresh,
		 unsigned long dirty_thresh
	),

	TP_ARGS(background_thresh,
		dirty_thresh
	),

	TP_STRUCT__entry(
		__field(unsigned long,	nr_dirty)
		__field(unsigned long,	nr_writeback)
		__field(unsigned long,	nr_unstable)
		__field(unsigned long,	background_thresh)
		__field(unsigned long,	dirty_thresh)
		__field(unsigned long,	dirty_limit)
		__field(unsigned long,	nr_dirtied)
		__field(unsigned long,	nr_written)
	),

	TP_fast_assign(
		__entry->nr_dirty	= global_page_state(NR_FILE_DIRTY);
		__entry->nr_writeback	= global_page_state(NR_WRITEBACK);
		__entry->nr_unstable	= global_page_state(NR_UNSTABLE_NFS);
		__entry->nr_dirtied	= global_page_state(NR_DIRTIED);
		__entry->nr_written	= global_page_state(NR_WRITTEN);
		__entry->background_thresh = background_thresh;
		__entry->dirty_thresh	= dirty_thresh;
		__entry->dirty_limit = global_dirty_limit;
	),

	TP_printk("dirty=%lu writeback=%lu unstable=%lu "
		  "bg_thresh=%lu thresh=%lu limit=%lu "
		  "dirtied=%lu written=%lu",
		  __entry->nr_dirty,
		  __entry->nr_writeback,
		  __entry->nr_unstable,
		  __entry->background_thresh,
		  __entry->dirty_thresh,
		  __entry->dirty_limit,
		  __entry->nr_dirtied,
		  __entry->nr_written
	)
);
357
/* Convert a page count to kilobytes: << PAGE_SHIFT gives bytes, >> 10 KB. */
#define KBps(x)			((x) << (PAGE_SHIFT - 10))
359
/*
 * Reports the per-bdi write bandwidth and dirty-ratelimit estimates.
 * All values are converted from pages to KB(/s) via KBps().
 */
TRACE_EVENT(bdi_dirty_ratelimit,

	TP_PROTO(struct backing_dev_info *bdi,
		 unsigned long dirty_rate,
		 unsigned long task_ratelimit),

	TP_ARGS(bdi, dirty_rate, task_ratelimit),

	TP_STRUCT__entry(
		__array(char,		bdi, 32)
		__field(unsigned long,	write_bw)
		__field(unsigned long,	avg_write_bw)
		__field(unsigned long,	dirty_rate)
		__field(unsigned long,	dirty_ratelimit)
		__field(unsigned long,	task_ratelimit)
		__field(unsigned long,	balanced_dirty_ratelimit)
	),

	TP_fast_assign(
		strlcpy(__entry->bdi, dev_name(bdi->dev), 32);
		__entry->write_bw	= KBps(bdi->write_bandwidth);
		__entry->avg_write_bw	= KBps(bdi->avg_write_bandwidth);
		__entry->dirty_rate	= KBps(dirty_rate);
		__entry->dirty_ratelimit = KBps(bdi->dirty_ratelimit);
		__entry->task_ratelimit	= KBps(task_ratelimit);
		__entry->balanced_dirty_ratelimit =
					KBps(bdi->balanced_dirty_ratelimit);
	),

	TP_printk("bdi %s: "
		  "write_bw=%lu awrite_bw=%lu dirty_rate=%lu "
		  "dirty_ratelimit=%lu task_ratelimit=%lu "
		  "balanced_dirty_ratelimit=%lu",
		  __entry->bdi,
		  __entry->write_bw,		/* write bandwidth */
		  __entry->avg_write_bw,	/* avg write bandwidth */
		  __entry->dirty_rate,		/* bdi dirty rate */
		  __entry->dirty_ratelimit,	/* base ratelimit */
		  __entry->task_ratelimit,	/* ratelimit with position control */
		  __entry->balanced_dirty_ratelimit /* the unlimited ratelimit */
	)
);
402
/*
 * Detailed view of one balance_dirty_pages() throttling decision.
 * Rates are in KB/s (KBps()); period/pause/paused/think are converted
 * from jiffies to milliseconds (* 1000 / HZ).
 */
TRACE_EVENT(balance_dirty_pages,

	TP_PROTO(struct backing_dev_info *bdi,
		 unsigned long thresh,
		 unsigned long bg_thresh,
		 unsigned long dirty,
		 unsigned long bdi_thresh,
		 unsigned long bdi_dirty,
		 unsigned long dirty_ratelimit,
		 unsigned long task_ratelimit,
		 unsigned long dirtied,
		 unsigned long period,
		 long pause,
		 unsigned long start_time),

	TP_ARGS(bdi, thresh, bg_thresh, dirty, bdi_thresh, bdi_dirty,
		dirty_ratelimit, task_ratelimit,
		dirtied, period, pause, start_time),

	TP_STRUCT__entry(
		__array(	 char,	bdi, 32)
		__field(unsigned long,	limit)
		__field(unsigned long,	setpoint)
		__field(unsigned long,	dirty)
		__field(unsigned long,	bdi_setpoint)
		__field(unsigned long,	bdi_dirty)
		__field(unsigned long,	dirty_ratelimit)
		__field(unsigned long,	task_ratelimit)
		__field(unsigned int,	dirtied)
		__field(unsigned int,	dirtied_pause)
		__field(unsigned long,	paused)
		__field(	 long,	pause)
		__field(unsigned long,	period)
		__field(	 long,	think)
	),

	TP_fast_assign(
		unsigned long freerun = (thresh + bg_thresh) / 2;
		strlcpy(__entry->bdi, dev_name(bdi->dev), 32);

		__entry->limit		= global_dirty_limit;
		__entry->setpoint	= (global_dirty_limit + freerun) / 2;
		__entry->dirty		= dirty;
		/* global setpoint scaled to this bdi; +1 avoids div-by-zero */
		__entry->bdi_setpoint	= __entry->setpoint *
						bdi_thresh / (thresh + 1);
		__entry->bdi_dirty	= bdi_dirty;
		__entry->dirty_ratelimit = KBps(dirty_ratelimit);
		__entry->task_ratelimit	= KBps(task_ratelimit);
		__entry->dirtied	= dirtied;
		__entry->dirtied_pause	= current->nr_dirtied_pause;
		/* ms since the task last came out of a dirty pause (0 = never) */
		__entry->think		= current->dirty_paused_when == 0 ? 0 :
			 (long)(jiffies - current->dirty_paused_when) * 1000/HZ;
		__entry->period		= period * 1000 / HZ;
		__entry->pause		= pause * 1000 / HZ;
		__entry->paused		= (jiffies - start_time) * 1000 / HZ;
	),


	TP_printk("bdi %s: "
		  "limit=%lu setpoint=%lu dirty=%lu "
		  "bdi_setpoint=%lu bdi_dirty=%lu "
		  "dirty_ratelimit=%lu task_ratelimit=%lu "
		  "dirtied=%u dirtied_pause=%u "
		  "paused=%lu pause=%ld period=%lu think=%ld",
		  __entry->bdi,
		  __entry->limit,
		  __entry->setpoint,
		  __entry->dirty,
		  __entry->bdi_setpoint,
		  __entry->bdi_dirty,
		  __entry->dirty_ratelimit,
		  __entry->task_ratelimit,
		  __entry->dirtied,
		  __entry->dirtied_pause,
		  __entry->paused,	/* ms */
		  __entry->pause,	/* ms */
		  __entry->period,	/* ms */
		  __entry->think	/* ms */
	)
);
483
484TRACE_EVENT(writeback_sb_inodes_requeue,
485
486 TP_PROTO(struct inode *inode),
487 TP_ARGS(inode),
488
489 TP_STRUCT__entry(
490 __array(char, name, 32)
491 __field(unsigned long, ino)
492 __field(unsigned long, state)
493 __field(unsigned long, dirtied_when)
494 ),
495
496 TP_fast_assign(
497 strncpy(__entry->name,
498 dev_name(inode_to_bdi(inode)->dev), 32);
499 __entry->ino = inode->i_ino;
500 __entry->state = inode->i_state;
501 __entry->dirtied_when = inode->dirtied_when;
502 ),
503
504 TP_printk("bdi %s: ino=%lu state=%s dirtied_when=%lu age=%lu",
505 __entry->name,
506 __entry->ino,
507 show_inode_state(__entry->state),
508 __entry->dirtied_when,
509 (jiffies - __entry->dirtied_when) / HZ
510 )
511);
512
/*
 * Event class for congestion waits: the requested timeout and the actual
 * delay incurred, both in microseconds (per the field names).
 */
DECLARE_EVENT_CLASS(writeback_congest_waited_template,

	TP_PROTO(unsigned int usec_timeout, unsigned int usec_delayed),

	TP_ARGS(usec_timeout, usec_delayed),

	TP_STRUCT__entry(
		__field(	unsigned int,	usec_timeout	)
		__field(	unsigned int,	usec_delayed	)
	),

	TP_fast_assign(
		__entry->usec_timeout	= usec_timeout;
		__entry->usec_delayed	= usec_delayed;
	),

	TP_printk("usec_timeout=%u usec_delayed=%u",
			__entry->usec_timeout,
			__entry->usec_delayed)
);
533
/* Instances of writeback_congest_waited_template. */
DEFINE_EVENT(writeback_congest_waited_template, writeback_congestion_wait,

	TP_PROTO(unsigned int usec_timeout, unsigned int usec_delayed),

	TP_ARGS(usec_timeout, usec_delayed)
);

DEFINE_EVENT(writeback_congest_waited_template, writeback_wait_iff_congested,

	TP_PROTO(unsigned int usec_timeout, unsigned int usec_delayed),

	TP_ARGS(usec_timeout, usec_delayed)
);
547
548DECLARE_EVENT_CLASS(writeback_single_inode_template,
549
550 TP_PROTO(struct inode *inode,
551 struct writeback_control *wbc,
552 unsigned long nr_to_write
553 ),
554
555 TP_ARGS(inode, wbc, nr_to_write),
556
557 TP_STRUCT__entry(
558 __array(char, name, 32)
559 __field(unsigned long, ino)
560 __field(unsigned long, state)
561 __field(unsigned long, dirtied_when)
562 __field(unsigned long, writeback_index)
563 __field(long, nr_to_write)
564 __field(unsigned long, wrote)
565 ),
566
567 TP_fast_assign(
568 strncpy(__entry->name,
569 dev_name(inode_to_bdi(inode)->dev), 32);
570 __entry->ino = inode->i_ino;
571 __entry->state = inode->i_state;
572 __entry->dirtied_when = inode->dirtied_when;
573 __entry->writeback_index = inode->i_mapping->writeback_index;
574 __entry->nr_to_write = nr_to_write;
575 __entry->wrote = nr_to_write - wbc->nr_to_write;
576 ),
577
578 TP_printk("bdi %s: ino=%lu state=%s dirtied_when=%lu age=%lu "
579 "index=%lu to_write=%ld wrote=%lu",
580 __entry->name,
581 __entry->ino,
582 show_inode_state(__entry->state),
583 __entry->dirtied_when,
584 (jiffies - __entry->dirtied_when) / HZ,
585 __entry->writeback_index,
586 __entry->nr_to_write,
587 __entry->wrote
588 )
589);
590
/*
 * Instances of writeback_single_inode_template bracketing
 * writeback_single_inode() (start / completion) -- confirm at call sites.
 */
DEFINE_EVENT(writeback_single_inode_template, writeback_single_inode_start,
	TP_PROTO(struct inode *inode,
		 struct writeback_control *wbc,
		 unsigned long nr_to_write),
	TP_ARGS(inode, wbc, nr_to_write)
);

DEFINE_EVENT(writeback_single_inode_template, writeback_single_inode,
	TP_PROTO(struct inode *inode,
		 struct writeback_control *wbc,
		 unsigned long nr_to_write),
	TP_ARGS(inode, wbc, nr_to_write)
);
604
605#endif
606
607
608#include <trace/define_trace.h>
609