1 | /* SPDX-License-Identifier: GPL-2.0 */ |
2 | #undef TRACE_SYSTEM |
3 | #define TRACE_SYSTEM writeback |
4 | |
5 | #if !defined(_TRACE_WRITEBACK_H) || defined(TRACE_HEADER_MULTI_READ) |
6 | #define _TRACE_WRITEBACK_H |
7 | |
8 | #include <linux/tracepoint.h> |
9 | #include <linux/backing-dev.h> |
10 | #include <linux/writeback.h> |
11 | |
/*
 * Decode an inode->i_state bitmask into a "|"-separated list of flag
 * names (e.g. "I_DIRTY_SYNC|I_SYNC") for trace output.
 */
#define show_inode_state(state)					\
	__print_flags(state, "|",				\
		{I_DIRTY_SYNC,		"I_DIRTY_SYNC"},	\
		{I_DIRTY_DATASYNC,	"I_DIRTY_DATASYNC"},	\
		{I_DIRTY_PAGES,		"I_DIRTY_PAGES"},	\
		{I_NEW,			"I_NEW"},		\
		{I_WILL_FREE,		"I_WILL_FREE"},		\
		{I_FREEING,		"I_FREEING"},		\
		{I_CLEAR,		"I_CLEAR"},		\
		{I_SYNC,		"I_SYNC"},		\
		{I_DIRTY_TIME,		"I_DIRTY_TIME"},	\
		{I_REFERENCED,		"I_REFERENCED"}		\
	)
25 | |
26 | /* enums need to be exported to user space */ |
#undef EM
#undef EMe
/* First pass: emit TRACE_DEFINE_ENUM() so user space sees the values. */
#define EM(a,b)		TRACE_DEFINE_ENUM(a);
#define EMe(a,b)	TRACE_DEFINE_ENUM(a);

/*
 * All reasons a writeback work item can be queued; EMe marks the last
 * entry (no trailing comma when remapped to a table below).
 */
#define WB_WORK_REASON						\
	EM( WB_REASON_BACKGROUND,	"background")		\
	EM( WB_REASON_VMSCAN,		"vmscan")		\
	EM( WB_REASON_SYNC,		"sync")			\
	EM( WB_REASON_PERIODIC,		"periodic")		\
	EM( WB_REASON_LAPTOP_TIMER,	"laptop_timer")		\
	EM( WB_REASON_FS_FREE_SPACE,	"fs_free_space")	\
	EM( WB_REASON_FORKER_THREAD,	"forker_thread")	\
	EMe(WB_REASON_FOREIGN_FLUSH,	"foreign_flush")

WB_WORK_REASON
43 | |
44 | /* |
45 | * Now redefine the EM() and EMe() macros to map the enums to the strings |
46 | * that will be printed in the output. |
47 | */ |
#undef EM
#undef EMe
/* Second pass: build { value, "name" } pairs for __print_symbolic(). */
#define EM(a,b)		{ a, b },
#define EMe(a,b)	{ a, b }

/* Opaque here; the work-class events below only dereference its fields. */
struct wb_writeback_work;
54 | |
/*
 * Template for per-folio writeback events: records the bdi device name,
 * the owning inode number and the folio index.  A NULL mapping, or a
 * mapping without a host inode, is tolerated and reported as ino=0.
 */
DECLARE_EVENT_CLASS(writeback_folio_template,

	TP_PROTO(struct folio *folio, struct address_space *mapping),

	TP_ARGS(folio, mapping),

	TP_STRUCT__entry (
		__array(char, name, 32)
		__field(ino_t, ino)
		__field(pgoff_t, index)
	),

	TP_fast_assign(
		/* strscpy_pad() zero-fills the tail so the record is stable */
		strscpy_pad(__entry->name,
			    bdi_dev_name(mapping ? inode_to_bdi(mapping->host) :
					 NULL), 32);
		__entry->ino = (mapping && mapping->host) ? mapping->host->i_ino : 0;
		__entry->index = folio->index;
	),

	TP_printk("bdi %s: ino=%lu index=%lu",
		__entry->name,
		(unsigned long)__entry->ino,
		__entry->index
	)
);
81 | |
/* A folio was dirtied. */
DEFINE_EVENT(writeback_folio_template, writeback_dirty_folio,

	TP_PROTO(struct folio *folio, struct address_space *mapping),

	TP_ARGS(folio, mapping)
);

/* Someone is waiting for writeback on a folio to finish. */
DEFINE_EVENT(writeback_folio_template, folio_wait_writeback,

	TP_PROTO(struct folio *folio, struct address_space *mapping),

	TP_ARGS(folio, mapping)
);
95 | |
/*
 * Template for inode-dirtying events: captures the bdi name, inode
 * number, the inode's current i_state and the dirty flags being applied
 * (both decoded with show_inode_state()).
 */
DECLARE_EVENT_CLASS(writeback_dirty_inode_template,

	TP_PROTO(struct inode *inode, int flags),

	TP_ARGS(inode, flags),

	TP_STRUCT__entry (
		__array(char, name, 32)
		__field(ino_t, ino)
		__field(unsigned long, state)
		__field(unsigned long, flags)
	),

	TP_fast_assign(
		struct backing_dev_info *bdi = inode_to_bdi(inode);

		/* may be called for files on pseudo FSes w/ unregistered bdi */
		strscpy_pad(__entry->name, bdi_dev_name(bdi), 32);
		__entry->ino = inode->i_ino;
		__entry->state = inode->i_state;
		__entry->flags = flags;
	),

	TP_printk("bdi %s: ino=%lu state=%s flags=%s",
		__entry->name,
		(unsigned long)__entry->ino,
		show_inode_state(__entry->state),
		show_inode_state(__entry->flags)
	)
);
126 | |
/* Events emitted around marking an inode dirty; all share the template. */
DEFINE_EVENT(writeback_dirty_inode_template, writeback_mark_inode_dirty,

	TP_PROTO(struct inode *inode, int flags),

	TP_ARGS(inode, flags)
);

DEFINE_EVENT(writeback_dirty_inode_template, writeback_dirty_inode_start,

	TP_PROTO(struct inode *inode, int flags),

	TP_ARGS(inode, flags)
);

DEFINE_EVENT(writeback_dirty_inode_template, writeback_dirty_inode,

	TP_PROTO(struct inode *inode, int flags),

	TP_ARGS(inode, flags)
);
147 | |
/* Helpers below are only needed where the events are instantiated. */
#ifdef CREATE_TRACE_POINTS
#ifdef CONFIG_CGROUP_WRITEBACK

/* Inode number of the cgroup this bdi_writeback is attached to. */
static inline ino_t __trace_wb_assign_cgroup(struct bdi_writeback *wb)
{
	return cgroup_ino(wb->memcg_css->cgroup);
}
155 | |
156 | static inline ino_t __trace_wbc_assign_cgroup(struct writeback_control *wbc) |
157 | { |
158 | if (wbc->wb) |
159 | return __trace_wb_assign_cgroup(wbc->wb); |
160 | else |
161 | return 1; |
162 | } |
#else /* CONFIG_CGROUP_WRITEBACK */

/* Without cgroup writeback both helpers degenerate to the constant 1. */
static inline ino_t __trace_wb_assign_cgroup(struct bdi_writeback *wb)
{
	return 1;
}

static inline ino_t __trace_wbc_assign_cgroup(struct writeback_control *wbc)
{
	return 1;
}

#endif /* CONFIG_CGROUP_WRITEBACK */
#endif /* CREATE_TRACE_POINTS */
177 | |
#ifdef CONFIG_CGROUP_WRITEBACK
/*
 * Records an inode's foreign-writeback history bitmap together with the
 * cgroup currently doing the writeback (via the wbc).
 */
TRACE_EVENT(inode_foreign_history,

	TP_PROTO(struct inode *inode, struct writeback_control *wbc,
		 unsigned int history),

	TP_ARGS(inode, wbc, history),

	TP_STRUCT__entry(
		__array(char, name, 32)
		__field(ino_t, ino)
		__field(ino_t, cgroup_ino)
		__field(unsigned int, history)
	),

	TP_fast_assign(
		strscpy_pad(__entry->name, bdi_dev_name(inode_to_bdi(inode)), 32);
		__entry->ino = inode->i_ino;
		__entry->cgroup_ino = __trace_wbc_assign_cgroup(wbc);
		__entry->history = history;
	),

	TP_printk("bdi %s: ino=%lu cgroup_ino=%lu history=0x%x",
		__entry->name,
		(unsigned long)__entry->ino,
		(unsigned long)__entry->cgroup_ino,
		__entry->history
	)
);
207 | |
/*
 * An inode is being switched from one bdi_writeback (cgroup) to another;
 * records both the old and new cgroup inode numbers.
 */
TRACE_EVENT(inode_switch_wbs,

	TP_PROTO(struct inode *inode, struct bdi_writeback *old_wb,
		 struct bdi_writeback *new_wb),

	TP_ARGS(inode, old_wb, new_wb),

	TP_STRUCT__entry(
		__array(char, name, 32)
		__field(ino_t, ino)
		__field(ino_t, old_cgroup_ino)
		__field(ino_t, new_cgroup_ino)
	),

	TP_fast_assign(
		/* bdi name is taken from the old wb's bdi */
		strscpy_pad(__entry->name, bdi_dev_name(old_wb->bdi), 32);
		__entry->ino = inode->i_ino;
		__entry->old_cgroup_ino = __trace_wb_assign_cgroup(old_wb);
		__entry->new_cgroup_ino = __trace_wb_assign_cgroup(new_wb);
	),

	TP_printk("bdi %s: ino=%lu old_cgroup_ino=%lu new_cgroup_ino=%lu",
		__entry->name,
		(unsigned long)__entry->ino,
		(unsigned long)__entry->old_cgroup_ino,
		(unsigned long)__entry->new_cgroup_ino
	)
);
236 | |
/*
 * A folio owned by one memcg was dirtied against a wb belonging to a
 * different cgroup ("foreign" dirtying); records both cgroup inodes so
 * the mismatch can be observed.
 */
TRACE_EVENT(track_foreign_dirty,

	TP_PROTO(struct folio *folio, struct bdi_writeback *wb),

	TP_ARGS(folio, wb),

	TP_STRUCT__entry(
		__array(char, name, 32)
		__field(u64, bdi_id)
		__field(ino_t, ino)
		__field(unsigned int, memcg_id)
		__field(ino_t, cgroup_ino)
		__field(ino_t, page_cgroup_ino)
	),

	TP_fast_assign(
		struct address_space *mapping = folio_mapping(folio);
		struct inode *inode = mapping ? mapping->host : NULL;

		strscpy_pad(__entry->name, bdi_dev_name(wb->bdi), 32);
		__entry->bdi_id = wb->bdi->id;
		__entry->ino = inode ? inode->i_ino : 0;
		__entry->memcg_id = wb->memcg_css->id;
		__entry->cgroup_ino = __trace_wb_assign_cgroup(wb);
		/* cgroup that actually owns the folio's memcg */
		__entry->page_cgroup_ino = cgroup_ino(folio_memcg(folio)->css.cgroup);
	),

	TP_printk("bdi %s[%llu]: ino=%lu memcg_id=%u cgroup_ino=%lu page_cgroup_ino=%lu",
		__entry->name,
		__entry->bdi_id,
		(unsigned long)__entry->ino,
		__entry->memcg_id,
		(unsigned long)__entry->cgroup_ino,
		(unsigned long)__entry->page_cgroup_ino
	)
);
273 | |
/*
 * A wb is flushing on behalf of a foreign (bdi_id, memcg_id) pair that
 * previously dirtied pages against it.
 */
TRACE_EVENT(flush_foreign,

	TP_PROTO(struct bdi_writeback *wb, unsigned int frn_bdi_id,
		 unsigned int frn_memcg_id),

	TP_ARGS(wb, frn_bdi_id, frn_memcg_id),

	TP_STRUCT__entry(
		__array(char, name, 32)
		__field(ino_t, cgroup_ino)
		__field(unsigned int, frn_bdi_id)
		__field(unsigned int, frn_memcg_id)
	),

	TP_fast_assign(
		strscpy_pad(__entry->name, bdi_dev_name(wb->bdi), 32);
		__entry->cgroup_ino = __trace_wb_assign_cgroup(wb);
		__entry->frn_bdi_id = frn_bdi_id;
		__entry->frn_memcg_id = frn_memcg_id;
	),

	TP_printk("bdi %s: cgroup_ino=%lu frn_bdi_id=%u frn_memcg_id=%u",
		__entry->name,
		(unsigned long)__entry->cgroup_ino,
		__entry->frn_bdi_id,
		__entry->frn_memcg_id
	)
);
#endif /* CONFIG_CGROUP_WRITEBACK */
303 | |
/*
 * Template for write_inode events: bdi name, inode number, the wbc's
 * sync mode and the issuing cgroup.
 */
DECLARE_EVENT_CLASS(writeback_write_inode_template,

	TP_PROTO(struct inode *inode, struct writeback_control *wbc),

	TP_ARGS(inode, wbc),

	TP_STRUCT__entry (
		__array(char, name, 32)
		__field(ino_t, ino)
		__field(int, sync_mode)
		__field(ino_t, cgroup_ino)
	),

	TP_fast_assign(
		strscpy_pad(__entry->name,
			    bdi_dev_name(inode_to_bdi(inode)), 32);
		__entry->ino = inode->i_ino;
		__entry->sync_mode = wbc->sync_mode;
		__entry->cgroup_ino = __trace_wbc_assign_cgroup(wbc);
	),

	TP_printk("bdi %s: ino=%lu sync_mode=%d cgroup_ino=%lu",
		__entry->name,
		(unsigned long)__entry->ino,
		__entry->sync_mode,
		(unsigned long)__entry->cgroup_ino
	)
);
332 | |
/* Start/end markers around ->write_inode(); both use the template above. */
DEFINE_EVENT(writeback_write_inode_template, writeback_write_inode_start,

	TP_PROTO(struct inode *inode, struct writeback_control *wbc),

	TP_ARGS(inode, wbc)
);

DEFINE_EVENT(writeback_write_inode_template, writeback_write_inode,

	TP_PROTO(struct inode *inode, struct writeback_control *wbc),

	TP_ARGS(inode, wbc)
);
346 | |
/*
 * Template for wb_writeback_work lifecycle events (queue, exec, start,
 * written, wait).  Snapshots the work item's parameters; sb_dev is 0
 * when the work is not targeted at a specific superblock.
 */
DECLARE_EVENT_CLASS(writeback_work_class,
	TP_PROTO(struct bdi_writeback *wb, struct wb_writeback_work *work),
	TP_ARGS(wb, work),
	TP_STRUCT__entry(
		__array(char, name, 32)
		__field(long, nr_pages)
		__field(dev_t, sb_dev)
		__field(int, sync_mode)
		__field(int, for_kupdate)
		__field(int, range_cyclic)
		__field(int, for_background)
		__field(int, reason)
		__field(ino_t, cgroup_ino)
	),
	TP_fast_assign(
		strscpy_pad(__entry->name, bdi_dev_name(wb->bdi), 32);
		__entry->nr_pages = work->nr_pages;
		__entry->sb_dev = work->sb ? work->sb->s_dev : 0;
		__entry->sync_mode = work->sync_mode;
		__entry->for_kupdate = work->for_kupdate;
		__entry->range_cyclic = work->range_cyclic;
		__entry->for_background = work->for_background;
		__entry->reason = work->reason;
		__entry->cgroup_ino = __trace_wb_assign_cgroup(wb);
	),
	TP_printk("bdi %s: sb_dev %d:%d nr_pages=%ld sync_mode=%d "
		  "kupdate=%d range_cyclic=%d background=%d reason=%s cgroup_ino=%lu",
		  __entry->name,
		  MAJOR(__entry->sb_dev), MINOR(__entry->sb_dev),
		  __entry->nr_pages,
		  __entry->sync_mode,
		  __entry->for_kupdate,
		  __entry->range_cyclic,
		  __entry->for_background,
		  /* reason rendered via the WB_WORK_REASON string table */
		  __print_symbolic(__entry->reason, WB_WORK_REASON),
		  (unsigned long)__entry->cgroup_ino
	)
);
#define DEFINE_WRITEBACK_WORK_EVENT(name) \
DEFINE_EVENT(writeback_work_class, name, \
	TP_PROTO(struct bdi_writeback *wb, struct wb_writeback_work *work), \
	TP_ARGS(wb, work))
DEFINE_WRITEBACK_WORK_EVENT(writeback_queue);
DEFINE_WRITEBACK_WORK_EVENT(writeback_exec);
DEFINE_WRITEBACK_WORK_EVENT(writeback_start);
DEFINE_WRITEBACK_WORK_EVENT(writeback_written);
DEFINE_WRITEBACK_WORK_EVENT(writeback_wait);
394 | |
/* Reports how many pages a writeback pass wrote. */
TRACE_EVENT(writeback_pages_written,
	TP_PROTO(long pages_written),
	TP_ARGS(pages_written),
	TP_STRUCT__entry(
		__field(long, pages)
	),
	TP_fast_assign(
		__entry->pages = pages_written;
	),
	TP_printk("%ld", __entry->pages)
);
406 | |
/* Minimal per-wb template: just the bdi name and owning cgroup inode. */
DECLARE_EVENT_CLASS(writeback_class,
	TP_PROTO(struct bdi_writeback *wb),
	TP_ARGS(wb),
	TP_STRUCT__entry(
		__array(char, name, 32)
		__field(ino_t, cgroup_ino)
	),
	TP_fast_assign(
		strscpy_pad(__entry->name, bdi_dev_name(wb->bdi), 32);
		__entry->cgroup_ino = __trace_wb_assign_cgroup(wb);
	),
	TP_printk("bdi %s: cgroup_ino=%lu",
		  __entry->name,
		  (unsigned long)__entry->cgroup_ino
	)
);
#define DEFINE_WRITEBACK_EVENT(name) \
DEFINE_EVENT(writeback_class, name, \
	TP_PROTO(struct bdi_writeback *wb), \
	TP_ARGS(wb))

DEFINE_WRITEBACK_EVENT(writeback_wake_background);
429 | |
/* A backing_dev_info was registered; only the device name is recorded. */
TRACE_EVENT(writeback_bdi_register,
	TP_PROTO(struct backing_dev_info *bdi),
	TP_ARGS(bdi),
	TP_STRUCT__entry(
		__array(char, name, 32)
	),
	TP_fast_assign(
		strscpy_pad(__entry->name, bdi_dev_name(bdi), 32);
	),
	TP_printk("bdi %s",
		__entry->name
	)
);
443 | |
444 | DECLARE_EVENT_CLASS(wbc_class, |
445 | TP_PROTO(struct writeback_control *wbc, struct backing_dev_info *bdi), |
446 | TP_ARGS(wbc, bdi), |
447 | TP_STRUCT__entry( |
448 | __array(char, name, 32) |
449 | __field(long, nr_to_write) |
450 | __field(long, pages_skipped) |
451 | __field(int, sync_mode) |
452 | __field(int, for_kupdate) |
453 | __field(int, for_background) |
454 | __field(int, for_reclaim) |
455 | __field(int, range_cyclic) |
456 | __field(long, range_start) |
457 | __field(long, range_end) |
458 | __field(ino_t, cgroup_ino) |
459 | ), |
460 | |
461 | TP_fast_assign( |
462 | strscpy_pad(__entry->name, bdi_dev_name(bdi), 32); |
463 | __entry->nr_to_write = wbc->nr_to_write; |
464 | __entry->pages_skipped = wbc->pages_skipped; |
465 | __entry->sync_mode = wbc->sync_mode; |
466 | __entry->for_kupdate = wbc->for_kupdate; |
467 | __entry->for_background = wbc->for_background; |
468 | __entry->for_reclaim = wbc->for_reclaim; |
469 | __entry->range_cyclic = wbc->range_cyclic; |
470 | __entry->range_start = (long)wbc->range_start; |
471 | __entry->range_end = (long)wbc->range_end; |
472 | __entry->cgroup_ino = __trace_wbc_assign_cgroup(wbc); |
473 | ), |
474 | |
475 | TP_printk("bdi %s: towrt=%ld skip=%ld mode=%d kupd=%d " |
476 | "bgrd=%d reclm=%d cyclic=%d " |
477 | "start=0x%lx end=0x%lx cgroup_ino=%lu" , |
478 | __entry->name, |
479 | __entry->nr_to_write, |
480 | __entry->pages_skipped, |
481 | __entry->sync_mode, |
482 | __entry->for_kupdate, |
483 | __entry->for_background, |
484 | __entry->for_reclaim, |
485 | __entry->range_cyclic, |
486 | __entry->range_start, |
487 | __entry->range_end, |
488 | (unsigned long)__entry->cgroup_ino |
489 | ) |
490 | ) |
491 | |
/* Stamp out a wbc_class event with the given name. */
#define DEFINE_WBC_EVENT(name) \
DEFINE_EVENT(wbc_class, name, \
	TP_PROTO(struct writeback_control *wbc, struct backing_dev_info *bdi), \
	TP_ARGS(wbc, bdi))
DEFINE_WBC_EVENT(wbc_writepage);
497 | |
/*
 * Inodes were moved onto the writeback queue: records the dirtied_before
 * cutoff (absolute jiffies and relative age in ms), how many inodes were
 * enqueued and the work reason.
 */
TRACE_EVENT(writeback_queue_io,
	TP_PROTO(struct bdi_writeback *wb,
		 struct wb_writeback_work *work,
		 unsigned long dirtied_before,
		 int moved),
	TP_ARGS(wb, work, dirtied_before, moved),
	TP_STRUCT__entry(
		__array(char, name, 32)
		__field(unsigned long, older)
		__field(long, age)
		__field(int, moved)
		__field(int, reason)
		__field(ino_t, cgroup_ino)
	),
	TP_fast_assign(
		strscpy_pad(__entry->name, bdi_dev_name(wb->bdi), 32);
		__entry->older = dirtied_before;
		/* age of the cutoff relative to now, in milliseconds */
		__entry->age = (jiffies - dirtied_before) * 1000 / HZ;
		__entry->moved = moved;
		__entry->reason = work->reason;
		__entry->cgroup_ino = __trace_wb_assign_cgroup(wb);
	),
	TP_printk("bdi %s: older=%lu age=%ld enqueue=%d reason=%s cgroup_ino=%lu",
		__entry->name,
		__entry->older,	/* dirtied_before in jiffies */
		__entry->age,	/* dirtied_before in relative milliseconds */
		__entry->moved,
		__print_symbolic(__entry->reason, WB_WORK_REASON),
		(unsigned long)__entry->cgroup_ino
	)
);
529 | |
/*
 * Snapshot of global dirty-page accounting: node page-state counters
 * plus the caller-supplied background/dirty thresholds and the current
 * global dirty limit.
 */
TRACE_EVENT(global_dirty_state,

	TP_PROTO(unsigned long background_thresh,
		 unsigned long dirty_thresh
	),

	TP_ARGS(background_thresh,
		dirty_thresh
	),

	TP_STRUCT__entry(
		__field(unsigned long, nr_dirty)
		__field(unsigned long, nr_writeback)
		__field(unsigned long, background_thresh)
		__field(unsigned long, dirty_thresh)
		__field(unsigned long, dirty_limit)
		__field(unsigned long, nr_dirtied)
		__field(unsigned long, nr_written)
	),

	TP_fast_assign(
		__entry->nr_dirty = global_node_page_state(NR_FILE_DIRTY);
		__entry->nr_writeback = global_node_page_state(NR_WRITEBACK);
		__entry->nr_dirtied = global_node_page_state(NR_DIRTIED);
		__entry->nr_written = global_node_page_state(NR_WRITTEN);
		__entry->background_thresh = background_thresh;
		__entry->dirty_thresh = dirty_thresh;
		__entry->dirty_limit = global_wb_domain.dirty_limit;
	),

	TP_printk("dirty=%lu writeback=%lu "
		  "bg_thresh=%lu thresh=%lu limit=%lu "
		  "dirtied=%lu written=%lu",
		  __entry->nr_dirty,
		  __entry->nr_writeback,
		  __entry->background_thresh,
		  __entry->dirty_thresh,
		  __entry->dirty_limit,
		  __entry->nr_dirtied,
		  __entry->nr_written
	)
);
572 | |
/* Convert a page count to KB/s for trace output (PAGE_SHIFT - 10 = KB). */
#define KBps(x)			((x) << (PAGE_SHIFT - 10))

/*
 * Dirty-ratelimit recomputation for a wb: write bandwidth (measured and
 * averaged), the observed dirty rate and the various ratelimits, all in
 * KB/s via KBps().
 */
TRACE_EVENT(bdi_dirty_ratelimit,

	TP_PROTO(struct bdi_writeback *wb,
		 unsigned long dirty_rate,
		 unsigned long task_ratelimit),

	TP_ARGS(wb, dirty_rate, task_ratelimit),

	TP_STRUCT__entry(
		__array(char, bdi, 32)
		__field(unsigned long, write_bw)
		__field(unsigned long, avg_write_bw)
		__field(unsigned long, dirty_rate)
		__field(unsigned long, dirty_ratelimit)
		__field(unsigned long, task_ratelimit)
		__field(unsigned long, balanced_dirty_ratelimit)
		__field(ino_t, cgroup_ino)
	),

	TP_fast_assign(
		strscpy_pad(__entry->bdi, bdi_dev_name(wb->bdi), 32);
		__entry->write_bw = KBps(wb->write_bandwidth);
		__entry->avg_write_bw = KBps(wb->avg_write_bandwidth);
		__entry->dirty_rate = KBps(dirty_rate);
		__entry->dirty_ratelimit = KBps(wb->dirty_ratelimit);
		__entry->task_ratelimit = KBps(task_ratelimit);
		__entry->balanced_dirty_ratelimit =
					KBps(wb->balanced_dirty_ratelimit);
		__entry->cgroup_ino = __trace_wb_assign_cgroup(wb);
	),

	TP_printk("bdi %s: "
		  "write_bw=%lu awrite_bw=%lu dirty_rate=%lu "
		  "dirty_ratelimit=%lu task_ratelimit=%lu "
		  "balanced_dirty_ratelimit=%lu cgroup_ino=%lu",
		  __entry->bdi,
		  __entry->write_bw,		/* write bandwidth */
		  __entry->avg_write_bw,	/* avg write bandwidth */
		  __entry->dirty_rate,		/* bdi dirty rate */
		  __entry->dirty_ratelimit,	/* base ratelimit */
		  __entry->task_ratelimit, /* ratelimit with position control */
		  __entry->balanced_dirty_ratelimit, /* the balanced ratelimit */
		  (unsigned long)__entry->cgroup_ino
	)
);
620 | |
/*
 * One iteration of the dirty-page throttling loop.  Derived values:
 * setpoint is the midpoint between the dirty limit and the freerun
 * threshold ((thresh + bg_thresh) / 2); bdi_setpoint scales it by this
 * bdi's share of the global threshold.  period/pause/paused/think are
 * converted from jiffies to milliseconds.
 */
TRACE_EVENT(balance_dirty_pages,

	TP_PROTO(struct bdi_writeback *wb,
		 unsigned long thresh,
		 unsigned long bg_thresh,
		 unsigned long dirty,
		 unsigned long bdi_thresh,
		 unsigned long bdi_dirty,
		 unsigned long dirty_ratelimit,
		 unsigned long task_ratelimit,
		 unsigned long dirtied,
		 unsigned long period,
		 long pause,
		 unsigned long start_time),

	TP_ARGS(wb, thresh, bg_thresh, dirty, bdi_thresh, bdi_dirty,
		dirty_ratelimit, task_ratelimit,
		dirtied, period, pause, start_time),

	TP_STRUCT__entry(
		__array(	 char,	bdi, 32)
		__field(unsigned long,	limit)
		__field(unsigned long,	setpoint)
		__field(unsigned long,	dirty)
		__field(unsigned long,	bdi_setpoint)
		__field(unsigned long,	bdi_dirty)
		__field(unsigned long,	dirty_ratelimit)
		__field(unsigned long,	task_ratelimit)
		__field(unsigned int,	dirtied)
		__field(unsigned int,	dirtied_pause)
		__field(unsigned long,	paused)
		__field(	 long,	pause)
		__field(unsigned long,	period)
		__field(	 long,	think)
		__field(ino_t,		cgroup_ino)
	),

	TP_fast_assign(
		unsigned long freerun = (thresh + bg_thresh) / 2;
		strscpy_pad(__entry->bdi, bdi_dev_name(wb->bdi), 32);

		__entry->limit		= global_wb_domain.dirty_limit;
		__entry->setpoint	= (global_wb_domain.dirty_limit +
						freerun) / 2;
		__entry->dirty		= dirty;
		__entry->bdi_setpoint	= __entry->setpoint *
						bdi_thresh / (thresh + 1);
		__entry->bdi_dirty	= bdi_dirty;
		__entry->dirty_ratelimit = KBps(dirty_ratelimit);
		__entry->task_ratelimit	= KBps(task_ratelimit);
		__entry->dirtied	= dirtied;
		__entry->dirtied_pause	= current->nr_dirtied_pause;
		/* ms since this task last left balance_dirty_pages() */
		__entry->think		= current->dirty_paused_when == 0 ? 0 :
			 (long)(jiffies - current->dirty_paused_when) * 1000/HZ;
		__entry->period		= period * 1000 / HZ;
		__entry->pause		= pause * 1000 / HZ;
		__entry->paused		= (jiffies - start_time) * 1000 / HZ;
		__entry->cgroup_ino	= __trace_wb_assign_cgroup(wb);
	),


	TP_printk("bdi %s: "
		  "limit=%lu setpoint=%lu dirty=%lu "
		  "bdi_setpoint=%lu bdi_dirty=%lu "
		  "dirty_ratelimit=%lu task_ratelimit=%lu "
		  "dirtied=%u dirtied_pause=%u "
		  "paused=%lu pause=%ld period=%lu think=%ld cgroup_ino=%lu",
		  __entry->bdi,
		  __entry->limit,
		  __entry->setpoint,
		  __entry->dirty,
		  __entry->bdi_setpoint,
		  __entry->bdi_dirty,
		  __entry->dirty_ratelimit,
		  __entry->task_ratelimit,
		  __entry->dirtied,
		  __entry->dirtied_pause,
		  __entry->paused,	/* ms */
		  __entry->pause,	/* ms */
		  __entry->period,	/* ms */
		  __entry->think,	/* ms */
		  (unsigned long)__entry->cgroup_ino
	)
);
705 | |
/*
 * An inode was requeued during sb inode writeback; records its state,
 * dirtied_when timestamp and the wb's cgroup.
 */
TRACE_EVENT(writeback_sb_inodes_requeue,

	TP_PROTO(struct inode *inode),
	TP_ARGS(inode),

	TP_STRUCT__entry(
		__array(char, name, 32)
		__field(ino_t, ino)
		__field(unsigned long, state)
		__field(unsigned long, dirtied_when)
		__field(ino_t, cgroup_ino)
	),

	TP_fast_assign(
		strscpy_pad(__entry->name,
			    bdi_dev_name(inode_to_bdi(inode)), 32);
		__entry->ino = inode->i_ino;
		__entry->state = inode->i_state;
		__entry->dirtied_when = inode->dirtied_when;
		__entry->cgroup_ino = __trace_wb_assign_cgroup(inode_to_wb(inode));
	),

	TP_printk("bdi %s: ino=%lu state=%s dirtied_when=%lu age=%lu cgroup_ino=%lu",
		  __entry->name,
		  (unsigned long)__entry->ino,
		  show_inode_state(__entry->state),
		  __entry->dirtied_when,
		  /* age in seconds, computed at print time */
		  (jiffies - __entry->dirtied_when) / HZ,
		  (unsigned long)__entry->cgroup_ino
	)
);
737 | |
/*
 * Template for single-inode writeback events.  "wrote" is derived as
 * the difference between the requested nr_to_write and what remains in
 * the wbc after the writeback attempt.
 */
DECLARE_EVENT_CLASS(writeback_single_inode_template,

	TP_PROTO(struct inode *inode,
		 struct writeback_control *wbc,
		 unsigned long nr_to_write
	),

	TP_ARGS(inode, wbc, nr_to_write),

	TP_STRUCT__entry(
		__array(char, name, 32)
		__field(ino_t, ino)
		__field(unsigned long, state)
		__field(unsigned long, dirtied_when)
		__field(unsigned long, writeback_index)
		__field(long, nr_to_write)
		__field(unsigned long, wrote)
		__field(ino_t, cgroup_ino)
	),

	TP_fast_assign(
		strscpy_pad(__entry->name,
			    bdi_dev_name(inode_to_bdi(inode)), 32);
		__entry->ino = inode->i_ino;
		__entry->state = inode->i_state;
		__entry->dirtied_when = inode->dirtied_when;
		__entry->writeback_index = inode->i_mapping->writeback_index;
		__entry->nr_to_write = nr_to_write;
		/* pages written = requested - remaining budget */
		__entry->wrote = nr_to_write - wbc->nr_to_write;
		__entry->cgroup_ino = __trace_wbc_assign_cgroup(wbc);
	),

	TP_printk("bdi %s: ino=%lu state=%s dirtied_when=%lu age=%lu "
		  "index=%lu to_write=%ld wrote=%lu cgroup_ino=%lu",
		  __entry->name,
		  (unsigned long)__entry->ino,
		  show_inode_state(__entry->state),
		  __entry->dirtied_when,
		  (jiffies - __entry->dirtied_when) / HZ,
		  __entry->writeback_index,
		  __entry->nr_to_write,
		  __entry->wrote,
		  (unsigned long)__entry->cgroup_ino
	)
);
783 | |
/* Start/end markers around writing back a single inode. */
DEFINE_EVENT(writeback_single_inode_template, writeback_single_inode_start,
	TP_PROTO(struct inode *inode,
		 struct writeback_control *wbc,
		 unsigned long nr_to_write),
	TP_ARGS(inode, wbc, nr_to_write)
);

DEFINE_EVENT(writeback_single_inode_template, writeback_single_inode,
	TP_PROTO(struct inode *inode,
		 struct writeback_control *wbc,
		 unsigned long nr_to_write),
	TP_ARGS(inode, wbc, nr_to_write)
);
797 | |
/*
 * Template for plain inode events keyed by (dev, ino) rather than bdi
 * name; also records i_state, i_mode and dirtied_when.
 */
DECLARE_EVENT_CLASS(writeback_inode_template,
	TP_PROTO(struct inode *inode),

	TP_ARGS(inode),

	TP_STRUCT__entry(
		__field(	dev_t,	dev			)
		__field(	ino_t,	ino			)
		__field(unsigned long,	state			)
		__field(	__u16,	mode			)
		__field(unsigned long,	dirtied_when		)
	),

	TP_fast_assign(
		__entry->dev	= inode->i_sb->s_dev;
		__entry->ino	= inode->i_ino;
		__entry->state	= inode->i_state;
		__entry->mode	= inode->i_mode;
		__entry->dirtied_when = inode->dirtied_when;
	),

	TP_printk("dev %d,%d ino %lu dirtied %lu state %s mode 0%o",
		  MAJOR(__entry->dev), MINOR(__entry->dev),
		  (unsigned long)__entry->ino, __entry->dirtied_when,
		  show_inode_state(__entry->state), __entry->mode)
);
824 | |
/* Lazytime-related inode events. */
DEFINE_EVENT(writeback_inode_template, writeback_lazytime,
	TP_PROTO(struct inode *inode),

	TP_ARGS(inode)
);

DEFINE_EVENT(writeback_inode_template, writeback_lazytime_iput,
	TP_PROTO(struct inode *inode),

	TP_ARGS(inode)
);

DEFINE_EVENT(writeback_inode_template, writeback_dirty_inode_enqueue,

	TP_PROTO(struct inode *inode),

	TP_ARGS(inode)
);

/*
 * Inode writeback list tracking.
 */

DEFINE_EVENT(writeback_inode_template, sb_mark_inode_writeback,
	TP_PROTO(struct inode *inode),
	TP_ARGS(inode)
);

DEFINE_EVENT(writeback_inode_template, sb_clear_inode_writeback,
	TP_PROTO(struct inode *inode),
	TP_ARGS(inode)
);
857 | |
858 | #endif /* _TRACE_WRITEBACK_H */ |
859 | |
860 | /* This part must be outside protection */ |
861 | #include <trace/define_trace.h> |
862 | |