/* SPDX-License-Identifier: GPL-2.0 */
#undef TRACE_SYSTEM
#define TRACE_SYSTEM writeback

#if !defined(_TRACE_WRITEBACK_H) || defined(TRACE_HEADER_MULTI_READ)
#define _TRACE_WRITEBACK_H

#include <linux/tracepoint.h>
#include <linux/backing-dev.h>
#include <linux/writeback.h>

#define show_inode_state(state)					\
	__print_flags(state, "|",				\
		{I_DIRTY_SYNC,		"I_DIRTY_SYNC"},	\
		{I_DIRTY_DATASYNC,	"I_DIRTY_DATASYNC"},	\
		{I_DIRTY_PAGES,		"I_DIRTY_PAGES"},	\
		{I_NEW,			"I_NEW"},		\
		{I_WILL_FREE,		"I_WILL_FREE"},		\
		{I_FREEING,		"I_FREEING"},		\
		{I_CLEAR,		"I_CLEAR"},		\
		{I_SYNC,		"I_SYNC"},		\
		{I_DIRTY_TIME,		"I_DIRTY_TIME"},	\
		{I_REFERENCED,		"I_REFERENCED"}		\
	)
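
/*
 * show_inode_state() decodes an inode->i_state bitmask into a
 * "FLAG|FLAG|..." string in the trace output via __print_flags().
 */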

/* enums need to be exported to user space */
#undef EM
#undef EMe
#define EM(a,b)		TRACE_DEFINE_ENUM(a);
#define EMe(a,b)	TRACE_DEFINE_ENUM(a);

#define WB_WORK_REASON						\
	EM( WB_REASON_BACKGROUND,	"background")		\
	EM( WB_REASON_VMSCAN,		"vmscan")		\
	EM( WB_REASON_SYNC,		"sync")			\
	EM( WB_REASON_PERIODIC,		"periodic")		\
	EM( WB_REASON_LAPTOP_TIMER,	"laptop_timer")		\
	EM( WB_REASON_FREE_MORE_MEM,	"free_more_memory")	\
	EM( WB_REASON_FS_FREE_SPACE,	"fs_free_space")	\
	EMe(WB_REASON_FORKER_THREAD,	"forker_thread")
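
/*
 * Expand the list once with EM()/EMe() defined as TRACE_DEFINE_ENUM() so
 * the enum values can be resolved by user-space tools parsing the format.
 */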
WB_WORK_REASON

/*
 * Now redefine the EM() and EMe() macros to map the enums to the strings
 * that will be printed in the output.
 */
#undef EM
#undef EMe
#define EM(a,b)		{ a, b },
#define EMe(a,b)	{ a, b }

struct wb_writeback_work;

TRACE_EVENT(writeback_dirty_page,

	TP_PROTO(struct page *page, struct address_space *mapping),

	TP_ARGS(page, mapping),

	TP_STRUCT__entry(
		__array(char, name, 32)
		__field(unsigned long, ino)
		__field(pgoff_t, index)
	),

	TP_fast_assign(
		strscpy_pad(__entry->name,
			    bdi_dev_name(mapping ? inode_to_bdi(mapping->host) :
					 NULL), 32);
		__entry->ino = mapping ? mapping->host->i_ino : 0;
		__entry->index = page->index;
	),

	TP_printk("bdi %s: ino=%lu index=%lu",
		__entry->name,
		__entry->ino,
		__entry->index
	)
);
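
/*
 * Example of enabling this event from user space (assuming tracefs is
 * mounted at /sys/kernel/tracing):
 *
 *	echo 1 > /sys/kernel/tracing/events/writeback/writeback_dirty_page/enable
 *	cat /sys/kernel/tracing/trace_pipe
 */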

DECLARE_EVENT_CLASS(writeback_dirty_inode_template,

	TP_PROTO(struct inode *inode, int flags),

	TP_ARGS(inode, flags),

	TP_STRUCT__entry(
		__array(char, name, 32)
		__field(unsigned long, ino)
		__field(unsigned long, state)
		__field(unsigned long, flags)
	),

	TP_fast_assign(
		struct backing_dev_info *bdi = inode_to_bdi(inode);

		/* may be called for files on pseudo FSes w/ unregistered bdi */
		strscpy_pad(__entry->name, bdi_dev_name(bdi), 32);
		__entry->ino = inode->i_ino;
		__entry->state = inode->i_state;
		__entry->flags = flags;
	),

	TP_printk("bdi %s: ino=%lu state=%s flags=%s",
		__entry->name,
		__entry->ino,
		show_inode_state(__entry->state),
		show_inode_state(__entry->flags)
	)
);

DEFINE_EVENT(writeback_dirty_inode_template, writeback_mark_inode_dirty,
	TP_PROTO(struct inode *inode, int flags),
	TP_ARGS(inode, flags)
);

DEFINE_EVENT(writeback_dirty_inode_template, writeback_dirty_inode_start,
	TP_PROTO(struct inode *inode, int flags),
	TP_ARGS(inode, flags)
);

DEFINE_EVENT(writeback_dirty_inode_template, writeback_dirty_inode,
	TP_PROTO(struct inode *inode, int flags),
	TP_ARGS(inode, flags)
);
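
/*
 * The helpers below exist only while the tracepoints are being instantiated
 * (CREATE_TRACE_POINTS). They map a bdi_writeback to the inode number of
 * its memcg cgroup, or to -1U when cgroup writeback is unavailable.
 */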
#ifdef CREATE_TRACE_POINTS
#ifdef CONFIG_CGROUP_WRITEBACK

static inline unsigned int __trace_wb_assign_cgroup(struct bdi_writeback *wb)
{
	return wb->memcg_css->cgroup->kn->id.ino;
}

static inline unsigned int __trace_wbc_assign_cgroup(struct writeback_control *wbc)
{
	if (wbc->wb)
		return __trace_wb_assign_cgroup(wbc->wb);
	else
		return -1U;
}

#else	/* CONFIG_CGROUP_WRITEBACK */

static inline unsigned int __trace_wb_assign_cgroup(struct bdi_writeback *wb)
{
	return -1U;
}

static inline unsigned int __trace_wbc_assign_cgroup(struct writeback_control *wbc)
{
	return -1U;
}

#endif	/* CONFIG_CGROUP_WRITEBACK */
#endif	/* CREATE_TRACE_POINTS */

DECLARE_EVENT_CLASS(writeback_write_inode_template,

	TP_PROTO(struct inode *inode, struct writeback_control *wbc),

	TP_ARGS(inode, wbc),

	TP_STRUCT__entry(
		__array(char, name, 32)
		__field(unsigned long, ino)
		__field(int, sync_mode)
		__field(unsigned int, cgroup_ino)
	),

	TP_fast_assign(
		strscpy_pad(__entry->name,
			    bdi_dev_name(inode_to_bdi(inode)), 32);
		__entry->ino = inode->i_ino;
		__entry->sync_mode = wbc->sync_mode;
		__entry->cgroup_ino = __trace_wbc_assign_cgroup(wbc);
	),

	TP_printk("bdi %s: ino=%lu sync_mode=%d cgroup_ino=%u",
		__entry->name,
		__entry->ino,
		__entry->sync_mode,
		__entry->cgroup_ino
	)
);

DEFINE_EVENT(writeback_write_inode_template, writeback_write_inode_start,
	TP_PROTO(struct inode *inode, struct writeback_control *wbc),
	TP_ARGS(inode, wbc)
);

DEFINE_EVENT(writeback_write_inode_template, writeback_write_inode,
	TP_PROTO(struct inode *inode, struct writeback_control *wbc),
	TP_ARGS(inode, wbc)
);
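
/*
 * struct wb_writeback_work (defined in fs/fs-writeback.c, hence the forward
 * declaration above) describes one queued writeback request; the events
 * below follow such a request through its lifetime.
 */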
DECLARE_EVENT_CLASS(writeback_work_class,
	TP_PROTO(struct bdi_writeback *wb, struct wb_writeback_work *work),
	TP_ARGS(wb, work),
	TP_STRUCT__entry(
		__array(char, name, 32)
		__field(long, nr_pages)
		__field(dev_t, sb_dev)
		__field(int, sync_mode)
		__field(int, for_kupdate)
		__field(int, range_cyclic)
		__field(int, for_background)
		__field(int, reason)
		__field(unsigned int, cgroup_ino)
	),
	TP_fast_assign(
		strscpy_pad(__entry->name, bdi_dev_name(wb->bdi), 32);
		__entry->nr_pages = work->nr_pages;
		__entry->sb_dev = work->sb ? work->sb->s_dev : 0;
		__entry->sync_mode = work->sync_mode;
		__entry->for_kupdate = work->for_kupdate;
		__entry->range_cyclic = work->range_cyclic;
		__entry->for_background = work->for_background;
		__entry->reason = work->reason;
		__entry->cgroup_ino = __trace_wb_assign_cgroup(wb);
	),
	TP_printk("bdi %s: sb_dev %d:%d nr_pages=%ld sync_mode=%d "
		  "kupdate=%d range_cyclic=%d background=%d reason=%s cgroup_ino=%u",
		  __entry->name,
		  MAJOR(__entry->sb_dev), MINOR(__entry->sb_dev),
		  __entry->nr_pages,
		  __entry->sync_mode,
		  __entry->for_kupdate,
		  __entry->range_cyclic,
		  __entry->for_background,
		  __print_symbolic(__entry->reason, WB_WORK_REASON),
		  __entry->cgroup_ino
	)
);

#define DEFINE_WRITEBACK_WORK_EVENT(name) \
DEFINE_EVENT(writeback_work_class, name, \
	TP_PROTO(struct bdi_writeback *wb, struct wb_writeback_work *work), \
	TP_ARGS(wb, work))
DEFINE_WRITEBACK_WORK_EVENT(writeback_queue);
DEFINE_WRITEBACK_WORK_EVENT(writeback_exec);
DEFINE_WRITEBACK_WORK_EVENT(writeback_start);
DEFINE_WRITEBACK_WORK_EVENT(writeback_written);
DEFINE_WRITEBACK_WORK_EVENT(writeback_wait);
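
/*
 * Rough lifecycle of a work item: writeback_queue fires when it is queued,
 * writeback_exec when a worker picks it up, and writeback_start,
 * writeback_written and writeback_wait as it is processed.
 */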

TRACE_EVENT(writeback_pages_written,
	TP_PROTO(long pages_written),
	TP_ARGS(pages_written),
	TP_STRUCT__entry(
		__field(long, pages)
	),
	TP_fast_assign(
		__entry->pages = pages_written;
	),
	TP_printk("%ld", __entry->pages)
);

DECLARE_EVENT_CLASS(writeback_class,
	TP_PROTO(struct bdi_writeback *wb),
	TP_ARGS(wb),
	TP_STRUCT__entry(
		__array(char, name, 32)
		__field(unsigned int, cgroup_ino)
	),
	TP_fast_assign(
		strscpy_pad(__entry->name, bdi_dev_name(wb->bdi), 32);
		__entry->cgroup_ino = __trace_wb_assign_cgroup(wb);
	),
	TP_printk("bdi %s: cgroup_ino=%u",
		  __entry->name,
		  __entry->cgroup_ino
	)
);

#define DEFINE_WRITEBACK_EVENT(name) \
DEFINE_EVENT(writeback_class, name, \
	TP_PROTO(struct bdi_writeback *wb), \
	TP_ARGS(wb))

DEFINE_WRITEBACK_EVENT(writeback_wake_background);

TRACE_EVENT(writeback_bdi_register,
	TP_PROTO(struct backing_dev_info *bdi),
	TP_ARGS(bdi),
	TP_STRUCT__entry(
		__array(char, name, 32)
	),
	TP_fast_assign(
		strscpy_pad(__entry->name, bdi_dev_name(bdi), 32);
	),
	TP_printk("bdi %s",
		__entry->name
	)
);

DECLARE_EVENT_CLASS(wbc_class,
	TP_PROTO(struct writeback_control *wbc, struct backing_dev_info *bdi),
	TP_ARGS(wbc, bdi),
	TP_STRUCT__entry(
		__array(char, name, 32)
		__field(long, nr_to_write)
		__field(long, pages_skipped)
		__field(int, sync_mode)
		__field(int, for_kupdate)
		__field(int, for_background)
		__field(int, for_reclaim)
		__field(int, range_cyclic)
		__field(long, range_start)
		__field(long, range_end)
		__field(unsigned int, cgroup_ino)
	),

	TP_fast_assign(
		strscpy_pad(__entry->name, bdi_dev_name(bdi), 32);
		__entry->nr_to_write = wbc->nr_to_write;
		__entry->pages_skipped = wbc->pages_skipped;
		__entry->sync_mode = wbc->sync_mode;
		__entry->for_kupdate = wbc->for_kupdate;
		__entry->for_background = wbc->for_background;
		__entry->for_reclaim = wbc->for_reclaim;
		__entry->range_cyclic = wbc->range_cyclic;
		__entry->range_start = (long)wbc->range_start;
		__entry->range_end = (long)wbc->range_end;
		__entry->cgroup_ino = __trace_wbc_assign_cgroup(wbc);
	),

	TP_printk("bdi %s: towrt=%ld skip=%ld mode=%d kupd=%d "
		  "bgrd=%d reclm=%d cyclic=%d "
		  "start=0x%lx end=0x%lx cgroup_ino=%u",
		  __entry->name,
		  __entry->nr_to_write,
		  __entry->pages_skipped,
		  __entry->sync_mode,
		  __entry->for_kupdate,
		  __entry->for_background,
		  __entry->for_reclaim,
		  __entry->range_cyclic,
		  __entry->range_start,
		  __entry->range_end,
		  __entry->cgroup_ino
	)
)

#define DEFINE_WBC_EVENT(name) \
DEFINE_EVENT(wbc_class, name, \
	TP_PROTO(struct writeback_control *wbc, struct backing_dev_info *bdi), \
	TP_ARGS(wbc, bdi))
DEFINE_WBC_EVENT(wbc_writepage);
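
/*
 * wbc_writepage is typically emitted from write_cache_pages() for each page
 * handed to the filesystem's writepage callback.
 */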

TRACE_EVENT(writeback_queue_io,
	TP_PROTO(struct bdi_writeback *wb,
		 struct wb_writeback_work *work,
		 unsigned long dirtied_before,
		 int moved),
	TP_ARGS(wb, work, dirtied_before, moved),
	TP_STRUCT__entry(
		__array(char, name, 32)
		__field(unsigned long, older)
		__field(long, age)
		__field(int, moved)
		__field(int, reason)
		__field(unsigned int, cgroup_ino)
	),
	TP_fast_assign(
		strscpy_pad(__entry->name, bdi_dev_name(wb->bdi), 32);
		__entry->older = dirtied_before;
		__entry->age = (jiffies - dirtied_before) * 1000 / HZ;
		__entry->moved = moved;
		__entry->reason = work->reason;
		__entry->cgroup_ino = __trace_wb_assign_cgroup(wb);
	),
	TP_printk("bdi %s: older=%lu age=%ld enqueue=%d reason=%s cgroup_ino=%u",
		__entry->name,
		__entry->older,	/* dirtied_before in jiffies */
		__entry->age,	/* dirtied_before in relative milliseconds */
		__entry->moved,
		__print_symbolic(__entry->reason, WB_WORK_REASON),
		__entry->cgroup_ino
	)
);

TRACE_EVENT(global_dirty_state,

	TP_PROTO(unsigned long background_thresh,
		 unsigned long dirty_thresh
	),

	TP_ARGS(background_thresh,
		dirty_thresh
	),

	TP_STRUCT__entry(
		__field(unsigned long, nr_dirty)
		__field(unsigned long, nr_writeback)
		__field(unsigned long, nr_unstable)
		__field(unsigned long, background_thresh)
		__field(unsigned long, dirty_thresh)
		__field(unsigned long, dirty_limit)
		__field(unsigned long, nr_dirtied)
		__field(unsigned long, nr_written)
	),

	TP_fast_assign(
		__entry->nr_dirty = global_node_page_state(NR_FILE_DIRTY);
		__entry->nr_writeback = global_node_page_state(NR_WRITEBACK);
		__entry->nr_unstable = global_node_page_state(NR_UNSTABLE_NFS);
		__entry->nr_dirtied = global_node_page_state(NR_DIRTIED);
		__entry->nr_written = global_node_page_state(NR_WRITTEN);
		__entry->background_thresh = background_thresh;
		__entry->dirty_thresh = dirty_thresh;
		__entry->dirty_limit = global_wb_domain.dirty_limit;
	),

	TP_printk("dirty=%lu writeback=%lu unstable=%lu "
		  "bg_thresh=%lu thresh=%lu limit=%lu "
		  "dirtied=%lu written=%lu",
		  __entry->nr_dirty,
		  __entry->nr_writeback,
		  __entry->nr_unstable,
		  __entry->background_thresh,
		  __entry->dirty_thresh,
		  __entry->dirty_limit,
		  __entry->nr_dirtied,
		  __entry->nr_written
	)
);
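
/*
 * KBps() converts a page count to kilobytes: shifting by (PAGE_SHIFT - 10)
 * multiplies by PAGE_SIZE / 1024, assuming PAGE_SIZE is at least 1 KiB.
 */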
#define KBps(x)			((x) << (PAGE_SHIFT - 10))

TRACE_EVENT(bdi_dirty_ratelimit,

	TP_PROTO(struct bdi_writeback *wb,
		 unsigned long dirty_rate,
		 unsigned long task_ratelimit),

	TP_ARGS(wb, dirty_rate, task_ratelimit),

	TP_STRUCT__entry(
		__array(char, bdi, 32)
		__field(unsigned long, write_bw)
		__field(unsigned long, avg_write_bw)
		__field(unsigned long, dirty_rate)
		__field(unsigned long, dirty_ratelimit)
		__field(unsigned long, task_ratelimit)
		__field(unsigned long, balanced_dirty_ratelimit)
		__field(unsigned int, cgroup_ino)
	),

	TP_fast_assign(
		strscpy_pad(__entry->bdi, bdi_dev_name(wb->bdi), 32);
		__entry->write_bw = KBps(wb->write_bandwidth);
		__entry->avg_write_bw = KBps(wb->avg_write_bandwidth);
		__entry->dirty_rate = KBps(dirty_rate);
		__entry->dirty_ratelimit = KBps(wb->dirty_ratelimit);
		__entry->task_ratelimit = KBps(task_ratelimit);
		__entry->balanced_dirty_ratelimit =
					KBps(wb->balanced_dirty_ratelimit);
		__entry->cgroup_ino = __trace_wb_assign_cgroup(wb);
	),

	TP_printk("bdi %s: "
		  "write_bw=%lu awrite_bw=%lu dirty_rate=%lu "
		  "dirty_ratelimit=%lu task_ratelimit=%lu "
		  "balanced_dirty_ratelimit=%lu cgroup_ino=%u",
		  __entry->bdi,
		  __entry->write_bw,		/* write bandwidth */
		  __entry->avg_write_bw,	/* avg write bandwidth */
		  __entry->dirty_rate,		/* bdi dirty rate */
		  __entry->dirty_ratelimit,	/* base ratelimit */
		  __entry->task_ratelimit,	/* ratelimit with position control */
		  __entry->balanced_dirty_ratelimit, /* the balanced ratelimit */
		  __entry->cgroup_ino
	)
);
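
/*
 * Derived fields below: freerun is the midpoint of the background and dirty
 * thresholds; setpoint is the midpoint of the global dirty limit and
 * freerun; bdi_setpoint scales setpoint by this bdi's share of the global
 * threshold (bdi_thresh / thresh).
 */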
TRACE_EVENT(balance_dirty_pages,

	TP_PROTO(struct bdi_writeback *wb,
		 unsigned long thresh,
		 unsigned long bg_thresh,
		 unsigned long dirty,
		 unsigned long bdi_thresh,
		 unsigned long bdi_dirty,
		 unsigned long dirty_ratelimit,
		 unsigned long task_ratelimit,
		 unsigned long dirtied,
		 unsigned long period,
		 long pause,
		 unsigned long start_time),

	TP_ARGS(wb, thresh, bg_thresh, dirty, bdi_thresh, bdi_dirty,
		dirty_ratelimit, task_ratelimit,
		dirtied, period, pause, start_time),

	TP_STRUCT__entry(
		__array(char, bdi, 32)
		__field(unsigned long, limit)
		__field(unsigned long, setpoint)
		__field(unsigned long, dirty)
		__field(unsigned long, bdi_setpoint)
		__field(unsigned long, bdi_dirty)
		__field(unsigned long, dirty_ratelimit)
		__field(unsigned long, task_ratelimit)
		__field(unsigned int, dirtied)
		__field(unsigned int, dirtied_pause)
		__field(unsigned long, paused)
		__field(long, pause)
		__field(unsigned long, period)
		__field(long, think)
		__field(unsigned int, cgroup_ino)
	),

	TP_fast_assign(
		unsigned long freerun = (thresh + bg_thresh) / 2;

		strscpy_pad(__entry->bdi, bdi_dev_name(wb->bdi), 32);
		__entry->limit = global_wb_domain.dirty_limit;
		__entry->setpoint = (global_wb_domain.dirty_limit +
				     freerun) / 2;
		__entry->dirty = dirty;
		__entry->bdi_setpoint = __entry->setpoint *
						bdi_thresh / (thresh + 1);
		__entry->bdi_dirty = bdi_dirty;
		__entry->dirty_ratelimit = KBps(dirty_ratelimit);
		__entry->task_ratelimit = KBps(task_ratelimit);
		__entry->dirtied = dirtied;
		__entry->dirtied_pause = current->nr_dirtied_pause;
		__entry->think = current->dirty_paused_when == 0 ? 0 :
			(long)(jiffies - current->dirty_paused_when) * 1000 / HZ;
		__entry->period = period * 1000 / HZ;
		__entry->pause = pause * 1000 / HZ;
		__entry->paused = (jiffies - start_time) * 1000 / HZ;
		__entry->cgroup_ino = __trace_wb_assign_cgroup(wb);
	),

	TP_printk("bdi %s: "
		  "limit=%lu setpoint=%lu dirty=%lu "
		  "bdi_setpoint=%lu bdi_dirty=%lu "
		  "dirty_ratelimit=%lu task_ratelimit=%lu "
		  "dirtied=%u dirtied_pause=%u "
		  "paused=%lu pause=%ld period=%lu think=%ld cgroup_ino=%u",
		  __entry->bdi,
		  __entry->limit,
		  __entry->setpoint,
		  __entry->dirty,
		  __entry->bdi_setpoint,
		  __entry->bdi_dirty,
		  __entry->dirty_ratelimit,
		  __entry->task_ratelimit,
		  __entry->dirtied,
		  __entry->dirtied_pause,
		  __entry->paused,	/* ms */
		  __entry->pause,	/* ms */
		  __entry->period,	/* ms */
		  __entry->think,	/* ms */
		  __entry->cgroup_ino
	)
);

TRACE_EVENT(writeback_sb_inodes_requeue,

	TP_PROTO(struct inode *inode),
	TP_ARGS(inode),

	TP_STRUCT__entry(
		__array(char, name, 32)
		__field(unsigned long, ino)
		__field(unsigned long, state)
		__field(unsigned long, dirtied_when)
		__field(unsigned int, cgroup_ino)
	),

	TP_fast_assign(
		strscpy_pad(__entry->name,
			    bdi_dev_name(inode_to_bdi(inode)), 32);
		__entry->ino = inode->i_ino;
		__entry->state = inode->i_state;
		__entry->dirtied_when = inode->dirtied_when;
		__entry->cgroup_ino = __trace_wb_assign_cgroup(inode_to_wb(inode));
	),

	TP_printk("bdi %s: ino=%lu state=%s dirtied_when=%lu age=%lu cgroup_ino=%u",
		  __entry->name,
		  __entry->ino,
		  show_inode_state(__entry->state),
		  __entry->dirtied_when,
		  (jiffies - __entry->dirtied_when) / HZ,
		  __entry->cgroup_ino
	)
);

DECLARE_EVENT_CLASS(writeback_congest_waited_template,

	TP_PROTO(unsigned int usec_timeout, unsigned int usec_delayed),

	TP_ARGS(usec_timeout, usec_delayed),

	TP_STRUCT__entry(
		__field(unsigned int, usec_timeout)
		__field(unsigned int, usec_delayed)
	),

	TP_fast_assign(
		__entry->usec_timeout = usec_timeout;
		__entry->usec_delayed = usec_delayed;
	),

	TP_printk("usec_timeout=%u usec_delayed=%u",
		  __entry->usec_timeout,
		  __entry->usec_delayed)
);

DEFINE_EVENT(writeback_congest_waited_template, writeback_congestion_wait,
	TP_PROTO(unsigned int usec_timeout, unsigned int usec_delayed),
	TP_ARGS(usec_timeout, usec_delayed)
);

DEFINE_EVENT(writeback_congest_waited_template, writeback_wait_iff_congested,
	TP_PROTO(unsigned int usec_timeout, unsigned int usec_delayed),
	TP_ARGS(usec_timeout, usec_delayed)
);

DECLARE_EVENT_CLASS(writeback_single_inode_template,

	TP_PROTO(struct inode *inode,
		 struct writeback_control *wbc,
		 unsigned long nr_to_write
	),

	TP_ARGS(inode, wbc, nr_to_write),

	TP_STRUCT__entry(
		__array(char, name, 32)
		__field(unsigned long, ino)
		__field(unsigned long, state)
		__field(unsigned long, dirtied_when)
		__field(unsigned long, writeback_index)
		__field(long, nr_to_write)
		__field(unsigned long, wrote)
		__field(unsigned int, cgroup_ino)
	),

	TP_fast_assign(
		strscpy_pad(__entry->name,
			    bdi_dev_name(inode_to_bdi(inode)), 32);
		__entry->ino = inode->i_ino;
		__entry->state = inode->i_state;
		__entry->dirtied_when = inode->dirtied_when;
		__entry->writeback_index = inode->i_mapping->writeback_index;
		__entry->nr_to_write = nr_to_write;
		__entry->wrote = nr_to_write - wbc->nr_to_write;
		__entry->cgroup_ino = __trace_wbc_assign_cgroup(wbc);
	),

	TP_printk("bdi %s: ino=%lu state=%s dirtied_when=%lu age=%lu "
		  "index=%lu to_write=%ld wrote=%lu cgroup_ino=%u",
		  __entry->name,
		  __entry->ino,
		  show_inode_state(__entry->state),
		  __entry->dirtied_when,
		  (jiffies - __entry->dirtied_when) / HZ,
		  __entry->writeback_index,
		  __entry->nr_to_write,
		  __entry->wrote,
		  __entry->cgroup_ino
	)
);

DEFINE_EVENT(writeback_single_inode_template, writeback_single_inode_start,
	TP_PROTO(struct inode *inode,
		 struct writeback_control *wbc,
		 unsigned long nr_to_write),
	TP_ARGS(inode, wbc, nr_to_write)
);

DEFINE_EVENT(writeback_single_inode_template, writeback_single_inode,
	TP_PROTO(struct inode *inode,
		 struct writeback_control *wbc,
		 unsigned long nr_to_write),
	TP_ARGS(inode, wbc, nr_to_write)
);
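
/*
 * Unlike the bdi-keyed templates above, writeback_inode_template identifies
 * the inode by its superblock's device numbers and also records i_mode.
 */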
DECLARE_EVENT_CLASS(writeback_inode_template,
	TP_PROTO(struct inode *inode),

	TP_ARGS(inode),

	TP_STRUCT__entry(
		__field(dev_t, dev)
		__field(unsigned long, ino)
		__field(unsigned long, state)
		__field(__u16, mode)
		__field(unsigned long, dirtied_when)
	),

	TP_fast_assign(
		__entry->dev = inode->i_sb->s_dev;
		__entry->ino = inode->i_ino;
		__entry->state = inode->i_state;
		__entry->mode = inode->i_mode;
		__entry->dirtied_when = inode->dirtied_when;
	),

	TP_printk("dev %d,%d ino %lu dirtied %lu state %s mode 0%o",
		  MAJOR(__entry->dev), MINOR(__entry->dev),
		  __entry->ino, __entry->dirtied_when,
		  show_inode_state(__entry->state), __entry->mode)
);

DEFINE_EVENT(writeback_inode_template, writeback_lazytime,
	TP_PROTO(struct inode *inode),
	TP_ARGS(inode)
);

DEFINE_EVENT(writeback_inode_template, writeback_lazytime_iput,
	TP_PROTO(struct inode *inode),
	TP_ARGS(inode)
);

DEFINE_EVENT(writeback_inode_template, writeback_dirty_inode_enqueue,
	TP_PROTO(struct inode *inode),
	TP_ARGS(inode)
);

/*
 * Inode writeback list tracking.
 */

DEFINE_EVENT(writeback_inode_template, sb_mark_inode_writeback,
	TP_PROTO(struct inode *inode),
	TP_ARGS(inode)
);

DEFINE_EVENT(writeback_inode_template, sb_clear_inode_writeback,
	TP_PROTO(struct inode *inode),
	TP_ARGS(inode)
);

#endif /* _TRACE_WRITEBACK_H */

/* This part must be outside protection */
#include <trace/define_trace.h>