iostat.c
// SPDX-License-Identifier: GPL-2.0
/*
 * f2fs iostat support
 *
 * Copyright 2021 Google LLC
 * Author: Daeho Jeong <daehojeong@google.com>
 */

#include <linux/fs.h>
#include <linux/f2fs_fs.h>
#include <linux/seq_file.h>

#include "f2fs.h"
#include "iostat.h"
#include <trace/events/f2fs.h>

static struct kmem_cache *bio_iostat_ctx_cache;
static mempool_t *bio_iostat_ctx_pool;
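
/*
 * Average bytes per request for a given iostat type. div64_u64() is used
 * so the 64-bit division also works on 32-bit architectures, where a
 * plain 64-by-64 '/' is not available.
 */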
static inline unsigned long long iostat_get_avg_bytes(struct f2fs_sb_info *sbi,
	enum iostat_type type)
{
	return sbi->iostat_count[type] ? div64_u64(sbi->iostat_bytes[type],
		sbi->iostat_count[type]) : 0;
}

#define IOSTAT_INFO_SHOW(name, type)					\
	seq_printf(seq, "%-23s %-16llu %-16llu %-16llu\n",		\
			name":", sbi->iostat_bytes[type],		\
			sbi->iostat_count[type],			\
			iostat_get_avg_bytes(sbi, type))
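
/*
 * Dumps the per-type byte/count/average table. In f2fs this backs the
 * iostat_info file under /proc/fs/f2fs/<dev>/; a sketch of the hookup,
 * which is registered from super.c rather than in this file:
 *
 *	proc_create_single_data("iostat_info", 0444, sbi->s_proc,
 *				iostat_info_seq_show, sb);
 */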
int __maybe_unused iostat_info_seq_show(struct seq_file *seq, void *offset)
{
	struct super_block *sb = seq->private;
	struct f2fs_sb_info *sbi = F2FS_SB(sb);

	if (!sbi->iostat_enable)
		return 0;

	seq_printf(seq, "time: %-16llu\n", ktime_get_real_seconds());
	seq_printf(seq, "\t\t\t%-16s %-16s %-16s\n",
				"io_bytes", "count", "avg_bytes");

	/* print app write IOs */
	seq_puts(seq, "[WRITE]\n");
	IOSTAT_INFO_SHOW("app buffered data", APP_BUFFERED_IO);
	IOSTAT_INFO_SHOW("app direct data", APP_DIRECT_IO);
	IOSTAT_INFO_SHOW("app mapped data", APP_MAPPED_IO);
	IOSTAT_INFO_SHOW("app buffered cdata", APP_BUFFERED_CDATA_IO);
	IOSTAT_INFO_SHOW("app mapped cdata", APP_MAPPED_CDATA_IO);

	/* print fs write IOs */
	IOSTAT_INFO_SHOW("fs data", FS_DATA_IO);
	IOSTAT_INFO_SHOW("fs cdata", FS_CDATA_IO);
	IOSTAT_INFO_SHOW("fs node", FS_NODE_IO);
	IOSTAT_INFO_SHOW("fs meta", FS_META_IO);
	IOSTAT_INFO_SHOW("fs gc data", FS_GC_DATA_IO);
	IOSTAT_INFO_SHOW("fs gc node", FS_GC_NODE_IO);
	IOSTAT_INFO_SHOW("fs cp data", FS_CP_DATA_IO);
	IOSTAT_INFO_SHOW("fs cp node", FS_CP_NODE_IO);
	IOSTAT_INFO_SHOW("fs cp meta", FS_CP_META_IO);

	/* print app read IOs */
	seq_puts(seq, "[READ]\n");
	IOSTAT_INFO_SHOW("app buffered data", APP_BUFFERED_READ_IO);
	IOSTAT_INFO_SHOW("app direct data", APP_DIRECT_READ_IO);
	IOSTAT_INFO_SHOW("app mapped data", APP_MAPPED_READ_IO);
	IOSTAT_INFO_SHOW("app buffered cdata", APP_BUFFERED_CDATA_READ_IO);
	IOSTAT_INFO_SHOW("app mapped cdata", APP_MAPPED_CDATA_READ_IO);

	/* print fs read IOs */
	IOSTAT_INFO_SHOW("fs data", FS_DATA_READ_IO);
	IOSTAT_INFO_SHOW("fs gc data", FS_GDATA_READ_IO);
	IOSTAT_INFO_SHOW("fs cdata", FS_CDATA_READ_IO);
	IOSTAT_INFO_SHOW("fs node", FS_NODE_READ_IO);
	IOSTAT_INFO_SHOW("fs meta", FS_META_READ_IO);

	/* print other IOs */
	seq_puts(seq, "[OTHER]\n");
	IOSTAT_INFO_SHOW("fs discard", FS_DISCARD_IO);
	IOSTAT_INFO_SHOW("fs flush", FS_FLUSH_IO);
	IOSTAT_INFO_SHOW("fs zone reset", FS_ZONE_RESET_IO);

	return 0;
}
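
/*
 * Snapshot the latency table into a local copy and reset it under
 * iostat_lat_lock, then emit the snapshot through the
 * f2fs_iostat_latency tracepoint after the lock is dropped, so the
 * trace path never runs with the spinlock held.
 */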
static inline void __record_iostat_latency(struct f2fs_sb_info *sbi)
{
	int io, idx;
	struct f2fs_iostat_latency iostat_lat[MAX_IO_TYPE][NR_PAGE_TYPE];
	struct iostat_lat_info *io_lat = sbi->iostat_io_lat;
	unsigned long flags;

	spin_lock_irqsave(&sbi->iostat_lat_lock, flags);
	for (idx = 0; idx < MAX_IO_TYPE; idx++) {
		for (io = 0; io < NR_PAGE_TYPE; io++) {
			iostat_lat[idx][io].peak_lat =
			   jiffies_to_msecs(io_lat->peak_lat[idx][io]);
			iostat_lat[idx][io].cnt = io_lat->bio_cnt[idx][io];
			iostat_lat[idx][io].avg_lat = iostat_lat[idx][io].cnt ?
			   jiffies_to_msecs(io_lat->sum_lat[idx][io]) /
			   iostat_lat[idx][io].cnt : 0;
			io_lat->sum_lat[idx][io] = 0;
			io_lat->peak_lat[idx][io] = 0;
			io_lat->bio_cnt[idx][io] = 0;
		}
	}
	spin_unlock_irqrestore(&sbi->iostat_lat_lock, flags);

	trace_f2fs_iostat_latency(sbi, iostat_lat);
}
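
/*
 * Rate-limited stat emission: the cheap jiffies test is repeated under
 * iostat_lock (double-checked locking) so concurrent updaters don't
 * emit the same period twice. Only per-period byte deltas are traced;
 * prev_iostat_bytes holds the snapshot from the previous period.
 */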
static inline void f2fs_record_iostat(struct f2fs_sb_info *sbi)
{
	unsigned long long iostat_diff[NR_IO_TYPE];
	int i;
	unsigned long flags;

	if (time_is_after_jiffies(sbi->iostat_next_period))
		return;

	/* Need double check under the lock */
	spin_lock_irqsave(&sbi->iostat_lock, flags);
	if (time_is_after_jiffies(sbi->iostat_next_period)) {
		spin_unlock_irqrestore(&sbi->iostat_lock, flags);
		return;
	}
	sbi->iostat_next_period = jiffies +
		msecs_to_jiffies(sbi->iostat_period_ms);

	for (i = 0; i < NR_IO_TYPE; i++) {
		iostat_diff[i] = sbi->iostat_bytes[i] -
				sbi->prev_iostat_bytes[i];
		sbi->prev_iostat_bytes[i] = sbi->iostat_bytes[i];
	}
	spin_unlock_irqrestore(&sbi->iostat_lock, flags);

	trace_f2fs_iostat(sbi, iostat_diff);

	__record_iostat_latency(sbi);
}
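
/*
 * Clears both the byte/count counters and the latency table; typically
 * invoked when iostat collection is (re-)enabled through the
 * iostat_enable sysfs knob, so a new session starts from zero.
 */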
void f2fs_reset_iostat(struct f2fs_sb_info *sbi)
{
	struct iostat_lat_info *io_lat = sbi->iostat_io_lat;
	int i;

	spin_lock_irq(&sbi->iostat_lock);
	for (i = 0; i < NR_IO_TYPE; i++) {
		sbi->iostat_count[i] = 0;
		sbi->iostat_bytes[i] = 0;
		sbi->prev_iostat_bytes[i] = 0;
	}
	spin_unlock_irq(&sbi->iostat_lock);

	spin_lock_irq(&sbi->iostat_lat_lock);
	memset(io_lat, 0, sizeof(struct iostat_lat_info));
	spin_unlock_irq(&sbi->iostat_lat_lock);
}
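
/* Caller must hold sbi->iostat_lock. */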
static inline void __f2fs_update_iostat(struct f2fs_sb_info *sbi,
			enum iostat_type type, unsigned long long io_bytes)
{
	sbi->iostat_bytes[type] += io_bytes;
	sbi->iostat_count[type]++;
}
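
/*
 * Public entry point used by the read/write paths. An illustrative
 * call site (example values, not taken from this file):
 *
 *	f2fs_update_iostat(sbi, inode, APP_BUFFERED_READ_IO, PAGE_SIZE);
 *
 * Aggregates (APP_WRITE_IO/APP_READ_IO) and the compressed-file cdata
 * counters are derived here, so callers only report the base type.
 */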
void f2fs_update_iostat(struct f2fs_sb_info *sbi, struct inode *inode,
			enum iostat_type type, unsigned long long io_bytes)
{
	unsigned long flags;

	if (!sbi->iostat_enable)
		return;

	spin_lock_irqsave(&sbi->iostat_lock, flags);
	__f2fs_update_iostat(sbi, type, io_bytes);

	if (type == APP_BUFFERED_IO || type == APP_DIRECT_IO)
		__f2fs_update_iostat(sbi, APP_WRITE_IO, io_bytes);

	if (type == APP_BUFFERED_READ_IO || type == APP_DIRECT_READ_IO)
		__f2fs_update_iostat(sbi, APP_READ_IO, io_bytes);

#ifdef CONFIG_F2FS_FS_COMPRESSION
	if (inode && f2fs_compressed_file(inode)) {
		if (type == APP_BUFFERED_IO)
			__f2fs_update_iostat(sbi, APP_BUFFERED_CDATA_IO, io_bytes);

		if (type == APP_BUFFERED_READ_IO)
			__f2fs_update_iostat(sbi, APP_BUFFERED_CDATA_READ_IO, io_bytes);

		if (type == APP_MAPPED_READ_IO)
			__f2fs_update_iostat(sbi, APP_MAPPED_CDATA_READ_IO, io_bytes);

		if (type == APP_MAPPED_IO)
			__f2fs_update_iostat(sbi, APP_MAPPED_CDATA_IO, io_bytes);

		if (type == FS_DATA_READ_IO)
			__f2fs_update_iostat(sbi, FS_CDATA_READ_IO, io_bytes);

		if (type == FS_DATA_IO)
			__f2fs_update_iostat(sbi, FS_CDATA_IO, io_bytes);
	}
#endif

	spin_unlock_irqrestore(&sbi->iostat_lock, flags);

	f2fs_record_iostat(sbi);
}

static inline void __update_iostat_latency(struct bio_iostat_ctx *iostat_ctx,
				enum iostat_lat_type lat_type)
{
	unsigned long ts_diff;
	unsigned int page_type = iostat_ctx->type;
	struct f2fs_sb_info *sbi = iostat_ctx->sbi;
	struct iostat_lat_info *io_lat = sbi->iostat_io_lat;
	unsigned long flags;

	if (!sbi->iostat_enable)
		return;

	ts_diff = jiffies - iostat_ctx->submit_ts;
	if (page_type == META_FLUSH) {
		page_type = META;
	} else if (page_type >= NR_PAGE_TYPE) {
		f2fs_warn(sbi, "%s: %d over NR_PAGE_TYPE", __func__, page_type);
		return;
	}

	spin_lock_irqsave(&sbi->iostat_lat_lock, flags);
	io_lat->sum_lat[lat_type][page_type] += ts_diff;
	io_lat->bio_cnt[lat_type][page_type]++;
	if (ts_diff > io_lat->peak_lat[lat_type][page_type])
		io_lat->peak_lat[lat_type][page_type] = ts_diff;
	spin_unlock_irqrestore(&sbi->iostat_lat_lock, flags);
}
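
/*
 * Called from bio completion. While the bio was in flight, bi_private
 * pointed at the bio_iostat_ctx; restore what the rest of the completion
 * path expects (the sbi for writes, the post-read context for reads)
 * before recording latency and returning the ctx to the mempool.
 */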
void iostat_update_and_unbind_ctx(struct bio *bio)
{
	struct bio_iostat_ctx *iostat_ctx = bio->bi_private;
	enum iostat_lat_type lat_type;

	if (op_is_write(bio_op(bio))) {
		lat_type = bio->bi_opf & REQ_SYNC ?
				WRITE_SYNC_IO : WRITE_ASYNC_IO;
		bio->bi_private = iostat_ctx->sbi;
	} else {
		lat_type = READ_IO;
		bio->bi_private = iostat_ctx->post_read_ctx;
	}

	__update_iostat_latency(iostat_ctx, lat_type);
	mempool_free(iostat_ctx, bio_iostat_ctx_pool);
}
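
/*
 * submit_ts and type start out zeroed here; they are expected to be
 * filled in at submission time (via iostat_update_submit_ctx() in
 * iostat.h, assuming the usual f2fs layout).
 */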
void iostat_alloc_and_bind_ctx(struct f2fs_sb_info *sbi,
		struct bio *bio, struct bio_post_read_ctx *ctx)
{
	struct bio_iostat_ctx *iostat_ctx;

	/* Due to the mempool, this never fails. */
	iostat_ctx = mempool_alloc(bio_iostat_ctx_pool, GFP_NOFS);
	iostat_ctx->sbi = sbi;
	iostat_ctx->submit_ts = 0;
	iostat_ctx->type = 0;
	iostat_ctx->post_read_ctx = ctx;
	bio->bi_private = iostat_ctx;
}
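
/*
 * A dedicated slab cache plus a mempool of NUM_PREALLOC_IOSTAT_CTXS
 * entries guarantees forward progress for ctx allocation on the I/O
 * path even under memory pressure, which is why the mempool_alloc()
 * above can be treated as never failing.
 */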
int __init f2fs_init_iostat_processing(void)
{
	bio_iostat_ctx_cache =
		kmem_cache_create("f2fs_bio_iostat_ctx",
				sizeof(struct bio_iostat_ctx), 0, 0, NULL);
	if (!bio_iostat_ctx_cache)
		goto fail;
	bio_iostat_ctx_pool =
		mempool_create_slab_pool(NUM_PREALLOC_IOSTAT_CTXS,
					bio_iostat_ctx_cache);
	if (!bio_iostat_ctx_pool)
		goto fail_free_cache;
	return 0;

fail_free_cache:
	kmem_cache_destroy(bio_iostat_ctx_cache);
fail:
	return -ENOMEM;
}

void f2fs_destroy_iostat_processing(void)
{
	mempool_destroy(bio_iostat_ctx_pool);
	kmem_cache_destroy(bio_iostat_ctx_cache);
}

int f2fs_init_iostat(struct f2fs_sb_info *sbi)
{
	/* init iostat info */
	spin_lock_init(&sbi->iostat_lock);
	spin_lock_init(&sbi->iostat_lat_lock);
	sbi->iostat_enable = false;
	sbi->iostat_period_ms = DEFAULT_IOSTAT_PERIOD_MS;
	sbi->iostat_io_lat = f2fs_kzalloc(sbi, sizeof(struct iostat_lat_info),
					GFP_KERNEL);
	if (!sbi->iostat_io_lat)
		return -ENOMEM;

	return 0;
}

void f2fs_destroy_iostat(struct f2fs_sb_info *sbi)
{
	kfree(sbi->iostat_io_lat);
}