  1. // SPDX-License-Identifier: GPL-2.0-or-later
  2. /*
  3. * Copyright (C) 2023 Oracle. All Rights Reserved.
  4. * Author: Darrick J. Wong <djwong@kernel.org>
  5. */
  6. #include "xfs.h"
  7. #include "xfs_fs.h"
  8. #include "xfs_shared.h"
  9. #include "xfs_format.h"
  10. #include "xfs_trans_resv.h"
  11. #include "xfs_mount.h"
  12. #include "xfs_sysfs.h"
  13. #include "xfs_btree.h"
  14. #include "xfs_super.h"
  15. #include "scrub/scrub.h"
  16. #include "scrub/stats.h"
  17. #include "scrub/trace.h"
/*
 * Per-scrubber-type counters.  The field layout is load-bearing:
 * xchk_stats_clearall() zeroes everything up to css_lock with one memset,
 * and xchk_stats_estimate_bufsize() sizes the report by dividing the byte
 * spans of the 32-bit and 64-bit regions.  Keep all u32 counters first,
 * then all u64 counters, then non-counter state.
 */
struct xchk_scrub_stats {
	/* all 32-bit counters here */

	/* checking stats */
	uint32_t		invocations;	/* scrub calls for this type */
	uint32_t		clean;		/* runs with no OFLAG set */
	uint32_t		corrupt;
	uint32_t		preen;
	uint32_t		xfail;
	uint32_t		xcorrupt;
	uint32_t		incomplete;
	uint32_t		warning;
	uint32_t		retries;	/* total retry count */

	/* repair stats */
	uint32_t		repair_invocations;
	uint32_t		repair_success;

	/* all 64-bit items here */

	/* runtimes */
	uint64_t		checktime_us;	/* total scrub time, usec */
	uint64_t		repairtime_us;	/* total repair time, usec */

	/* non-counter state must go at the end for clearall */
	spinlock_t		css_lock;	/* serializes counter updates */
};
/* A full set of scrub counters, plus its debugfs anchor. */
struct xchk_stats {
	struct dentry		*cs_debugfs;	/* "scrub" debugfs dir, or NULL */
	struct xchk_scrub_stats	cs_stats[XFS_SCRUB_TYPE_NR];
};
/* Scrub statistics aggregated across every mounted filesystem. */
static struct xchk_stats	global_stats;
/*
 * Row label for each scrub type in the stats report.  A NULL entry means
 * that type is skipped entirely when formatting and sizing the report.
 */
static const char *name_map[XFS_SCRUB_TYPE_NR] = {
	[XFS_SCRUB_TYPE_SB]		= "sb",
	[XFS_SCRUB_TYPE_AGF]		= "agf",
	[XFS_SCRUB_TYPE_AGFL]		= "agfl",
	[XFS_SCRUB_TYPE_AGI]		= "agi",
	[XFS_SCRUB_TYPE_BNOBT]		= "bnobt",
	[XFS_SCRUB_TYPE_CNTBT]		= "cntbt",
	[XFS_SCRUB_TYPE_INOBT]		= "inobt",
	[XFS_SCRUB_TYPE_FINOBT]		= "finobt",
	[XFS_SCRUB_TYPE_RMAPBT]		= "rmapbt",
	[XFS_SCRUB_TYPE_REFCNTBT]	= "refcountbt",
	[XFS_SCRUB_TYPE_INODE]		= "inode",
	[XFS_SCRUB_TYPE_BMBTD]		= "bmapbtd",
	[XFS_SCRUB_TYPE_BMBTA]		= "bmapbta",
	[XFS_SCRUB_TYPE_BMBTC]		= "bmapbtc",
	[XFS_SCRUB_TYPE_DIR]		= "directory",
	[XFS_SCRUB_TYPE_XATTR]		= "xattr",
	[XFS_SCRUB_TYPE_SYMLINK]	= "symlink",
	[XFS_SCRUB_TYPE_PARENT]		= "parent",
	[XFS_SCRUB_TYPE_RTBITMAP]	= "rtbitmap",
	[XFS_SCRUB_TYPE_RTSUM]		= "rtsummary",
	[XFS_SCRUB_TYPE_UQUOTA]		= "usrquota",
	[XFS_SCRUB_TYPE_GQUOTA]		= "grpquota",
	[XFS_SCRUB_TYPE_PQUOTA]		= "prjquota",
	[XFS_SCRUB_TYPE_FSCOUNTERS]	= "fscounters",
	[XFS_SCRUB_TYPE_QUOTACHECK]	= "quotacheck",
	[XFS_SCRUB_TYPE_NLINKS]		= "nlinks",
	[XFS_SCRUB_TYPE_DIRTREE]	= "dirtree",
};
  74. /* Format the scrub stats into a text buffer, similar to pcp style. */
  75. STATIC ssize_t
  76. xchk_stats_format(
  77. struct xchk_stats *cs,
  78. char *buf,
  79. size_t remaining)
  80. {
  81. struct xchk_scrub_stats *css = &cs->cs_stats[0];
  82. unsigned int i;
  83. ssize_t copied = 0;
  84. int ret = 0;
  85. for (i = 0; i < XFS_SCRUB_TYPE_NR; i++, css++) {
  86. if (!name_map[i])
  87. continue;
  88. ret = scnprintf(buf, remaining,
  89. "%s %u %u %u %u %u %u %u %u %u %llu %u %u %llu\n",
  90. name_map[i],
  91. (unsigned int)css->invocations,
  92. (unsigned int)css->clean,
  93. (unsigned int)css->corrupt,
  94. (unsigned int)css->preen,
  95. (unsigned int)css->xfail,
  96. (unsigned int)css->xcorrupt,
  97. (unsigned int)css->incomplete,
  98. (unsigned int)css->warning,
  99. (unsigned int)css->retries,
  100. (unsigned long long)css->checktime_us,
  101. (unsigned int)css->repair_invocations,
  102. (unsigned int)css->repair_success,
  103. (unsigned long long)css->repairtime_us);
  104. if (ret <= 0)
  105. break;
  106. remaining -= ret;
  107. copied += ret;
  108. buf += ret;
  109. }
  110. return copied > 0 ? copied : ret;
  111. }
  112. /* Estimate the worst case buffer size required to hold the whole report. */
  113. STATIC size_t
  114. xchk_stats_estimate_bufsize(
  115. struct xchk_stats *cs)
  116. {
  117. struct xchk_scrub_stats *css = &cs->cs_stats[0];
  118. unsigned int i;
  119. size_t field_width;
  120. size_t ret = 0;
  121. /* 4294967296 plus one space for each u32 field */
  122. field_width = 11 * (offsetof(struct xchk_scrub_stats, checktime_us) /
  123. sizeof(uint32_t));
  124. /* 18446744073709551615 plus one space for each u64 field */
  125. field_width += 21 * ((offsetof(struct xchk_scrub_stats, css_lock) -
  126. offsetof(struct xchk_scrub_stats, checktime_us)) /
  127. sizeof(uint64_t));
  128. for (i = 0; i < XFS_SCRUB_TYPE_NR; i++, css++) {
  129. if (!name_map[i])
  130. continue;
  131. /* name plus one space */
  132. ret += 1 + strlen(name_map[i]);
  133. /* all fields, plus newline */
  134. ret += field_width + 1;
  135. }
  136. return ret;
  137. }
/*
 * Clear all counters.  This relies on the struct layout rule that every
 * counter precedes css_lock, so a single memset up to that offset zeroes
 * exactly the counters and nothing else.  The lock is held across the
 * memset so updates in xchk_stats_merge_one() never see a torn clear.
 */
STATIC void
xchk_stats_clearall(
	struct xchk_stats	*cs)
{
	struct xchk_scrub_stats	*css = &cs->cs_stats[0];
	unsigned int		i;

	for (i = 0; i < XFS_SCRUB_TYPE_NR; i++, css++) {
		spin_lock(&css->css_lock);
		memset(css, 0, offsetof(struct xchk_scrub_stats, css_lock));
		spin_unlock(&css->css_lock);
	}
}
/* Any output flag that means the scrubbed object was not found clean. */
#define XFS_SCRUB_OFLAG_UNCLEAN	(XFS_SCRUB_OFLAG_CORRUPT | \
				 XFS_SCRUB_OFLAG_PREEN | \
				 XFS_SCRUB_OFLAG_XFAIL | \
				 XFS_SCRUB_OFLAG_XCORRUPT | \
				 XFS_SCRUB_OFLAG_INCOMPLETE | \
				 XFS_SCRUB_OFLAG_WARNING)
  157. STATIC void
  158. xchk_stats_merge_one(
  159. struct xchk_stats *cs,
  160. const struct xfs_scrub_metadata *sm,
  161. const struct xchk_stats_run *run)
  162. {
  163. struct xchk_scrub_stats *css;
  164. if (sm->sm_type >= XFS_SCRUB_TYPE_NR) {
  165. ASSERT(sm->sm_type < XFS_SCRUB_TYPE_NR);
  166. return;
  167. }
  168. css = &cs->cs_stats[sm->sm_type];
  169. spin_lock(&css->css_lock);
  170. css->invocations++;
  171. if (!(sm->sm_flags & XFS_SCRUB_OFLAG_UNCLEAN))
  172. css->clean++;
  173. if (sm->sm_flags & XFS_SCRUB_OFLAG_CORRUPT)
  174. css->corrupt++;
  175. if (sm->sm_flags & XFS_SCRUB_OFLAG_PREEN)
  176. css->preen++;
  177. if (sm->sm_flags & XFS_SCRUB_OFLAG_XFAIL)
  178. css->xfail++;
  179. if (sm->sm_flags & XFS_SCRUB_OFLAG_XCORRUPT)
  180. css->xcorrupt++;
  181. if (sm->sm_flags & XFS_SCRUB_OFLAG_INCOMPLETE)
  182. css->incomplete++;
  183. if (sm->sm_flags & XFS_SCRUB_OFLAG_WARNING)
  184. css->warning++;
  185. css->retries += run->retries;
  186. css->checktime_us += howmany_64(run->scrub_ns, NSEC_PER_USEC);
  187. if (run->repair_attempted)
  188. css->repair_invocations++;
  189. if (run->repair_succeeded)
  190. css->repair_success++;
  191. css->repairtime_us += howmany_64(run->repair_ns, NSEC_PER_USEC);
  192. spin_unlock(&css->css_lock);
  193. }
/*
 * Merge these scrub-run stats into the global and mount stat data.
 * NOTE(review): mp->m_scrub_stats is dereferenced unconditionally here;
 * callers presumably guarantee it was allocated at mount time — confirm.
 */
void
xchk_stats_merge(
	struct xfs_mount		*mp,
	const struct xfs_scrub_metadata	*sm,
	const struct xchk_stats_run	*run)
{
	xchk_stats_merge_one(&global_stats, sm, run);
	xchk_stats_merge_one(mp->m_scrub_stats, sm, run);
}
  204. /* debugfs boilerplate */
  205. static ssize_t
  206. xchk_scrub_stats_read(
  207. struct file *file,
  208. char __user *ubuf,
  209. size_t count,
  210. loff_t *ppos)
  211. {
  212. struct xchk_stats *cs = file->private_data;
  213. char *buf;
  214. size_t bufsize;
  215. ssize_t avail, ret;
  216. /*
  217. * This generates stringly snapshot of all the scrub counters, so we
  218. * do not want userspace to receive garbled text from multiple calls.
  219. * If the file position is greater than 0, return a short read.
  220. */
  221. if (*ppos > 0)
  222. return 0;
  223. bufsize = xchk_stats_estimate_bufsize(cs);
  224. buf = kvmalloc(bufsize, XCHK_GFP_FLAGS);
  225. if (!buf)
  226. return -ENOMEM;
  227. avail = xchk_stats_format(cs, buf, bufsize);
  228. if (avail < 0) {
  229. ret = avail;
  230. goto out;
  231. }
  232. ret = simple_read_from_buffer(ubuf, count, ppos, buf, avail);
  233. out:
  234. kvfree(buf);
  235. return ret;
  236. }
/* File operations for the read-only "stats" debugfs file. */
static const struct file_operations scrub_stats_fops = {
	.open			= simple_open,
	.read			= xchk_scrub_stats_read,
};
  241. static ssize_t
  242. xchk_clear_scrub_stats_write(
  243. struct file *file,
  244. const char __user *ubuf,
  245. size_t count,
  246. loff_t *ppos)
  247. {
  248. struct xchk_stats *cs = file->private_data;
  249. unsigned int val;
  250. int ret;
  251. ret = kstrtouint_from_user(ubuf, count, 0, &val);
  252. if (ret)
  253. return ret;
  254. if (val != 1)
  255. return -EINVAL;
  256. xchk_stats_clearall(cs);
  257. return count;
  258. }
/* File operations for the write-only "clear_stats" debugfs file. */
static const struct file_operations clear_scrub_stats_fops = {
	.open			= simple_open,
	.write			= xchk_clear_scrub_stats_write,
};
  263. /* Initialize the stats object. */
  264. STATIC int
  265. xchk_stats_init(
  266. struct xchk_stats *cs,
  267. struct xfs_mount *mp)
  268. {
  269. struct xchk_scrub_stats *css = &cs->cs_stats[0];
  270. unsigned int i;
  271. for (i = 0; i < XFS_SCRUB_TYPE_NR; i++, css++)
  272. spin_lock_init(&css->css_lock);
  273. return 0;
  274. }
/*
 * Connect the stats object to debugfs.  Silently does nothing when
 * @parent is NULL or the "scrub" directory cannot be created, leaving
 * cs_debugfs NULL so unregister is still safe.
 */
void
xchk_stats_register(
	struct xchk_stats	*cs,
	struct dentry		*parent)
{
	if (!parent)
		return;

	cs->cs_debugfs = xfs_debugfs_mkdir("scrub", parent);
	if (!cs->cs_debugfs)
		return;

	/* "stats" is world-readable; "clear_stats" is owner-write only. */
	debugfs_create_file("stats", 0444, cs->cs_debugfs, cs,
			&scrub_stats_fops);
	debugfs_create_file("clear_stats", 0200, cs->cs_debugfs, cs,
			&clear_scrub_stats_fops);
}
/*
 * Free all resources related to the stats object.  Currently nothing to
 * do; kept (with an int return) as the symmetric counterpart to
 * xchk_stats_init() so callers have a stable teardown hook.
 */
STATIC int
xchk_stats_teardown(
	struct xchk_stats	*cs)
{
	return 0;
}
/*
 * Disconnect the stats object from debugfs.  debugfs_remove() tolerates
 * a NULL dentry, so this is safe even if registration never happened.
 */
void
xchk_stats_unregister(
	struct xchk_stats	*cs)
{
	debugfs_remove(cs->cs_debugfs);
}
  305. /* Initialize global stats and register them */
  306. int __init
  307. xchk_global_stats_setup(
  308. struct dentry *parent)
  309. {
  310. int error;
  311. error = xchk_stats_init(&global_stats, NULL);
  312. if (error)
  313. return error;
  314. xchk_stats_register(&global_stats, parent);
  315. return 0;
  316. }
  317. /* Unregister global stats and tear them down */
  318. void
  319. xchk_global_stats_teardown(void)
  320. {
  321. xchk_stats_unregister(&global_stats);
  322. xchk_stats_teardown(&global_stats);
  323. }
  324. /* Allocate per-mount stats */
  325. int
  326. xchk_mount_stats_alloc(
  327. struct xfs_mount *mp)
  328. {
  329. struct xchk_stats *cs;
  330. int error;
  331. cs = kvzalloc(sizeof(struct xchk_stats), GFP_KERNEL);
  332. if (!cs)
  333. return -ENOMEM;
  334. error = xchk_stats_init(cs, mp);
  335. if (error)
  336. goto out_free;
  337. mp->m_scrub_stats = cs;
  338. return 0;
  339. out_free:
  340. kvfree(cs);
  341. return error;
  342. }
  343. /* Free per-mount stats */
  344. void
  345. xchk_mount_stats_free(
  346. struct xfs_mount *mp)
  347. {
  348. xchk_stats_teardown(mp->m_scrub_stats);
  349. kvfree(mp->m_scrub_stats);
  350. mp->m_scrub_stats = NULL;
  351. }