// SPDX-License-Identifier: GPL-2.0
/*
 * mm/debug.c
 *
 * mm/ specific debug routines.
 *
 */

#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/trace_events.h>
#include <linux/memcontrol.h>
#include <trace/events/mmflags.h>
#include <linux/migrate.h>
#include <linux/page_owner.h>
#include <linux/ctype.h>

#include "internal.h"

#include <trace/events/migrate.h>

/*
 * Define EM() and EMe() so that MIGRATE_REASON from trace/events/migrate.h can
 * be used to populate migrate_reason_names[].
 */
#undef EM
#undef EMe
#define EM(a, b)	b,
#define EMe(a, b)	b
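
/*
 * For reference: MIGRATE_REASON is a list of EM(reason, "string") entries
 * (e.g. EM(MR_COMPACTION, "compaction")) with a final EMe() entry, so with
 * the definitions above it expands to a plain initializer of reason
 * strings indexed by enum migrate_reason.
 */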
const char *migrate_reason_names[MR_TYPES] = {
	MIGRATE_REASON
};
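
/*
 * These name tables back the %pGp, %pGg and %pGv printk format extensions
 * (see lib/vsprintf.c), which print page, gfp and vma flags symbolically.
 */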
const struct trace_print_flags pageflag_names[] = {
	__def_pageflag_names,
	{0, NULL}
};

const struct trace_print_flags gfpflag_names[] = {
	__def_gfpflag_names,
	{0, NULL}
};

const struct trace_print_flags vmaflag_names[] = {
	__def_vmaflag_names,
	{0, NULL}
};

#define DEF_PAGETYPE_NAME(_name) [PGTY_##_name - 0xf0] = __stringify(_name)

static const char *page_type_names[] = {
	DEF_PAGETYPE_NAME(slab),
	DEF_PAGETYPE_NAME(hugetlb),
	DEF_PAGETYPE_NAME(offline),
	DEF_PAGETYPE_NAME(guard),
	DEF_PAGETYPE_NAME(table),
	DEF_PAGETYPE_NAME(buddy),
	DEF_PAGETYPE_NAME(unaccepted),
};
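
/*
 * The page type tag lives in the top byte of page->page_type, with the
 * PGTY_* values starting at 0xf0, so (page_type >> 24) - 0xf0 recovers
 * the zero-based index into page_type_names[].
 */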
static const char *page_type_name(unsigned int page_type)
{
	unsigned i = (page_type >> 24) - 0xf0;

	if (i >= ARRAY_SIZE(page_type_names))
		return "unknown";
	return page_type_names[i];
}
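
/*
 * Dump one page together with the folio it belongs to. The per-page
 * _mapcount is stored biased by -1 and its field is reused for page
 * type tags; page_mapcount_is_type() detects the latter, in which case
 * the page is reported as unmapped.
 */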
static void __dump_folio(struct folio *folio, struct page *page,
		unsigned long pfn, unsigned long idx)
{
	struct address_space *mapping = folio_mapping(folio);
	int mapcount = atomic_read(&page->_mapcount);
	char *type = "";

	mapcount = page_mapcount_is_type(mapcount) ? 0 : mapcount + 1;
	pr_warn("page: refcount:%d mapcount:%d mapping:%p index:%#lx pfn:%#lx\n",
			folio_ref_count(folio), mapcount, mapping,
			folio->index + idx, pfn);
	if (folio_test_large(folio)) {
		pr_warn("head: order:%u mapcount:%d entire_mapcount:%d nr_pages_mapped:%d pincount:%d\n",
				folio_order(folio),
				folio_mapcount(folio),
				folio_entire_mapcount(folio),
				folio_nr_pages_mapped(folio),
				atomic_read(&folio->_pincount));
	}

#ifdef CONFIG_MEMCG
	if (folio->memcg_data)
		pr_warn("memcg:%lx\n", folio->memcg_data);
#endif
	if (folio_test_ksm(folio))
		type = "ksm ";
	else if (folio_test_anon(folio))
		type = "anon ";
	else if (mapping)
		dump_mapping(mapping);
	BUILD_BUG_ON(ARRAY_SIZE(pageflag_names) != __NR_PAGEFLAGS + 1);

	/*
	 * Accessing the pageblock without the zone lock. It could change to
	 * "isolate" again in the meantime, but since we are just dumping the
	 * state for debugging, it should be fine to accept a bit of
	 * inaccuracy here due to racing.
	 */
	pr_warn("%sflags: %pGp%s\n", type, &folio->flags,
		is_migrate_cma_folio(folio, pfn) ? " CMA" : "");
	if (page_has_type(&folio->page))
		pr_warn("page_type: %x(%s)\n", folio->page.page_type >> 24,
				page_type_name(folio->page.page_type));

	print_hex_dump(KERN_WARNING, "raw: ", DUMP_PREFIX_NONE, 32,
			sizeof(unsigned long), page,
			sizeof(struct page), false);
	if (folio_test_large(folio))
		print_hex_dump(KERN_WARNING, "head: ", DUMP_PREFIX_NONE, 32,
			sizeof(unsigned long), folio,
			2 * sizeof(struct page), false);
}
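
/*
 * The page may belong to a folio that is being split or freed
 * concurrently, so work on local snapshots: copy the page (and, when it
 * fits, the folio) into local storage, and retry a few times if the two
 * disagree before falling back to dumping the page as a standalone one.
 */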
static void __dump_page(const struct page *page)
{
	struct folio *foliop, folio;
	struct page precise;
	unsigned long head;
	unsigned long pfn = page_to_pfn(page);
	unsigned long idx, nr_pages = 1;
	int loops = 5;

again:
	memcpy(&precise, page, sizeof(*page));
	head = precise.compound_head;
	if ((head & 1) == 0) {
		foliop = (struct folio *)&precise;
		idx = 0;
		if (!folio_test_large(foliop))
			goto dump;
		foliop = (struct folio *)page;
	} else {
		foliop = (struct folio *)(head - 1);
		idx = folio_page_idx(foliop, page);
	}

	if (idx < MAX_FOLIO_NR_PAGES) {
		memcpy(&folio, foliop, 2 * sizeof(struct page));
		nr_pages = folio_nr_pages(&folio);
		foliop = &folio;
	}

	if (idx > nr_pages) {
		if (loops-- > 0)
			goto again;
		pr_warn("page does not match folio\n");
		precise.compound_head &= ~1UL;
		foliop = (struct folio *)&precise;
		idx = 0;
	}

dump:
	__dump_folio(foliop, &precise, pfn, idx);
}

void dump_page(const struct page *page, const char *reason)
{
	if (PagePoisoned(page))
		pr_warn("page:%p is uninitialized and poisoned", page);
	else
		__dump_page(page);
	if (reason)
		pr_warn("page dumped because: %s\n", reason);
	dump_page_owner(page);
}
EXPORT_SYMBOL(dump_page);

#ifdef CONFIG_DEBUG_VM
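/*
 * The dumpers below deliberately use %px, which prints raw (unhashed)
 * pointer values: the default hashed %p output would make the addresses
 * useless for debugging.
 */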
void dump_vma(const struct vm_area_struct *vma)
{
	pr_emerg("vma %px start %px end %px mm %px\n"
		"prot %lx anon_vma %px vm_ops %px\n"
		"pgoff %lx file %px private_data %px\n"
		"flags: %#lx(%pGv)\n",
		vma, (void *)vma->vm_start, (void *)vma->vm_end, vma->vm_mm,
		(unsigned long)pgprot_val(vma->vm_page_prot),
		vma->anon_vma, vma->vm_ops, vma->vm_pgoff,
		vma->vm_file, vma->vm_private_data,
		vma->vm_flags, &vma->vm_flags);
}
EXPORT_SYMBOL(dump_vma);

void dump_mm(const struct mm_struct *mm)
{
	pr_emerg("mm %px task_size %lu\n"
		"mmap_base %lu mmap_legacy_base %lu\n"
		"pgd %px mm_users %d mm_count %d pgtables_bytes %lu map_count %d\n"
		"hiwater_rss %lx hiwater_vm %lx total_vm %lx locked_vm %lx\n"
		"pinned_vm %llx data_vm %lx exec_vm %lx stack_vm %lx\n"
		"start_code %lx end_code %lx start_data %lx end_data %lx\n"
		"start_brk %lx brk %lx start_stack %lx\n"
		"arg_start %lx arg_end %lx env_start %lx env_end %lx\n"
		"binfmt %px flags %lx\n"
#ifdef CONFIG_AIO
		"ioctx_table %px\n"
#endif
#ifdef CONFIG_MEMCG
		"owner %px "
#endif
		"exe_file %px\n"
#ifdef CONFIG_MMU_NOTIFIER
		"notifier_subscriptions %px\n"
#endif
#ifdef CONFIG_NUMA_BALANCING
		"numa_next_scan %lu numa_scan_offset %lu numa_scan_seq %d\n"
#endif
		"tlb_flush_pending %d\n"
		"def_flags: %#lx(%pGv)\n",

		mm, mm->task_size,
		mm->mmap_base, mm->mmap_legacy_base,
		mm->pgd, atomic_read(&mm->mm_users),
		atomic_read(&mm->mm_count),
		mm_pgtables_bytes(mm),
		mm->map_count,
		mm->hiwater_rss, mm->hiwater_vm, mm->total_vm, mm->locked_vm,
		(u64)atomic64_read(&mm->pinned_vm),
		mm->data_vm, mm->exec_vm, mm->stack_vm,
		mm->start_code, mm->end_code, mm->start_data, mm->end_data,
		mm->start_brk, mm->brk, mm->start_stack,
		mm->arg_start, mm->arg_end, mm->env_start, mm->env_end,
		mm->binfmt, mm->flags,
#ifdef CONFIG_AIO
		mm->ioctx_table,
#endif
#ifdef CONFIG_MEMCG
		mm->owner,
#endif
		mm->exe_file,
#ifdef CONFIG_MMU_NOTIFIER
		mm->notifier_subscriptions,
#endif
#ifdef CONFIG_NUMA_BALANCING
		mm->numa_next_scan, mm->numa_scan_offset, mm->numa_scan_seq,
#endif
		atomic_read(&mm->tlb_flush_pending),
		mm->def_flags, &mm->def_flags
	);
}
EXPORT_SYMBOL(dump_mm);

static bool page_init_poisoning __read_mostly = true;
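
/*
 * "vm_debug" kernel command line option:
 *   vm_debug	enable all debug options we can control (currently only 'p')
 *   vm_debug=-	disable all debug options
 *   vm_debug=p	enable struct page init poisoning
 * Unknown option characters are reported and skipped.
 */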
static int __init setup_vm_debug(char *str)
{
	bool __page_init_poisoning = true;

	/*
	 * Calling vm_debug with no arguments is equivalent to requesting
	 * to enable all debugging options we can control.
	 */
	if (*str++ != '=' || !*str)
		goto out;

	__page_init_poisoning = false;
	if (*str == '-')
		goto out;

	while (*str) {
		switch (tolower(*str)) {
		case 'p':
			__page_init_poisoning = true;
			break;
		default:
			pr_err("vm_debug option '%c' unknown. skipped\n",
			       *str);
		}

		str++;
	}
out:
	if (page_init_poisoning && !__page_init_poisoning)
		pr_warn("Page struct poisoning disabled by kernel command line option 'vm_debug'\n");

	page_init_poisoning = __page_init_poisoning;

	return 1;
}
__setup("vm_debug", setup_vm_debug);
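
/*
 * Poison the given range of struct pages with PAGE_POISON_PATTERN so that
 * use before proper initialization stands out; dump_page() recognizes such
 * pages via PagePoisoned(). Gated by the "vm_debug" option above.
 */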
void page_init_poison(struct page *page, size_t size)
{
	if (page_init_poisoning)
		memset(page, PAGE_POISON_PATTERN, size);
}

void vma_iter_dump_tree(const struct vma_iterator *vmi)
{
#if defined(CONFIG_DEBUG_VM_MAPLE_TREE)
	mas_dump(&vmi->mas);
	mt_dump(vmi->mas.tree, mt_dump_hex);
#endif	/* CONFIG_DEBUG_VM_MAPLE_TREE */
}
#endif	/* CONFIG_DEBUG_VM */