dump_pagetables.c

/*
 * Debug helper to dump the current kernel pagetables of the system
 * so that we can see what the various memory ranges are set to.
 *
 * (C) Copyright 2008 Intel Corporation
 *
 * Author: Arjan van de Ven <arjan@linux.intel.com>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; version 2
 * of the License.
 */

#include <linux/debugfs.h>
#include <linux/kasan.h>
#include <linux/mm.h>
#include <linux/init.h>
#include <linux/sched.h>
#include <linux/seq_file.h>
#include <linux/highmem.h>
#include <linux/pci.h>

#include <asm/e820/types.h>
#include <asm/pgtable.h>

/*
 * The dumper groups pagetable entries of the same type into one, and for
 * that it needs to keep some state when walking, and flush this state
 * when a "break" in the continuity is found.
 */
struct pg_state {
        int level;                      /* depth of the current entry: 1=pgd .. 5=pte */
        pgprot_t current_prot;          /* protection bits of the current run */
        pgprotval_t effective_prot;     /* protections in effect, accumulated down the levels */
        unsigned long start_address;    /* where the current run began */
        unsigned long current_address;  /* entry being processed now */
        const struct addr_marker *marker;
        unsigned long lines;            /* output lines printed for this marker */
        bool to_dmesg;                  /* print via printk() instead of the seq_file */
        bool check_wx;                  /* audit for W+X mappings while walking */
        unsigned long wx_pages;         /* number of W+X pages found so far */
};

struct addr_marker {
        unsigned long start_address;
        const char *name;
        /* 0 means no limit; otherwise at most this many lines are printed */
        unsigned long max_lines;
};

/* Address space marker hints */
#ifdef CONFIG_X86_64

enum address_markers_idx {
        USER_SPACE_NR = 0,
        KERNEL_SPACE_NR,
#ifdef CONFIG_MODIFY_LDT_SYSCALL
        LDT_NR,
#endif
        LOW_KERNEL_NR,
        VMALLOC_START_NR,
        VMEMMAP_START_NR,
#ifdef CONFIG_KASAN
        KASAN_SHADOW_START_NR,
        KASAN_SHADOW_END_NR,
#endif
        CPU_ENTRY_AREA_NR,
#ifdef CONFIG_X86_ESPFIX64
        ESPFIX_START_NR,
#endif
#ifdef CONFIG_EFI
        EFI_END_NR,
#endif
        HIGH_KERNEL_NR,
        MODULES_VADDR_NR,
        MODULES_END_NR,
        FIXADDR_START_NR,
        END_OF_SPACE_NR,
};

static struct addr_marker address_markers[] = {
        [USER_SPACE_NR]         = { 0,                   "User Space" },
        [KERNEL_SPACE_NR]       = { (1UL << 63),         "Kernel Space" },
        [LOW_KERNEL_NR]         = { 0UL,                 "Low Kernel Mapping" },
        [VMALLOC_START_NR]      = { 0UL,                 "vmalloc() Area" },
        [VMEMMAP_START_NR]      = { 0UL,                 "Vmemmap" },
#ifdef CONFIG_KASAN
        /*
         * These fields get initialized with the (dynamic)
         * KASAN_SHADOW_{START,END} values in pt_dump_init().
         */
        [KASAN_SHADOW_START_NR] = { 0UL,                 "KASAN shadow" },
        [KASAN_SHADOW_END_NR]   = { 0UL,                 "KASAN shadow end" },
#endif
#ifdef CONFIG_MODIFY_LDT_SYSCALL
        [LDT_NR]                = { 0UL,                 "LDT remap" },
#endif
        [CPU_ENTRY_AREA_NR]     = { CPU_ENTRY_AREA_BASE, "CPU entry Area" },
#ifdef CONFIG_X86_ESPFIX64
        [ESPFIX_START_NR]       = { ESPFIX_BASE_ADDR,    "ESPfix Area", 16 },
#endif
#ifdef CONFIG_EFI
        [EFI_END_NR]            = { EFI_VA_END,          "EFI Runtime Services" },
#endif
        [HIGH_KERNEL_NR]        = { __START_KERNEL_map,  "High Kernel Mapping" },
        [MODULES_VADDR_NR]      = { MODULES_VADDR,       "Modules" },
        [MODULES_END_NR]        = { MODULES_END,         "End Modules" },
        [FIXADDR_START_NR]      = { FIXADDR_START,       "Fixmap Area" },
        [END_OF_SPACE_NR]       = { -1,                  NULL }
};

#define INIT_PGD        ((pgd_t *) &init_top_pgt)

#else /* CONFIG_X86_64 */

enum address_markers_idx {
        USER_SPACE_NR = 0,
        KERNEL_SPACE_NR,
        VMALLOC_START_NR,
        VMALLOC_END_NR,
#ifdef CONFIG_HIGHMEM
        PKMAP_BASE_NR,
#endif
#ifdef CONFIG_MODIFY_LDT_SYSCALL
        LDT_NR,
#endif
        CPU_ENTRY_AREA_NR,
        FIXADDR_START_NR,
        END_OF_SPACE_NR,
};

static struct addr_marker address_markers[] = {
        [USER_SPACE_NR]         = { 0,            "User Space" },
        [KERNEL_SPACE_NR]       = { PAGE_OFFSET,  "Kernel Mapping" },
        [VMALLOC_START_NR]      = { 0UL,          "vmalloc() Area" },
        [VMALLOC_END_NR]        = { 0UL,          "vmalloc() End" },
#ifdef CONFIG_HIGHMEM
        [PKMAP_BASE_NR]         = { 0UL,          "Persistent kmap() Area" },
#endif
#ifdef CONFIG_MODIFY_LDT_SYSCALL
        [LDT_NR]                = { 0UL,          "LDT remap" },
#endif
        [CPU_ENTRY_AREA_NR]     = { 0UL,          "CPU entry area" },
        [FIXADDR_START_NR]      = { 0UL,          "Fixmap area" },
        [END_OF_SPACE_NR]       = { -1,           NULL }
};

#define INIT_PGD        (swapper_pg_dir)

#endif /* !CONFIG_X86_64 */

/* Multipliers for offsets within the PTEs */
#define PTE_LEVEL_MULT (PAGE_SIZE)
#define PMD_LEVEL_MULT (PTRS_PER_PTE * PTE_LEVEL_MULT)
#define PUD_LEVEL_MULT (PTRS_PER_PMD * PMD_LEVEL_MULT)
#define P4D_LEVEL_MULT (PTRS_PER_PUD * PUD_LEVEL_MULT)
#define PGD_LEVEL_MULT (PTRS_PER_P4D * P4D_LEVEL_MULT)
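
/*
 * A worked example of the multipliers, assuming the usual x86-64 values
 * (PAGE_SIZE = 4096, PTRS_PER_PTE/PMD/PUD = 512): one PTE maps 4 KiB, so
 * a PMD entry covers 512 * 4 KiB = 2 MiB, a PUD entry 1 GiB, and a P4D
 * entry 512 GiB. With 4-level paging PTRS_PER_P4D is 1, so each of the
 * 512 PGD slots also covers 512 GiB, giving the 256 TiB (48-bit) address
 * space; with 5-level paging PTRS_PER_P4D is 512 and each PGD slot
 * covers 256 TiB.
 */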

#define pt_dump_seq_printf(m, to_dmesg, fmt, args...)           \
({                                                              \
        if (to_dmesg)                                           \
                printk(KERN_INFO fmt, ##args);                  \
        else                                                    \
                if (m)                                          \
                        seq_printf(m, fmt, ##args);             \
})

#define pt_dump_cont_printf(m, to_dmesg, fmt, args...)          \
({                                                              \
        if (to_dmesg)                                           \
                printk(KERN_CONT fmt, ##args);                  \
        else                                                    \
                if (m)                                          \
                        seq_printf(m, fmt, ##args);             \
})
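
/*
 * Both macros tolerate a NULL seq_file: the W+X checks below call
 * ptdump_walk_pgd_level_core() with m == NULL and to_dmesg == false, in
 * which case the "if (m)" guard turns all dump output into a no-op and
 * only the check itself runs.
 */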

/*
 * Print a readable form of a pgprot_t to the seq_file
 */
static void printk_prot(struct seq_file *m, pgprot_t prot, int level, bool dmsg)
{
        pgprotval_t pr = pgprot_val(prot);
        static const char * const level_name[] =
                { "cr3", "pgd", "p4d", "pud", "pmd", "pte" };

        if (!(pr & _PAGE_PRESENT)) {
                /* Not present */
                pt_dump_cont_printf(m, dmsg, "                              ");
        } else {
                if (pr & _PAGE_USER)
                        pt_dump_cont_printf(m, dmsg, "USR ");
                else
                        pt_dump_cont_printf(m, dmsg, "    ");
                if (pr & _PAGE_RW)
                        pt_dump_cont_printf(m, dmsg, "RW ");
                else
                        pt_dump_cont_printf(m, dmsg, "ro ");
                if (pr & _PAGE_PWT)
                        pt_dump_cont_printf(m, dmsg, "PWT ");
                else
                        pt_dump_cont_printf(m, dmsg, "    ");
                if (pr & _PAGE_PCD)
                        pt_dump_cont_printf(m, dmsg, "PCD ");
                else
                        pt_dump_cont_printf(m, dmsg, "    ");

                /* Bit 7 has a different meaning on level 3 vs 4 */
                if (level <= 4 && pr & _PAGE_PSE)
                        pt_dump_cont_printf(m, dmsg, "PSE ");
                else
                        pt_dump_cont_printf(m, dmsg, "    ");
                if ((level == 5 && pr & _PAGE_PAT) ||
                    ((level == 4 || level == 3) && pr & _PAGE_PAT_LARGE))
                        pt_dump_cont_printf(m, dmsg, "PAT ");
                else
                        pt_dump_cont_printf(m, dmsg, "    ");
                if (pr & _PAGE_GLOBAL)
                        pt_dump_cont_printf(m, dmsg, "GLB ");
                else
                        pt_dump_cont_printf(m, dmsg, "    ");
                if (pr & _PAGE_NX)
                        pt_dump_cont_printf(m, dmsg, "NX ");
                else
                        pt_dump_cont_printf(m, dmsg, "x  ");
        }
        pt_dump_cont_printf(m, dmsg, "%s\n", level_name[level]);
}
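
/*
 * An illustrative line of output (spacing approximate, not taken from a
 * live system): a 2 MiB kernel mapping at the pmd level might print as
 *
 *   0xffff880001000000-0xffff880001200000    2M  RW  PSE GLB NX  pmd
 *
 * with blank columns for the flags that are clear (here USR, PWT, PCD
 * and PAT).
 */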

/*
 * On 64-bit kernels, sign-extend the virtual address (48 bits with
 * 4-level paging, 57 bits with 5-level) to a canonical 64-bit value.
 */
static unsigned long normalize_addr(unsigned long u)
{
        int shift;

        if (!IS_ENABLED(CONFIG_X86_64))
                return u;

        shift = 64 - (__VIRTUAL_MASK_SHIFT + 1);
        return (signed long)(u << shift) >> shift;
}
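
/*
 * Worked example, assuming 4-level paging (__VIRTUAL_MASK_SHIFT == 47,
 * so shift == 16): the raw walk offset for PGD slot 256 is
 * 256 * PGD_LEVEL_MULT == 0x0000800000000000. Shifting left by 16 and
 * arithmetically right by 16 copies bit 47 into bits 48-63, yielding
 * the canonical kernel address 0xffff800000000000.
 */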

static void note_wx(struct pg_state *st)
{
        unsigned long npages;

        npages = (st->current_address - st->start_address) / PAGE_SIZE;

#ifdef CONFIG_PCI_BIOS
        /*
         * If PCI BIOS is enabled, the PCI BIOS area is forced to WX.
         * Inform about it, but avoid the warning.
         */
        if (pcibios_enabled && st->start_address >= PAGE_OFFSET + BIOS_BEGIN &&
            st->current_address <= PAGE_OFFSET + BIOS_END) {
                pr_warn_once("x86/mm: PCI BIOS W+X mapping %lu pages\n", npages);
                return;
        }
#endif
        /* Account the WX pages */
        st->wx_pages += npages;
        WARN_ONCE(1, "x86/mm: Found insecure W+X mapping at address %pS\n",
                  (void *)st->start_address);
}

/*
 * This function gets called on a break in a continuous series
 * of PTE entries; the next one is different so we need to
 * print what we collected so far.
 */
static void note_page(struct seq_file *m, struct pg_state *st,
                      pgprot_t new_prot, pgprotval_t new_eff, int level)
{
        pgprotval_t prot, cur, eff;
        static const char units[] = "BKMGTPE";

        /*
         * If we have a "break" in the series, we need to flush the state
         * that we have now. A "break" is a change in permissions, a change
         * in level, or crossing an address-space marker.
         */
        prot = pgprot_val(new_prot);
        cur = pgprot_val(st->current_prot);
        eff = st->effective_prot;

        if (!st->level) {
                /* First entry */
                st->current_prot = new_prot;
                st->effective_prot = new_eff;
                st->level = level;
                st->marker = address_markers;
                st->lines = 0;
                pt_dump_seq_printf(m, st->to_dmesg, "---[ %s ]---\n",
                                   st->marker->name);
        } else if (prot != cur || new_eff != eff || level != st->level ||
                   st->current_address >= st->marker[1].start_address) {
                const char *unit = units;
                unsigned long delta;
                int width = sizeof(unsigned long) * 2;

                if (st->check_wx && (eff & _PAGE_RW) && !(eff & _PAGE_NX))
                        note_wx(st);

                /*
                 * Now print the actual finished series
                 */
                if (!st->marker->max_lines ||
                    st->lines < st->marker->max_lines) {
                        pt_dump_seq_printf(m, st->to_dmesg,
                                           "0x%0*lx-0x%0*lx   ",
                                           width, st->start_address,
                                           width, st->current_address);

                        delta = st->current_address - st->start_address;
                        while (!(delta & 1023) && unit[1]) {
                                delta >>= 10;
                                unit++;
                        }
                        pt_dump_cont_printf(m, st->to_dmesg, "%9lu%c ",
                                            delta, *unit);
                        printk_prot(m, st->current_prot, st->level,
                                    st->to_dmesg);
                }
                st->lines++;

                /*
                 * We print markers for special areas of address space,
                 * such as the start of vmalloc space etc.
                 * This helps in the interpretation.
                 */
                if (st->current_address >= st->marker[1].start_address) {
                        if (st->marker->max_lines &&
                            st->lines > st->marker->max_lines) {
                                unsigned long nskip =
                                        st->lines - st->marker->max_lines;
                                pt_dump_seq_printf(m, st->to_dmesg,
                                                   "... %lu entr%s skipped ... \n",
                                                   nskip,
                                                   nskip == 1 ? "y" : "ies");
                        }
                        st->marker++;
                        st->lines = 0;
                        pt_dump_seq_printf(m, st->to_dmesg, "---[ %s ]---\n",
                                           st->marker->name);
                }

                st->start_address = st->current_address;
                st->current_prot = new_prot;
                st->effective_prot = new_eff;
                st->level = level;
        }
}
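
/*
 * To make the coalescing concrete (an illustrative example, not output
 * from a real machine): 512 consecutive PTEs that all map present RW+NX
 * pages produce no output while they are walked; only when the next
 * entry differs, or an address marker boundary is crossed, does
 * note_page() emit a single line covering the whole 2 MiB run.
 */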

static inline pgprotval_t effective_prot(pgprotval_t prot1, pgprotval_t prot2)
{
        return (prot1 & prot2 & (_PAGE_USER | _PAGE_RW)) |
               ((prot1 | prot2) & _PAGE_NX);
}
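
/*
 * The effective protection is the AND of USER and RW down the levels
 * (every level must grant them) and the OR of NX (any level can revoke
 * execute). For example, a PTE marked USR|RW underneath a PMD entry
 * that lacks _PAGE_USER is not, in effect, user-accessible, and a PTE
 * without NX underneath an NX PMD entry is still non-executable.
 */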

static void walk_pte_level(struct seq_file *m, struct pg_state *st, pmd_t addr,
                           pgprotval_t eff_in, unsigned long P)
{
        int i;
        pte_t *pte;
        pgprotval_t prot, eff;

        for (i = 0; i < PTRS_PER_PTE; i++) {
                st->current_address = normalize_addr(P + i * PTE_LEVEL_MULT);
                pte = pte_offset_map(&addr, st->current_address);
                prot = pte_flags(*pte);
                eff = effective_prot(eff_in, prot);
                note_page(m, st, __pgprot(prot), eff, 5);
                pte_unmap(pte);
        }
}

#ifdef CONFIG_KASAN
/*
 * This is an optimization for the KASAN=y case. Since all KASAN page
 * tables eventually point to the kasan_zero_page, we can call note_page()
 * right away without walking through the lower-level page tables. This
 * saves us dozens of seconds (minutes for a 5-level configuration) when
 * checking for W+X mappings or reading the kernel_page_tables debugfs
 * file.
 */
static inline bool kasan_page_table(struct seq_file *m, struct pg_state *st,
                                    void *pt)
{
        if (__pa(pt) == __pa(kasan_zero_pmd) ||
            (pgtable_l5_enabled() && __pa(pt) == __pa(kasan_zero_p4d)) ||
            __pa(pt) == __pa(kasan_zero_pud)) {
                pgprotval_t prot = pte_flags(kasan_zero_pte[0]);
                note_page(m, st, __pgprot(prot), 0, 5);
                return true;
        }
        return false;
}
#else
static inline bool kasan_page_table(struct seq_file *m, struct pg_state *st,
                                    void *pt)
{
        return false;
}
#endif

#if PTRS_PER_PMD > 1

static void walk_pmd_level(struct seq_file *m, struct pg_state *st, pud_t addr,
                           pgprotval_t eff_in, unsigned long P)
{
        int i;
        pmd_t *start, *pmd_start;
        pgprotval_t prot, eff;

        pmd_start = start = (pmd_t *)pud_page_vaddr(addr);
        for (i = 0; i < PTRS_PER_PMD; i++) {
                st->current_address = normalize_addr(P + i * PMD_LEVEL_MULT);
                if (!pmd_none(*start)) {
                        prot = pmd_flags(*start);
                        eff = effective_prot(eff_in, prot);
                        if (pmd_large(*start) || !pmd_present(*start)) {
                                note_page(m, st, __pgprot(prot), eff, 4);
                        } else if (!kasan_page_table(m, st, pmd_start)) {
                                walk_pte_level(m, st, *start, eff,
                                               P + i * PMD_LEVEL_MULT);
                        }
                } else
                        note_page(m, st, __pgprot(0), 0, 4);
                start++;
        }
}

#else
#define walk_pmd_level(m,s,a,e,p) walk_pte_level(m,s,__pmd(pud_val(a)),e,p)
#define pud_large(a) pmd_large(__pmd(pud_val(a)))
#define pud_none(a)  pmd_none(__pmd(pud_val(a)))
#endif
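
/*
 * On configurations where a level has only one entry (e.g. 2-level
 * x86-32, where PTRS_PER_PMD == 1), the level is folded away: the shim
 * macros above simply re-wrap the upper-level entry and hand it straight
 * to the walker one level down. The PUD level below is folded the same
 * way when PTRS_PER_PUD == 1.
 */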

#if PTRS_PER_PUD > 1

static void walk_pud_level(struct seq_file *m, struct pg_state *st, p4d_t addr,
                           pgprotval_t eff_in, unsigned long P)
{
        int i;
        pud_t *start, *pud_start;
        pgprotval_t prot, eff;

        pud_start = start = (pud_t *)p4d_page_vaddr(addr);

        for (i = 0; i < PTRS_PER_PUD; i++) {
                st->current_address = normalize_addr(P + i * PUD_LEVEL_MULT);
                if (!pud_none(*start)) {
                        prot = pud_flags(*start);
                        eff = effective_prot(eff_in, prot);
                        if (pud_large(*start) || !pud_present(*start)) {
                                note_page(m, st, __pgprot(prot), eff, 3);
                        } else if (!kasan_page_table(m, st, pud_start)) {
                                walk_pmd_level(m, st, *start, eff,
                                               P + i * PUD_LEVEL_MULT);
                        }
                } else
                        note_page(m, st, __pgprot(0), 0, 3);
                start++;
        }
}

#else
#define walk_pud_level(m,s,a,e,p) walk_pmd_level(m,s,__pud(p4d_val(a)),e,p)
#define p4d_large(a) pud_large(__pud(p4d_val(a)))
#define p4d_none(a)  pud_none(__pud(p4d_val(a)))
#endif

static void walk_p4d_level(struct seq_file *m, struct pg_state *st, pgd_t addr,
                           pgprotval_t eff_in, unsigned long P)
{
        int i;
        p4d_t *start, *p4d_start;
        pgprotval_t prot, eff;

        if (PTRS_PER_P4D == 1)
                return walk_pud_level(m, st, __p4d(pgd_val(addr)), eff_in, P);

        p4d_start = start = (p4d_t *)pgd_page_vaddr(addr);

        for (i = 0; i < PTRS_PER_P4D; i++) {
                st->current_address = normalize_addr(P + i * P4D_LEVEL_MULT);
                if (!p4d_none(*start)) {
                        prot = p4d_flags(*start);
                        eff = effective_prot(eff_in, prot);
                        if (p4d_large(*start) || !p4d_present(*start)) {
                                note_page(m, st, __pgprot(prot), eff, 2);
                        } else if (!kasan_page_table(m, st, p4d_start)) {
                                walk_pud_level(m, st, *start, eff,
                                               P + i * P4D_LEVEL_MULT);
                        }
                } else
                        note_page(m, st, __pgprot(0), 0, 2);
                start++;
        }
}
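
/*
 * The self-reference in the two macros below is intentional and safe:
 * the C preprocessor does not expand a macro recursively inside its own
 * expansion, so in the 5-level case the inner pgd_large()/pgd_none()
 * resolve to the real helpers from the page-table headers, while the
 * 4-level case treats the PGD entry as the (folded) P4D entry it
 * really is.
 */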
#define pgd_large(a) (pgtable_l5_enabled() ? pgd_large(a) : p4d_large(__p4d(pgd_val(a))))
#define pgd_none(a)  (pgtable_l5_enabled() ? pgd_none(a) : p4d_none(__p4d(pgd_val(a))))

static inline bool is_hypervisor_range(int idx)
{
#ifdef CONFIG_X86_64
        /*
         * A hole at the beginning of the kernel address space, reserved
         * for a hypervisor.
         */
        return (idx >= pgd_index(GUARD_HOLE_BASE_ADDR)) &&
               (idx <  pgd_index(GUARD_HOLE_END_ADDR));
#else
        return false;
#endif
}

static void ptdump_walk_pgd_level_core(struct seq_file *m, pgd_t *pgd,
                                       bool checkwx, bool dmesg)
{
        pgd_t *start = INIT_PGD;
        pgprotval_t prot, eff;
        int i;
        struct pg_state st = {};

        if (pgd) {
                start = pgd;
                st.to_dmesg = dmesg;
        }

        st.check_wx = checkwx;
        if (checkwx)
                st.wx_pages = 0;

        for (i = 0; i < PTRS_PER_PGD; i++) {
                st.current_address = normalize_addr(i * PGD_LEVEL_MULT);
                if (!pgd_none(*start) && !is_hypervisor_range(i)) {
                        prot = pgd_flags(*start);
#ifdef CONFIG_X86_PAE
                        eff = _PAGE_USER | _PAGE_RW;
#else
                        eff = prot;
#endif
                        if (pgd_large(*start) || !pgd_present(*start)) {
                                note_page(m, &st, __pgprot(prot), eff, 1);
                        } else {
                                walk_p4d_level(m, &st, *start, eff,
                                               i * PGD_LEVEL_MULT);
                        }
                } else
                        note_page(m, &st, __pgprot(0), 0, 1);

                cond_resched();
                start++;
        }

        /* Flush out the last page */
        st.current_address = normalize_addr(PTRS_PER_PGD*PGD_LEVEL_MULT);
        note_page(m, &st, __pgprot(0), 0, 0);
        if (!checkwx)
                return;
        if (st.wx_pages)
                pr_info("x86/mm: Checked W+X mappings: FAILED, %lu W+X pages found.\n",
                        st.wx_pages);
        else
                pr_info("x86/mm: Checked W+X mappings: passed, no W+X pages found.\n");
}
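
/*
 * Entry points. ptdump_walk_pgd_level() dumps the given pagetable to the
 * kernel log (to_dmesg is only honoured when a pgd is supplied), while
 * the _debugfs variant below writes to a seq_file for the debugfs
 * interface.
 */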
void ptdump_walk_pgd_level(struct seq_file *m, pgd_t *pgd)
{
        ptdump_walk_pgd_level_core(m, pgd, false, true);
}

void ptdump_walk_pgd_level_debugfs(struct seq_file *m, pgd_t *pgd, bool user)
{
#ifdef CONFIG_PAGE_TABLE_ISOLATION
        if (user && static_cpu_has(X86_FEATURE_PTI))
                pgd = kernel_to_user_pgdp(pgd);
#endif
        ptdump_walk_pgd_level_core(m, pgd, false, false);
}
EXPORT_SYMBOL_GPL(ptdump_walk_pgd_level_debugfs);
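
/*
 * With page-table isolation, the kernel keeps a second, user-visible
 * copy of the page tables; kernel_to_user_pgdp() flips to that copy.
 * The audit below walks the user copy of the kernel's own tables, but
 * only when NX is supported and PTI is actually enabled.
 */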
void ptdump_walk_user_pgd_level_checkwx(void)
{
#ifdef CONFIG_PAGE_TABLE_ISOLATION
        pgd_t *pgd = INIT_PGD;

        if (!(__supported_pte_mask & _PAGE_NX) ||
            !static_cpu_has(X86_FEATURE_PTI))
                return;

        pr_info("x86/mm: Checking user space page tables\n");
        pgd = kernel_to_user_pgdp(pgd);
        ptdump_walk_pgd_level_core(NULL, pgd, true, false);
#endif
}

void ptdump_walk_pgd_level_checkwx(void)
{
        ptdump_walk_pgd_level_core(NULL, NULL, true, false);
}

static int __init pt_dump_init(void)
{
        /*
         * Various markers are not compile-time constants, so assign them
         * here.
         */
#ifdef CONFIG_X86_64
        address_markers[LOW_KERNEL_NR].start_address = PAGE_OFFSET;
        address_markers[VMALLOC_START_NR].start_address = VMALLOC_START;
        address_markers[VMEMMAP_START_NR].start_address = VMEMMAP_START;
#ifdef CONFIG_MODIFY_LDT_SYSCALL
        address_markers[LDT_NR].start_address = LDT_BASE_ADDR;
#endif
#ifdef CONFIG_KASAN
        address_markers[KASAN_SHADOW_START_NR].start_address = KASAN_SHADOW_START;
        address_markers[KASAN_SHADOW_END_NR].start_address = KASAN_SHADOW_END;
#endif
#endif
#ifdef CONFIG_X86_32
        address_markers[VMALLOC_START_NR].start_address = VMALLOC_START;
        address_markers[VMALLOC_END_NR].start_address = VMALLOC_END;
# ifdef CONFIG_HIGHMEM
        address_markers[PKMAP_BASE_NR].start_address = PKMAP_BASE;
# endif
        address_markers[FIXADDR_START_NR].start_address = FIXADDR_START;
        address_markers[CPU_ENTRY_AREA_NR].start_address = CPU_ENTRY_AREA_BASE;
# ifdef CONFIG_MODIFY_LDT_SYSCALL
        address_markers[LDT_NR].start_address = LDT_BASE_ADDR;
# endif
#endif
        return 0;
}
__initcall(pt_dump_init);
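
/*
 * Usage sketch (illustrative; exact config symbols and paths depend on
 * the kernel version): with the pagetable dumper built in (CONFIG_X86_PTDUMP
 * in kernels of this vintage) and debugfs mounted, the kernel mappings
 * can be inspected with:
 *
 *   # mount -t debugfs none /sys/kernel/debug
 *   # cat /sys/kernel/debug/kernel_page_tables
 *   ---[ User Space ]---
 *   0x0000000000000000-0xffff800000000000   16777088T               pgd
 *   ---[ Kernel Space ]---
 *   ...
 *
 * The W+X audit runs via ptdump_walk_pgd_level_checkwx(), typically at
 * the end of boot when CONFIG_DEBUG_WX is enabled, and reports its
 * verdict with the "Checked W+X mappings" pr_info() above.
 */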