/* mm/page_table_check.c */
  1. // SPDX-License-Identifier: GPL-2.0
  2. /*
  3. * Copyright (c) 2021, Google LLC.
  4. * Pasha Tatashin <pasha.tatashin@soleen.com>
  5. */
  6. #include <linux/kstrtox.h>
  7. #include <linux/mm.h>
  8. #include <linux/page_table_check.h>
  9. #include <linux/swap.h>
  10. #include <linux/swapops.h>
  11. #undef pr_fmt
  12. #define pr_fmt(fmt) "page_table_check: " fmt
/*
 * Per-page accounting attached to each page via page_ext: how many times the
 * page is currently mapped into user page tables as anonymous vs. file-backed
 * memory.  A page must never be mapped as both at once.
 */
struct page_table_check {
	atomic_t anon_map_count;
	atomic_t file_map_count;
};
/* Boot-time enable flag; default comes from CONFIG_PAGE_TABLE_CHECK_ENFORCED. */
static bool __page_table_check_enabled __initdata =
	IS_ENABLED(CONFIG_PAGE_TABLE_CHECK_ENFORCED);

/* Static key defaults to "disabled"; flipped in init_page_table_check(). */
DEFINE_STATIC_KEY_TRUE(page_table_check_disabled);
EXPORT_SYMBOL(page_table_check_disabled);
/*
 * Parse the "page_table_check=" kernel command-line parameter
 * (on/off), overriding the compile-time default.
 */
static int __init early_page_table_check_param(char *buf)
{
	return kstrtobool(buf, &__page_table_check_enabled);
}
early_param("page_table_check", early_page_table_check_param);
/* page_ext callback: allocate our per-page data only when enabled at boot. */
static bool __init need_page_table_check(void)
{
	return __page_table_check_enabled;
}
  30. static void __init init_page_table_check(void)
  31. {
  32. if (!__page_table_check_enabled)
  33. return;
  34. static_branch_disable(&page_table_check_disabled);
  35. }
/* Registration with the page_ext framework; no shared page flags needed. */
struct page_ext_operations page_table_check_ops = {
	.size = sizeof(struct page_table_check),
	.need = need_page_table_check,
	.init = init_page_table_check,
	.need_shared_flags = false,
};
/* Return our accounting data embedded in the given page_ext entry. */
static struct page_table_check *get_page_table_check(struct page_ext *page_ext)
{
	BUG_ON(!page_ext);
	return page_ext_data(page_ext, &page_table_check_ops);
}
/*
 * An entry is removed from the page table, decrement the counters for that page
 * verify that it is of correct type and counters do not become negative.
 *
 * @pfn:   first page frame covered by the removed entry
 * @pgcnt: number of consecutive base pages the entry covered
 */
static void page_table_check_clear(unsigned long pfn, unsigned long pgcnt)
{
	struct page_ext *page_ext;
	struct page *page;
	unsigned long i;
	bool anon;

	/* PFNs with no struct page backing cannot be tracked. */
	if (!pfn_valid(pfn))
		return;

	page = pfn_to_page(pfn);
	page_ext = page_ext_get(page);

	/* page_ext may be unavailable; nothing to account then. */
	if (!page_ext)
		return;

	/* Slab pages must never be mapped into user page tables. */
	BUG_ON(PageSlab(page));
	anon = PageAnon(page);

	for (i = 0; i < pgcnt; i++) {
		struct page_table_check *ptc = get_page_table_check(page_ext);

		if (anon) {
			/* Anon page must not be file-mapped ... */
			BUG_ON(atomic_read(&ptc->file_map_count));
			/* ... and its map count must not underflow. */
			BUG_ON(atomic_dec_return(&ptc->anon_map_count) < 0);
		} else {
			BUG_ON(atomic_read(&ptc->anon_map_count));
			BUG_ON(atomic_dec_return(&ptc->file_map_count) < 0);
		}
		/* Advance to the page_ext of the next base page. */
		page_ext = page_ext_next(page_ext);
	}
	page_ext_put(page_ext);
}
/*
 * A new entry is added to the page table, increment the counters for that page
 * verify that it is of correct type and is not being mapped with a different
 * type to a different process.
 *
 * @pfn:   first page frame covered by the new entry
 * @pgcnt: number of consecutive base pages the entry covers
 * @rw:    true if the new mapping is writable
 */
static void page_table_check_set(unsigned long pfn, unsigned long pgcnt,
				 bool rw)
{
	struct page_ext *page_ext;
	struct page *page;
	unsigned long i;
	bool anon;

	/* PFNs with no struct page backing cannot be tracked. */
	if (!pfn_valid(pfn))
		return;

	page = pfn_to_page(pfn);
	page_ext = page_ext_get(page);

	/* page_ext may be unavailable; nothing to account then. */
	if (!page_ext)
		return;

	/* Slab pages must never be mapped into user page tables. */
	BUG_ON(PageSlab(page));
	anon = PageAnon(page);

	for (i = 0; i < pgcnt; i++) {
		struct page_table_check *ptc = get_page_table_check(page_ext);

		if (anon) {
			/* Anon page must not be file-mapped ... */
			BUG_ON(atomic_read(&ptc->file_map_count));
			/* ... and must not be mapped writable more than once. */
			BUG_ON(atomic_inc_return(&ptc->anon_map_count) > 1 && rw);
		} else {
			BUG_ON(atomic_read(&ptc->anon_map_count));
			/* File map count must not wrap negative. */
			BUG_ON(atomic_inc_return(&ptc->file_map_count) < 0);
		}
		/* Advance to the page_ext of the next base page. */
		page_ext = page_ext_next(page_ext);
	}
	page_ext_put(page_ext);
}
/*
 * page is on free list, or is being allocated, verify that counters are zeroes
 * crash if they are not.
 *
 * @page:  head of the (possibly compound) page being freed/allocated
 * @order: allocation order; 2^order base pages are verified
 */
void __page_table_check_zero(struct page *page, unsigned int order)
{
	struct page_ext *page_ext;
	unsigned long i;

	/* Slab pages are never tracked here. */
	BUG_ON(PageSlab(page));

	page_ext = page_ext_get(page);
	if (!page_ext)
		return;

	for (i = 0; i < (1ul << order); i++) {
		struct page_table_check *ptc = get_page_table_check(page_ext);

		/* A free/fresh page must not be mapped anywhere. */
		BUG_ON(atomic_read(&ptc->anon_map_count));
		BUG_ON(atomic_read(&ptc->file_map_count));
		page_ext = page_ext_next(page_ext);
	}
	page_ext_put(page_ext);
}
  131. void __page_table_check_pte_clear(struct mm_struct *mm, pte_t pte)
  132. {
  133. if (&init_mm == mm)
  134. return;
  135. if (pte_user_accessible_page(pte)) {
  136. page_table_check_clear(pte_pfn(pte), PAGE_SIZE >> PAGE_SHIFT);
  137. }
  138. }
  139. EXPORT_SYMBOL(__page_table_check_pte_clear);
  140. void __page_table_check_pmd_clear(struct mm_struct *mm, pmd_t pmd)
  141. {
  142. if (&init_mm == mm)
  143. return;
  144. if (pmd_user_accessible_page(pmd)) {
  145. page_table_check_clear(pmd_pfn(pmd), PMD_SIZE >> PAGE_SHIFT);
  146. }
  147. }
  148. EXPORT_SYMBOL(__page_table_check_pmd_clear);
  149. void __page_table_check_pud_clear(struct mm_struct *mm, pud_t pud)
  150. {
  151. if (&init_mm == mm)
  152. return;
  153. if (pud_user_accessible_page(pud)) {
  154. page_table_check_clear(pud_pfn(pud), PUD_SIZE >> PAGE_SHIFT);
  155. }
  156. }
  157. EXPORT_SYMBOL(__page_table_check_pud_clear);
  158. /* Whether the swap entry cached writable information */
  159. static inline bool swap_cached_writable(swp_entry_t entry)
  160. {
  161. return is_writable_device_exclusive_entry(entry) ||
  162. is_writable_device_private_entry(entry) ||
  163. is_writable_migration_entry(entry);
  164. }
  165. static inline void page_table_check_pte_flags(pte_t pte)
  166. {
  167. if (pte_present(pte) && pte_uffd_wp(pte))
  168. WARN_ON_ONCE(pte_write(pte));
  169. else if (is_swap_pte(pte) && pte_swp_uffd_wp(pte))
  170. WARN_ON_ONCE(swap_cached_writable(pte_to_swp_entry(pte)));
  171. }
  172. void __page_table_check_ptes_set(struct mm_struct *mm, pte_t *ptep, pte_t pte,
  173. unsigned int nr)
  174. {
  175. unsigned int i;
  176. if (&init_mm == mm)
  177. return;
  178. page_table_check_pte_flags(pte);
  179. for (i = 0; i < nr; i++)
  180. __page_table_check_pte_clear(mm, ptep_get(ptep + i));
  181. if (pte_user_accessible_page(pte))
  182. page_table_check_set(pte_pfn(pte), nr, pte_write(pte));
  183. }
  184. EXPORT_SYMBOL(__page_table_check_ptes_set);
  185. static inline void page_table_check_pmd_flags(pmd_t pmd)
  186. {
  187. if (pmd_present(pmd) && pmd_uffd_wp(pmd))
  188. WARN_ON_ONCE(pmd_write(pmd));
  189. else if (is_swap_pmd(pmd) && pmd_swp_uffd_wp(pmd))
  190. WARN_ON_ONCE(swap_cached_writable(pmd_to_swp_entry(pmd)));
  191. }
  192. void __page_table_check_pmd_set(struct mm_struct *mm, pmd_t *pmdp, pmd_t pmd)
  193. {
  194. if (&init_mm == mm)
  195. return;
  196. page_table_check_pmd_flags(pmd);
  197. __page_table_check_pmd_clear(mm, *pmdp);
  198. if (pmd_user_accessible_page(pmd)) {
  199. page_table_check_set(pmd_pfn(pmd), PMD_SIZE >> PAGE_SHIFT,
  200. pmd_write(pmd));
  201. }
  202. }
  203. EXPORT_SYMBOL(__page_table_check_pmd_set);
  204. void __page_table_check_pud_set(struct mm_struct *mm, pud_t *pudp, pud_t pud)
  205. {
  206. if (&init_mm == mm)
  207. return;
  208. __page_table_check_pud_clear(mm, *pudp);
  209. if (pud_user_accessible_page(pud)) {
  210. page_table_check_set(pud_pfn(pud), PUD_SIZE >> PAGE_SHIFT,
  211. pud_write(pud));
  212. }
  213. }
  214. EXPORT_SYMBOL(__page_table_check_pud_set);
/*
 * An entire PTE table under @pmd is being torn down: drop the
 * accounting for every PTE it holds.  Kernel mappings (init_mm)
 * are not tracked.
 */
void __page_table_check_pte_clear_range(struct mm_struct *mm,
					unsigned long addr,
					pmd_t pmd)
{
	if (&init_mm == mm)
		return;

	/* Only walk a sane, non-leaf PMD that points at a PTE table. */
	if (!pmd_bad(pmd) && !pmd_leaf(pmd)) {
		pte_t *ptep = pte_offset_map(&pmd, addr);
		unsigned long i;

		/* Mapping the table can fail; nothing to unmap then. */
		if (WARN_ON(!ptep))
			return;

		for (i = 0; i < PTRS_PER_PTE; i++) {
			__page_table_check_pte_clear(mm, ptep_get(ptep));
			addr += PAGE_SIZE;
			ptep++;
		}
		/* Rewind to the start of the table for pte_unmap(). */
		pte_unmap(ptep - PTRS_PER_PTE);
	}
}