ident_map.c

// SPDX-License-Identifier: GPL-2.0
/*
 * Helper routines for building identity mapping page tables. This is
 * included by both the compressed kernel and the regular kernel.
 */

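/*
 * Free the PTE page referenced by a non-leaf PMD entry, via the
 * caller-supplied free_pgt_page() callback.
 */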
static void free_pte(struct x86_mapping_info *info, pmd_t *pmd)
{
	pte_t *pte = pte_offset_kernel(pmd, 0);

	info->free_pgt_page(pte, info->context);
}

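/*
 * Walk one PMD page, free the PTE page behind every non-leaf entry, then
 * free the PMD page itself.
 */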
static void free_pmd(struct x86_mapping_info *info, pud_t *pud)
{
	pmd_t *pmd = pmd_offset(pud, 0);
	int i;

	for (i = 0; i < PTRS_PER_PMD; i++) {
		if (!pmd_present(pmd[i]))
			continue;

		if (pmd_leaf(pmd[i]))
			continue;

		free_pte(info, &pmd[i]);
	}

	info->free_pgt_page(pmd, info->context);
}

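/*
 * Walk one PUD page, recurse into every non-leaf entry, then free the PUD
 * page itself.
 */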
static void free_pud(struct x86_mapping_info *info, p4d_t *p4d)
{
	pud_t *pud = pud_offset(p4d, 0);
	int i;

	for (i = 0; i < PTRS_PER_PUD; i++) {
		if (!pud_present(pud[i]))
			continue;

		if (pud_leaf(pud[i]))
			continue;

		free_pmd(info, &pud[i]);
	}

	info->free_pgt_page(pud, info->context);
}

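/*
 * Walk one P4D page and recurse into every present entry. The P4D page is
 * only freed when 5-level paging is enabled; with 4 levels the p4d is
 * folded into the PGD and is not a separately allocated page.
 */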
static void free_p4d(struct x86_mapping_info *info, pgd_t *pgd)
{
	p4d_t *p4d = p4d_offset(pgd, 0);
	int i;

	for (i = 0; i < PTRS_PER_P4D; i++) {
		if (!p4d_present(p4d[i]))
			continue;

		free_pud(info, &p4d[i]);
	}

	if (pgtable_l5_enabled())
		info->free_pgt_page(p4d, info->context);
}

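/*
 * Tear down an identity-mapping page table built by
 * kernel_ident_mapping_init(), returning every page-table page through
 * info->free_pgt_page().
 */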
void kernel_ident_mapping_free(struct x86_mapping_info *info, pgd_t *pgd)
{
	int i;

	for (i = 0; i < PTRS_PER_PGD; i++) {
		if (!pgd_present(pgd[i]))
			continue;

		free_p4d(info, &pgd[i]);
	}

	info->free_pgt_page(pgd, info->context);
}

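/*
 * Fill a PMD page with 2MB leaf entries covering [addr, end), skipping
 * slots that are already populated. info->offset shifts the virtual
 * address back to the physical address being identity-mapped.
 */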
static void ident_pmd_init(struct x86_mapping_info *info, pmd_t *pmd_page,
			   unsigned long addr, unsigned long end)
{
	addr &= PMD_MASK;
	for (; addr < end; addr += PMD_SIZE) {
		pmd_t *pmd = pmd_page + pmd_index(addr);

		if (pmd_present(*pmd))
			continue;

		set_pmd(pmd, __pmd((addr - info->offset) | info->page_flag));
	}
}

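/*
 * Populate a PUD page for [addr, end). A 1GB leaf entry is used only when
 * info->direct_gbpages is set, the gbpage would not map anything outside
 * the requested region, and the slot is still empty; otherwise a PMD page
 * is allocated (or an existing one reused) and filled with 2MB entries.
 */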
static int ident_pud_init(struct x86_mapping_info *info, pud_t *pud_page,
			  unsigned long addr, unsigned long end)
{
	unsigned long next;

	for (; addr < end; addr = next) {
		pud_t *pud = pud_page + pud_index(addr);
		pmd_t *pmd;
		bool use_gbpage;

		next = (addr & PUD_MASK) + PUD_SIZE;
		if (next > end)
			next = end;

		/* if this is already a gbpage, this portion is already mapped */
		if (pud_leaf(*pud))
			continue;

		/* Is using a gbpage allowed? */
		use_gbpage = info->direct_gbpages;

		/* Don't use gbpage if it maps more than the requested region. */
		/* at the beginning: */
		use_gbpage &= ((addr & ~PUD_MASK) == 0);
		/* ... or at the end: */
		use_gbpage &= ((next & ~PUD_MASK) == 0);

		/* Never overwrite existing mappings */
		use_gbpage &= !pud_present(*pud);

		if (use_gbpage) {
			pud_t pudval;

			pudval = __pud((addr - info->offset) | info->page_flag);
			set_pud(pud, pudval);
			continue;
		}

		if (pud_present(*pud)) {
			pmd = pmd_offset(pud, 0);
			ident_pmd_init(info, pmd, addr, next);
			continue;
		}

		pmd = (pmd_t *)info->alloc_pgt_page(info->context);
		if (!pmd)
			return -ENOMEM;
		ident_pmd_init(info, pmd, addr, next);
		set_pud(pud, __pud(__pa(pmd) | info->kernpg_flag));
	}

	return 0;
}

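/*
 * Populate a P4D page for [addr, end), allocating PUD pages as needed and
 * reusing any that are already present.
 */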
static int ident_p4d_init(struct x86_mapping_info *info, p4d_t *p4d_page,
			  unsigned long addr, unsigned long end)
{
	unsigned long next;
	int result;

	for (; addr < end; addr = next) {
		p4d_t *p4d = p4d_page + p4d_index(addr);
		pud_t *pud;

		next = (addr & P4D_MASK) + P4D_SIZE;
		if (next > end)
			next = end;

		if (p4d_present(*p4d)) {
			pud = pud_offset(p4d, 0);
			result = ident_pud_init(info, pud, addr, next);
			if (result)
				return result;

			continue;
		}

		pud = (pud_t *)info->alloc_pgt_page(info->context);
		if (!pud)
			return -ENOMEM;

		result = ident_pud_init(info, pud, addr, next);
		if (result)
			return result;

		set_p4d(p4d, __p4d(__pa(pud) | info->kernpg_flag | _PAGE_NOPTISHADOW));
	}

	return 0;
}

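/*
 * Build an identity mapping (virtual == physical + info->offset) for the
 * physical range [pstart, pend) under pgd_page. Page-table pages come from
 * info->alloc_pgt_page(); returns 0 on success or -ENOMEM.
 */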
int kernel_ident_mapping_init(struct x86_mapping_info *info, pgd_t *pgd_page,
			      unsigned long pstart, unsigned long pend)
{
	unsigned long addr = pstart + info->offset;
	unsigned long end = pend + info->offset;
	unsigned long next;
	int result;

	/* Set the default pagetable flags if not supplied */
	if (!info->kernpg_flag)
		info->kernpg_flag = _KERNPG_TABLE;

	/* Filter out unsupported __PAGE_KERNEL_* bits: */
	info->kernpg_flag &= __default_kernel_pte_mask;

	for (; addr < end; addr = next) {
		pgd_t *pgd = pgd_page + pgd_index(addr);
		p4d_t *p4d;

		next = (addr & PGDIR_MASK) + PGDIR_SIZE;
		if (next > end)
			next = end;

		if (pgd_present(*pgd)) {
			p4d = p4d_offset(pgd, 0);
			result = ident_p4d_init(info, p4d, addr, next);
			if (result)
				return result;
			continue;
		}

		p4d = (p4d_t *)info->alloc_pgt_page(info->context);
		if (!p4d)
			return -ENOMEM;
		result = ident_p4d_init(info, p4d, addr, next);
		if (result)
			return result;
		if (pgtable_l5_enabled()) {
			set_pgd(pgd, __pgd(__pa(p4d) | info->kernpg_flag | _PAGE_NOPTISHADOW));
		} else {
			/*
			 * With p4d folded, pgd is equal to p4d.
			 * The pgd entry has to point to the pud page table in this case.
			 */
			pud_t *pud = pud_offset(p4d, 0);

			set_pgd(pgd, __pgd(__pa(pud) | info->kernpg_flag | _PAGE_NOPTISHADOW));
		}
	}

	return 0;
}

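/*
 * Usage sketch (illustrative only, not part of this file): a caller such
 * as the kexec path supplies page-table allocation callbacks and flags
 * through struct x86_mapping_info, then maps each physical range it needs.
 * ident_alloc_page(), ident_free_page() and build_example_ident_map() below
 * are hypothetical stand-ins for a real caller's helpers.
 */
#if 0	/* example only */
static void *ident_alloc_page(void *context)
{
	/* Must return a zeroed page for use as a page table, or NULL. */
	return (void *)get_zeroed_page(GFP_KERNEL);
}

static void ident_free_page(void *page, void *context)
{
	free_page((unsigned long)page);
}

static int build_example_ident_map(pgd_t *pgd, u64 start, u64 end)
{
	struct x86_mapping_info info = {
		.alloc_pgt_page	= ident_alloc_page,
		.free_pgt_page	= ident_free_page,
		.page_flag	= __PAGE_KERNEL_LARGE_EXEC,
		.offset		= 0,	/* plain identity: virt == phys */
	};

	/* kernpg_flag is left 0 and defaults to _KERNPG_TABLE inside. */
	return kernel_ident_mapping_init(&info, pgd, start, end);
}
#endif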