tlb.c

// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2000 - 2007 Jeff Dike (jdike@{addtoit,linux.intel}.com)
 */

#include <linux/mm.h>
#include <linux/module.h>
#include <linux/sched/signal.h>

#include <asm/tlbflush.h>
#include <asm/mmu_context.h>
#include <as-layout.h>
#include <mem_user.h>
#include <os.h>
#include <skas.h>
#include <kern_util.h>
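
/*
 * vm_ops bundles the primitives used to push page-table state to the host:
 * mmap/unmap/mprotect callbacks plus the mm_id they act on. um_tlb_sync()
 * below picks the kern_* implementations for init_mm and the skas
 * map/unmap/protect helpers for user address spaces.
 */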
struct vm_ops {
	struct mm_id *mm_idp;

	int (*mmap)(struct mm_id *mm_idp,
		    unsigned long virt, unsigned long len, int prot,
		    int phys_fd, unsigned long long offset);
	int (*unmap)(struct mm_id *mm_idp,
		     unsigned long virt, unsigned long len);
	int (*mprotect)(struct mm_id *mm_idp,
			unsigned long virt, unsigned long len,
			unsigned int prot);
};
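
/*
 * The kern_* variants operate directly on the UML kernel's own host address
 * space through the os_* helpers; they are the ops used when syncing init_mm.
 */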
static int kern_map(struct mm_id *mm_idp,
		    unsigned long virt, unsigned long len, int prot,
		    int phys_fd, unsigned long long offset)
{
	/* TODO: Why does executable always need to be set in the kernel? */
	return os_map_memory((void *)virt, phys_fd, offset, len,
			     prot & UM_PROT_READ, prot & UM_PROT_WRITE,
			     1);
}

static int kern_unmap(struct mm_id *mm_idp,
		      unsigned long virt, unsigned long len)
{
	return os_unmap_memory((void *)virt, len);
}

static int kern_mprotect(struct mm_id *mm_idp,
			 unsigned long virt, unsigned long len,
			 unsigned int prot)
{
	return os_protect_memory((void *)virt, len,
				 prot & UM_PROT_READ, prot & UM_PROT_WRITE,
				 1);
}

void report_enomem(void)
{
	printk(KERN_ERR "UML ran out of memory on the host side! "
	       "This can happen due to a memory limitation or because "
	       "vm.max_map_count has been reached.\n");
}
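
/*
 * Walk the PTEs in [addr, end) and replay any pending changes on the host:
 * a "new page" PTE is mapped (or unmapped if it is no longer present), a
 * "new prot" PTE gets mprotect(), and the accessed/dirty bits are folded
 * into the host protection so that not-young pages fault and not-dirty
 * pages stay read-only. Each PTE is marked up to date once it is synced.
 */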
static inline int update_pte_range(pmd_t *pmd, unsigned long addr,
				   unsigned long end,
				   struct vm_ops *ops)
{
	pte_t *pte;
	int r, w, x, prot, ret = 0;

	pte = pte_offset_kernel(pmd, addr);
	do {
		r = pte_read(*pte);
		w = pte_write(*pte);
		x = pte_exec(*pte);
		if (!pte_young(*pte)) {
			r = 0;
			w = 0;
		} else if (!pte_dirty(*pte))
			w = 0;

		prot = ((r ? UM_PROT_READ : 0) | (w ? UM_PROT_WRITE : 0) |
			(x ? UM_PROT_EXEC : 0));
		if (pte_newpage(*pte)) {
			if (pte_present(*pte)) {
				__u64 offset;
				unsigned long phys = pte_val(*pte) & PAGE_MASK;
				int fd = phys_mapping(phys, &offset);

				ret = ops->mmap(ops->mm_idp, addr, PAGE_SIZE,
						prot, fd, offset);
			} else
				ret = ops->unmap(ops->mm_idp, addr, PAGE_SIZE);
		} else if (pte_newprot(*pte))
			ret = ops->mprotect(ops->mm_idp, addr, PAGE_SIZE, prot);
		*pte = pte_mkuptodate(*pte);
	} while (pte++, addr += PAGE_SIZE, ((addr < end) && !ret));

	return ret;
}
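
/*
 * The pmd/pud/p4d walkers below all follow the same pattern: a non-present
 * entry flagged as "new page" is unmapped on the host in a single call
 * covering the whole range it spans; otherwise the walk descends one level.
 */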
static inline int update_pmd_range(pud_t *pud, unsigned long addr,
				   unsigned long end,
				   struct vm_ops *ops)
{
	pmd_t *pmd;
	unsigned long next;
	int ret = 0;

	pmd = pmd_offset(pud, addr);
	do {
		next = pmd_addr_end(addr, end);
		if (!pmd_present(*pmd)) {
			if (pmd_newpage(*pmd)) {
				ret = ops->unmap(ops->mm_idp, addr,
						 next - addr);
				pmd_mkuptodate(*pmd);
			}
		} else
			ret = update_pte_range(pmd, addr, next, ops);
	} while (pmd++, addr = next, ((addr < end) && !ret));

	return ret;
}

static inline int update_pud_range(p4d_t *p4d, unsigned long addr,
				   unsigned long end,
				   struct vm_ops *ops)
{
	pud_t *pud;
	unsigned long next;
	int ret = 0;

	pud = pud_offset(p4d, addr);
	do {
		next = pud_addr_end(addr, end);
		if (!pud_present(*pud)) {
			if (pud_newpage(*pud)) {
				ret = ops->unmap(ops->mm_idp, addr,
						 next - addr);
				pud_mkuptodate(*pud);
			}
		} else
			ret = update_pmd_range(pud, addr, next, ops);
	} while (pud++, addr = next, ((addr < end) && !ret));

	return ret;
}

static inline int update_p4d_range(pgd_t *pgd, unsigned long addr,
				   unsigned long end,
				   struct vm_ops *ops)
{
	p4d_t *p4d;
	unsigned long next;
	int ret = 0;

	p4d = p4d_offset(pgd, addr);
	do {
		next = p4d_addr_end(addr, end);
		if (!p4d_present(*p4d)) {
			if (p4d_newpage(*p4d)) {
				ret = ops->unmap(ops->mm_idp, addr,
						 next - addr);
				p4d_mkuptodate(*p4d);
			}
		} else
			ret = update_pud_range(p4d, addr, next, ops);
	} while (p4d++, addr = next, ((addr < end) && !ret));

	return ret;
}
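
/*
 * um_tlb_sync() - flush the pending page-table changes for @mm to the host.
 *
 * Walks the range [sync_tlb_range_from, sync_tlb_range_to) recorded by
 * earlier um_tlb_mark_sync() calls, applying maps, unmaps and protection
 * changes through either the kern_* ops (for init_mm) or the skas
 * map/unmap/protect ops (for user address spaces), then resets the range.
 */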
int um_tlb_sync(struct mm_struct *mm)
{
	pgd_t *pgd;
	struct vm_ops ops;
	unsigned long addr = mm->context.sync_tlb_range_from, next;
	int ret = 0;

	if (mm->context.sync_tlb_range_to == 0)
		return 0;

	ops.mm_idp = &mm->context.id;
	if (mm == &init_mm) {
		ops.mmap = kern_map;
		ops.unmap = kern_unmap;
		ops.mprotect = kern_mprotect;
	} else {
		ops.mmap = map;
		ops.unmap = unmap;
		ops.mprotect = protect;
	}

	pgd = pgd_offset(mm, addr);
	do {
		next = pgd_addr_end(addr, mm->context.sync_tlb_range_to);
		if (!pgd_present(*pgd)) {
			if (pgd_newpage(*pgd)) {
				ret = ops.unmap(ops.mm_idp, addr,
						next - addr);
				pgd_mkuptodate(*pgd);
			}
		} else
			ret = update_p4d_range(pgd, addr, next, &ops);
	} while (pgd++, addr = next,
		 ((addr < mm->context.sync_tlb_range_to) && !ret));

	if (ret == -ENOMEM)
		report_enomem();

	mm->context.sync_tlb_range_from = 0;
	mm->context.sync_tlb_range_to = 0;

	return ret;
}

void flush_tlb_all(void)
{
	/*
	 * Don't bother flushing if this address space is about to be
	 * destroyed.
	 */
	if (atomic_read(&current->mm->mm_users) == 0)
		return;

	flush_tlb_mm(current->mm);
}
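
/*
 * flush_tlb_mm() does not touch the host directly: it only marks every VMA
 * in @mm as needing a sync via um_tlb_mark_sync(), leaving the deferred
 * host-side work to um_tlb_sync().
 */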
void flush_tlb_mm(struct mm_struct *mm)
{
	struct vm_area_struct *vma;
	VMA_ITERATOR(vmi, mm, 0);

	for_each_vma(vmi, vma)
		um_tlb_mark_sync(mm, vma->vm_start, vma->vm_end);
}