subpage-prot.c

/*
 * Copyright 2007-2008 Paul Mackerras, IBM Corp.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/gfp.h>
#include <linux/types.h>
#include <linux/mm.h>
#include <linux/hugetlb.h>
#include <linux/syscalls.h>

#include <asm/pgtable.h>
#include <linux/uaccess.h>
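
/*
 * Overview of the data structure, assuming 64k kernel pages: the
 * subpage_prot_table is a three-level structure.  low_prot[4] holds
 * four leaf pages of u32 protection words covering the low 4GB (1GB
 * per leaf); protptrs[] is a two-level tree for addresses above 4GB,
 * indexed first by addr >> SBP_L3_SHIFT and then by
 * (addr >> SBP_L2_SHIFT) & (SBP_L2_COUNT - 1).  Each leaf page holds
 * one u32 of subpage-protection bits per 64k page.
 */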

/*
 * Free all pages allocated for subpage protection maps and pointers.
 * Also makes sure that the subpage_prot_table structure is
 * reinitialized for the next user.
 */
void subpage_prot_free(struct mm_struct *mm)
{
	struct subpage_prot_table *spt = &mm->context.spt;
	unsigned long i, j, addr;
	u32 **p;

	for (i = 0; i < 4; ++i) {
		if (spt->low_prot[i]) {
			free_page((unsigned long)spt->low_prot[i]);
			spt->low_prot[i] = NULL;
		}
	}
	addr = 0;
	for (i = 0; i < (TASK_SIZE_USER64 >> 43); ++i) {
		p = spt->protptrs[i];
		if (!p)
			continue;
		spt->protptrs[i] = NULL;
		for (j = 0; j < SBP_L2_COUNT && addr < spt->maxaddr;
		     ++j, addr += PAGE_SIZE)
			if (p[j])
				free_page((unsigned long)p[j]);
		free_page((unsigned long)p);
	}
	spt->maxaddr = 0;
}

void subpage_prot_init_new_context(struct mm_struct *mm)
{
	struct subpage_prot_table *spt = &mm->context.spt;

	memset(spt, 0, sizeof(*spt));
}
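
/*
 * Walk to the PTEs for [addr, addr + npages * PAGE_SIZE) and do a
 * no-op pte_update() on each.  On the hash MMU this is assumed to be
 * enough to invalidate any HPTE cached for the page (pte_update()
 * notices _PAGE_HASHPTE), so stale translations are flushed before
 * new subpage permissions take effect.
 */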
static void hpte_flush_range(struct mm_struct *mm, unsigned long addr,
			     int npages)
{
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;
	spinlock_t *ptl;

	pgd = pgd_offset(mm, addr);
	if (pgd_none(*pgd))
		return;
	pud = pud_offset(pgd, addr);
	if (pud_none(*pud))
		return;
	pmd = pmd_offset(pud, addr);
	if (pmd_none(*pmd))
		return;
	pte = pte_offset_map_lock(mm, pmd, addr, &ptl);
	arch_enter_lazy_mmu_mode();
	for (; npages > 0; --npages) {
		pte_update(mm, addr, pte, 0, 0, 0);
		addr += PAGE_SIZE;
		++pte;
	}
	arch_leave_lazy_mmu_mode();
	pte_unmap_unlock(pte - 1, ptl);
}

/*
 * Clear the subpage protection map for an address range, allowing
 * all accesses that are allowed by the pte permissions.
 */
static void subpage_prot_clear(unsigned long addr, unsigned long len)
{
	struct mm_struct *mm = current->mm;
	struct subpage_prot_table *spt = &mm->context.spt;
	u32 **spm, *spp;
	unsigned long i;
	size_t nw;
	unsigned long next, limit;

	down_write(&mm->mmap_sem);
	limit = addr + len;
	if (limit > spt->maxaddr)
		limit = spt->maxaddr;
	for (; addr < limit; addr = next) {
		next = pmd_addr_end(addr, limit);
		if (addr < 0x100000000UL) {
			spm = spt->low_prot;
		} else {
			spm = spt->protptrs[addr >> SBP_L3_SHIFT];
			if (!spm)
				continue;
		}
		spp = spm[(addr >> SBP_L2_SHIFT) & (SBP_L2_COUNT - 1)];
		if (!spp)
			continue;
		spp += (addr >> PAGE_SHIFT) & (SBP_L1_COUNT - 1);

		i = (addr >> PAGE_SHIFT) & (PTRS_PER_PTE - 1);
		nw = PTRS_PER_PTE - i;
		if (addr + (nw << PAGE_SHIFT) > next)
			nw = (next - addr) >> PAGE_SHIFT;
		memset(spp, 0, nw * sizeof(u32));

		/* now flush any existing HPTEs for the range */
		hpte_flush_range(mm, addr, nw);
	}
	up_write(&mm->mmap_sem);
}

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
static int subpage_walk_pmd_entry(pmd_t *pmd, unsigned long addr,
				  unsigned long end, struct mm_walk *walk)
{
	struct vm_area_struct *vma = walk->vma;

	split_huge_pmd(vma, pmd, addr);
	return 0;
}

static void subpage_mark_vma_nohuge(struct mm_struct *mm, unsigned long addr,
				    unsigned long len)
{
	struct vm_area_struct *vma;
	struct mm_walk subpage_proto_walk = {
		.mm = mm,
		.pmd_entry = subpage_walk_pmd_entry,
	};

	/*
	 * We don't try too hard, we just mark all the vmas in that range
	 * VM_NOHUGEPAGE and split them.
	 */
	vma = find_vma(mm, addr);

	/*
	 * If the whole range lies in an unmapped region, just return.
	 */
	if (vma && ((addr + len) <= vma->vm_start))
		return;

	while (vma) {
		if (vma->vm_start >= (addr + len))
			break;
		vma->vm_flags |= VM_NOHUGEPAGE;
		walk_page_vma(vma, &subpage_proto_walk);
		vma = vma->vm_next;
	}
}
#else
static void subpage_mark_vma_nohuge(struct mm_struct *mm, unsigned long addr,
				    unsigned long len)
{
	return;
}
#endif

/*
 * Copy in a subpage protection map for an address range.
 * The map has 2 bits per 4k subpage, so 32 bits per 64k page.
 * Each 2-bit field is 0 to allow any access, 1 to prevent writes,
 * 2 or 3 to prevent all accesses.
 * Note that the normal page protections also apply; the subpage
 * protection mechanism is an additional constraint, so putting 0
 * in a 2-bit field won't allow writes to a page that is otherwise
 * write-protected.
 * An illustrative userspace sketch appears at the end of this file.
 */
SYSCALL_DEFINE3(subpage_prot, unsigned long, addr,
		unsigned long, len, u32 __user *, map)
{
	struct mm_struct *mm = current->mm;
	struct subpage_prot_table *spt = &mm->context.spt;
	u32 **spm, *spp;
	unsigned long i;
	size_t nw;
	unsigned long next, limit;
	int err;

	if (radix_enabled())
		return -ENOENT;

	/* Check parameters */
	if ((addr & ~PAGE_MASK) || (len & ~PAGE_MASK) ||
	    addr >= mm->task_size || len >= mm->task_size ||
	    addr + len > mm->task_size)
		return -EINVAL;

	if (is_hugepage_only_range(mm, addr, len))
		return -EINVAL;

	if (!map) {
		/* Clear out the protection map for the address range */
		subpage_prot_clear(addr, len);
		return 0;
	}

	if (!access_ok(VERIFY_READ, map, (len >> PAGE_SHIFT) * sizeof(u32)))
		return -EFAULT;

	down_write(&mm->mmap_sem);
	subpage_mark_vma_nohuge(mm, addr, len);
	for (limit = addr + len; addr < limit; addr = next) {
		next = pmd_addr_end(addr, limit);
		err = -ENOMEM;
		if (addr < 0x100000000UL) {
			/* addr < 4GB, use low_prot */
			spm = spt->low_prot;
		} else {
			spm = spt->protptrs[addr >> SBP_L3_SHIFT];
			if (!spm) {
				spm = (u32 **)get_zeroed_page(GFP_KERNEL);
				if (!spm)
					goto out;
				spt->protptrs[addr >> SBP_L3_SHIFT] = spm;
			}
		}
		spm += (addr >> SBP_L2_SHIFT) & (SBP_L2_COUNT - 1);
		spp = *spm;
		if (!spp) {
			spp = (u32 *)get_zeroed_page(GFP_KERNEL);
			if (!spp)
				goto out;
			*spm = spp;
		}
		spp += (addr >> PAGE_SHIFT) & (SBP_L1_COUNT - 1);

		local_irq_disable();
		demote_segment_4k(mm, addr);
		local_irq_enable();

		i = (addr >> PAGE_SHIFT) & (PTRS_PER_PTE - 1);
		nw = PTRS_PER_PTE - i;
		if (addr + (nw << PAGE_SHIFT) > next)
			nw = (next - addr) >> PAGE_SHIFT;

		up_write(&mm->mmap_sem);
		if (__copy_from_user(spp, map, nw * sizeof(u32)))
			return -EFAULT;
		map += nw;
		down_write(&mm->mmap_sem);

		/* now flush any existing HPTEs for the range */
		hpte_flush_range(mm, addr, nw);
	}
	if (limit > spt->maxaddr)
		spt->maxaddr = limit;
	err = 0;
out:
	up_write(&mm->mmap_sem);
	return err;
}
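
/*
 * Illustrative userspace sketch (kept under #if 0 since it is not part
 * of the kernel build): one plausible way to exercise this syscall.
 * It makes every 4k subpage of one 64k page read-only by putting the
 * value 1 in each of the sixteen 2-bit fields of the map word.
 * __NR_subpage_prot is assumed to come from the powerpc ABI headers;
 * the per-field bit ordering within the u32 is interpreted by the hash
 * fault path, so setting all fields alike sidesteps that detail here.
 */
#if 0
#include <stdio.h>
#include <stdint.h>
#include <sys/mman.h>
#include <sys/syscall.h>
#include <unistd.h>

int main(void)
{
	size_t len = 0x10000;		/* one 64k page */
	uint32_t map[1];		/* one u32 of subpage bits per 64k page */
	void *p;

	p = mmap(NULL, len, PROT_READ | PROT_WRITE,
		 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
	if (p == MAP_FAILED) {
		perror("mmap");
		return 1;
	}

	/* 0x55555555 puts 1 ("prevent writes") in all sixteen 2-bit fields */
	map[0] = 0x55555555;
	if (syscall(__NR_subpage_prot, (unsigned long)p, len, map) != 0) {
		perror("subpage_prot");
		return 1;
	}

	/* Reads still work; a write to any 4k subpage would now fault */
	printf("first byte: %d\n", ((volatile char *)p)[0]);

	/* Passing a NULL map clears the protections again */
	syscall(__NR_subpage_prot, (unsigned long)p, len, NULL);
	return 0;
}
#endif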