/* vma.c */
/*
 * Set up the VMAs to tell the VM about the vDSO.
 * Copyright 2007 Andi Kleen, SUSE Labs.
 * Subject to the GPL, v.2
 */
/*
 * Copyright (c) 2017 Oracle and/or its affiliates. All rights reserved.
 */
#include <linux/mm.h>
#include <linux/err.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/linkage.h>
#include <linux/random.h>
#include <linux/elf.h>
#include <asm/vdso.h>
#include <asm/vvar.h>
#include <asm/page.h>

/* Nonzero when the vDSO is mapped into new processes; see "vdso=" below. */
unsigned int __read_mostly vdso_enabled = 1;

/* Special mapping for the shared [vvar] data page. */
static struct vm_special_mapping vvar_mapping = {
	.name = "[vvar]"
};

#ifdef CONFIG_SPARC64
/* Special mapping for the 64-bit vDSO text. */
static struct vm_special_mapping vdso_mapping64 = {
	.name = "[vdso]"
};
#endif

#ifdef CONFIG_COMPAT
/* Special mapping for the 32-bit (compat) vDSO text. */
static struct vm_special_mapping vdso_mapping32 = {
	.name = "[vdso]"
};
#endif

/* Kernel-side pointer to the vvar page; allocated once in init_vdso_image(). */
struct vvar_data *vvar_data;

/* Bytes skipped past sym_vread_tick when patching in %tick instructions. */
#define SAVE_INSTR_SIZE 4
/*
 * Allocate pages for the vdso and vvar, and copy in the vdso text from the
 * kernel image.
 *
 * @image:        built-in vDSO image; its size must be a whole number of pages.
 * @vdso_mapping: special mapping whose ->pages array is allocated and filled.
 *
 * The shared vvar page is allocated only on the first call and reused by any
 * later image.  Returns 0 on success or -ENOMEM on failure; on failure all
 * partial allocations are released and the vDSO is disabled globally.
 */
int __init init_vdso_image(const struct vdso_image *image,
		struct vm_special_mapping *vdso_mapping)
{
	int i;
	struct page *dp, **dpp = NULL;	/* vvar ("data") page(s) */
	int dnpages = 0;
	struct page *cp, **cpp = NULL;	/* vdso text ("code") pages */
	int cnpages = (image->size) / PAGE_SIZE;

	/*
	 * First, the vdso text.  This is initialized data, an integral number
	 * of pages long.
	 */
	if (WARN_ON(image->size % PAGE_SIZE != 0))
		goto oom;	/* not actually OOM, but shares the cleanup path */

	cpp = kcalloc(cnpages, sizeof(struct page *), GFP_KERNEL);
	vdso_mapping->pages = cpp;
	if (!cpp)
		goto oom;

	if (vdso_fix_stick) {
		/*
		 * If the system uses %tick instead of %stick, patch the VDSO
		 * with instruction reading %tick instead of %stick.
		 * The replacement bytes are copied to SAVE_INSTR_SIZE bytes
		 * past sym_vread_tick — presumably just after the function's
		 * initial save instruction (TODO confirm against vdso asm).
		 */
		unsigned int j, k = SAVE_INSTR_SIZE;
		unsigned char *data = image->data;

		for (j = image->sym_vread_tick_patch_start;
				j < image->sym_vread_tick_patch_end; j++) {
			data[image->sym_vread_tick + k] = data[j];
			k++;
		}
	}

	/* Copy the (possibly patched) vDSO text into fresh pages. */
	for (i = 0; i < cnpages; i++) {
		cp = alloc_page(GFP_KERNEL);
		if (!cp)
			goto oom;
		cpp[i] = cp;
		copy_page(page_address(cp), image->data + i * PAGE_SIZE);
	}

	/*
	 * Now the vvar page.  This is uninitialized data.
	 */
	if (vvar_data == NULL) {
		dnpages = (sizeof(struct vvar_data) / PAGE_SIZE) + 1;
		/* struct vvar_data must fit within a single page. */
		if (WARN_ON(dnpages != 1))
			goto oom;
		dpp = kcalloc(dnpages, sizeof(struct page *), GFP_KERNEL);
		vvar_mapping.pages = dpp;
		if (!dpp)
			goto oom;

		dp = alloc_page(GFP_KERNEL);
		if (!dp)
			goto oom;

		dpp[0] = dp;
		vvar_data = page_address(dp);
		memset(vvar_data, 0, PAGE_SIZE);

		vvar_data->seq = 0;
	}

	return 0;

oom:
	/* kcalloc() zeroed the arrays, so never-filled slots are NULL. */
	if (cpp != NULL) {
		for (i = 0; i < cnpages; i++) {
			if (cpp[i] != NULL)
				__free_page(cpp[i]);
		}
		kfree(cpp);
		vdso_mapping->pages = NULL;
	}

	if (dpp != NULL) {
		for (i = 0; i < dnpages; i++) {
			if (dpp[i] != NULL)
				__free_page(dpp[i]);
		}
		kfree(dpp);
		vvar_mapping.pages = NULL;
	}

	pr_warn("Cannot allocate vdso\n");
	vdso_enabled = 0;
	return -ENOMEM;
}
  119. static int __init init_vdso(void)
  120. {
  121. int err = 0;
  122. #ifdef CONFIG_SPARC64
  123. err = init_vdso_image(&vdso_image_64_builtin, &vdso_mapping64);
  124. if (err)
  125. return err;
  126. #endif
  127. #ifdef CONFIG_COMPAT
  128. err = init_vdso_image(&vdso_image_32_builtin, &vdso_mapping32);
  129. #endif
  130. return err;
  131. }
  132. subsys_initcall(init_vdso);
struct linux_binprm;

/*
 * Shuffle the vdso up a bit, randomly.
 *
 * Adds a page-aligned random offset of 0..PTRS_PER_PTE-1 pages to @start.
 * NOTE(review): @len is currently unused here, so the offset is not clamped
 * against the region length; map_vdso() re-validates the randomized address
 * with a second get_unmapped_area() call, so this appears intentional.
 */
static unsigned long vdso_addr(unsigned long start, unsigned int len)
{
	unsigned int offset;

	/* This loses some more bits than a modulo, but is cheaper */
	offset = get_random_int() & (PTRS_PER_PTE - 1);
	return start + (offset << PAGE_SHIFT);
}
/*
 * Map the vDSO text and the shared vvar page into the current process.
 *
 * Layout: sym_vvar_start is negative (the vvar area sits immediately below
 * the vDSO text — note the -image->sym_vvar_start length used below), so:
 *
 *	addr                          -> [vvar]
 *	addr - image->sym_vvar_start  -> [vdso] text, image->size bytes
 *
 * Returns 0 on success or a negative errno; on any failure
 * mm->context.vdso is reset to NULL.  Runs with mmap_sem held for write.
 */
static int map_vdso(const struct vdso_image *image,
		struct vm_special_mapping *vdso_mapping)
{
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma;
	unsigned long text_start, addr = 0;
	int ret = 0;

	down_write(&mm->mmap_sem);

	/*
	 * First, get an unmapped region: then randomize it, and make sure that
	 * region is free.
	 */
	if (current->flags & PF_RANDOMIZE) {
		addr = get_unmapped_area(NULL, 0,
				image->size - image->sym_vvar_start,
				0, 0);
		if (IS_ERR_VALUE(addr)) {
			ret = addr;
			goto up_fail;
		}
		addr = vdso_addr(addr, image->size - image->sym_vvar_start);
	}

	/* Re-check: the (possibly randomized) address must itself be free. */
	addr = get_unmapped_area(NULL, addr,
			image->size - image->sym_vvar_start, 0, 0);
	if (IS_ERR_VALUE(addr)) {
		ret = addr;
		goto up_fail;
	}

	text_start = addr - image->sym_vvar_start;
	current->mm->context.vdso = (void __user *)text_start;

	/*
	 * MAYWRITE to allow gdb to COW and set breakpoints
	 */
	vma = _install_special_mapping(mm,
			text_start,
			image->size,
			VM_READ|VM_EXEC|
			VM_MAYREAD|VM_MAYWRITE|VM_MAYEXEC,
			vdso_mapping);

	if (IS_ERR(vma)) {
		ret = PTR_ERR(vma);
		goto up_fail;
	}

	/* The vvar area: read-only, deliberately without VM_MAYWRITE. */
	vma = _install_special_mapping(mm,
			addr,
			-image->sym_vvar_start,
			VM_READ|VM_MAYREAD,
			&vvar_mapping);

	if (IS_ERR(vma)) {
		ret = PTR_ERR(vma);
		/* Undo the text mapping installed above. */
		do_munmap(mm, text_start, image->size, NULL);
	}

up_fail:
	if (ret)
		current->mm->context.vdso = NULL;
	up_write(&mm->mmap_sem);
	return ret;
}
  200. int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
  201. {
  202. if (!vdso_enabled)
  203. return 0;
  204. #if defined CONFIG_COMPAT
  205. if (!(is_32bit_task()))
  206. return map_vdso(&vdso_image_64_builtin, &vdso_mapping64);
  207. else
  208. return map_vdso(&vdso_image_32_builtin, &vdso_mapping32);
  209. #else
  210. return map_vdso(&vdso_image_64_builtin, &vdso_mapping64);
  211. #endif
  212. }
  213. static __init int vdso_setup(char *s)
  214. {
  215. int err;
  216. unsigned long val;
  217. err = kstrtoul(s, 10, &val);
  218. if (err)
  219. return err;
  220. vdso_enabled = val;
  221. return 0;
  222. }
  223. __setup("vdso=", vdso_setup);