// SPDX-License-Identifier: GPL-2.0
/*
 * ACRN: Memory mapping management
 *
 * Copyright (C) 2020 Intel Corporation. All rights reserved.
 *
 * Authors:
 *	Fei Li <lei1.li@intel.com>
 *	Shuo Liu <shuo.a.liu@intel.com>
 */

#include <linux/io.h>
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>

#include "acrn_drv.h"
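
/*
 * modify_region() - Pass one vm_memory_region_op to the hypervisor.
 *
 * The region is wrapped into a single-entry vm_memory_region_batch, and
 * both descriptors are handed to the hypercall by physical address
 * (virt_to_phys()), since the hypervisor reads them out of Service VM
 * memory.
 */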
static int modify_region(struct acrn_vm *vm, struct vm_memory_region_op *region)
{
	struct vm_memory_region_batch *regions;
	int ret;

	regions = kzalloc(sizeof(*regions), GFP_KERNEL);
	if (!regions)
		return -ENOMEM;

	regions->vmid = vm->vmid;
	regions->regions_num = 1;
	regions->regions_gpa = virt_to_phys(region);

	ret = hcall_set_memory_regions(virt_to_phys(regions));
	if (ret < 0)
		dev_dbg(acrn_dev.this_device,
			"Failed to set memory region for VM[%u]!\n", vm->vmid);

	kfree(regions);
	return ret;
}

/**
 * acrn_mm_region_add() - Set up the EPT mapping of a memory region.
 * @vm: User VM.
 * @user_gpa: A GPA of the User VM.
 * @service_gpa: A GPA of the Service VM.
 * @size: Size of the region.
 * @mem_type: Combination of ACRN_MEM_TYPE_*.
 * @mem_access_right: Combination of ACRN_MEM_ACCESS_*.
 *
 * Return: 0 on success, <0 on error.
 */
int acrn_mm_region_add(struct acrn_vm *vm, u64 user_gpa, u64 service_gpa,
		       u64 size, u32 mem_type, u32 mem_access_right)
{
	struct vm_memory_region_op *region;
	int ret = 0;

	region = kzalloc(sizeof(*region), GFP_KERNEL);
	if (!region)
		return -ENOMEM;

	region->type = ACRN_MEM_REGION_ADD;
	region->user_vm_pa = user_gpa;
	region->service_vm_pa = service_gpa;
	region->size = size;
	region->attr = ((mem_type & ACRN_MEM_TYPE_MASK) |
			(mem_access_right & ACRN_MEM_ACCESS_RIGHT_MASK));
	ret = modify_region(vm, region);

	dev_dbg(acrn_dev.this_device,
		"%s: user-GPA[%pK] service-GPA[%pK] size[0x%llx].\n",
		__func__, (void *)user_gpa, (void *)service_gpa, size);
	kfree(region);
	return ret;
}
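
/*
 * Illustrative only, not part of this driver: a hypothetical caller that
 * maps one page of Service VM memory write-back into a User VM could do:
 *
 *	ret = acrn_mm_region_add(vm, user_gpa, service_gpa, PAGE_SIZE,
 *				 ACRN_MEM_TYPE_WB,
 *				 ACRN_MEM_ACCESS_READ | ACRN_MEM_ACCESS_WRITE);
 *	if (ret < 0)
 *		return ret;
 *
 * user_gpa and service_gpa are placeholder values; ACRN_MEM_ACCESS_READ is
 * assumed to come from the ACRN UAPI header alongside ACRN_MEM_ACCESS_WRITE
 * used elsewhere in this file. Both arguments are masked above with
 * ACRN_MEM_TYPE_MASK and ACRN_MEM_ACCESS_RIGHT_MASK.
 */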

/**
 * acrn_mm_region_del() - Delete the EPT mapping of a memory region.
 * @vm: User VM.
 * @user_gpa: A GPA of the User VM.
 * @size: Size of the region.
 *
 * Return: 0 on success, <0 on error.
 */
int acrn_mm_region_del(struct acrn_vm *vm, u64 user_gpa, u64 size)
{
	struct vm_memory_region_op *region;
	int ret = 0;

	region = kzalloc(sizeof(*region), GFP_KERNEL);
	if (!region)
		return -ENOMEM;

	region->type = ACRN_MEM_REGION_DEL;
	region->user_vm_pa = user_gpa;
	region->service_vm_pa = 0UL;
	region->size = size;
	region->attr = 0U;
	ret = modify_region(vm, region);

	dev_dbg(acrn_dev.this_device, "%s: user-GPA[%pK] size[0x%llx].\n",
		__func__, (void *)user_gpa, size);
	kfree(region);
	return ret;
}
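
/*
 * Teardown mirrors the add sketch above (again hypothetical):
 * acrn_mm_region_del(vm, user_gpa, PAGE_SIZE) removes the same EPT range;
 * only the User VM GPA and the size are needed.
 */

/*
 * acrn_vm_memseg_map() dispatches a userspace memmap request:
 * ACRN_MEMMAP_RAM regions go through acrn_vm_ram_map(), while
 * ACRN_MEMMAP_MMIO regions are mapped uncached (ACRN_MEM_TYPE_UC) via
 * acrn_mm_region_add(). Any other type is rejected with -EINVAL.
 */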
int acrn_vm_memseg_map(struct acrn_vm *vm, struct acrn_vm_memmap *memmap)
{
	int ret;

	if (memmap->type == ACRN_MEMMAP_RAM)
		return acrn_vm_ram_map(vm, memmap);

	if (memmap->type != ACRN_MEMMAP_MMIO) {
		dev_dbg(acrn_dev.this_device,
			"Invalid memmap type: %u\n", memmap->type);
		return -EINVAL;
	}

	ret = acrn_mm_region_add(vm, memmap->user_vm_pa,
				 memmap->service_vm_pa, memmap->len,
				 ACRN_MEM_TYPE_UC, memmap->attr);
	if (ret < 0)
		dev_dbg(acrn_dev.this_device,
			"Add memory region failed, VM[%u]!\n", vm->vmid);

	return ret;
}
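
/*
 * Counterpart of acrn_vm_memseg_map() for MMIO regions only. RAM mappings
 * are not torn down here; they are released in bulk by
 * acrn_vm_all_ram_unmap() at the end of this file.
 */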
int acrn_vm_memseg_unmap(struct acrn_vm *vm, struct acrn_vm_memmap *memmap)
{
	int ret;

	if (memmap->type != ACRN_MEMMAP_MMIO) {
		dev_dbg(acrn_dev.this_device,
			"Invalid memmap type: %u\n", memmap->type);
		return -EINVAL;
	}

	ret = acrn_mm_region_del(vm, memmap->user_vm_pa, memmap->len);
	if (ret < 0)
		dev_dbg(acrn_dev.this_device,
			"Del memory region failed, VM[%u]!\n", vm->vmid);

	return ret;
}

/**
 * acrn_vm_ram_map() - Create a RAM EPT mapping of User VM.
 * @vm: The User VM pointer
 * @memmap: Info of the EPT mapping
 *
 * Two cases are handled: if the backing VMA is a VM_PFNMAP mapping, the
 * PFN range (which must be physically contiguous) is mapped directly;
 * otherwise the user pages are pinned long-term, remapped into the kernel
 * with vmap(), and submitted to the hypervisor as one region per compound
 * page.
 *
 * Return: 0 on success, <0 on error.
 */
int acrn_vm_ram_map(struct acrn_vm *vm, struct acrn_vm_memmap *memmap)
{
	struct vm_memory_region_batch *regions_info;
	int nr_pages, i, order, nr_regions = 0;
	struct vm_memory_mapping *region_mapping;
	struct vm_memory_region_op *vm_region;
	struct page **pages = NULL, *page;
	void *remap_vaddr;
	int ret, pinned;
	u64 user_vm_pa;
	struct vm_area_struct *vma;

	if (!vm || !memmap)
		return -EINVAL;

	/* Get the page number of the map region */
	nr_pages = memmap->len >> PAGE_SHIFT;
	if (!nr_pages)
		return -EINVAL;

	mmap_read_lock(current->mm);
	vma = vma_lookup(current->mm, memmap->vma_base);
	if (vma && ((vma->vm_flags & VM_PFNMAP) != 0)) {
		unsigned long start_pfn, cur_pfn;
		bool writable;

		if ((memmap->vma_base + memmap->len) > vma->vm_end) {
			mmap_read_unlock(current->mm);
			return -EINVAL;
		}

		for (i = 0; i < nr_pages; i++) {
			struct follow_pfnmap_args args = {
				.vma = vma,
				.address = memmap->vma_base + i * PAGE_SIZE,
			};

			ret = follow_pfnmap_start(&args);
			if (ret)
				break;

			cur_pfn = args.pfn;
			if (i == 0)
				start_pfn = cur_pfn;
			writable = args.writable;
			follow_pfnmap_end(&args);

			/* Disallow write access if the PTE is not writable. */
			if (!writable &&
			    (memmap->attr & ACRN_MEM_ACCESS_WRITE)) {
				ret = -EFAULT;
				break;
			}

			/* Disallow refcounted pages. */
			if (pfn_valid(cur_pfn) &&
			    !PageReserved(pfn_to_page(cur_pfn))) {
				ret = -EFAULT;
				break;
			}

			/* Disallow non-contiguous ranges. */
			if (cur_pfn != start_pfn + i) {
				ret = -EINVAL;
				break;
			}
		}
		mmap_read_unlock(current->mm);

		if (ret) {
			dev_dbg(acrn_dev.this_device,
				"Failed to lookup PFN at VMA:%pK.\n",
				(void *)memmap->vma_base);
			return ret;
		}

		return acrn_mm_region_add(vm, memmap->user_vm_pa,
					  PFN_PHYS(start_pfn), memmap->len,
					  ACRN_MEM_TYPE_WB, memmap->attr);
	}
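
	/*
	 * Not a VM_PFNMAP VMA: drop mmap_lock, then fall back to pinning
	 * the user pages long-term and batching them into EPT regions.
	 */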
	mmap_read_unlock(current->mm);

	pages = vzalloc(array_size(nr_pages, sizeof(*pages)));
	if (!pages)
		return -ENOMEM;

	/* Lock the pages of user memory map region */
	pinned = pin_user_pages_fast(memmap->vma_base,
				     nr_pages, FOLL_WRITE | FOLL_LONGTERM,
				     pages);
	if (pinned < 0) {
		ret = pinned;
		goto free_pages;
	} else if (pinned != nr_pages) {
		ret = -EFAULT;
		goto put_pages;
	}

	/* Create a kernel map for the map region */
	remap_vaddr = vmap(pages, nr_pages, VM_MAP, PAGE_KERNEL);
	if (!remap_vaddr) {
		ret = -ENOMEM;
		goto put_pages;
	}

	/* Record Service VM va <-> User VM pa mapping */
	mutex_lock(&vm->regions_mapping_lock);
	region_mapping = &vm->regions_mapping[vm->regions_mapping_count];
	if (vm->regions_mapping_count < ACRN_MEM_MAPPING_MAX) {
		region_mapping->pages = pages;
		region_mapping->npages = nr_pages;
		region_mapping->size = memmap->len;
		region_mapping->service_vm_va = remap_vaddr;
		region_mapping->user_vm_pa = memmap->user_vm_pa;
		vm->regions_mapping_count++;
	} else {
		dev_warn(acrn_dev.this_device,
			 "Run out of memory mapping slots!\n");
		ret = -ENOMEM;
		mutex_unlock(&vm->regions_mapping_lock);
		goto unmap_no_count;
	}
	mutex_unlock(&vm->regions_mapping_lock);

	/* Calculate count of vm_memory_region_op */
	for (i = 0; i < nr_pages; i += 1 << order) {
		page = pages[i];
		VM_BUG_ON_PAGE(PageTail(page), page);
		order = compound_order(page);
		nr_regions++;
	}

	/* Prepare the vm_memory_region_batch */
	regions_info = kzalloc(struct_size(regions_info, regions_op,
					   nr_regions), GFP_KERNEL);
	if (!regions_info) {
		ret = -ENOMEM;
		goto unmap_kernel_map;
	}
	regions_info->regions_num = nr_regions;

	/* Fill each vm_memory_region_op */
	vm_region = regions_info->regions_op;
	regions_info->vmid = vm->vmid;
	regions_info->regions_gpa = virt_to_phys(vm_region);
	user_vm_pa = memmap->user_vm_pa;
	for (i = 0; i < nr_pages; i += 1 << order) {
		u32 region_size;

		page = pages[i];
		VM_BUG_ON_PAGE(PageTail(page), page);
		order = compound_order(page);
		region_size = PAGE_SIZE << order;
		vm_region->type = ACRN_MEM_REGION_ADD;
		vm_region->user_vm_pa = user_vm_pa;
		vm_region->service_vm_pa = page_to_phys(page);
		vm_region->size = region_size;
		vm_region->attr = (ACRN_MEM_TYPE_WB & ACRN_MEM_TYPE_MASK) |
				  (memmap->attr & ACRN_MEM_ACCESS_RIGHT_MASK);
		vm_region++;
		user_vm_pa += region_size;
	}

	/* Inform the ACRN Hypervisor to set up EPT mappings */
	ret = hcall_set_memory_regions(virt_to_phys(regions_info));
	if (ret < 0) {
		dev_dbg(acrn_dev.this_device,
			"Failed to set regions, VM[%u]!\n", vm->vmid);
		goto unset_region;
	}
	kfree(regions_info);

	dev_dbg(acrn_dev.this_device,
		"%s: VM[%u] service-GVA[%pK] user-GPA[%pK] size[0x%llx]\n",
		__func__, vm->vmid,
		remap_vaddr, (void *)memmap->user_vm_pa, memmap->len);
	return ret;

unset_region:
	kfree(regions_info);
unmap_kernel_map:
	mutex_lock(&vm->regions_mapping_lock);
	vm->regions_mapping_count--;
	mutex_unlock(&vm->regions_mapping_lock);
unmap_no_count:
	vunmap(remap_vaddr);
put_pages:
	for (i = 0; i < pinned; i++)
		unpin_user_page(pages[i]);
free_pages:
	vfree(pages);
	return ret;
}

/**
 * acrn_vm_all_ram_unmap() - Destroy all RAM EPT mappings of a User VM.
 * @vm: The User VM
 *
 * For every mapping recorded by acrn_vm_ram_map(), remove the kernel
 * vmap, unpin the user pages and free the page array.
 */
void acrn_vm_all_ram_unmap(struct acrn_vm *vm)
{
	struct vm_memory_mapping *region_mapping;
	int i, j;

	mutex_lock(&vm->regions_mapping_lock);
	for (i = 0; i < vm->regions_mapping_count; i++) {
		region_mapping = &vm->regions_mapping[i];
		vunmap(region_mapping->service_vm_va);
		for (j = 0; j < region_mapping->npages; j++)
			unpin_user_page(region_mapping->pages[j]);
		vfree(region_mapping->pages);
	}
	mutex_unlock(&vm->regions_mapping_lock);
}