// SPDX-License-Identifier: GPL-2.0-only
/*
 * Kernel-based Virtual Machine driver for Linux
 *
 * This module enables kernel and guest-mode vCPU access to guest physical
 * memory with suitable invalidation mechanisms.
 *
 * Copyright © 2021 Amazon.com, Inc. or its affiliates.
 *
 * Authors:
 *   David Woodhouse <dwmw2@infradead.org>
 */

#include <linux/kvm_host.h>
#include <linux/kvm.h>
#include <linux/highmem.h>
#include <linux/module.h>
#include <linux/errno.h>

#include "kvm_mm.h"

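/*
 * Illustrative usage sketch (an assumption for documentation purposes, not
 * code taken from this file): a consumer initializes and activates a cache
 * once, then checks it under gpc->lock before every access and refreshes it
 * whenever the check fails, e.g.:
 *
 *	kvm_gpc_init(gpc, kvm);
 *	kvm_gpc_activate(gpc, gpa, len);
 *	...
 *	read_lock_irqsave(&gpc->lock, flags);
 *	while (!kvm_gpc_check(gpc, len)) {
 *		read_unlock_irqrestore(&gpc->lock, flags);
 *		if (kvm_gpc_refresh(gpc, len))
 *			goto out;	// access failed
 *		read_lock_irqsave(&gpc->lock, flags);
 *	}
 *	... access the page via gpc->khva ...
 *	read_unlock_irqrestore(&gpc->lock, flags);
 */
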
/*
 * MMU notifier 'invalidate_range_start' hook.
 */
void gfn_to_pfn_cache_invalidate_start(struct kvm *kvm, unsigned long start,
                                       unsigned long end)
{
        struct gfn_to_pfn_cache *gpc;

        spin_lock(&kvm->gpc_lock);
        list_for_each_entry(gpc, &kvm->gpc_list, list) {
                read_lock_irq(&gpc->lock);

                /* Only a single page so no need to care about length */
                if (gpc->valid && !is_error_noslot_pfn(gpc->pfn) &&
                    gpc->uhva >= start && gpc->uhva < end) {
                        read_unlock_irq(&gpc->lock);

                        /*
                         * There is a small window here where the cache could
                         * be modified, and invalidation would no longer be
                         * necessary. Hence check again whether invalidation
                         * is still necessary once the write lock has been
                         * acquired.
                         */
                        write_lock_irq(&gpc->lock);
                        if (gpc->valid && !is_error_noslot_pfn(gpc->pfn) &&
                            gpc->uhva >= start && gpc->uhva < end)
                                gpc->valid = false;
                        write_unlock_irq(&gpc->lock);
                        continue;
                }

                read_unlock_irq(&gpc->lock);
        }
        spin_unlock(&kvm->gpc_lock);
}

static bool kvm_gpc_is_valid_len(gpa_t gpa, unsigned long uhva,
                                 unsigned long len)
{
        unsigned long offset = kvm_is_error_gpa(gpa) ? offset_in_page(uhva) :
                                                       offset_in_page(gpa);

        /*
         * The cached access must fit within a single page. The 'len' argument
         * to activate() and refresh() exists only to enforce that.
         */
        return offset + len <= PAGE_SIZE;
}

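/*
 * Check that the cache is active, still matches the current memslot
 * generation (for GPA-based caches), has a valid uhva, and that an access of
 * @len bytes still fits within the cached page. Callers are expected to hold
 * gpc->lock for read while checking and then using the cache.
 */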
bool kvm_gpc_check(struct gfn_to_pfn_cache *gpc, unsigned long len)
{
        struct kvm_memslots *slots = kvm_memslots(gpc->kvm);

        if (!gpc->active)
                return false;

        /*
         * If the page was cached from a memslot, make sure the memslots have
         * not been re-configured.
         */
        if (!kvm_is_error_gpa(gpc->gpa) && gpc->generation != slots->generation)
                return false;

        if (kvm_is_error_hva(gpc->uhva))
                return false;

        if (!kvm_gpc_is_valid_len(gpc->gpa, gpc->uhva, len))
                return false;

        if (!gpc->valid)
                return false;

        return true;
}

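/*
 * Map the pfn for kernel access: kmap() for memory backed by struct page,
 * with a fallback to memremap() for PFNMAP memory (e.g. MMIO) when
 * CONFIG_HAS_IOMEM is available.
 */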
static void *gpc_map(kvm_pfn_t pfn)
{
        if (pfn_valid(pfn))
                return kmap(pfn_to_page(pfn));

#ifdef CONFIG_HAS_IOMEM
        return memremap(pfn_to_hpa(pfn), PAGE_SIZE, MEMREMAP_WB);
#else
        return NULL;
#endif
}

static void gpc_unmap(kvm_pfn_t pfn, void *khva)
{
        /* Unmap the old pfn/page if it was mapped before. */
        if (is_error_noslot_pfn(pfn) || !khva)
                return;

        if (pfn_valid(pfn)) {
                kunmap(pfn_to_page(pfn));
                return;
        }

#ifdef CONFIG_HAS_IOMEM
        memunmap(khva);
#endif
}

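/*
 * Returns true if an mmu_notifier invalidation is in progress, or has
 * completed since @mmu_seq was snapshotted, in which case the caller must
 * retry the hva=>pfn resolution.
 */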
static inline bool mmu_notifier_retry_cache(struct kvm *kvm, unsigned long mmu_seq)
{
        /*
         * mn_active_invalidate_count acts for all intents and purposes
         * like mmu_invalidate_in_progress here; but the latter cannot
         * be used here because the invalidation of caches in the
         * mmu_notifier event occurs _before_ mmu_invalidate_in_progress
         * is elevated.
         *
         * Note, it does not matter that mn_active_invalidate_count
         * is not protected by gpc->lock. It is guaranteed to
         * be elevated before the mmu_notifier acquires gpc->lock, and
         * isn't dropped until after mmu_invalidate_seq is updated.
         */
        if (kvm->mn_active_invalidate_count)
                return true;

        /*
         * Ensure mn_active_invalidate_count is read before
         * mmu_invalidate_seq. This pairs with the smp_wmb() in
         * mmu_notifier_invalidate_range_end() to guarantee either the
         * old (non-zero) value of mn_active_invalidate_count or the
         * new (incremented) value of mmu_invalidate_seq is observed.
         */
        smp_rmb();
        return kvm->mmu_invalidate_seq != mmu_seq;
}

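/*
 * Resolve gpc->uhva to a pfn and kernel mapping, retrying until the result
 * cannot have been clobbered by a concurrent mmu_notifier invalidation.
 * Called with gpc->lock held for write; the lock is dropped and re-acquired
 * around the (sleepable) pfn lookup and mapping.
 */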
static int hva_to_pfn_retry(struct gfn_to_pfn_cache *gpc)
{
        /* Note, the new page offset may be different than the old! */
        void *old_khva = (void *)PAGE_ALIGN_DOWN((uintptr_t)gpc->khva);
        kvm_pfn_t new_pfn = KVM_PFN_ERR_FAULT;
        void *new_khva = NULL;
        unsigned long mmu_seq;

        lockdep_assert_held(&gpc->refresh_lock);
        lockdep_assert_held_write(&gpc->lock);

        /*
         * Invalidate the cache prior to dropping gpc->lock, the gpa=>uhva
         * assets have already been updated and so a concurrent check() from a
         * different task may not fail the gpa/uhva/generation checks.
         */
        gpc->valid = false;

        do {
                mmu_seq = gpc->kvm->mmu_invalidate_seq;
                smp_rmb();

                write_unlock_irq(&gpc->lock);

                /*
                 * If the previous iteration "failed" due to an mmu_notifier
                 * event, release the pfn and unmap the kernel virtual address
                 * from the previous attempt. Unmapping might sleep, so this
                 * needs to be done after dropping the lock. Opportunistically
                 * check for resched while the lock isn't held.
                 */
                if (new_pfn != KVM_PFN_ERR_FAULT) {
                        /*
                         * Keep the mapping if the previous iteration reused
                         * the existing mapping and didn't create a new one.
                         */
                        if (new_khva != old_khva)
                                gpc_unmap(new_pfn, new_khva);

                        kvm_release_pfn_clean(new_pfn);

                        cond_resched();
                }

                /* We always request a writeable mapping */
                new_pfn = hva_to_pfn(gpc->uhva, false, false, NULL, true, NULL);
                if (is_error_noslot_pfn(new_pfn))
                        goto out_error;

                /*
                 * Obtain a new kernel mapping if KVM itself will access the
                 * pfn. Note, kmap() and memremap() can both sleep, so this
                 * too must be done outside of gpc->lock!
                 */
                if (new_pfn == gpc->pfn)
                        new_khva = old_khva;
                else
                        new_khva = gpc_map(new_pfn);

                if (!new_khva) {
                        kvm_release_pfn_clean(new_pfn);
                        goto out_error;
                }

                write_lock_irq(&gpc->lock);

                /*
                 * Other tasks must wait for _this_ refresh to complete before
                 * attempting to refresh.
                 */
                WARN_ON_ONCE(gpc->valid);
        } while (mmu_notifier_retry_cache(gpc->kvm, mmu_seq));

        gpc->valid = true;
        gpc->pfn = new_pfn;
        gpc->khva = new_khva + offset_in_page(gpc->uhva);

        /*
         * Put the reference to the _new_ pfn. The pfn is now tracked by the
         * cache and can be safely migrated, swapped, etc... as the cache will
         * invalidate any mappings in response to relevant mmu_notifier events.
         */
        kvm_release_pfn_clean(new_pfn);

        return 0;

out_error:
        write_lock_irq(&gpc->lock);

        return -EFAULT;
}

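/*
 * Common refresh path for GPA-based and HVA-based caches: recompute the uhva
 * if needed, re-resolve the pfn/khva when the uhva changed or the cache was
 * already invalid, and unmap the old pfn (outside of gpc->lock) if the pfn
 * changed.
 */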
static int __kvm_gpc_refresh(struct gfn_to_pfn_cache *gpc, gpa_t gpa, unsigned long uhva)
{
        unsigned long page_offset;
        bool unmap_old = false;
        unsigned long old_uhva;
        kvm_pfn_t old_pfn;
        bool hva_change = false;
        void *old_khva;
        int ret;

        /* Either gpa or uhva must be valid, but not both */
        if (WARN_ON_ONCE(kvm_is_error_gpa(gpa) == kvm_is_error_hva(uhva)))
                return -EINVAL;

        lockdep_assert_held(&gpc->refresh_lock);

        write_lock_irq(&gpc->lock);

        if (!gpc->active) {
                ret = -EINVAL;
                goto out_unlock;
        }

        old_pfn = gpc->pfn;
        old_khva = (void *)PAGE_ALIGN_DOWN((uintptr_t)gpc->khva);
        old_uhva = PAGE_ALIGN_DOWN(gpc->uhva);

        if (kvm_is_error_gpa(gpa)) {
                page_offset = offset_in_page(uhva);

                gpc->gpa = INVALID_GPA;
                gpc->memslot = NULL;
                gpc->uhva = PAGE_ALIGN_DOWN(uhva);

                if (gpc->uhva != old_uhva)
                        hva_change = true;
        } else {
                struct kvm_memslots *slots = kvm_memslots(gpc->kvm);

                page_offset = offset_in_page(gpa);

                if (gpc->gpa != gpa || gpc->generation != slots->generation ||
                    kvm_is_error_hva(gpc->uhva)) {
                        gfn_t gfn = gpa_to_gfn(gpa);

                        gpc->gpa = gpa;
                        gpc->generation = slots->generation;
                        gpc->memslot = __gfn_to_memslot(slots, gfn);
                        gpc->uhva = gfn_to_hva_memslot(gpc->memslot, gfn);

                        if (kvm_is_error_hva(gpc->uhva)) {
                                ret = -EFAULT;
                                goto out;
                        }

                        /*
                         * Even if the GPA and/or the memslot generation changed, the
                         * HVA may still be the same.
                         */
                        if (gpc->uhva != old_uhva)
                                hva_change = true;
                } else {
                        gpc->uhva = old_uhva;
                }
        }

        /* Note: the offset must be correct before calling hva_to_pfn_retry() */
        gpc->uhva += page_offset;

        /*
         * If the userspace HVA changed or the PFN was already invalid,
         * drop the lock and do the HVA to PFN lookup again.
         */
        if (!gpc->valid || hva_change) {
                ret = hva_to_pfn_retry(gpc);
        } else {
                /*
                 * If the HVA→PFN mapping was already valid, don't unmap it.
                 * But do update gpc->khva because the offset within the page
                 * may have changed.
                 */
                gpc->khva = old_khva + page_offset;
                ret = 0;
                goto out_unlock;
        }

out:
        /*
         * Invalidate the cache and purge the pfn/khva if the refresh failed.
         * Some/all of the uhva, gpa, and memslot generation info may still be
         * valid, leave it as is.
         */
        if (ret) {
                gpc->valid = false;
                gpc->pfn = KVM_PFN_ERR_FAULT;
                gpc->khva = NULL;
        }

        /* Detect a pfn change before dropping the lock! */
        unmap_old = (old_pfn != gpc->pfn);

out_unlock:
        write_unlock_irq(&gpc->lock);

        if (unmap_old)
                gpc_unmap(old_pfn, old_khva);

        return ret;
}

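/*
 * Refresh the cache at its current GPA (or HVA for an HVA-based cache),
 * validating that @len still fits within the cached page.
 */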
int kvm_gpc_refresh(struct gfn_to_pfn_cache *gpc, unsigned long len)
{
        unsigned long uhva;

        guard(mutex)(&gpc->refresh_lock);

        if (!kvm_gpc_is_valid_len(gpc->gpa, gpc->uhva, len))
                return -EINVAL;

        /*
         * If the GPA is valid then ignore the HVA, as a cache can be GPA-based
         * or HVA-based, not both. For GPA-based caches, the HVA will be
         * recomputed during refresh if necessary.
         */
        uhva = kvm_is_error_gpa(gpc->gpa) ? gpc->uhva : KVM_HVA_ERR_BAD;

        return __kvm_gpc_refresh(gpc, gpc->gpa, uhva);
}

void kvm_gpc_init(struct gfn_to_pfn_cache *gpc, struct kvm *kvm)
{
        rwlock_init(&gpc->lock);
        mutex_init(&gpc->refresh_lock);

        gpc->kvm = kvm;
        gpc->pfn = KVM_PFN_ERR_FAULT;
        gpc->gpa = INVALID_GPA;
        gpc->uhva = KVM_HVA_ERR_BAD;
        gpc->active = gpc->valid = false;
}

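/*
 * Common activation helper: add the cache to the VM's gpc_list so that
 * mmu_notifier events can reach it, mark it active, and perform the initial
 * refresh.
 */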
static int __kvm_gpc_activate(struct gfn_to_pfn_cache *gpc, gpa_t gpa, unsigned long uhva,
                              unsigned long len)
{
        struct kvm *kvm = gpc->kvm;

        if (!kvm_gpc_is_valid_len(gpa, uhva, len))
                return -EINVAL;

        guard(mutex)(&gpc->refresh_lock);

        if (!gpc->active) {
                if (KVM_BUG_ON(gpc->valid, kvm))
                        return -EIO;

                spin_lock(&kvm->gpc_lock);
                list_add(&gpc->list, &kvm->gpc_list);
                spin_unlock(&kvm->gpc_lock);

                /*
                 * Activate the cache after adding it to the list, a concurrent
                 * refresh must not establish a mapping until the cache is
                 * reachable by mmu_notifier events.
                 */
                write_lock_irq(&gpc->lock);
                gpc->active = true;
                write_unlock_irq(&gpc->lock);
        }
        return __kvm_gpc_refresh(gpc, gpa, uhva);
}

int kvm_gpc_activate(struct gfn_to_pfn_cache *gpc, gpa_t gpa, unsigned long len)
{
        /*
         * Explicitly disallow INVALID_GPA so that the magic value can be used
         * by KVM to differentiate between GPA-based and HVA-based caches.
         */
        if (WARN_ON_ONCE(kvm_is_error_gpa(gpa)))
                return -EINVAL;

        return __kvm_gpc_activate(gpc, gpa, KVM_HVA_ERR_BAD, len);
}

int kvm_gpc_activate_hva(struct gfn_to_pfn_cache *gpc, unsigned long uhva, unsigned long len)
{
        if (!access_ok((void __user *)uhva, len))
                return -EINVAL;

        return __kvm_gpc_activate(gpc, INVALID_GPA, uhva, len);
}

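/*
 * Deactivate the cache: invalidate and unmap the current pfn/khva, and remove
 * the cache from the VM's gpc_list so mmu_notifier events no longer visit it.
 */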
void kvm_gpc_deactivate(struct gfn_to_pfn_cache *gpc)
{
        struct kvm *kvm = gpc->kvm;
        kvm_pfn_t old_pfn;
        void *old_khva;

        guard(mutex)(&gpc->refresh_lock);

        if (gpc->active) {
                /*
                 * Deactivate the cache before removing it from the list, KVM
                 * must stall mmu_notifier events until all users go away, i.e.
                 * until gpc->lock is dropped and refresh is guaranteed to fail.
                 */
                write_lock_irq(&gpc->lock);
                gpc->active = false;
                gpc->valid = false;

                /*
                 * Leave the GPA => uHVA cache intact, it's protected by the
                 * memslot generation. The PFN lookup needs to be redone every
                 * time as mmu_notifier protection is lost when the cache is
                 * removed from the VM's gpc_list.
                 */
                old_khva = gpc->khva - offset_in_page(gpc->khva);
                gpc->khva = NULL;

                old_pfn = gpc->pfn;
                gpc->pfn = KVM_PFN_ERR_FAULT;
                write_unlock_irq(&gpc->lock);

                spin_lock(&kvm->gpc_lock);
                list_del(&gpc->list);
                spin_unlock(&kvm->gpc_lock);

                gpc_unmap(old_pfn, old_khva);
        }
}