guest_memfd.c

// SPDX-License-Identifier: GPL-2.0
#include <linux/backing-dev.h>
#include <linux/falloc.h>
#include <linux/kvm_host.h>
#include <linux/pagemap.h>
#include <linux/anon_inodes.h>

#include "kvm_mm.h"
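
/*
 * Per-file guest_memfd state: the owning VM, an xarray of memslot bindings
 * indexed by page offset, and this instance's entry on the backing
 * mapping's i_private_list.
 */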
struct kvm_gmem {
	struct kvm *kvm;
	struct xarray bindings;
	struct list_head entry;
};

/**
 * folio_file_pfn - like folio_file_page, but return a pfn.
 * @folio: The folio which contains this index.
 * @index: The index we want to look up.
 *
 * Return: The pfn for this index.
 */
static inline kvm_pfn_t folio_file_pfn(struct folio *folio, pgoff_t index)
{
	return folio_pfn(folio) + (index & (folio_nr_pages(folio) - 1));
}
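
/*
 * Invoke the arch preparation hook, if the architecture provides one, so it
 * can make the page usable as guest memory: the GFN, PFN and folio order are
 * handed to kvm_arch_gmem_prepare().  Without
 * CONFIG_HAVE_KVM_ARCH_GMEM_PREPARE this is a no-op.
 */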
static int __kvm_gmem_prepare_folio(struct kvm *kvm, struct kvm_memory_slot *slot,
				    pgoff_t index, struct folio *folio)
{
#ifdef CONFIG_HAVE_KVM_ARCH_GMEM_PREPARE
	kvm_pfn_t pfn = folio_file_pfn(folio, index);
	gfn_t gfn = slot->base_gfn + index - slot->gmem.pgoff;
	int rc = kvm_arch_gmem_prepare(kvm, gfn, pfn, folio_order(folio));

	if (rc) {
		pr_warn_ratelimited("gmem: Failed to prepare folio for index %lx GFN %llx PFN %llx error %d.\n",
				    index, gfn, pfn, rc);
		return rc;
	}
#endif

	return 0;
}

static inline void kvm_gmem_mark_prepared(struct folio *folio)
{
	folio_mark_uptodate(folio);
}

/*
 * Process @folio, which contains @gfn, so that the guest can use it.
 * The folio must be locked and the gfn must be contained in @slot.
 * On successful return the guest sees a zero page so as to avoid
 * leaking host data and the up-to-date flag is set.
 */
static int kvm_gmem_prepare_folio(struct kvm *kvm, struct kvm_memory_slot *slot,
				  gfn_t gfn, struct folio *folio)
{
	unsigned long nr_pages, i;
	pgoff_t index;
	int r;

	nr_pages = folio_nr_pages(folio);
	for (i = 0; i < nr_pages; i++)
		clear_highpage(folio_page(folio, i));

	/*
	 * Preparing huge folios should always be safe, since it should
	 * be possible to split them later if needed.
	 *
	 * Right now the folio order is always going to be zero, but the
	 * code is ready for huge folios.  The only assumption is that
	 * the base pgoff of memslots is naturally aligned with the
	 * requested page order, ensuring that huge folios can also use
	 * huge page table entries for GPA->HPA mapping.
	 *
	 * The order will be passed when creating the guest_memfd, and
	 * checked when creating memslots.
	 */
	WARN_ON(!IS_ALIGNED(slot->gmem.pgoff, 1 << folio_order(folio)));
	index = gfn - slot->base_gfn + slot->gmem.pgoff;
	index = ALIGN_DOWN(index, 1 << folio_order(folio));
	r = __kvm_gmem_prepare_folio(kvm, slot, index, folio);
	if (!r)
		kvm_gmem_mark_prepared(folio);

	return r;
}

/*
 * Returns a locked folio on success.  The caller is responsible for
 * setting the up-to-date flag before the memory is mapped into the guest.
 * There is no backing storage for the memory, so the folio will remain
 * up-to-date until it's removed.
 *
 * Ignore accessed, referenced, and dirty flags.  The memory is
 * unevictable and there is no storage to write back to.
 */
static struct folio *kvm_gmem_get_folio(struct inode *inode, pgoff_t index)
{
	/* TODO: Support huge pages. */
	return filemap_grab_folio(inode->i_mapping, index);
}
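
/*
 * Begin an invalidation of the range [start, end) of the backing file: for
 * every memslot bound to this gmem instance, unmap the overlapping GFN range
 * under the MMU lock and flush remote TLBs if anything was zapped.  Must be
 * paired with kvm_gmem_invalidate_end().
 */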
static void kvm_gmem_invalidate_begin(struct kvm_gmem *gmem, pgoff_t start,
				      pgoff_t end)
{
	bool flush = false, found_memslot = false;
	struct kvm_memory_slot *slot;
	struct kvm *kvm = gmem->kvm;
	unsigned long index;

	xa_for_each_range(&gmem->bindings, index, slot, start, end - 1) {
		pgoff_t pgoff = slot->gmem.pgoff;

		struct kvm_gfn_range gfn_range = {
			.start = slot->base_gfn + max(pgoff, start) - pgoff,
			.end = slot->base_gfn + min(pgoff + slot->npages, end) - pgoff,
			.slot = slot,
			.may_block = true,
		};

		if (!found_memslot) {
			found_memslot = true;

			KVM_MMU_LOCK(kvm);
			kvm_mmu_invalidate_begin(kvm);
		}

		flush |= kvm_mmu_unmap_gfn_range(kvm, &gfn_range);
	}

	if (flush)
		kvm_flush_remote_tlbs(kvm);

	if (found_memslot)
		KVM_MMU_UNLOCK(kvm);
}

static void kvm_gmem_invalidate_end(struct kvm_gmem *gmem, pgoff_t start,
				    pgoff_t end)
{
	struct kvm *kvm = gmem->kvm;

	if (xa_find(&gmem->bindings, &start, end - 1, XA_PRESENT)) {
		KVM_MMU_LOCK(kvm);
		kvm_mmu_invalidate_end(kvm);
		KVM_MMU_UNLOCK(kvm);
	}
}
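
/*
 * Handle FALLOC_FL_PUNCH_HOLE: zap SPTEs for the affected range in every
 * bound memslot, truncate the backing pages, then end the invalidation.
 */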
static long kvm_gmem_punch_hole(struct inode *inode, loff_t offset, loff_t len)
{
	struct list_head *gmem_list = &inode->i_mapping->i_private_list;
	pgoff_t start = offset >> PAGE_SHIFT;
	pgoff_t end = (offset + len) >> PAGE_SHIFT;
	struct kvm_gmem *gmem;

	/*
	 * Bindings must be stable across invalidation to ensure the start+end
	 * are balanced.
	 */
	filemap_invalidate_lock(inode->i_mapping);

	list_for_each_entry(gmem, gmem_list, entry)
		kvm_gmem_invalidate_begin(gmem, start, end);

	truncate_inode_pages_range(inode->i_mapping, offset, offset + len - 1);

	list_for_each_entry(gmem, gmem_list, entry)
		kvm_gmem_invalidate_end(gmem, start, end);

	filemap_invalidate_unlock(inode->i_mapping);

	return 0;
}
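
/*
 * Preallocate folios for the range being fallocate()d.  The file size is
 * fixed at creation, so allocation beyond i_size is rejected rather than
 * extending the file.
 */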
static long kvm_gmem_allocate(struct inode *inode, loff_t offset, loff_t len)
{
	struct address_space *mapping = inode->i_mapping;
	pgoff_t start, index, end;
	int r;

	/* Dedicated guest is immutable by default. */
	if (offset + len > i_size_read(inode))
		return -EINVAL;

	filemap_invalidate_lock_shared(mapping);

	start = offset >> PAGE_SHIFT;
	end = (offset + len) >> PAGE_SHIFT;

	r = 0;
	for (index = start; index < end; ) {
		struct folio *folio;

		if (signal_pending(current)) {
			r = -EINTR;
			break;
		}

		folio = kvm_gmem_get_folio(inode, index);
		if (IS_ERR(folio)) {
			r = PTR_ERR(folio);
			break;
		}

		index = folio_next_index(folio);

		folio_unlock(folio);
		folio_put(folio);

		/* 64-bit only, wrapping the index should be impossible. */
		if (WARN_ON_ONCE(!index))
			break;

		cond_resched();
	}

	filemap_invalidate_unlock_shared(mapping);

	return r;
}
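
/*
 * fallocate() handler.  FALLOC_FL_KEEP_SIZE is mandatory because the size of
 * a guest_memfd is fixed at creation; only preallocation and hole punching
 * are supported, on page-aligned ranges.
 */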
static long kvm_gmem_fallocate(struct file *file, int mode, loff_t offset,
			       loff_t len)
{
	int ret;

	if (!(mode & FALLOC_FL_KEEP_SIZE))
		return -EOPNOTSUPP;

	if (mode & ~(FALLOC_FL_KEEP_SIZE | FALLOC_FL_PUNCH_HOLE))
		return -EOPNOTSUPP;

	if (!PAGE_ALIGNED(offset) || !PAGE_ALIGNED(len))
		return -EINVAL;

	if (mode & FALLOC_FL_PUNCH_HOLE)
		ret = kvm_gmem_punch_hole(file_inode(file), offset, len);
	else
		ret = kvm_gmem_allocate(file_inode(file), offset, len);

	if (!ret)
		file_modified(file);
	return ret;
}
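
/*
 * Called on the final fput() of the file: sever all memslot bindings, zap any
 * remaining SPTEs, and free the kvm_gmem instance.  The backing memory itself
 * is tied to the inode and is freed when the inode goes away.
 */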
static int kvm_gmem_release(struct inode *inode, struct file *file)
{
	struct kvm_gmem *gmem = file->private_data;
	struct kvm_memory_slot *slot;
	struct kvm *kvm = gmem->kvm;
	unsigned long index;

	/*
	 * Prevent concurrent attempts to *unbind* a memslot.  This is the last
	 * reference to the file and thus no new bindings can be created, but
	 * dereferencing the slot for existing bindings needs to be protected
	 * against memslot updates, specifically so that unbind doesn't race
	 * and free the memslot (kvm_gmem_get_file() will return NULL).
	 */
	mutex_lock(&kvm->slots_lock);

	filemap_invalidate_lock(inode->i_mapping);

	xa_for_each(&gmem->bindings, index, slot)
		rcu_assign_pointer(slot->gmem.file, NULL);

	synchronize_rcu();

	/*
	 * All in-flight operations are gone and new bindings can be created.
	 * Zap all SPTEs pointed at by this file.  Do not free the backing
	 * memory, as its lifetime is associated with the inode, not the file.
	 */
	kvm_gmem_invalidate_begin(gmem, 0, -1ul);
	kvm_gmem_invalidate_end(gmem, 0, -1ul);

	list_del(&gmem->entry);

	filemap_invalidate_unlock(inode->i_mapping);

	mutex_unlock(&kvm->slots_lock);

	xa_destroy(&gmem->bindings);
	kfree(gmem);

	kvm_put_kvm(kvm);

	return 0;
}

static inline struct file *kvm_gmem_get_file(struct kvm_memory_slot *slot)
{
	/*
	 * Do not return slot->gmem.file if it has already been closed;
	 * there might be some time between the last fput() and when
	 * kvm_gmem_release() clears slot->gmem.file, and you do not
	 * want to spin in the meanwhile.
	 */
	return get_file_active(&slot->gmem.file);
}

static struct file_operations kvm_gmem_fops = {
	.open = generic_file_open,
	.release = kvm_gmem_release,
	.fallocate = kvm_gmem_fallocate,
};

void kvm_gmem_init(struct module *module)
{
	kvm_gmem_fops.owner = module;
}
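
/* guest_memfd pages are unmovable, so migration should never be attempted. */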
static int kvm_gmem_migrate_folio(struct address_space *mapping,
				  struct folio *dst, struct folio *src,
				  enum migrate_mode mode)
{
	WARN_ON_ONCE(1);
	return -EINVAL;
}

static int kvm_gmem_error_folio(struct address_space *mapping, struct folio *folio)
{
	struct list_head *gmem_list = &mapping->i_private_list;
	struct kvm_gmem *gmem;
	pgoff_t start, end;

	filemap_invalidate_lock_shared(mapping);

	start = folio->index;
	end = start + folio_nr_pages(folio);

	list_for_each_entry(gmem, gmem_list, entry)
		kvm_gmem_invalidate_begin(gmem, start, end);

	/*
	 * Do not truncate the range, what action is taken in response to the
	 * error is userspace's decision (assuming the architecture supports
	 * gracefully handling memory errors).  If/when the guest attempts to
	 * access a poisoned page, kvm_gmem_get_pfn() will return -EHWPOISON,
	 * at which point KVM can either terminate the VM or propagate the
	 * error to userspace.
	 */
	list_for_each_entry(gmem, gmem_list, entry)
		kvm_gmem_invalidate_end(gmem, start, end);

	filemap_invalidate_unlock_shared(mapping);

	return MF_DELAYED;
}

#ifdef CONFIG_HAVE_KVM_ARCH_GMEM_INVALIDATE
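/*
 * Let the architecture invalidate its state for the PFN range before the
 * folio is returned to the page allocator.
 */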
static void kvm_gmem_free_folio(struct folio *folio)
{
	struct page *page = folio_page(folio, 0);
	kvm_pfn_t pfn = page_to_pfn(page);
	int order = folio_order(folio);

	kvm_arch_gmem_invalidate(pfn, pfn + (1ul << order));
}
#endif

static const struct address_space_operations kvm_gmem_aops = {
	.dirty_folio = noop_dirty_folio,
	.migrate_folio = kvm_gmem_migrate_folio,
	.error_remove_folio = kvm_gmem_error_folio,
#ifdef CONFIG_HAVE_KVM_ARCH_GMEM_INVALIDATE
	.free_folio = kvm_gmem_free_folio,
#endif
};
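
/*
 * Minimal inode operations: stat works as usual, but all attribute changes
 * (including truncation) are rejected since the size is fixed at creation.
 */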
static int kvm_gmem_getattr(struct mnt_idmap *idmap, const struct path *path,
			    struct kstat *stat, u32 request_mask,
			    unsigned int query_flags)
{
	struct inode *inode = path->dentry->d_inode;

	generic_fillattr(idmap, request_mask, inode, stat);
	return 0;
}

static int kvm_gmem_setattr(struct mnt_idmap *idmap, struct dentry *dentry,
			    struct iattr *attr)
{
	return -EINVAL;
}

static const struct inode_operations kvm_gmem_iops = {
	.getattr = kvm_gmem_getattr,
	.setattr = kvm_gmem_setattr,
};
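
/*
 * Allocate a file descriptor backed by an anonymous inode, configure the
 * mapping (GFP_HIGHUSER allocations, inaccessible/unevictable), and tie the
 * new kvm_gmem instance to @kvm.  Returns the installed fd or a negative
 * errno.
 */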
static int __kvm_gmem_create(struct kvm *kvm, loff_t size, u64 flags)
{
	const char *anon_name = "[kvm-gmem]";
	struct kvm_gmem *gmem;
	struct inode *inode;
	struct file *file;
	int fd, err;

	fd = get_unused_fd_flags(0);
	if (fd < 0)
		return fd;

	gmem = kzalloc(sizeof(*gmem), GFP_KERNEL);
	if (!gmem) {
		err = -ENOMEM;
		goto err_fd;
	}

	file = anon_inode_create_getfile(anon_name, &kvm_gmem_fops, gmem,
					 O_RDWR, NULL);
	if (IS_ERR(file)) {
		err = PTR_ERR(file);
		goto err_gmem;
	}

	file->f_flags |= O_LARGEFILE;

	inode = file->f_inode;
	WARN_ON(file->f_mapping != inode->i_mapping);

	inode->i_private = (void *)(unsigned long)flags;
	inode->i_op = &kvm_gmem_iops;
	inode->i_mapping->a_ops = &kvm_gmem_aops;
	inode->i_mode |= S_IFREG;
	inode->i_size = size;
	mapping_set_gfp_mask(inode->i_mapping, GFP_HIGHUSER);
	mapping_set_inaccessible(inode->i_mapping);
	/* Unmovable mappings are supposed to be marked unevictable as well. */
	WARN_ON_ONCE(!mapping_unevictable(inode->i_mapping));

	kvm_get_kvm(kvm);
	gmem->kvm = kvm;
	xa_init(&gmem->bindings);
	list_add(&gmem->entry, &inode->i_mapping->i_private_list);

	fd_install(fd, file);
	return fd;

err_gmem:
	kfree(gmem);
err_fd:
	put_unused_fd(fd);
	return err;
}
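
/*
 * Handle KVM_CREATE_GUEST_MEMFD: validate the requested flags and size
 * (positive and page-aligned) before creating the fd.
 */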
int kvm_gmem_create(struct kvm *kvm, struct kvm_create_guest_memfd *args)
{
	loff_t size = args->size;
	u64 flags = args->flags;
	u64 valid_flags = 0;

	if (flags & ~valid_flags)
		return -EINVAL;

	if (size <= 0 || !PAGE_ALIGNED(size))
		return -EINVAL;

	return __kvm_gmem_create(kvm, size, flags);
}
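
/*
 * Bind @slot to the guest_memfd given by @fd at file offset @offset.  The
 * requested range must lie within the file and must not overlap any existing
 * binding of this gmem instance.  The binding is recorded in gmem->bindings
 * so that invalidations can find the memslot.
 */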
int kvm_gmem_bind(struct kvm *kvm, struct kvm_memory_slot *slot,
		  unsigned int fd, loff_t offset)
{
	loff_t size = slot->npages << PAGE_SHIFT;
	unsigned long start, end;
	struct kvm_gmem *gmem;
	struct inode *inode;
	struct file *file;
	int r = -EINVAL;

	BUILD_BUG_ON(sizeof(gfn_t) != sizeof(slot->gmem.pgoff));

	file = fget(fd);
	if (!file)
		return -EBADF;

	if (file->f_op != &kvm_gmem_fops)
		goto err;

	gmem = file->private_data;
	if (gmem->kvm != kvm)
		goto err;

	inode = file_inode(file);

	if (offset < 0 || !PAGE_ALIGNED(offset) ||
	    offset + size > i_size_read(inode))
		goto err;

	filemap_invalidate_lock(inode->i_mapping);

	start = offset >> PAGE_SHIFT;
	end = start + slot->npages;

	if (!xa_empty(&gmem->bindings) &&
	    xa_find(&gmem->bindings, &start, end - 1, XA_PRESENT)) {
		filemap_invalidate_unlock(inode->i_mapping);
		goto err;
	}

	/*
	 * No synchronize_rcu() needed, any in-flight readers are guaranteed to
	 * see either a NULL file or this new file, no need for them to go
	 * away.
	 */
	rcu_assign_pointer(slot->gmem.file, file);
	slot->gmem.pgoff = start;

	xa_store_range(&gmem->bindings, start, end - 1, slot, GFP_KERNEL);
	filemap_invalidate_unlock(inode->i_mapping);

	/*
	 * Drop the reference to the file, even on success.  The file pins KVM,
	 * not the other way 'round.  Active bindings are invalidated if the
	 * file is closed before memslots are destroyed.
	 */
	r = 0;
err:
	fput(file);
	return r;
}
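
/*
 * Remove @slot's binding from its guest_memfd file, if that file is still
 * live, and clear slot->gmem.file so readers observe the unbind.
 */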
void kvm_gmem_unbind(struct kvm_memory_slot *slot)
{
	unsigned long start = slot->gmem.pgoff;
	unsigned long end = start + slot->npages;
	struct kvm_gmem *gmem;
	struct file *file;

	/*
	 * Nothing to do if the underlying file was already closed (or is being
	 * closed right now), kvm_gmem_release() invalidates all bindings.
	 */
	file = kvm_gmem_get_file(slot);
	if (!file)
		return;

	gmem = file->private_data;

	filemap_invalidate_lock(file->f_mapping);
	xa_store_range(&gmem->bindings, start, end - 1, NULL, GFP_KERNEL);
	rcu_assign_pointer(slot->gmem.file, NULL);
	synchronize_rcu();
	filemap_invalidate_unlock(file->f_mapping);

	fput(file);
}

/* Returns a locked folio on success.  */
static struct folio *
__kvm_gmem_get_pfn(struct file *file, struct kvm_memory_slot *slot,
		   gfn_t gfn, kvm_pfn_t *pfn, bool *is_prepared,
		   int *max_order)
{
	pgoff_t index = gfn - slot->base_gfn + slot->gmem.pgoff;
	struct kvm_gmem *gmem = file->private_data;
	struct folio *folio;

	if (file != slot->gmem.file) {
		WARN_ON_ONCE(slot->gmem.file);
		return ERR_PTR(-EFAULT);
	}

	gmem = file->private_data;
	if (xa_load(&gmem->bindings, index) != slot) {
		WARN_ON_ONCE(xa_load(&gmem->bindings, index));
		return ERR_PTR(-EIO);
	}

	folio = kvm_gmem_get_folio(file_inode(file), index);
	if (IS_ERR(folio))
		return folio;

	if (folio_test_hwpoison(folio)) {
		folio_unlock(folio);
		folio_put(folio);
		return ERR_PTR(-EHWPOISON);
	}

	*pfn = folio_file_pfn(folio, index);
	if (max_order)
		*max_order = 0;

	*is_prepared = folio_test_uptodate(folio);
	return folio;
}
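
/*
 * Resolve @gfn to the PFN of its guest_memfd backing page, zeroing and
 * arch-preparing the folio on first use.  On success the underlying folio is
 * unlocked but a reference is kept on behalf of the caller.
 */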
int kvm_gmem_get_pfn(struct kvm *kvm, struct kvm_memory_slot *slot,
		     gfn_t gfn, kvm_pfn_t *pfn, int *max_order)
{
	struct file *file = kvm_gmem_get_file(slot);
	struct folio *folio;
	bool is_prepared = false;
	int r = 0;

	if (!file)
		return -EFAULT;

	folio = __kvm_gmem_get_pfn(file, slot, gfn, pfn, &is_prepared, max_order);
	if (IS_ERR(folio)) {
		r = PTR_ERR(folio);
		goto out;
	}

	if (!is_prepared)
		r = kvm_gmem_prepare_folio(kvm, slot, gfn, folio);

	folio_unlock(folio);
	if (r < 0)
		folio_put(folio);

out:
	fput(file);
	return r;
}
EXPORT_SYMBOL_GPL(kvm_gmem_get_pfn);

#ifdef CONFIG_KVM_GENERIC_PRIVATE_MEM
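/*
 * Populate up to @npages pages starting at @start_gfn, invoking
 * @post_populate for each chunk so the caller (e.g. an arch's initial guest
 * image loader) can fill and convert the contents from @src.  Returns the
 * number of pages processed, or a negative errno if nothing was processed.
 */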
long kvm_gmem_populate(struct kvm *kvm, gfn_t start_gfn, void __user *src, long npages,
		       kvm_gmem_populate_cb post_populate, void *opaque)
{
	struct file *file;
	struct kvm_memory_slot *slot;
	void __user *p;
	int ret = 0, max_order;
	long i;

	lockdep_assert_held(&kvm->slots_lock);
	if (npages < 0)
		return -EINVAL;

	slot = gfn_to_memslot(kvm, start_gfn);
	if (!kvm_slot_can_be_private(slot))
		return -EINVAL;

	file = kvm_gmem_get_file(slot);
	if (!file)
		return -EFAULT;

	filemap_invalidate_lock(file->f_mapping);

	npages = min_t(ulong, slot->npages - (start_gfn - slot->base_gfn), npages);
	for (i = 0; i < npages; i += (1 << max_order)) {
		struct folio *folio;
		gfn_t gfn = start_gfn + i;
		bool is_prepared = false;
		kvm_pfn_t pfn;

		if (signal_pending(current)) {
			ret = -EINTR;
			break;
		}

		folio = __kvm_gmem_get_pfn(file, slot, gfn, &pfn, &is_prepared, &max_order);
		if (IS_ERR(folio)) {
			ret = PTR_ERR(folio);
			break;
		}

		if (is_prepared) {
			folio_unlock(folio);
			folio_put(folio);
			ret = -EEXIST;
			break;
		}

		folio_unlock(folio);
		WARN_ON(!IS_ALIGNED(gfn, 1 << max_order) ||
			(npages - i) < (1 << max_order));

		ret = -EINVAL;
		while (!kvm_range_has_memory_attributes(kvm, gfn, gfn + (1 << max_order),
							KVM_MEMORY_ATTRIBUTE_PRIVATE,
							KVM_MEMORY_ATTRIBUTE_PRIVATE)) {
			if (!max_order)
				goto put_folio_and_exit;
			max_order--;
		}

		p = src ? src + i * PAGE_SIZE : NULL;
		ret = post_populate(kvm, gfn, pfn, p, max_order, opaque);
		if (!ret)
			kvm_gmem_mark_prepared(folio);

put_folio_and_exit:
		folio_put(folio);
		if (ret)
			break;
	}

	filemap_invalidate_unlock(file->f_mapping);
	fput(file);

	return ret && !i ? ret : i;
}
EXPORT_SYMBOL_GPL(kvm_gmem_populate);

#endif