book3s_64_vio.c

/*
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2, as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
 *
 * Copyright 2010 Paul Mackerras, IBM Corp. <paulus@au1.ibm.com>
 * Copyright 2011 David Gibson, IBM Corporation <dwg@au1.ibm.com>
 * Copyright 2016 Alexey Kardashevskiy, IBM Corporation <aik@au1.ibm.com>
 */

#include <linux/types.h>
#include <linux/string.h>
#include <linux/kvm.h>
#include <linux/kvm_host.h>
#include <linux/highmem.h>
#include <linux/gfp.h>
#include <linux/slab.h>
#include <linux/sched/signal.h>
#include <linux/hugetlb.h>
#include <linux/list.h>
#include <linux/anon_inodes.h>
#include <linux/iommu.h>
#include <linux/file.h>

#include <asm/kvm_ppc.h>
#include <asm/kvm_book3s.h>
#include <asm/book3s/64/mmu-hash.h>
#include <asm/hvcall.h>
#include <asm/synch.h>
#include <asm/ppc-opcode.h>
#include <asm/kvm_host.h>
#include <asm/udbg.h>
#include <asm/iommu.h>
#include <asm/tce.h>
#include <asm/mmu_context.h>
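
/*
 * Returns the number of system pages needed to store @iommu_pages
 * 64bit TCE entries, i.e. the guest view of the TCE table.
 */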
static unsigned long kvmppc_tce_pages(unsigned long iommu_pages)
{
	return ALIGN(iommu_pages * sizeof(u64), PAGE_SIZE) / PAGE_SIZE;
}
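
/*
 * Total page footprint of one table, charged against RLIMIT_MEMLOCK:
 * the TCE list pages themselves plus the pages occupied by
 * struct kvmppc_spapr_tce_table and its pages[] array.
 */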
static unsigned long kvmppc_stt_pages(unsigned long tce_pages)
{
	unsigned long stt_bytes = sizeof(struct kvmppc_spapr_tce_table) +
			(tce_pages * sizeof(struct page *));

	return tce_pages + ALIGN(stt_bytes, PAGE_SIZE) / PAGE_SIZE;
}
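
/*
 * Charges (@inc == true) or uncharges @stt_pages against the owning
 * process's RLIMIT_MEMLOCK; fails with -ENOMEM when the limit would be
 * exceeded and the task does not have CAP_IPC_LOCK.
 */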
static long kvmppc_account_memlimit(unsigned long stt_pages, bool inc)
{
	long ret = 0;

	if (!current || !current->mm)
		return ret; /* process exited */

	down_write(&current->mm->mmap_sem);

	if (inc) {
		unsigned long locked, lock_limit;

		locked = current->mm->locked_vm + stt_pages;
		lock_limit = rlimit(RLIMIT_MEMLOCK) >> PAGE_SHIFT;
		if (locked > lock_limit && !capable(CAP_IPC_LOCK))
			ret = -ENOMEM;
		else
			current->mm->locked_vm += stt_pages;
	} else {
		if (WARN_ON_ONCE(stt_pages > current->mm->locked_vm))
			stt_pages = current->mm->locked_vm;

		current->mm->locked_vm -= stt_pages;
	}

	pr_debug("[%d] RLIMIT_MEMLOCK KVM %c%ld %ld/%ld%s\n", current->pid,
			inc ? '+' : '-',
			stt_pages << PAGE_SHIFT,
			current->mm->locked_vm << PAGE_SHIFT,
			rlimit(RLIMIT_MEMLOCK),
			ret ? " - exceeded" : "");

	up_write(&current->mm->mmap_sem);

	return ret;
}

static void kvm_spapr_tce_iommu_table_free(struct rcu_head *head)
{
	struct kvmppc_spapr_tce_iommu_table *stit = container_of(head,
			struct kvmppc_spapr_tce_iommu_table, rcu);

	iommu_tce_table_put(stit->tbl);

	kfree(stit);
}

static void kvm_spapr_tce_liobn_put(struct kref *kref)
{
	struct kvmppc_spapr_tce_iommu_table *stit = container_of(kref,
			struct kvmppc_spapr_tce_iommu_table, kref);

	list_del_rcu(&stit->next);

	call_rcu(&stit->rcu, kvm_spapr_tce_iommu_table_free);
}
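
/*
 * Called when an IOMMU group is detached from the VFIO KVM device:
 * drops the KVM reference for every hardware table of @grp that is
 * attached to any of this VM's TCE tables.
 */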
extern void kvm_spapr_tce_release_iommu_group(struct kvm *kvm,
		struct iommu_group *grp)
{
	int i;
	struct kvmppc_spapr_tce_table *stt;
	struct kvmppc_spapr_tce_iommu_table *stit, *tmp;
	struct iommu_table_group *table_group = NULL;

	list_for_each_entry_rcu(stt, &kvm->arch.spapr_tce_tables, list) {

		table_group = iommu_group_get_iommudata(grp);
		if (WARN_ON(!table_group))
			continue;

		list_for_each_entry_safe(stit, tmp, &stt->iommu_tables, next) {
			for (i = 0; i < IOMMU_TABLE_GROUP_MAX_TABLES; ++i) {
				if (table_group->tables[i] != stit->tbl)
					continue;

				kref_put(&stit->kref, kvm_spapr_tce_liobn_put);
			}
		}
	}
}
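
/*
 * Attaches a hardware iommu_table (looked up in @grp by the DMA window
 * parameters of the guest table behind @tablefd) to a guest TCE table,
 * so that H_PUT_TCE and friends update the real hardware table as well.
 */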
extern long kvm_spapr_tce_attach_iommu_group(struct kvm *kvm, int tablefd,
		struct iommu_group *grp)
{
	struct kvmppc_spapr_tce_table *stt = NULL;
	bool found = false;
	struct iommu_table *tbl = NULL;
	struct iommu_table_group *table_group;
	long i;
	struct kvmppc_spapr_tce_iommu_table *stit;
	struct fd f;

	f = fdget(tablefd);
	if (!f.file)
		return -EBADF;

	list_for_each_entry_rcu(stt, &kvm->arch.spapr_tce_tables, list) {
		if (stt == f.file->private_data) {
			found = true;
			break;
		}
	}

	fdput(f);

	if (!found)
		return -EINVAL;

	table_group = iommu_group_get_iommudata(grp);
	if (WARN_ON(!table_group))
		return -EFAULT;

	for (i = 0; i < IOMMU_TABLE_GROUP_MAX_TABLES; ++i) {
		struct iommu_table *tbltmp = table_group->tables[i];

		if (!tbltmp)
			continue;
		/* Make sure hardware table parameters are compatible */
		if ((tbltmp->it_page_shift <= stt->page_shift) &&
				(tbltmp->it_offset << tbltmp->it_page_shift ==
				 stt->offset << stt->page_shift) &&
				(tbltmp->it_size << tbltmp->it_page_shift >=
				 stt->size << stt->page_shift)) {
			/*
			 * Reference the table to avoid races with
			 * add/remove DMA windows.
			 */
			tbl = iommu_tce_table_get(tbltmp);
			break;
		}
	}
	if (!tbl)
		return -EINVAL;

	list_for_each_entry_rcu(stit, &stt->iommu_tables, next) {
		if (tbl != stit->tbl)
			continue;

		if (!kref_get_unless_zero(&stit->kref)) {
			/* stit is being destroyed */
			iommu_tce_table_put(tbl);
			return -ENOTTY;
		}
		/*
		 * The table is already known to this KVM, we just increased
		 * its KVM reference counter and can return.
		 */
		return 0;
	}

	stit = kzalloc(sizeof(*stit), GFP_KERNEL);
	if (!stit) {
		iommu_tce_table_put(tbl);
		return -ENOMEM;
	}

	stit->tbl = tbl;
	kref_init(&stit->kref);

	list_add_rcu(&stit->next, &stt->iommu_tables);

	return 0;
}

static void release_spapr_tce_table(struct rcu_head *head)
{
	struct kvmppc_spapr_tce_table *stt = container_of(head,
			struct kvmppc_spapr_tce_table, rcu);
	unsigned long i, npages = kvmppc_tce_pages(stt->size);

	for (i = 0; i < npages; i++)
		__free_page(stt->pages[i]);

	kfree(stt);
}
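
/*
 * Fault handler for the vma set up by kvm_spapr_tce_mmap(): lets
 * userspace (e.g. QEMU) map and read the guest view of the TCE table.
 */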
static vm_fault_t kvm_spapr_tce_fault(struct vm_fault *vmf)
{
	struct kvmppc_spapr_tce_table *stt = vmf->vma->vm_file->private_data;
	struct page *page;

	if (vmf->pgoff >= kvmppc_tce_pages(stt->size))
		return VM_FAULT_SIGBUS;

	page = stt->pages[vmf->pgoff];
	get_page(page);
	vmf->page = page;
	return 0;
}

static const struct vm_operations_struct kvm_spapr_tce_vm_ops = {
	.fault = kvm_spapr_tce_fault,
};

static int kvm_spapr_tce_mmap(struct file *file, struct vm_area_struct *vma)
{
	vma->vm_ops = &kvm_spapr_tce_vm_ops;
	return 0;
}

static int kvm_spapr_tce_release(struct inode *inode, struct file *filp)
{
	struct kvmppc_spapr_tce_table *stt = filp->private_data;
	struct kvmppc_spapr_tce_iommu_table *stit, *tmp;
	struct kvm *kvm = stt->kvm;

	mutex_lock(&kvm->lock);
	list_del_rcu(&stt->list);
	mutex_unlock(&kvm->lock);

	list_for_each_entry_safe(stit, tmp, &stt->iommu_tables, next) {
		WARN_ON(!kref_read(&stit->kref));
		while (1) {
			if (kref_put(&stit->kref, kvm_spapr_tce_liobn_put))
				break;
		}
	}

	kvm_put_kvm(stt->kvm);

	kvmppc_account_memlimit(
		kvmppc_stt_pages(kvmppc_tce_pages(stt->size)), false);
	call_rcu(&stt->rcu, release_spapr_tce_table);

	return 0;
}

static const struct file_operations kvm_spapr_tce_fops = {
	.mmap = kvm_spapr_tce_mmap,
	.release = kvm_spapr_tce_release,
};
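
/*
 * Handles the KVM_CREATE_SPAPR_TCE{,_64} ioctl: validates the window
 * geometry, charges RLIMIT_MEMLOCK, allocates zeroed pages for the
 * guest view of the table and returns an anonymous file descriptor
 * referring to the new table.
 */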
long kvm_vm_ioctl_create_spapr_tce(struct kvm *kvm,
				   struct kvm_create_spapr_tce_64 *args)
{
	struct kvmppc_spapr_tce_table *stt = NULL;
	struct kvmppc_spapr_tce_table *siter;
	unsigned long npages, size = args->size;
	int ret = -ENOMEM;
	int i;

	if (!args->size || args->page_shift < 12 || args->page_shift > 34 ||
		(args->offset + args->size > (ULLONG_MAX >> args->page_shift)))
		return -EINVAL;

	npages = kvmppc_tce_pages(size);
	ret = kvmppc_account_memlimit(kvmppc_stt_pages(npages), true);
	if (ret)
		return ret;

	ret = -ENOMEM;
	stt = kzalloc(sizeof(*stt) + npages * sizeof(struct page *),
		      GFP_KERNEL);
	if (!stt)
		goto fail_acct;

	stt->liobn = args->liobn;
	stt->page_shift = args->page_shift;
	stt->offset = args->offset;
	stt->size = size;
	stt->kvm = kvm;
	INIT_LIST_HEAD_RCU(&stt->iommu_tables);

	for (i = 0; i < npages; i++) {
		stt->pages[i] = alloc_page(GFP_KERNEL | __GFP_ZERO);
		if (!stt->pages[i])
			goto fail;
	}

	mutex_lock(&kvm->lock);

	/* Check this LIOBN hasn't been previously allocated */
	ret = 0;
	list_for_each_entry(siter, &kvm->arch.spapr_tce_tables, list) {
		if (siter->liobn == args->liobn) {
			ret = -EBUSY;
			break;
		}
	}

	if (!ret)
		ret = anon_inode_getfd("kvm-spapr-tce", &kvm_spapr_tce_fops,
				       stt, O_RDWR | O_CLOEXEC);

	if (ret >= 0) {
		list_add_rcu(&stt->list, &kvm->arch.spapr_tce_tables);
		kvm_get_kvm(kvm);
	}

	mutex_unlock(&kvm->lock);

	if (ret >= 0)
		return ret;

fail:
	for (i = 0; i < npages; i++)
		if (stt->pages[i])
			__free_page(stt->pages[i]);

	kfree(stt);
fail_acct:
	kvmppc_account_memlimit(kvmppc_stt_pages(npages), false);
	return ret;
}
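
/*
 * Clears a single hardware TCE by exchanging it with an empty entry
 * (hpa = 0, DMA_NONE); used to keep the hardware table consistent
 * after a partial failure.
 */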
static void kvmppc_clear_tce(struct iommu_table *tbl, unsigned long entry)
{
	unsigned long hpa = 0;
	enum dma_data_direction dir = DMA_NONE;

	iommu_tce_xchg(tbl, entry, &hpa, &dir);
}
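
/*
 * Looks up the preregistered memory chunk backing @entry (via the
 * userspace address remembered in it_userspace) and drops its "mapped"
 * counter; returns H_TOO_HARD when the bookkeeping cannot be done yet.
 */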
static long kvmppc_tce_iommu_mapped_dec(struct kvm *kvm,
		struct iommu_table *tbl, unsigned long entry)
{
	struct mm_iommu_table_group_mem_t *mem = NULL;
	const unsigned long pgsize = 1ULL << tbl->it_page_shift;
	__be64 *pua = IOMMU_TABLE_USERSPACE_ENTRY(tbl, entry);

	if (!pua)
		/* it_userspace allocation might be delayed */
		return H_TOO_HARD;

	mem = mm_iommu_lookup(kvm->mm, be64_to_cpu(*pua), pgsize);
	if (!mem)
		return H_TOO_HARD;

	mm_iommu_mapped_dec(mem);

	*pua = cpu_to_be64(0);

	return H_SUCCESS;
}
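
/*
 * Clears one hardware TCE and, if it was mapped, releases the pin on
 * the preregistered memory behind it; restores the old entry if the
 * release fails.
 */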
static long kvmppc_tce_iommu_do_unmap(struct kvm *kvm,
		struct iommu_table *tbl, unsigned long entry)
{
	enum dma_data_direction dir = DMA_NONE;
	unsigned long hpa = 0;
	long ret;

	if (WARN_ON_ONCE(iommu_tce_xchg(tbl, entry, &hpa, &dir)))
		return H_TOO_HARD;

	if (dir == DMA_NONE)
		return H_SUCCESS;

	ret = kvmppc_tce_iommu_mapped_dec(kvm, tbl, entry);
	if (ret != H_SUCCESS)
		iommu_tce_xchg(tbl, entry, &hpa, &dir);

	return ret;
}
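
/*
 * A guest TCE page may cover several hardware IOMMU pages
 * (stt->page_shift >= tbl->it_page_shift), so one guest entry
 * corresponds to "subpages" consecutive hardware entries; unmap all
 * of them.
 */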
static long kvmppc_tce_iommu_unmap(struct kvm *kvm,
		struct kvmppc_spapr_tce_table *stt, struct iommu_table *tbl,
		unsigned long entry)
{
	unsigned long i, ret = H_SUCCESS;
	unsigned long subpages = 1ULL << (stt->page_shift - tbl->it_page_shift);
	unsigned long io_entry = entry * subpages;

	for (i = 0; i < subpages; ++i) {
		ret = kvmppc_tce_iommu_do_unmap(kvm, tbl, io_entry + i);
		if (ret != H_SUCCESS)
			break;
	}

	return ret;
}
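
/*
 * Translates the userspace address @ua to a host physical address via
 * the preregistered-memory database, takes a "mapped" reference and
 * programs the hardware TCE; @ua is remembered in it_userspace so the
 * reference can be dropped on a later unmap.
 */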
long kvmppc_tce_iommu_do_map(struct kvm *kvm, struct iommu_table *tbl,
		unsigned long entry, unsigned long ua,
		enum dma_data_direction dir)
{
	long ret;
	unsigned long hpa;
	__be64 *pua = IOMMU_TABLE_USERSPACE_ENTRY(tbl, entry);
	struct mm_iommu_table_group_mem_t *mem;

	if (!pua)
		/* it_userspace allocation might be delayed */
		return H_TOO_HARD;

	mem = mm_iommu_lookup(kvm->mm, ua, 1ULL << tbl->it_page_shift);
	if (!mem)
		/* This only handles v2 IOMMU type, v1 is handled via ioctl() */
		return H_TOO_HARD;

	if (WARN_ON_ONCE(mm_iommu_ua_to_hpa(mem, ua, tbl->it_page_shift, &hpa)))
		return H_TOO_HARD;

	if (mm_iommu_mapped_inc(mem))
		return H_TOO_HARD;

	ret = iommu_tce_xchg(tbl, entry, &hpa, &dir);
	if (WARN_ON_ONCE(ret)) {
		mm_iommu_mapped_dec(mem);
		return H_TOO_HARD;
	}

	if (dir != DMA_NONE)
		kvmppc_tce_iommu_mapped_dec(kvm, tbl, entry);

	*pua = cpu_to_be64(ua);

	return 0;
}
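
/*
 * Maps all hardware subpages covered by one guest TCE entry, advancing
 * the userspace address by one IOMMU page per iteration.
 */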
static long kvmppc_tce_iommu_map(struct kvm *kvm,
		struct kvmppc_spapr_tce_table *stt, struct iommu_table *tbl,
		unsigned long entry, unsigned long ua,
		enum dma_data_direction dir)
{
	unsigned long i, pgoff, ret = H_SUCCESS;
	unsigned long subpages = 1ULL << (stt->page_shift - tbl->it_page_shift);
	unsigned long io_entry = entry * subpages;

	for (i = 0, pgoff = 0; i < subpages;
			++i, pgoff += IOMMU_PAGE_SIZE(tbl)) {

		ret = kvmppc_tce_iommu_do_map(kvm, tbl,
				io_entry + i, ua + pgoff, dir);
		if (ret != H_SUCCESS)
			break;
	}

	return ret;
}
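
/*
 * Virtual mode handler for the H_PUT_TCE hypercall: validates the
 * request, updates every hardware table attached to the LIOBN and
 * finally stores the TCE in the guest view of the table.
 */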
long kvmppc_h_put_tce(struct kvm_vcpu *vcpu, unsigned long liobn,
		      unsigned long ioba, unsigned long tce)
{
	struct kvmppc_spapr_tce_table *stt;
	long ret, idx;
	struct kvmppc_spapr_tce_iommu_table *stit;
	unsigned long entry, ua = 0;
	enum dma_data_direction dir;

	/* udbg_printf("H_PUT_TCE(): liobn=0x%lx ioba=0x%lx, tce=0x%lx\n", */
	/* 	    liobn, ioba, tce); */

	stt = kvmppc_find_table(vcpu->kvm, liobn);
	if (!stt)
		return H_TOO_HARD;

	ret = kvmppc_ioba_validate(stt, ioba, 1);
	if (ret != H_SUCCESS)
		return ret;

	ret = kvmppc_tce_validate(stt, tce);
	if (ret != H_SUCCESS)
		return ret;

	dir = iommu_tce_direction(tce);

	idx = srcu_read_lock(&vcpu->kvm->srcu);

	if ((dir != DMA_NONE) && kvmppc_gpa_to_ua(vcpu->kvm,
			tce & ~(TCE_PCI_READ | TCE_PCI_WRITE), &ua, NULL)) {
		ret = H_PARAMETER;
		goto unlock_exit;
	}

	entry = ioba >> stt->page_shift;

	list_for_each_entry_lockless(stit, &stt->iommu_tables, next) {
		if (dir == DMA_NONE)
			ret = kvmppc_tce_iommu_unmap(vcpu->kvm, stt,
					stit->tbl, entry);
		else
			ret = kvmppc_tce_iommu_map(vcpu->kvm, stt, stit->tbl,
					entry, ua, dir);

		if (ret == H_SUCCESS)
			continue;

		if (ret == H_TOO_HARD)
			goto unlock_exit;

		WARN_ON_ONCE(1);
		kvmppc_clear_tce(stit->tbl, entry);
	}

	kvmppc_tce_put(stt, entry, tce);

unlock_exit:
	srcu_read_unlock(&vcpu->kvm->srcu, idx);

	return ret;
}
EXPORT_SYMBOL_GPL(kvmppc_h_put_tce);
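
/*
 * Virtual mode handler for H_PUT_TCE_INDIRECT: reads up to 512 TCEs
 * from a 4K-aligned list in guest memory and applies each of them as
 * H_PUT_TCE would.
 */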
long kvmppc_h_put_tce_indirect(struct kvm_vcpu *vcpu,
		unsigned long liobn, unsigned long ioba,
		unsigned long tce_list, unsigned long npages)
{
	struct kvmppc_spapr_tce_table *stt;
	long i, ret = H_SUCCESS, idx;
	unsigned long entry, ua = 0;
	u64 __user *tces;
	u64 tce;
	struct kvmppc_spapr_tce_iommu_table *stit;

	stt = kvmppc_find_table(vcpu->kvm, liobn);
	if (!stt)
		return H_TOO_HARD;

	entry = ioba >> stt->page_shift;
	/*
	 * SPAPR spec says that the maximum size of the list is 512 TCEs
	 * so the whole table fits in 4K page
	 */
	if (npages > 512)
		return H_PARAMETER;

	if (tce_list & (SZ_4K - 1))
		return H_PARAMETER;

	ret = kvmppc_ioba_validate(stt, ioba, npages);
	if (ret != H_SUCCESS)
		return ret;

	idx = srcu_read_lock(&vcpu->kvm->srcu);
	if (kvmppc_gpa_to_ua(vcpu->kvm, tce_list, &ua, NULL)) {
		ret = H_TOO_HARD;
		goto unlock_exit;
	}
	tces = (u64 __user *) ua;

	for (i = 0; i < npages; ++i) {
		if (get_user(tce, tces + i)) {
			ret = H_TOO_HARD;
			goto unlock_exit;
		}
		tce = be64_to_cpu(tce);

		ret = kvmppc_tce_validate(stt, tce);
		if (ret != H_SUCCESS)
			goto unlock_exit;

		if (kvmppc_gpa_to_ua(vcpu->kvm,
				tce & ~(TCE_PCI_READ | TCE_PCI_WRITE),
				&ua, NULL)) {
			ret = H_PARAMETER;
			goto unlock_exit;
		}

		list_for_each_entry_lockless(stit, &stt->iommu_tables, next) {
			ret = kvmppc_tce_iommu_map(vcpu->kvm, stt,
					stit->tbl, entry + i, ua,
					iommu_tce_direction(tce));

			if (ret == H_SUCCESS)
				continue;

			if (ret == H_TOO_HARD)
				goto unlock_exit;

			WARN_ON_ONCE(1);
			/* Clear the entry that actually failed, not the base */
			kvmppc_clear_tce(stit->tbl, entry + i);
		}

		kvmppc_tce_put(stt, entry + i, tce);
	}

unlock_exit:
	srcu_read_unlock(&vcpu->kvm->srcu, idx);

	return ret;
}
EXPORT_SYMBOL_GPL(kvmppc_h_put_tce_indirect);
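
/*
 * Virtual mode handler for H_STUFF_TCE: sets @npages consecutive
 * entries to @tce_value. The permission bits must be clear, so this
 * effectively clears DMA mappings while still allowing a non-zero
 * poison value in the guest view for debugging.
 */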
long kvmppc_h_stuff_tce(struct kvm_vcpu *vcpu,
		unsigned long liobn, unsigned long ioba,
		unsigned long tce_value, unsigned long npages)
{
	struct kvmppc_spapr_tce_table *stt;
	long i, ret;
	struct kvmppc_spapr_tce_iommu_table *stit;

	stt = kvmppc_find_table(vcpu->kvm, liobn);
	if (!stt)
		return H_TOO_HARD;

	ret = kvmppc_ioba_validate(stt, ioba, npages);
	if (ret != H_SUCCESS)
		return ret;

	/* Check permission bits only to allow userspace poison TCE for debug */
	if (tce_value & (TCE_PCI_WRITE | TCE_PCI_READ))
		return H_PARAMETER;

	list_for_each_entry_lockless(stit, &stt->iommu_tables, next) {
		unsigned long entry = ioba >> stt->page_shift;

		for (i = 0; i < npages; ++i) {
			ret = kvmppc_tce_iommu_unmap(vcpu->kvm, stt,
					stit->tbl, entry + i);

			if (ret == H_SUCCESS)
				continue;

			if (ret == H_TOO_HARD)
				return ret;

			WARN_ON_ONCE(1);
			/* Clear the entry that actually failed, not the base */
			kvmppc_clear_tce(stit->tbl, entry + i);
		}
	}

	for (i = 0; i < npages; ++i, ioba += (1ULL << stt->page_shift))
		kvmppc_tce_put(stt, ioba >> stt->page_shift, tce_value);

	return H_SUCCESS;
}
EXPORT_SYMBOL_GPL(kvmppc_h_stuff_tce);