// SPDX-License-Identifier: GPL-2.0-only
/******************************************************************************
 * privcmd.c
 *
 * Interface to privileged domain-0 commands.
 *
 * Copyright (c) 2002-2004, K A Fraser, B Dragovic
 */

#define pr_fmt(fmt) "xen:" KBUILD_MODNAME ": " fmt

#include <linux/eventfd.h>
#include <linux/file.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/poll.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/srcu.h>
#include <linux/string.h>
#include <linux/workqueue.h>
#include <linux/errno.h>
#include <linux/mm.h>
#include <linux/mman.h>
#include <linux/uaccess.h>
#include <linux/swap.h>
#include <linux/highmem.h>
#include <linux/pagemap.h>
#include <linux/seq_file.h>
#include <linux/miscdevice.h>
#include <linux/moduleparam.h>
#include <linux/virtio_mmio.h>

#include <asm/xen/hypervisor.h>
#include <asm/xen/hypercall.h>

#include <xen/xen.h>
#include <xen/events.h>
#include <xen/privcmd.h>
#include <xen/interface/xen.h>
#include <xen/interface/memory.h>
#include <xen/interface/hvm/dm_op.h>
#include <xen/interface/hvm/ioreq.h>
#include <xen/features.h>
#include <xen/page.h>
#include <xen/xen-ops.h>
#include <xen/balloon.h>
#ifdef CONFIG_XEN_ACPI
#include <xen/acpi.h>
#endif

#include "privcmd.h"

MODULE_DESCRIPTION("Xen hypercall passthrough driver");
MODULE_LICENSE("GPL");

#define PRIV_VMA_LOCKED ((void *)1)

static unsigned int privcmd_dm_op_max_num = 16;
module_param_named(dm_op_max_nr_bufs, privcmd_dm_op_max_num, uint, 0644);
MODULE_PARM_DESC(dm_op_max_nr_bufs,
                 "Maximum number of buffers per dm_op hypercall");

static unsigned int privcmd_dm_op_buf_max_size = 4096;
module_param_named(dm_op_buf_max_size, privcmd_dm_op_buf_max_size, uint,
                   0644);
MODULE_PARM_DESC(dm_op_buf_max_size,
                 "Maximum size of a dm_op hypercall buffer");

struct privcmd_data {
        domid_t domid;
};

static int privcmd_vma_range_is_mapped(
                struct vm_area_struct *vma,
                unsigned long addr,
                unsigned long nr_pages);

static long privcmd_ioctl_hypercall(struct file *file, void __user *udata)
{
        struct privcmd_data *data = file->private_data;
        struct privcmd_hypercall hypercall;
        long ret;

        /* Disallow arbitrary hypercalls if restricted */
        if (data->domid != DOMID_INVALID)
                return -EPERM;

        if (copy_from_user(&hypercall, udata, sizeof(hypercall)))
                return -EFAULT;

        xen_preemptible_hcall_begin();
        ret = privcmd_call(hypercall.op,
                           hypercall.arg[0], hypercall.arg[1],
                           hypercall.arg[2], hypercall.arg[3],
                           hypercall.arg[4]);
        xen_preemptible_hcall_end();

        return ret;
}
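
/*
 * Free every page previously gathered by gather_array() and reset the list
 * head so the caller can simply discard or reuse it.
 */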
static void free_page_list(struct list_head *pages)
{
        struct page *p, *n;

        list_for_each_entry_safe(p, n, pages, lru)
                __free_page(p);

        INIT_LIST_HEAD(pages);
}

/*
 * Given an array of items in userspace, return a list of pages
 * containing the data.  If copying fails, either because of memory
 * allocation failure or a problem reading user memory, return an
 * error code; it's up to the caller to dispose of any partial list.
 */
static int gather_array(struct list_head *pagelist,
                        unsigned nelem, size_t size,
                        const void __user *data)
{
        unsigned pageidx;
        void *pagedata;
        int ret;

        if (size > PAGE_SIZE)
                return 0;

        pageidx = PAGE_SIZE;
        pagedata = NULL;        /* quiet, gcc */
        while (nelem--) {
                if (pageidx > PAGE_SIZE-size) {
                        struct page *page = alloc_page(GFP_KERNEL);

                        ret = -ENOMEM;
                        if (page == NULL)
                                goto fail;

                        pagedata = page_address(page);

                        list_add_tail(&page->lru, pagelist);
                        pageidx = 0;
                }

                ret = -EFAULT;
                if (copy_from_user(pagedata + pageidx, data, size))
                        goto fail;

                data += size;
                pageidx += size;
        }

        ret = 0;

fail:
        return ret;
}

/*
 * Call function "fn" on each element of the array fragmented
 * over a list of pages.
 */
static int traverse_pages(unsigned nelem, size_t size,
                          struct list_head *pos,
                          int (*fn)(void *data, void *state),
                          void *state)
{
        void *pagedata;
        unsigned pageidx;
        int ret = 0;

        BUG_ON(size > PAGE_SIZE);

        pageidx = PAGE_SIZE;
        pagedata = NULL;        /* hush, gcc */

        while (nelem--) {
                if (pageidx > PAGE_SIZE-size) {
                        struct page *page;

                        pos = pos->next;
                        page = list_entry(pos, struct page, lru);
                        pagedata = page_address(page);
                        pageidx = 0;
                }

                ret = (*fn)(pagedata + pageidx, state);
                if (ret)
                        break;
                pageidx += size;
        }

        return ret;
}

/*
 * Similar to traverse_pages, but use each page as a "block" of
 * data to be processed as one unit.
 */
static int traverse_pages_block(unsigned nelem, size_t size,
                                struct list_head *pos,
                                int (*fn)(void *data, int nr, void *state),
                                void *state)
{
        void *pagedata;
        int ret = 0;

        BUG_ON(size > PAGE_SIZE);

        while (nelem) {
                int nr = (PAGE_SIZE/size);
                struct page *page;

                if (nr > nelem)
                        nr = nelem;

                pos = pos->next;
                page = list_entry(pos, struct page, lru);
                pagedata = page_address(page);
                ret = (*fn)(pagedata, nr, state);
                if (ret)
                        break;
                nelem -= nr;
        }

        return ret;
}
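
/*
 * Per-call state for IOCTL_PRIVCMD_MMAP: the next expected virtual address,
 * the target VMA and the foreign domain whose frames are being mapped.
 */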
struct mmap_gfn_state {
        unsigned long va;
        struct vm_area_struct *vma;
        domid_t domain;
};

static int mmap_gfn_range(void *data, void *state)
{
        struct privcmd_mmap_entry *msg = data;
        struct mmap_gfn_state *st = state;
        struct vm_area_struct *vma = st->vma;
        int rc;

        /* Do not allow range to wrap the address space. */
        if ((msg->npages > (LONG_MAX >> PAGE_SHIFT)) ||
            ((unsigned long)(msg->npages << PAGE_SHIFT) >= -st->va))
                return -EINVAL;

        /* Range chunks must be contiguous in va space. */
        if ((msg->va != st->va) ||
            ((msg->va+(msg->npages<<PAGE_SHIFT)) > vma->vm_end))
                return -EINVAL;

        rc = xen_remap_domain_gfn_range(vma,
                                        msg->va & PAGE_MASK,
                                        msg->mfn, msg->npages,
                                        vma->vm_page_prot,
                                        st->domain, NULL);
        if (rc < 0)
                return rc;

        st->va += msg->npages << PAGE_SHIFT;

        return 0;
}

static long privcmd_ioctl_mmap(struct file *file, void __user *udata)
{
        struct privcmd_data *data = file->private_data;
        struct privcmd_mmap mmapcmd;
        struct mm_struct *mm = current->mm;
        struct vm_area_struct *vma;
        int rc;
        LIST_HEAD(pagelist);
        struct mmap_gfn_state state;

        /* We only support privcmd_ioctl_mmap_batch for non-auto-translated. */
        if (xen_feature(XENFEAT_auto_translated_physmap))
                return -ENOSYS;

        if (copy_from_user(&mmapcmd, udata, sizeof(mmapcmd)))
                return -EFAULT;

        /* If restriction is in place, check the domid matches */
        if (data->domid != DOMID_INVALID && data->domid != mmapcmd.dom)
                return -EPERM;

        rc = gather_array(&pagelist,
                          mmapcmd.num, sizeof(struct privcmd_mmap_entry),
                          mmapcmd.entry);

        if (rc || list_empty(&pagelist))
                goto out;

        mmap_write_lock(mm);

        {
                struct page *page = list_first_entry(&pagelist,
                                                     struct page, lru);
                struct privcmd_mmap_entry *msg = page_address(page);

                vma = vma_lookup(mm, msg->va);
                rc = -EINVAL;

                if (!vma || (msg->va != vma->vm_start) || vma->vm_private_data)
                        goto out_up;
                vma->vm_private_data = PRIV_VMA_LOCKED;
        }

        state.va = vma->vm_start;
        state.vma = vma;
        state.domain = mmapcmd.dom;

        rc = traverse_pages(mmapcmd.num, sizeof(struct privcmd_mmap_entry),
                            &pagelist,
                            mmap_gfn_range, &state);

out_up:
        mmap_write_unlock(mm);

out:
        free_page_list(&pagelist);

        return rc;
}
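
/*
 * State shared between the two passes of IOCTL_PRIVCMD_MMAPBATCH{,_V2}:
 * the first pass maps foreign frames, the second writes any per-frame
 * errors back to user space.
 */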
struct mmap_batch_state {
        domid_t domain;
        unsigned long va;
        struct vm_area_struct *vma;
        int index;
        /* A tristate:
         *      0 for no errors
         *      1 if at least one error has happened (and no
         *          -ENOENT errors have happened)
         *      -ENOENT if at least 1 -ENOENT has happened.
         */
        int global_error;
        int version;

        /* User-space gfn array to store errors in the second pass for V1. */
        xen_pfn_t __user *user_gfn;
        /* User-space int array to store errors in the second pass for V2. */
        int __user *user_err;
};

/* auto translated dom0 note: if domU being created is PV, then gfn is
 * mfn(addr on bus). If it's auto xlated, then gfn is pfn (input to HAP).
 */
static int mmap_batch_fn(void *data, int nr, void *state)
{
        xen_pfn_t *gfnp = data;
        struct mmap_batch_state *st = state;
        struct vm_area_struct *vma = st->vma;
        struct page **pages = vma->vm_private_data;
        struct page **cur_pages = NULL;
        int ret;

        if (xen_feature(XENFEAT_auto_translated_physmap))
                cur_pages = &pages[st->index];

        BUG_ON(nr < 0);
        ret = xen_remap_domain_gfn_array(st->vma, st->va & PAGE_MASK, gfnp, nr,
                                         (int *)gfnp, st->vma->vm_page_prot,
                                         st->domain, cur_pages);

        /* Adjust the global_error? */
        if (ret != nr) {
                if (ret == -ENOENT)
                        st->global_error = -ENOENT;
                else {
                        /* Record that at least one error has happened. */
                        if (st->global_error == 0)
                                st->global_error = 1;
                }
        }
        st->va += XEN_PAGE_SIZE * nr;
        st->index += nr / XEN_PFN_PER_PAGE;

        return 0;
}

static int mmap_return_error(int err, struct mmap_batch_state *st)
{
        int ret;

        if (st->version == 1) {
                if (err) {
                        xen_pfn_t gfn;

                        ret = get_user(gfn, st->user_gfn);
                        if (ret < 0)
                                return ret;
                        /*
                         * V1 encodes the error codes in the 32bit top
                         * nibble of the gfn (with its known
                         * limitations vis-a-vis 64 bit callers).
                         */
                        gfn |= (err == -ENOENT) ?
                                PRIVCMD_MMAPBATCH_PAGED_ERROR :
                                PRIVCMD_MMAPBATCH_MFN_ERROR;
                        return __put_user(gfn, st->user_gfn++);
                } else
                        st->user_gfn++;
        } else { /* st->version == 2 */
                if (err)
                        return __put_user(err, st->user_err++);
                else
                        st->user_err++;
        }

        return 0;
}

static int mmap_return_errors(void *data, int nr, void *state)
{
        struct mmap_batch_state *st = state;
        int *errs = data;
        int i;
        int ret;

        for (i = 0; i < nr; i++) {
                ret = mmap_return_error(errs[i], st);
                if (ret < 0)
                        return ret;
        }

        return 0;
}

/* Allocate pfns that are then mapped with gfns from foreign domid. Update
 * the vma with the page info to use later.
 * Returns: 0 if success, otherwise -errno
 */
static int alloc_empty_pages(struct vm_area_struct *vma, int numpgs)
{
        int rc;
        struct page **pages;

        pages = kvcalloc(numpgs, sizeof(pages[0]), GFP_KERNEL);
        if (pages == NULL)
                return -ENOMEM;

        rc = xen_alloc_unpopulated_pages(numpgs, pages);
        if (rc != 0) {
                pr_warn("%s Could not alloc %d pfns rc:%d\n", __func__,
                        numpgs, rc);
                kvfree(pages);
                return -ENOMEM;
        }
        BUG_ON(vma->vm_private_data != NULL);
        vma->vm_private_data = pages;

        return 0;
}

static const struct vm_operations_struct privcmd_vm_ops;

static long privcmd_ioctl_mmap_batch(
        struct file *file, void __user *udata, int version)
{
        struct privcmd_data *data = file->private_data;
        int ret;
        struct privcmd_mmapbatch_v2 m;
        struct mm_struct *mm = current->mm;
        struct vm_area_struct *vma;
        unsigned long nr_pages;
        LIST_HEAD(pagelist);
        struct mmap_batch_state state;

        switch (version) {
        case 1:
                if (copy_from_user(&m, udata, sizeof(struct privcmd_mmapbatch)))
                        return -EFAULT;
                /* Returns per-frame error in m.arr. */
                m.err = NULL;
                if (!access_ok(m.arr, m.num * sizeof(*m.arr)))
                        return -EFAULT;
                break;
        case 2:
                if (copy_from_user(&m, udata, sizeof(struct privcmd_mmapbatch_v2)))
                        return -EFAULT;
                /* Returns per-frame error code in m.err. */
                if (!access_ok(m.err, m.num * (sizeof(*m.err))))
                        return -EFAULT;
                break;
        default:
                return -EINVAL;
        }

        /* If restriction is in place, check the domid matches */
        if (data->domid != DOMID_INVALID && data->domid != m.dom)
                return -EPERM;

        nr_pages = DIV_ROUND_UP(m.num, XEN_PFN_PER_PAGE);
        if ((m.num <= 0) || (nr_pages > (LONG_MAX >> PAGE_SHIFT)))
                return -EINVAL;

        ret = gather_array(&pagelist, m.num, sizeof(xen_pfn_t), m.arr);

        if (ret)
                goto out;
        if (list_empty(&pagelist)) {
                ret = -EINVAL;
                goto out;
        }

        if (version == 2) {
                /* Zero error array now to only copy back actual errors. */
                if (clear_user(m.err, sizeof(int) * m.num)) {
                        ret = -EFAULT;
                        goto out;
                }
        }

        mmap_write_lock(mm);

        vma = find_vma(mm, m.addr);
        if (!vma ||
            vma->vm_ops != &privcmd_vm_ops) {
                ret = -EINVAL;
                goto out_unlock;
        }

        /*
         * Caller must either:
         *
         * Map the whole VMA range, which will also allocate all the
         * pages required for the auto_translated_physmap case.
         *
         * Or
         *
         * Map unmapped holes left from a previous map attempt (e.g.,
         * because those foreign frames were previously paged out).
         */
        if (vma->vm_private_data == NULL) {
                if (m.addr != vma->vm_start ||
                    m.addr + (nr_pages << PAGE_SHIFT) != vma->vm_end) {
                        ret = -EINVAL;
                        goto out_unlock;
                }
                if (xen_feature(XENFEAT_auto_translated_physmap)) {
                        ret = alloc_empty_pages(vma, nr_pages);
                        if (ret < 0)
                                goto out_unlock;
                } else
                        vma->vm_private_data = PRIV_VMA_LOCKED;
        } else {
                if (m.addr < vma->vm_start ||
                    m.addr + (nr_pages << PAGE_SHIFT) > vma->vm_end) {
                        ret = -EINVAL;
                        goto out_unlock;
                }
                if (privcmd_vma_range_is_mapped(vma, m.addr, nr_pages)) {
                        ret = -EINVAL;
                        goto out_unlock;
                }
        }

        state.domain = m.dom;
        state.vma = vma;
        state.va = m.addr;
        state.index = 0;
        state.global_error = 0;
        state.version = version;

        BUILD_BUG_ON(((PAGE_SIZE / sizeof(xen_pfn_t)) % XEN_PFN_PER_PAGE) != 0);
        /* mmap_batch_fn guarantees ret == 0 */
        BUG_ON(traverse_pages_block(m.num, sizeof(xen_pfn_t),
                                    &pagelist, mmap_batch_fn, &state));

        mmap_write_unlock(mm);

        if (state.global_error) {
                /* Write back errors in second pass. */
                state.user_gfn = (xen_pfn_t *)m.arr;
                state.user_err = m.err;
                ret = traverse_pages_block(m.num, sizeof(xen_pfn_t),
                                           &pagelist, mmap_return_errors,
                                           &state);
        } else
                ret = 0;

        /* If we have not had any EFAULT-like global errors then set the global
         * error to -ENOENT if necessary. */
        if ((ret == 0) && (state.global_error == -ENOENT))
                ret = -ENOENT;

out:
        free_page_list(&pagelist);
        return ret;

out_unlock:
        mmap_write_unlock(mm);
        goto out;
}
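
/*
 * Pin the user pages backing each dm_op buffer so the hypervisor can access
 * them while the hypercall is in flight.  *pinned is updated as pages are
 * pinned, so the caller can unpin everything even if this fails part-way.
 */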
static int lock_pages(
        struct privcmd_dm_op_buf kbufs[], unsigned int num,
        struct page *pages[], unsigned int nr_pages, unsigned int *pinned)
{
        unsigned int i, off = 0;

        for (i = 0; i < num; ) {
                unsigned int requested;
                int page_count;

                requested = DIV_ROUND_UP(
                        offset_in_page(kbufs[i].uptr) + kbufs[i].size,
                        PAGE_SIZE) - off;
                if (requested > nr_pages)
                        return -ENOSPC;

                page_count = pin_user_pages_fast(
                        (unsigned long)kbufs[i].uptr + off * PAGE_SIZE,
                        requested, FOLL_WRITE, pages);
                if (page_count <= 0)
                        return page_count ? : -EFAULT;

                *pinned += page_count;
                nr_pages -= page_count;
                pages += page_count;

                off = (requested == page_count) ? 0 : off + page_count;
                i += !off;
        }

        return 0;
}

static void unlock_pages(struct page *pages[], unsigned int nr_pages)
{
        unpin_user_pages_dirty_lock(pages, nr_pages, true);
}

static long privcmd_ioctl_dm_op(struct file *file, void __user *udata)
{
        struct privcmd_data *data = file->private_data;
        struct privcmd_dm_op kdata;
        struct privcmd_dm_op_buf *kbufs;
        unsigned int nr_pages = 0;
        struct page **pages = NULL;
        struct xen_dm_op_buf *xbufs = NULL;
        unsigned int i;
        long rc;
        unsigned int pinned = 0;

        if (copy_from_user(&kdata, udata, sizeof(kdata)))
                return -EFAULT;

        /* If restriction is in place, check the domid matches */
        if (data->domid != DOMID_INVALID && data->domid != kdata.dom)
                return -EPERM;

        if (kdata.num == 0)
                return 0;

        if (kdata.num > privcmd_dm_op_max_num)
                return -E2BIG;

        kbufs = kcalloc(kdata.num, sizeof(*kbufs), GFP_KERNEL);
        if (!kbufs)
                return -ENOMEM;

        if (copy_from_user(kbufs, kdata.ubufs,
                           sizeof(*kbufs) * kdata.num)) {
                rc = -EFAULT;
                goto out;
        }

        for (i = 0; i < kdata.num; i++) {
                if (kbufs[i].size > privcmd_dm_op_buf_max_size) {
                        rc = -E2BIG;
                        goto out;
                }

                if (!access_ok(kbufs[i].uptr,
                               kbufs[i].size)) {
                        rc = -EFAULT;
                        goto out;
                }

                nr_pages += DIV_ROUND_UP(
                        offset_in_page(kbufs[i].uptr) + kbufs[i].size,
                        PAGE_SIZE);
        }

        pages = kcalloc(nr_pages, sizeof(*pages), GFP_KERNEL);
        if (!pages) {
                rc = -ENOMEM;
                goto out;
        }

        xbufs = kcalloc(kdata.num, sizeof(*xbufs), GFP_KERNEL);
        if (!xbufs) {
                rc = -ENOMEM;
                goto out;
        }

        rc = lock_pages(kbufs, kdata.num, pages, nr_pages, &pinned);
        if (rc < 0)
                goto out;

        for (i = 0; i < kdata.num; i++) {
                set_xen_guest_handle(xbufs[i].h, kbufs[i].uptr);
                xbufs[i].size = kbufs[i].size;
        }

        xen_preemptible_hcall_begin();
        rc = HYPERVISOR_dm_op(kdata.dom, kdata.num, xbufs);
        xen_preemptible_hcall_end();

out:
        unlock_pages(pages, pinned);
        kfree(xbufs);
        kfree(pages);
        kfree(kbufs);

        return rc;
}
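
/*
 * IOCTL_PRIVCMD_RESTRICT: restrict this file descriptor to a single domid.
 * Once set, the restriction cannot be changed, and ioctls targeting any
 * other domain fail with -EPERM.
 */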
static long privcmd_ioctl_restrict(struct file *file, void __user *udata)
{
        struct privcmd_data *data = file->private_data;
        domid_t dom;

        if (copy_from_user(&dom, udata, sizeof(dom)))
                return -EFAULT;

        /* Set restriction to the specified domain, or check it matches */
        if (data->domid == DOMID_INVALID)
                data->domid = dom;
        else if (data->domid != dom)
                return -EINVAL;

        return 0;
}

static long privcmd_ioctl_mmap_resource(struct file *file,
                                struct privcmd_mmap_resource __user *udata)
{
        struct privcmd_data *data = file->private_data;
        struct mm_struct *mm = current->mm;
        struct vm_area_struct *vma;
        struct privcmd_mmap_resource kdata;
        xen_pfn_t *pfns = NULL;
        struct xen_mem_acquire_resource xdata = { };
        int rc;

        if (copy_from_user(&kdata, udata, sizeof(kdata)))
                return -EFAULT;

        /* If restriction is in place, check the domid matches */
        if (data->domid != DOMID_INVALID && data->domid != kdata.dom)
                return -EPERM;

        /* Both fields must be set or unset */
        if (!!kdata.addr != !!kdata.num)
                return -EINVAL;

        xdata.domid = kdata.dom;
        xdata.type = kdata.type;
        xdata.id = kdata.id;

        if (!kdata.addr && !kdata.num) {
                /* Query the size of the resource. */
                rc = HYPERVISOR_memory_op(XENMEM_acquire_resource, &xdata);
                if (rc)
                        return rc;
                return __put_user(xdata.nr_frames, &udata->num);
        }

        mmap_write_lock(mm);

        vma = find_vma(mm, kdata.addr);
        if (!vma || vma->vm_ops != &privcmd_vm_ops) {
                rc = -EINVAL;
                goto out;
        }

        pfns = kcalloc(kdata.num, sizeof(*pfns), GFP_KERNEL | __GFP_NOWARN);
        if (!pfns) {
                rc = -ENOMEM;
                goto out;
        }

        if (IS_ENABLED(CONFIG_XEN_AUTO_XLATE) &&
            xen_feature(XENFEAT_auto_translated_physmap)) {
                unsigned int nr = DIV_ROUND_UP(kdata.num, XEN_PFN_PER_PAGE);
                struct page **pages;
                unsigned int i;

                rc = alloc_empty_pages(vma, nr);
                if (rc < 0)
                        goto out;

                pages = vma->vm_private_data;

                for (i = 0; i < kdata.num; i++) {
                        xen_pfn_t pfn =
                                page_to_xen_pfn(pages[i / XEN_PFN_PER_PAGE]);

                        pfns[i] = pfn + (i % XEN_PFN_PER_PAGE);
                }
        } else
                vma->vm_private_data = PRIV_VMA_LOCKED;

        xdata.frame = kdata.idx;
        xdata.nr_frames = kdata.num;
        set_xen_guest_handle(xdata.frame_list, pfns);

        xen_preemptible_hcall_begin();
        rc = HYPERVISOR_memory_op(XENMEM_acquire_resource, &xdata);
        xen_preemptible_hcall_end();

        if (rc)
                goto out;

        if (IS_ENABLED(CONFIG_XEN_AUTO_XLATE) &&
            xen_feature(XENFEAT_auto_translated_physmap)) {
                rc = xen_remap_vma_range(vma, kdata.addr, kdata.num << PAGE_SHIFT);
        } else {
                unsigned int domid =
                        (xdata.flags & XENMEM_rsrc_acq_caller_owned) ?
                        DOMID_SELF : kdata.dom;
                int num, *errs = (int *)pfns;

                BUILD_BUG_ON(sizeof(*errs) > sizeof(*pfns));
                num = xen_remap_domain_mfn_array(vma,
                                                 kdata.addr & PAGE_MASK,
                                                 pfns, kdata.num, errs,
                                                 vma->vm_page_prot,
                                                 domid);
                if (num < 0)
                        rc = num;
                else if (num != kdata.num) {
                        unsigned int i;

                        for (i = 0; i < num; i++) {
                                rc = errs[i];
                                if (rc < 0)
                                        break;
                        }
                } else
                        rc = 0;
        }

out:
        mmap_write_unlock(mm);
        kfree(pfns);
        return rc;
}

static long privcmd_ioctl_pcidev_get_gsi(struct file *file, void __user *udata)
{
#if defined(CONFIG_XEN_ACPI)
        int rc;
        struct privcmd_pcidev_get_gsi kdata;

        if (copy_from_user(&kdata, udata, sizeof(kdata)))
                return -EFAULT;

        rc = xen_acpi_get_gsi_from_sbdf(kdata.sbdf);
        if (rc < 0)
                return rc;

        kdata.gsi = rc;
        if (copy_to_user(udata, &kdata, sizeof(kdata)))
                return -EFAULT;

        return 0;
#else
        return -EINVAL;
#endif
}

#ifdef CONFIG_XEN_PRIVCMD_EVENTFD
/* Irqfd support */
static struct workqueue_struct *irqfd_cleanup_wq;
static DEFINE_SPINLOCK(irqfds_lock);
DEFINE_STATIC_SRCU(irqfds_srcu);
static LIST_HEAD(irqfds_list);
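
/*
 * One registered irqfd: whenever the eventfd is signalled, the dm_op buffer
 * stored after this structure is submitted to the hypervisor for @dom
 * (typically to raise an interrupt in the guest).
 */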
struct privcmd_kernel_irqfd {
        struct xen_dm_op_buf xbufs;
        domid_t dom;
        bool error;
        struct eventfd_ctx *eventfd;
        struct work_struct shutdown;
        wait_queue_entry_t wait;
        struct list_head list;
        poll_table pt;
};

static void irqfd_deactivate(struct privcmd_kernel_irqfd *kirqfd)
{
        lockdep_assert_held(&irqfds_lock);

        list_del_init(&kirqfd->list);
        queue_work(irqfd_cleanup_wq, &kirqfd->shutdown);
}

static void irqfd_shutdown(struct work_struct *work)
{
        struct privcmd_kernel_irqfd *kirqfd =
                container_of(work, struct privcmd_kernel_irqfd, shutdown);
        u64 cnt;

        /* Make sure irqfd has been initialized in assign path */
        synchronize_srcu(&irqfds_srcu);

        eventfd_ctx_remove_wait_queue(kirqfd->eventfd, &kirqfd->wait, &cnt);
        eventfd_ctx_put(kirqfd->eventfd);
        kfree(kirqfd);
}

static void irqfd_inject(struct privcmd_kernel_irqfd *kirqfd)
{
        u64 cnt;
        long rc;

        eventfd_ctx_do_read(kirqfd->eventfd, &cnt);

        xen_preemptible_hcall_begin();
        rc = HYPERVISOR_dm_op(kirqfd->dom, 1, &kirqfd->xbufs);
        xen_preemptible_hcall_end();

        /* Don't repeat the error message for consecutive failures */
        if (rc && !kirqfd->error) {
                pr_err("Failed to configure irq for guest domain: %d\n",
                       kirqfd->dom);
        }

        kirqfd->error = rc;
}

static int
irqfd_wakeup(wait_queue_entry_t *wait, unsigned int mode, int sync, void *key)
{
        struct privcmd_kernel_irqfd *kirqfd =
                container_of(wait, struct privcmd_kernel_irqfd, wait);
        __poll_t flags = key_to_poll(key);

        if (flags & EPOLLIN)
                irqfd_inject(kirqfd);

        if (flags & EPOLLHUP) {
                unsigned long flags;

                spin_lock_irqsave(&irqfds_lock, flags);
                irqfd_deactivate(kirqfd);
                spin_unlock_irqrestore(&irqfds_lock, flags);
        }

        return 0;
}

static void
irqfd_poll_func(struct file *file, wait_queue_head_t *wqh, poll_table *pt)
{
        struct privcmd_kernel_irqfd *kirqfd =
                container_of(pt, struct privcmd_kernel_irqfd, pt);

        add_wait_queue_priority(wqh, &kirqfd->wait);
}

static int privcmd_irqfd_assign(struct privcmd_irqfd *irqfd)
{
        struct privcmd_kernel_irqfd *kirqfd, *tmp;
        unsigned long flags;
        __poll_t events;
        struct fd f;
        void *dm_op;
        int ret, idx;

        kirqfd = kzalloc(sizeof(*kirqfd) + irqfd->size, GFP_KERNEL);
        if (!kirqfd)
                return -ENOMEM;
        dm_op = kirqfd + 1;

        if (copy_from_user(dm_op, u64_to_user_ptr(irqfd->dm_op), irqfd->size)) {
                ret = -EFAULT;
                goto error_kfree;
        }

        kirqfd->xbufs.size = irqfd->size;
        set_xen_guest_handle(kirqfd->xbufs.h, dm_op);
        kirqfd->dom = irqfd->dom;
        INIT_WORK(&kirqfd->shutdown, irqfd_shutdown);

        f = fdget(irqfd->fd);
        if (!fd_file(f)) {
                ret = -EBADF;
                goto error_kfree;
        }

        kirqfd->eventfd = eventfd_ctx_fileget(fd_file(f));
        if (IS_ERR(kirqfd->eventfd)) {
                ret = PTR_ERR(kirqfd->eventfd);
                goto error_fd_put;
        }

        /*
         * Install our own custom wake-up handling so we are notified via a
         * callback whenever someone signals the underlying eventfd.
         */
        init_waitqueue_func_entry(&kirqfd->wait, irqfd_wakeup);
        init_poll_funcptr(&kirqfd->pt, irqfd_poll_func);

        spin_lock_irqsave(&irqfds_lock, flags);

        list_for_each_entry(tmp, &irqfds_list, list) {
                if (kirqfd->eventfd == tmp->eventfd) {
                        ret = -EBUSY;
                        spin_unlock_irqrestore(&irqfds_lock, flags);
                        goto error_eventfd;
                }
        }

        idx = srcu_read_lock(&irqfds_srcu);
        list_add_tail(&kirqfd->list, &irqfds_list);
        spin_unlock_irqrestore(&irqfds_lock, flags);

        /*
         * Check if there was an event already pending on the eventfd before we
         * registered, and trigger it as if we didn't miss it.
         */
        events = vfs_poll(fd_file(f), &kirqfd->pt);
        if (events & EPOLLIN)
                irqfd_inject(kirqfd);

        srcu_read_unlock(&irqfds_srcu, idx);

        /*
         * Do not drop the file until the kirqfd is fully initialized, otherwise
         * we might race against the EPOLLHUP.
         */
        fdput(f);
        return 0;

error_eventfd:
        eventfd_ctx_put(kirqfd->eventfd);

error_fd_put:
        fdput(f);

error_kfree:
        kfree(kirqfd);
        return ret;
}

static int privcmd_irqfd_deassign(struct privcmd_irqfd *irqfd)
{
        struct privcmd_kernel_irqfd *kirqfd;
        struct eventfd_ctx *eventfd;
        unsigned long flags;

        eventfd = eventfd_ctx_fdget(irqfd->fd);
        if (IS_ERR(eventfd))
                return PTR_ERR(eventfd);

        spin_lock_irqsave(&irqfds_lock, flags);

        list_for_each_entry(kirqfd, &irqfds_list, list) {
                if (kirqfd->eventfd == eventfd) {
                        irqfd_deactivate(kirqfd);
                        break;
                }
        }

        spin_unlock_irqrestore(&irqfds_lock, flags);

        eventfd_ctx_put(eventfd);

        /*
         * Block until we know all outstanding shutdown jobs have completed so
         * that we guarantee there will not be any more interrupts once this
         * deassign function returns.
         */
        flush_workqueue(irqfd_cleanup_wq);

        return 0;
}

static long privcmd_ioctl_irqfd(struct file *file, void __user *udata)
{
        struct privcmd_data *data = file->private_data;
        struct privcmd_irqfd irqfd;

        if (copy_from_user(&irqfd, udata, sizeof(irqfd)))
                return -EFAULT;

        /* No other flags should be set */
        if (irqfd.flags & ~PRIVCMD_IRQFD_FLAG_DEASSIGN)
                return -EINVAL;

        /* If restriction is in place, check the domid matches */
        if (data->domid != DOMID_INVALID && data->domid != irqfd.dom)
                return -EPERM;

        if (irqfd.flags & PRIVCMD_IRQFD_FLAG_DEASSIGN)
                return privcmd_irqfd_deassign(&irqfd);

        return privcmd_irqfd_assign(&irqfd);
}

static int privcmd_irqfd_init(void)
{
        irqfd_cleanup_wq = alloc_workqueue("privcmd-irqfd-cleanup", 0, 0);
        if (!irqfd_cleanup_wq)
                return -ENOMEM;

        return 0;
}

static void privcmd_irqfd_exit(void)
{
        struct privcmd_kernel_irqfd *kirqfd, *tmp;
        unsigned long flags;

        spin_lock_irqsave(&irqfds_lock, flags);

        list_for_each_entry_safe(kirqfd, tmp, &irqfds_list, list)
                irqfd_deactivate(kirqfd);

        spin_unlock_irqrestore(&irqfds_lock, flags);

        destroy_workqueue(irqfd_cleanup_wq);
}

/* Ioeventfd Support */
#define QUEUE_NOTIFY_VQ_MASK 0xFFFF

static DEFINE_MUTEX(ioreq_lock);
static LIST_HEAD(ioreq_list);

/* per-eventfd structure */
struct privcmd_kernel_ioeventfd {
        struct eventfd_ctx *eventfd;
        struct list_head list;
        u64 addr;
        unsigned int addr_len;
        unsigned int vq;
};

/* per-guest CPU / port structure */
struct ioreq_port {
        int vcpu;
        unsigned int port;
        struct privcmd_kernel_ioreq *kioreq;
};

/* per-guest structure */
struct privcmd_kernel_ioreq {
        domid_t dom;
        unsigned int vcpus;
        u64 uioreq;
        struct ioreq *ioreq;
        spinlock_t lock; /* Protects ioeventfds list */
        struct list_head ioeventfds;
        struct list_head list;
        struct ioreq_port ports[] __counted_by(vcpus);
};
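
/*
 * Event-channel handler: match a pending MMIO write ioreq against the
 * ioeventfds registered for this guest and, on a hit, signal the eventfd
 * and complete the ioreq.
 */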
static irqreturn_t ioeventfd_interrupt(int irq, void *dev_id)
{
        struct ioreq_port *port = dev_id;
        struct privcmd_kernel_ioreq *kioreq = port->kioreq;
        struct ioreq *ioreq = &kioreq->ioreq[port->vcpu];
        struct privcmd_kernel_ioeventfd *kioeventfd;
        unsigned int state = STATE_IOREQ_READY;

        if (ioreq->state != STATE_IOREQ_READY ||
            ioreq->type != IOREQ_TYPE_COPY || ioreq->dir != IOREQ_WRITE)
                return IRQ_NONE;

        /*
         * We need a barrier, smp_mb(), here to ensure reads are finished before
         * `state` is updated. Since the lock implementation ensures that
         * appropriate barrier will be added anyway, we can avoid adding
         * explicit barrier here.
         *
         * Ideally we don't need to update `state` within the locks, but we do
         * that here to avoid adding explicit barrier.
         */
        spin_lock(&kioreq->lock);
        ioreq->state = STATE_IOREQ_INPROCESS;

        list_for_each_entry(kioeventfd, &kioreq->ioeventfds, list) {
                if (ioreq->addr == kioeventfd->addr + VIRTIO_MMIO_QUEUE_NOTIFY &&
                    ioreq->size == kioeventfd->addr_len &&
                    (ioreq->data & QUEUE_NOTIFY_VQ_MASK) == kioeventfd->vq) {
                        eventfd_signal(kioeventfd->eventfd);
                        state = STATE_IORESP_READY;
                        break;
                }
        }
        spin_unlock(&kioreq->lock);

        /*
         * We need a barrier, smp_mb(), here to ensure writes are finished
         * before `state` is updated. Since the lock implementation ensures that
         * appropriate barrier will be added anyway, we can avoid adding
         * explicit barrier here.
         */
        ioreq->state = state;

        if (state == STATE_IORESP_READY) {
                notify_remote_via_evtchn(port->port);
                return IRQ_HANDLED;
        }

        return IRQ_NONE;
}

static void ioreq_free(struct privcmd_kernel_ioreq *kioreq)
{
        struct ioreq_port *ports = kioreq->ports;
        int i;

        lockdep_assert_held(&ioreq_lock);

        list_del(&kioreq->list);

        for (i = kioreq->vcpus - 1; i >= 0; i--)
                unbind_from_irqhandler(irq_from_evtchn(ports[i].port), &ports[i]);

        kfree(kioreq);
}

static
struct privcmd_kernel_ioreq *alloc_ioreq(struct privcmd_ioeventfd *ioeventfd)
{
        struct privcmd_kernel_ioreq *kioreq;
        struct mm_struct *mm = current->mm;
        struct vm_area_struct *vma;
        struct page **pages;
        unsigned int *ports;
        int ret, size, i;

        lockdep_assert_held(&ioreq_lock);

        size = struct_size(kioreq, ports, ioeventfd->vcpus);
        kioreq = kzalloc(size, GFP_KERNEL);
        if (!kioreq)
                return ERR_PTR(-ENOMEM);

        kioreq->dom = ioeventfd->dom;
        kioreq->vcpus = ioeventfd->vcpus;
        kioreq->uioreq = ioeventfd->ioreq;
        spin_lock_init(&kioreq->lock);
        INIT_LIST_HEAD(&kioreq->ioeventfds);

        /* The memory for ioreq server must have been mapped earlier */
        mmap_write_lock(mm);
        vma = find_vma(mm, (unsigned long)ioeventfd->ioreq);
        if (!vma) {
                pr_err("Failed to find vma for ioreq page!\n");
                mmap_write_unlock(mm);
                ret = -EFAULT;
                goto error_kfree;
        }

        pages = vma->vm_private_data;
        kioreq->ioreq = (struct ioreq *)(page_to_virt(pages[0]));
        mmap_write_unlock(mm);

        ports = memdup_array_user(u64_to_user_ptr(ioeventfd->ports),
                                  kioreq->vcpus, sizeof(*ports));
        if (IS_ERR(ports)) {
                ret = PTR_ERR(ports);
                goto error_kfree;
        }

        for (i = 0; i < kioreq->vcpus; i++) {
                kioreq->ports[i].vcpu = i;
                kioreq->ports[i].port = ports[i];
                kioreq->ports[i].kioreq = kioreq;

                ret = bind_evtchn_to_irqhandler_lateeoi(ports[i],
                                ioeventfd_interrupt, IRQF_SHARED, "ioeventfd",
                                &kioreq->ports[i]);
                if (ret < 0)
                        goto error_unbind;
        }

        kfree(ports);

        list_add_tail(&kioreq->list, &ioreq_list);

        return kioreq;

error_unbind:
        while (--i >= 0)
                unbind_from_irqhandler(irq_from_evtchn(ports[i]), &kioreq->ports[i]);

        kfree(ports);

error_kfree:
        kfree(kioreq);
        return ERR_PTR(ret);
}

static struct privcmd_kernel_ioreq *
get_ioreq(struct privcmd_ioeventfd *ioeventfd, struct eventfd_ctx *eventfd)
{
        struct privcmd_kernel_ioreq *kioreq;
        unsigned long flags;

        list_for_each_entry(kioreq, &ioreq_list, list) {
                struct privcmd_kernel_ioeventfd *kioeventfd;

                /*
                 * kioreq fields can be accessed here without a lock as they are
                 * never updated after being added to the ioreq_list.
                 */
                if (kioreq->uioreq != ioeventfd->ioreq) {
                        continue;
                } else if (kioreq->dom != ioeventfd->dom ||
                           kioreq->vcpus != ioeventfd->vcpus) {
                        pr_err("Invalid ioeventfd configuration mismatch, dom (%u vs %u), vcpus (%u vs %u)\n",
                               kioreq->dom, ioeventfd->dom, kioreq->vcpus,
                               ioeventfd->vcpus);
                        return ERR_PTR(-EINVAL);
                }

                /* Look for a duplicate eventfd for the same guest */
                spin_lock_irqsave(&kioreq->lock, flags);
                list_for_each_entry(kioeventfd, &kioreq->ioeventfds, list) {
                        if (eventfd == kioeventfd->eventfd) {
                                spin_unlock_irqrestore(&kioreq->lock, flags);
                                return ERR_PTR(-EBUSY);
                        }
                }
                spin_unlock_irqrestore(&kioreq->lock, flags);

                return kioreq;
        }

        /* Matching kioreq isn't found, allocate a new one */
        return alloc_ioreq(ioeventfd);
}

static void ioeventfd_free(struct privcmd_kernel_ioeventfd *kioeventfd)
{
        list_del(&kioeventfd->list);
        eventfd_ctx_put(kioeventfd->eventfd);
        kfree(kioeventfd);
}

static int privcmd_ioeventfd_assign(struct privcmd_ioeventfd *ioeventfd)
{
        struct privcmd_kernel_ioeventfd *kioeventfd;
        struct privcmd_kernel_ioreq *kioreq;
        unsigned long flags;
        struct fd f;
        int ret;

        /* Check for range overflow */
        if (ioeventfd->addr + ioeventfd->addr_len < ioeventfd->addr)
                return -EINVAL;

        /* Vhost requires us to support length 1, 2, 4, and 8 */
        if (!(ioeventfd->addr_len == 1 || ioeventfd->addr_len == 2 ||
              ioeventfd->addr_len == 4 || ioeventfd->addr_len == 8))
                return -EINVAL;

        /* 4096 vcpus limit enough ? */
        if (!ioeventfd->vcpus || ioeventfd->vcpus > 4096)
                return -EINVAL;

        kioeventfd = kzalloc(sizeof(*kioeventfd), GFP_KERNEL);
        if (!kioeventfd)
                return -ENOMEM;

        f = fdget(ioeventfd->event_fd);
        if (!fd_file(f)) {
                ret = -EBADF;
                goto error_kfree;
        }

        kioeventfd->eventfd = eventfd_ctx_fileget(fd_file(f));
        fdput(f);

        if (IS_ERR(kioeventfd->eventfd)) {
                ret = PTR_ERR(kioeventfd->eventfd);
                goto error_kfree;
        }

        kioeventfd->addr = ioeventfd->addr;
        kioeventfd->addr_len = ioeventfd->addr_len;
        kioeventfd->vq = ioeventfd->vq;

        mutex_lock(&ioreq_lock);
        kioreq = get_ioreq(ioeventfd, kioeventfd->eventfd);
        if (IS_ERR(kioreq)) {
                mutex_unlock(&ioreq_lock);
                ret = PTR_ERR(kioreq);
                goto error_eventfd;
        }

        spin_lock_irqsave(&kioreq->lock, flags);
        list_add_tail(&kioeventfd->list, &kioreq->ioeventfds);
        spin_unlock_irqrestore(&kioreq->lock, flags);

        mutex_unlock(&ioreq_lock);

        return 0;

error_eventfd:
        eventfd_ctx_put(kioeventfd->eventfd);

error_kfree:
        kfree(kioeventfd);
        return ret;
}

static int privcmd_ioeventfd_deassign(struct privcmd_ioeventfd *ioeventfd)
{
        struct privcmd_kernel_ioreq *kioreq, *tkioreq;
        struct eventfd_ctx *eventfd;
        unsigned long flags;
        int ret = 0;

        eventfd = eventfd_ctx_fdget(ioeventfd->event_fd);
        if (IS_ERR(eventfd))
                return PTR_ERR(eventfd);

        mutex_lock(&ioreq_lock);

        list_for_each_entry_safe(kioreq, tkioreq, &ioreq_list, list) {
                struct privcmd_kernel_ioeventfd *kioeventfd, *tmp;
                /*
                 * kioreq fields can be accessed here without a lock as they are
                 * never updated after being added to the ioreq_list.
                 */
                if (kioreq->dom != ioeventfd->dom ||
                    kioreq->uioreq != ioeventfd->ioreq ||
                    kioreq->vcpus != ioeventfd->vcpus)
                        continue;

                spin_lock_irqsave(&kioreq->lock, flags);
                list_for_each_entry_safe(kioeventfd, tmp, &kioreq->ioeventfds, list) {
                        if (eventfd == kioeventfd->eventfd) {
                                ioeventfd_free(kioeventfd);
                                spin_unlock_irqrestore(&kioreq->lock, flags);

                                if (list_empty(&kioreq->ioeventfds))
                                        ioreq_free(kioreq);
                                goto unlock;
                        }
                }
                spin_unlock_irqrestore(&kioreq->lock, flags);
                break;
        }

        pr_err("Ioeventfd isn't already assigned, dom: %u, addr: %llu\n",
               ioeventfd->dom, ioeventfd->addr);
        ret = -ENODEV;

unlock:
        mutex_unlock(&ioreq_lock);
        eventfd_ctx_put(eventfd);

        return ret;
}

static long privcmd_ioctl_ioeventfd(struct file *file, void __user *udata)
{
        struct privcmd_data *data = file->private_data;
        struct privcmd_ioeventfd ioeventfd;

        if (copy_from_user(&ioeventfd, udata, sizeof(ioeventfd)))
                return -EFAULT;

        /* No other flags should be set */
        if (ioeventfd.flags & ~PRIVCMD_IOEVENTFD_FLAG_DEASSIGN)
                return -EINVAL;

        /* If restriction is in place, check the domid matches */
        if (data->domid != DOMID_INVALID && data->domid != ioeventfd.dom)
                return -EPERM;

        if (ioeventfd.flags & PRIVCMD_IOEVENTFD_FLAG_DEASSIGN)
                return privcmd_ioeventfd_deassign(&ioeventfd);

        return privcmd_ioeventfd_assign(&ioeventfd);
}

static void privcmd_ioeventfd_exit(void)
{
        struct privcmd_kernel_ioreq *kioreq, *tmp;
        unsigned long flags;

        mutex_lock(&ioreq_lock);
        list_for_each_entry_safe(kioreq, tmp, &ioreq_list, list) {
                struct privcmd_kernel_ioeventfd *kioeventfd, *tmp;

                spin_lock_irqsave(&kioreq->lock, flags);
                list_for_each_entry_safe(kioeventfd, tmp, &kioreq->ioeventfds, list)
                        ioeventfd_free(kioeventfd);
                spin_unlock_irqrestore(&kioreq->lock, flags);

                ioreq_free(kioreq);
        }
        mutex_unlock(&ioreq_lock);
}
#else
static inline long privcmd_ioctl_irqfd(struct file *file, void __user *udata)
{
        return -EOPNOTSUPP;
}

static inline int privcmd_irqfd_init(void)
{
        return 0;
}

static inline void privcmd_irqfd_exit(void)
{
}

static inline long privcmd_ioctl_ioeventfd(struct file *file, void __user *udata)
{
        return -EOPNOTSUPP;
}

static inline void privcmd_ioeventfd_exit(void)
{
}
#endif /* CONFIG_XEN_PRIVCMD_EVENTFD */
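
/* Top-level ioctl dispatcher for the privcmd character device. */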
static long privcmd_ioctl(struct file *file,
                          unsigned int cmd, unsigned long data)
{
        int ret = -ENOTTY;
        void __user *udata = (void __user *) data;

        switch (cmd) {
        case IOCTL_PRIVCMD_HYPERCALL:
                ret = privcmd_ioctl_hypercall(file, udata);
                break;

        case IOCTL_PRIVCMD_MMAP:
                ret = privcmd_ioctl_mmap(file, udata);
                break;

        case IOCTL_PRIVCMD_MMAPBATCH:
                ret = privcmd_ioctl_mmap_batch(file, udata, 1);
                break;

        case IOCTL_PRIVCMD_MMAPBATCH_V2:
                ret = privcmd_ioctl_mmap_batch(file, udata, 2);
                break;

        case IOCTL_PRIVCMD_DM_OP:
                ret = privcmd_ioctl_dm_op(file, udata);
                break;

        case IOCTL_PRIVCMD_RESTRICT:
                ret = privcmd_ioctl_restrict(file, udata);
                break;

        case IOCTL_PRIVCMD_MMAP_RESOURCE:
                ret = privcmd_ioctl_mmap_resource(file, udata);
                break;

        case IOCTL_PRIVCMD_IRQFD:
                ret = privcmd_ioctl_irqfd(file, udata);
                break;

        case IOCTL_PRIVCMD_IOEVENTFD:
                ret = privcmd_ioctl_ioeventfd(file, udata);
                break;

        case IOCTL_PRIVCMD_PCIDEV_GET_GSI:
                ret = privcmd_ioctl_pcidev_get_gsi(file, udata);
                break;

        default:
                break;
        }

        return ret;
}

static int privcmd_open(struct inode *ino, struct file *file)
{
        struct privcmd_data *data = kzalloc(sizeof(*data), GFP_KERNEL);

        if (!data)
                return -ENOMEM;

        /* DOMID_INVALID implies no restriction */
        data->domid = DOMID_INVALID;

        file->private_data = data;
        return 0;
}

static int privcmd_release(struct inode *ino, struct file *file)
{
        struct privcmd_data *data = file->private_data;

        kfree(data);
        return 0;
}

static void privcmd_close(struct vm_area_struct *vma)
{
        struct page **pages = vma->vm_private_data;
        int numpgs = vma_pages(vma);
        int numgfns = (vma->vm_end - vma->vm_start) >> XEN_PAGE_SHIFT;
        int rc;

        if (!xen_feature(XENFEAT_auto_translated_physmap) || !numpgs || !pages)
                return;

        rc = xen_unmap_domain_gfn_range(vma, numgfns, pages);
        if (rc == 0)
                xen_free_unpopulated_pages(numpgs, pages);
        else
                pr_crit("unable to unmap MFN range: leaking %d pages. rc=%d\n",
                        numpgs, rc);
        kvfree(pages);
}

static vm_fault_t privcmd_fault(struct vm_fault *vmf)
{
        printk(KERN_DEBUG "privcmd_fault: vma=%p %lx-%lx, pgoff=%lx, uv=%p\n",
               vmf->vma, vmf->vma->vm_start, vmf->vma->vm_end,
               vmf->pgoff, (void *)vmf->address);

        return VM_FAULT_SIGBUS;
}

static const struct vm_operations_struct privcmd_vm_ops = {
        .close = privcmd_close,
        .fault = privcmd_fault
};

static int privcmd_mmap(struct file *file, struct vm_area_struct *vma)
{
        /* DONTCOPY is essential for Xen because copy_page_range doesn't know
         * how to recreate these mappings */
        vm_flags_set(vma, VM_IO | VM_PFNMAP | VM_DONTCOPY |
                     VM_DONTEXPAND | VM_DONTDUMP);
        vma->vm_ops = &privcmd_vm_ops;
        vma->vm_private_data = NULL;

        return 0;
}

/*
 * For MMAPBATCH*. This allows asserting the singleshot mapping
 * on a per pfn/pte basis. Mapping calls that fail with ENOENT
 * can be then retried until success.
 */
static int is_mapped_fn(pte_t *pte, unsigned long addr, void *data)
{
        return pte_none(ptep_get(pte)) ? 0 : -EBUSY;
}

static int privcmd_vma_range_is_mapped(
                struct vm_area_struct *vma,
                unsigned long addr,
                unsigned long nr_pages)
{
        return apply_to_page_range(vma->vm_mm, addr, nr_pages << PAGE_SHIFT,
                                   is_mapped_fn, NULL) != 0;
}

const struct file_operations xen_privcmd_fops = {
        .owner = THIS_MODULE,
        .unlocked_ioctl = privcmd_ioctl,
        .open = privcmd_open,
        .release = privcmd_release,
        .mmap = privcmd_mmap,
};
EXPORT_SYMBOL_GPL(xen_privcmd_fops);

static struct miscdevice privcmd_dev = {
        .minor = MISC_DYNAMIC_MINOR,
        .name = "xen/privcmd",
        .fops = &xen_privcmd_fops,
};

static int __init privcmd_init(void)
{
        int err;

        if (!xen_domain())
                return -ENODEV;

        err = misc_register(&privcmd_dev);
        if (err != 0) {
                pr_err("Could not register Xen privcmd device\n");
                return err;
        }

        err = misc_register(&xen_privcmdbuf_dev);
        if (err != 0) {
                pr_err("Could not register Xen hypercall-buf device\n");
                goto err_privcmdbuf;
        }

        err = privcmd_irqfd_init();
        if (err != 0) {
                pr_err("irqfd init failed\n");
                goto err_irqfd;
        }

        return 0;

err_irqfd:
        misc_deregister(&xen_privcmdbuf_dev);
err_privcmdbuf:
        misc_deregister(&privcmd_dev);
        return err;
}

static void __exit privcmd_exit(void)
{
        privcmd_ioeventfd_exit();
        privcmd_irqfd_exit();

        misc_deregister(&privcmd_dev);
        misc_deregister(&xen_privcmdbuf_dev);
}

module_init(privcmd_init);
module_exit(privcmd_exit);