// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2021-2022, NVIDIA CORPORATION & AFFILIATES.
 *
 * Kernel side components to support tools/testing/selftests/iommu
 */
#include <linux/anon_inodes.h>
#include <linux/debugfs.h>
#include <linux/fault-inject.h>
#include <linux/file.h>
#include <linux/iommu.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/xarray.h>
#include <uapi/linux/iommufd.h>

#include "../iommu-priv.h"
#include "io_pagetable.h"
#include "iommufd_private.h"
#include "iommufd_test.h"

static DECLARE_FAULT_ATTR(fail_iommufd);
static struct dentry *dbgfs_root;
static struct platform_device *selftest_iommu_dev;
static const struct iommu_ops mock_ops;
static struct iommu_domain_ops domain_nested_ops;

size_t iommufd_test_memory_limit = 65536;

struct mock_bus_type {
	struct bus_type bus;
	struct notifier_block nb;
};

static struct mock_bus_type iommufd_mock_bus_type = {
	.bus = {
		.name = "iommufd_mock",
	},
};

static DEFINE_IDA(mock_dev_ida);

enum {
	MOCK_DIRTY_TRACK = 1,
	MOCK_IO_PAGE_SIZE = PAGE_SIZE / 2,
	MOCK_HUGE_PAGE_SIZE = 512 * MOCK_IO_PAGE_SIZE,
	/*
	 * Like a real page table, alignment requires the low bits of the
	 * address to be zero. The xarray also requires the high bit to be
	 * zero, so we store the pfns shifted. The upper bits are used for
	 * metadata.
	 */
	MOCK_PFN_MASK = ULONG_MAX / MOCK_IO_PAGE_SIZE,
	_MOCK_PFN_START = MOCK_PFN_MASK + 1,
	MOCK_PFN_START_IOVA = _MOCK_PFN_START,
	MOCK_PFN_LAST_IOVA = _MOCK_PFN_START,
	MOCK_PFN_DIRTY_IOVA = _MOCK_PFN_START << 1,
	MOCK_PFN_HUGE_IOVA = _MOCK_PFN_START << 2,
};
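
/*
 * Illustrative sketch, not used by the driver: an xarray entry for a mapped
 * IO page is an xa value holding the shifted pfn in the low bits and the
 * MOCK_PFN_* metadata in the bits above MOCK_PFN_MASK. The helper names below
 * are hypothetical; they exist only to make the packing explicit.
 */
static inline void *mock_pfn_pack_example(unsigned long pfn,
					  unsigned long flags)
{
	/* e.g. the first page of a map stores pfn | MOCK_PFN_START_IOVA */
	return xa_mk_value((pfn & MOCK_PFN_MASK) | flags);
}

static inline unsigned long mock_pfn_unpack_example(void *ent)
{
	/* Strip the metadata bits to recover the shifted pfn */
	return xa_to_value(ent) & MOCK_PFN_MASK;
}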

/*
 * Syzkaller has trouble randomizing the correct iova to use since it is linked
 * to the map ioctl's output, and it has no idea about that. So, simplify
 * things. In syzkaller mode the 64 bit IOVA is converted into an nth area and
 * offset value. This has a much smaller randomization space and syzkaller can
 * hit it.
 */
static unsigned long __iommufd_test_syz_conv_iova(struct io_pagetable *iopt,
						  u64 *iova)
{
	struct syz_layout {
		__u32 nth_area;
		__u32 offset;
	};
	struct syz_layout *syz = (void *)iova;
	unsigned int nth = syz->nth_area;
	struct iopt_area *area;

	down_read(&iopt->iova_rwsem);
	for (area = iopt_area_iter_first(iopt, 0, ULONG_MAX); area;
	     area = iopt_area_iter_next(area, 0, ULONG_MAX)) {
		if (nth == 0) {
			up_read(&iopt->iova_rwsem);
			return iopt_area_iova(area) + syz->offset;
		}
		nth--;
	}
	up_read(&iopt->iova_rwsem);
	return 0;
}
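
/*
 * Sketch of the encoding from the caller's point of view (illustrative
 * values, little-endian layout assumed): a test that wants "the second mapped
 * area, 0x800 bytes in" passes a u64 whose low 32 bits select the area and
 * whose high 32 bits hold the offset, matching struct syz_layout above:
 *
 *	u64 iova = 1 | (0x800ULL << 32);   // nth_area = 1, offset = 0x800
 *
 * __iommufd_test_syz_conv_iova() then walks the areas and rewrites this into
 * a real IOVA.
 */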

static unsigned long iommufd_test_syz_conv_iova(struct iommufd_access *access,
						u64 *iova)
{
	unsigned long ret;

	mutex_lock(&access->ioas_lock);
	if (!access->ioas) {
		mutex_unlock(&access->ioas_lock);
		return 0;
	}
	ret = __iommufd_test_syz_conv_iova(&access->ioas->iopt, iova);
	mutex_unlock(&access->ioas_lock);
	return ret;
}

void iommufd_test_syz_conv_iova_id(struct iommufd_ucmd *ucmd,
				   unsigned int ioas_id, u64 *iova, u32 *flags)
{
	struct iommufd_ioas *ioas;

	if (!(*flags & MOCK_FLAGS_ACCESS_SYZ))
		return;
	*flags &= ~(u32)MOCK_FLAGS_ACCESS_SYZ;

	ioas = iommufd_get_ioas(ucmd->ictx, ioas_id);
	if (IS_ERR(ioas))
		return;
	*iova = __iommufd_test_syz_conv_iova(&ioas->iopt, iova);
	iommufd_put_object(ucmd->ictx, &ioas->obj);
}

struct mock_iommu_domain {
	unsigned long flags;
	struct iommu_domain domain;
	struct xarray pfns;
};

struct mock_iommu_domain_nested {
	struct iommu_domain domain;
	struct mock_iommu_domain *parent;
	u32 iotlb[MOCK_NESTED_DOMAIN_IOTLB_NUM];
};

enum selftest_obj_type {
	TYPE_IDEV,
};

struct mock_dev {
	struct device dev;
	unsigned long flags;
	int id;
};

struct selftest_obj {
	struct iommufd_object obj;
	enum selftest_obj_type type;

	union {
		struct {
			struct iommufd_device *idev;
			struct iommufd_ctx *ictx;
			struct mock_dev *mock_dev;
		} idev;
	};
};

static int mock_domain_nop_attach(struct iommu_domain *domain,
				  struct device *dev)
{
	struct mock_dev *mdev = container_of(dev, struct mock_dev, dev);

	if (domain->dirty_ops && (mdev->flags & MOCK_FLAGS_DEVICE_NO_DIRTY))
		return -EINVAL;

	return 0;
}

static const struct iommu_domain_ops mock_blocking_ops = {
	.attach_dev = mock_domain_nop_attach,
};

static struct iommu_domain mock_blocking_domain = {
	.type = IOMMU_DOMAIN_BLOCKED,
	.ops = &mock_blocking_ops,
};

static void *mock_domain_hw_info(struct device *dev, u32 *length, u32 *type)
{
	struct iommu_test_hw_info *info;

	info = kzalloc(sizeof(*info), GFP_KERNEL);
	if (!info)
		return ERR_PTR(-ENOMEM);

	info->test_reg = IOMMU_HW_INFO_SELFTEST_REGVAL;
	*length = sizeof(*info);
	*type = IOMMU_HW_INFO_TYPE_SELFTEST;

	return info;
}

static int mock_domain_set_dirty_tracking(struct iommu_domain *domain,
					  bool enable)
{
	struct mock_iommu_domain *mock =
		container_of(domain, struct mock_iommu_domain, domain);
	unsigned long flags = mock->flags;

	if (enable && !domain->dirty_ops)
		return -EINVAL;

	/* No change? */
	if (!(enable ^ !!(flags & MOCK_DIRTY_TRACK)))
		return 0;

	flags = (enable ? flags | MOCK_DIRTY_TRACK : flags & ~MOCK_DIRTY_TRACK);

	mock->flags = flags;
	return 0;
}

static bool mock_test_and_clear_dirty(struct mock_iommu_domain *mock,
				      unsigned long iova, size_t page_size,
				      unsigned long flags)
{
	unsigned long cur, end = iova + page_size - 1;
	bool dirty = false;
	void *ent, *old;

	for (cur = iova; cur < end; cur += MOCK_IO_PAGE_SIZE) {
		ent = xa_load(&mock->pfns, cur / MOCK_IO_PAGE_SIZE);
		if (!ent || !(xa_to_value(ent) & MOCK_PFN_DIRTY_IOVA))
			continue;

		dirty = true;
		/* Clear dirty */
		if (!(flags & IOMMU_DIRTY_NO_CLEAR)) {
			unsigned long val;

			val = xa_to_value(ent) & ~MOCK_PFN_DIRTY_IOVA;
			old = xa_store(&mock->pfns, cur / MOCK_IO_PAGE_SIZE,
				       xa_mk_value(val), GFP_KERNEL);
			WARN_ON_ONCE(ent != old);
		}
	}

	return dirty;
}

static int mock_domain_read_and_clear_dirty(struct iommu_domain *domain,
					    unsigned long iova, size_t size,
					    unsigned long flags,
					    struct iommu_dirty_bitmap *dirty)
{
	struct mock_iommu_domain *mock =
		container_of(domain, struct mock_iommu_domain, domain);
	unsigned long end = iova + size;
	void *ent;

	if (!(mock->flags & MOCK_DIRTY_TRACK) && dirty->bitmap)
		return -EINVAL;

	do {
		unsigned long pgsize = MOCK_IO_PAGE_SIZE;
		unsigned long head;

		ent = xa_load(&mock->pfns, iova / MOCK_IO_PAGE_SIZE);
		if (!ent) {
			iova += pgsize;
			continue;
		}

		if (xa_to_value(ent) & MOCK_PFN_HUGE_IOVA)
			pgsize = MOCK_HUGE_PAGE_SIZE;
		head = iova & ~(pgsize - 1);

		/* Clear dirty */
		if (mock_test_and_clear_dirty(mock, head, pgsize, flags))
			iommu_dirty_bitmap_record(dirty, iova, pgsize);
		iova += pgsize;
	} while (iova < end);

	return 0;
}
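
/*
 * Worked example (illustrative): with MOCK_IO_PAGE_SIZE = PAGE_SIZE / 2 =
 * 2048 on a 4K kernel, MOCK_HUGE_PAGE_SIZE is 512 * 2048 = 1 MiB. If the walk
 * above lands at iova 0x101800 inside a huge mapping, head becomes
 * 0x101800 & ~(0x100000 - 1) = 0x100000, so the dirty test-and-clear covers
 * the whole 1 MiB region even though the caller's iova was interior to it.
 */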

static const struct iommu_dirty_ops dirty_ops = {
	.set_dirty_tracking = mock_domain_set_dirty_tracking,
	.read_and_clear_dirty = mock_domain_read_and_clear_dirty,
};

static struct iommu_domain *mock_domain_alloc_paging(struct device *dev)
{
	struct mock_dev *mdev = container_of(dev, struct mock_dev, dev);
	struct mock_iommu_domain *mock;

	mock = kzalloc(sizeof(*mock), GFP_KERNEL);
	if (!mock)
		return NULL;
	mock->domain.geometry.aperture_start = MOCK_APERTURE_START;
	mock->domain.geometry.aperture_end = MOCK_APERTURE_LAST;
	mock->domain.pgsize_bitmap = MOCK_IO_PAGE_SIZE;
	if (dev && mdev->flags & MOCK_FLAGS_DEVICE_HUGE_IOVA)
		mock->domain.pgsize_bitmap |= MOCK_HUGE_PAGE_SIZE;
	mock->domain.ops = mock_ops.default_domain_ops;
	mock->domain.type = IOMMU_DOMAIN_UNMANAGED;
	xa_init(&mock->pfns);
	return &mock->domain;
}

static struct iommu_domain *
__mock_domain_alloc_nested(struct mock_iommu_domain *mock_parent,
			   const struct iommu_hwpt_selftest *user_cfg)
{
	struct mock_iommu_domain_nested *mock_nested;
	int i;

	mock_nested = kzalloc(sizeof(*mock_nested), GFP_KERNEL);
	if (!mock_nested)
		return ERR_PTR(-ENOMEM);
	mock_nested->parent = mock_parent;
	mock_nested->domain.ops = &domain_nested_ops;
	mock_nested->domain.type = IOMMU_DOMAIN_NESTED;
	for (i = 0; i < MOCK_NESTED_DOMAIN_IOTLB_NUM; i++)
		mock_nested->iotlb[i] = user_cfg->iotlb;
	return &mock_nested->domain;
}

static struct iommu_domain *
mock_domain_alloc_user(struct device *dev, u32 flags,
		       struct iommu_domain *parent,
		       const struct iommu_user_data *user_data)
{
	struct mock_iommu_domain *mock_parent;
	struct iommu_hwpt_selftest user_cfg;
	int rc;

	/* must be mock_domain */
	if (!parent) {
		struct mock_dev *mdev = container_of(dev, struct mock_dev, dev);
		bool has_dirty_flag = flags & IOMMU_HWPT_ALLOC_DIRTY_TRACKING;
		bool no_dirty_ops = mdev->flags & MOCK_FLAGS_DEVICE_NO_DIRTY;
		struct iommu_domain *domain;

		if (flags & (~(IOMMU_HWPT_ALLOC_NEST_PARENT |
			       IOMMU_HWPT_ALLOC_DIRTY_TRACKING)))
			return ERR_PTR(-EOPNOTSUPP);
		if (user_data || (has_dirty_flag && no_dirty_ops))
			return ERR_PTR(-EOPNOTSUPP);
		domain = mock_domain_alloc_paging(dev);
		if (!domain)
			return ERR_PTR(-ENOMEM);
		if (has_dirty_flag)
			container_of(domain, struct mock_iommu_domain, domain)
				->domain.dirty_ops = &dirty_ops;
		return domain;
	}

	/* must be mock_domain_nested */
	if (user_data->type != IOMMU_HWPT_DATA_SELFTEST || flags)
		return ERR_PTR(-EOPNOTSUPP);
	if (!parent || parent->ops != mock_ops.default_domain_ops)
		return ERR_PTR(-EINVAL);

	mock_parent = container_of(parent, struct mock_iommu_domain, domain);
	if (!mock_parent)
		return ERR_PTR(-EINVAL);

	rc = iommu_copy_struct_from_user(&user_cfg, user_data,
					 IOMMU_HWPT_DATA_SELFTEST, iotlb);
	if (rc)
		return ERR_PTR(rc);

	return __mock_domain_alloc_nested(mock_parent, &user_cfg);
}

static void mock_domain_free(struct iommu_domain *domain)
{
	struct mock_iommu_domain *mock =
		container_of(domain, struct mock_iommu_domain, domain);

	WARN_ON(!xa_empty(&mock->pfns));
	kfree(mock);
}

static int mock_domain_map_pages(struct iommu_domain *domain,
				 unsigned long iova, phys_addr_t paddr,
				 size_t pgsize, size_t pgcount, int prot,
				 gfp_t gfp, size_t *mapped)
{
	struct mock_iommu_domain *mock =
		container_of(domain, struct mock_iommu_domain, domain);
	unsigned long flags = MOCK_PFN_START_IOVA;
	unsigned long start_iova = iova;

	/*
	 * xarray does not reliably work with fault injection because it does a
	 * retry allocation, so put our own failure point.
	 */
	if (iommufd_should_fail())
		return -ENOENT;

	WARN_ON(iova % MOCK_IO_PAGE_SIZE);
	WARN_ON(pgsize % MOCK_IO_PAGE_SIZE);
	for (; pgcount; pgcount--) {
		size_t cur;

		for (cur = 0; cur != pgsize; cur += MOCK_IO_PAGE_SIZE) {
			void *old;

			if (pgcount == 1 && cur + MOCK_IO_PAGE_SIZE == pgsize)
				flags = MOCK_PFN_LAST_IOVA;
			if (pgsize != MOCK_IO_PAGE_SIZE)
				flags |= MOCK_PFN_HUGE_IOVA;
			old = xa_store(&mock->pfns, iova / MOCK_IO_PAGE_SIZE,
				       xa_mk_value((paddr / MOCK_IO_PAGE_SIZE) |
						   flags),
				       gfp);
			if (xa_is_err(old)) {
				for (; start_iova != iova;
				     start_iova += MOCK_IO_PAGE_SIZE)
					xa_erase(&mock->pfns,
						 start_iova /
							 MOCK_IO_PAGE_SIZE);
				return xa_err(old);
			}
			WARN_ON(old);
			iova += MOCK_IO_PAGE_SIZE;
			paddr += MOCK_IO_PAGE_SIZE;
			*mapped += MOCK_IO_PAGE_SIZE;
			flags = 0;
		}
	}

	return 0;
}
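
/*
 * Sketch of the resulting xarray state (illustrative): mapping pgsize =
 * MOCK_IO_PAGE_SIZE, pgcount = 3 at iova I and paddr P stores three value
 * entries starting at index I/sz, where sz is MOCK_IO_PAGE_SIZE:
 *
 *	[I/sz + 0] = (P/sz + 0) | MOCK_PFN_START_IOVA
 *	[I/sz + 1] = (P/sz + 1)
 *	[I/sz + 2] = (P/sz + 2) | MOCK_PFN_LAST_IOVA
 *
 * mock_domain_unmap_pages() below WARNs if the first/last entries it erases
 * are missing these markers, which is how the selftest catches ranges that
 * were unmapped differently than they were mapped.
 */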

static size_t mock_domain_unmap_pages(struct iommu_domain *domain,
				      unsigned long iova, size_t pgsize,
				      size_t pgcount,
				      struct iommu_iotlb_gather *iotlb_gather)
{
	struct mock_iommu_domain *mock =
		container_of(domain, struct mock_iommu_domain, domain);
	bool first = true;
	size_t ret = 0;
	void *ent;

	WARN_ON(iova % MOCK_IO_PAGE_SIZE);
	WARN_ON(pgsize % MOCK_IO_PAGE_SIZE);

	for (; pgcount; pgcount--) {
		size_t cur;

		for (cur = 0; cur != pgsize; cur += MOCK_IO_PAGE_SIZE) {
			ent = xa_erase(&mock->pfns, iova / MOCK_IO_PAGE_SIZE);
			/*
			 * iommufd generates unmaps that must be a strict
			 * superset of the maps it performed, so every
			 * starting/ending IOVA here should have been an iova
			 * passed to map.
			 *
			 * This simple logic doesn't work when HUGE_PAGE is
			 * turned on since the core code will automatically
			 * switch between the two page sizes, creating a break
			 * in the unmap calls. The break can land in the middle
			 * of a contiguous IOVA range.
			 */
			if (!(domain->pgsize_bitmap & MOCK_HUGE_PAGE_SIZE)) {
				if (first) {
					WARN_ON(ent && !(xa_to_value(ent) &
							 MOCK_PFN_START_IOVA));
					first = false;
				}
				if (pgcount == 1 &&
				    cur + MOCK_IO_PAGE_SIZE == pgsize)
					WARN_ON(ent && !(xa_to_value(ent) &
							 MOCK_PFN_LAST_IOVA));
			}
			iova += MOCK_IO_PAGE_SIZE;
			ret += MOCK_IO_PAGE_SIZE;
		}
	}
	return ret;
}

static phys_addr_t mock_domain_iova_to_phys(struct iommu_domain *domain,
					    dma_addr_t iova)
{
	struct mock_iommu_domain *mock =
		container_of(domain, struct mock_iommu_domain, domain);
	void *ent;

	WARN_ON(iova % MOCK_IO_PAGE_SIZE);
	ent = xa_load(&mock->pfns, iova / MOCK_IO_PAGE_SIZE);
	WARN_ON(!ent);
	return (xa_to_value(ent) & MOCK_PFN_MASK) * MOCK_IO_PAGE_SIZE;
}

static bool mock_domain_capable(struct device *dev, enum iommu_cap cap)
{
	struct mock_dev *mdev = container_of(dev, struct mock_dev, dev);

	switch (cap) {
	case IOMMU_CAP_CACHE_COHERENCY:
		return true;
	case IOMMU_CAP_DIRTY_TRACKING:
		return !(mdev->flags & MOCK_FLAGS_DEVICE_NO_DIRTY);
	default:
		break;
	}

	return false;
}

static struct iopf_queue *mock_iommu_iopf_queue;

static struct iommu_device mock_iommu_device = {
};

static struct iommu_device *mock_probe_device(struct device *dev)
{
	if (dev->bus != &iommufd_mock_bus_type.bus)
		return ERR_PTR(-ENODEV);
	return &mock_iommu_device;
}

static void mock_domain_page_response(struct device *dev, struct iopf_fault *evt,
				      struct iommu_page_response *msg)
{
}

static int mock_dev_enable_feat(struct device *dev, enum iommu_dev_features feat)
{
	if (feat != IOMMU_DEV_FEAT_IOPF || !mock_iommu_iopf_queue)
		return -ENODEV;

	return iopf_queue_add_device(mock_iommu_iopf_queue, dev);
}

static int mock_dev_disable_feat(struct device *dev, enum iommu_dev_features feat)
{
	if (feat != IOMMU_DEV_FEAT_IOPF || !mock_iommu_iopf_queue)
		return -ENODEV;

	iopf_queue_remove_device(mock_iommu_iopf_queue, dev);

	return 0;
}

static const struct iommu_ops mock_ops = {
	/*
	 * IOMMU_DOMAIN_BLOCKED cannot be returned from def_domain_type()
	 * because it is zero.
	 */
	.default_domain = &mock_blocking_domain,
	.blocked_domain = &mock_blocking_domain,
	.owner = THIS_MODULE,
	.pgsize_bitmap = MOCK_IO_PAGE_SIZE,
	.hw_info = mock_domain_hw_info,
	.domain_alloc_paging = mock_domain_alloc_paging,
	.domain_alloc_user = mock_domain_alloc_user,
	.capable = mock_domain_capable,
	.device_group = generic_device_group,
	.probe_device = mock_probe_device,
	.page_response = mock_domain_page_response,
	.dev_enable_feat = mock_dev_enable_feat,
	.dev_disable_feat = mock_dev_disable_feat,
	.user_pasid_table = true,
	.default_domain_ops =
		&(struct iommu_domain_ops){
			.free = mock_domain_free,
			.attach_dev = mock_domain_nop_attach,
			.map_pages = mock_domain_map_pages,
			.unmap_pages = mock_domain_unmap_pages,
			.iova_to_phys = mock_domain_iova_to_phys,
		},
};

static void mock_domain_free_nested(struct iommu_domain *domain)
{
	struct mock_iommu_domain_nested *mock_nested =
		container_of(domain, struct mock_iommu_domain_nested, domain);

	kfree(mock_nested);
}

static int
mock_domain_cache_invalidate_user(struct iommu_domain *domain,
				  struct iommu_user_data_array *array)
{
	struct mock_iommu_domain_nested *mock_nested =
		container_of(domain, struct mock_iommu_domain_nested, domain);
	struct iommu_hwpt_invalidate_selftest inv;
	u32 processed = 0;
	int i = 0, j;
	int rc = 0;

	if (array->type != IOMMU_HWPT_INVALIDATE_DATA_SELFTEST) {
		rc = -EINVAL;
		goto out;
	}

	for ( ; i < array->entry_num; i++) {
		rc = iommu_copy_struct_from_user_array(&inv, array,
						       IOMMU_HWPT_INVALIDATE_DATA_SELFTEST,
						       i, iotlb_id);
		if (rc)
			break;

		if (inv.flags & ~IOMMU_TEST_INVALIDATE_FLAG_ALL) {
			rc = -EOPNOTSUPP;
			break;
		}

		if (inv.iotlb_id > MOCK_NESTED_DOMAIN_IOTLB_ID_MAX) {
			rc = -EINVAL;
			break;
		}

		if (inv.flags & IOMMU_TEST_INVALIDATE_FLAG_ALL) {
			/* Invalidate all mock iotlb entries and ignore iotlb_id */
			for (j = 0; j < MOCK_NESTED_DOMAIN_IOTLB_NUM; j++)
				mock_nested->iotlb[j] = 0;
		} else {
			mock_nested->iotlb[inv.iotlb_id] = 0;
		}

		processed++;
	}

out:
	array->entry_num = processed;
	return rc;
}
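
/*
 * Illustrative walk-through, not driver code: userspace passes an array of
 * struct iommu_hwpt_invalidate_selftest entries. Given the two entries
 * { .iotlb_id = 1 } and { .flags = IOMMU_TEST_INVALIDATE_FLAG_ALL }, the loop
 * above first zeroes iotlb[1], then zeroes every iotlb slot, and reports
 * entry_num = 2 back to userspace. On an error, entry_num instead tells
 * userspace how many entries were processed before the failure.
 */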

static struct iommu_domain_ops domain_nested_ops = {
	.free = mock_domain_free_nested,
	.attach_dev = mock_domain_nop_attach,
	.cache_invalidate_user = mock_domain_cache_invalidate_user,
};

static inline struct iommufd_hw_pagetable *
__get_md_pagetable(struct iommufd_ucmd *ucmd, u32 mockpt_id, u32 hwpt_type)
{
	struct iommufd_object *obj;

	obj = iommufd_get_object(ucmd->ictx, mockpt_id, hwpt_type);
	if (IS_ERR(obj))
		return ERR_CAST(obj);
	return container_of(obj, struct iommufd_hw_pagetable, obj);
}

static inline struct iommufd_hw_pagetable *
get_md_pagetable(struct iommufd_ucmd *ucmd, u32 mockpt_id,
		 struct mock_iommu_domain **mock)
{
	struct iommufd_hw_pagetable *hwpt;

	hwpt = __get_md_pagetable(ucmd, mockpt_id, IOMMUFD_OBJ_HWPT_PAGING);
	if (IS_ERR(hwpt))
		return hwpt;
	if (hwpt->domain->type != IOMMU_DOMAIN_UNMANAGED ||
	    hwpt->domain->ops != mock_ops.default_domain_ops) {
		iommufd_put_object(ucmd->ictx, &hwpt->obj);
		return ERR_PTR(-EINVAL);
	}
	*mock = container_of(hwpt->domain, struct mock_iommu_domain, domain);
	return hwpt;
}

static inline struct iommufd_hw_pagetable *
get_md_pagetable_nested(struct iommufd_ucmd *ucmd, u32 mockpt_id,
			struct mock_iommu_domain_nested **mock_nested)
{
	struct iommufd_hw_pagetable *hwpt;

	hwpt = __get_md_pagetable(ucmd, mockpt_id, IOMMUFD_OBJ_HWPT_NESTED);
	if (IS_ERR(hwpt))
		return hwpt;
	if (hwpt->domain->type != IOMMU_DOMAIN_NESTED ||
	    hwpt->domain->ops != &domain_nested_ops) {
		iommufd_put_object(ucmd->ictx, &hwpt->obj);
		return ERR_PTR(-EINVAL);
	}
	*mock_nested = container_of(hwpt->domain,
				    struct mock_iommu_domain_nested, domain);
	return hwpt;
}

static void mock_dev_release(struct device *dev)
{
	struct mock_dev *mdev = container_of(dev, struct mock_dev, dev);

	ida_free(&mock_dev_ida, mdev->id);
	kfree(mdev);
}

static struct mock_dev *mock_dev_create(unsigned long dev_flags)
{
	struct mock_dev *mdev;
	int rc;

	if (dev_flags &
	    ~(MOCK_FLAGS_DEVICE_NO_DIRTY | MOCK_FLAGS_DEVICE_HUGE_IOVA))
		return ERR_PTR(-EINVAL);

	mdev = kzalloc(sizeof(*mdev), GFP_KERNEL);
	if (!mdev)
		return ERR_PTR(-ENOMEM);

	device_initialize(&mdev->dev);
	mdev->flags = dev_flags;
	mdev->dev.release = mock_dev_release;
	mdev->dev.bus = &iommufd_mock_bus_type.bus;

	rc = ida_alloc(&mock_dev_ida, GFP_KERNEL);
	if (rc < 0)
		goto err_put;
	mdev->id = rc;

	rc = dev_set_name(&mdev->dev, "iommufd_mock%u", mdev->id);
	if (rc)
		goto err_put;

	rc = device_add(&mdev->dev);
	if (rc)
		goto err_put;
	return mdev;

err_put:
	put_device(&mdev->dev);
	return ERR_PTR(rc);
}

static void mock_dev_destroy(struct mock_dev *mdev)
{
	device_unregister(&mdev->dev);
}

bool iommufd_selftest_is_mock_dev(struct device *dev)
{
	return dev->release == mock_dev_release;
}

/* Create an hw_pagetable with the mock domain so we can test the domain ops */
static int iommufd_test_mock_domain(struct iommufd_ucmd *ucmd,
				    struct iommu_test_cmd *cmd)
{
	struct iommufd_device *idev;
	struct selftest_obj *sobj;
	u32 pt_id = cmd->id;
	u32 dev_flags = 0;
	u32 idev_id;
	int rc;

	sobj = iommufd_object_alloc(ucmd->ictx, sobj, IOMMUFD_OBJ_SELFTEST);
	if (IS_ERR(sobj))
		return PTR_ERR(sobj);

	sobj->idev.ictx = ucmd->ictx;
	sobj->type = TYPE_IDEV;

	if (cmd->op == IOMMU_TEST_OP_MOCK_DOMAIN_FLAGS)
		dev_flags = cmd->mock_domain_flags.dev_flags;

	sobj->idev.mock_dev = mock_dev_create(dev_flags);
	if (IS_ERR(sobj->idev.mock_dev)) {
		rc = PTR_ERR(sobj->idev.mock_dev);
		goto out_sobj;
	}

	idev = iommufd_device_bind(ucmd->ictx, &sobj->idev.mock_dev->dev,
				   &idev_id);
	if (IS_ERR(idev)) {
		rc = PTR_ERR(idev);
		goto out_mdev;
	}
	sobj->idev.idev = idev;

	rc = iommufd_device_attach(idev, &pt_id);
	if (rc)
		goto out_unbind;

	/* Userspace must destroy the device_id to destroy the object */
	cmd->mock_domain.out_hwpt_id = pt_id;
	cmd->mock_domain.out_stdev_id = sobj->obj.id;
	cmd->mock_domain.out_idev_id = idev_id;
	rc = iommufd_ucmd_respond(ucmd, sizeof(*cmd));
	if (rc)
		goto out_detach;
	iommufd_object_finalize(ucmd->ictx, &sobj->obj);
	return 0;

out_detach:
	iommufd_device_detach(idev);
out_unbind:
	iommufd_device_unbind(idev);
out_mdev:
	mock_dev_destroy(sobj->idev.mock_dev);
out_sobj:
	iommufd_object_abort(ucmd->ictx, &sobj->obj);
	return rc;
}

/* Replace the mock domain with a manually allocated hw_pagetable */
static int iommufd_test_mock_domain_replace(struct iommufd_ucmd *ucmd,
					    unsigned int device_id, u32 pt_id,
					    struct iommu_test_cmd *cmd)
{
	struct iommufd_object *dev_obj;
	struct selftest_obj *sobj;
	int rc;

	/*
	 * Prefer to use the OBJ_SELFTEST because the destroy_rwsem will ensure
	 * it doesn't race with detach, which is not allowed.
	 */
	dev_obj =
		iommufd_get_object(ucmd->ictx, device_id, IOMMUFD_OBJ_SELFTEST);
	if (IS_ERR(dev_obj))
		return PTR_ERR(dev_obj);

	sobj = container_of(dev_obj, struct selftest_obj, obj);
	if (sobj->type != TYPE_IDEV) {
		rc = -EINVAL;
		goto out_dev_obj;
	}

	rc = iommufd_device_replace(sobj->idev.idev, &pt_id);
	if (rc)
		goto out_dev_obj;

	cmd->mock_domain_replace.pt_id = pt_id;
	rc = iommufd_ucmd_respond(ucmd, sizeof(*cmd));

out_dev_obj:
	iommufd_put_object(ucmd->ictx, dev_obj);
	return rc;
}

/* Add an additional reserved IOVA to the IOAS */
static int iommufd_test_add_reserved(struct iommufd_ucmd *ucmd,
				     unsigned int mockpt_id,
				     unsigned long start, size_t length)
{
	struct iommufd_ioas *ioas;
	int rc;

	ioas = iommufd_get_ioas(ucmd->ictx, mockpt_id);
	if (IS_ERR(ioas))
		return PTR_ERR(ioas);
	down_write(&ioas->iopt.iova_rwsem);
	rc = iopt_reserve_iova(&ioas->iopt, start, start + length - 1, NULL);
	up_write(&ioas->iopt.iova_rwsem);
	iommufd_put_object(ucmd->ictx, &ioas->obj);
	return rc;
}

/* Check that every pfn under each iova matches the pfn under a user VA */
static int iommufd_test_md_check_pa(struct iommufd_ucmd *ucmd,
				    unsigned int mockpt_id, unsigned long iova,
				    size_t length, void __user *uptr)
{
	struct iommufd_hw_pagetable *hwpt;
	struct mock_iommu_domain *mock;
	uintptr_t end;
	int rc;

	if (iova % MOCK_IO_PAGE_SIZE || length % MOCK_IO_PAGE_SIZE ||
	    (uintptr_t)uptr % MOCK_IO_PAGE_SIZE ||
	    check_add_overflow((uintptr_t)uptr, (uintptr_t)length, &end))
		return -EINVAL;

	hwpt = get_md_pagetable(ucmd, mockpt_id, &mock);
	if (IS_ERR(hwpt))
		return PTR_ERR(hwpt);

	for (; length; length -= MOCK_IO_PAGE_SIZE) {
		struct page *pages[1];
		unsigned long pfn;
		long npages;
		void *ent;

		npages = get_user_pages_fast((uintptr_t)uptr & PAGE_MASK, 1, 0,
					     pages);
		if (npages < 0) {
			rc = npages;
			goto out_put;
		}
		if (WARN_ON(npages != 1)) {
			rc = -EFAULT;
			goto out_put;
		}
		pfn = page_to_pfn(pages[0]);
		put_page(pages[0]);

		ent = xa_load(&mock->pfns, iova / MOCK_IO_PAGE_SIZE);
		if (!ent ||
		    (xa_to_value(ent) & MOCK_PFN_MASK) * MOCK_IO_PAGE_SIZE !=
			    pfn * PAGE_SIZE + ((uintptr_t)uptr % PAGE_SIZE)) {
			rc = -EINVAL;
			goto out_put;
		}
		iova += MOCK_IO_PAGE_SIZE;
		uptr += MOCK_IO_PAGE_SIZE;
	}
	rc = 0;

out_put:
	iommufd_put_object(ucmd->ictx, &hwpt->obj);
	return rc;
}

/* Check that the page ref count matches, to look for missing pin/unpins */
static int iommufd_test_md_check_refs(struct iommufd_ucmd *ucmd,
				      void __user *uptr, size_t length,
				      unsigned int refs)
{
	uintptr_t end;

	if (length % PAGE_SIZE || (uintptr_t)uptr % PAGE_SIZE ||
	    check_add_overflow((uintptr_t)uptr, (uintptr_t)length, &end))
		return -EINVAL;

	for (; length; length -= PAGE_SIZE) {
		struct page *pages[1];
		long npages;

		npages = get_user_pages_fast((uintptr_t)uptr, 1, 0, pages);
		if (npages < 0)
			return npages;
		if (WARN_ON(npages != 1))
			return -EFAULT;
		if (!PageCompound(pages[0])) {
			unsigned int count;

			count = page_ref_count(pages[0]);
			if (count / GUP_PIN_COUNTING_BIAS != refs) {
				put_page(pages[0]);
				return -EIO;
			}
		}
		put_page(pages[0]);
		uptr += PAGE_SIZE;
	}
	return 0;
}
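
/*
 * Background for the check above (a sketch of the arithmetic, not driver
 * code): pages pinned with FOLL_PIN have their refcount raised by
 * GUP_PIN_COUNTING_BIAS (1024) per pin rather than by 1, so dividing the
 * refcount by the bias estimates the number of outstanding pins. A page
 * pinned twice by iommufd would show count / GUP_PIN_COUNTING_BIAS == 2,
 * since the handful of ordinary references a page holds never reaches the
 * bias on its own.
 */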

static int iommufd_test_md_check_iotlb(struct iommufd_ucmd *ucmd,
				       u32 mockpt_id, unsigned int iotlb_id,
				       u32 iotlb)
{
	struct mock_iommu_domain_nested *mock_nested;
	struct iommufd_hw_pagetable *hwpt;
	int rc = 0;

	hwpt = get_md_pagetable_nested(ucmd, mockpt_id, &mock_nested);
	if (IS_ERR(hwpt))
		return PTR_ERR(hwpt);

	mock_nested = container_of(hwpt->domain,
				   struct mock_iommu_domain_nested, domain);

	if (iotlb_id > MOCK_NESTED_DOMAIN_IOTLB_ID_MAX ||
	    mock_nested->iotlb[iotlb_id] != iotlb)
		rc = -EINVAL;
	iommufd_put_object(ucmd->ictx, &hwpt->obj);
	return rc;
}

struct selftest_access {
	struct iommufd_access *access;
	struct file *file;
	struct mutex lock;
	struct list_head items;
	unsigned int next_id;
	bool destroying;
};

struct selftest_access_item {
	struct list_head items_elm;
	unsigned long iova;
	size_t length;
	unsigned int id;
};

static const struct file_operations iommfd_test_staccess_fops;

static struct selftest_access *iommufd_access_get(int fd)
{
	struct file *file;

	file = fget(fd);
	if (!file)
		return ERR_PTR(-EBADFD);

	if (file->f_op != &iommfd_test_staccess_fops) {
		fput(file);
		return ERR_PTR(-EBADFD);
	}
	return file->private_data;
}

static void iommufd_test_access_unmap(void *data, unsigned long iova,
				      unsigned long length)
{
	unsigned long iova_last = iova + length - 1;
	struct selftest_access *staccess = data;
	struct selftest_access_item *item;
	struct selftest_access_item *tmp;

	mutex_lock(&staccess->lock);
	list_for_each_entry_safe(item, tmp, &staccess->items, items_elm) {
		if (iova > item->iova + item->length - 1 ||
		    iova_last < item->iova)
			continue;
		list_del(&item->items_elm);
		iommufd_access_unpin_pages(staccess->access, item->iova,
					   item->length);
		kfree(item);
	}
	mutex_unlock(&staccess->lock);
}

static int iommufd_test_access_item_destroy(struct iommufd_ucmd *ucmd,
					    unsigned int access_id,
					    unsigned int item_id)
{
	struct selftest_access_item *item;
	struct selftest_access *staccess;

	staccess = iommufd_access_get(access_id);
	if (IS_ERR(staccess))
		return PTR_ERR(staccess);

	mutex_lock(&staccess->lock);
	list_for_each_entry(item, &staccess->items, items_elm) {
		if (item->id == item_id) {
			list_del(&item->items_elm);
			iommufd_access_unpin_pages(staccess->access, item->iova,
						   item->length);
			mutex_unlock(&staccess->lock);
			kfree(item);
			fput(staccess->file);
			return 0;
		}
	}
	mutex_unlock(&staccess->lock);
	fput(staccess->file);
	return -ENOENT;
}

static int iommufd_test_staccess_release(struct inode *inode,
					 struct file *filep)
{
	struct selftest_access *staccess = filep->private_data;

	if (staccess->access) {
		iommufd_test_access_unmap(staccess, 0, ULONG_MAX);
		iommufd_access_destroy(staccess->access);
	}
	mutex_destroy(&staccess->lock);
	kfree(staccess);
	return 0;
}

static const struct iommufd_access_ops selftest_access_ops_pin = {
	.needs_pin_pages = 1,
	.unmap = iommufd_test_access_unmap,
};

static const struct iommufd_access_ops selftest_access_ops = {
	.unmap = iommufd_test_access_unmap,
};

static const struct file_operations iommfd_test_staccess_fops = {
	.release = iommufd_test_staccess_release,
};

static struct selftest_access *iommufd_test_alloc_access(void)
{
	struct selftest_access *staccess;
	struct file *filep;

	staccess = kzalloc(sizeof(*staccess), GFP_KERNEL_ACCOUNT);
	if (!staccess)
		return ERR_PTR(-ENOMEM);
	INIT_LIST_HEAD(&staccess->items);
	mutex_init(&staccess->lock);

	filep = anon_inode_getfile("[iommufd_test_staccess]",
				   &iommfd_test_staccess_fops, staccess,
				   O_RDWR);
	if (IS_ERR(filep)) {
		kfree(staccess);
		return ERR_CAST(filep);
	}
	staccess->file = filep;
	return staccess;
}

static int iommufd_test_create_access(struct iommufd_ucmd *ucmd,
				      unsigned int ioas_id, unsigned int flags)
{
	struct iommu_test_cmd *cmd = ucmd->cmd;
	struct selftest_access *staccess;
	struct iommufd_access *access;
	u32 id;
	int fdno;
	int rc;

	if (flags & ~MOCK_FLAGS_ACCESS_CREATE_NEEDS_PIN_PAGES)
		return -EOPNOTSUPP;

	staccess = iommufd_test_alloc_access();
	if (IS_ERR(staccess))
		return PTR_ERR(staccess);

	fdno = get_unused_fd_flags(O_CLOEXEC);
	if (fdno < 0) {
		rc = -ENOMEM;
		goto out_free_staccess;
	}

	access = iommufd_access_create(
		ucmd->ictx,
		(flags & MOCK_FLAGS_ACCESS_CREATE_NEEDS_PIN_PAGES) ?
			&selftest_access_ops_pin :
			&selftest_access_ops,
		staccess, &id);
	if (IS_ERR(access)) {
		rc = PTR_ERR(access);
		goto out_put_fdno;
	}
	rc = iommufd_access_attach(access, ioas_id);
	if (rc)
		goto out_destroy;
	cmd->create_access.out_access_fd = fdno;
	rc = iommufd_ucmd_respond(ucmd, sizeof(*cmd));
	if (rc)
		goto out_destroy;

	staccess->access = access;
	fd_install(fdno, staccess->file);
	return 0;

out_destroy:
	iommufd_access_destroy(access);
out_put_fdno:
	put_unused_fd(fdno);
out_free_staccess:
	fput(staccess->file);
	return rc;
}

static int iommufd_test_access_replace_ioas(struct iommufd_ucmd *ucmd,
					    unsigned int access_id,
					    unsigned int ioas_id)
{
	struct selftest_access *staccess;
	int rc;

	staccess = iommufd_access_get(access_id);
	if (IS_ERR(staccess))
		return PTR_ERR(staccess);

	rc = iommufd_access_replace(staccess->access, ioas_id);
	fput(staccess->file);
	return rc;
}

/* Check that the pages in a page array match the pages in the user VA */
static int iommufd_test_check_pages(void __user *uptr, struct page **pages,
				    size_t npages)
{
	for (; npages; npages--) {
		struct page *tmp_pages[1];
		long rc;

		rc = get_user_pages_fast((uintptr_t)uptr, 1, 0, tmp_pages);
		if (rc < 0)
			return rc;
		if (WARN_ON(rc != 1))
			return -EFAULT;
		put_page(tmp_pages[0]);
		if (tmp_pages[0] != *pages)
			return -EBADE;
		pages++;
		uptr += PAGE_SIZE;
	}
	return 0;
}

static int iommufd_test_access_pages(struct iommufd_ucmd *ucmd,
				     unsigned int access_id, unsigned long iova,
				     size_t length, void __user *uptr,
				     u32 flags)
{
	struct iommu_test_cmd *cmd = ucmd->cmd;
	struct selftest_access_item *item;
	struct selftest_access *staccess;
	struct page **pages;
	size_t npages;
	int rc;

	/* Prevent syzkaller from triggering a WARN_ON in kvzalloc() */
	if (length > 16*1024*1024)
		return -ENOMEM;

	if (flags & ~(MOCK_FLAGS_ACCESS_WRITE | MOCK_FLAGS_ACCESS_SYZ))
		return -EOPNOTSUPP;

	staccess = iommufd_access_get(access_id);
	if (IS_ERR(staccess))
		return PTR_ERR(staccess);

	if (staccess->access->ops != &selftest_access_ops_pin) {
		rc = -EOPNOTSUPP;
		goto out_put;
	}

	if (flags & MOCK_FLAGS_ACCESS_SYZ)
		iova = iommufd_test_syz_conv_iova(staccess->access,
						  &cmd->access_pages.iova);

	npages = (ALIGN(iova + length, PAGE_SIZE) -
		  ALIGN_DOWN(iova, PAGE_SIZE)) /
		 PAGE_SIZE;
	pages = kvcalloc(npages, sizeof(*pages), GFP_KERNEL_ACCOUNT);
	if (!pages) {
		rc = -ENOMEM;
		goto out_put;
	}

	/*
	 * Drivers will need to think very carefully about this locking. The
	 * core code can do multiple unmaps immediately after
	 * iommufd_access_pin_pages() and *all* the unmaps must not return until
	 * the range is unpinned. This simple implementation puts a global lock
	 * around the pin, which may not suit drivers that want this to be a
	 * performance path. Drivers that get this wrong will trigger WARN_ON
	 * races and cause EDEADLOCK failures to userspace.
	 */
	mutex_lock(&staccess->lock);
	rc = iommufd_access_pin_pages(staccess->access, iova, length, pages,
				      flags & MOCK_FLAGS_ACCESS_WRITE);
	if (rc)
		goto out_unlock;

	/* For syzkaller allow uptr to be NULL to skip this check */
	if (uptr) {
		rc = iommufd_test_check_pages(
			uptr - (iova - ALIGN_DOWN(iova, PAGE_SIZE)), pages,
			npages);
		if (rc)
			goto out_unaccess;
	}

	item = kzalloc(sizeof(*item), GFP_KERNEL_ACCOUNT);
	if (!item) {
		rc = -ENOMEM;
		goto out_unaccess;
	}

	item->iova = iova;
	item->length = length;
	item->id = staccess->next_id++;
	list_add_tail(&item->items_elm, &staccess->items);

	cmd->access_pages.out_access_pages_id = item->id;
	rc = iommufd_ucmd_respond(ucmd, sizeof(*cmd));
	if (rc)
		goto out_free_item;
	goto out_unlock;

out_free_item:
	list_del(&item->items_elm);
	kfree(item);
out_unaccess:
	iommufd_access_unpin_pages(staccess->access, iova, length);
out_unlock:
	mutex_unlock(&staccess->lock);
	kvfree(pages);
out_put:
	fput(staccess->file);
	return rc;
}

static int iommufd_test_access_rw(struct iommufd_ucmd *ucmd,
				  unsigned int access_id, unsigned long iova,
				  size_t length, void __user *ubuf,
				  unsigned int flags)
{
	struct iommu_test_cmd *cmd = ucmd->cmd;
	struct selftest_access *staccess;
	void *tmp;
	int rc;

	/* Prevent syzkaller from triggering a WARN_ON in kvzalloc() */
	if (length > 16*1024*1024)
		return -ENOMEM;

	if (flags & ~(MOCK_ACCESS_RW_WRITE | MOCK_ACCESS_RW_SLOW_PATH |
		      MOCK_FLAGS_ACCESS_SYZ))
		return -EOPNOTSUPP;

	staccess = iommufd_access_get(access_id);
	if (IS_ERR(staccess))
		return PTR_ERR(staccess);

	tmp = kvzalloc(length, GFP_KERNEL_ACCOUNT);
	if (!tmp) {
		rc = -ENOMEM;
		goto out_put;
	}

	if (flags & MOCK_ACCESS_RW_WRITE) {
		if (copy_from_user(tmp, ubuf, length)) {
			rc = -EFAULT;
			goto out_free;
		}
	}

	if (flags & MOCK_FLAGS_ACCESS_SYZ)
		iova = iommufd_test_syz_conv_iova(staccess->access,
						  &cmd->access_rw.iova);

	rc = iommufd_access_rw(staccess->access, iova, tmp, length, flags);
	if (rc)
		goto out_free;
	if (!(flags & MOCK_ACCESS_RW_WRITE)) {
		if (copy_to_user(ubuf, tmp, length)) {
			rc = -EFAULT;
			goto out_free;
		}
	}

out_free:
	kvfree(tmp);
out_put:
	fput(staccess->file);
	return rc;
}
static_assert((unsigned int)MOCK_ACCESS_RW_WRITE == IOMMUFD_ACCESS_RW_WRITE);
static_assert((unsigned int)MOCK_ACCESS_RW_SLOW_PATH ==
	      __IOMMUFD_ACCESS_RW_SLOW_PATH);

static int iommufd_test_dirty(struct iommufd_ucmd *ucmd, unsigned int mockpt_id,
			      unsigned long iova, size_t length,
			      unsigned long page_size, void __user *uptr,
			      u32 flags)
{
	unsigned long i, max;
	struct iommu_test_cmd *cmd = ucmd->cmd;
	struct iommufd_hw_pagetable *hwpt;
	struct mock_iommu_domain *mock;
	int rc, count = 0;
	void *tmp;

	if (!page_size || !length || iova % page_size || length % page_size ||
	    !uptr)
		return -EINVAL;

	hwpt = get_md_pagetable(ucmd, mockpt_id, &mock);
	if (IS_ERR(hwpt))
		return PTR_ERR(hwpt);

	if (!(mock->flags & MOCK_DIRTY_TRACK)) {
		rc = -EINVAL;
		goto out_put;
	}

	max = length / page_size;
	tmp = kvzalloc(DIV_ROUND_UP(max, BITS_PER_LONG) * sizeof(unsigned long),
		       GFP_KERNEL_ACCOUNT);
	if (!tmp) {
		rc = -ENOMEM;
		goto out_put;
	}

	if (copy_from_user(tmp, uptr, DIV_ROUND_UP(max, BITS_PER_BYTE))) {
		rc = -EFAULT;
		goto out_free;
	}

	for (i = 0; i < max; i++) {
		unsigned long cur = iova + i * page_size;
		void *ent, *old;

		if (!test_bit(i, (unsigned long *)tmp))
			continue;

		ent = xa_load(&mock->pfns, cur / page_size);
		if (ent) {
			unsigned long val;

			val = xa_to_value(ent) | MOCK_PFN_DIRTY_IOVA;
			old = xa_store(&mock->pfns, cur / page_size,
				       xa_mk_value(val), GFP_KERNEL);
			WARN_ON_ONCE(ent != old);
			count++;
		}
	}

	cmd->dirty.out_nr_dirty = count;
	rc = iommufd_ucmd_respond(ucmd, sizeof(*cmd));
out_free:
	kvfree(tmp);
out_put:
	iommufd_put_object(ucmd->ictx, &hwpt->obj);
	return rc;
}

static int iommufd_test_trigger_iopf(struct iommufd_ucmd *ucmd,
				     struct iommu_test_cmd *cmd)
{
	struct iopf_fault event = { };
	struct iommufd_device *idev;

	idev = iommufd_get_device(ucmd, cmd->trigger_iopf.dev_id);
	if (IS_ERR(idev))
		return PTR_ERR(idev);

	event.fault.prm.flags = IOMMU_FAULT_PAGE_REQUEST_LAST_PAGE;
	if (cmd->trigger_iopf.pasid != IOMMU_NO_PASID)
		event.fault.prm.flags |= IOMMU_FAULT_PAGE_REQUEST_PASID_VALID;
	event.fault.type = IOMMU_FAULT_PAGE_REQ;
	event.fault.prm.addr = cmd->trigger_iopf.addr;
	event.fault.prm.pasid = cmd->trigger_iopf.pasid;
	event.fault.prm.grpid = cmd->trigger_iopf.grpid;
	event.fault.prm.perm = cmd->trigger_iopf.perm;

	iommu_report_device_fault(idev->dev, &event);

	iommufd_put_object(ucmd->ictx, &idev->obj);

	return 0;
}

void iommufd_selftest_destroy(struct iommufd_object *obj)
{
	struct selftest_obj *sobj = container_of(obj, struct selftest_obj, obj);

	switch (sobj->type) {
	case TYPE_IDEV:
		iommufd_device_detach(sobj->idev.idev);
		iommufd_device_unbind(sobj->idev.idev);
		mock_dev_destroy(sobj->idev.mock_dev);
		break;
	}
}

int iommufd_test(struct iommufd_ucmd *ucmd)
{
	struct iommu_test_cmd *cmd = ucmd->cmd;

	switch (cmd->op) {
	case IOMMU_TEST_OP_ADD_RESERVED:
		return iommufd_test_add_reserved(ucmd, cmd->id,
						 cmd->add_reserved.start,
						 cmd->add_reserved.length);
	case IOMMU_TEST_OP_MOCK_DOMAIN:
	case IOMMU_TEST_OP_MOCK_DOMAIN_FLAGS:
		return iommufd_test_mock_domain(ucmd, cmd);
	case IOMMU_TEST_OP_MOCK_DOMAIN_REPLACE:
		return iommufd_test_mock_domain_replace(
			ucmd, cmd->id, cmd->mock_domain_replace.pt_id, cmd);
	case IOMMU_TEST_OP_MD_CHECK_MAP:
		return iommufd_test_md_check_pa(
			ucmd, cmd->id, cmd->check_map.iova,
			cmd->check_map.length,
			u64_to_user_ptr(cmd->check_map.uptr));
	case IOMMU_TEST_OP_MD_CHECK_REFS:
		return iommufd_test_md_check_refs(
			ucmd, u64_to_user_ptr(cmd->check_refs.uptr),
			cmd->check_refs.length, cmd->check_refs.refs);
	case IOMMU_TEST_OP_MD_CHECK_IOTLB:
		return iommufd_test_md_check_iotlb(ucmd, cmd->id,
						   cmd->check_iotlb.id,
						   cmd->check_iotlb.iotlb);
	case IOMMU_TEST_OP_CREATE_ACCESS:
		return iommufd_test_create_access(ucmd, cmd->id,
						  cmd->create_access.flags);
	case IOMMU_TEST_OP_ACCESS_REPLACE_IOAS:
		return iommufd_test_access_replace_ioas(
			ucmd, cmd->id, cmd->access_replace_ioas.ioas_id);
	case IOMMU_TEST_OP_ACCESS_PAGES:
		return iommufd_test_access_pages(
			ucmd, cmd->id, cmd->access_pages.iova,
			cmd->access_pages.length,
			u64_to_user_ptr(cmd->access_pages.uptr),
			cmd->access_pages.flags);
	case IOMMU_TEST_OP_ACCESS_RW:
		return iommufd_test_access_rw(
			ucmd, cmd->id, cmd->access_rw.iova,
			cmd->access_rw.length,
			u64_to_user_ptr(cmd->access_rw.uptr),
			cmd->access_rw.flags);
	case IOMMU_TEST_OP_DESTROY_ACCESS_PAGES:
		return iommufd_test_access_item_destroy(
			ucmd, cmd->id, cmd->destroy_access_pages.access_pages_id);
	case IOMMU_TEST_OP_SET_TEMP_MEMORY_LIMIT:
		/* Protect _batch_init(), cannot be less than elmsz */
		if (cmd->memory_limit.limit <
		    sizeof(unsigned long) + sizeof(u32))
			return -EINVAL;
		iommufd_test_memory_limit = cmd->memory_limit.limit;
		return 0;
	case IOMMU_TEST_OP_DIRTY:
		return iommufd_test_dirty(ucmd, cmd->id, cmd->dirty.iova,
					  cmd->dirty.length,
					  cmd->dirty.page_size,
					  u64_to_user_ptr(cmd->dirty.uptr),
					  cmd->dirty.flags);
	case IOMMU_TEST_OP_TRIGGER_IOPF:
		return iommufd_test_trigger_iopf(ucmd, cmd);
	default:
		return -EOPNOTSUPP;
	}
}

bool iommufd_should_fail(void)
{
	return should_fail(&fail_iommufd, 1);
}
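
/*
 * Usage note (an illustrative shell session, assuming CONFIG_FAULT_INJECTION
 * and the standard fail_* debugfs attributes that
 * fault_create_debugfs_attr() exposes below):
 *
 *	echo 100 > /sys/kernel/debug/fail_iommufd/probability
 *	echo -1  > /sys/kernel/debug/fail_iommufd/times
 *
 * should make iommufd_should_fail() return true on every call, which the
 * mock map path above turns into an -ENOENT failure.
 */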

int __init iommufd_test_init(void)
{
	struct platform_device_info pdevinfo = {
		.name = "iommufd_selftest_iommu",
	};
	int rc;

	dbgfs_root =
		fault_create_debugfs_attr("fail_iommufd", NULL, &fail_iommufd);

	selftest_iommu_dev = platform_device_register_full(&pdevinfo);
	if (IS_ERR(selftest_iommu_dev)) {
		rc = PTR_ERR(selftest_iommu_dev);
		goto err_dbgfs;
	}

	rc = bus_register(&iommufd_mock_bus_type.bus);
	if (rc)
		goto err_platform;

	rc = iommu_device_sysfs_add(&mock_iommu_device,
				    &selftest_iommu_dev->dev, NULL, "%s",
				    dev_name(&selftest_iommu_dev->dev));
	if (rc)
		goto err_bus;

	rc = iommu_device_register_bus(&mock_iommu_device, &mock_ops,
				       &iommufd_mock_bus_type.bus,
				       &iommufd_mock_bus_type.nb);
	if (rc)
		goto err_sysfs;

	mock_iommu_iopf_queue = iopf_queue_alloc("mock-iopfq");

	return 0;

err_sysfs:
	iommu_device_sysfs_remove(&mock_iommu_device);
err_bus:
	bus_unregister(&iommufd_mock_bus_type.bus);
err_platform:
	platform_device_unregister(selftest_iommu_dev);
err_dbgfs:
	debugfs_remove_recursive(dbgfs_root);
	return rc;
}

void iommufd_test_exit(void)
{
	if (mock_iommu_iopf_queue) {
		iopf_queue_free(mock_iommu_iopf_queue);
		mock_iommu_iopf_queue = NULL;
	}

	iommu_device_sysfs_remove(&mock_iommu_device);
	iommu_device_unregister_bus(&mock_iommu_device,
				    &iommufd_mock_bus_type.bus,
				    &iommufd_mock_bus_type.nb);
	bus_unregister(&iommufd_mock_bus_type.bus);
	platform_device_unregister(selftest_iommu_dev);
	debugfs_remove_recursive(dbgfs_root);
}