vfio_platform_common.c 16 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
3704705
  1. // SPDX-License-Identifier: GPL-2.0-only
  2. /*
  3. * Copyright (C) 2013 - Virtual Open Systems
  4. * Author: Antonios Motakis <a.motakis@virtualopensystems.com>
  5. */
  6. #define dev_fmt(fmt) "VFIO: " fmt
  7. #include <linux/device.h>
  8. #include <linux/acpi.h>
  9. #include <linux/iommu.h>
  10. #include <linux/module.h>
  11. #include <linux/mutex.h>
  12. #include <linux/pm_runtime.h>
  13. #include <linux/slab.h>
  14. #include <linux/types.h>
  15. #include <linux/uaccess.h>
  16. #include <linux/vfio.h>
  17. #include "vfio_platform_private.h"
#define DRIVER_VERSION "0.10"
#define DRIVER_AUTHOR "Antonios Motakis <a.motakis@virtualopensystems.com>"
#define DRIVER_DESC "VFIO platform base module"

/* A device is ACPI-backed iff an ACPI HID was resolved at probe time. */
#define VFIO_PLATFORM_IS_ACPI(vdev) ((vdev)->acpihid != NULL)

/* Registry of board-specific reset handlers; guarded by driver_lock. */
static LIST_HEAD(reset_list);
static DEFINE_MUTEX(driver_lock);
  24. static vfio_platform_reset_fn_t vfio_platform_lookup_reset(const char *compat,
  25. struct module **module)
  26. {
  27. struct vfio_platform_reset_node *iter;
  28. vfio_platform_reset_fn_t reset_fn = NULL;
  29. mutex_lock(&driver_lock);
  30. list_for_each_entry(iter, &reset_list, link) {
  31. if (!strcmp(iter->compat, compat) &&
  32. try_module_get(iter->owner)) {
  33. *module = iter->owner;
  34. reset_fn = iter->of_reset;
  35. break;
  36. }
  37. }
  38. mutex_unlock(&driver_lock);
  39. return reset_fn;
  40. }
/*
 * Resolve the ACPI identity of the device being probed.
 *
 * Returns 0 and fills vdev->acpihid on success, -ENOENT when the system
 * did not boot via ACPI, -ENODEV when no ACPI companion exists, and
 * -EINVAL if the companion unexpectedly carries no HID.
 */
static int vfio_platform_acpi_probe(struct vfio_platform_device *vdev,
				    struct device *dev)
{
	struct acpi_device *adev;

	/* On DT firmware acpi_disabled is set; bail out silently. */
	if (acpi_disabled)
		return -ENOENT;

	adev = ACPI_COMPANION(dev);
	if (!adev) {
		dev_err(dev, "ACPI companion device not found for %s\n",
			vdev->name);
		return -ENODEV;
	}

#ifdef CONFIG_ACPI
	/* acpi_device_hid() is only available with CONFIG_ACPI. */
	vdev->acpihid = acpi_device_hid(adev);
#endif
	/* A companion without a HID indicates broken firmware tables. */
	return WARN_ON(!vdev->acpihid) ? -EINVAL : 0;
}
/*
 * Reset an ACPI-backed device by evaluating its _RST method.
 *
 * On failure, *extra_dbg (if non-NULL) is pointed at a human-readable
 * ACPI exception string for the caller to log.
 */
static int vfio_platform_acpi_call_reset(struct vfio_platform_device *vdev,
					 const char **extra_dbg)
{
#ifdef CONFIG_ACPI
	struct device *dev = vdev->device;
	acpi_handle handle = ACPI_HANDLE(dev);
	acpi_status acpi_ret;

	acpi_ret = acpi_evaluate_object(handle, "_RST", NULL, NULL);
	if (ACPI_FAILURE(acpi_ret)) {
		if (extra_dbg)
			*extra_dbg = acpi_format_exception(acpi_ret);
		return -EINVAL;
	}

	return 0;
#else
	return -ENOENT;
#endif
}
/* True when the device's ACPI node provides a _RST reset method. */
static bool vfio_platform_acpi_has_reset(struct vfio_platform_device *vdev)
{
#ifdef CONFIG_ACPI
	struct device *dev = vdev->device;
	acpi_handle handle = ACPI_HANDLE(dev);

	return acpi_has_method(handle, "_RST");
#else
	return false;
#endif
}
  86. static bool vfio_platform_has_reset(struct vfio_platform_device *vdev)
  87. {
  88. if (VFIO_PLATFORM_IS_ACPI(vdev))
  89. return vfio_platform_acpi_has_reset(vdev);
  90. return vdev->of_reset ? true : false;
  91. }
  92. static int vfio_platform_get_reset(struct vfio_platform_device *vdev)
  93. {
  94. if (VFIO_PLATFORM_IS_ACPI(vdev))
  95. return vfio_platform_acpi_has_reset(vdev) ? 0 : -ENOENT;
  96. vdev->of_reset = vfio_platform_lookup_reset(vdev->compat,
  97. &vdev->reset_module);
  98. if (!vdev->of_reset) {
  99. request_module("vfio-reset:%s", vdev->compat);
  100. vdev->of_reset = vfio_platform_lookup_reset(vdev->compat,
  101. &vdev->reset_module);
  102. }
  103. return vdev->of_reset ? 0 : -ENOENT;
  104. }
  105. static void vfio_platform_put_reset(struct vfio_platform_device *vdev)
  106. {
  107. if (VFIO_PLATFORM_IS_ACPI(vdev))
  108. return;
  109. if (vdev->of_reset)
  110. module_put(vdev->reset_module);
  111. }
/*
 * Enumerate the device's resources and build the vdev->regions array
 * exposed to userspace.
 *
 * Returns 0 on success, -ENOMEM on allocation failure, or -EINVAL if a
 * resource of an unsupported type is encountered.
 */
static int vfio_platform_regions_init(struct vfio_platform_device *vdev)
{
	int cnt = 0, i;

	/* Count resources; get_resource() returns NULL past the last one. */
	while (vdev->get_resource(vdev, cnt))
		cnt++;

	vdev->regions = kcalloc(cnt, sizeof(struct vfio_platform_region),
				GFP_KERNEL_ACCOUNT);
	if (!vdev->regions)
		return -ENOMEM;

	for (i = 0; i < cnt; i++) {
		struct resource *res =
			vdev->get_resource(vdev, i);

		vdev->regions[i].addr = res->start;
		vdev->regions[i].size = resource_size(res);
		vdev->regions[i].flags = 0;

		switch (resource_type(res)) {
		case IORESOURCE_MEM:
			vdev->regions[i].type = VFIO_PLATFORM_REGION_TYPE_MMIO;
			vdev->regions[i].flags |= VFIO_REGION_INFO_FLAG_READ;
			if (!(res->flags & IORESOURCE_READONLY))
				vdev->regions[i].flags |=
					VFIO_REGION_INFO_FLAG_WRITE;

			/*
			 * Only regions addressed with PAGE granularity may be
			 * MMAPed securely.
			 */
			if (!(vdev->regions[i].addr & ~PAGE_MASK) &&
			    !(vdev->regions[i].size & ~PAGE_MASK))
				vdev->regions[i].flags |=
					VFIO_REGION_INFO_FLAG_MMAP;
			break;
		case IORESOURCE_IO:
			/* Port I/O regions are tracked but not implemented. */
			vdev->regions[i].type = VFIO_PLATFORM_REGION_TYPE_PIO;
			break;
		default:
			goto err;
		}
	}

	vdev->num_regions = cnt;

	return 0;
err:
	kfree(vdev->regions);
	return -EINVAL;
}
  156. static void vfio_platform_regions_cleanup(struct vfio_platform_device *vdev)
  157. {
  158. int i;
  159. for (i = 0; i < vdev->num_regions; i++)
  160. iounmap(vdev->regions[i].ioaddr);
  161. vdev->num_regions = 0;
  162. kfree(vdev->regions);
  163. }
  164. static int vfio_platform_call_reset(struct vfio_platform_device *vdev,
  165. const char **extra_dbg)
  166. {
  167. if (VFIO_PLATFORM_IS_ACPI(vdev)) {
  168. dev_info(vdev->device, "reset\n");
  169. return vfio_platform_acpi_call_reset(vdev, extra_dbg);
  170. } else if (vdev->of_reset) {
  171. dev_info(vdev->device, "reset\n");
  172. return vdev->of_reset(vdev);
  173. }
  174. dev_warn(vdev->device, "no reset function found!\n");
  175. return -EINVAL;
  176. }
/*
 * Release-side teardown: attempt a reset, drop the runtime-PM reference
 * taken at open, and free region and IRQ state.
 */
void vfio_platform_close_device(struct vfio_device *core_vdev)
{
	struct vfio_platform_device *vdev =
		container_of(core_vdev, struct vfio_platform_device, vdev);
	const char *extra_dbg = NULL;
	int ret;

	ret = vfio_platform_call_reset(vdev, &extra_dbg);
	/* A mandated reset failing on release leaves the device dirty. */
	if (WARN_ON(ret && vdev->reset_required)) {
		dev_warn(
			vdev->device,
			"reset driver is required and reset call failed in release (%d) %s\n",
			ret, extra_dbg ? extra_dbg : "");
	}
	pm_runtime_put(vdev->device);
	vfio_platform_regions_cleanup(vdev);
	vfio_platform_irq_cleanup(vdev);
}
EXPORT_SYMBOL_GPL(vfio_platform_close_device);
/*
 * Open-side setup: build the region table, initialise IRQ state, resume
 * the device via runtime PM, then reset it into a known state.
 *
 * The error unwind mirrors the setup order. A failed reset aborts the
 * open only when reset_required is set.
 */
int vfio_platform_open_device(struct vfio_device *core_vdev)
{
	struct vfio_platform_device *vdev =
		container_of(core_vdev, struct vfio_platform_device, vdev);
	const char *extra_dbg = NULL;
	int ret;

	ret = vfio_platform_regions_init(vdev);
	if (ret)
		return ret;

	ret = vfio_platform_irq_init(vdev);
	if (ret)
		goto err_irq;

	ret = pm_runtime_get_sync(vdev->device);
	if (ret < 0)
		goto err_rst;

	ret = vfio_platform_call_reset(vdev, &extra_dbg);
	if (ret && vdev->reset_required) {
		dev_warn(
			vdev->device,
			"reset driver is required and reset call failed in open (%d) %s\n",
			ret, extra_dbg ? extra_dbg : "");
		goto err_rst;
	}

	return 0;

err_rst:
	/*
	 * err_rst is also reached when pm_runtime_get_sync() itself failed;
	 * that call raises the usage count even on error, so the put here
	 * stays balanced.
	 */
	pm_runtime_put(vdev->device);
	vfio_platform_irq_cleanup(vdev);
err_irq:
	vfio_platform_regions_cleanup(vdev);
	return ret;
}
EXPORT_SYMBOL_GPL(vfio_platform_open_device);
/*
 * Device ioctl dispatcher implementing the core VFIO device ops:
 * GET_INFO, GET_REGION_INFO, GET_IRQ_INFO, SET_IRQS and RESET.
 *
 * For each "info" ioctl, only the structure prefix up to the last field
 * this driver understands (minsz) is copied in either direction.
 */
long vfio_platform_ioctl(struct vfio_device *core_vdev,
			 unsigned int cmd, unsigned long arg)
{
	struct vfio_platform_device *vdev =
		container_of(core_vdev, struct vfio_platform_device, vdev);
	unsigned long minsz;

	if (cmd == VFIO_DEVICE_GET_INFO) {
		struct vfio_device_info info;

		minsz = offsetofend(struct vfio_device_info, num_irqs);

		if (copy_from_user(&info, (void __user *)arg, minsz))
			return -EFAULT;

		if (info.argsz < minsz)
			return -EINVAL;

		/* Advertise RESET only when a reset path actually exists. */
		if (vfio_platform_has_reset(vdev))
			vdev->flags |= VFIO_DEVICE_FLAGS_RESET;
		info.flags = vdev->flags;
		info.num_regions = vdev->num_regions;
		info.num_irqs = vdev->num_irqs;

		return copy_to_user((void __user *)arg, &info, minsz) ?
			-EFAULT : 0;

	} else if (cmd == VFIO_DEVICE_GET_REGION_INFO) {
		struct vfio_region_info info;

		minsz = offsetofend(struct vfio_region_info, offset);

		if (copy_from_user(&info, (void __user *)arg, minsz))
			return -EFAULT;

		if (info.argsz < minsz)
			return -EINVAL;

		if (info.index >= vdev->num_regions)
			return -EINVAL;

		/* map offset to the physical address */
		info.offset = VFIO_PLATFORM_INDEX_TO_OFFSET(info.index);
		info.size = vdev->regions[info.index].size;
		info.flags = vdev->regions[info.index].flags;

		return copy_to_user((void __user *)arg, &info, minsz) ?
			-EFAULT : 0;

	} else if (cmd == VFIO_DEVICE_GET_IRQ_INFO) {
		struct vfio_irq_info info;

		minsz = offsetofend(struct vfio_irq_info, count);

		if (copy_from_user(&info, (void __user *)arg, minsz))
			return -EFAULT;

		if (info.argsz < minsz)
			return -EINVAL;

		if (info.index >= vdev->num_irqs)
			return -EINVAL;

		info.flags = vdev->irqs[info.index].flags;
		info.count = vdev->irqs[info.index].count;

		return copy_to_user((void __user *)arg, &info, minsz) ?
			-EFAULT : 0;

	} else if (cmd == VFIO_DEVICE_SET_IRQS) {
		struct vfio_irq_set hdr;
		u8 *data = NULL;
		int ret = 0;
		size_t data_size = 0;

		minsz = offsetofend(struct vfio_irq_set, count);

		if (copy_from_user(&hdr, (void __user *)arg, minsz))
			return -EFAULT;

		ret = vfio_set_irqs_validate_and_prepare(&hdr, vdev->num_irqs,
							 vdev->num_irqs,
							 &data_size);
		if (ret)
			return ret;

		/* data_size == 0 means the request has no trailing payload. */
		if (data_size) {
			data = memdup_user((void __user *)(arg + minsz),
					   data_size);
			if (IS_ERR(data))
				return PTR_ERR(data);
		}

		/* igate serialises interrupt configuration changes. */
		mutex_lock(&vdev->igate);

		ret = vfio_platform_set_irqs_ioctl(vdev, hdr.flags, hdr.index,
						   hdr.start, hdr.count, data);

		mutex_unlock(&vdev->igate);
		kfree(data);

		return ret;

	} else if (cmd == VFIO_DEVICE_RESET) {
		return vfio_platform_call_reset(vdev, NULL);
	}

	return -ENOTTY;
}
EXPORT_SYMBOL_GPL(vfio_platform_ioctl);
/*
 * Read up to @count bytes from an MMIO region into a user buffer,
 * starting at @off within the region. The region is ioremap()ed lazily
 * on first access. Each step uses the widest naturally-aligned access
 * (4/2/1 bytes) permitted by the remaining count and current offset.
 *
 * Returns the number of bytes read, or a negative errno.
 */
static ssize_t vfio_platform_read_mmio(struct vfio_platform_region *reg,
				       char __user *buf, size_t count,
				       loff_t off)
{
	unsigned int done = 0;

	if (off >= reg->size)
		return -EINVAL;

	/* Clamp the transfer to the end of the region. */
	count = min_t(size_t, count, reg->size - off);

	if (!reg->ioaddr) {
		reg->ioaddr =
			ioremap(reg->addr, reg->size);

		if (!reg->ioaddr)
			return -ENOMEM;
	}

	while (count) {
		size_t filled;

		if (count >= 4 && !(off % 4)) {
			u32 val;

			val = ioread32(reg->ioaddr + off);
			if (copy_to_user(buf, &val, 4))
				goto err;

			filled = 4;
		} else if (count >= 2 && !(off % 2)) {
			u16 val;

			val = ioread16(reg->ioaddr + off);
			if (copy_to_user(buf, &val, 2))
				goto err;

			filled = 2;
		} else {
			u8 val;

			val = ioread8(reg->ioaddr + off);
			if (copy_to_user(buf, &val, 1))
				goto err;

			filled = 1;
		}

		count -= filled;
		done += filled;
		off += filled;
		buf += filled;
	}

	return done;
err:
	return -EFAULT;
}
  349. ssize_t vfio_platform_read(struct vfio_device *core_vdev,
  350. char __user *buf, size_t count, loff_t *ppos)
  351. {
  352. struct vfio_platform_device *vdev =
  353. container_of(core_vdev, struct vfio_platform_device, vdev);
  354. unsigned int index = VFIO_PLATFORM_OFFSET_TO_INDEX(*ppos);
  355. loff_t off = *ppos & VFIO_PLATFORM_OFFSET_MASK;
  356. if (index >= vdev->num_regions)
  357. return -EINVAL;
  358. if (!(vdev->regions[index].flags & VFIO_REGION_INFO_FLAG_READ))
  359. return -EINVAL;
  360. if (vdev->regions[index].type & VFIO_PLATFORM_REGION_TYPE_MMIO)
  361. return vfio_platform_read_mmio(&vdev->regions[index],
  362. buf, count, off);
  363. else if (vdev->regions[index].type & VFIO_PLATFORM_REGION_TYPE_PIO)
  364. return -EINVAL; /* not implemented */
  365. return -EINVAL;
  366. }
  367. EXPORT_SYMBOL_GPL(vfio_platform_read);
/*
 * Write up to @count bytes from a user buffer into an MMIO region,
 * starting at @off within the region. The region is ioremap()ed lazily
 * on first access. Each step uses the widest naturally-aligned access
 * (4/2/1 bytes) permitted by the remaining count and current offset.
 *
 * Returns the number of bytes written, or a negative errno.
 */
static ssize_t vfio_platform_write_mmio(struct vfio_platform_region *reg,
					const char __user *buf, size_t count,
					loff_t off)
{
	unsigned int done = 0;

	if (off >= reg->size)
		return -EINVAL;

	/* Clamp the transfer to the end of the region. */
	count = min_t(size_t, count, reg->size - off);

	if (!reg->ioaddr) {
		reg->ioaddr =
			ioremap(reg->addr, reg->size);

		if (!reg->ioaddr)
			return -ENOMEM;
	}

	while (count) {
		size_t filled;

		if (count >= 4 && !(off % 4)) {
			u32 val;

			if (copy_from_user(&val, buf, 4))
				goto err;
			iowrite32(val, reg->ioaddr + off);

			filled = 4;
		} else if (count >= 2 && !(off % 2)) {
			u16 val;

			if (copy_from_user(&val, buf, 2))
				goto err;
			iowrite16(val, reg->ioaddr + off);

			filled = 2;
		} else {
			u8 val;

			if (copy_from_user(&val, buf, 1))
				goto err;
			iowrite8(val, reg->ioaddr + off);

			filled = 1;
		}

		count -= filled;
		done += filled;
		off += filled;
		buf += filled;
	}

	return done;
err:
	return -EFAULT;
}
  412. ssize_t vfio_platform_write(struct vfio_device *core_vdev, const char __user *buf,
  413. size_t count, loff_t *ppos)
  414. {
  415. struct vfio_platform_device *vdev =
  416. container_of(core_vdev, struct vfio_platform_device, vdev);
  417. unsigned int index = VFIO_PLATFORM_OFFSET_TO_INDEX(*ppos);
  418. loff_t off = *ppos & VFIO_PLATFORM_OFFSET_MASK;
  419. if (index >= vdev->num_regions)
  420. return -EINVAL;
  421. if (!(vdev->regions[index].flags & VFIO_REGION_INFO_FLAG_WRITE))
  422. return -EINVAL;
  423. if (vdev->regions[index].type & VFIO_PLATFORM_REGION_TYPE_MMIO)
  424. return vfio_platform_write_mmio(&vdev->regions[index],
  425. buf, count, off);
  426. else if (vdev->regions[index].type & VFIO_PLATFORM_REGION_TYPE_PIO)
  427. return -EINVAL; /* not implemented */
  428. return -EINVAL;
  429. }
  430. EXPORT_SYMBOL_GPL(vfio_platform_write);
/*
 * Back an MMIO region with a direct physical mapping into user space.
 *
 * vm_pgoff encodes both the region index (consumed by the caller) and
 * the page offset within the region; only the latter is kept here and
 * validated against the region bounds.
 */
static int vfio_platform_mmap_mmio(struct vfio_platform_region region,
				   struct vm_area_struct *vma)
{
	u64 req_len, pgoff, req_start;

	req_len = vma->vm_end - vma->vm_start;
	/* Mask off the region-index bits, keeping the in-region offset. */
	pgoff = vma->vm_pgoff &
		((1U << (VFIO_PLATFORM_OFFSET_SHIFT - PAGE_SHIFT)) - 1);
	req_start = pgoff << PAGE_SHIFT;

	if (region.size < PAGE_SIZE || req_start + req_len > region.size)
		return -EINVAL;

	/* Device memory must not be cached. */
	vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
	vma->vm_pgoff = (region.addr >> PAGE_SHIFT) + pgoff;

	return remap_pfn_range(vma, vma->vm_start, vma->vm_pgoff,
			       req_len, vma->vm_page_prot);
}
/*
 * mmap handler: validate the request (sharing, page alignment, region
 * bounds, and access flags against the region's capabilities) and
 * delegate to the MMIO mapper. PIO regions cannot be mapped.
 */
int vfio_platform_mmap(struct vfio_device *core_vdev, struct vm_area_struct *vma)
{
	struct vfio_platform_device *vdev =
		container_of(core_vdev, struct vfio_platform_device, vdev);
	unsigned int index;

	/* The high bits of vm_pgoff select the region. */
	index = vma->vm_pgoff >> (VFIO_PLATFORM_OFFSET_SHIFT - PAGE_SHIFT);

	if (vma->vm_end < vma->vm_start)
		return -EINVAL;
	if (!(vma->vm_flags & VM_SHARED))
		return -EINVAL;
	if (index >= vdev->num_regions)
		return -EINVAL;
	if (vma->vm_start & ~PAGE_MASK)
		return -EINVAL;
	if (vma->vm_end & ~PAGE_MASK)
		return -EINVAL;

	if (!(vdev->regions[index].flags & VFIO_REGION_INFO_FLAG_MMAP))
		return -EINVAL;

	/* Refuse access modes the region does not support. */
	if (!(vdev->regions[index].flags & VFIO_REGION_INFO_FLAG_READ)
			&& (vma->vm_flags & VM_READ))
		return -EINVAL;

	if (!(vdev->regions[index].flags & VFIO_REGION_INFO_FLAG_WRITE)
			&& (vma->vm_flags & VM_WRITE))
		return -EINVAL;

	vma->vm_private_data = vdev;

	if (vdev->regions[index].type & VFIO_PLATFORM_REGION_TYPE_MMIO)
		return vfio_platform_mmap_mmio(vdev->regions[index], vma);

	else if (vdev->regions[index].type & VFIO_PLATFORM_REGION_TYPE_PIO)
		return -EINVAL; /* not implemented */

	return -EINVAL;
}
EXPORT_SYMBOL_GPL(vfio_platform_mmap);
  478. static int vfio_platform_of_probe(struct vfio_platform_device *vdev,
  479. struct device *dev)
  480. {
  481. int ret;
  482. ret = device_property_read_string(dev, "compatible",
  483. &vdev->compat);
  484. if (ret)
  485. dev_err(dev, "Cannot retrieve compat for %s\n", vdev->name);
  486. return ret;
  487. }
  488. /*
  489. * There can be two kernel build combinations. One build where
  490. * ACPI is not selected in Kconfig and another one with the ACPI Kconfig.
  491. *
  492. * In the first case, vfio_platform_acpi_probe will return since
  493. * acpi_disabled is 1. DT user will not see any kind of messages from
  494. * ACPI.
  495. *
* In the second case, both DT and ACPI are compiled in, and the system may
* boot with either firmware type.
  498. *
  499. * If the firmware is DT type, then acpi_disabled is 1. The ACPI probe routine
  500. * terminates immediately without any messages.
  501. *
  502. * If the firmware is ACPI type, then acpi_disabled is 0. All other checks are
  503. * valid checks. We cannot claim that this system is DT.
  504. */
/*
 * Common probe-time initialisation: identify the device (ACPI first,
 * then DT "compatible"), record the struct device, initialise the IRQ
 * gate, and look up a reset handler.
 *
 * Returns 0 on success; fails if neither identification method works,
 * or if a reset is mandatory but none could be found.
 */
int vfio_platform_init_common(struct vfio_platform_device *vdev)
{
	int ret;
	struct device *dev = vdev->vdev.dev;

	ret = vfio_platform_acpi_probe(vdev, dev);
	if (ret)
		ret = vfio_platform_of_probe(vdev, dev);

	if (ret)
		return ret;

	vdev->device = dev;
	mutex_init(&vdev->igate);

	ret = vfio_platform_get_reset(vdev);
	if (ret && vdev->reset_required) {
		dev_err(dev, "No reset function found for device %s\n",
			vdev->name);
		return ret;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(vfio_platform_init_common);
/*
 * Counterpart of vfio_platform_init_common(): drop the reset-module
 * reference taken at init time (a no-op for ACPI devices).
 */
void vfio_platform_release_common(struct vfio_platform_device *vdev)
{
	vfio_platform_put_reset(vdev);
}
EXPORT_SYMBOL_GPL(vfio_platform_release_common);
/* Add a board-specific reset handler node to the global registry. */
void __vfio_platform_register_reset(struct vfio_platform_reset_node *node)
{
	mutex_lock(&driver_lock);
	list_add(&node->link, &reset_list);
	mutex_unlock(&driver_lock);
}
EXPORT_SYMBOL_GPL(__vfio_platform_register_reset);
  537. void vfio_platform_unregister_reset(const char *compat,
  538. vfio_platform_reset_fn_t fn)
  539. {
  540. struct vfio_platform_reset_node *iter, *temp;
  541. mutex_lock(&driver_lock);
  542. list_for_each_entry_safe(iter, temp, &reset_list, link) {
  543. if (!strcmp(iter->compat, compat) && (iter->of_reset == fn)) {
  544. list_del(&iter->link);
  545. break;
  546. }
  547. }
  548. mutex_unlock(&driver_lock);
  549. }
  550. EXPORT_SYMBOL_GPL(vfio_platform_unregister_reset);
/* Standard module metadata. */
MODULE_VERSION(DRIVER_VERSION);
MODULE_LICENSE("GPL v2");
MODULE_AUTHOR(DRIVER_AUTHOR);
MODULE_DESCRIPTION(DRIVER_DESC);