// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2021-2022, NVIDIA CORPORATION & AFFILIATES
 */
#include <linux/vfio.h>
#include <linux/iommufd.h>

#include "vfio.h"

MODULE_IMPORT_NS(IOMMUFD);
MODULE_IMPORT_NS(IOMMUFD_VFIO);

bool vfio_iommufd_device_has_compat_ioas(struct vfio_device *vdev,
					 struct iommufd_ctx *ictx)
{
	u32 ioas_id;

	return !iommufd_vfio_compat_ioas_get_id(ictx, &ioas_id);
}

int vfio_df_iommufd_bind(struct vfio_device_file *df)
{
	struct vfio_device *vdev = df->device;
	struct iommufd_ctx *ictx = df->iommufd;

	lockdep_assert_held(&vdev->dev_set->lock);

	return vdev->ops->bind_iommufd(vdev, ictx, &df->devid);
}

int vfio_iommufd_compat_attach_ioas(struct vfio_device *vdev,
				    struct iommufd_ctx *ictx)
{
	u32 ioas_id;
	int ret;

	lockdep_assert_held(&vdev->dev_set->lock);

	/* compat noiommu does not need to do ioas attach */
	if (vfio_device_is_noiommu(vdev))
		return 0;

	ret = iommufd_vfio_compat_ioas_get_id(ictx, &ioas_id);
	if (ret)
		return ret;

	/* The legacy path has no way to return the selected pt_id */
	return vdev->ops->attach_ioas(vdev, &ioas_id);
}

void vfio_df_iommufd_unbind(struct vfio_device_file *df)
{
	struct vfio_device *vdev = df->device;

	lockdep_assert_held(&vdev->dev_set->lock);

	if (vfio_device_is_noiommu(vdev))
		return;

	if (vdev->ops->unbind_iommufd)
		vdev->ops->unbind_iommufd(vdev);
}

struct iommufd_ctx *vfio_iommufd_device_ictx(struct vfio_device *vdev)
{
	if (vdev->iommufd_device)
		return iommufd_device_to_ictx(vdev->iommufd_device);
	return NULL;
}
EXPORT_SYMBOL_GPL(vfio_iommufd_device_ictx);

static int vfio_iommufd_device_id(struct vfio_device *vdev)
{
	if (vdev->iommufd_device)
		return iommufd_device_to_id(vdev->iommufd_device);
	return -EINVAL;
}

/*
 * Return the devid for a device:
 *  - a valid ID if the device is owned by the ictx
 *  - -ENOENT if the device is owned but has no ID
 *  - -ENODEV or another error if the device is not owned
 */
int vfio_iommufd_get_dev_id(struct vfio_device *vdev, struct iommufd_ctx *ictx)
{
	struct iommu_group *group;
	int devid;

	if (vfio_iommufd_device_ictx(vdev) == ictx)
		return vfio_iommufd_device_id(vdev);

	group = iommu_group_get(vdev->dev);
	if (!group)
		return -ENODEV;

	if (iommufd_ctx_has_group(ictx, group))
		devid = -ENOENT;
	else
		devid = -ENODEV;

	iommu_group_put(group);

	return devid;
}
EXPORT_SYMBOL_GPL(vfio_iommufd_get_dev_id);

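/*
 * Illustrative sketch, not part of this file: following the return-value
 * convention documented above, a hypothetical caller can treat both a valid
 * ID and -ENOENT as "owned by @ictx":
 *
 *	devid = vfio_iommufd_get_dev_id(vdev, ictx);
 *	owned = (devid >= 0 || devid == -ENOENT);
 */
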
/*
 * The physical standard ops mean that the iommufd_device is bound to the
 * physical device vdev->dev that was provided to vfio_init_group_dev().
 * Drivers using this ops set should call vfio_register_group_dev().
 */
int vfio_iommufd_physical_bind(struct vfio_device *vdev,
			       struct iommufd_ctx *ictx, u32 *out_device_id)
{
	struct iommufd_device *idev;

	idev = iommufd_device_bind(ictx, vdev->dev, out_device_id);
	if (IS_ERR(idev))
		return PTR_ERR(idev);
	vdev->iommufd_device = idev;
	return 0;
}
EXPORT_SYMBOL_GPL(vfio_iommufd_physical_bind);

void vfio_iommufd_physical_unbind(struct vfio_device *vdev)
{
	lockdep_assert_held(&vdev->dev_set->lock);

	if (vdev->iommufd_attached) {
		iommufd_device_detach(vdev->iommufd_device);
		vdev->iommufd_attached = false;
	}
	iommufd_device_unbind(vdev->iommufd_device);
	vdev->iommufd_device = NULL;
}
EXPORT_SYMBOL_GPL(vfio_iommufd_physical_unbind);

int vfio_iommufd_physical_attach_ioas(struct vfio_device *vdev, u32 *pt_id)
{
	int rc;

	lockdep_assert_held(&vdev->dev_set->lock);

	if (WARN_ON(!vdev->iommufd_device))
		return -EINVAL;

	if (vdev->iommufd_attached)
		rc = iommufd_device_replace(vdev->iommufd_device, pt_id);
	else
		rc = iommufd_device_attach(vdev->iommufd_device, pt_id);
	if (rc)
		return rc;
	vdev->iommufd_attached = true;
	return 0;
}
EXPORT_SYMBOL_GPL(vfio_iommufd_physical_attach_ioas);

void vfio_iommufd_physical_detach_ioas(struct vfio_device *vdev)
{
	lockdep_assert_held(&vdev->dev_set->lock);

	if (WARN_ON(!vdev->iommufd_device) || !vdev->iommufd_attached)
		return;

	iommufd_device_detach(vdev->iommufd_device);
	vdev->iommufd_attached = false;
}
EXPORT_SYMBOL_GPL(vfio_iommufd_physical_detach_ioas);

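/*
 * Illustrative sketch, not part of this file: a physical VFIO driver would
 * typically plug the helpers above straight into its vfio_device_ops before
 * registering with vfio_register_group_dev(). The ops name and .name string
 * below are hypothetical placeholders; other callbacks are omitted.
 *
 *	static const struct vfio_device_ops my_pci_ops = {
 *		.name		= "my-vfio-pci",
 *		.bind_iommufd	= vfio_iommufd_physical_bind,
 *		.unbind_iommufd	= vfio_iommufd_physical_unbind,
 *		.attach_ioas	= vfio_iommufd_physical_attach_ioas,
 *		.detach_ioas	= vfio_iommufd_physical_detach_ioas,
 *	};
 */
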
/*
 * The emulated standard ops mean that vfio_device is going to use the
 * "mdev path" and will call vfio_pin_pages()/vfio_dma_rw(). Drivers using
 * this ops set should call vfio_register_emulated_iommu_dev(). Drivers
 * that do not call vfio_pin_pages()/vfio_dma_rw() have no need to provide
 * dma_unmap.
 */
static void vfio_emulated_unmap(void *data, unsigned long iova,
				unsigned long length)
{
	struct vfio_device *vdev = data;

	if (vdev->ops->dma_unmap)
		vdev->ops->dma_unmap(vdev, iova, length);
}

static const struct iommufd_access_ops vfio_user_ops = {
	.needs_pin_pages = 1,
	.unmap = vfio_emulated_unmap,
};

int vfio_iommufd_emulated_bind(struct vfio_device *vdev,
			       struct iommufd_ctx *ictx, u32 *out_device_id)
{
	struct iommufd_access *user;

	lockdep_assert_held(&vdev->dev_set->lock);

	user = iommufd_access_create(ictx, &vfio_user_ops, vdev, out_device_id);
	if (IS_ERR(user))
		return PTR_ERR(user);
	vdev->iommufd_access = user;
	return 0;
}
EXPORT_SYMBOL_GPL(vfio_iommufd_emulated_bind);

void vfio_iommufd_emulated_unbind(struct vfio_device *vdev)
{
	lockdep_assert_held(&vdev->dev_set->lock);

	if (vdev->iommufd_access) {
		iommufd_access_destroy(vdev->iommufd_access);
		vdev->iommufd_attached = false;
		vdev->iommufd_access = NULL;
	}
}
EXPORT_SYMBOL_GPL(vfio_iommufd_emulated_unbind);

int vfio_iommufd_emulated_attach_ioas(struct vfio_device *vdev, u32 *pt_id)
{
	int rc;

	lockdep_assert_held(&vdev->dev_set->lock);

	if (vdev->iommufd_attached)
		rc = iommufd_access_replace(vdev->iommufd_access, *pt_id);
	else
		rc = iommufd_access_attach(vdev->iommufd_access, *pt_id);
	if (rc)
		return rc;
	vdev->iommufd_attached = true;
	return 0;
}
EXPORT_SYMBOL_GPL(vfio_iommufd_emulated_attach_ioas);

void vfio_iommufd_emulated_detach_ioas(struct vfio_device *vdev)
{
	lockdep_assert_held(&vdev->dev_set->lock);

	if (WARN_ON(!vdev->iommufd_access) || !vdev->iommufd_attached)
		return;

	iommufd_access_detach(vdev->iommufd_access);
	vdev->iommufd_attached = false;
}
EXPORT_SYMBOL_GPL(vfio_iommufd_emulated_detach_ioas);

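/*
 * Illustrative sketch, not part of this file: an emulated/mdev-style driver
 * would wire up the emulated helpers and, because it calls
 * vfio_pin_pages()/vfio_dma_rw(), provide a dma_unmap callback, then
 * register with vfio_register_emulated_iommu_dev(). "my_mdev_ops",
 * "my-mdev" and my_dma_unmap() are hypothetical placeholders.
 *
 *	static const struct vfio_device_ops my_mdev_ops = {
 *		.name		= "my-mdev",
 *		.bind_iommufd	= vfio_iommufd_emulated_bind,
 *		.unbind_iommufd	= vfio_iommufd_emulated_unbind,
 *		.attach_ioas	= vfio_iommufd_emulated_attach_ioas,
 *		.detach_ioas	= vfio_iommufd_emulated_detach_ioas,
 *		.dma_unmap	= my_dma_unmap,
 *	};
 */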