/* device_cdev.c */
  1. // SPDX-License-Identifier: GPL-2.0-only
  2. /*
  3. * Copyright (c) 2023 Intel Corporation.
  4. */
  5. #include <linux/vfio.h>
  6. #include <linux/iommufd.h>
  7. #include "vfio.h"
  8. static dev_t device_devt;
  9. void vfio_init_device_cdev(struct vfio_device *device)
  10. {
  11. device->device.devt = MKDEV(MAJOR(device_devt), device->index);
  12. cdev_init(&device->cdev, &vfio_device_fops);
  13. device->cdev.owner = THIS_MODULE;
  14. }
  15. /*
  16. * device access via the fd opened by this function is blocked until
  17. * .open_device() is called successfully during BIND_IOMMUFD.
  18. */
  19. int vfio_device_fops_cdev_open(struct inode *inode, struct file *filep)
  20. {
  21. struct vfio_device *device = container_of(inode->i_cdev,
  22. struct vfio_device, cdev);
  23. struct vfio_device_file *df;
  24. int ret;
  25. /* Paired with the put in vfio_device_fops_release() */
  26. if (!vfio_device_try_get_registration(device))
  27. return -ENODEV;
  28. df = vfio_allocate_device_file(device);
  29. if (IS_ERR(df)) {
  30. ret = PTR_ERR(df);
  31. goto err_put_registration;
  32. }
  33. filep->private_data = df;
  34. /*
  35. * Use the pseudo fs inode on the device to link all mmaps
  36. * to the same address space, allowing us to unmap all vmas
  37. * associated to this device using unmap_mapping_range().
  38. */
  39. filep->f_mapping = device->inode->i_mapping;
  40. return 0;
  41. err_put_registration:
  42. vfio_device_put_registration(device);
  43. return ret;
  44. }
  45. static void vfio_df_get_kvm_safe(struct vfio_device_file *df)
  46. {
  47. spin_lock(&df->kvm_ref_lock);
  48. vfio_device_get_kvm_safe(df->device, df->kvm);
  49. spin_unlock(&df->kvm_ref_lock);
  50. }
  51. long vfio_df_ioctl_bind_iommufd(struct vfio_device_file *df,
  52. struct vfio_device_bind_iommufd __user *arg)
  53. {
  54. struct vfio_device *device = df->device;
  55. struct vfio_device_bind_iommufd bind;
  56. unsigned long minsz;
  57. int ret;
  58. static_assert(__same_type(arg->out_devid, df->devid));
  59. minsz = offsetofend(struct vfio_device_bind_iommufd, out_devid);
  60. if (copy_from_user(&bind, arg, minsz))
  61. return -EFAULT;
  62. if (bind.argsz < minsz || bind.flags || bind.iommufd < 0)
  63. return -EINVAL;
  64. /* BIND_IOMMUFD only allowed for cdev fds */
  65. if (df->group)
  66. return -EINVAL;
  67. ret = vfio_device_block_group(device);
  68. if (ret)
  69. return ret;
  70. mutex_lock(&device->dev_set->lock);
  71. /* one device cannot be bound twice */
  72. if (df->access_granted) {
  73. ret = -EINVAL;
  74. goto out_unlock;
  75. }
  76. df->iommufd = iommufd_ctx_from_fd(bind.iommufd);
  77. if (IS_ERR(df->iommufd)) {
  78. ret = PTR_ERR(df->iommufd);
  79. df->iommufd = NULL;
  80. goto out_unlock;
  81. }
  82. /*
  83. * Before the device open, get the KVM pointer currently
  84. * associated with the device file (if there is) and obtain
  85. * a reference. This reference is held until device closed.
  86. * Save the pointer in the device for use by drivers.
  87. */
  88. vfio_df_get_kvm_safe(df);
  89. ret = vfio_df_open(df);
  90. if (ret)
  91. goto out_put_kvm;
  92. ret = copy_to_user(&arg->out_devid, &df->devid,
  93. sizeof(df->devid)) ? -EFAULT : 0;
  94. if (ret)
  95. goto out_close_device;
  96. device->cdev_opened = true;
  97. /*
  98. * Paired with smp_load_acquire() in vfio_device_fops::ioctl/
  99. * read/write/mmap
  100. */
  101. smp_store_release(&df->access_granted, true);
  102. mutex_unlock(&device->dev_set->lock);
  103. return 0;
  104. out_close_device:
  105. vfio_df_close(df);
  106. out_put_kvm:
  107. vfio_device_put_kvm(device);
  108. iommufd_ctx_put(df->iommufd);
  109. df->iommufd = NULL;
  110. out_unlock:
  111. mutex_unlock(&device->dev_set->lock);
  112. vfio_device_unblock_group(device);
  113. return ret;
  114. }
  115. void vfio_df_unbind_iommufd(struct vfio_device_file *df)
  116. {
  117. struct vfio_device *device = df->device;
  118. /*
  119. * In the time of close, there is no contention with another one
  120. * changing this flag. So read df->access_granted without lock
  121. * and no smp_load_acquire() is ok.
  122. */
  123. if (!df->access_granted)
  124. return;
  125. mutex_lock(&device->dev_set->lock);
  126. vfio_df_close(df);
  127. vfio_device_put_kvm(device);
  128. iommufd_ctx_put(df->iommufd);
  129. device->cdev_opened = false;
  130. mutex_unlock(&device->dev_set->lock);
  131. vfio_device_unblock_group(device);
  132. }
  133. int vfio_df_ioctl_attach_pt(struct vfio_device_file *df,
  134. struct vfio_device_attach_iommufd_pt __user *arg)
  135. {
  136. struct vfio_device *device = df->device;
  137. struct vfio_device_attach_iommufd_pt attach;
  138. unsigned long minsz;
  139. int ret;
  140. minsz = offsetofend(struct vfio_device_attach_iommufd_pt, pt_id);
  141. if (copy_from_user(&attach, arg, minsz))
  142. return -EFAULT;
  143. if (attach.argsz < minsz || attach.flags)
  144. return -EINVAL;
  145. mutex_lock(&device->dev_set->lock);
  146. ret = device->ops->attach_ioas(device, &attach.pt_id);
  147. if (ret)
  148. goto out_unlock;
  149. if (copy_to_user(&arg->pt_id, &attach.pt_id, sizeof(attach.pt_id))) {
  150. ret = -EFAULT;
  151. goto out_detach;
  152. }
  153. mutex_unlock(&device->dev_set->lock);
  154. return 0;
  155. out_detach:
  156. device->ops->detach_ioas(device);
  157. out_unlock:
  158. mutex_unlock(&device->dev_set->lock);
  159. return ret;
  160. }
  161. int vfio_df_ioctl_detach_pt(struct vfio_device_file *df,
  162. struct vfio_device_detach_iommufd_pt __user *arg)
  163. {
  164. struct vfio_device *device = df->device;
  165. struct vfio_device_detach_iommufd_pt detach;
  166. unsigned long minsz;
  167. minsz = offsetofend(struct vfio_device_detach_iommufd_pt, flags);
  168. if (copy_from_user(&detach, arg, minsz))
  169. return -EFAULT;
  170. if (detach.argsz < minsz || detach.flags)
  171. return -EINVAL;
  172. mutex_lock(&device->dev_set->lock);
  173. device->ops->detach_ioas(device);
  174. mutex_unlock(&device->dev_set->lock);
  175. return 0;
  176. }
  177. static char *vfio_device_devnode(const struct device *dev, umode_t *mode)
  178. {
  179. return kasprintf(GFP_KERNEL, "vfio/devices/%s", dev_name(dev));
  180. }
  181. int vfio_cdev_init(struct class *device_class)
  182. {
  183. device_class->devnode = vfio_device_devnode;
  184. return alloc_chrdev_region(&device_devt, 0,
  185. MINORMASK + 1, "vfio-dev");
  186. }
  187. void vfio_cdev_cleanup(void)
  188. {
  189. unregister_chrdev_region(device_devt, MINORMASK + 1);
  190. }