/* vfio.h — VFIO core private header */
  1. /* SPDX-License-Identifier: GPL-2.0-only */
  2. /*
  3. * Copyright (C) 2012 Red Hat, Inc. All rights reserved.
  4. * Author: Alex Williamson <alex.williamson@redhat.com>
  5. */
  6. #ifndef __VFIO_VFIO_H__
  7. #define __VFIO_VFIO_H__
  8. #include <linux/file.h>
  9. #include <linux/device.h>
  10. #include <linux/cdev.h>
  11. #include <linux/module.h>
  12. #include <linux/vfio.h>
  13. struct iommufd_ctx;
  14. struct iommu_group;
  15. struct vfio_container;
/*
 * Per-open state for a VFIO device file; created by
 * vfio_allocate_device_file() and handed to the vfio_df_*() helpers.
 */
struct vfio_device_file {
	struct vfio_device *device;
	struct vfio_group *group;	/* NOTE(review): presumably NULL on the cdev path — confirm in vfio_main.c */

	u8 access_granted;		/* set once the user may actually use the device */
	u32 devid; /* only valid when iommufd is valid */
	spinlock_t kvm_ref_lock; /* protect kvm field */
	struct kvm *kvm;
	struct iommufd_ctx *iommufd; /* protected by struct vfio_device_set::lock */
};
  25. void vfio_device_put_registration(struct vfio_device *device);
  26. bool vfio_device_try_get_registration(struct vfio_device *device);
  27. int vfio_df_open(struct vfio_device_file *df);
  28. void vfio_df_close(struct vfio_device_file *df);
  29. struct vfio_device_file *
  30. vfio_allocate_device_file(struct vfio_device *device);
  31. extern const struct file_operations vfio_device_fops;
  32. #ifdef CONFIG_VFIO_NOIOMMU
  33. extern bool vfio_noiommu __read_mostly;
  34. #else
  35. enum { vfio_noiommu = false };
  36. #endif
/* How a device's iommu_group is backed; governs isolation guarantees. */
enum vfio_group_type {
	/*
	 * Physical device with IOMMU backing.
	 */
	VFIO_IOMMU,

	/*
	 * Virtual device without IOMMU backing. The VFIO core fakes up an
	 * iommu_group as the iommu_group sysfs interface is part of the
	 * userspace ABI. The user of these devices must not be able to
	 * directly trigger unmediated DMA.
	 */
	VFIO_EMULATED_IOMMU,

	/*
	 * Physical device without IOMMU backing. The VFIO core fakes up an
	 * iommu_group as the iommu_group sysfs interface is part of the
	 * userspace ABI. Users can trigger unmediated DMA by the device,
	 * usage is highly dangerous, requires an explicit opt-in and will
	 * taint the kernel.
	 */
	VFIO_NO_IOMMU,
};
  58. #if IS_ENABLED(CONFIG_VFIO_GROUP)
/*
 * Legacy group uAPI object: one per iommu_group, exposed to userspace
 * as a char device (dev/cdev below). Lifetime is tracked by "drivers".
 */
struct vfio_group {
	struct device dev;
	struct cdev cdev;
	/*
	 * When drivers is non-zero a driver is attached to the struct device
	 * that provided the iommu_group and thus the iommu_group is a valid
	 * pointer. When drivers is 0 the driver is being detached. Once users
	 * reaches 0 then the iommu_group is invalid.
	 */
	refcount_t drivers;
	unsigned int container_users;	/* opens currently using the container */
	struct iommu_group *iommu_group;
	struct vfio_container *container;
	struct list_head device_list;	/* vfio_devices in this group */
	struct mutex device_lock;	/* protects device_list */
	struct list_head vfio_next;	/* entry in the core's global group list */
#if IS_ENABLED(CONFIG_VFIO_CONTAINER)
	struct list_head container_next;
#endif
	enum vfio_group_type type;
	struct mutex group_lock;
	struct kvm *kvm;
	struct file *opened_file;	/* only one open of the group FD is allowed */
	struct blocking_notifier_head notifier;
	struct iommufd_ctx *iommufd;
	spinlock_t kvm_ref_lock;	/* protects kvm, like vfio_device_file::kvm_ref_lock */
	unsigned int cdev_device_open_cnt;
};
  87. int vfio_device_block_group(struct vfio_device *device);
  88. void vfio_device_unblock_group(struct vfio_device *device);
  89. int vfio_device_set_group(struct vfio_device *device,
  90. enum vfio_group_type type);
  91. void vfio_device_remove_group(struct vfio_device *device);
  92. void vfio_device_group_register(struct vfio_device *device);
  93. void vfio_device_group_unregister(struct vfio_device *device);
  94. int vfio_device_group_use_iommu(struct vfio_device *device);
  95. void vfio_device_group_unuse_iommu(struct vfio_device *device);
  96. void vfio_df_group_close(struct vfio_device_file *df);
  97. struct vfio_group *vfio_group_from_file(struct file *file);
  98. bool vfio_group_enforced_coherent(struct vfio_group *group);
  99. void vfio_group_set_kvm(struct vfio_group *group, struct kvm *kvm);
  100. bool vfio_device_has_container(struct vfio_device *device);
  101. int __init vfio_group_init(void);
  102. void vfio_group_cleanup(void);
  103. static inline bool vfio_device_is_noiommu(struct vfio_device *vdev)
  104. {
  105. return IS_ENABLED(CONFIG_VFIO_NOIOMMU) &&
  106. vdev->group->type == VFIO_NO_IOMMU;
  107. }
  108. #else
/*
 * Stubs used when CONFIG_VFIO_GROUP is disabled: the legacy group uAPI
 * does not exist, so group setup helpers trivially succeed, lookups
 * return NULL/false, and using the group IOMMU path is unsupported.
 */
struct vfio_group;

static inline int vfio_device_block_group(struct vfio_device *device)
{
	return 0;
}

static inline void vfio_device_unblock_group(struct vfio_device *device)
{
}

static inline int vfio_device_set_group(struct vfio_device *device,
					enum vfio_group_type type)
{
	return 0;
}

static inline void vfio_device_remove_group(struct vfio_device *device)
{
}

static inline void vfio_device_group_register(struct vfio_device *device)
{
}

static inline void vfio_device_group_unregister(struct vfio_device *device)
{
}

static inline int vfio_device_group_use_iommu(struct vfio_device *device)
{
	return -EOPNOTSUPP;
}

static inline void vfio_device_group_unuse_iommu(struct vfio_device *device)
{
}

static inline void vfio_df_group_close(struct vfio_device_file *df)
{
}

static inline struct vfio_group *vfio_group_from_file(struct file *file)
{
	return NULL;
}

/* No group means no legacy coherency opt-out; report enforced coherency. */
static inline bool vfio_group_enforced_coherent(struct vfio_group *group)
{
	return true;
}

static inline void vfio_group_set_kvm(struct vfio_group *group, struct kvm *kvm)
{
}

static inline bool vfio_device_has_container(struct vfio_device *device)
{
	return false;
}

static inline int __init vfio_group_init(void)
{
	return 0;
}

static inline void vfio_group_cleanup(void)
{
}

static inline bool vfio_device_is_noiommu(struct vfio_device *vdev)
{
	return false;
}
  167. #endif /* CONFIG_VFIO_GROUP */
  168. #if IS_ENABLED(CONFIG_VFIO_CONTAINER)
/**
 * struct vfio_iommu_driver_ops - VFIO IOMMU driver callbacks
 *
 * Backend callback table registered with the core via
 * vfio_register_iommu_driver(). @open creates per-container state
 * (iommu_data) that is passed back to every other callback;
 * @release destroys it.
 */
struct vfio_iommu_driver_ops {
	char *name;
	struct module *owner;
	void *(*open)(unsigned long arg);
	void (*release)(void *iommu_data);
	long (*ioctl)(void *iommu_data, unsigned int cmd,
		      unsigned long arg);
	int (*attach_group)(void *iommu_data,
			    struct iommu_group *group,
			    enum vfio_group_type);
	void (*detach_group)(void *iommu_data,
			     struct iommu_group *group);
	int (*pin_pages)(void *iommu_data,
			 struct iommu_group *group,
			 dma_addr_t user_iova,
			 int npage, int prot,
			 struct page **pages);
	void (*unpin_pages)(void *iommu_data,
			    dma_addr_t user_iova, int npage);
	void (*register_device)(void *iommu_data,
				struct vfio_device *vdev);
	void (*unregister_device)(void *iommu_data,
				  struct vfio_device *vdev);
	int (*dma_rw)(void *iommu_data, dma_addr_t user_iova,
		      void *data, size_t count, bool write);
	struct iommu_domain *(*group_iommu_domain)(void *iommu_data,
						   struct iommu_group *group);
};
/* One registered IOMMU backend; linked into the core's list via vfio_next. */
struct vfio_iommu_driver {
	const struct vfio_iommu_driver_ops *ops;
	struct list_head vfio_next;
};
  204. int vfio_register_iommu_driver(const struct vfio_iommu_driver_ops *ops);
  205. void vfio_unregister_iommu_driver(const struct vfio_iommu_driver_ops *ops);
  206. struct vfio_container *vfio_container_from_file(struct file *filep);
  207. int vfio_group_use_container(struct vfio_group *group);
  208. void vfio_group_unuse_container(struct vfio_group *group);
  209. int vfio_container_attach_group(struct vfio_container *container,
  210. struct vfio_group *group);
  211. void vfio_group_detach_container(struct vfio_group *group);
  212. void vfio_device_container_register(struct vfio_device *device);
  213. void vfio_device_container_unregister(struct vfio_device *device);
  214. int vfio_device_container_pin_pages(struct vfio_device *device,
  215. dma_addr_t iova, int npage,
  216. int prot, struct page **pages);
  217. void vfio_device_container_unpin_pages(struct vfio_device *device,
  218. dma_addr_t iova, int npage);
  219. int vfio_device_container_dma_rw(struct vfio_device *device,
  220. dma_addr_t iova, void *data,
  221. size_t len, bool write);
  222. int __init vfio_container_init(void);
  223. void vfio_container_cleanup(void);
  224. #else
/*
 * Stubs used when CONFIG_VFIO_CONTAINER is disabled: the legacy
 * container backend is absent, so "use"/attach operations fail with
 * -EOPNOTSUPP and registration/teardown hooks become no-ops.
 */
static inline struct vfio_container *
vfio_container_from_file(struct file *filep)
{
	return NULL;
}

static inline int vfio_group_use_container(struct vfio_group *group)
{
	return -EOPNOTSUPP;
}

static inline void vfio_group_unuse_container(struct vfio_group *group)
{
}

static inline int vfio_container_attach_group(struct vfio_container *container,
					      struct vfio_group *group)
{
	return -EOPNOTSUPP;
}

static inline void vfio_group_detach_container(struct vfio_group *group)
{
}

static inline void vfio_device_container_register(struct vfio_device *device)
{
}

static inline void vfio_device_container_unregister(struct vfio_device *device)
{
}

static inline int vfio_device_container_pin_pages(struct vfio_device *device,
						  dma_addr_t iova, int npage,
						  int prot, struct page **pages)
{
	return -EOPNOTSUPP;
}

static inline void vfio_device_container_unpin_pages(struct vfio_device *device,
						     dma_addr_t iova, int npage)
{
}

static inline int vfio_device_container_dma_rw(struct vfio_device *device,
					       dma_addr_t iova, void *data,
					       size_t len, bool write)
{
	return -EOPNOTSUPP;
}

static inline int vfio_container_init(void)
{
	return 0;
}

static inline void vfio_container_cleanup(void)
{
}
  274. #endif
  275. #if IS_ENABLED(CONFIG_IOMMUFD)
  276. bool vfio_iommufd_device_has_compat_ioas(struct vfio_device *vdev,
  277. struct iommufd_ctx *ictx);
  278. int vfio_df_iommufd_bind(struct vfio_device_file *df);
  279. void vfio_df_iommufd_unbind(struct vfio_device_file *df);
  280. int vfio_iommufd_compat_attach_ioas(struct vfio_device *device,
  281. struct iommufd_ctx *ictx);
  282. #else
  283. static inline bool
  284. vfio_iommufd_device_has_compat_ioas(struct vfio_device *vdev,
  285. struct iommufd_ctx *ictx)
  286. {
  287. return false;
  288. }
  289. static inline int vfio_df_iommufd_bind(struct vfio_device_file *fd)
  290. {
  291. return -EOPNOTSUPP;
  292. }
  293. static inline void vfio_df_iommufd_unbind(struct vfio_device_file *df)
  294. {
  295. }
  296. static inline int
  297. vfio_iommufd_compat_attach_ioas(struct vfio_device *device,
  298. struct iommufd_ctx *ictx)
  299. {
  300. return -EOPNOTSUPP;
  301. }
  302. #endif
  303. int vfio_df_ioctl_attach_pt(struct vfio_device_file *df,
  304. struct vfio_device_attach_iommufd_pt __user *arg);
  305. int vfio_df_ioctl_detach_pt(struct vfio_device_file *df,
  306. struct vfio_device_detach_iommufd_pt __user *arg);
  307. #if IS_ENABLED(CONFIG_VFIO_DEVICE_CDEV)
  308. void vfio_init_device_cdev(struct vfio_device *device);
  309. static inline int vfio_device_add(struct vfio_device *device)
  310. {
  311. /* cdev does not support noiommu device */
  312. if (vfio_device_is_noiommu(device))
  313. return device_add(&device->device);
  314. vfio_init_device_cdev(device);
  315. return cdev_device_add(&device->cdev, &device->device);
  316. }
  317. static inline void vfio_device_del(struct vfio_device *device)
  318. {
  319. if (vfio_device_is_noiommu(device))
  320. device_del(&device->device);
  321. else
  322. cdev_device_del(&device->cdev, &device->device);
  323. }
  324. int vfio_device_fops_cdev_open(struct inode *inode, struct file *filep);
  325. long vfio_df_ioctl_bind_iommufd(struct vfio_device_file *df,
  326. struct vfio_device_bind_iommufd __user *arg);
  327. void vfio_df_unbind_iommufd(struct vfio_device_file *df);
  328. int vfio_cdev_init(struct class *device_class);
  329. void vfio_cdev_cleanup(void);
  330. #else
/*
 * Stubs used when CONFIG_VFIO_DEVICE_CDEV is disabled: devices register
 * a plain struct device (no per-device char node) and the cdev-only
 * ioctls report -ENOTTY.
 */
static inline void vfio_init_device_cdev(struct vfio_device *device)
{
}

static inline int vfio_device_add(struct vfio_device *device)
{
	return device_add(&device->device);
}

static inline void vfio_device_del(struct vfio_device *device)
{
	device_del(&device->device);
}

static inline int vfio_device_fops_cdev_open(struct inode *inode,
					     struct file *filep)
{
	return 0;
}

static inline long vfio_df_ioctl_bind_iommufd(struct vfio_device_file *df,
					      struct vfio_device_bind_iommufd __user *arg)
{
	return -ENOTTY;
}

static inline void vfio_df_unbind_iommufd(struct vfio_device_file *df)
{
}

static inline int vfio_cdev_init(struct class *device_class)
{
	return 0;
}

static inline void vfio_cdev_cleanup(void)
{
}
  362. #endif /* CONFIG_VFIO_DEVICE_CDEV */
  363. #if IS_ENABLED(CONFIG_VFIO_VIRQFD)
  364. int __init vfio_virqfd_init(void);
  365. void vfio_virqfd_exit(void);
  366. #else
/* Stubs when CONFIG_VFIO_VIRQFD is disabled: init/exit are no-ops. */
static inline int __init vfio_virqfd_init(void)
{
	return 0;
}

static inline void vfio_virqfd_exit(void)
{
}
  374. #endif
  375. #if IS_ENABLED(CONFIG_KVM)
  376. void vfio_device_get_kvm_safe(struct vfio_device *device, struct kvm *kvm);
  377. void vfio_device_put_kvm(struct vfio_device *device);
  378. #else
/* Stubs when CONFIG_KVM is disabled: no kvm reference is ever taken. */
static inline void vfio_device_get_kvm_safe(struct vfio_device *device,
					    struct kvm *kvm)
{
}

static inline void vfio_device_put_kvm(struct vfio_device *device)
{
}
  386. #endif
  387. #ifdef CONFIG_VFIO_DEBUGFS
  388. void vfio_debugfs_create_root(void);
  389. void vfio_debugfs_remove_root(void);
  390. void vfio_device_debugfs_init(struct vfio_device *vdev);
  391. void vfio_device_debugfs_exit(struct vfio_device *vdev);
  392. #else
/* Stubs when CONFIG_VFIO_DEBUGFS is disabled: all debugfs hooks are no-ops. */
static inline void vfio_debugfs_create_root(void) { }
static inline void vfio_debugfs_remove_root(void) { }
static inline void vfio_device_debugfs_init(struct vfio_device *vdev) { }
static inline void vfio_device_debugfs_exit(struct vfio_device *vdev) { }
  397. #endif /* CONFIG_VFIO_DEBUGFS */
  398. #endif