grant-dma-ops.c

// SPDX-License-Identifier: GPL-2.0-only
/*
 * Xen grant DMA-mapping layer - contains special DMA-mapping routines
 * for providing grant references as DMA addresses to be used by frontends
 * (e.g. virtio) in Xen guests
 *
 * Copyright (c) 2021, Juergen Gross <jgross@suse.com>
 */

#include <linux/module.h>
#include <linux/dma-map-ops.h>
#include <linux/of.h>
#include <linux/pci.h>
#include <linux/pfn.h>
#include <linux/xarray.h>
#include <linux/virtio_anchor.h>
#include <linux/virtio.h>
#include <xen/xen.h>
#include <xen/xen-ops.h>
#include <xen/grant_table.h>

struct xen_grant_dma_data {
	/* The ID of the backend domain */
	domid_t backend_domid;
	/* Is the device behaving sanely? */
	bool broken;
};

static DEFINE_XARRAY_FLAGS(xen_grant_dma_devices, XA_FLAGS_LOCK_IRQ);

#define XEN_GRANT_DMA_ADDR_OFF	(1ULL << 63)

static inline dma_addr_t grant_to_dma(grant_ref_t grant)
{
	return XEN_GRANT_DMA_ADDR_OFF | ((dma_addr_t)grant << XEN_PAGE_SHIFT);
}

static inline grant_ref_t dma_to_grant(dma_addr_t dma)
{
	return (grant_ref_t)((dma & ~XEN_GRANT_DMA_ADDR_OFF) >> XEN_PAGE_SHIFT);
}
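
/*
 * Per-device state lives in the xen_grant_dma_devices xarray, keyed by the
 * struct device pointer. Lookups and stores take the xarray's IRQ-safe lock;
 * the store uses GFP_ATOMIC because it runs under that lock.
 */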
static struct xen_grant_dma_data *find_xen_grant_dma_data(struct device *dev)
{
	struct xen_grant_dma_data *data;
	unsigned long flags;

	xa_lock_irqsave(&xen_grant_dma_devices, flags);
	data = xa_load(&xen_grant_dma_devices, (unsigned long)dev);
	xa_unlock_irqrestore(&xen_grant_dma_devices, flags);

	return data;
}

static int store_xen_grant_dma_data(struct device *dev,
				    struct xen_grant_dma_data *data)
{
	unsigned long flags;
	int ret;

	xa_lock_irqsave(&xen_grant_dma_devices, flags);
	ret = xa_err(__xa_store(&xen_grant_dma_devices, (unsigned long)dev, data,
			GFP_ATOMIC));
	xa_unlock_irqrestore(&xen_grant_dma_devices, flags);

	return ret;
}

/*
 * DMA ops for Xen frontends (e.g. virtio).
 *
 * Used to act as a kind of software IOMMU for Xen guests by using grants as
 * DMA addresses.
 * Such a DMA address is formed by using the grant reference as a frame
 * number and setting the highest address bit (this bit lets the backend
 * distinguish it from e.g. an MMIO address).
 */
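
/*
 * Worked example (assuming XEN_PAGE_SHIFT == 12, i.e. 4 KiB Xen pages):
 * grant reference 5 encodes as
 *	grant_to_dma(5) == (1ULL << 63) | (5 << 12) == 0x8000000000005000
 * and dma_to_grant() recovers 5 from that address.
 */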
static void *xen_grant_dma_alloc(struct device *dev, size_t size,
				 dma_addr_t *dma_handle, gfp_t gfp,
				 unsigned long attrs)
{
	struct xen_grant_dma_data *data;
	unsigned int i, n_pages = XEN_PFN_UP(size);
	unsigned long pfn;
	grant_ref_t grant;
	void *ret;

	data = find_xen_grant_dma_data(dev);
	if (!data)
		return NULL;

	if (unlikely(data->broken))
		return NULL;

	ret = alloc_pages_exact(n_pages * XEN_PAGE_SIZE, gfp);
	if (!ret)
		return NULL;

	pfn = virt_to_pfn(ret);

	if (gnttab_alloc_grant_reference_seq(n_pages, &grant)) {
		free_pages_exact(ret, n_pages * XEN_PAGE_SIZE);
		return NULL;
	}

	for (i = 0; i < n_pages; i++) {
		gnttab_grant_foreign_access_ref(grant + i, data->backend_domid,
				pfn_to_gfn(pfn + i), 0);
	}

	*dma_handle = grant_to_dma(grant);

	return ret;
}
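
/*
 * Tear-down mirrors the allocation path: revoke the backend's access to each
 * granted page, then release the grant references and the pages. If the
 * backend still holds a grant, the device is marked broken and the pages are
 * deliberately leaked, since the backend could still write to them.
 */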
static void xen_grant_dma_free(struct device *dev, size_t size, void *vaddr,
			       dma_addr_t dma_handle, unsigned long attrs)
{
	struct xen_grant_dma_data *data;
	unsigned int i, n_pages = XEN_PFN_UP(size);
	grant_ref_t grant;

	data = find_xen_grant_dma_data(dev);
	if (!data)
		return;

	if (unlikely(data->broken))
		return;

	grant = dma_to_grant(dma_handle);

	for (i = 0; i < n_pages; i++) {
		if (unlikely(!gnttab_end_foreign_access_ref(grant + i))) {
			dev_alert(dev, "Grant still in use by backend domain, disabled for further use\n");
			data->broken = true;
			return;
		}
	}

	gnttab_free_grant_reference_seq(grant, n_pages);

	free_pages_exact(vaddr, n_pages * XEN_PAGE_SIZE);
}

static struct page *xen_grant_dma_alloc_pages(struct device *dev, size_t size,
					      dma_addr_t *dma_handle,
					      enum dma_data_direction dir,
					      gfp_t gfp)
{
	void *vaddr;

	vaddr = xen_grant_dma_alloc(dev, size, dma_handle, gfp, 0);
	if (!vaddr)
		return NULL;

	return virt_to_page(vaddr);
}

static void xen_grant_dma_free_pages(struct device *dev, size_t size,
				     struct page *vaddr, dma_addr_t dma_handle,
				     enum dma_data_direction dir)
{
	xen_grant_dma_free(dev, size, page_to_virt(vaddr), dma_handle, 0);
}
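
/*
 * Streaming mappings grant whole Xen pages but keep the sub-page offset in
 * the returned handle. Example (assuming 4 KiB Xen pages): mapping offset
 * 0x1800 with size 0x1000 grants two pages (pfn_offset = 1, n_pages = 2) and
 * returns grant_to_dma(grant) + 0x800. A DMA_TO_DEVICE mapping is granted
 * read-only; everything else is granted writable.
 */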
static dma_addr_t xen_grant_dma_map_page(struct device *dev, struct page *page,
					 unsigned long offset, size_t size,
					 enum dma_data_direction dir,
					 unsigned long attrs)
{
	struct xen_grant_dma_data *data;
	unsigned long dma_offset = xen_offset_in_page(offset),
			pfn_offset = XEN_PFN_DOWN(offset);
	unsigned int i, n_pages = XEN_PFN_UP(dma_offset + size);
	grant_ref_t grant;
	dma_addr_t dma_handle;

	if (WARN_ON(dir == DMA_NONE))
		return DMA_MAPPING_ERROR;

	data = find_xen_grant_dma_data(dev);
	if (!data)
		return DMA_MAPPING_ERROR;

	if (unlikely(data->broken))
		return DMA_MAPPING_ERROR;

	if (gnttab_alloc_grant_reference_seq(n_pages, &grant))
		return DMA_MAPPING_ERROR;

	for (i = 0; i < n_pages; i++) {
		gnttab_grant_foreign_access_ref(grant + i, data->backend_domid,
				pfn_to_gfn(page_to_xen_pfn(page) + i + pfn_offset),
				dir == DMA_TO_DEVICE);
	}

	dma_handle = grant_to_dma(grant) + dma_offset;

	return dma_handle;
}

static void xen_grant_dma_unmap_page(struct device *dev, dma_addr_t dma_handle,
				     size_t size, enum dma_data_direction dir,
				     unsigned long attrs)
{
	struct xen_grant_dma_data *data;
	unsigned long dma_offset = xen_offset_in_page(dma_handle);
	unsigned int i, n_pages = XEN_PFN_UP(dma_offset + size);
	grant_ref_t grant;

	if (WARN_ON(dir == DMA_NONE))
		return;

	data = find_xen_grant_dma_data(dev);
	if (!data)
		return;

	if (unlikely(data->broken))
		return;

	grant = dma_to_grant(dma_handle);

	for (i = 0; i < n_pages; i++) {
		if (unlikely(!gnttab_end_foreign_access_ref(grant + i))) {
			dev_alert(dev, "Grant still in use by backend domain, disabled for further use\n");
			data->broken = true;
			return;
		}
	}

	gnttab_free_grant_reference_seq(grant, n_pages);
}
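
/*
 * Scatter-gather is handled entry by entry. xen_grant_dma_unmap_sg() is
 * defined first so that the map_sg() error path below can reuse it to undo
 * the entries mapped before a failure.
 */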
static void xen_grant_dma_unmap_sg(struct device *dev, struct scatterlist *sg,
				   int nents, enum dma_data_direction dir,
				   unsigned long attrs)
{
	struct scatterlist *s;
	unsigned int i;

	if (WARN_ON(dir == DMA_NONE))
		return;

	for_each_sg(sg, s, nents, i)
		xen_grant_dma_unmap_page(dev, s->dma_address, sg_dma_len(s), dir,
				attrs);
}

static int xen_grant_dma_map_sg(struct device *dev, struct scatterlist *sg,
				int nents, enum dma_data_direction dir,
				unsigned long attrs)
{
	struct scatterlist *s;
	unsigned int i;

	if (WARN_ON(dir == DMA_NONE))
		return -EINVAL;

	for_each_sg(sg, s, nents, i) {
		s->dma_address = xen_grant_dma_map_page(dev, sg_page(s), s->offset,
				s->length, dir, attrs);
		if (s->dma_address == DMA_MAPPING_ERROR)
			goto out;

		sg_dma_len(s) = s->length;
	}

	return nents;

out:
	xen_grant_dma_unmap_sg(dev, sg, i, dir, attrs | DMA_ATTR_SKIP_CPU_SYNC);
	sg_dma_len(sg) = 0;

	return -EIO;
}
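
/*
 * Bit 63 of the DMA address carries the grant marker, so only devices that
 * advertise a full 64-bit DMA mask can use these ops.
 */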
static int xen_grant_dma_supported(struct device *dev, u64 mask)
{
	return mask == DMA_BIT_MASK(64);
}

static const struct dma_map_ops xen_grant_dma_ops = {
	.alloc = xen_grant_dma_alloc,
	.free = xen_grant_dma_free,
	.alloc_pages_op = xen_grant_dma_alloc_pages,
	.free_pages = xen_grant_dma_free_pages,
	.mmap = dma_common_mmap,
	.get_sgtable = dma_common_get_sgtable,
	.map_page = xen_grant_dma_map_page,
	.unmap_page = xen_grant_dma_unmap_page,
	.map_sg = xen_grant_dma_map_sg,
	.unmap_sg = xen_grant_dma_unmap_sg,
	.dma_supported = xen_grant_dma_supported,
};
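
/*
 * Backend domid discovery: a PCI device has no OF node of its own, so walk up
 * to the root bus and use the device tree node of the PCI host controller
 * instead.
 */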
static struct device_node *xen_dt_get_node(struct device *dev)
{
	if (dev_is_pci(dev)) {
		struct pci_dev *pdev = to_pci_dev(dev);
		struct pci_bus *bus = pdev->bus;

		/* Walk up to the root bus to look for the PCI Host controller */
		while (!pci_is_root_bus(bus))
			bus = bus->parent;
		if (!bus->bridge->parent)
			return NULL;
		return of_node_get(bus->bridge->parent->of_node);
	}

	return of_node_get(dev->of_node);
}
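
/*
 * A minimal sketch of the device tree binding this parser expects (labels and
 * unit addresses are illustrative): the "xen,grant-dma" pseudo-IOMMU carries
 * the backend domid in its single IOMMU specifier cell.
 *
 *	xen_backend: xen_grant_dma {
 *		compatible = "xen,grant-dma";
 *		#iommu-cells = <1>;
 *	};
 *
 *	virtio@2000 {
 *		...
 *		iommus = <&xen_backend 1>;	// backend runs in domain 1
 *	};
 *
 * PCI devices are instead routed through an "iommu-map" property on the host
 * controller node.
 */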
static int xen_dt_grant_init_backend_domid(struct device *dev,
					   struct device_node *np,
					   domid_t *backend_domid)
{
	struct of_phandle_args iommu_spec = { .args_count = 1 };

	if (dev_is_pci(dev)) {
		struct pci_dev *pdev = to_pci_dev(dev);
		u32 rid = PCI_DEVID(pdev->bus->number, pdev->devfn);

		if (of_map_id(np, rid, "iommu-map", "iommu-map-mask", &iommu_spec.np,
				iommu_spec.args)) {
			dev_dbg(dev, "Cannot translate ID\n");
			return -ESRCH;
		}
	} else {
		if (of_parse_phandle_with_args(np, "iommus", "#iommu-cells",
				0, &iommu_spec)) {
			dev_dbg(dev, "Cannot parse iommus property\n");
			return -ESRCH;
		}
	}

	if (!of_device_is_compatible(iommu_spec.np, "xen,grant-dma") ||
			iommu_spec.args_count != 1) {
		dev_dbg(dev, "Incompatible IOMMU node\n");
		of_node_put(iommu_spec.np);
		return -ESRCH;
	}

	of_node_put(iommu_spec.np);

	/*
	 * The endpoint ID here means the ID of the domain where the
	 * corresponding backend is running
	 */
	*backend_domid = iommu_spec.args[0];

	return 0;
}

static int xen_grant_init_backend_domid(struct device *dev,
					domid_t *backend_domid)
{
	struct device_node *np;
	int ret = -ENODEV;

	np = xen_dt_get_node(dev);
	if (np) {
		ret = xen_dt_grant_init_backend_domid(dev, np, backend_domid);
		of_node_put(np);
	} else if (IS_ENABLED(CONFIG_XEN_VIRTIO_FORCE_GRANT) || xen_pv_domain()) {
		dev_info(dev, "Using dom0 as backend\n");
		*backend_domid = 0;
		ret = 0;
	}

	return ret;
}
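
/*
 * Install the grant DMA ops for a device. The per-device data is allocated
 * with devm_kzalloc(), so it is released automatically when the device is
 * unbound.
 */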
static void xen_grant_setup_dma_ops(struct device *dev, domid_t backend_domid)
{
	struct xen_grant_dma_data *data;

	data = find_xen_grant_dma_data(dev);
	if (data) {
		dev_err(dev, "Xen grant DMA data is already created\n");
		return;
	}

	data = devm_kzalloc(dev, sizeof(*data), GFP_KERNEL);
	if (!data)
		goto err;

	data->backend_domid = backend_domid;

	if (store_xen_grant_dma_data(dev, data)) {
		dev_err(dev, "Cannot store Xen grant DMA data\n");
		goto err;
	}

	dev->dma_ops = &xen_grant_dma_ops;

	return;

err:
	devm_kfree(dev, data);
	dev_err(dev, "Cannot set up Xen grant DMA ops, retain platform DMA ops\n");
}
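
/*
 * Entry point for virtio: this callback is typically registered during early
 * Xen setup via virtio_set_mem_acc_cb() (see <linux/virtio_anchor.h>).
 * Returning true tells the virtio core that the device must use restricted
 * memory access, i.e. the grant-based DMA ops installed above.
 */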
bool xen_virtio_restricted_mem_acc(struct virtio_device *dev)
{
	domid_t backend_domid;

	if (!xen_grant_init_backend_domid(dev->dev.parent, &backend_domid)) {
		xen_grant_setup_dma_ops(dev->dev.parent, backend_domid);
		return true;
	}

	return false;
}

MODULE_DESCRIPTION("Xen grant DMA-mapping layer");
MODULE_AUTHOR("Juergen Gross <jgross@suse.com>");
MODULE_LICENSE("GPL");