mtk_gem.c 6.8 KB

123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163164165166167168169170171172173174175176177178179180181182183184185186187188189190191192193194195196197198199200201202203204205206207208209210211212213214215216217218219220221222223224225226227228229230231232233234235236237238239240241242243244245246247248249250251252253254255256257258259260261262263264265266267268269270271272273274275276277278279280281282283284285286287288
  1. // SPDX-License-Identifier: GPL-2.0-only
  2. /*
  3. * Copyright (c) 2015 MediaTek Inc.
  4. */
  5. #include <linux/dma-buf.h>
  6. #include <linux/vmalloc.h>
  7. #include <drm/drm.h>
  8. #include <drm/drm_device.h>
  9. #include <drm/drm_gem.h>
  10. #include <drm/drm_gem_dma_helper.h>
  11. #include <drm/drm_prime.h>
  12. #include "mtk_drm_drv.h"
  13. #include "mtk_gem.h"
/* Forward declaration: the mmap handler is referenced by the funcs table below. */
static int mtk_gem_object_mmap(struct drm_gem_object *obj, struct vm_area_struct *vma);

/* VMA lifetime hooks: hold a GEM reference while a userspace mapping exists. */
static const struct vm_operations_struct vm_ops = {
	.open = drm_gem_vm_open,
	.close = drm_gem_vm_close,
};

/* GEM callbacks wiring this driver's DMA-backed objects into the DRM core. */
static const struct drm_gem_object_funcs mtk_gem_object_funcs = {
	.free = mtk_gem_free_object,
	.get_sg_table = mtk_gem_prime_get_sg_table,
	.vmap = mtk_gem_prime_vmap,
	.vunmap = mtk_gem_prime_vunmap,
	.mmap = mtk_gem_object_mmap,
	.vm_ops = &vm_ops,
};
  27. static struct mtk_gem_obj *mtk_gem_init(struct drm_device *dev,
  28. unsigned long size)
  29. {
  30. struct mtk_gem_obj *mtk_gem_obj;
  31. int ret;
  32. size = round_up(size, PAGE_SIZE);
  33. if (size == 0)
  34. return ERR_PTR(-EINVAL);
  35. mtk_gem_obj = kzalloc(sizeof(*mtk_gem_obj), GFP_KERNEL);
  36. if (!mtk_gem_obj)
  37. return ERR_PTR(-ENOMEM);
  38. mtk_gem_obj->base.funcs = &mtk_gem_object_funcs;
  39. ret = drm_gem_object_init(dev, &mtk_gem_obj->base, size);
  40. if (ret < 0) {
  41. DRM_ERROR("failed to initialize gem object\n");
  42. kfree(mtk_gem_obj);
  43. return ERR_PTR(ret);
  44. }
  45. return mtk_gem_obj;
  46. }
  47. struct mtk_gem_obj *mtk_gem_create(struct drm_device *dev,
  48. size_t size, bool alloc_kmap)
  49. {
  50. struct mtk_drm_private *priv = dev->dev_private;
  51. struct mtk_gem_obj *mtk_gem;
  52. struct drm_gem_object *obj;
  53. int ret;
  54. mtk_gem = mtk_gem_init(dev, size);
  55. if (IS_ERR(mtk_gem))
  56. return ERR_CAST(mtk_gem);
  57. obj = &mtk_gem->base;
  58. mtk_gem->dma_attrs = DMA_ATTR_WRITE_COMBINE;
  59. if (!alloc_kmap)
  60. mtk_gem->dma_attrs |= DMA_ATTR_NO_KERNEL_MAPPING;
  61. mtk_gem->cookie = dma_alloc_attrs(priv->dma_dev, obj->size,
  62. &mtk_gem->dma_addr, GFP_KERNEL,
  63. mtk_gem->dma_attrs);
  64. if (!mtk_gem->cookie) {
  65. DRM_ERROR("failed to allocate %zx byte dma buffer", obj->size);
  66. ret = -ENOMEM;
  67. goto err_gem_free;
  68. }
  69. if (alloc_kmap)
  70. mtk_gem->kvaddr = mtk_gem->cookie;
  71. DRM_DEBUG_DRIVER("cookie = %p dma_addr = %pad size = %zu\n",
  72. mtk_gem->cookie, &mtk_gem->dma_addr,
  73. size);
  74. return mtk_gem;
  75. err_gem_free:
  76. drm_gem_object_release(obj);
  77. kfree(mtk_gem);
  78. return ERR_PTR(ret);
  79. }
  80. void mtk_gem_free_object(struct drm_gem_object *obj)
  81. {
  82. struct mtk_gem_obj *mtk_gem = to_mtk_gem_obj(obj);
  83. struct mtk_drm_private *priv = obj->dev->dev_private;
  84. if (mtk_gem->sg)
  85. drm_prime_gem_destroy(obj, mtk_gem->sg);
  86. else
  87. dma_free_attrs(priv->dma_dev, obj->size, mtk_gem->cookie,
  88. mtk_gem->dma_addr, mtk_gem->dma_attrs);
  89. /* release file pointer to gem object. */
  90. drm_gem_object_release(obj);
  91. kfree(mtk_gem);
  92. }
  93. int mtk_gem_dumb_create(struct drm_file *file_priv, struct drm_device *dev,
  94. struct drm_mode_create_dumb *args)
  95. {
  96. struct mtk_gem_obj *mtk_gem;
  97. int ret;
  98. args->pitch = DIV_ROUND_UP(args->width * args->bpp, 8);
  99. /*
  100. * Multiply 2 variables of different types,
  101. * for example: args->size = args->spacing * args->height;
  102. * may cause coverity issue with unintentional overflow.
  103. */
  104. args->size = args->pitch;
  105. args->size *= args->height;
  106. mtk_gem = mtk_gem_create(dev, args->size, false);
  107. if (IS_ERR(mtk_gem))
  108. return PTR_ERR(mtk_gem);
  109. /*
  110. * allocate a id of idr table where the obj is registered
  111. * and handle has the id what user can see.
  112. */
  113. ret = drm_gem_handle_create(file_priv, &mtk_gem->base, &args->handle);
  114. if (ret)
  115. goto err_handle_create;
  116. /* drop reference from allocate - handle holds it now. */
  117. drm_gem_object_put(&mtk_gem->base);
  118. return 0;
  119. err_handle_create:
  120. mtk_gem_free_object(&mtk_gem->base);
  121. return ret;
  122. }
/*
 * GEM ->mmap callback: map the object's DMA buffer into @vma via
 * dma_mmap_attrs() with write-combined, decrypted page protection.
 */
static int mtk_gem_object_mmap(struct drm_gem_object *obj,
			       struct vm_area_struct *vma)
{
	int ret;
	struct mtk_gem_obj *mtk_gem = to_mtk_gem_obj(obj);
	struct mtk_drm_private *priv = obj->dev->dev_private;

	/*
	 * Set vm_pgoff (used as a fake buffer offset by DRM) to 0 and map the
	 * whole buffer from the start.
	 */
	vma->vm_pgoff = 0;

	/*
	 * NOTE(review): an earlier version of this code cleared VM_PFNMAP
	 * (set by drm_gem_mmap_obj()/drm_gem_mmap()) here; vm_flags_set()
	 * below only *adds* VM_IO | VM_DONTEXPAND | VM_DONTDUMP, and the
	 * actual mapping is established by dma_mmap_attrs() — confirm the
	 * flag no longer needs explicit clearing.
	 */
	vm_flags_set(vma, VM_IO | VM_DONTEXPAND | VM_DONTDUMP);
	vma->vm_page_prot = pgprot_writecombine(vm_get_page_prot(vma->vm_flags));
	vma->vm_page_prot = pgprot_decrypted(vma->vm_page_prot);

	ret = dma_mmap_attrs(priv->dma_dev, vma, mtk_gem->cookie,
			     mtk_gem->dma_addr, obj->size, mtk_gem->dma_attrs);

	return ret;
}
  145. /*
  146. * Allocate a sg_table for this GEM object.
  147. * Note: Both the table's contents, and the sg_table itself must be freed by
  148. * the caller.
  149. * Returns a pointer to the newly allocated sg_table, or an ERR_PTR() error.
  150. */
  151. struct sg_table *mtk_gem_prime_get_sg_table(struct drm_gem_object *obj)
  152. {
  153. struct mtk_gem_obj *mtk_gem = to_mtk_gem_obj(obj);
  154. struct mtk_drm_private *priv = obj->dev->dev_private;
  155. struct sg_table *sgt;
  156. int ret;
  157. sgt = kzalloc(sizeof(*sgt), GFP_KERNEL);
  158. if (!sgt)
  159. return ERR_PTR(-ENOMEM);
  160. ret = dma_get_sgtable_attrs(priv->dma_dev, sgt, mtk_gem->cookie,
  161. mtk_gem->dma_addr, obj->size,
  162. mtk_gem->dma_attrs);
  163. if (ret) {
  164. DRM_ERROR("failed to allocate sgt, %d\n", ret);
  165. kfree(sgt);
  166. return ERR_PTR(ret);
  167. }
  168. return sgt;
  169. }
  170. struct drm_gem_object *mtk_gem_prime_import_sg_table(struct drm_device *dev,
  171. struct dma_buf_attachment *attach, struct sg_table *sg)
  172. {
  173. struct mtk_gem_obj *mtk_gem;
  174. /* check if the entries in the sg_table are contiguous */
  175. if (drm_prime_get_contiguous_size(sg) < attach->dmabuf->size) {
  176. DRM_ERROR("sg_table is not contiguous");
  177. return ERR_PTR(-EINVAL);
  178. }
  179. mtk_gem = mtk_gem_init(dev, attach->dmabuf->size);
  180. if (IS_ERR(mtk_gem))
  181. return ERR_CAST(mtk_gem);
  182. mtk_gem->dma_addr = sg_dma_address(sg->sgl);
  183. mtk_gem->sg = sg;
  184. return &mtk_gem->base;
  185. }
  186. int mtk_gem_prime_vmap(struct drm_gem_object *obj, struct iosys_map *map)
  187. {
  188. struct mtk_gem_obj *mtk_gem = to_mtk_gem_obj(obj);
  189. struct sg_table *sgt = NULL;
  190. unsigned int npages;
  191. if (mtk_gem->kvaddr)
  192. goto out;
  193. sgt = mtk_gem_prime_get_sg_table(obj);
  194. if (IS_ERR(sgt))
  195. return PTR_ERR(sgt);
  196. npages = obj->size >> PAGE_SHIFT;
  197. mtk_gem->pages = kcalloc(npages, sizeof(*mtk_gem->pages), GFP_KERNEL);
  198. if (!mtk_gem->pages) {
  199. sg_free_table(sgt);
  200. kfree(sgt);
  201. return -ENOMEM;
  202. }
  203. drm_prime_sg_to_page_array(sgt, mtk_gem->pages, npages);
  204. mtk_gem->kvaddr = vmap(mtk_gem->pages, npages, VM_MAP,
  205. pgprot_writecombine(PAGE_KERNEL));
  206. if (!mtk_gem->kvaddr) {
  207. sg_free_table(sgt);
  208. kfree(sgt);
  209. kfree(mtk_gem->pages);
  210. return -ENOMEM;
  211. }
  212. sg_free_table(sgt);
  213. kfree(sgt);
  214. out:
  215. iosys_map_set_vaddr(map, mtk_gem->kvaddr);
  216. return 0;
  217. }
  218. void mtk_gem_prime_vunmap(struct drm_gem_object *obj, struct iosys_map *map)
  219. {
  220. struct mtk_gem_obj *mtk_gem = to_mtk_gem_obj(obj);
  221. void *vaddr = map->vaddr;
  222. if (!mtk_gem->pages)
  223. return;
  224. vunmap(vaddr);
  225. mtk_gem->kvaddr = NULL;
  226. kfree(mtk_gem->pages);
  227. }