armada_gem.c

// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2012 Russell King
 */
#include <linux/dma-buf.h>
#include <linux/dma-mapping.h>
#include <linux/mman.h>
#include <linux/shmem_fs.h>

#include <drm/armada_drm.h>
#include <drm/drm_prime.h>

#include "armada_drm.h"
#include "armada_gem.h"
#include "armada_ioctlP.h"

MODULE_IMPORT_NS(DMA_BUF);
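/*
 * Page fault handler for objects mapped into userspace. The backing
 * store is physically contiguous, so the faulting page frame is simply
 * the object's base PFN plus the page offset into the VMA.
 */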
static vm_fault_t armada_gem_vm_fault(struct vm_fault *vmf)
{
	struct drm_gem_object *gobj = vmf->vma->vm_private_data;
	struct armada_gem_object *obj = drm_to_armada_gem(gobj);
	unsigned long pfn = obj->phys_addr >> PAGE_SHIFT;

	pfn += (vmf->address - vmf->vma->vm_start) >> PAGE_SHIFT;
	return vmf_insert_pfn(vmf->vma, vmf->address, pfn);
}

static const struct vm_operations_struct armada_gem_vm_ops = {
	.fault	= armada_gem_vm_fault,
	.open	= drm_gem_vm_open,
	.close	= drm_gem_vm_close,
};
static size_t roundup_gem_size(size_t size)
{
	return roundup(size, PAGE_SIZE);
}
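/*
 * Release an object and whichever backing store it acquired: page
 * allocations are freed, linear (carveout) allocations are returned to
 * the drm_mm pool, and imported dma-bufs are unmapped and detached.
 */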
void armada_gem_free_object(struct drm_gem_object *obj)
{
	struct armada_gem_object *dobj = drm_to_armada_gem(obj);
	struct armada_private *priv = drm_to_armada_dev(obj->dev);

	DRM_DEBUG_DRIVER("release obj %p\n", dobj);

	drm_gem_free_mmap_offset(&dobj->obj);

	might_lock(&priv->linear_lock);

	if (dobj->page) {
		/* page backed memory */
		unsigned int order = get_order(dobj->obj.size);

		__free_pages(dobj->page, order);
	} else if (dobj->linear) {
		/* linear backed memory */
		mutex_lock(&priv->linear_lock);
		drm_mm_remove_node(dobj->linear);
		mutex_unlock(&priv->linear_lock);

		kfree(dobj->linear);
		if (dobj->addr)
			iounmap(dobj->addr);
	}

	if (dobj->obj.import_attach) {
		/* We only ever display imported data */
		if (dobj->sgt)
			dma_buf_unmap_attachment_unlocked(dobj->obj.import_attach,
							  dobj->sgt, DMA_TO_DEVICE);
		drm_prime_gem_destroy(&dobj->obj, NULL);
	}

	drm_gem_object_release(&dobj->obj);

	kfree(dobj);
}
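/*
 * Attach backing store to an object: small objects come from the page
 * allocator, everything else from the linear (carveout) memory managed
 * by drm_mm. Returns immediately if the object is already backed.
 */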
int
armada_gem_linear_back(struct drm_device *dev, struct armada_gem_object *obj)
{
	struct armada_private *priv = drm_to_armada_dev(dev);
	size_t size = obj->obj.size;

	if (obj->page || obj->linear)
		return 0;

	/*
	 * If it is a small allocation (typically cursor, which will
	 * be 32x64 or 64x32 ARGB pixels) try to get it from the system.
	 * Framebuffers will never be this small (our minimum size for
	 * framebuffers is larger than this anyway.)  Such objects are
	 * only accessed by the CPU so we don't need any special handling
	 * here.
	 */
	if (size <= 8192) {
		unsigned int order = get_order(size);
		struct page *p = alloc_pages(GFP_KERNEL, order);

		if (p) {
			obj->addr = page_address(p);
			obj->phys_addr = page_to_phys(p);
			obj->page = p;

			memset(obj->addr, 0, PAGE_ALIGN(size));
		}
	}

	/*
	 * We could grab something from DMA if it's enabled, but that
	 * involves building in a problem:
	 *
	 * The GEM DMA helper interface uses dma_alloc_coherent(), which
	 * provides us with a CPU virtual address and a device address.
	 *
	 * The CPU virtual address may be either an address in the kernel
	 * direct mapped region (for example, as it would be on x86) or
	 * it may be remapped into another part of kernel memory space
	 * (eg, as it would be on ARM.)  This means virt_to_phys() on the
	 * returned virtual address may be invalid, depending on the
	 * architecture implementation.
	 *
	 * The device address may also not be a physical address; it may
	 * be that there is some kind of remapping between the device and
	 * system RAM, which makes the device address unsafe to re-use as
	 * a physical address as well.
	 *
	 * This makes DRM usage of dma_alloc_coherent() in a generic way
	 * at best very questionable and unsafe.
	 */

	/* Otherwise, grab it from our linear allocation */
	if (!obj->page) {
		struct drm_mm_node *node;
		unsigned align = min_t(unsigned, size, SZ_2M);
		void __iomem *ptr;
		int ret;

		node = kzalloc(sizeof(*node), GFP_KERNEL);
		if (!node)
			return -ENOSPC;

		mutex_lock(&priv->linear_lock);
		ret = drm_mm_insert_node_generic(&priv->linear, node,
						 size, align, 0, 0);
		mutex_unlock(&priv->linear_lock);
		if (ret) {
			kfree(node);
			return ret;
		}

		obj->linear = node;

		/* Ensure that the memory we're returning is cleared. */
		ptr = ioremap_wc(obj->linear->start, size);
		if (!ptr) {
			mutex_lock(&priv->linear_lock);
			drm_mm_remove_node(obj->linear);
			mutex_unlock(&priv->linear_lock);
			kfree(obj->linear);
			obj->linear = NULL;
			return -ENOMEM;
		}

		memset_io(ptr, 0, size);
		iounmap(ptr);

		obj->phys_addr = obj->linear->start;
		obj->dev_addr = obj->linear->start;
		obj->mapped = true;
	}

	DRM_DEBUG_DRIVER("obj %p phys %#llx dev %#llx\n", obj,
			 (unsigned long long)obj->phys_addr,
			 (unsigned long long)obj->dev_addr);

	return 0;
}
void *
armada_gem_map_object(struct drm_device *dev, struct armada_gem_object *dobj)
{
	/* only linear objects need to be ioremap'd */
	if (!dobj->addr && dobj->linear)
		dobj->addr = ioremap_wc(dobj->phys_addr, dobj->obj.size);
	return dobj->addr;
}
static const struct drm_gem_object_funcs armada_gem_object_funcs = {
	.free = armada_gem_free_object,
	.export = armada_gem_prime_export,
	.vm_ops = &armada_gem_vm_ops,
};
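/*
 * Allocate an object with no shmem backing file; the caller attaches
 * backing store later (linear memory or an imported dma-buf).
 */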
struct armada_gem_object *
armada_gem_alloc_private_object(struct drm_device *dev, size_t size)
{
	struct armada_gem_object *obj;

	size = roundup_gem_size(size);

	obj = kzalloc(sizeof(*obj), GFP_KERNEL);
	if (!obj)
		return NULL;

	obj->obj.funcs = &armada_gem_object_funcs;

	drm_gem_private_object_init(dev, &obj->obj, size);

	DRM_DEBUG_DRIVER("alloc private obj %p size %zu\n", obj, size);

	return obj;
}
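/*
 * Allocate a shmem-backed object; the backing pages are faulted in on
 * demand through the shmem mapping and may live in highmem.
 */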
static struct armada_gem_object *armada_gem_alloc_object(struct drm_device *dev,
	size_t size)
{
	struct armada_gem_object *obj;
	struct address_space *mapping;

	size = roundup_gem_size(size);

	obj = kzalloc(sizeof(*obj), GFP_KERNEL);
	if (!obj)
		return NULL;

	obj->obj.funcs = &armada_gem_object_funcs;

	if (drm_gem_object_init(dev, &obj->obj, size)) {
		kfree(obj);
		return NULL;
	}

	mapping = obj->obj.filp->f_mapping;
	mapping_set_gfp_mask(mapping, GFP_HIGHUSER | __GFP_RECLAIMABLE);

	DRM_DEBUG_DRIVER("alloc obj %p size %zu\n", obj, size);

	return obj;
}
/* Dumb alloc support */
int armada_gem_dumb_create(struct drm_file *file, struct drm_device *dev,
	struct drm_mode_create_dumb *args)
{
	struct armada_gem_object *dobj;
	u32 handle;
	size_t size;
	int ret;

	args->pitch = armada_pitch(args->width, args->bpp);
	args->size = size = args->pitch * args->height;

	dobj = armada_gem_alloc_private_object(dev, size);
	if (dobj == NULL)
		return -ENOMEM;

	ret = armada_gem_linear_back(dev, dobj);
	if (ret)
		goto err;

	ret = drm_gem_handle_create(file, &dobj->obj, &handle);
	if (ret)
		goto err;

	args->handle = handle;

	/* drop reference from allocate - handle holds it now */
	DRM_DEBUG_DRIVER("obj %p size %zu handle %#x\n", dobj, size, handle);
 err:
	drm_gem_object_put(&dobj->obj);
	return ret;
}
/* Private driver gem ioctls */
int armada_gem_create_ioctl(struct drm_device *dev, void *data,
	struct drm_file *file)
{
	struct drm_armada_gem_create *args = data;
	struct armada_gem_object *dobj;
	size_t size;
	u32 handle;
	int ret;

	if (args->size == 0)
		return -ENOMEM;

	size = args->size;

	dobj = armada_gem_alloc_object(dev, size);
	if (dobj == NULL)
		return -ENOMEM;

	ret = drm_gem_handle_create(file, &dobj->obj, &handle);
	if (ret)
		goto err;

	args->handle = handle;

	/* drop reference from allocate - handle holds it now */
	DRM_DEBUG_DRIVER("obj %p size %zu handle %#x\n", dobj, size, handle);
 err:
	drm_gem_object_put(&dobj->obj);
	return ret;
}
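/*
 * Userspace usage sketch (illustrative only; struct and ioctl names
 * assume the uapi header include/uapi/drm/armada_drm.h):
 *
 *	struct drm_armada_gem_create create = { .size = length };
 *
 *	if (ioctl(fd, DRM_IOCTL_ARMADA_GEM_CREATE, &create) == 0)
 *		buffer_handle = create.handle;
 */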
/* Map a shmem-backed object into process memory space */
int armada_gem_mmap_ioctl(struct drm_device *dev, void *data,
	struct drm_file *file)
{
	struct drm_armada_gem_mmap *args = data;
	struct armada_gem_object *dobj;
	unsigned long addr;

	dobj = armada_gem_object_lookup(file, args->handle);
	if (dobj == NULL)
		return -ENOENT;

	if (!dobj->obj.filp) {
		drm_gem_object_put(&dobj->obj);
		return -EINVAL;
	}

	addr = vm_mmap(dobj->obj.filp, 0, args->size, PROT_READ | PROT_WRITE,
		       MAP_SHARED, args->offset);
	drm_gem_object_put(&dobj->obj);
	if (IS_ERR_VALUE(addr))
		return addr;

	args->addr = addr;

	return 0;
}
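/*
 * Copy data from userspace into a kernel-mapped object, then invoke the
 * object's update callback (if any) so the owner can react to the new
 * contents. The object must already have a kernel mapping.
 */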
int armada_gem_pwrite_ioctl(struct drm_device *dev, void *data,
	struct drm_file *file)
{
	struct drm_armada_gem_pwrite *args = data;
	struct armada_gem_object *dobj;
	char __user *ptr;
	int ret = 0;

	DRM_DEBUG_DRIVER("handle %u off %u size %u ptr 0x%llx\n",
		args->handle, args->offset, args->size, args->ptr);

	if (args->size == 0)
		return 0;

	ptr = (char __user *)(uintptr_t)args->ptr;

	if (!access_ok(ptr, args->size))
		return -EFAULT;

	if (fault_in_readable(ptr, args->size))
		return -EFAULT;

	dobj = armada_gem_object_lookup(file, args->handle);
	if (dobj == NULL)
		return -ENOENT;

	/* Must be a kernel-mapped object; drop the lookup reference if not */
	if (!dobj->addr) {
		ret = -EINVAL;
		goto unref;
	}

	if (args->offset > dobj->obj.size ||
	    args->size > dobj->obj.size - args->offset) {
		DRM_ERROR("invalid size: object size %zu\n", dobj->obj.size);
		ret = -EINVAL;
		goto unref;
	}

	if (copy_from_user(dobj->addr + args->offset, ptr, args->size)) {
		ret = -EFAULT;
	} else if (dobj->update) {
		dobj->update(dobj->update_data);
		ret = 0;
	}

 unref:
	drm_gem_object_put(&dobj->obj);
	return ret;
}
/* Prime support */
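/*
 * Build a scatterlist for an exported buffer. Three backing cases:
 * shmem pages (pinned one at a time), a single contiguous page
 * allocation, or a linear region with no struct page, in which case
 * only the DMA address and length of the single entry are filled in.
 */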
static struct sg_table *
armada_gem_prime_map_dma_buf(struct dma_buf_attachment *attach,
	enum dma_data_direction dir)
{
	struct drm_gem_object *obj = attach->dmabuf->priv;
	struct armada_gem_object *dobj = drm_to_armada_gem(obj);
	struct scatterlist *sg;
	struct sg_table *sgt;
	int i;

	sgt = kmalloc(sizeof(*sgt), GFP_KERNEL);
	if (!sgt)
		return NULL;

	if (dobj->obj.filp) {
		struct address_space *mapping;
		int count;

		count = dobj->obj.size / PAGE_SIZE;
		if (sg_alloc_table(sgt, count, GFP_KERNEL))
			goto free_sgt;

		mapping = dobj->obj.filp->f_mapping;

		for_each_sgtable_sg(sgt, sg, i) {
			struct page *page;

			page = shmem_read_mapping_page(mapping, i);
			if (IS_ERR(page))
				goto release;

			sg_set_page(sg, page, PAGE_SIZE, 0);
		}

		if (dma_map_sgtable(attach->dev, sgt, dir, 0))
			goto release;
	} else if (dobj->page) {
		/* Single contiguous page */
		if (sg_alloc_table(sgt, 1, GFP_KERNEL))
			goto free_sgt;

		sg_set_page(sgt->sgl, dobj->page, dobj->obj.size, 0);

		if (dma_map_sgtable(attach->dev, sgt, dir, 0))
			goto free_table;
	} else if (dobj->linear) {
		/* Single contiguous physical region - no struct page */
		if (sg_alloc_table(sgt, 1, GFP_KERNEL))
			goto free_sgt;
		sg_dma_address(sgt->sgl) = dobj->dev_addr;
		sg_dma_len(sgt->sgl) = dobj->obj.size;
	} else {
		goto free_sgt;
	}
	return sgt;

 release:
	for_each_sgtable_sg(sgt, sg, i)
		if (sg_page(sg))
			put_page(sg_page(sg));
 free_table:
	sg_free_table(sgt);
 free_sgt:
	kfree(sgt);
	return NULL;
}
static void armada_gem_prime_unmap_dma_buf(struct dma_buf_attachment *attach,
	struct sg_table *sgt, enum dma_data_direction dir)
{
	struct drm_gem_object *obj = attach->dmabuf->priv;
	struct armada_gem_object *dobj = drm_to_armada_gem(obj);
	int i;

	if (!dobj->linear)
		dma_unmap_sgtable(attach->dev, sgt, dir, 0);

	if (dobj->obj.filp) {
		struct scatterlist *sg;

		for_each_sgtable_sg(sgt, sg, i)
			put_page(sg_page(sg));
	}

	sg_free_table(sgt);
	kfree(sgt);
}
static int
armada_gem_dmabuf_mmap(struct dma_buf *buf, struct vm_area_struct *vma)
{
	return -EINVAL;
}

static const struct dma_buf_ops armada_gem_prime_dmabuf_ops = {
	.map_dma_buf	= armada_gem_prime_map_dma_buf,
	.unmap_dma_buf	= armada_gem_prime_unmap_dma_buf,
	.release	= drm_gem_dmabuf_release,
	.mmap		= armada_gem_dmabuf_mmap,
};
struct dma_buf *
armada_gem_prime_export(struct drm_gem_object *obj, int flags)
{
	DEFINE_DMA_BUF_EXPORT_INFO(exp_info);

	exp_info.ops = &armada_gem_prime_dmabuf_ops;
	exp_info.size = obj->size;
	exp_info.flags = O_RDWR;
	exp_info.priv = obj;

	return drm_gem_dmabuf_export(obj->dev, &exp_info);
}
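/*
 * Import a dma-buf as a GEM object. Re-importing one of our own buffers
 * short-circuits to the existing object; a foreign buffer is attached
 * but deliberately left unmapped until armada_gem_map_import().
 */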
struct drm_gem_object *
armada_gem_prime_import(struct drm_device *dev, struct dma_buf *buf)
{
	struct dma_buf_attachment *attach;
	struct armada_gem_object *dobj;

	if (buf->ops == &armada_gem_prime_dmabuf_ops) {
		struct drm_gem_object *obj = buf->priv;

		if (obj->dev == dev) {
			/*
			 * Importing our own dmabuf(s) increases the
			 * refcount on the gem object itself.
			 */
			drm_gem_object_get(obj);
			return obj;
		}
	}

	attach = dma_buf_attach(buf, dev->dev);
	if (IS_ERR(attach))
		return ERR_CAST(attach);

	dobj = armada_gem_alloc_private_object(dev, buf->size);
	if (!dobj) {
		dma_buf_detach(buf, attach);
		return ERR_PTR(-ENOMEM);
	}

	dobj->obj.import_attach = attach;
	get_dma_buf(buf);

	/*
	 * Don't call dma_buf_map_attachment() here - it maps the
	 * scatterlist immediately for DMA, and this is not always
	 * an appropriate thing to do.
	 */
	return &dobj->obj;
}
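/*
 * Map a previously imported dma-buf for DMA. The display hardware needs
 * one contiguous region, so a scattered mapping or one smaller than the
 * object is rejected.
 */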
int armada_gem_map_import(struct armada_gem_object *dobj)
{
	int ret;

	dobj->sgt = dma_buf_map_attachment_unlocked(dobj->obj.import_attach,
						    DMA_TO_DEVICE);
	if (IS_ERR(dobj->sgt)) {
		ret = PTR_ERR(dobj->sgt);
		dobj->sgt = NULL;
		DRM_ERROR("dma_buf_map_attachment() error: %d\n", ret);
		return ret;
	}
	if (dobj->sgt->nents > 1) {
		DRM_ERROR("dma_buf_map_attachment() returned an (unsupported) scattered list\n");
		return -EINVAL;
	}
	if (sg_dma_len(dobj->sgt->sgl) < dobj->obj.size) {
		DRM_ERROR("dma_buf_map_attachment() returned a small buffer\n");
		return -EINVAL;
	}
	dobj->dev_addr = sg_dma_address(dobj->sgt->sgl);
	dobj->mapped = true;
	return 0;
}