// SPDX-License-Identifier: GPL-2.0-only
/*
 * NVIDIA Tegra DRM GEM helper functions
 *
 * Copyright (C) 2012 Sascha Hauer, Pengutronix
 * Copyright (C) 2013-2015 NVIDIA CORPORATION, All rights reserved.
 *
 * Based on the GEM/CMA helpers
 *
 * Copyright (c) 2011 Samsung Electronics Co., Ltd.
 */

#include <linux/dma-buf.h>
#include <linux/iommu.h>
#include <linux/module.h>
#include <linux/vmalloc.h>

#include <drm/drm_drv.h>
#include <drm/drm_prime.h>
#include <drm/tegra_drm.h>

#include "drm.h"
#include "gem.h"

MODULE_IMPORT_NS(DMA_BUF);

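/*
 * Count the number of contiguous DMA chunks in a mapped scatterlist. A new
 * chunk is counted whenever an entry's DMA address does not line up with the
 * expected continuation of the previous chunk. tegra_bo_pin() uses the
 * result to report how many discontiguous IOVA ranges a mapping spans.
 */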
static unsigned int sg_dma_count_chunks(struct scatterlist *sgl, unsigned int nents)
{
	dma_addr_t next = ~(dma_addr_t)0;
	unsigned int count = 0, i;
	struct scatterlist *s;

	for_each_sg(sgl, s, nents, i) {
		/* sg_dma_address(s) is only valid for entries that have sg_dma_len(s) != 0. */
		if (!sg_dma_len(s))
			continue;

		if (sg_dma_address(s) != next) {
			next = sg_dma_address(s) + sg_dma_len(s);
			count++;
		}
	}

	return count;
}

static inline unsigned int sgt_dma_count_chunks(struct sg_table *sgt)
{
	return sg_dma_count_chunks(sgt->sgl, sgt->nents);
}

static void tegra_bo_put(struct host1x_bo *bo)
{
	struct tegra_bo *obj = host1x_to_tegra_bo(bo);

	drm_gem_object_put(&obj->gem);
}

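/*
 * Pin a buffer object for DMA by the client device @dev. Three cases are
 * handled below: imported DMA-BUFs get their own attachment and mapping,
 * buffers backed by shmem pages get an SG table built from those pages, and
 * contiguous DMA API allocations get an SG table via dma_get_sgtable(). In
 * the latter two cases the SG table is then mapped through the DMA API.
 */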
static struct host1x_bo_mapping *tegra_bo_pin(struct device *dev, struct host1x_bo *bo,
					      enum dma_data_direction direction)
{
	struct tegra_bo *obj = host1x_to_tegra_bo(bo);
	struct drm_gem_object *gem = &obj->gem;
	struct host1x_bo_mapping *map;
	int err;

	map = kzalloc(sizeof(*map), GFP_KERNEL);
	if (!map)
		return ERR_PTR(-ENOMEM);

	kref_init(&map->ref);
	map->bo = host1x_bo_get(bo);
	map->direction = direction;
	map->dev = dev;

	/*
	 * Imported buffers need special treatment to satisfy the semantics of DMA-BUF.
	 */
	if (gem->import_attach) {
		struct dma_buf *buf = gem->import_attach->dmabuf;

		map->attach = dma_buf_attach(buf, dev);
		if (IS_ERR(map->attach)) {
			err = PTR_ERR(map->attach);
			goto free;
		}

		map->sgt = dma_buf_map_attachment_unlocked(map->attach, direction);
		if (IS_ERR(map->sgt)) {
			dma_buf_detach(buf, map->attach);
			err = PTR_ERR(map->sgt);
			map->sgt = NULL;
			goto free;
		}

		err = sgt_dma_count_chunks(map->sgt);
		map->size = gem->size;

		goto out;
	}

	/*
	 * If we don't have a mapping for this buffer yet, return an SG table
	 * so that host1x can do the mapping for us via the DMA API.
	 */
	map->sgt = kzalloc(sizeof(*map->sgt), GFP_KERNEL);
	if (!map->sgt) {
		err = -ENOMEM;
		goto free;
	}

	if (obj->pages) {
		/*
		 * If the buffer object was allocated from the explicit IOMMU
		 * API code paths, construct an SG table from the pages.
		 */
		err = sg_alloc_table_from_pages(map->sgt, obj->pages, obj->num_pages, 0, gem->size,
						GFP_KERNEL);
		if (err < 0)
			goto free;
	} else {
		/*
		 * If the buffer object had no pages allocated and if it was
		 * not imported, it had to be allocated with the DMA API, so
		 * the DMA API helper can be used.
		 */
		err = dma_get_sgtable(dev, map->sgt, obj->vaddr, obj->iova, gem->size);
		if (err < 0)
			goto free;
	}

	err = dma_map_sgtable(dev, map->sgt, direction, 0);
	if (err)
		goto free_sgt;

out:
	/*
	 * If we've manually mapped the buffer object through the IOMMU, make sure to return the
	 * existing IOVA address of our mapping.
	 */
	if (!obj->mm) {
		map->phys = sg_dma_address(map->sgt->sgl);
		map->chunks = err;
	} else {
		map->phys = obj->iova;
		map->chunks = 1;
	}

	map->size = gem->size;

	return map;

free_sgt:
	sg_free_table(map->sgt);
free:
	kfree(map->sgt);
	kfree(map);
	return ERR_PTR(err);
}

static void tegra_bo_unpin(struct host1x_bo_mapping *map)
{
	if (map->attach) {
		dma_buf_unmap_attachment_unlocked(map->attach, map->sgt,
						  map->direction);
		dma_buf_detach(map->attach->dmabuf, map->attach);
	} else {
		dma_unmap_sgtable(map->dev, map->sgt, map->direction, 0);
		sg_free_table(map->sgt);
		kfree(map->sgt);
	}

	host1x_bo_put(map->bo);
	kfree(map);
}

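/*
 * Return a kernel virtual address for the buffer: the existing vaddr for DMA
 * API allocations, a dma_buf vmap for imported buffers, or a fresh
 * write-combined vmap() of the backing pages otherwise. tegra_bo_munmap()
 * undoes only the latter two cases.
 */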
static void *tegra_bo_mmap(struct host1x_bo *bo)
{
	struct tegra_bo *obj = host1x_to_tegra_bo(bo);
	struct iosys_map map = { 0 };
	void *vaddr;
	int ret;

	if (obj->vaddr)
		return obj->vaddr;

	if (obj->gem.import_attach) {
		ret = dma_buf_vmap_unlocked(obj->gem.import_attach->dmabuf, &map);
		if (ret < 0)
			return ERR_PTR(ret);

		return map.vaddr;
	}

	vaddr = vmap(obj->pages, obj->num_pages, VM_MAP,
		     pgprot_writecombine(PAGE_KERNEL));
	if (!vaddr)
		return ERR_PTR(-ENOMEM);

	return vaddr;
}

static void tegra_bo_munmap(struct host1x_bo *bo, void *addr)
{
	struct tegra_bo *obj = host1x_to_tegra_bo(bo);
	struct iosys_map map = IOSYS_MAP_INIT_VADDR(addr);

	if (obj->vaddr)
		return;

	if (obj->gem.import_attach)
		return dma_buf_vunmap_unlocked(obj->gem.import_attach->dmabuf, &map);

	vunmap(addr);
}

static struct host1x_bo *tegra_bo_get(struct host1x_bo *bo)
{
	struct tegra_bo *obj = host1x_to_tegra_bo(bo);

	drm_gem_object_get(&obj->gem);

	return bo;
}

static const struct host1x_bo_ops tegra_bo_ops = {
	.get = tegra_bo_get,
	.put = tegra_bo_put,
	.pin = tegra_bo_pin,
	.unpin = tegra_bo_unpin,
	.mmap = tegra_bo_mmap,
	.munmap = tegra_bo_munmap,
};

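/*
 * Map a buffer's SG table into the Tegra DRM IOMMU domain: reserve an IOVA
 * range from the drm_mm allocator under mm_lock, then map the pages there
 * with iommu_map_sgtable(). The resulting address is cached in bo->iova so
 * later pins can reuse it.
 */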
static int tegra_bo_iommu_map(struct tegra_drm *tegra, struct tegra_bo *bo)
{
	int prot = IOMMU_READ | IOMMU_WRITE;
	int err;

	if (bo->mm)
		return -EBUSY;

	bo->mm = kzalloc(sizeof(*bo->mm), GFP_KERNEL);
	if (!bo->mm)
		return -ENOMEM;

	mutex_lock(&tegra->mm_lock);

	err = drm_mm_insert_node_generic(&tegra->mm,
					 bo->mm, bo->gem.size, PAGE_SIZE, 0, 0);
	if (err < 0) {
		dev_err(tegra->drm->dev, "out of I/O virtual memory: %d\n",
			err);
		goto unlock;
	}

	bo->iova = bo->mm->start;

	bo->size = iommu_map_sgtable(tegra->domain, bo->iova, bo->sgt, prot);
	if (!bo->size) {
		dev_err(tegra->drm->dev, "failed to map buffer\n");
		err = -ENOMEM;
		goto remove;
	}

	mutex_unlock(&tegra->mm_lock);

	return 0;

remove:
	drm_mm_remove_node(bo->mm);
unlock:
	mutex_unlock(&tegra->mm_lock);
	kfree(bo->mm);
	return err;
}

static int tegra_bo_iommu_unmap(struct tegra_drm *tegra, struct tegra_bo *bo)
{
	if (!bo->mm)
		return 0;

	mutex_lock(&tegra->mm_lock);
	iommu_unmap(tegra->domain, bo->iova, bo->size);
	drm_mm_remove_node(bo->mm);
	mutex_unlock(&tegra->mm_lock);

	kfree(bo->mm);

	return 0;
}

static const struct drm_gem_object_funcs tegra_gem_object_funcs = {
	.free = tegra_bo_free_object,
	.export = tegra_gem_prime_export,
	.vm_ops = &tegra_bo_vm_ops,
};

static struct tegra_bo *tegra_bo_alloc_object(struct drm_device *drm,
					      size_t size)
{
	struct tegra_bo *bo;
	int err;

	bo = kzalloc(sizeof(*bo), GFP_KERNEL);
	if (!bo)
		return ERR_PTR(-ENOMEM);

	bo->gem.funcs = &tegra_gem_object_funcs;

	host1x_bo_init(&bo->base, &tegra_bo_ops);

	size = round_up(size, PAGE_SIZE);

	err = drm_gem_object_init(drm, &bo->gem, size);
	if (err < 0)
		goto free;

	err = drm_gem_create_mmap_offset(&bo->gem);
	if (err < 0)
		goto release;

	return bo;

release:
	drm_gem_object_release(&bo->gem);
free:
	kfree(bo);
	return ERR_PTR(err);
}

static void tegra_bo_free(struct drm_device *drm, struct tegra_bo *bo)
{
	if (bo->pages) {
		dma_unmap_sgtable(drm->dev, bo->sgt, DMA_FROM_DEVICE, 0);
		drm_gem_put_pages(&bo->gem, bo->pages, true, true);
		sg_free_table(bo->sgt);
		kfree(bo->sgt);
	} else if (bo->vaddr) {
		dma_free_wc(drm->dev, bo->gem.size, bo->vaddr, bo->iova);
	}
}

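/*
 * Back a buffer object with shmem pages from the GEM page helpers and map
 * them for DMA. The SG table produced by drm_prime_pages_to_sg() is kept in
 * bo->sgt so that tegra_bo_free() can tear the mapping down again.
 */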
static int tegra_bo_get_pages(struct drm_device *drm, struct tegra_bo *bo)
{
	int err;

	bo->pages = drm_gem_get_pages(&bo->gem);
	if (IS_ERR(bo->pages))
		return PTR_ERR(bo->pages);

	bo->num_pages = bo->gem.size >> PAGE_SHIFT;

	bo->sgt = drm_prime_pages_to_sg(bo->gem.dev, bo->pages, bo->num_pages);
	if (IS_ERR(bo->sgt)) {
		err = PTR_ERR(bo->sgt);
		goto put_pages;
	}

	err = dma_map_sgtable(drm->dev, bo->sgt, DMA_FROM_DEVICE, 0);
	if (err)
		goto free_sgt;

	return 0;

free_sgt:
	sg_free_table(bo->sgt);
	kfree(bo->sgt);
put_pages:
	drm_gem_put_pages(&bo->gem, bo->pages, false, false);
	return err;
}

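/*
 * Allocate backing storage: if an IOMMU domain is available, use
 * discontiguous shmem pages and map them through the IOMMU; otherwise fall
 * back to a physically contiguous, write-combined DMA API allocation.
 */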
static int tegra_bo_alloc(struct drm_device *drm, struct tegra_bo *bo)
{
	struct tegra_drm *tegra = drm->dev_private;
	int err;

	if (tegra->domain) {
		err = tegra_bo_get_pages(drm, bo);
		if (err < 0)
			return err;

		err = tegra_bo_iommu_map(tegra, bo);
		if (err < 0) {
			tegra_bo_free(drm, bo);
			return err;
		}
	} else {
		size_t size = bo->gem.size;

		bo->vaddr = dma_alloc_wc(drm->dev, size, &bo->iova,
					 GFP_KERNEL | __GFP_NOWARN);
		if (!bo->vaddr) {
			dev_err(drm->dev,
				"failed to allocate buffer of size %zu\n",
				size);
			return -ENOMEM;
		}
	}

	return 0;
}

struct tegra_bo *tegra_bo_create(struct drm_device *drm, size_t size,
				 unsigned long flags)
{
	struct tegra_bo *bo;
	int err;

	bo = tegra_bo_alloc_object(drm, size);
	if (IS_ERR(bo))
		return bo;

	err = tegra_bo_alloc(drm, bo);
	if (err < 0)
		goto release;

	if (flags & DRM_TEGRA_GEM_CREATE_TILED)
		bo->tiling.mode = TEGRA_BO_TILING_MODE_TILED;

	if (flags & DRM_TEGRA_GEM_CREATE_BOTTOM_UP)
		bo->flags |= TEGRA_BO_BOTTOM_UP;

	return bo;

release:
	drm_gem_object_release(&bo->gem);
	kfree(bo);
	return ERR_PTR(err);
}

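/*
 * Create a buffer object and immediately expose it to userspace through a
 * GEM handle. drm_gem_handle_create() takes its own reference, so the local
 * reference is dropped before returning and the returned pointer is only
 * borrowed, not owned. A minimal usage sketch (hypothetical caller, size and
 * error handling illustrative only):
 *
 *	u32 handle;
 *	struct tegra_bo *bo = tegra_bo_create_with_handle(file, drm, SZ_64K,
 *							  0, &handle);
 *	if (IS_ERR(bo))
 *		return PTR_ERR(bo);
 *
 * The handle can then be copied back to userspace, e.g. as part of an ioctl
 * argument, which is how tegra_bo_dumb_create() below consumes it.
 */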
struct tegra_bo *tegra_bo_create_with_handle(struct drm_file *file,
					     struct drm_device *drm,
					     size_t size,
					     unsigned long flags,
					     u32 *handle)
{
	struct tegra_bo *bo;
	int err;

	bo = tegra_bo_create(drm, size, flags);
	if (IS_ERR(bo))
		return bo;

	err = drm_gem_handle_create(file, &bo->gem, handle);
	if (err) {
		tegra_bo_free_object(&bo->gem);
		return ERR_PTR(err);
	}

	drm_gem_object_put(&bo->gem);

	return bo;
}

static struct tegra_bo *tegra_bo_import(struct drm_device *drm,
					struct dma_buf *buf)
{
	struct tegra_drm *tegra = drm->dev_private;
	struct dma_buf_attachment *attach;
	struct tegra_bo *bo;
	int err;

	bo = tegra_bo_alloc_object(drm, buf->size);
	if (IS_ERR(bo))
		return bo;

	attach = dma_buf_attach(buf, drm->dev);
	if (IS_ERR(attach)) {
		err = PTR_ERR(attach);
		goto free;
	}

	get_dma_buf(buf);

	bo->sgt = dma_buf_map_attachment_unlocked(attach, DMA_TO_DEVICE);
	if (IS_ERR(bo->sgt)) {
		err = PTR_ERR(bo->sgt);
		goto detach;
	}

	if (tegra->domain) {
		err = tegra_bo_iommu_map(tegra, bo);
		if (err < 0)
			goto detach;
	}

	bo->gem.import_attach = attach;

	return bo;

detach:
	if (!IS_ERR_OR_NULL(bo->sgt))
		dma_buf_unmap_attachment_unlocked(attach, bo->sgt, DMA_TO_DEVICE);

	dma_buf_detach(buf, attach);
	dma_buf_put(buf);
free:
	drm_gem_object_release(&bo->gem);
	kfree(bo);
	return ERR_PTR(err);
}

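/*
 * Release a buffer object: unpin any cached host1x mappings, tear down the
 * IOMMU mapping if one exists, and either undo the DMA-BUF import or free
 * the locally allocated backing storage before releasing the GEM object
 * itself.
 */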
void tegra_bo_free_object(struct drm_gem_object *gem)
{
	struct tegra_drm *tegra = gem->dev->dev_private;
	struct host1x_bo_mapping *mapping, *tmp;
	struct tegra_bo *bo = to_tegra_bo(gem);

	/* remove all mappings of this buffer object from any caches */
	list_for_each_entry_safe(mapping, tmp, &bo->base.mappings, list) {
		if (mapping->cache)
			host1x_bo_unpin(mapping);
		else
			dev_err(gem->dev->dev, "mapping %p stale for device %s\n", mapping,
				dev_name(mapping->dev));
	}

	if (tegra->domain)
		tegra_bo_iommu_unmap(tegra, bo);

	if (gem->import_attach) {
		dma_buf_unmap_attachment_unlocked(gem->import_attach, bo->sgt,
						  DMA_TO_DEVICE);
		drm_prime_gem_destroy(gem, NULL);
	} else {
		tegra_bo_free(gem->dev, bo);
	}

	drm_gem_object_release(gem);
	kfree(bo);
}

int tegra_bo_dumb_create(struct drm_file *file, struct drm_device *drm,
			 struct drm_mode_create_dumb *args)
{
	unsigned int min_pitch = DIV_ROUND_UP(args->width * args->bpp, 8);
	struct tegra_drm *tegra = drm->dev_private;
	struct tegra_bo *bo;

	args->pitch = round_up(min_pitch, tegra->pitch_align);
	args->size = args->pitch * args->height;

	bo = tegra_bo_create_with_handle(file, drm, args->size, 0,
					 &args->handle);
	if (IS_ERR(bo))
		return PTR_ERR(bo);

	return 0;
}

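/*
 * Page-fault handler for mmap()ed buffers backed by shmem pages: look up the
 * page that corresponds to the faulting offset and insert it into the VMA.
 * Contiguous DMA API allocations never fault here because __tegra_gem_mmap()
 * maps them up front with dma_mmap_wc().
 */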
static vm_fault_t tegra_bo_fault(struct vm_fault *vmf)
{
	struct vm_area_struct *vma = vmf->vma;
	struct drm_gem_object *gem = vma->vm_private_data;
	struct tegra_bo *bo = to_tegra_bo(gem);
	struct page *page;
	pgoff_t offset;

	if (!bo->pages)
		return VM_FAULT_SIGBUS;

	offset = (vmf->address - vma->vm_start) >> PAGE_SHIFT;
	page = bo->pages[offset];

	return vmf_insert_page(vma, vmf->address, page);
}

const struct vm_operations_struct tegra_bo_vm_ops = {
	.fault = tegra_bo_fault,
	.open = drm_gem_vm_open,
	.close = drm_gem_vm_close,
};

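/*
 * Set up a userspace mapping for a buffer object. Contiguous allocations are
 * mapped immediately via dma_mmap_wc(), while page-backed allocations are
 * populated lazily through tegra_bo_fault() and only have their VMA flags
 * and write-combined page protection prepared here.
 */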
int __tegra_gem_mmap(struct drm_gem_object *gem, struct vm_area_struct *vma)
{
	struct tegra_bo *bo = to_tegra_bo(gem);

	if (!bo->pages) {
		unsigned long vm_pgoff = vma->vm_pgoff;
		int err;

		/*
		 * Clear the VM_PFNMAP flag that was set by drm_gem_mmap(),
		 * and set the vm_pgoff (used as a fake buffer offset by DRM)
		 * to 0 as we want to map the whole buffer.
		 */
		vm_flags_clear(vma, VM_PFNMAP);
		vma->vm_pgoff = 0;

		err = dma_mmap_wc(gem->dev->dev, vma, bo->vaddr, bo->iova,
				  gem->size);
		if (err < 0) {
			drm_gem_vm_close(vma);
			return err;
		}

		vma->vm_pgoff = vm_pgoff;
	} else {
		pgprot_t prot = vm_get_page_prot(vma->vm_flags);

		vm_flags_mod(vma, VM_MIXEDMAP, VM_PFNMAP);

		vma->vm_page_prot = pgprot_writecombine(prot);
	}

	return 0;
}

int tegra_drm_mmap(struct file *file, struct vm_area_struct *vma)
{
	struct drm_gem_object *gem;
	int err;

	err = drm_gem_mmap(file, vma);
	if (err < 0)
		return err;

	gem = vma->vm_private_data;

	return __tegra_gem_mmap(gem, vma);
}

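/*
 * DMA-BUF exporter side: build a fresh SG table for the attaching device
 * from either the backing pages or the contiguous DMA allocation, then map
 * it for DMA in the requested direction. tegra_gem_prime_unmap_dma_buf()
 * reverses both steps.
 */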
static struct sg_table *
tegra_gem_prime_map_dma_buf(struct dma_buf_attachment *attach,
			    enum dma_data_direction dir)
{
	struct drm_gem_object *gem = attach->dmabuf->priv;
	struct tegra_bo *bo = to_tegra_bo(gem);
	struct sg_table *sgt;

	sgt = kmalloc(sizeof(*sgt), GFP_KERNEL);
	if (!sgt)
		return NULL;

	if (bo->pages) {
		if (sg_alloc_table_from_pages(sgt, bo->pages, bo->num_pages,
					      0, gem->size, GFP_KERNEL) < 0)
			goto free;
	} else {
		if (dma_get_sgtable(attach->dev, sgt, bo->vaddr, bo->iova,
				    gem->size) < 0)
			goto free;
	}

	if (dma_map_sgtable(attach->dev, sgt, dir, 0))
		goto free;

	return sgt;

free:
	sg_free_table(sgt);
	kfree(sgt);
	return NULL;
}

static void tegra_gem_prime_unmap_dma_buf(struct dma_buf_attachment *attach,
					  struct sg_table *sgt,
					  enum dma_data_direction dir)
{
	struct drm_gem_object *gem = attach->dmabuf->priv;
	struct tegra_bo *bo = to_tegra_bo(gem);

	if (bo->pages)
		dma_unmap_sgtable(attach->dev, sgt, dir, 0);

	sg_free_table(sgt);
	kfree(sgt);
}

static void tegra_gem_prime_release(struct dma_buf *buf)
{
	drm_gem_dmabuf_release(buf);
}

static int tegra_gem_prime_begin_cpu_access(struct dma_buf *buf,
					    enum dma_data_direction direction)
{
	struct drm_gem_object *gem = buf->priv;
	struct tegra_bo *bo = to_tegra_bo(gem);
	struct drm_device *drm = gem->dev;

	if (bo->pages)
		dma_sync_sgtable_for_cpu(drm->dev, bo->sgt, DMA_FROM_DEVICE);

	return 0;
}

static int tegra_gem_prime_end_cpu_access(struct dma_buf *buf,
					  enum dma_data_direction direction)
{
	struct drm_gem_object *gem = buf->priv;
	struct tegra_bo *bo = to_tegra_bo(gem);
	struct drm_device *drm = gem->dev;

	if (bo->pages)
		dma_sync_sgtable_for_device(drm->dev, bo->sgt, DMA_TO_DEVICE);

	return 0;
}

static int tegra_gem_prime_mmap(struct dma_buf *buf, struct vm_area_struct *vma)
{
	struct drm_gem_object *gem = buf->priv;
	int err;

	err = drm_gem_mmap_obj(gem, gem->size, vma);
	if (err < 0)
		return err;

	return __tegra_gem_mmap(gem, vma);
}

static int tegra_gem_prime_vmap(struct dma_buf *buf, struct iosys_map *map)
{
	struct drm_gem_object *gem = buf->priv;
	struct tegra_bo *bo = to_tegra_bo(gem);
	void *vaddr;

	vaddr = tegra_bo_mmap(&bo->base);
	if (IS_ERR(vaddr))
		return PTR_ERR(vaddr);

	iosys_map_set_vaddr(map, vaddr);

	return 0;
}

static void tegra_gem_prime_vunmap(struct dma_buf *buf, struct iosys_map *map)
{
	struct drm_gem_object *gem = buf->priv;
	struct tegra_bo *bo = to_tegra_bo(gem);

	tegra_bo_munmap(&bo->base, map->vaddr);
}

static const struct dma_buf_ops tegra_gem_prime_dmabuf_ops = {
	.map_dma_buf = tegra_gem_prime_map_dma_buf,
	.unmap_dma_buf = tegra_gem_prime_unmap_dma_buf,
	.release = tegra_gem_prime_release,
	.begin_cpu_access = tegra_gem_prime_begin_cpu_access,
	.end_cpu_access = tegra_gem_prime_end_cpu_access,
	.mmap = tegra_gem_prime_mmap,
	.vmap = tegra_gem_prime_vmap,
	.vunmap = tegra_gem_prime_vunmap,
};

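/*
 * Wrap a GEM object in a DMA-BUF using the dma_buf_ops above so that other
 * drivers can attach to and map the buffer.
 */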
struct dma_buf *tegra_gem_prime_export(struct drm_gem_object *gem,
				       int flags)
{
	DEFINE_DMA_BUF_EXPORT_INFO(exp_info);

	exp_info.exp_name = KBUILD_MODNAME;
	exp_info.owner = gem->dev->driver->fops->owner;
	exp_info.ops = &tegra_gem_prime_dmabuf_ops;
	exp_info.size = gem->size;
	exp_info.flags = flags;
	exp_info.priv = gem;

	return drm_gem_dmabuf_export(gem->dev, &exp_info);
}

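/*
 * Import a DMA-BUF as a GEM object. Self-imports (a buffer that this device
 * exported itself) short-circuit to a reference on the existing GEM object;
 * foreign buffers are attached and mapped via tegra_bo_import().
 */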
struct drm_gem_object *tegra_gem_prime_import(struct drm_device *drm,
					      struct dma_buf *buf)
{
	struct tegra_bo *bo;

	if (buf->ops == &tegra_gem_prime_dmabuf_ops) {
		struct drm_gem_object *gem = buf->priv;

		if (gem->dev == drm) {
			drm_gem_object_get(gem);
			return gem;
		}
	}

	bo = tegra_bo_import(drm, buf);
	if (IS_ERR(bo))
		return ERR_CAST(bo);

	return &bo->gem;
}

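/*
 * Resolve a userspace GEM handle to the host1x_bo embedded in the Tegra
 * buffer object. The reference taken by drm_gem_object_lookup() is handed
 * to the caller, who releases it through host1x_bo_put().
 */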
struct host1x_bo *tegra_gem_lookup(struct drm_file *file, u32 handle)
{
	struct drm_gem_object *gem;
	struct tegra_bo *bo;

	gem = drm_gem_object_lookup(file, handle);
	if (!gem)
		return NULL;

	bo = to_tegra_bo(gem);

	return &bo->base;
}