// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright 2018 Noralf Trønnes
 */

#include <linux/dma-buf.h>
#include <linux/export.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/shmem_fs.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>

#ifdef CONFIG_X86
#include <asm/set_memory.h>
#endif

#include <drm/drm.h>
#include <drm/drm_device.h>
#include <drm/drm_drv.h>
#include <drm/drm_gem_shmem_helper.h>
#include <drm/drm_prime.h>
#include <drm/drm_print.h>

/**
 * DOC: overview
 *
 * This library provides helpers for GEM objects backed by shmem buffers
 * allocated using anonymous pageable memory.
 *
 * Functions that operate on the GEM object receive struct &drm_gem_shmem_object.
 * For GEM callback helpers in struct &drm_gem_object functions, see likewise
 * named functions with an _object_ infix (e.g., drm_gem_shmem_object_vmap() wraps
 * drm_gem_shmem_vmap()). These helpers perform the necessary type conversion.
 */

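/*
 * A minimal usage sketch (hypothetical driver, not part of this file): a
 * driver typically pulls in all of these helpers by putting
 * DRM_GEM_SHMEM_DRIVER_OPS into its &drm_driver and allocating buffers
 * with drm_gem_shmem_create():
 *
 *	static const struct drm_driver my_driver = {
 *		.driver_features = DRIVER_GEM | DRIVER_MODESET,
 *		DRM_GEM_SHMEM_DRIVER_OPS,
 *	};
 *
 *	struct drm_gem_shmem_object *shmem;
 *
 *	shmem = drm_gem_shmem_create(dev, size);
 *	if (IS_ERR(shmem))
 *		return PTR_ERR(shmem);
 */
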
static const struct drm_gem_object_funcs drm_gem_shmem_funcs = {
	.free = drm_gem_shmem_object_free,
	.print_info = drm_gem_shmem_object_print_info,
	.pin = drm_gem_shmem_object_pin,
	.unpin = drm_gem_shmem_object_unpin,
	.get_sg_table = drm_gem_shmem_object_get_sg_table,
	.vmap = drm_gem_shmem_object_vmap,
	.vunmap = drm_gem_shmem_object_vunmap,
	.mmap = drm_gem_shmem_object_mmap,
	.vm_ops = &drm_gem_shmem_vm_ops,
};

static struct drm_gem_shmem_object *
__drm_gem_shmem_create(struct drm_device *dev, size_t size, bool private)
{
	struct drm_gem_shmem_object *shmem;
	struct drm_gem_object *obj;
	int ret = 0;

	size = PAGE_ALIGN(size);

	if (dev->driver->gem_create_object) {
		obj = dev->driver->gem_create_object(dev, size);
		if (IS_ERR(obj))
			return ERR_CAST(obj);
		shmem = to_drm_gem_shmem_obj(obj);
	} else {
		shmem = kzalloc(sizeof(*shmem), GFP_KERNEL);
		if (!shmem)
			return ERR_PTR(-ENOMEM);
		obj = &shmem->base;
	}

	if (!obj->funcs)
		obj->funcs = &drm_gem_shmem_funcs;

	if (private) {
		drm_gem_private_object_init(dev, obj, size);
		shmem->map_wc = false; /* dma-buf mappings always use writecombine */
	} else {
		ret = drm_gem_object_init(dev, obj, size);
	}
	if (ret) {
		drm_gem_private_object_fini(obj);
		goto err_free;
	}

	ret = drm_gem_create_mmap_offset(obj);
	if (ret)
		goto err_release;

	INIT_LIST_HEAD(&shmem->madv_list);

	if (!private) {
		/*
		 * Our buffers are kept pinned, so allocating them
		 * from the MOVABLE zone is a really bad idea, and
		 * conflicts with CMA. See comments above new_inode()
		 * why this is required _and_ expected if you're
		 * going to pin these pages.
		 */
		mapping_set_gfp_mask(obj->filp->f_mapping, GFP_HIGHUSER |
				     __GFP_RETRY_MAYFAIL | __GFP_NOWARN);
	}

	return shmem;

err_release:
	drm_gem_object_release(obj);
err_free:
	kfree(obj);

	return ERR_PTR(ret);
}

/**
 * drm_gem_shmem_create - Allocate an object with the given size
 * @dev: DRM device
 * @size: Size of the object to allocate
 *
 * This function creates a shmem GEM object.
 *
 * Returns:
 * A struct drm_gem_shmem_object * on success or an ERR_PTR()-encoded negative
 * error code on failure.
 */
struct drm_gem_shmem_object *drm_gem_shmem_create(struct drm_device *dev, size_t size)
{
	return __drm_gem_shmem_create(dev, size, false);
}
EXPORT_SYMBOL_GPL(drm_gem_shmem_create);

/**
 * drm_gem_shmem_free - Free resources associated with a shmem GEM object
 * @shmem: shmem GEM object to free
 *
 * This function cleans up the GEM object state and frees the memory used to
 * store the object itself.
 */
void drm_gem_shmem_free(struct drm_gem_shmem_object *shmem)
{
	struct drm_gem_object *obj = &shmem->base;

	if (obj->import_attach) {
		drm_prime_gem_destroy(obj, shmem->sgt);
	} else {
		dma_resv_lock(shmem->base.resv, NULL);

		drm_WARN_ON(obj->dev, shmem->vmap_use_count);

		if (shmem->sgt) {
			dma_unmap_sgtable(obj->dev->dev, shmem->sgt,
					  DMA_BIDIRECTIONAL, 0);
			sg_free_table(shmem->sgt);
			kfree(shmem->sgt);
		}
		if (shmem->pages)
			drm_gem_shmem_put_pages(shmem);

		drm_WARN_ON(obj->dev, shmem->pages_use_count);

		dma_resv_unlock(shmem->base.resv);
	}

	drm_gem_object_release(obj);
	kfree(shmem);
}
EXPORT_SYMBOL_GPL(drm_gem_shmem_free);

static int drm_gem_shmem_get_pages(struct drm_gem_shmem_object *shmem)
{
	struct drm_gem_object *obj = &shmem->base;
	struct page **pages;

	dma_resv_assert_held(shmem->base.resv);

	if (shmem->pages_use_count++ > 0)
		return 0;

	pages = drm_gem_get_pages(obj);
	if (IS_ERR(pages)) {
		drm_dbg_kms(obj->dev, "Failed to get pages (%ld)\n",
			    PTR_ERR(pages));
		shmem->pages_use_count = 0;
		return PTR_ERR(pages);
	}

	/*
	 * TODO: Allocating WC pages which are correctly flushed is only
	 * supported on x86. Ideal solution would be a GFP_WC flag, which also
	 * ttm_pool.c could use.
	 */
#ifdef CONFIG_X86
	if (shmem->map_wc)
		set_pages_array_wc(pages, obj->size >> PAGE_SHIFT);
#endif

	shmem->pages = pages;

	return 0;
}

/**
 * drm_gem_shmem_put_pages - Decrease use count on the backing pages for a shmem GEM object
 * @shmem: shmem GEM object
 *
 * This function decreases the use count and puts the backing pages when use drops to zero.
 */
void drm_gem_shmem_put_pages(struct drm_gem_shmem_object *shmem)
{
	struct drm_gem_object *obj = &shmem->base;

	dma_resv_assert_held(shmem->base.resv);

	if (drm_WARN_ON_ONCE(obj->dev, !shmem->pages_use_count))
		return;

	if (--shmem->pages_use_count > 0)
		return;

#ifdef CONFIG_X86
	if (shmem->map_wc)
		set_pages_array_wb(shmem->pages, obj->size >> PAGE_SHIFT);
#endif

	drm_gem_put_pages(obj, shmem->pages,
			  shmem->pages_mark_dirty_on_put,
			  shmem->pages_mark_accessed_on_put);
	shmem->pages = NULL;
}
EXPORT_SYMBOL(drm_gem_shmem_put_pages);

int drm_gem_shmem_pin_locked(struct drm_gem_shmem_object *shmem)
{
	int ret;

	dma_resv_assert_held(shmem->base.resv);

	drm_WARN_ON(shmem->base.dev, shmem->base.import_attach);

	ret = drm_gem_shmem_get_pages(shmem);

	return ret;
}
EXPORT_SYMBOL(drm_gem_shmem_pin_locked);

void drm_gem_shmem_unpin_locked(struct drm_gem_shmem_object *shmem)
{
	dma_resv_assert_held(shmem->base.resv);

	drm_gem_shmem_put_pages(shmem);
}
EXPORT_SYMBOL(drm_gem_shmem_unpin_locked);

/**
 * drm_gem_shmem_pin - Pin backing pages for a shmem GEM object
 * @shmem: shmem GEM object
 *
 * This function makes sure the backing pages are pinned in memory while the
 * buffer is exported.
 *
 * Returns:
 * 0 on success or a negative error code on failure.
 */
int drm_gem_shmem_pin(struct drm_gem_shmem_object *shmem)
{
	struct drm_gem_object *obj = &shmem->base;
	int ret;

	drm_WARN_ON(obj->dev, obj->import_attach);

	ret = dma_resv_lock_interruptible(shmem->base.resv, NULL);
	if (ret)
		return ret;
	ret = drm_gem_shmem_pin_locked(shmem);
	dma_resv_unlock(shmem->base.resv);

	return ret;
}
EXPORT_SYMBOL(drm_gem_shmem_pin);

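/*
 * Usage sketch (illustrative, not taken from a real driver): pinning keeps
 * the backing pages resident, e.g. for the duration of a hardware job:
 *
 *	int ret;
 *
 *	ret = drm_gem_shmem_pin(shmem);
 *	if (ret)
 *		return ret;
 *
 *	... submit work that accesses the backing pages ...
 *
 *	drm_gem_shmem_unpin(shmem);
 */
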
/**
 * drm_gem_shmem_unpin - Unpin backing pages for a shmem GEM object
 * @shmem: shmem GEM object
 *
 * This function removes the requirement that the backing pages are pinned in
 * memory.
 */
void drm_gem_shmem_unpin(struct drm_gem_shmem_object *shmem)
{
	struct drm_gem_object *obj = &shmem->base;

	drm_WARN_ON(obj->dev, obj->import_attach);

	dma_resv_lock(shmem->base.resv, NULL);
	drm_gem_shmem_unpin_locked(shmem);
	dma_resv_unlock(shmem->base.resv);
}
EXPORT_SYMBOL(drm_gem_shmem_unpin);

/**
 * drm_gem_shmem_vmap - Create a virtual mapping for a shmem GEM object
 * @shmem: shmem GEM object
 * @map: Returns the kernel virtual address of the SHMEM GEM object's backing
 *       store.
 *
 * This function makes sure that a contiguous kernel virtual address mapping
 * exists for the buffer backing the shmem GEM object. It hides the differences
 * between dma-buf imported and natively allocated objects.
 *
 * Acquired mappings should be cleaned up by calling drm_gem_shmem_vunmap().
 *
 * Returns:
 * 0 on success or a negative error code on failure.
 */
int drm_gem_shmem_vmap(struct drm_gem_shmem_object *shmem,
		       struct iosys_map *map)
{
	struct drm_gem_object *obj = &shmem->base;
	int ret = 0;

	if (obj->import_attach) {
		ret = dma_buf_vmap(obj->import_attach->dmabuf, map);
		if (!ret) {
			if (drm_WARN_ON(obj->dev, map->is_iomem)) {
				dma_buf_vunmap(obj->import_attach->dmabuf, map);
				return -EIO;
			}
		}
	} else {
		pgprot_t prot = PAGE_KERNEL;

		dma_resv_assert_held(shmem->base.resv);

		if (shmem->vmap_use_count++ > 0) {
			iosys_map_set_vaddr(map, shmem->vaddr);
			return 0;
		}

		ret = drm_gem_shmem_get_pages(shmem);
		if (ret)
			goto err_zero_use;

		if (shmem->map_wc)
			prot = pgprot_writecombine(prot);
		shmem->vaddr = vmap(shmem->pages, obj->size >> PAGE_SHIFT,
				    VM_MAP, prot);
		if (!shmem->vaddr)
			ret = -ENOMEM;
		else
			iosys_map_set_vaddr(map, shmem->vaddr);
	}

	if (ret) {
		drm_dbg_kms(obj->dev, "Failed to vmap pages, error %d\n", ret);
		goto err_put_pages;
	}

	return 0;

err_put_pages:
	if (!obj->import_attach)
		drm_gem_shmem_put_pages(shmem);
err_zero_use:
	shmem->vmap_use_count = 0;

	return ret;
}
EXPORT_SYMBOL(drm_gem_shmem_vmap);

/**
 * drm_gem_shmem_vunmap - Unmap a virtual mapping for a shmem GEM object
 * @shmem: shmem GEM object
 * @map: Kernel virtual address where the SHMEM GEM object was mapped
 *
 * This function cleans up a kernel virtual address mapping acquired by
 * drm_gem_shmem_vmap(). The mapping is only removed when the use count drops to
 * zero.
 *
 * This function hides the differences between dma-buf imported and natively
 * allocated objects.
 */
void drm_gem_shmem_vunmap(struct drm_gem_shmem_object *shmem,
			  struct iosys_map *map)
{
	struct drm_gem_object *obj = &shmem->base;

	if (obj->import_attach) {
		dma_buf_vunmap(obj->import_attach->dmabuf, map);
	} else {
		dma_resv_assert_held(shmem->base.resv);

		if (drm_WARN_ON_ONCE(obj->dev, !shmem->vmap_use_count))
			return;

		if (--shmem->vmap_use_count > 0)
			return;

		vunmap(shmem->vaddr);
		drm_gem_shmem_put_pages(shmem);
	}

	shmem->vaddr = NULL;
}
EXPORT_SYMBOL(drm_gem_shmem_vunmap);

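/*
 * Usage sketch (illustrative) for CPU access to a natively allocated object;
 * the dma_resv locking around the calls is shown because these helpers
 * assert that the reservation lock is held in the non-imported path:
 *
 *	struct iosys_map map;
 *	int ret;
 *
 *	dma_resv_lock(shmem->base.resv, NULL);
 *	ret = drm_gem_shmem_vmap(shmem, &map);
 *	dma_resv_unlock(shmem->base.resv);
 *	if (ret)
 *		return ret;
 *
 *	iosys_map_memcpy_to(&map, 0, data, len);
 *
 *	dma_resv_lock(shmem->base.resv, NULL);
 *	drm_gem_shmem_vunmap(shmem, &map);
 *	dma_resv_unlock(shmem->base.resv);
 */
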
static int
drm_gem_shmem_create_with_handle(struct drm_file *file_priv,
				 struct drm_device *dev, size_t size,
				 uint32_t *handle)
{
	struct drm_gem_shmem_object *shmem;
	int ret;

	shmem = drm_gem_shmem_create(dev, size);
	if (IS_ERR(shmem))
		return PTR_ERR(shmem);

	/*
	 * Allocate an id in the idr table where the obj is registered;
	 * the handle holds the id that userspace can see.
	 */
	ret = drm_gem_handle_create(file_priv, &shmem->base, handle);
	/* drop reference from allocate - handle holds it now. */
	drm_gem_object_put(&shmem->base);

	return ret;
}

/*
 * Update madvise status; returns true if the object has not been purged,
 * false otherwise.
 */
int drm_gem_shmem_madvise(struct drm_gem_shmem_object *shmem, int madv)
{
	dma_resv_assert_held(shmem->base.resv);

	if (shmem->madv >= 0)
		shmem->madv = madv;

	madv = shmem->madv;

	return (madv >= 0);
}
EXPORT_SYMBOL(drm_gem_shmem_madvise);

void drm_gem_shmem_purge(struct drm_gem_shmem_object *shmem)
{
	struct drm_gem_object *obj = &shmem->base;
	struct drm_device *dev = obj->dev;

	dma_resv_assert_held(shmem->base.resv);

	drm_WARN_ON(obj->dev, !drm_gem_shmem_is_purgeable(shmem));

	dma_unmap_sgtable(dev->dev, shmem->sgt, DMA_BIDIRECTIONAL, 0);
	sg_free_table(shmem->sgt);
	kfree(shmem->sgt);
	shmem->sgt = NULL;

	drm_gem_shmem_put_pages(shmem);

	shmem->madv = -1;

	drm_vma_node_unmap(&obj->vma_node, dev->anon_inode->i_mapping);
	drm_gem_free_mmap_offset(obj);

	/*
	 * Our goal here is to return as much of the memory as possible back
	 * to the system as we are called from OOM. To do this we must
	 * instruct the shmfs to drop all of its backing pages, *now*.
	 */
	shmem_truncate_range(file_inode(obj->filp), 0, (loff_t)-1);

	invalidate_mapping_pages(file_inode(obj->filp)->i_mapping, 0, (loff_t)-1);
}
EXPORT_SYMBOL(drm_gem_shmem_purge);

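/*
 * Usage sketch (illustrative; real drivers such as panfrost also keep
 * their own lists of purgeable objects): userspace first marks a buffer
 * as purgeable, and a later memory-pressure handler reclaims it. Any
 * madv value > 0 counts as "don't need":
 *
 *	dma_resv_lock(shmem->base.resv, NULL);
 *	drm_gem_shmem_madvise(shmem, 1);
 *	dma_resv_unlock(shmem->base.resv);
 *
 *	... later, under memory pressure ...
 *
 *	if (dma_resv_trylock(shmem->base.resv)) {
 *		if (drm_gem_shmem_is_purgeable(shmem))
 *			drm_gem_shmem_purge(shmem);
 *		dma_resv_unlock(shmem->base.resv);
 *	}
 */
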
/**
 * drm_gem_shmem_dumb_create - Create a dumb shmem buffer object
 * @file: DRM file structure to create the dumb buffer for
 * @dev: DRM device
 * @args: IOCTL data
 *
 * This function computes the pitch of the dumb buffer and rounds it up to an
 * integer number of bytes per pixel. Drivers for hardware that doesn't have
 * any additional restrictions on the pitch can directly use this function as
 * their &drm_driver.dumb_create callback.
 *
 * For hardware with additional restrictions, drivers can adjust the fields
 * set up by userspace before calling into this function.
 *
 * Returns:
 * 0 on success or a negative error code on failure.
 */
int drm_gem_shmem_dumb_create(struct drm_file *file, struct drm_device *dev,
			      struct drm_mode_create_dumb *args)
{
	u32 min_pitch = DIV_ROUND_UP(args->width * args->bpp, 8);

	if (!args->pitch || !args->size) {
		args->pitch = min_pitch;
		args->size = PAGE_ALIGN(args->pitch * args->height);
	} else {
		/* ensure sane minimum values */
		if (args->pitch < min_pitch)
			args->pitch = min_pitch;
		if (args->size < args->pitch * args->height)
			args->size = PAGE_ALIGN(args->pitch * args->height);
	}

	return drm_gem_shmem_create_with_handle(file, dev, args->size, &args->handle);
}
EXPORT_SYMBOL_GPL(drm_gem_shmem_dumb_create);

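/*
 * Sketch of a driver-side wrapper (hypothetical, e.g. for hardware that
 * needs a 64-byte pitch alignment): adjust the fields first, then call
 * into the helper, which keeps caller-supplied values that meet the
 * minimums:
 *
 *	static int my_dumb_create(struct drm_file *file, struct drm_device *dev,
 *				  struct drm_mode_create_dumb *args)
 *	{
 *		args->pitch = ALIGN(DIV_ROUND_UP(args->width * args->bpp, 8), 64);
 *		args->size = PAGE_ALIGN(args->pitch * args->height);
 *
 *		return drm_gem_shmem_dumb_create(file, dev, args);
 *	}
 */
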
static vm_fault_t drm_gem_shmem_fault(struct vm_fault *vmf)
{
	struct vm_area_struct *vma = vmf->vma;
	struct drm_gem_object *obj = vma->vm_private_data;
	struct drm_gem_shmem_object *shmem = to_drm_gem_shmem_obj(obj);
	loff_t num_pages = obj->size >> PAGE_SHIFT;
	vm_fault_t ret;
	struct page *page;
	pgoff_t page_offset;

	/* We don't use vmf->pgoff since that has the fake offset */
	page_offset = (vmf->address - vma->vm_start) >> PAGE_SHIFT;

	dma_resv_lock(shmem->base.resv, NULL);

	if (page_offset >= num_pages ||
	    drm_WARN_ON_ONCE(obj->dev, !shmem->pages) ||
	    shmem->madv < 0) {
		ret = VM_FAULT_SIGBUS;
	} else {
		page = shmem->pages[page_offset];

		ret = vmf_insert_pfn(vma, vmf->address, page_to_pfn(page));
	}

	dma_resv_unlock(shmem->base.resv);

	return ret;
}

static void drm_gem_shmem_vm_open(struct vm_area_struct *vma)
{
	struct drm_gem_object *obj = vma->vm_private_data;
	struct drm_gem_shmem_object *shmem = to_drm_gem_shmem_obj(obj);

	drm_WARN_ON(obj->dev, obj->import_attach);

	dma_resv_lock(shmem->base.resv, NULL);

	/*
	 * We should have already pinned the pages when the buffer was first
	 * mmap'd, vm_open() just grabs an additional reference for the new
	 * mm the vma is getting copied into (ie. on fork()).
	 */
	if (!drm_WARN_ON_ONCE(obj->dev, !shmem->pages_use_count))
		shmem->pages_use_count++;

	dma_resv_unlock(shmem->base.resv);

	drm_gem_vm_open(vma);
}

static void drm_gem_shmem_vm_close(struct vm_area_struct *vma)
{
	struct drm_gem_object *obj = vma->vm_private_data;
	struct drm_gem_shmem_object *shmem = to_drm_gem_shmem_obj(obj);

	dma_resv_lock(shmem->base.resv, NULL);
	drm_gem_shmem_put_pages(shmem);
	dma_resv_unlock(shmem->base.resv);

	drm_gem_vm_close(vma);
}

const struct vm_operations_struct drm_gem_shmem_vm_ops = {
	.fault = drm_gem_shmem_fault,
	.open = drm_gem_shmem_vm_open,
	.close = drm_gem_shmem_vm_close,
};
EXPORT_SYMBOL_GPL(drm_gem_shmem_vm_ops);

/**
 * drm_gem_shmem_mmap - Memory-map a shmem GEM object
 * @shmem: shmem GEM object
 * @vma: VMA for the area to be mapped
 *
 * This function implements an augmented version of the GEM DRM file mmap
 * operation for shmem objects.
 *
 * Returns:
 * 0 on success or a negative error code on failure.
 */
int drm_gem_shmem_mmap(struct drm_gem_shmem_object *shmem, struct vm_area_struct *vma)
{
	struct drm_gem_object *obj = &shmem->base;
	int ret;

	if (obj->import_attach) {
		/*
		 * Reset both vm_ops and vm_private_data, so we don't end up with
		 * vm_ops pointing to our implementation if the dma-buf backend
		 * doesn't set those fields.
		 */
		vma->vm_private_data = NULL;
		vma->vm_ops = NULL;

		ret = dma_buf_mmap(obj->dma_buf, vma, 0);

		/* Drop the reference drm_gem_mmap_obj() acquired. */
		if (!ret)
			drm_gem_object_put(obj);

		return ret;
	}

	if (is_cow_mapping(vma->vm_flags))
		return -EINVAL;

	dma_resv_lock(shmem->base.resv, NULL);
	ret = drm_gem_shmem_get_pages(shmem);
	dma_resv_unlock(shmem->base.resv);

	if (ret)
		return ret;

	vm_flags_set(vma, VM_PFNMAP | VM_DONTEXPAND | VM_DONTDUMP);
	vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
	if (shmem->map_wc)
		vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);

	return 0;
}
EXPORT_SYMBOL_GPL(drm_gem_shmem_mmap);

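/*
 * Usage sketch (illustrative): drivers normally don't call this directly;
 * it is reached through &drm_gem_object_funcs.mmap. With the shmem funcs
 * installed on the object, wiring up the stock GEM file_operations is
 * enough:
 *
 *	DEFINE_DRM_GEM_FOPS(my_fops);
 *
 *	static const struct drm_driver my_driver = {
 *		.fops = &my_fops,
 *		...
 *	};
 */
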
/**
 * drm_gem_shmem_print_info() - Print &drm_gem_shmem_object info for debugfs
 * @shmem: shmem GEM object
 * @p: DRM printer
 * @indent: Tab indentation level
 */
void drm_gem_shmem_print_info(const struct drm_gem_shmem_object *shmem,
			      struct drm_printer *p, unsigned int indent)
{
	if (shmem->base.import_attach)
		return;

	drm_printf_indent(p, indent, "pages_use_count=%u\n", shmem->pages_use_count);
	drm_printf_indent(p, indent, "vmap_use_count=%u\n", shmem->vmap_use_count);
	drm_printf_indent(p, indent, "vaddr=%p\n", shmem->vaddr);
}
EXPORT_SYMBOL(drm_gem_shmem_print_info);

/**
 * drm_gem_shmem_get_sg_table - Provide a scatter/gather table of pinned
 *                              pages for a shmem GEM object
 * @shmem: shmem GEM object
 *
 * This function exports a scatter/gather table suitable for PRIME usage by
 * calling the standard DMA mapping API.
 *
 * Drivers that need to acquire a scatter/gather table for objects should call
 * drm_gem_shmem_get_pages_sgt() instead.
 *
 * Returns:
 * A pointer to the scatter/gather table of pinned pages or an error pointer
 * on failure.
 */
struct sg_table *drm_gem_shmem_get_sg_table(struct drm_gem_shmem_object *shmem)
{
	struct drm_gem_object *obj = &shmem->base;

	drm_WARN_ON(obj->dev, obj->import_attach);

	return drm_prime_pages_to_sg(obj->dev, shmem->pages, obj->size >> PAGE_SHIFT);
}
EXPORT_SYMBOL_GPL(drm_gem_shmem_get_sg_table);

static struct sg_table *drm_gem_shmem_get_pages_sgt_locked(struct drm_gem_shmem_object *shmem)
{
	struct drm_gem_object *obj = &shmem->base;
	int ret;
	struct sg_table *sgt;

	if (shmem->sgt)
		return shmem->sgt;

	drm_WARN_ON(obj->dev, obj->import_attach);

	ret = drm_gem_shmem_get_pages(shmem);
	if (ret)
		return ERR_PTR(ret);

	sgt = drm_gem_shmem_get_sg_table(shmem);
	if (IS_ERR(sgt)) {
		ret = PTR_ERR(sgt);
		goto err_put_pages;
	}
	/* Map the pages for use by the h/w. */
	ret = dma_map_sgtable(obj->dev->dev, sgt, DMA_BIDIRECTIONAL, 0);
	if (ret)
		goto err_free_sgt;

	shmem->sgt = sgt;

	return sgt;

err_free_sgt:
	sg_free_table(sgt);
	kfree(sgt);
err_put_pages:
	drm_gem_shmem_put_pages(shmem);
	return ERR_PTR(ret);
}

/**
 * drm_gem_shmem_get_pages_sgt - Pin pages, dma map them, and return a
 *                               scatter/gather table for a shmem GEM object.
 * @shmem: shmem GEM object
 *
 * This function returns a scatter/gather table suitable for driver usage. If
 * the sg table doesn't exist, the pages are pinned, dma-mapped, and a sg
 * table created.
 *
 * This is the main function for drivers to get at backing storage, and it hides
 * any difference between dma-buf imported and natively allocated objects.
 * drm_gem_shmem_get_sg_table() should not be directly called by drivers.
 *
 * Returns:
 * A pointer to the scatter/gather table of pinned pages or an ERR_PTR()-encoded
 * negative error code on failure.
 */
struct sg_table *drm_gem_shmem_get_pages_sgt(struct drm_gem_shmem_object *shmem)
{
	int ret;
	struct sg_table *sgt;

	ret = dma_resv_lock_interruptible(shmem->base.resv, NULL);
	if (ret)
		return ERR_PTR(ret);

	sgt = drm_gem_shmem_get_pages_sgt_locked(shmem);
	dma_resv_unlock(shmem->base.resv);

	return sgt;
}
EXPORT_SYMBOL_GPL(drm_gem_shmem_get_pages_sgt);

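/*
 * Usage sketch (illustrative): programming a device with the DMA addresses
 * of the buffer. for_each_sgtable_dma_sg() is the standard scatterlist
 * iterator; program_mmu_entry() is a hypothetical stand-in for
 * driver-specific code:
 *
 *	struct sg_table *sgt = drm_gem_shmem_get_pages_sgt(shmem);
 *	struct scatterlist *sg;
 *	unsigned int i;
 *
 *	if (IS_ERR(sgt))
 *		return PTR_ERR(sgt);
 *
 *	for_each_sgtable_dma_sg(sgt, sg, i)
 *		program_mmu_entry(sg_dma_address(sg), sg_dma_len(sg));
 */
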
/**
 * drm_gem_shmem_prime_import_sg_table - Produce a shmem GEM object from
 *                                       another driver's scatter/gather table of pinned pages
 * @dev: Device to import into
 * @attach: DMA-BUF attachment
 * @sgt: Scatter/gather table of pinned pages
 *
 * This function imports a scatter/gather table exported via DMA-BUF by
 * another driver. Drivers that use the shmem helpers should set this as their
 * &drm_driver.gem_prime_import_sg_table callback.
 *
 * Returns:
 * A pointer to a newly created GEM object or an ERR_PTR-encoded negative
 * error code on failure.
 */
struct drm_gem_object *
drm_gem_shmem_prime_import_sg_table(struct drm_device *dev,
				    struct dma_buf_attachment *attach,
				    struct sg_table *sgt)
{
	size_t size = PAGE_ALIGN(attach->dmabuf->size);
	struct drm_gem_shmem_object *shmem;

	shmem = __drm_gem_shmem_create(dev, size, true);
	if (IS_ERR(shmem))
		return ERR_CAST(shmem);

	shmem->sgt = sgt;

	drm_dbg_prime(dev, "size = %zu\n", size);

	return &shmem->base;
}
EXPORT_SYMBOL_GPL(drm_gem_shmem_prime_import_sg_table);

MODULE_DESCRIPTION("DRM SHMEM memory-management helpers");
MODULE_IMPORT_NS(DMA_BUF);
MODULE_LICENSE("GPL v2");