etnaviv_gem.c
// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2015-2018 Etnaviv Project
 */

#include <linux/spinlock.h>
#include <linux/shmem_fs.h>
#include <linux/sched/mm.h>
#include <linux/sched/task.h>

#include "etnaviv_drv.h"
#include "etnaviv_gem.h"
#include "etnaviv_gpu.h"
#include "etnaviv_mmu.h"

static struct lock_class_key etnaviv_shm_lock_class;
static struct lock_class_key etnaviv_userptr_lock_class;

static void etnaviv_gem_scatter_map(struct etnaviv_gem_object *etnaviv_obj)
{
	struct drm_device *dev = etnaviv_obj->base.dev;
	struct sg_table *sgt = etnaviv_obj->sgt;

	/*
	 * For non-cached buffers, ensure the new pages are clean
	 * because display controller, GPU, etc. are not coherent.
	 */
	if (etnaviv_obj->flags & ETNA_BO_CACHE_MASK)
		dma_map_sg(dev->dev, sgt->sgl, sgt->nents, DMA_BIDIRECTIONAL);
}

static void etnaviv_gem_scatterlist_unmap(struct etnaviv_gem_object *etnaviv_obj)
{
	struct drm_device *dev = etnaviv_obj->base.dev;
	struct sg_table *sgt = etnaviv_obj->sgt;

	/*
	 * For non-cached buffers, ensure the new pages are clean
	 * because display controller, GPU, etc. are not coherent:
	 *
	 * WARNING: The DMA API does not support concurrent CPU
	 * and device access to the memory area. With BIDIRECTIONAL,
	 * we will clean the cache lines which overlap the region,
	 * and invalidate all cache lines (partially) contained in
	 * the region.
	 *
	 * If you have dirty data in the overlapping cache lines,
	 * that will corrupt the GPU-written data. If you have
	 * written into the remainder of the region, this can
	 * discard those writes.
	 */
	if (etnaviv_obj->flags & ETNA_BO_CACHE_MASK)
		dma_unmap_sg(dev->dev, sgt->sgl, sgt->nents, DMA_BIDIRECTIONAL);
}

/* called with etnaviv_obj->lock held */
static int etnaviv_gem_shmem_get_pages(struct etnaviv_gem_object *etnaviv_obj)
{
	struct drm_device *dev = etnaviv_obj->base.dev;
	struct page **p = drm_gem_get_pages(&etnaviv_obj->base);

	if (IS_ERR(p)) {
		dev_dbg(dev->dev, "could not get pages: %ld\n", PTR_ERR(p));
		return PTR_ERR(p);
	}

	etnaviv_obj->pages = p;

	return 0;
}

static void put_pages(struct etnaviv_gem_object *etnaviv_obj)
{
	if (etnaviv_obj->sgt) {
		etnaviv_gem_scatterlist_unmap(etnaviv_obj);
		sg_free_table(etnaviv_obj->sgt);
		kfree(etnaviv_obj->sgt);
		etnaviv_obj->sgt = NULL;
	}
	if (etnaviv_obj->pages) {
		drm_gem_put_pages(&etnaviv_obj->base, etnaviv_obj->pages,
				  true, false);

		etnaviv_obj->pages = NULL;
	}
}
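
/*
 * Return the object's page array, populating the backing pages and the
 * scatter/gather table on first use. Must be called with etnaviv_obj->lock
 * held; the pages stay in place until put_pages() tears them down.
 */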
struct page **etnaviv_gem_get_pages(struct etnaviv_gem_object *etnaviv_obj)
{
	int ret;

	lockdep_assert_held(&etnaviv_obj->lock);

	if (!etnaviv_obj->pages) {
		ret = etnaviv_obj->ops->get_pages(etnaviv_obj);
		if (ret < 0)
			return ERR_PTR(ret);
	}

	if (!etnaviv_obj->sgt) {
		struct drm_device *dev = etnaviv_obj->base.dev;
		int npages = etnaviv_obj->base.size >> PAGE_SHIFT;
		struct sg_table *sgt;

		sgt = drm_prime_pages_to_sg(etnaviv_obj->pages, npages);
		if (IS_ERR(sgt)) {
			dev_err(dev->dev, "failed to allocate sgt: %ld\n",
				PTR_ERR(sgt));
			return ERR_CAST(sgt);
		}

		etnaviv_obj->sgt = sgt;

		etnaviv_gem_scatter_map(etnaviv_obj);
	}

	return etnaviv_obj->pages;
}

void etnaviv_gem_put_pages(struct etnaviv_gem_object *etnaviv_obj)
{
	lockdep_assert_held(&etnaviv_obj->lock);
	/* when we start tracking the pin count, then do something here */
}

static int etnaviv_gem_mmap_obj(struct etnaviv_gem_object *etnaviv_obj,
		struct vm_area_struct *vma)
{
	pgprot_t vm_page_prot;

	vma->vm_flags &= ~VM_PFNMAP;
	vma->vm_flags |= VM_MIXEDMAP;

	vm_page_prot = vm_get_page_prot(vma->vm_flags);

	if (etnaviv_obj->flags & ETNA_BO_WC) {
		vma->vm_page_prot = pgprot_writecombine(vm_page_prot);
	} else if (etnaviv_obj->flags & ETNA_BO_UNCACHED) {
		vma->vm_page_prot = pgprot_noncached(vm_page_prot);
	} else {
		/*
		 * Shunt off cached objs to shmem file so they have their own
		 * address_space (so unmap_mapping_range does what we want,
		 * in particular in the case of mmap'd dmabufs)
		 */
		fput(vma->vm_file);
		get_file(etnaviv_obj->base.filp);
		vma->vm_pgoff = 0;
		vma->vm_file = etnaviv_obj->base.filp;

		vma->vm_page_prot = vm_page_prot;
	}

	return 0;
}

int etnaviv_gem_mmap(struct file *filp, struct vm_area_struct *vma)
{
	struct etnaviv_gem_object *obj;
	int ret;

	ret = drm_gem_mmap(filp, vma);
	if (ret) {
		DBG("mmap failed: %d", ret);
		return ret;
	}

	obj = to_etnaviv_bo(vma->vm_private_data);
	return obj->ops->mmap(obj, vma);
}
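
/*
 * Fault handler for userspace mappings: make sure the backing pages are
 * attached, then insert the single faulting page into the VMA.
 */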
vm_fault_t etnaviv_gem_fault(struct vm_fault *vmf)
{
	struct vm_area_struct *vma = vmf->vma;
	struct drm_gem_object *obj = vma->vm_private_data;
	struct etnaviv_gem_object *etnaviv_obj = to_etnaviv_bo(obj);
	struct page **pages, *page;
	pgoff_t pgoff;
	int err;

	/*
	 * Make sure we don't parallel update on a fault, nor move or remove
	 * something from beneath our feet. Note that vmf_insert_page() is
	 * specifically coded to take care of this, so we don't have to.
	 */
	err = mutex_lock_interruptible(&etnaviv_obj->lock);
	if (err)
		return VM_FAULT_NOPAGE;

	/* make sure we have pages attached now */
	pages = etnaviv_gem_get_pages(etnaviv_obj);
	mutex_unlock(&etnaviv_obj->lock);

	if (IS_ERR(pages)) {
		err = PTR_ERR(pages);
		return vmf_error(err);
	}

	/* We don't use vmf->pgoff since that has the fake offset: */
	pgoff = (vmf->address - vma->vm_start) >> PAGE_SHIFT;

	page = pages[pgoff];

	VERB("Inserting %p pfn %lx, pa %lx", (void *)vmf->address,
	     page_to_pfn(page), page_to_pfn(page) << PAGE_SHIFT);

	return vmf_insert_page(vma, vmf->address, page);
}

int etnaviv_gem_mmap_offset(struct drm_gem_object *obj, u64 *offset)
{
	int ret;

	/* Make it mmapable */
	ret = drm_gem_create_mmap_offset(obj);
	if (ret)
		dev_err(obj->dev->dev, "could not allocate mmap offset\n");
	else
		*offset = drm_vma_node_offset_addr(&obj->vma_node);

	return ret;
}
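
/* Find the existing mapping of this object on the given MMU, if any. */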
static struct etnaviv_vram_mapping *
etnaviv_gem_get_vram_mapping(struct etnaviv_gem_object *obj,
			     struct etnaviv_iommu *mmu)
{
	struct etnaviv_vram_mapping *mapping;

	list_for_each_entry(mapping, &obj->vram_list, obj_node) {
		if (mapping->mmu == mmu)
			return mapping;
	}

	return NULL;
}

void etnaviv_gem_mapping_reference(struct etnaviv_vram_mapping *mapping)
{
	struct etnaviv_gem_object *etnaviv_obj = mapping->object;

	drm_gem_object_get(&etnaviv_obj->base);

	mutex_lock(&etnaviv_obj->lock);
	WARN_ON(mapping->use == 0);
	mapping->use += 1;
	mutex_unlock(&etnaviv_obj->lock);
}

void etnaviv_gem_mapping_unreference(struct etnaviv_vram_mapping *mapping)
{
	struct etnaviv_gem_object *etnaviv_obj = mapping->object;

	mutex_lock(&etnaviv_obj->lock);
	WARN_ON(mapping->use == 0);
	mapping->use -= 1;
	mutex_unlock(&etnaviv_obj->lock);

	drm_gem_object_put_unlocked(&etnaviv_obj->base);
}
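
/*
 * Look up or create the GPU (IOMMU) mapping for this object. An existing
 * mapping for the GPU's MMU is re-used and its use count bumped; otherwise
 * a reaped mapping is recycled or a fresh one allocated and mapped through
 * etnaviv_iommu_map_gem(). On success a reference is taken on the object.
 */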
struct etnaviv_vram_mapping *etnaviv_gem_mapping_get(
	struct drm_gem_object *obj, struct etnaviv_gpu *gpu)
{
	struct etnaviv_gem_object *etnaviv_obj = to_etnaviv_bo(obj);
	struct etnaviv_vram_mapping *mapping;
	struct page **pages;
	int ret = 0;

	mutex_lock(&etnaviv_obj->lock);
	mapping = etnaviv_gem_get_vram_mapping(etnaviv_obj, gpu->mmu);
	if (mapping) {
		/*
		 * Holding the object lock prevents the use count changing
		 * beneath us. If the use count is zero, the MMU might be
		 * reaping this object, so take the lock and re-check that
		 * the MMU owns this mapping to close this race.
		 */
		if (mapping->use == 0) {
			mutex_lock(&gpu->mmu->lock);
			if (mapping->mmu == gpu->mmu)
				mapping->use += 1;
			else
				mapping = NULL;
			mutex_unlock(&gpu->mmu->lock);
			if (mapping)
				goto out;
		} else {
			mapping->use += 1;
			goto out;
		}
	}

	pages = etnaviv_gem_get_pages(etnaviv_obj);
	if (IS_ERR(pages)) {
		ret = PTR_ERR(pages);
		goto out;
	}

	/*
	 * See if we have a reaped vram mapping we can re-use before
	 * allocating a fresh mapping.
	 */
	mapping = etnaviv_gem_get_vram_mapping(etnaviv_obj, NULL);
	if (!mapping) {
		mapping = kzalloc(sizeof(*mapping), GFP_KERNEL);
		if (!mapping) {
			ret = -ENOMEM;
			goto out;
		}

		INIT_LIST_HEAD(&mapping->scan_node);
		mapping->object = etnaviv_obj;
	} else {
		list_del(&mapping->obj_node);
	}

	mapping->mmu = gpu->mmu;
	mapping->use = 1;

	ret = etnaviv_iommu_map_gem(gpu->mmu, etnaviv_obj, gpu->memory_base,
				    mapping);
	if (ret < 0)
		kfree(mapping);
	else
		list_add_tail(&mapping->obj_node, &etnaviv_obj->vram_list);

out:
	mutex_unlock(&etnaviv_obj->lock);

	if (ret)
		return ERR_PTR(ret);

	/* Take a reference on the object */
	drm_gem_object_get(obj);
	return mapping;
}
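
/*
 * Return a kernel virtual mapping of the object, creating it on first use.
 * The mapping is cached in etnaviv_obj->vaddr for the lifetime of the object.
 */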
void *etnaviv_gem_vmap(struct drm_gem_object *obj)
{
	struct etnaviv_gem_object *etnaviv_obj = to_etnaviv_bo(obj);

	if (etnaviv_obj->vaddr)
		return etnaviv_obj->vaddr;

	mutex_lock(&etnaviv_obj->lock);
	/*
	 * Need to check again, as we might have raced with another thread
	 * while waiting for the mutex.
	 */
	if (!etnaviv_obj->vaddr)
		etnaviv_obj->vaddr = etnaviv_obj->ops->vmap(etnaviv_obj);
	mutex_unlock(&etnaviv_obj->lock);

	return etnaviv_obj->vaddr;
}

static void *etnaviv_gem_vmap_impl(struct etnaviv_gem_object *obj)
{
	struct page **pages;

	lockdep_assert_held(&obj->lock);

	pages = etnaviv_gem_get_pages(obj);
	if (IS_ERR(pages))
		return NULL;

	return vmap(pages, obj->base.size >> PAGE_SHIFT,
		    VM_MAP, pgprot_writecombine(PAGE_KERNEL));
}

static inline enum dma_data_direction etnaviv_op_to_dma_dir(u32 op)
{
	if (op & ETNA_PREP_READ)
		return DMA_FROM_DEVICE;
	else if (op & ETNA_PREP_WRITE)
		return DMA_TO_DEVICE;
	else
		return DMA_BIDIRECTIONAL;
}
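
/*
 * Prepare the object for CPU access: wait (or just poll, with
 * ETNA_PREP_NOSYNC) for outstanding GPU access via the reservation object,
 * then sync the scatterlist for the CPU if the BO is cacheable. Paired with
 * etnaviv_gem_cpu_fini(), which hands ownership back to the device.
 */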
int etnaviv_gem_cpu_prep(struct drm_gem_object *obj, u32 op,
		struct timespec *timeout)
{
	struct etnaviv_gem_object *etnaviv_obj = to_etnaviv_bo(obj);
	struct drm_device *dev = obj->dev;
	bool write = !!(op & ETNA_PREP_WRITE);
	int ret;

	if (!etnaviv_obj->sgt) {
		void *ret;

		mutex_lock(&etnaviv_obj->lock);
		ret = etnaviv_gem_get_pages(etnaviv_obj);
		mutex_unlock(&etnaviv_obj->lock);
		if (IS_ERR(ret))
			return PTR_ERR(ret);
	}

	if (op & ETNA_PREP_NOSYNC) {
		if (!reservation_object_test_signaled_rcu(etnaviv_obj->resv,
							  write))
			return -EBUSY;
	} else {
		unsigned long remain = etnaviv_timeout_to_jiffies(timeout);

		ret = reservation_object_wait_timeout_rcu(etnaviv_obj->resv,
							  write, true, remain);
		if (ret <= 0)
			return ret == 0 ? -ETIMEDOUT : ret;
	}

	if (etnaviv_obj->flags & ETNA_BO_CACHED) {
		dma_sync_sg_for_cpu(dev->dev, etnaviv_obj->sgt->sgl,
				    etnaviv_obj->sgt->nents,
				    etnaviv_op_to_dma_dir(op));
		etnaviv_obj->last_cpu_prep_op = op;
	}

	return 0;
}

int etnaviv_gem_cpu_fini(struct drm_gem_object *obj)
{
	struct drm_device *dev = obj->dev;
	struct etnaviv_gem_object *etnaviv_obj = to_etnaviv_bo(obj);

	if (etnaviv_obj->flags & ETNA_BO_CACHED) {
		/* fini without a prep is almost certainly a userspace error */
		WARN_ON(etnaviv_obj->last_cpu_prep_op == 0);
		dma_sync_sg_for_device(dev->dev, etnaviv_obj->sgt->sgl,
			etnaviv_obj->sgt->nents,
			etnaviv_op_to_dma_dir(etnaviv_obj->last_cpu_prep_op));
		etnaviv_obj->last_cpu_prep_op = 0;
	}

	return 0;
}

int etnaviv_gem_wait_bo(struct etnaviv_gpu *gpu, struct drm_gem_object *obj,
	struct timespec *timeout)
{
	struct etnaviv_gem_object *etnaviv_obj = to_etnaviv_bo(obj);

	return etnaviv_gpu_wait_obj_inactive(gpu, etnaviv_obj, timeout);
}

#ifdef CONFIG_DEBUG_FS
static void etnaviv_gem_describe_fence(struct dma_fence *fence,
	const char *type, struct seq_file *m)
{
	if (!test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags))
		seq_printf(m, "\t%9s: %s %s seq %u\n",
			   type,
			   fence->ops->get_driver_name(fence),
			   fence->ops->get_timeline_name(fence),
			   fence->seqno);
}

static void etnaviv_gem_describe(struct drm_gem_object *obj, struct seq_file *m)
{
	struct etnaviv_gem_object *etnaviv_obj = to_etnaviv_bo(obj);
	struct reservation_object *robj = etnaviv_obj->resv;
	struct reservation_object_list *fobj;
	struct dma_fence *fence;
	unsigned long off = drm_vma_node_start(&obj->vma_node);

	seq_printf(m, "%08x: %c %2d (%2d) %08lx %p %zd\n",
			etnaviv_obj->flags, is_active(etnaviv_obj) ? 'A' : 'I',
			obj->name, kref_read(&obj->refcount),
			off, etnaviv_obj->vaddr, obj->size);

	rcu_read_lock();
	fobj = rcu_dereference(robj->fence);
	if (fobj) {
		unsigned int i, shared_count = fobj->shared_count;

		for (i = 0; i < shared_count; i++) {
			fence = rcu_dereference(fobj->shared[i]);
			etnaviv_gem_describe_fence(fence, "Shared", m);
		}
	}

	fence = rcu_dereference(robj->fence_excl);
	if (fence)
		etnaviv_gem_describe_fence(fence, "Exclusive", m);
	rcu_read_unlock();
}

void etnaviv_gem_describe_objects(struct etnaviv_drm_private *priv,
	struct seq_file *m)
{
	struct etnaviv_gem_object *etnaviv_obj;
	int count = 0;
	size_t size = 0;

	mutex_lock(&priv->gem_lock);
	list_for_each_entry(etnaviv_obj, &priv->gem_list, gem_node) {
		struct drm_gem_object *obj = &etnaviv_obj->base;

		seq_puts(m, " ");
		etnaviv_gem_describe(obj, m);
		count++;
		size += obj->size;
	}
	mutex_unlock(&priv->gem_lock);

	seq_printf(m, "Total %d objects, %zu bytes\n", count, size);
}
#endif

static void etnaviv_gem_shmem_release(struct etnaviv_gem_object *etnaviv_obj)
{
	vunmap(etnaviv_obj->vaddr);
	put_pages(etnaviv_obj);
}

static const struct etnaviv_gem_ops etnaviv_gem_shmem_ops = {
	.get_pages = etnaviv_gem_shmem_get_pages,
	.release = etnaviv_gem_shmem_release,
	.vmap = etnaviv_gem_vmap_impl,
	.mmap = etnaviv_gem_mmap_obj,
};
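
/*
 * Final teardown of a GEM object: remove it from the device list, unmap and
 * free any remaining VRAM mappings, release the mmap offset and the backing
 * store, then free the object itself.
 */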
void etnaviv_gem_free_object(struct drm_gem_object *obj)
{
	struct etnaviv_gem_object *etnaviv_obj = to_etnaviv_bo(obj);
	struct etnaviv_drm_private *priv = obj->dev->dev_private;
	struct etnaviv_vram_mapping *mapping, *tmp;

	/* object should not be active */
	WARN_ON(is_active(etnaviv_obj));

	mutex_lock(&priv->gem_lock);
	list_del(&etnaviv_obj->gem_node);
	mutex_unlock(&priv->gem_lock);

	list_for_each_entry_safe(mapping, tmp, &etnaviv_obj->vram_list,
				 obj_node) {
		struct etnaviv_iommu *mmu = mapping->mmu;

		WARN_ON(mapping->use);

		if (mmu)
			etnaviv_iommu_unmap_gem(mmu, mapping);

		list_del(&mapping->obj_node);
		kfree(mapping);
	}

	drm_gem_free_mmap_offset(obj);
	etnaviv_obj->ops->release(etnaviv_obj);
	if (etnaviv_obj->resv == &etnaviv_obj->_resv)
		reservation_object_fini(&etnaviv_obj->_resv);
	drm_gem_object_release(obj);

	kfree(etnaviv_obj);
}

void etnaviv_gem_obj_add(struct drm_device *dev, struct drm_gem_object *obj)
{
	struct etnaviv_drm_private *priv = dev->dev_private;
	struct etnaviv_gem_object *etnaviv_obj = to_etnaviv_bo(obj);

	mutex_lock(&priv->gem_lock);
	list_add_tail(&etnaviv_obj->gem_node, &priv->gem_list);
	mutex_unlock(&priv->gem_lock);
}
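
/*
 * Common allocation path: validate the caching flags, allocate the
 * etnaviv_gem_object, and set up its reservation object, lock and mapping
 * list. The caller is still responsible for initializing the backing store.
 */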
static int etnaviv_gem_new_impl(struct drm_device *dev, u32 size, u32 flags,
	struct reservation_object *robj, const struct etnaviv_gem_ops *ops,
	struct drm_gem_object **obj)
{
	struct etnaviv_gem_object *etnaviv_obj;
	unsigned sz = sizeof(*etnaviv_obj);
	bool valid = true;

	/* validate flags */
	switch (flags & ETNA_BO_CACHE_MASK) {
	case ETNA_BO_UNCACHED:
	case ETNA_BO_CACHED:
	case ETNA_BO_WC:
		break;
	default:
		valid = false;
	}

	if (!valid) {
		dev_err(dev->dev, "invalid cache flag: %x\n",
			(flags & ETNA_BO_CACHE_MASK));
		return -EINVAL;
	}

	etnaviv_obj = kzalloc(sz, GFP_KERNEL);
	if (!etnaviv_obj)
		return -ENOMEM;

	etnaviv_obj->flags = flags;
	etnaviv_obj->ops = ops;
	if (robj) {
		etnaviv_obj->resv = robj;
	} else {
		etnaviv_obj->resv = &etnaviv_obj->_resv;
		reservation_object_init(&etnaviv_obj->_resv);
	}

	mutex_init(&etnaviv_obj->lock);
	INIT_LIST_HEAD(&etnaviv_obj->vram_list);

	*obj = &etnaviv_obj->base;

	return 0;
}

/* convenience method to construct a GEM buffer object, and userspace handle */
int etnaviv_gem_new_handle(struct drm_device *dev, struct drm_file *file,
	u32 size, u32 flags, u32 *handle)
{
	struct drm_gem_object *obj = NULL;
	int ret;

	size = PAGE_ALIGN(size);

	ret = etnaviv_gem_new_impl(dev, size, flags, NULL,
				   &etnaviv_gem_shmem_ops, &obj);
	if (ret)
		goto fail;

	lockdep_set_class(&to_etnaviv_bo(obj)->lock, &etnaviv_shm_lock_class);

	ret = drm_gem_object_init(dev, obj, size);
	if (ret == 0) {
		struct address_space *mapping;

		/*
		 * Our buffers are kept pinned, so allocating them
		 * from the MOVABLE zone is a really bad idea, and
		 * conflicts with CMA. See comments above new_inode()
		 * why this is required _and_ expected if you're
		 * going to pin these pages.
		 */
		mapping = obj->filp->f_mapping;
		mapping_set_gfp_mask(mapping, GFP_HIGHUSER |
				     __GFP_RETRY_MAYFAIL | __GFP_NOWARN);
	}

	if (ret)
		goto fail;

	etnaviv_gem_obj_add(dev, obj);

	ret = drm_gem_handle_create(file, obj, handle);

	/* drop reference from allocate - handle holds it now */
fail:
	drm_gem_object_put_unlocked(obj);

	return ret;
}

int etnaviv_gem_new_private(struct drm_device *dev, size_t size, u32 flags,
	struct reservation_object *robj, const struct etnaviv_gem_ops *ops,
	struct etnaviv_gem_object **res)
{
	struct drm_gem_object *obj;
	int ret;

	ret = etnaviv_gem_new_impl(dev, size, flags, robj, ops, &obj);
	if (ret)
		return ret;

	drm_gem_private_object_init(dev, obj, size);

	*res = to_etnaviv_bo(obj);

	return 0;
}
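
/*
 * Pin the pages backing a userptr object with get_user_pages_fast(),
 * looping until the whole range is pinned. Only the mm that created the
 * object is allowed to populate its pages.
 */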
static int etnaviv_gem_userptr_get_pages(struct etnaviv_gem_object *etnaviv_obj)
{
	struct page **pvec = NULL;
	struct etnaviv_gem_userptr *userptr = &etnaviv_obj->userptr;
	int ret, pinned = 0, npages = etnaviv_obj->base.size >> PAGE_SHIFT;

	might_lock_read(&current->mm->mmap_sem);

	if (userptr->mm != current->mm)
		return -EPERM;

	pvec = kvmalloc_array(npages, sizeof(struct page *), GFP_KERNEL);
	if (!pvec)
		return -ENOMEM;

	do {
		unsigned num_pages = npages - pinned;
		uint64_t ptr = userptr->ptr + pinned * PAGE_SIZE;
		struct page **pages = pvec + pinned;

		ret = get_user_pages_fast(ptr, num_pages,
					  !userptr->ro ? FOLL_WRITE : 0, pages);
		if (ret < 0) {
			release_pages(pvec, pinned);
			kvfree(pvec);
			return ret;
		}

		pinned += ret;

	} while (pinned < npages);

	etnaviv_obj->pages = pvec;

	return 0;
}

static void etnaviv_gem_userptr_release(struct etnaviv_gem_object *etnaviv_obj)
{
	if (etnaviv_obj->sgt) {
		etnaviv_gem_scatterlist_unmap(etnaviv_obj);
		sg_free_table(etnaviv_obj->sgt);
		kfree(etnaviv_obj->sgt);
	}
	if (etnaviv_obj->pages) {
		int npages = etnaviv_obj->base.size >> PAGE_SHIFT;

		release_pages(etnaviv_obj->pages, npages);
		kvfree(etnaviv_obj->pages);
	}
}

static int etnaviv_gem_userptr_mmap_obj(struct etnaviv_gem_object *etnaviv_obj,
		struct vm_area_struct *vma)
{
	return -EINVAL;
}

static const struct etnaviv_gem_ops etnaviv_gem_userptr_ops = {
	.get_pages = etnaviv_gem_userptr_get_pages,
	.release = etnaviv_gem_userptr_release,
	.vmap = etnaviv_gem_vmap_impl,
	.mmap = etnaviv_gem_userptr_mmap_obj,
};
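
/*
 * Create a GEM object backed by a range of user memory (userptr). The object
 * is always treated as cached; its pages are pinned lazily on first use by
 * etnaviv_gem_userptr_get_pages().
 */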
int etnaviv_gem_new_userptr(struct drm_device *dev, struct drm_file *file,
	uintptr_t ptr, u32 size, u32 flags, u32 *handle)
{
	struct etnaviv_gem_object *etnaviv_obj;
	int ret;

	ret = etnaviv_gem_new_private(dev, size, ETNA_BO_CACHED, NULL,
				      &etnaviv_gem_userptr_ops, &etnaviv_obj);
	if (ret)
		return ret;

	lockdep_set_class(&etnaviv_obj->lock, &etnaviv_userptr_lock_class);

	etnaviv_obj->userptr.ptr = ptr;
	etnaviv_obj->userptr.mm = current->mm;
	etnaviv_obj->userptr.ro = !(flags & ETNA_USERPTR_WRITE);

	etnaviv_gem_obj_add(dev, &etnaviv_obj->base);

	ret = drm_gem_handle_create(file, &etnaviv_obj->base, handle);

	/* drop reference from allocate - handle holds it now */
	drm_gem_object_put_unlocked(&etnaviv_obj->base);

	return ret;
}