/* etnaviv_drv.c */
  1. // SPDX-License-Identifier: GPL-2.0
  2. /*
  3. * Copyright (C) 2015-2018 Etnaviv Project
  4. */
  5. #include <linux/component.h>
  6. #include <linux/of_platform.h>
  7. #include <drm/drm_of.h>
  8. #include "etnaviv_cmdbuf.h"
  9. #include "etnaviv_drv.h"
  10. #include "etnaviv_gpu.h"
  11. #include "etnaviv_gem.h"
  12. #include "etnaviv_mmu.h"
  13. #include "etnaviv_perfmon.h"
  14. /*
  15. * DRM operations:
  16. */
  17. static void load_gpu(struct drm_device *dev)
  18. {
  19. struct etnaviv_drm_private *priv = dev->dev_private;
  20. unsigned int i;
  21. for (i = 0; i < ETNA_MAX_PIPES; i++) {
  22. struct etnaviv_gpu *g = priv->gpu[i];
  23. if (g) {
  24. int ret;
  25. ret = etnaviv_gpu_init(g);
  26. if (ret)
  27. priv->gpu[i] = NULL;
  28. }
  29. }
  30. }
  31. static int etnaviv_open(struct drm_device *dev, struct drm_file *file)
  32. {
  33. struct etnaviv_drm_private *priv = dev->dev_private;
  34. struct etnaviv_file_private *ctx;
  35. int i;
  36. ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
  37. if (!ctx)
  38. return -ENOMEM;
  39. for (i = 0; i < ETNA_MAX_PIPES; i++) {
  40. struct etnaviv_gpu *gpu = priv->gpu[i];
  41. struct drm_sched_rq *rq;
  42. if (gpu) {
  43. rq = &gpu->sched.sched_rq[DRM_SCHED_PRIORITY_NORMAL];
  44. drm_sched_entity_init(&ctx->sched_entity[i],
  45. &rq, 1, NULL);
  46. }
  47. }
  48. file->driver_priv = ctx;
  49. return 0;
  50. }
  51. static void etnaviv_postclose(struct drm_device *dev, struct drm_file *file)
  52. {
  53. struct etnaviv_drm_private *priv = dev->dev_private;
  54. struct etnaviv_file_private *ctx = file->driver_priv;
  55. unsigned int i;
  56. for (i = 0; i < ETNA_MAX_PIPES; i++) {
  57. struct etnaviv_gpu *gpu = priv->gpu[i];
  58. if (gpu) {
  59. mutex_lock(&gpu->lock);
  60. if (gpu->lastctx == ctx)
  61. gpu->lastctx = NULL;
  62. mutex_unlock(&gpu->lock);
  63. drm_sched_entity_destroy(&ctx->sched_entity[i]);
  64. }
  65. }
  66. kfree(ctx);
  67. }
  68. /*
  69. * DRM debugfs:
  70. */
  71. #ifdef CONFIG_DEBUG_FS
  72. static int etnaviv_gem_show(struct drm_device *dev, struct seq_file *m)
  73. {
  74. struct etnaviv_drm_private *priv = dev->dev_private;
  75. etnaviv_gem_describe_objects(priv, m);
  76. return 0;
  77. }
  78. static int etnaviv_mm_show(struct drm_device *dev, struct seq_file *m)
  79. {
  80. struct drm_printer p = drm_seq_file_printer(m);
  81. read_lock(&dev->vma_offset_manager->vm_lock);
  82. drm_mm_print(&dev->vma_offset_manager->vm_addr_space_mm, &p);
  83. read_unlock(&dev->vma_offset_manager->vm_lock);
  84. return 0;
  85. }
  86. static int etnaviv_mmu_show(struct etnaviv_gpu *gpu, struct seq_file *m)
  87. {
  88. struct drm_printer p = drm_seq_file_printer(m);
  89. seq_printf(m, "Active Objects (%s):\n", dev_name(gpu->dev));
  90. mutex_lock(&gpu->mmu->lock);
  91. drm_mm_print(&gpu->mmu->mm, &p);
  92. mutex_unlock(&gpu->mmu->lock);
  93. return 0;
  94. }
  95. static void etnaviv_buffer_dump(struct etnaviv_gpu *gpu, struct seq_file *m)
  96. {
  97. struct etnaviv_cmdbuf *buf = &gpu->buffer;
  98. u32 size = buf->size;
  99. u32 *ptr = buf->vaddr;
  100. u32 i;
  101. seq_printf(m, "virt %p - phys 0x%llx - free 0x%08x\n",
  102. buf->vaddr, (u64)etnaviv_cmdbuf_get_pa(buf),
  103. size - buf->user_size);
  104. for (i = 0; i < size / 4; i++) {
  105. if (i && !(i % 4))
  106. seq_puts(m, "\n");
  107. if (i % 4 == 0)
  108. seq_printf(m, "\t0x%p: ", ptr + i);
  109. seq_printf(m, "%08x ", *(ptr + i));
  110. }
  111. seq_puts(m, "\n");
  112. }
  113. static int etnaviv_ring_show(struct etnaviv_gpu *gpu, struct seq_file *m)
  114. {
  115. seq_printf(m, "Ring Buffer (%s): ", dev_name(gpu->dev));
  116. mutex_lock(&gpu->lock);
  117. etnaviv_buffer_dump(gpu, m);
  118. mutex_unlock(&gpu->lock);
  119. return 0;
  120. }
  121. static int show_unlocked(struct seq_file *m, void *arg)
  122. {
  123. struct drm_info_node *node = (struct drm_info_node *) m->private;
  124. struct drm_device *dev = node->minor->dev;
  125. int (*show)(struct drm_device *dev, struct seq_file *m) =
  126. node->info_ent->data;
  127. return show(dev, m);
  128. }
  129. static int show_each_gpu(struct seq_file *m, void *arg)
  130. {
  131. struct drm_info_node *node = (struct drm_info_node *) m->private;
  132. struct drm_device *dev = node->minor->dev;
  133. struct etnaviv_drm_private *priv = dev->dev_private;
  134. struct etnaviv_gpu *gpu;
  135. int (*show)(struct etnaviv_gpu *gpu, struct seq_file *m) =
  136. node->info_ent->data;
  137. unsigned int i;
  138. int ret = 0;
  139. for (i = 0; i < ETNA_MAX_PIPES; i++) {
  140. gpu = priv->gpu[i];
  141. if (!gpu)
  142. continue;
  143. ret = show(gpu, m);
  144. if (ret < 0)
  145. break;
  146. }
  147. return ret;
  148. }
  149. static struct drm_info_list etnaviv_debugfs_list[] = {
  150. {"gpu", show_each_gpu, 0, etnaviv_gpu_debugfs},
  151. {"gem", show_unlocked, 0, etnaviv_gem_show},
  152. { "mm", show_unlocked, 0, etnaviv_mm_show },
  153. {"mmu", show_each_gpu, 0, etnaviv_mmu_show},
  154. {"ring", show_each_gpu, 0, etnaviv_ring_show},
  155. };
  156. static int etnaviv_debugfs_init(struct drm_minor *minor)
  157. {
  158. struct drm_device *dev = minor->dev;
  159. int ret;
  160. ret = drm_debugfs_create_files(etnaviv_debugfs_list,
  161. ARRAY_SIZE(etnaviv_debugfs_list),
  162. minor->debugfs_root, minor);
  163. if (ret) {
  164. dev_err(dev->dev, "could not install etnaviv_debugfs_list\n");
  165. return ret;
  166. }
  167. return ret;
  168. }
  169. #endif
  170. /*
  171. * DRM ioctls:
  172. */
  173. static int etnaviv_ioctl_get_param(struct drm_device *dev, void *data,
  174. struct drm_file *file)
  175. {
  176. struct etnaviv_drm_private *priv = dev->dev_private;
  177. struct drm_etnaviv_param *args = data;
  178. struct etnaviv_gpu *gpu;
  179. if (args->pipe >= ETNA_MAX_PIPES)
  180. return -EINVAL;
  181. gpu = priv->gpu[args->pipe];
  182. if (!gpu)
  183. return -ENXIO;
  184. return etnaviv_gpu_get_param(gpu, args->param, &args->value);
  185. }
  186. static int etnaviv_ioctl_gem_new(struct drm_device *dev, void *data,
  187. struct drm_file *file)
  188. {
  189. struct drm_etnaviv_gem_new *args = data;
  190. if (args->flags & ~(ETNA_BO_CACHED | ETNA_BO_WC | ETNA_BO_UNCACHED |
  191. ETNA_BO_FORCE_MMU))
  192. return -EINVAL;
  193. return etnaviv_gem_new_handle(dev, file, args->size,
  194. args->flags, &args->handle);
  195. }
/*
 * Build a struct timespec compound literal from any object carrying
 * tv_sec/tv_nsec members (the ioctl args' timeout fields).  Callers take
 * its address (&TS(...)); the literal's storage lasts until the end of
 * the enclosing block, which covers all current uses in this file.
 */
#define TS(t) ((struct timespec){ \
	.tv_sec = (t).tv_sec, \
	.tv_nsec = (t).tv_nsec \
})
  200. static int etnaviv_ioctl_gem_cpu_prep(struct drm_device *dev, void *data,
  201. struct drm_file *file)
  202. {
  203. struct drm_etnaviv_gem_cpu_prep *args = data;
  204. struct drm_gem_object *obj;
  205. int ret;
  206. if (args->op & ~(ETNA_PREP_READ | ETNA_PREP_WRITE | ETNA_PREP_NOSYNC))
  207. return -EINVAL;
  208. obj = drm_gem_object_lookup(file, args->handle);
  209. if (!obj)
  210. return -ENOENT;
  211. ret = etnaviv_gem_cpu_prep(obj, args->op, &TS(args->timeout));
  212. drm_gem_object_put_unlocked(obj);
  213. return ret;
  214. }
  215. static int etnaviv_ioctl_gem_cpu_fini(struct drm_device *dev, void *data,
  216. struct drm_file *file)
  217. {
  218. struct drm_etnaviv_gem_cpu_fini *args = data;
  219. struct drm_gem_object *obj;
  220. int ret;
  221. if (args->flags)
  222. return -EINVAL;
  223. obj = drm_gem_object_lookup(file, args->handle);
  224. if (!obj)
  225. return -ENOENT;
  226. ret = etnaviv_gem_cpu_fini(obj);
  227. drm_gem_object_put_unlocked(obj);
  228. return ret;
  229. }
  230. static int etnaviv_ioctl_gem_info(struct drm_device *dev, void *data,
  231. struct drm_file *file)
  232. {
  233. struct drm_etnaviv_gem_info *args = data;
  234. struct drm_gem_object *obj;
  235. int ret;
  236. if (args->pad)
  237. return -EINVAL;
  238. obj = drm_gem_object_lookup(file, args->handle);
  239. if (!obj)
  240. return -ENOENT;
  241. ret = etnaviv_gem_mmap_offset(obj, &args->offset);
  242. drm_gem_object_put_unlocked(obj);
  243. return ret;
  244. }
  245. static int etnaviv_ioctl_wait_fence(struct drm_device *dev, void *data,
  246. struct drm_file *file)
  247. {
  248. struct drm_etnaviv_wait_fence *args = data;
  249. struct etnaviv_drm_private *priv = dev->dev_private;
  250. struct timespec *timeout = &TS(args->timeout);
  251. struct etnaviv_gpu *gpu;
  252. if (args->flags & ~(ETNA_WAIT_NONBLOCK))
  253. return -EINVAL;
  254. if (args->pipe >= ETNA_MAX_PIPES)
  255. return -EINVAL;
  256. gpu = priv->gpu[args->pipe];
  257. if (!gpu)
  258. return -ENXIO;
  259. if (args->flags & ETNA_WAIT_NONBLOCK)
  260. timeout = NULL;
  261. return etnaviv_gpu_wait_fence_interruptible(gpu, args->fence,
  262. timeout);
  263. }
  264. static int etnaviv_ioctl_gem_userptr(struct drm_device *dev, void *data,
  265. struct drm_file *file)
  266. {
  267. struct drm_etnaviv_gem_userptr *args = data;
  268. int access;
  269. if (args->flags & ~(ETNA_USERPTR_READ|ETNA_USERPTR_WRITE) ||
  270. args->flags == 0)
  271. return -EINVAL;
  272. if (offset_in_page(args->user_ptr | args->user_size) ||
  273. (uintptr_t)args->user_ptr != args->user_ptr ||
  274. (u32)args->user_size != args->user_size ||
  275. args->user_ptr & ~PAGE_MASK)
  276. return -EINVAL;
  277. if (args->flags & ETNA_USERPTR_WRITE)
  278. access = VERIFY_WRITE;
  279. else
  280. access = VERIFY_READ;
  281. if (!access_ok(access, (void __user *)(unsigned long)args->user_ptr,
  282. args->user_size))
  283. return -EFAULT;
  284. return etnaviv_gem_new_userptr(dev, file, args->user_ptr,
  285. args->user_size, args->flags,
  286. &args->handle);
  287. }
  288. static int etnaviv_ioctl_gem_wait(struct drm_device *dev, void *data,
  289. struct drm_file *file)
  290. {
  291. struct etnaviv_drm_private *priv = dev->dev_private;
  292. struct drm_etnaviv_gem_wait *args = data;
  293. struct timespec *timeout = &TS(args->timeout);
  294. struct drm_gem_object *obj;
  295. struct etnaviv_gpu *gpu;
  296. int ret;
  297. if (args->flags & ~(ETNA_WAIT_NONBLOCK))
  298. return -EINVAL;
  299. if (args->pipe >= ETNA_MAX_PIPES)
  300. return -EINVAL;
  301. gpu = priv->gpu[args->pipe];
  302. if (!gpu)
  303. return -ENXIO;
  304. obj = drm_gem_object_lookup(file, args->handle);
  305. if (!obj)
  306. return -ENOENT;
  307. if (args->flags & ETNA_WAIT_NONBLOCK)
  308. timeout = NULL;
  309. ret = etnaviv_gem_wait_bo(gpu, obj, timeout);
  310. drm_gem_object_put_unlocked(obj);
  311. return ret;
  312. }
  313. static int etnaviv_ioctl_pm_query_dom(struct drm_device *dev, void *data,
  314. struct drm_file *file)
  315. {
  316. struct etnaviv_drm_private *priv = dev->dev_private;
  317. struct drm_etnaviv_pm_domain *args = data;
  318. struct etnaviv_gpu *gpu;
  319. if (args->pipe >= ETNA_MAX_PIPES)
  320. return -EINVAL;
  321. gpu = priv->gpu[args->pipe];
  322. if (!gpu)
  323. return -ENXIO;
  324. return etnaviv_pm_query_dom(gpu, args);
  325. }
  326. static int etnaviv_ioctl_pm_query_sig(struct drm_device *dev, void *data,
  327. struct drm_file *file)
  328. {
  329. struct etnaviv_drm_private *priv = dev->dev_private;
  330. struct drm_etnaviv_pm_signal *args = data;
  331. struct etnaviv_gpu *gpu;
  332. if (args->pipe >= ETNA_MAX_PIPES)
  333. return -EINVAL;
  334. gpu = priv->gpu[args->pipe];
  335. if (!gpu)
  336. return -ENXIO;
  337. return etnaviv_pm_query_sig(gpu, args);
  338. }
/*
 * Driver-private ioctl table.  Every entry requires authentication
 * (DRM_AUTH) and is also permitted on render nodes (DRM_RENDER_ALLOW).
 * The ETNA_IOCTL macro pastes the uapi number and handler name together.
 */
static const struct drm_ioctl_desc etnaviv_ioctls[] = {
#define ETNA_IOCTL(n, func, flags) \
	DRM_IOCTL_DEF_DRV(ETNAVIV_##n, etnaviv_ioctl_##func, flags)
	ETNA_IOCTL(GET_PARAM, get_param, DRM_AUTH|DRM_RENDER_ALLOW),
	ETNA_IOCTL(GEM_NEW, gem_new, DRM_AUTH|DRM_RENDER_ALLOW),
	ETNA_IOCTL(GEM_INFO, gem_info, DRM_AUTH|DRM_RENDER_ALLOW),
	ETNA_IOCTL(GEM_CPU_PREP, gem_cpu_prep, DRM_AUTH|DRM_RENDER_ALLOW),
	ETNA_IOCTL(GEM_CPU_FINI, gem_cpu_fini, DRM_AUTH|DRM_RENDER_ALLOW),
	/* gem_submit handler is defined in another file of this driver. */
	ETNA_IOCTL(GEM_SUBMIT, gem_submit, DRM_AUTH|DRM_RENDER_ALLOW),
	ETNA_IOCTL(WAIT_FENCE, wait_fence, DRM_AUTH|DRM_RENDER_ALLOW),
	ETNA_IOCTL(GEM_USERPTR, gem_userptr, DRM_AUTH|DRM_RENDER_ALLOW),
	ETNA_IOCTL(GEM_WAIT, gem_wait, DRM_AUTH|DRM_RENDER_ALLOW),
	ETNA_IOCTL(PM_QUERY_DOM, pm_query_dom, DRM_AUTH|DRM_RENDER_ALLOW),
	ETNA_IOCTL(PM_QUERY_SIG, pm_query_sig, DRM_AUTH|DRM_RENDER_ALLOW),
};
/*
 * VM operations for mmap'ed GEM objects: driver-specific fault handler,
 * standard DRM open/close for VMA refcounting.
 */
static const struct vm_operations_struct vm_ops = {
	.fault = etnaviv_gem_fault,
	.open = drm_gem_vm_open,
	.close = drm_gem_vm_close,
};
/*
 * File operations for the DRM device node; all standard DRM helpers
 * except mmap, which routes through the driver's GEM mmap.
 */
static const struct file_operations fops = {
	.owner = THIS_MODULE,
	.open = drm_open,
	.release = drm_release,
	.unlocked_ioctl = drm_ioctl,
	.compat_ioctl = drm_compat_ioctl,
	.poll = drm_poll,
	.read = drm_read,
	.llseek = no_llseek,
	.mmap = etnaviv_gem_mmap,
};
/*
 * DRM driver description: a render-only driver (GEM + PRIME + render
 * nodes, no modesetting).  PRIME import/export uses the generic DRM
 * helpers with driver hooks for pin/unpin, sg-tables and vmap.
 */
static struct drm_driver etnaviv_drm_driver = {
	.driver_features = DRIVER_GEM |
			DRIVER_PRIME |
			DRIVER_RENDER,
	.open = etnaviv_open,
	.postclose = etnaviv_postclose,
	.gem_free_object_unlocked = etnaviv_gem_free_object,
	.gem_vm_ops = &vm_ops,
	.prime_handle_to_fd = drm_gem_prime_handle_to_fd,
	.prime_fd_to_handle = drm_gem_prime_fd_to_handle,
	.gem_prime_export = drm_gem_prime_export,
	.gem_prime_import = drm_gem_prime_import,
	.gem_prime_res_obj = etnaviv_gem_prime_res_obj,
	.gem_prime_pin = etnaviv_gem_prime_pin,
	.gem_prime_unpin = etnaviv_gem_prime_unpin,
	.gem_prime_get_sg_table = etnaviv_gem_prime_get_sg_table,
	.gem_prime_import_sg_table = etnaviv_gem_prime_import_sg_table,
	.gem_prime_vmap = etnaviv_gem_prime_vmap,
	.gem_prime_vunmap = etnaviv_gem_prime_vunmap,
	.gem_prime_mmap = etnaviv_gem_prime_mmap,
#ifdef CONFIG_DEBUG_FS
	.debugfs_init = etnaviv_debugfs_init,
#endif
	.ioctls = etnaviv_ioctls,
	.num_ioctls = DRM_ETNAVIV_NUM_IOCTLS,
	.fops = &fops,
	.name = "etnaviv",
	.desc = "etnaviv DRM",
	.date = "20151214",
	.major = 1,
	.minor = 2,
};
  402. /*
  403. * Platform driver:
  404. */
  405. static int etnaviv_bind(struct device *dev)
  406. {
  407. struct etnaviv_drm_private *priv;
  408. struct drm_device *drm;
  409. int ret;
  410. drm = drm_dev_alloc(&etnaviv_drm_driver, dev);
  411. if (IS_ERR(drm))
  412. return PTR_ERR(drm);
  413. priv = kzalloc(sizeof(*priv), GFP_KERNEL);
  414. if (!priv) {
  415. dev_err(dev, "failed to allocate private data\n");
  416. ret = -ENOMEM;
  417. goto out_unref;
  418. }
  419. drm->dev_private = priv;
  420. dev->dma_parms = &priv->dma_parms;
  421. dma_set_max_seg_size(dev, SZ_2G);
  422. mutex_init(&priv->gem_lock);
  423. INIT_LIST_HEAD(&priv->gem_list);
  424. priv->num_gpus = 0;
  425. dev_set_drvdata(dev, drm);
  426. ret = component_bind_all(dev, drm);
  427. if (ret < 0)
  428. goto out_bind;
  429. load_gpu(drm);
  430. ret = drm_dev_register(drm, 0);
  431. if (ret)
  432. goto out_register;
  433. return 0;
  434. out_register:
  435. component_unbind_all(dev, drm);
  436. out_bind:
  437. kfree(priv);
  438. out_unref:
  439. drm_dev_unref(drm);
  440. return ret;
  441. }
  442. static void etnaviv_unbind(struct device *dev)
  443. {
  444. struct drm_device *drm = dev_get_drvdata(dev);
  445. struct etnaviv_drm_private *priv = drm->dev_private;
  446. drm_dev_unregister(drm);
  447. component_unbind_all(dev, drm);
  448. dev->dma_parms = NULL;
  449. drm->dev_private = NULL;
  450. kfree(priv);
  451. drm_dev_unref(drm);
  452. }
/* Component framework callbacks for the virtual master device. */
static const struct component_master_ops etnaviv_master_ops = {
	.bind = etnaviv_bind,
	.unbind = etnaviv_unbind,
};
  457. static int compare_of(struct device *dev, void *data)
  458. {
  459. struct device_node *np = data;
  460. return dev->of_node == np;
  461. }
  462. static int compare_str(struct device *dev, void *data)
  463. {
  464. return !strcmp(dev_name(dev), data);
  465. }
  466. static int etnaviv_pdev_probe(struct platform_device *pdev)
  467. {
  468. struct device *dev = &pdev->dev;
  469. struct component_match *match = NULL;
  470. if (!dev->platform_data) {
  471. struct device_node *core_node;
  472. for_each_compatible_node(core_node, NULL, "vivante,gc") {
  473. if (!of_device_is_available(core_node))
  474. continue;
  475. drm_of_component_match_add(&pdev->dev, &match,
  476. compare_of, core_node);
  477. }
  478. } else {
  479. char **names = dev->platform_data;
  480. unsigned i;
  481. for (i = 0; names[i]; i++)
  482. component_match_add(dev, &match, compare_str, names[i]);
  483. }
  484. return component_master_add_with_match(dev, &etnaviv_master_ops, match);
  485. }
  486. static int etnaviv_pdev_remove(struct platform_device *pdev)
  487. {
  488. component_master_del(&pdev->dev, &etnaviv_master_ops);
  489. return 0;
  490. }
/* Platform driver for the virtual "etnaviv" master device. */
static struct platform_driver etnaviv_platform_driver = {
	.probe = etnaviv_pdev_probe,
	.remove = etnaviv_pdev_remove,
	.driver = {
		.name = "etnaviv",
	},
};
/* The virtual platform device instantiated by etnaviv_init(), if any. */
static struct platform_device *etnaviv_drm;

/*
 * Module init: register the per-core GPU driver and the master platform
 * driver, then — if the DT contains at least one usable Vivante GPU —
 * create the virtual "etnaviv" platform device that ties them together.
 * Unwinds driver registrations in reverse order on any failure.
 */
static int __init etnaviv_init(void)
{
	struct platform_device *pdev;
	int ret;
	struct device_node *np;

	etnaviv_validate_init();

	ret = platform_driver_register(&etnaviv_gpu_driver);
	if (ret != 0)
		return ret;

	ret = platform_driver_register(&etnaviv_platform_driver);
	if (ret != 0)
		goto unregister_gpu_driver;

	/*
	 * If the DT contains at least one available GPU device, instantiate
	 * the DRM platform device.
	 */
	for_each_compatible_node(np, NULL, "vivante,gc") {
		if (!of_device_is_available(np))
			continue;

		pdev = platform_device_alloc("etnaviv", -1);
		if (!pdev) {
			ret = -ENOMEM;
			/* Drop the iterator's node reference before bailing. */
			of_node_put(np);
			goto unregister_platform_driver;
		}
		pdev->dev.coherent_dma_mask = DMA_BIT_MASK(40);
		pdev->dev.dma_mask = &pdev->dev.coherent_dma_mask;

		/*
		 * Apply the same DMA configuration to the virtual etnaviv
		 * device as the GPU we found. This assumes that all Vivante
		 * GPUs in the system share the same DMA constraints.
		 */
		of_dma_configure(&pdev->dev, np, true);

		ret = platform_device_add(pdev);
		if (ret) {
			platform_device_put(pdev);
			of_node_put(np);
			goto unregister_platform_driver;
		}

		etnaviv_drm = pdev;
		/* Only one virtual device is created; stop after the first. */
		of_node_put(np);
		break;
	}

	return 0;

unregister_platform_driver:
	platform_driver_unregister(&etnaviv_platform_driver);
unregister_gpu_driver:
	platform_driver_unregister(&etnaviv_gpu_driver);
	return ret;
}
module_init(etnaviv_init);
  550. static void __exit etnaviv_exit(void)
  551. {
  552. platform_device_unregister(etnaviv_drm);
  553. platform_driver_unregister(&etnaviv_platform_driver);
  554. platform_driver_unregister(&etnaviv_gpu_driver);
  555. }
  556. module_exit(etnaviv_exit);
  557. MODULE_AUTHOR("Christian Gmeiner <christian.gmeiner@gmail.com>");
  558. MODULE_AUTHOR("Russell King <rmk+kernel@arm.linux.org.uk>");
  559. MODULE_AUTHOR("Lucas Stach <l.stach@pengutronix.de>");
  560. MODULE_DESCRIPTION("etnaviv DRM Driver");
  561. MODULE_LICENSE("GPL v2");
  562. MODULE_ALIAS("platform:etnaviv");