// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2012 Avionic Design GmbH
 * Copyright (C) 2012-2016 NVIDIA CORPORATION. All rights reserved.
 */

#include <linux/bitops.h>
#include <linux/host1x.h>
#include <linux/idr.h>
#include <linux/iommu.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>

#include <drm/drm_aperture.h>
#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_debugfs.h>
#include <drm/drm_drv.h>
#include <drm/drm_fourcc.h>
#include <drm/drm_framebuffer.h>
#include <drm/drm_ioctl.h>
#include <drm/drm_prime.h>
#include <drm/drm_vblank.h>

#if IS_ENABLED(CONFIG_ARM_DMA_USE_IOMMU)
#include <asm/dma-iommu.h>
#endif

#include "dc.h"
#include "drm.h"
#include "gem.h"
#include "uapi.h"

#define DRIVER_NAME "tegra"
#define DRIVER_DESC "NVIDIA Tegra graphics"
#define DRIVER_DATE "20120330"
#define DRIVER_MAJOR 1
#define DRIVER_MINOR 0
#define DRIVER_PATCHLEVEL 0

#define CARVEOUT_SZ SZ_64M
#define CDMA_GATHER_FETCHES_MAX_NB 16383
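
/* Run the core atomic checks, then validate the display hub state. */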
static int tegra_atomic_check(struct drm_device *drm,
                              struct drm_atomic_state *state)
{
        int err;

        err = drm_atomic_helper_check(drm, state);
        if (err < 0)
                return err;

        return tegra_display_hub_atomic_check(drm, state);
}

static const struct drm_mode_config_funcs tegra_drm_mode_config_funcs = {
        .fb_create = tegra_fb_create,
        .atomic_check = tegra_atomic_check,
        .atomic_commit = drm_atomic_helper_commit,
};

static void tegra_atomic_post_commit(struct drm_device *drm,
                                     struct drm_atomic_state *old_state)
{
        struct drm_crtc_state *old_crtc_state __maybe_unused;
        struct drm_crtc *crtc;
        unsigned int i;

        for_each_old_crtc_in_state(old_state, crtc, old_crtc_state, i)
                tegra_crtc_atomic_post_commit(crtc, old_state);
}

static void tegra_atomic_commit_tail(struct drm_atomic_state *old_state)
{
        struct drm_device *drm = old_state->dev;
        struct tegra_drm *tegra = drm->dev_private;

        if (tegra->hub) {
                bool fence_cookie = dma_fence_begin_signalling();

                drm_atomic_helper_commit_modeset_disables(drm, old_state);
                tegra_display_hub_atomic_commit(drm, old_state);
                drm_atomic_helper_commit_planes(drm, old_state, 0);
                drm_atomic_helper_commit_modeset_enables(drm, old_state);
                drm_atomic_helper_commit_hw_done(old_state);
                dma_fence_end_signalling(fence_cookie);
                drm_atomic_helper_wait_for_vblanks(drm, old_state);
                drm_atomic_helper_cleanup_planes(drm, old_state);
        } else {
                drm_atomic_helper_commit_tail_rpm(old_state);
        }

        tegra_atomic_post_commit(drm, old_state);
}

static const struct drm_mode_config_helper_funcs
tegra_drm_mode_config_helpers = {
        .atomic_commit_tail = tegra_atomic_commit_tail,
};
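
/*
 * Allocate the per-file private data: an IDR for legacy contexts, XArrays
 * for new-style contexts and syncpoints, and a lock protecting them.
 */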
static int tegra_drm_open(struct drm_device *drm, struct drm_file *filp)
{
        struct tegra_drm_file *fpriv;

        fpriv = kzalloc(sizeof(*fpriv), GFP_KERNEL);
        if (!fpriv)
                return -ENOMEM;

        idr_init_base(&fpriv->legacy_contexts, 1);
        xa_init_flags(&fpriv->contexts, XA_FLAGS_ALLOC1);
        xa_init(&fpriv->syncpoints);
        mutex_init(&fpriv->lock);
        filp->driver_priv = fpriv;

        return 0;
}

static void tegra_drm_context_free(struct tegra_drm_context *context)
{
        context->client->ops->close_channel(context);
        pm_runtime_put(context->client->base.dev);
        kfree(context);
}
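
/*
 * Copy one relocation descriptor from userspace and resolve its GEM handles
 * to buffer objects.
 */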
static int host1x_reloc_copy_from_user(struct host1x_reloc *dest,
                                       struct drm_tegra_reloc __user *src,
                                       struct drm_device *drm,
                                       struct drm_file *file)
{
        u32 cmdbuf, target;
        int err;

        err = get_user(cmdbuf, &src->cmdbuf.handle);
        if (err < 0)
                return err;

        err = get_user(dest->cmdbuf.offset, &src->cmdbuf.offset);
        if (err < 0)
                return err;

        err = get_user(target, &src->target.handle);
        if (err < 0)
                return err;

        err = get_user(dest->target.offset, &src->target.offset);
        if (err < 0)
                return err;

        err = get_user(dest->shift, &src->shift);
        if (err < 0)
                return err;

        dest->flags = HOST1X_RELOC_READ | HOST1X_RELOC_WRITE;

        dest->cmdbuf.bo = tegra_gem_lookup(file, cmdbuf);
        if (!dest->cmdbuf.bo)
                return -ENOENT;

        dest->target.bo = tegra_gem_lookup(file, target);
        if (!dest->target.bo)
                return -ENOENT;

        return 0;
}
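
/*
 * Copy a legacy submit descriptor from userspace, validate the command
 * buffers and relocations it references, and queue the resulting job on the
 * context's channel. On success, the job's end fence is returned in
 * args->fence.
 */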
int tegra_drm_submit(struct tegra_drm_context *context,
                     struct drm_tegra_submit *args, struct drm_device *drm,
                     struct drm_file *file)
{
        struct host1x_client *client = &context->client->base;
        unsigned int num_cmdbufs = args->num_cmdbufs;
        unsigned int num_relocs = args->num_relocs;
        struct drm_tegra_cmdbuf __user *user_cmdbufs;
        struct drm_tegra_reloc __user *user_relocs;
        struct drm_tegra_syncpt __user *user_syncpt;
        struct drm_tegra_syncpt syncpt;
        struct host1x *host1x = dev_get_drvdata(drm->dev->parent);
        struct drm_gem_object **refs;
        struct host1x_syncpt *sp = NULL;
        struct host1x_job *job;
        unsigned int num_refs;
        int err;

        user_cmdbufs = u64_to_user_ptr(args->cmdbufs);
        user_relocs = u64_to_user_ptr(args->relocs);
        user_syncpt = u64_to_user_ptr(args->syncpts);

        /* We don't yet support more than one syncpt_incr struct per submit */
        if (args->num_syncpts != 1)
                return -EINVAL;

        /* We don't yet support waitchks */
        if (args->num_waitchks != 0)
                return -EINVAL;

        job = host1x_job_alloc(context->channel, args->num_cmdbufs,
                               args->num_relocs, false);
        if (!job)
                return -ENOMEM;

        job->num_relocs = args->num_relocs;
        job->client = client;
        job->class = client->class;
        job->serialize = true;
        job->syncpt_recovery = true;

        /*
         * Track referenced BOs so that they can be unreferenced after the
         * submission is complete.
         */
        num_refs = num_cmdbufs + num_relocs * 2;
        refs = kmalloc_array(num_refs, sizeof(*refs), GFP_KERNEL);
        if (!refs) {
                err = -ENOMEM;
                goto put;
        }

        /* reuse as an iterator later */
        num_refs = 0;

        while (num_cmdbufs) {
                struct drm_tegra_cmdbuf cmdbuf;
                struct host1x_bo *bo;
                struct tegra_bo *obj;
                u64 offset;

                if (copy_from_user(&cmdbuf, user_cmdbufs, sizeof(cmdbuf))) {
                        err = -EFAULT;
                        goto fail;
                }

                /*
                 * The maximum number of CDMA gather fetches is 16383, a
                 * higher value means the words count is malformed.
                 */
                if (cmdbuf.words > CDMA_GATHER_FETCHES_MAX_NB) {
                        err = -EINVAL;
                        goto fail;
                }

                bo = tegra_gem_lookup(file, cmdbuf.handle);
                if (!bo) {
                        err = -ENOENT;
                        goto fail;
                }

                offset = (u64)cmdbuf.offset + (u64)cmdbuf.words * sizeof(u32);
                obj = host1x_to_tegra_bo(bo);
                refs[num_refs++] = &obj->gem;

                /*
                 * The gather buffer base address must be 4-byte aligned; an
                 * unaligned offset is malformed and causes command stream
                 * corruption on the buffer address relocation.
                 */
                if (offset & 3 || offset > obj->gem.size) {
                        err = -EINVAL;
                        goto fail;
                }

                host1x_job_add_gather(job, bo, cmdbuf.words, cmdbuf.offset);
                num_cmdbufs--;
                user_cmdbufs++;
        }

        /* copy and resolve relocations from submit */
        while (num_relocs--) {
                struct host1x_reloc *reloc;
                struct tegra_bo *obj;

                err = host1x_reloc_copy_from_user(&job->relocs[num_relocs],
                                                  &user_relocs[num_relocs],
                                                  drm, file);
                if (err < 0)
                        goto fail;

                reloc = &job->relocs[num_relocs];
                obj = host1x_to_tegra_bo(reloc->cmdbuf.bo);
                refs[num_refs++] = &obj->gem;

                /*
                 * An unaligned cmdbuf offset will cause an unaligned write
                 * during relocation patching, corrupting the command stream.
                 */
                if (reloc->cmdbuf.offset & 3 ||
                    reloc->cmdbuf.offset >= obj->gem.size) {
                        err = -EINVAL;
                        goto fail;
                }

                obj = host1x_to_tegra_bo(reloc->target.bo);
                refs[num_refs++] = &obj->gem;

                if (reloc->target.offset >= obj->gem.size) {
                        err = -EINVAL;
                        goto fail;
                }
        }

        if (copy_from_user(&syncpt, user_syncpt, sizeof(syncpt))) {
                err = -EFAULT;
                goto fail;
        }

        /* Syncpoint ref will be dropped on job release. */
        sp = host1x_syncpt_get_by_id(host1x, syncpt.id);
        if (!sp) {
                err = -ENOENT;
                goto fail;
        }

        job->is_addr_reg = context->client->ops->is_addr_reg;
        job->is_valid_class = context->client->ops->is_valid_class;
        job->syncpt_incrs = syncpt.incrs;
        job->syncpt = sp;
        job->timeout = 10000;

        if (args->timeout && args->timeout < 10000)
                job->timeout = args->timeout;

        err = host1x_job_pin(job, context->client->base.dev);
        if (err)
                goto fail;

        err = host1x_job_submit(job);
        if (err) {
                host1x_job_unpin(job);
                goto fail;
        }

        args->fence = job->syncpt_end;

fail:
        while (num_refs--)
                drm_gem_object_put(refs[num_refs]);

        kfree(refs);

put:
        host1x_job_put(job);
        return err;
}
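
/*
 * Legacy IOCTL implementations, only built when CONFIG_DRM_TEGRA_STAGING is
 * enabled.
 */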
#ifdef CONFIG_DRM_TEGRA_STAGING
static int tegra_gem_create(struct drm_device *drm, void *data,
                            struct drm_file *file)
{
        struct drm_tegra_gem_create *args = data;
        struct tegra_bo *bo;

        bo = tegra_bo_create_with_handle(file, drm, args->size, args->flags,
                                         &args->handle);
        if (IS_ERR(bo))
                return PTR_ERR(bo);

        return 0;
}

static int tegra_gem_mmap(struct drm_device *drm, void *data,
                          struct drm_file *file)
{
        struct drm_tegra_gem_mmap *args = data;
        struct drm_gem_object *gem;
        struct tegra_bo *bo;

        gem = drm_gem_object_lookup(file, args->handle);
        if (!gem)
                return -EINVAL;

        bo = to_tegra_bo(gem);

        args->offset = drm_vma_node_offset_addr(&bo->gem.vma_node);

        drm_gem_object_put(gem);

        return 0;
}

static int tegra_syncpt_read(struct drm_device *drm, void *data,
                             struct drm_file *file)
{
        struct host1x *host = dev_get_drvdata(drm->dev->parent);
        struct drm_tegra_syncpt_read *args = data;
        struct host1x_syncpt *sp;

        sp = host1x_syncpt_get_by_id_noref(host, args->id);
        if (!sp)
                return -EINVAL;

        args->value = host1x_syncpt_read_min(sp);
        return 0;
}

static int tegra_syncpt_incr(struct drm_device *drm, void *data,
                             struct drm_file *file)
{
        struct host1x *host1x = dev_get_drvdata(drm->dev->parent);
        struct drm_tegra_syncpt_incr *args = data;
        struct host1x_syncpt *sp;

        sp = host1x_syncpt_get_by_id_noref(host1x, args->id);
        if (!sp)
                return -EINVAL;

        return host1x_syncpt_incr(sp);
}

static int tegra_syncpt_wait(struct drm_device *drm, void *data,
                             struct drm_file *file)
{
        struct host1x *host1x = dev_get_drvdata(drm->dev->parent);
        struct drm_tegra_syncpt_wait *args = data;
        struct host1x_syncpt *sp;

        sp = host1x_syncpt_get_by_id_noref(host1x, args->id);
        if (!sp)
                return -EINVAL;

        return host1x_syncpt_wait(sp, args->thresh,
                                  msecs_to_jiffies(args->timeout),
                                  &args->value);
}
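
/*
 * Resume the client, open a channel on it and allocate an IDR slot for the
 * new context, unwinding the earlier steps on failure.
 */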
static int tegra_client_open(struct tegra_drm_file *fpriv,
                             struct tegra_drm_client *client,
                             struct tegra_drm_context *context)
{
        int err;

        err = pm_runtime_resume_and_get(client->base.dev);
        if (err)
                return err;

        err = client->ops->open_channel(client, context);
        if (err < 0) {
                pm_runtime_put(client->base.dev);
                return err;
        }

        err = idr_alloc(&fpriv->legacy_contexts, context, 1, 0, GFP_KERNEL);
        if (err < 0) {
                client->ops->close_channel(context);
                pm_runtime_put(client->base.dev);
                return err;
        }

        context->client = client;
        context->id = err;

        return 0;
}

static int tegra_open_channel(struct drm_device *drm, void *data,
                              struct drm_file *file)
{
        struct tegra_drm_file *fpriv = file->driver_priv;
        struct tegra_drm *tegra = drm->dev_private;
        struct drm_tegra_open_channel *args = data;
        struct tegra_drm_context *context;
        struct tegra_drm_client *client;
        int err = -ENODEV;

        context = kzalloc(sizeof(*context), GFP_KERNEL);
        if (!context)
                return -ENOMEM;

        mutex_lock(&fpriv->lock);

        list_for_each_entry(client, &tegra->clients, list)
                if (client->base.class == args->client) {
                        err = tegra_client_open(fpriv, client, context);
                        if (err < 0)
                                break;

                        args->context = context->id;
                        break;
                }

        if (err < 0)
                kfree(context);

        mutex_unlock(&fpriv->lock);
        return err;
}

static int tegra_close_channel(struct drm_device *drm, void *data,
                               struct drm_file *file)
{
        struct tegra_drm_file *fpriv = file->driver_priv;
        struct drm_tegra_close_channel *args = data;
        struct tegra_drm_context *context;
        int err = 0;

        mutex_lock(&fpriv->lock);

        context = idr_find(&fpriv->legacy_contexts, args->context);
        if (!context) {
                err = -EINVAL;
                goto unlock;
        }

        idr_remove(&fpriv->legacy_contexts, context->id);
        tegra_drm_context_free(context);

unlock:
        mutex_unlock(&fpriv->lock);
        return err;
}

static int tegra_get_syncpt(struct drm_device *drm, void *data,
                            struct drm_file *file)
{
        struct tegra_drm_file *fpriv = file->driver_priv;
        struct drm_tegra_get_syncpt *args = data;
        struct tegra_drm_context *context;
        struct host1x_syncpt *syncpt;
        int err = 0;

        mutex_lock(&fpriv->lock);

        context = idr_find(&fpriv->legacy_contexts, args->context);
        if (!context) {
                err = -ENODEV;
                goto unlock;
        }

        if (args->index >= context->client->base.num_syncpts) {
                err = -EINVAL;
                goto unlock;
        }

        syncpt = context->client->base.syncpts[args->index];

        args->id = host1x_syncpt_id(syncpt);

unlock:
        mutex_unlock(&fpriv->lock);
        return err;
}

static int tegra_submit(struct drm_device *drm, void *data,
                        struct drm_file *file)
{
        struct tegra_drm_file *fpriv = file->driver_priv;
        struct drm_tegra_submit *args = data;
        struct tegra_drm_context *context;
        int err;

        mutex_lock(&fpriv->lock);

        context = idr_find(&fpriv->legacy_contexts, args->context);
        if (!context) {
                err = -ENODEV;
                goto unlock;
        }

        err = context->client->ops->submit(context, args, drm, file);

unlock:
        mutex_unlock(&fpriv->lock);
        return err;
}

static int tegra_get_syncpt_base(struct drm_device *drm, void *data,
                                 struct drm_file *file)
{
        struct tegra_drm_file *fpriv = file->driver_priv;
        struct drm_tegra_get_syncpt_base *args = data;
        struct tegra_drm_context *context;
        struct host1x_syncpt_base *base;
        struct host1x_syncpt *syncpt;
        int err = 0;

        mutex_lock(&fpriv->lock);

        context = idr_find(&fpriv->legacy_contexts, args->context);
        if (!context) {
                err = -ENODEV;
                goto unlock;
        }

        if (args->syncpt >= context->client->base.num_syncpts) {
                err = -EINVAL;
                goto unlock;
        }

        syncpt = context->client->base.syncpts[args->syncpt];

        base = host1x_syncpt_get_base(syncpt);
        if (!base) {
                err = -ENXIO;
                goto unlock;
        }

        args->id = host1x_syncpt_base_id(base);

unlock:
        mutex_unlock(&fpriv->lock);
        return err;
}

static int tegra_gem_set_tiling(struct drm_device *drm, void *data,
                                struct drm_file *file)
{
        struct drm_tegra_gem_set_tiling *args = data;
        enum tegra_bo_tiling_mode mode;
        struct drm_gem_object *gem;
        unsigned long value = 0;
        struct tegra_bo *bo;

        switch (args->mode) {
        case DRM_TEGRA_GEM_TILING_MODE_PITCH:
                mode = TEGRA_BO_TILING_MODE_PITCH;

                if (args->value != 0)
                        return -EINVAL;

                break;

        case DRM_TEGRA_GEM_TILING_MODE_TILED:
                mode = TEGRA_BO_TILING_MODE_TILED;

                if (args->value != 0)
                        return -EINVAL;

                break;

        case DRM_TEGRA_GEM_TILING_MODE_BLOCK:
                mode = TEGRA_BO_TILING_MODE_BLOCK;

                if (args->value > 5)
                        return -EINVAL;

                value = args->value;
                break;

        default:
                return -EINVAL;
        }

        gem = drm_gem_object_lookup(file, args->handle);
        if (!gem)
                return -ENOENT;

        bo = to_tegra_bo(gem);

        bo->tiling.mode = mode;
        bo->tiling.value = value;

        drm_gem_object_put(gem);

        return 0;
}

static int tegra_gem_get_tiling(struct drm_device *drm, void *data,
                                struct drm_file *file)
{
        struct drm_tegra_gem_get_tiling *args = data;
        struct drm_gem_object *gem;
        struct tegra_bo *bo;
        int err = 0;

        gem = drm_gem_object_lookup(file, args->handle);
        if (!gem)
                return -ENOENT;

        bo = to_tegra_bo(gem);

        switch (bo->tiling.mode) {
        case TEGRA_BO_TILING_MODE_PITCH:
                args->mode = DRM_TEGRA_GEM_TILING_MODE_PITCH;
                args->value = 0;
                break;

        case TEGRA_BO_TILING_MODE_TILED:
                args->mode = DRM_TEGRA_GEM_TILING_MODE_TILED;
                args->value = 0;
                break;

        case TEGRA_BO_TILING_MODE_BLOCK:
                args->mode = DRM_TEGRA_GEM_TILING_MODE_BLOCK;
                args->value = bo->tiling.value;
                break;

        default:
                err = -EINVAL;
                break;
        }

        drm_gem_object_put(gem);

        return err;
}

static int tegra_gem_set_flags(struct drm_device *drm, void *data,
                               struct drm_file *file)
{
        struct drm_tegra_gem_set_flags *args = data;
        struct drm_gem_object *gem;
        struct tegra_bo *bo;

        if (args->flags & ~DRM_TEGRA_GEM_FLAGS)
                return -EINVAL;

        gem = drm_gem_object_lookup(file, args->handle);
        if (!gem)
                return -ENOENT;

        bo = to_tegra_bo(gem);
        bo->flags = 0;

        if (args->flags & DRM_TEGRA_GEM_BOTTOM_UP)
                bo->flags |= TEGRA_BO_BOTTOM_UP;

        drm_gem_object_put(gem);

        return 0;
}

static int tegra_gem_get_flags(struct drm_device *drm, void *data,
                               struct drm_file *file)
{
        struct drm_tegra_gem_get_flags *args = data;
        struct drm_gem_object *gem;
        struct tegra_bo *bo;

        gem = drm_gem_object_lookup(file, args->handle);
        if (!gem)
                return -ENOENT;

        bo = to_tegra_bo(gem);

        args->flags = 0;

        if (bo->flags & TEGRA_BO_BOTTOM_UP)
                args->flags |= DRM_TEGRA_GEM_BOTTOM_UP;

        drm_gem_object_put(gem);

        return 0;
}
#endif

static const struct drm_ioctl_desc tegra_drm_ioctls[] = {
#ifdef CONFIG_DRM_TEGRA_STAGING
        DRM_IOCTL_DEF_DRV(TEGRA_CHANNEL_OPEN, tegra_drm_ioctl_channel_open,
                          DRM_RENDER_ALLOW),
        DRM_IOCTL_DEF_DRV(TEGRA_CHANNEL_CLOSE, tegra_drm_ioctl_channel_close,
                          DRM_RENDER_ALLOW),
        DRM_IOCTL_DEF_DRV(TEGRA_CHANNEL_MAP, tegra_drm_ioctl_channel_map,
                          DRM_RENDER_ALLOW),
        DRM_IOCTL_DEF_DRV(TEGRA_CHANNEL_UNMAP, tegra_drm_ioctl_channel_unmap,
                          DRM_RENDER_ALLOW),
        DRM_IOCTL_DEF_DRV(TEGRA_CHANNEL_SUBMIT, tegra_drm_ioctl_channel_submit,
                          DRM_RENDER_ALLOW),
        DRM_IOCTL_DEF_DRV(TEGRA_SYNCPOINT_ALLOCATE, tegra_drm_ioctl_syncpoint_allocate,
                          DRM_RENDER_ALLOW),
        DRM_IOCTL_DEF_DRV(TEGRA_SYNCPOINT_FREE, tegra_drm_ioctl_syncpoint_free,
                          DRM_RENDER_ALLOW),
        DRM_IOCTL_DEF_DRV(TEGRA_SYNCPOINT_WAIT, tegra_drm_ioctl_syncpoint_wait,
                          DRM_RENDER_ALLOW),
        DRM_IOCTL_DEF_DRV(TEGRA_GEM_CREATE, tegra_gem_create, DRM_RENDER_ALLOW),
        DRM_IOCTL_DEF_DRV(TEGRA_GEM_MMAP, tegra_gem_mmap, DRM_RENDER_ALLOW),
        DRM_IOCTL_DEF_DRV(TEGRA_SYNCPT_READ, tegra_syncpt_read,
                          DRM_RENDER_ALLOW),
        DRM_IOCTL_DEF_DRV(TEGRA_SYNCPT_INCR, tegra_syncpt_incr,
                          DRM_RENDER_ALLOW),
        DRM_IOCTL_DEF_DRV(TEGRA_SYNCPT_WAIT, tegra_syncpt_wait,
                          DRM_RENDER_ALLOW),
        DRM_IOCTL_DEF_DRV(TEGRA_OPEN_CHANNEL, tegra_open_channel,
                          DRM_RENDER_ALLOW),
        DRM_IOCTL_DEF_DRV(TEGRA_CLOSE_CHANNEL, tegra_close_channel,
                          DRM_RENDER_ALLOW),
        DRM_IOCTL_DEF_DRV(TEGRA_GET_SYNCPT, tegra_get_syncpt,
                          DRM_RENDER_ALLOW),
        DRM_IOCTL_DEF_DRV(TEGRA_SUBMIT, tegra_submit,
                          DRM_RENDER_ALLOW),
        DRM_IOCTL_DEF_DRV(TEGRA_GET_SYNCPT_BASE, tegra_get_syncpt_base,
                          DRM_RENDER_ALLOW),
        DRM_IOCTL_DEF_DRV(TEGRA_GEM_SET_TILING, tegra_gem_set_tiling,
                          DRM_RENDER_ALLOW),
        DRM_IOCTL_DEF_DRV(TEGRA_GEM_GET_TILING, tegra_gem_get_tiling,
                          DRM_RENDER_ALLOW),
        DRM_IOCTL_DEF_DRV(TEGRA_GEM_SET_FLAGS, tegra_gem_set_flags,
                          DRM_RENDER_ALLOW),
        DRM_IOCTL_DEF_DRV(TEGRA_GEM_GET_FLAGS, tegra_gem_get_flags,
                          DRM_RENDER_ALLOW),
#endif
};

static const struct file_operations tegra_drm_fops = {
        .owner = THIS_MODULE,
        .open = drm_open,
        .release = drm_release,
        .unlocked_ioctl = drm_ioctl,
        .mmap = tegra_drm_mmap,
        .poll = drm_poll,
        .read = drm_read,
        .compat_ioctl = drm_compat_ioctl,
        .llseek = noop_llseek,
        .fop_flags = FOP_UNSIGNED_OFFSET,
};
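
/* IDR iterator callback: free a single legacy context. */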
static int tegra_drm_context_cleanup(int id, void *p, void *data)
{
        struct tegra_drm_context *context = p;

        tegra_drm_context_free(context);

        return 0;
}

static void tegra_drm_postclose(struct drm_device *drm, struct drm_file *file)
{
        struct tegra_drm_file *fpriv = file->driver_priv;

        mutex_lock(&fpriv->lock);
        idr_for_each(&fpriv->legacy_contexts, tegra_drm_context_cleanup, NULL);
        tegra_drm_uapi_close_file(fpriv);
        mutex_unlock(&fpriv->lock);

        idr_destroy(&fpriv->legacy_contexts);
        mutex_destroy(&fpriv->lock);
        kfree(fpriv);
}

#ifdef CONFIG_DEBUG_FS
static int tegra_debugfs_framebuffers(struct seq_file *s, void *data)
{
        struct drm_info_node *node = (struct drm_info_node *)s->private;
        struct drm_device *drm = node->minor->dev;
        struct drm_framebuffer *fb;

        mutex_lock(&drm->mode_config.fb_lock);

        list_for_each_entry(fb, &drm->mode_config.fb_list, head) {
                seq_printf(s, "%3d: user size: %d x %d, depth %d, %d bpp, refcount %d\n",
                           fb->base.id, fb->width, fb->height,
                           fb->format->depth,
                           fb->format->cpp[0] * 8,
                           drm_framebuffer_read_refcount(fb));
        }

        mutex_unlock(&drm->mode_config.fb_lock);

        return 0;
}

static int tegra_debugfs_iova(struct seq_file *s, void *data)
{
        struct drm_info_node *node = (struct drm_info_node *)s->private;
        struct drm_device *drm = node->minor->dev;
        struct tegra_drm *tegra = drm->dev_private;
        struct drm_printer p = drm_seq_file_printer(s);

        if (tegra->domain) {
                mutex_lock(&tegra->mm_lock);
                drm_mm_print(&tegra->mm, &p);
                mutex_unlock(&tegra->mm_lock);
        }

        return 0;
}

static struct drm_info_list tegra_debugfs_list[] = {
        { "framebuffers", tegra_debugfs_framebuffers, 0 },
        { "iova", tegra_debugfs_iova, 0 },
};

static void tegra_debugfs_init(struct drm_minor *minor)
{
        drm_debugfs_create_files(tegra_debugfs_list,
                                 ARRAY_SIZE(tegra_debugfs_list),
                                 minor->debugfs_root, minor);
}
#endif

static const struct drm_driver tegra_drm_driver = {
        .driver_features = DRIVER_MODESET | DRIVER_GEM |
                           DRIVER_ATOMIC | DRIVER_RENDER | DRIVER_SYNCOBJ,
        .open = tegra_drm_open,
        .postclose = tegra_drm_postclose,
#if defined(CONFIG_DEBUG_FS)
        .debugfs_init = tegra_debugfs_init,
#endif
        .gem_prime_import = tegra_gem_prime_import,
        .dumb_create = tegra_bo_dumb_create,
        .ioctls = tegra_drm_ioctls,
        .num_ioctls = ARRAY_SIZE(tegra_drm_ioctls),
        .fops = &tegra_drm_fops,
        .name = DRIVER_NAME,
        .desc = DRIVER_DESC,
        .date = DRIVER_DATE,
        .major = DRIVER_MAJOR,
        .minor = DRIVER_MINOR,
        .patchlevel = DRIVER_PATCHLEVEL,
};
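
/* Request a shared channel for the client and add it to the clients list. */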
int tegra_drm_register_client(struct tegra_drm *tegra,
                              struct tegra_drm_client *client)
{
        /*
         * When MLOCKs are implemented, change to allocate a shared channel
         * only when MLOCKs are disabled.
         */
        client->shared_channel = host1x_channel_request(&client->base);
        if (!client->shared_channel)
                return -EBUSY;

        mutex_lock(&tegra->clients_lock);
        list_add_tail(&client->list, &tegra->clients);
        client->drm = tegra;
        mutex_unlock(&tegra->clients_lock);

        return 0;
}

int tegra_drm_unregister_client(struct tegra_drm *tegra,
                                struct tegra_drm_client *client)
{
        mutex_lock(&tegra->clients_lock);
        list_del_init(&client->list);
        client->drm = NULL;
        mutex_unlock(&tegra->clients_lock);

        if (client->shared_channel)
                host1x_channel_put(client->shared_channel);

        return 0;
}
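
/*
 * Attach a host1x client to the shared IOMMU domain. Any ARM DMA/IOMMU
 * mapping that was set up automatically is torn down first; clients already
 * attached to their own DMA-API domain are left alone.
 */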
int host1x_client_iommu_attach(struct host1x_client *client)
{
        struct iommu_domain *domain = iommu_get_domain_for_dev(client->dev);
        struct drm_device *drm = dev_get_drvdata(client->host);
        struct tegra_drm *tegra = drm->dev_private;
        struct iommu_group *group = NULL;
        int err;

#if IS_ENABLED(CONFIG_ARM_DMA_USE_IOMMU)
        if (client->dev->archdata.mapping) {
                struct dma_iommu_mapping *mapping =
                                to_dma_iommu_mapping(client->dev);
                arm_iommu_detach_device(client->dev);
                arm_iommu_release_mapping(mapping);

                domain = iommu_get_domain_for_dev(client->dev);
        }
#endif

        /*
         * If the host1x client is already attached to an IOMMU domain that is
         * not the shared IOMMU domain, don't try to attach it to a different
         * domain. This allows using the IOMMU-backed DMA API.
         */
        if (domain && domain->type != IOMMU_DOMAIN_IDENTITY &&
            domain != tegra->domain)
                return 0;

        if (tegra->domain) {
                group = iommu_group_get(client->dev);
                if (!group)
                        return -ENODEV;

                if (domain != tegra->domain) {
                        err = iommu_attach_group(tegra->domain, group);
                        if (err < 0) {
                                iommu_group_put(group);
                                return err;
                        }
                }

                tegra->use_explicit_iommu = true;
        }

        client->group = group;

        return 0;
}

void host1x_client_iommu_detach(struct host1x_client *client)
{
        struct drm_device *drm = dev_get_drvdata(client->host);
        struct tegra_drm *tegra = drm->dev_private;
        struct iommu_domain *domain;

        if (client->group) {
                /*
                 * Devices that are part of the same group may no longer be
                 * attached to a domain at this point because their group may
                 * have been detached by an earlier client.
                 */
                domain = iommu_get_domain_for_dev(client->dev);
                if (domain)
                        iommu_detach_group(tegra->domain, client->group);

                iommu_group_put(client->group);
                client->group = NULL;
        }
}
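
/*
 * Allocate a zeroed, aligned buffer for a device. With an explicit IOMMU
 * domain the pages are mapped into the carveout IOVA range; without one,
 * the physical address is handed out directly.
 */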
void *tegra_drm_alloc(struct tegra_drm *tegra, size_t size, dma_addr_t *dma)
{
        struct iova *alloc;
        void *virt;
        gfp_t gfp;
        int err;

        if (tegra->domain)
                size = iova_align(&tegra->carveout.domain, size);
        else
                size = PAGE_ALIGN(size);

        gfp = GFP_KERNEL | __GFP_ZERO;
        if (!tegra->domain) {
                /*
                 * Many units only support 32-bit addresses, even on 64-bit
                 * SoCs. If there is no IOMMU to translate into a 32-bit IO
                 * virtual address space, force allocations to be in the
                 * lower 32-bit range.
                 */
                gfp |= GFP_DMA;
        }

        virt = (void *)__get_free_pages(gfp, get_order(size));
        if (!virt)
                return ERR_PTR(-ENOMEM);

        if (!tegra->domain) {
                /*
                 * If IOMMU is disabled, devices address physical memory
                 * directly.
                 */
                *dma = virt_to_phys(virt);
                return virt;
        }

        alloc = alloc_iova(&tegra->carveout.domain,
                           size >> tegra->carveout.shift,
                           tegra->carveout.limit, true);
        if (!alloc) {
                err = -EBUSY;
                goto free_pages;
        }

        *dma = iova_dma_addr(&tegra->carveout.domain, alloc);
        err = iommu_map(tegra->domain, *dma, virt_to_phys(virt),
                        size, IOMMU_READ | IOMMU_WRITE, GFP_KERNEL);
        if (err < 0)
                goto free_iova;

        return virt;

free_iova:
        __free_iova(&tegra->carveout.domain, alloc);
free_pages:
        free_pages((unsigned long)virt, get_order(size));

        return ERR_PTR(err);
}

void tegra_drm_free(struct tegra_drm *tegra, size_t size, void *virt,
                    dma_addr_t dma)
{
        if (tegra->domain)
                size = iova_align(&tegra->carveout.domain, size);
        else
                size = PAGE_ALIGN(size);

        if (tegra->domain) {
                iommu_unmap(tegra->domain, dma, size);
                free_iova(&tegra->carveout.domain,
                          iova_pfn(&tegra->carveout.domain, dma));
        }

        free_pages((unsigned long)virt, get_order(size));
}

static bool host1x_drm_wants_iommu(struct host1x_device *dev)
{
        struct host1x *host1x = dev_get_drvdata(dev->dev.parent);
        struct iommu_domain *domain;

        /* Our IOMMU usage policy doesn't currently play well with GART */
        if (of_machine_is_compatible("nvidia,tegra20"))
                return false;

        /*
         * If the Tegra DRM clients are backed by an IOMMU, push buffers are
         * likely to be allocated beyond the 32-bit boundary if sufficient
         * system memory is available. This is problematic on earlier Tegra
         * generations where host1x supports a maximum of 32 address bits in
         * the GATHER opcode. In this case, unless host1x is behind an IOMMU
         * as well it won't be able to process buffers allocated beyond the
         * 32-bit boundary.
         *
         * The DMA API will use bounce buffers in this case, so that could
         * perhaps still be made to work, even if less efficient, but there
         * is another catch: in order to perform cache maintenance on pages
         * allocated for discontiguous buffers we need to map and unmap the
         * SG table representing these buffers. This is fine for something
         * small like a push buffer, but it exhausts the bounce buffer pool
         * (typically on the order of a few MiB) for framebuffers (many MiB
         * for any modern resolution).
         *
         * Work around this by making sure that Tegra DRM clients only use
         * an IOMMU if the parent host1x also uses an IOMMU.
         *
         * Note that there's still a small gap here that we don't cover: if
         * the DMA API is backed by an IOMMU there's no way to control which
         * device is attached to an IOMMU and which isn't, except via wiring
         * up the device tree appropriately. This is considered a problem
         * of integration, so care must be taken for the DT to be consistent.
         */
        domain = iommu_get_domain_for_dev(dev->dev.parent);

        /*
         * Tegra20 and Tegra30 don't support addressing memory beyond the
         * 32-bit boundary, so the regular GATHER opcodes will always be
         * sufficient and whether or not the host1x is attached to an IOMMU
         * doesn't matter.
         */
        if (!domain && host1x_get_dma_mask(host1x) <= DMA_BIT_MASK(32))
                return true;

        return domain != NULL;
}
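
/*
 * Bring up the DRM device: optionally set up an explicit IOMMU domain and
 * carveout, initialize KMS, probe all host1x subdevices and register the
 * device with the DRM core.
 */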
static int host1x_drm_probe(struct host1x_device *dev)
{
        struct device *dma_dev = dev->dev.parent;
        struct tegra_drm *tegra;
        struct drm_device *drm;
        int err;

        drm = drm_dev_alloc(&tegra_drm_driver, &dev->dev);
        if (IS_ERR(drm))
                return PTR_ERR(drm);

        tegra = kzalloc(sizeof(*tegra), GFP_KERNEL);
        if (!tegra) {
                err = -ENOMEM;
                goto put;
        }

        if (host1x_drm_wants_iommu(dev) && device_iommu_mapped(dma_dev)) {
                tegra->domain = iommu_paging_domain_alloc(dma_dev);
                if (IS_ERR(tegra->domain)) {
                        err = PTR_ERR(tegra->domain);
                        goto free;
                }

                err = iova_cache_get();
                if (err < 0)
                        goto domain;
        }

        mutex_init(&tegra->clients_lock);
        INIT_LIST_HEAD(&tegra->clients);

        dev_set_drvdata(&dev->dev, drm);
        drm->dev_private = tegra;
        tegra->drm = drm;

        drm_mode_config_init(drm);

        drm->mode_config.min_width = 0;
        drm->mode_config.min_height = 0;
        drm->mode_config.max_width = 0;
        drm->mode_config.max_height = 0;

        drm->mode_config.normalize_zpos = true;

        drm->mode_config.funcs = &tegra_drm_mode_config_funcs;
        drm->mode_config.helper_private = &tegra_drm_mode_config_helpers;

        drm_kms_helper_poll_init(drm);

        err = host1x_device_init(dev);
        if (err < 0)
                goto poll;

        /*
         * Now that all display controllers have been initialized, the
         * maximum supported resolution is known and the bitmask for
         * horizontal and vertical bitfields can be computed.
         */
        tegra->hmask = drm->mode_config.max_width - 1;
        tegra->vmask = drm->mode_config.max_height - 1;

        if (tegra->use_explicit_iommu) {
                u64 carveout_start, carveout_end, gem_start, gem_end;
                u64 dma_mask = dma_get_mask(&dev->dev);
                dma_addr_t start, end;
                unsigned long order;

                start = tegra->domain->geometry.aperture_start & dma_mask;
                end = tegra->domain->geometry.aperture_end & dma_mask;

                gem_start = start;
                gem_end = end - CARVEOUT_SZ;
                carveout_start = gem_end + 1;
                carveout_end = end;

                order = __ffs(tegra->domain->pgsize_bitmap);
                init_iova_domain(&tegra->carveout.domain, 1UL << order,
                                 carveout_start >> order);

                tegra->carveout.shift = iova_shift(&tegra->carveout.domain);
                tegra->carveout.limit = carveout_end >> tegra->carveout.shift;

                drm_mm_init(&tegra->mm, gem_start, gem_end - gem_start + 1);
                mutex_init(&tegra->mm_lock);

                DRM_DEBUG_DRIVER("IOMMU apertures:\n");
                DRM_DEBUG_DRIVER("  GEM: %#llx-%#llx\n", gem_start, gem_end);
                DRM_DEBUG_DRIVER("  Carveout: %#llx-%#llx\n", carveout_start,
                                 carveout_end);
        } else if (tegra->domain) {
                iommu_domain_free(tegra->domain);
                tegra->domain = NULL;
                iova_cache_put();
        }

        if (tegra->hub) {
                err = tegra_display_hub_prepare(tegra->hub);
                if (err < 0)
                        goto device;
        }

        /* syncpoints are used for full 32-bit hardware VBLANK counters */
        drm->max_vblank_count = 0xffffffff;

        err = drm_vblank_init(drm, drm->mode_config.num_crtc);
        if (err < 0)
                goto hub;

        drm_mode_config_reset(drm);

        /*
         * Only take over from a potential firmware framebuffer if any CRTCs
         * have been registered. This must not be a fatal error because there
         * are other accelerators that are exposed via this driver.
         *
         * Another case where this happens is on Tegra234 where the display
         * hardware is no longer part of the host1x complex, so this driver
         * will not expose any modesetting features.
         */
        if (drm->mode_config.num_crtc > 0) {
                err = drm_aperture_remove_framebuffers(&tegra_drm_driver);
                if (err < 0)
                        goto hub;
        } else {
                /*
                 * Indicate to userspace that this doesn't expose any display
                 * capabilities.
                 */
                drm->driver_features &= ~(DRIVER_MODESET | DRIVER_ATOMIC);
        }

        err = drm_dev_register(drm, 0);
        if (err < 0)
                goto hub;

        tegra_fbdev_setup(drm);

        return 0;

hub:
        if (tegra->hub)
                tegra_display_hub_cleanup(tegra->hub);
device:
        if (tegra->domain) {
                mutex_destroy(&tegra->mm_lock);
                drm_mm_takedown(&tegra->mm);
                put_iova_domain(&tegra->carveout.domain);
                iova_cache_put();
        }

        host1x_device_exit(dev);
poll:
        drm_kms_helper_poll_fini(drm);
        drm_mode_config_cleanup(drm);
domain:
        if (tegra->domain)
                iommu_domain_free(tegra->domain);
free:
        kfree(tegra);
put:
        drm_dev_put(drm);
        return err;
}
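
/* Tear down the DRM device, roughly in the reverse order of probe. */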
static int host1x_drm_remove(struct host1x_device *dev)
{
        struct drm_device *drm = dev_get_drvdata(&dev->dev);
        struct tegra_drm *tegra = drm->dev_private;
        int err;

        drm_dev_unregister(drm);

        drm_kms_helper_poll_fini(drm);
        drm_atomic_helper_shutdown(drm);
        drm_mode_config_cleanup(drm);

        if (tegra->hub)
                tegra_display_hub_cleanup(tegra->hub);

        err = host1x_device_exit(dev);
        if (err < 0)
                dev_err(&dev->dev, "host1x device cleanup failed: %d\n", err);

        if (tegra->domain) {
                mutex_destroy(&tegra->mm_lock);
                drm_mm_takedown(&tegra->mm);
                put_iova_domain(&tegra->carveout.domain);
                iova_cache_put();
                iommu_domain_free(tegra->domain);
        }

        kfree(tegra);
        drm_dev_put(drm);

        return 0;
}

static void host1x_drm_shutdown(struct host1x_device *dev)
{
        drm_atomic_helper_shutdown(dev_get_drvdata(&dev->dev));
}

#ifdef CONFIG_PM_SLEEP
static int host1x_drm_suspend(struct device *dev)
{
        struct drm_device *drm = dev_get_drvdata(dev);

        return drm_mode_config_helper_suspend(drm);
}

static int host1x_drm_resume(struct device *dev)
{
        struct drm_device *drm = dev_get_drvdata(dev);

        return drm_mode_config_helper_resume(drm);
}
#endif

static SIMPLE_DEV_PM_OPS(host1x_drm_pm_ops, host1x_drm_suspend,
                         host1x_drm_resume);

static const struct of_device_id host1x_drm_subdevs[] = {
        { .compatible = "nvidia,tegra20-dc", },
        { .compatible = "nvidia,tegra20-hdmi", },
        { .compatible = "nvidia,tegra20-gr2d", },
        { .compatible = "nvidia,tegra20-gr3d", },
        { .compatible = "nvidia,tegra30-dc", },
        { .compatible = "nvidia,tegra30-hdmi", },
        { .compatible = "nvidia,tegra30-gr2d", },
        { .compatible = "nvidia,tegra30-gr3d", },
        { .compatible = "nvidia,tegra114-dc", },
        { .compatible = "nvidia,tegra114-dsi", },
        { .compatible = "nvidia,tegra114-hdmi", },
        { .compatible = "nvidia,tegra114-gr2d", },
        { .compatible = "nvidia,tegra114-gr3d", },
        { .compatible = "nvidia,tegra124-dc", },
        { .compatible = "nvidia,tegra124-sor", },
        { .compatible = "nvidia,tegra124-hdmi", },
        { .compatible = "nvidia,tegra124-dsi", },
        { .compatible = "nvidia,tegra124-vic", },
        { .compatible = "nvidia,tegra132-dsi", },
        { .compatible = "nvidia,tegra210-dc", },
        { .compatible = "nvidia,tegra210-dsi", },
        { .compatible = "nvidia,tegra210-sor", },
        { .compatible = "nvidia,tegra210-sor1", },
        { .compatible = "nvidia,tegra210-vic", },
        { .compatible = "nvidia,tegra210-nvdec", },
        { .compatible = "nvidia,tegra186-display", },
        { .compatible = "nvidia,tegra186-dc", },
        { .compatible = "nvidia,tegra186-sor", },
        { .compatible = "nvidia,tegra186-sor1", },
        { .compatible = "nvidia,tegra186-vic", },
        { .compatible = "nvidia,tegra186-nvdec", },
        { .compatible = "nvidia,tegra194-display", },
        { .compatible = "nvidia,tegra194-dc", },
        { .compatible = "nvidia,tegra194-sor", },
        { .compatible = "nvidia,tegra194-vic", },
        { .compatible = "nvidia,tegra194-nvdec", },
        { .compatible = "nvidia,tegra234-vic", },
        { .compatible = "nvidia,tegra234-nvdec", },
        { /* sentinel */ }
};

static struct host1x_driver host1x_drm_driver = {
        .driver = {
                .name = "drm",
                .pm = &host1x_drm_pm_ops,
        },
        .probe = host1x_drm_probe,
        .remove = host1x_drm_remove,
        .shutdown = host1x_drm_shutdown,
        .subdevs = host1x_drm_subdevs,
};
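
/* Platform drivers for the engines and outputs that make up the device. */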
static struct platform_driver * const drivers[] = {
        &tegra_display_hub_driver,
        &tegra_dc_driver,
        &tegra_hdmi_driver,
        &tegra_dsi_driver,
        &tegra_dpaux_driver,
        &tegra_sor_driver,
        &tegra_gr2d_driver,
        &tegra_gr3d_driver,
        &tegra_vic_driver,
        &tegra_nvdec_driver,
};

static int __init host1x_drm_init(void)
{
        int err;

        if (drm_firmware_drivers_only())
                return -ENODEV;

        err = host1x_driver_register(&host1x_drm_driver);
        if (err < 0)
                return err;

        err = platform_register_drivers(drivers, ARRAY_SIZE(drivers));
        if (err < 0)
                goto unregister_host1x;

        return 0;

unregister_host1x:
        host1x_driver_unregister(&host1x_drm_driver);
        return err;
}
module_init(host1x_drm_init);

static void __exit host1x_drm_exit(void)
{
        platform_unregister_drivers(drivers, ARRAY_SIZE(drivers));
        host1x_driver_unregister(&host1x_drm_driver);
}
module_exit(host1x_drm_exit);

MODULE_AUTHOR("Thierry Reding <thierry.reding@avionic-design.de>");
MODULE_DESCRIPTION("NVIDIA Tegra DRM driver");
MODULE_LICENSE("GPL v2");