/* v3d_submit.c (~33 KB) — extraction artifact (file banner and line-number
 * residue) replaced with this comment so the file remains valid C.
 */
  1. // SPDX-License-Identifier: GPL-2.0+
  2. /*
  3. * Copyright (C) 2014-2018 Broadcom
  4. * Copyright (C) 2023 Raspberry Pi
  5. */
  6. #include <drm/drm_syncobj.h>
  7. #include "v3d_drv.h"
  8. #include "v3d_regs.h"
  9. #include "v3d_trace.h"
  10. /* Takes the reservation lock on all the BOs being referenced, so that
  11. * at queue submit time we can update the reservations.
  12. *
  13. * We don't lock the RCL the tile alloc/state BOs, or overflow memory
  14. * (all of which are on exec->unref_list). They're entirely private
  15. * to v3d, so we don't attach dma-buf fences to them.
  16. */
static int
v3d_lock_bo_reservations(struct v3d_job *job,
			 struct ww_acquire_ctx *acquire_ctx)
{
	int i, ret;

	/* Take the ww-mutex of every BO's reservation object under one
	 * acquire context so the whole set locks without deadlock.
	 */
	ret = drm_gem_lock_reservations(job->bo, job->bo_count, acquire_ctx);
	if (ret)
		return ret;

	for (i = 0; i < job->bo_count; i++) {
		/* Pre-reserve a fence slot so the later
		 * dma_resv_add_fence() at submit time cannot fail.
		 */
		ret = dma_resv_reserve_fences(job->bo[i]->resv, 1);
		if (ret)
			goto fail;

		/* Wait on any fences already attached to the BO before
		 * this job runs (implicit synchronization).
		 */
		ret = drm_sched_job_add_implicit_dependencies(&job->base,
							      job->bo[i], true);
		if (ret)
			goto fail;
	}

	return 0;

fail:
	/* Unlock everything we locked above; nothing was published yet. */
	drm_gem_unlock_reservations(job->bo, job->bo_count, acquire_ctx);
	return ret;
}
  39. /**
  40. * v3d_lookup_bos() - Sets up job->bo[] with the GEM objects
  41. * referenced by the job.
  42. * @dev: DRM device
  43. * @file_priv: DRM file for this fd
  44. * @job: V3D job being set up
  45. * @bo_handles: GEM handles
  46. * @bo_count: Number of GEM handles passed in
  47. *
  48. * The command validator needs to reference BOs by their index within
  49. * the submitted job's BO list. This does the validation of the job's
  50. * BO list and reference counting for the lifetime of the job.
  51. *
  52. * Note that this function doesn't need to unreference the BOs on
  53. * failure, because that will happen at v3d_exec_cleanup() time.
  54. */
  55. static int
  56. v3d_lookup_bos(struct drm_device *dev,
  57. struct drm_file *file_priv,
  58. struct v3d_job *job,
  59. u64 bo_handles,
  60. u32 bo_count)
  61. {
  62. job->bo_count = bo_count;
  63. if (!job->bo_count) {
  64. /* See comment on bo_index for why we have to check
  65. * this.
  66. */
  67. DRM_DEBUG("Rendering requires BOs\n");
  68. return -EINVAL;
  69. }
  70. return drm_gem_objects_lookup(file_priv,
  71. (void __user *)(uintptr_t)bo_handles,
  72. job->bo_count, &job->bo);
  73. }
  74. static void
  75. v3d_job_free(struct kref *ref)
  76. {
  77. struct v3d_job *job = container_of(ref, struct v3d_job, refcount);
  78. int i;
  79. if (job->bo) {
  80. for (i = 0; i < job->bo_count; i++)
  81. drm_gem_object_put(job->bo[i]);
  82. kvfree(job->bo);
  83. }
  84. dma_fence_put(job->irq_fence);
  85. dma_fence_put(job->done_fence);
  86. if (job->perfmon)
  87. v3d_perfmon_put(job->perfmon);
  88. kfree(job);
  89. }
  90. static void
  91. v3d_render_job_free(struct kref *ref)
  92. {
  93. struct v3d_render_job *job = container_of(ref, struct v3d_render_job,
  94. base.refcount);
  95. struct v3d_bo *bo, *save;
  96. list_for_each_entry_safe(bo, save, &job->unref_list, unref_head) {
  97. drm_gem_object_put(&bo->base.base);
  98. }
  99. v3d_job_free(ref);
  100. }
  101. void v3d_job_cleanup(struct v3d_job *job)
  102. {
  103. if (!job)
  104. return;
  105. drm_sched_job_cleanup(&job->base);
  106. v3d_job_put(job);
  107. }
  108. void v3d_job_put(struct v3d_job *job)
  109. {
  110. if (!job)
  111. return;
  112. kref_put(&job->refcount, job->free);
  113. }
  114. static int
  115. v3d_job_allocate(void **container, size_t size)
  116. {
  117. *container = kcalloc(1, size, GFP_KERNEL);
  118. if (!*container) {
  119. DRM_ERROR("Cannot allocate memory for V3D job.\n");
  120. return -ENOMEM;
  121. }
  122. return 0;
  123. }
  124. static void
  125. v3d_job_deallocate(void **container)
  126. {
  127. kfree(*container);
  128. *container = NULL;
  129. }
/* Common initialization of a job: binds it to the device/file, creates
 * the scheduler job, and collects its syncobj wait dependencies (either
 * the single legacy in_sync or the multisync extension's list for this
 * queue). kref is only initialized on success, so a failed job can be
 * freed with plain kfree() by the caller.
 */
static int
v3d_job_init(struct v3d_dev *v3d, struct drm_file *file_priv,
	     struct v3d_job *job, void (*free)(struct kref *ref),
	     u32 in_sync, struct v3d_submit_ext *se, enum v3d_queue queue)
{
	struct v3d_file_priv *v3d_priv = file_priv->driver_priv;
	bool has_multisync = se && (se->flags & DRM_V3D_EXT_ID_MULTI_SYNC);
	int ret, i;

	job->v3d = v3d;
	job->free = free;
	job->file = file_priv;

	ret = drm_sched_job_init(&job->base, &v3d_priv->sched_entity[queue],
				 1, v3d_priv);
	if (ret)
		return ret;

	if (has_multisync) {
		/* Only the queue named by wait_stage consumes the wait
		 * semaphores; other queues in the same submit skip them.
		 */
		if (se->in_sync_count && se->wait_stage == queue) {
			struct drm_v3d_sem __user *handle = u64_to_user_ptr(se->in_syncs);

			for (i = 0; i < se->in_sync_count; i++) {
				struct drm_v3d_sem in;

				if (copy_from_user(&in, handle++, sizeof(in))) {
					ret = -EFAULT;
					DRM_DEBUG("Failed to copy wait dep handle.\n");
					goto fail_deps;
				}
				ret = drm_sched_job_add_syncobj_dependency(&job->base, file_priv, in.handle, 0);

				// TODO: Investigate why this was filtered out for the IOCTL.
				/* -ENOENT (no fence on the syncobj) is deliberately ignored. */
				if (ret && ret != -ENOENT)
					goto fail_deps;
			}
		}
	} else {
		ret = drm_sched_job_add_syncobj_dependency(&job->base, file_priv, in_sync, 0);

		// TODO: Investigate why this was filtered out for the IOCTL.
		/* -ENOENT (no fence on the syncobj) is deliberately ignored. */
		if (ret && ret != -ENOENT)
			goto fail_deps;
	}

	kref_init(&job->refcount);

	return 0;

fail_deps:
	drm_sched_job_cleanup(&job->base);
	return ret;
}
/* Arm the scheduler job and hand it to the entity. Must be called with
 * the BO reservations held; after this the job is owned by the scheduler
 * and its done_fence is valid.
 */
static void
v3d_push_job(struct v3d_job *job)
{
	drm_sched_job_arm(&job->base);

	job->done_fence = dma_fence_get(&job->base.s_fence->finished);

	/* put by scheduler job completion */
	kref_get(&job->refcount);

	drm_sched_entity_push_job(&job->base);
}
/* Publish the job's done fence on every BO, drop the reservation locks,
 * and signal completion to userspace through either the single out_sync
 * syncobj or the multisync extension's out_syncs list.
 */
static void
v3d_attach_fences_and_unlock_reservation(struct drm_file *file_priv,
					 struct v3d_job *job,
					 struct ww_acquire_ctx *acquire_ctx,
					 u32 out_sync,
					 struct v3d_submit_ext *se,
					 struct dma_fence *done_fence)
{
	struct drm_syncobj *sync_out;
	bool has_multisync = se && (se->flags & DRM_V3D_EXT_ID_MULTI_SYNC);
	int i;

	for (i = 0; i < job->bo_count; i++) {
		/* XXX: Use shared fences for read-only objects. */
		dma_resv_add_fence(job->bo[i]->resv, job->done_fence,
				   DMA_RESV_USAGE_WRITE);
	}

	drm_gem_unlock_reservations(job->bo, job->bo_count, acquire_ctx);

	/* Update the return sync object for the job */
	/* If it only supports a single signal semaphore*/
	if (!has_multisync) {
		sync_out = drm_syncobj_find(file_priv, out_sync);
		if (sync_out) {
			drm_syncobj_replace_fence(sync_out, done_fence);
			drm_syncobj_put(sync_out);
		}
		return;
	}

	/* If multiple semaphores extension is supported */
	if (se->out_sync_count) {
		/* The out_syncs references were taken in
		 * v3d_get_multisync_post_deps(); release them here.
		 */
		for (i = 0; i < se->out_sync_count; i++) {
			drm_syncobj_replace_fence(se->out_syncs[i].syncobj,
						  done_fence);
			drm_syncobj_put(se->out_syncs[i].syncobj);
		}
		kvfree(se->out_syncs);
	}
}
/* Allocate and initialize the CSD job plus its trailing cache-clean job,
 * then look up and lock the submit's BO list on the clean job.
 *
 * On failure the partially-constructed jobs are NOT torn down here;
 * callers own *job / *clean_job and clean them up (e.g. via
 * v3d_job_cleanup()) — NOTE(review): verify this against each caller.
 */
static int
v3d_setup_csd_jobs_and_bos(struct drm_file *file_priv,
			   struct v3d_dev *v3d,
			   struct drm_v3d_submit_csd *args,
			   struct v3d_csd_job **job,
			   struct v3d_job **clean_job,
			   struct v3d_submit_ext *se,
			   struct ww_acquire_ctx *acquire_ctx)
{
	int ret;

	ret = v3d_job_allocate((void *)job, sizeof(**job));
	if (ret)
		return ret;

	ret = v3d_job_init(v3d, file_priv, &(*job)->base,
			   v3d_job_free, args->in_sync, se, V3D_CSD);
	if (ret) {
		v3d_job_deallocate((void *)job);
		return ret;
	}

	ret = v3d_job_allocate((void *)clean_job, sizeof(**clean_job));
	if (ret)
		return ret;

	ret = v3d_job_init(v3d, file_priv, *clean_job,
			   v3d_job_free, 0, NULL, V3D_CACHE_CLEAN);
	if (ret) {
		v3d_job_deallocate((void *)clean_job);
		return ret;
	}

	(*job)->args = *args;

	/* The BO list lives on the clean job, which runs last. */
	ret = v3d_lookup_bos(&v3d->drm, file_priv, *clean_job,
			     args->bo_handles, args->bo_handle_count);
	if (ret)
		return ret;

	return v3d_lock_bo_reservations(*clean_job, acquire_ctx);
}
  254. static void
  255. v3d_put_multisync_post_deps(struct v3d_submit_ext *se)
  256. {
  257. unsigned int i;
  258. if (!(se && se->out_sync_count))
  259. return;
  260. for (i = 0; i < se->out_sync_count; i++)
  261. drm_syncobj_put(se->out_syncs[i].syncobj);
  262. kvfree(se->out_syncs);
  263. }
  264. static int
  265. v3d_get_multisync_post_deps(struct drm_file *file_priv,
  266. struct v3d_submit_ext *se,
  267. u32 count, u64 handles)
  268. {
  269. struct drm_v3d_sem __user *post_deps;
  270. int i, ret;
  271. if (!count)
  272. return 0;
  273. se->out_syncs = (struct v3d_submit_outsync *)
  274. kvmalloc_array(count,
  275. sizeof(struct v3d_submit_outsync),
  276. GFP_KERNEL);
  277. if (!se->out_syncs)
  278. return -ENOMEM;
  279. post_deps = u64_to_user_ptr(handles);
  280. for (i = 0; i < count; i++) {
  281. struct drm_v3d_sem out;
  282. if (copy_from_user(&out, post_deps++, sizeof(out))) {
  283. ret = -EFAULT;
  284. DRM_DEBUG("Failed to copy post dep handles\n");
  285. goto fail;
  286. }
  287. se->out_syncs[i].syncobj = drm_syncobj_find(file_priv,
  288. out.handle);
  289. if (!se->out_syncs[i].syncobj) {
  290. ret = -EINVAL;
  291. goto fail;
  292. }
  293. }
  294. se->out_sync_count = count;
  295. return 0;
  296. fail:
  297. for (i--; i >= 0; i--)
  298. drm_syncobj_put(se->out_syncs[i].syncobj);
  299. kvfree(se->out_syncs);
  300. return ret;
  301. }
  302. /* Get data for multiple binary semaphores synchronization. Parse syncobj
  303. * to be signaled when job completes (out_sync).
  304. */
  305. static int
  306. v3d_get_multisync_submit_deps(struct drm_file *file_priv,
  307. struct drm_v3d_extension __user *ext,
  308. struct v3d_submit_ext *se)
  309. {
  310. struct drm_v3d_multi_sync multisync;
  311. int ret;
  312. if (se->in_sync_count || se->out_sync_count) {
  313. DRM_DEBUG("Two multisync extensions were added to the same job.");
  314. return -EINVAL;
  315. }
  316. if (copy_from_user(&multisync, ext, sizeof(multisync)))
  317. return -EFAULT;
  318. if (multisync.pad)
  319. return -EINVAL;
  320. ret = v3d_get_multisync_post_deps(file_priv, se, multisync.out_sync_count,
  321. multisync.out_syncs);
  322. if (ret)
  323. return ret;
  324. se->in_sync_count = multisync.in_sync_count;
  325. se->in_syncs = multisync.in_syncs;
  326. se->flags |= DRM_V3D_EXT_ID_MULTI_SYNC;
  327. se->wait_stage = multisync.wait_stage;
  328. return 0;
  329. }
  330. /* Get data for the indirect CSD job submission. */
  331. static int
  332. v3d_get_cpu_indirect_csd_params(struct drm_file *file_priv,
  333. struct drm_v3d_extension __user *ext,
  334. struct v3d_cpu_job *job)
  335. {
  336. struct v3d_file_priv *v3d_priv = file_priv->driver_priv;
  337. struct v3d_dev *v3d = v3d_priv->v3d;
  338. struct drm_v3d_indirect_csd indirect_csd;
  339. struct v3d_indirect_csd_info *info = &job->indirect_csd;
  340. if (!job) {
  341. DRM_DEBUG("CPU job extension was attached to a GPU job.\n");
  342. return -EINVAL;
  343. }
  344. if (job->job_type) {
  345. DRM_DEBUG("Two CPU job extensions were added to the same CPU job.\n");
  346. return -EINVAL;
  347. }
  348. if (copy_from_user(&indirect_csd, ext, sizeof(indirect_csd)))
  349. return -EFAULT;
  350. if (!v3d_has_csd(v3d)) {
  351. DRM_DEBUG("Attempting CSD submit on non-CSD hardware.\n");
  352. return -EINVAL;
  353. }
  354. job->job_type = V3D_CPU_JOB_TYPE_INDIRECT_CSD;
  355. info->offset = indirect_csd.offset;
  356. info->wg_size = indirect_csd.wg_size;
  357. memcpy(&info->wg_uniform_offsets, &indirect_csd.wg_uniform_offsets,
  358. sizeof(indirect_csd.wg_uniform_offsets));
  359. info->indirect = drm_gem_object_lookup(file_priv, indirect_csd.indirect);
  360. return v3d_setup_csd_jobs_and_bos(file_priv, v3d, &indirect_csd.submit,
  361. &info->job, &info->clean_job,
  362. NULL, &info->acquire_ctx);
  363. }
  364. /* Get data for the query timestamp job submission. */
  365. static int
  366. v3d_get_cpu_timestamp_query_params(struct drm_file *file_priv,
  367. struct drm_v3d_extension __user *ext,
  368. struct v3d_cpu_job *job)
  369. {
  370. u32 __user *offsets, *syncs;
  371. struct drm_v3d_timestamp_query timestamp;
  372. struct v3d_timestamp_query_info *query_info = &job->timestamp_query;
  373. unsigned int i;
  374. int err;
  375. if (!job) {
  376. DRM_DEBUG("CPU job extension was attached to a GPU job.\n");
  377. return -EINVAL;
  378. }
  379. if (job->job_type) {
  380. DRM_DEBUG("Two CPU job extensions were added to the same CPU job.\n");
  381. return -EINVAL;
  382. }
  383. if (copy_from_user(&timestamp, ext, sizeof(timestamp)))
  384. return -EFAULT;
  385. if (timestamp.pad)
  386. return -EINVAL;
  387. job->job_type = V3D_CPU_JOB_TYPE_TIMESTAMP_QUERY;
  388. query_info->queries = kvmalloc_array(timestamp.count,
  389. sizeof(struct v3d_timestamp_query),
  390. GFP_KERNEL);
  391. if (!query_info->queries)
  392. return -ENOMEM;
  393. offsets = u64_to_user_ptr(timestamp.offsets);
  394. syncs = u64_to_user_ptr(timestamp.syncs);
  395. for (i = 0; i < timestamp.count; i++) {
  396. u32 offset, sync;
  397. if (get_user(offset, offsets++)) {
  398. err = -EFAULT;
  399. goto error;
  400. }
  401. query_info->queries[i].offset = offset;
  402. if (get_user(sync, syncs++)) {
  403. err = -EFAULT;
  404. goto error;
  405. }
  406. query_info->queries[i].syncobj = drm_syncobj_find(file_priv,
  407. sync);
  408. if (!query_info->queries[i].syncobj) {
  409. err = -ENOENT;
  410. goto error;
  411. }
  412. }
  413. query_info->count = timestamp.count;
  414. return 0;
  415. error:
  416. v3d_timestamp_query_info_free(&job->timestamp_query, i);
  417. return err;
  418. }
  419. static int
  420. v3d_get_cpu_reset_timestamp_params(struct drm_file *file_priv,
  421. struct drm_v3d_extension __user *ext,
  422. struct v3d_cpu_job *job)
  423. {
  424. u32 __user *syncs;
  425. struct drm_v3d_reset_timestamp_query reset;
  426. struct v3d_timestamp_query_info *query_info = &job->timestamp_query;
  427. unsigned int i;
  428. int err;
  429. if (!job) {
  430. DRM_DEBUG("CPU job extension was attached to a GPU job.\n");
  431. return -EINVAL;
  432. }
  433. if (job->job_type) {
  434. DRM_DEBUG("Two CPU job extensions were added to the same CPU job.\n");
  435. return -EINVAL;
  436. }
  437. if (copy_from_user(&reset, ext, sizeof(reset)))
  438. return -EFAULT;
  439. job->job_type = V3D_CPU_JOB_TYPE_RESET_TIMESTAMP_QUERY;
  440. query_info->queries = kvmalloc_array(reset.count,
  441. sizeof(struct v3d_timestamp_query),
  442. GFP_KERNEL);
  443. if (!query_info->queries)
  444. return -ENOMEM;
  445. syncs = u64_to_user_ptr(reset.syncs);
  446. for (i = 0; i < reset.count; i++) {
  447. u32 sync;
  448. query_info->queries[i].offset = reset.offset + 8 * i;
  449. if (get_user(sync, syncs++)) {
  450. err = -EFAULT;
  451. goto error;
  452. }
  453. query_info->queries[i].syncobj = drm_syncobj_find(file_priv,
  454. sync);
  455. if (!query_info->queries[i].syncobj) {
  456. err = -ENOENT;
  457. goto error;
  458. }
  459. }
  460. query_info->count = reset.count;
  461. return 0;
  462. error:
  463. v3d_timestamp_query_info_free(&job->timestamp_query, i);
  464. return err;
  465. }
  466. /* Get data for the copy timestamp query results job submission. */
  467. static int
  468. v3d_get_cpu_copy_query_results_params(struct drm_file *file_priv,
  469. struct drm_v3d_extension __user *ext,
  470. struct v3d_cpu_job *job)
  471. {
  472. u32 __user *offsets, *syncs;
  473. struct drm_v3d_copy_timestamp_query copy;
  474. struct v3d_timestamp_query_info *query_info = &job->timestamp_query;
  475. unsigned int i;
  476. int err;
  477. if (!job) {
  478. DRM_DEBUG("CPU job extension was attached to a GPU job.\n");
  479. return -EINVAL;
  480. }
  481. if (job->job_type) {
  482. DRM_DEBUG("Two CPU job extensions were added to the same CPU job.\n");
  483. return -EINVAL;
  484. }
  485. if (copy_from_user(&copy, ext, sizeof(copy)))
  486. return -EFAULT;
  487. if (copy.pad)
  488. return -EINVAL;
  489. job->job_type = V3D_CPU_JOB_TYPE_COPY_TIMESTAMP_QUERY;
  490. query_info->queries = kvmalloc_array(copy.count,
  491. sizeof(struct v3d_timestamp_query),
  492. GFP_KERNEL);
  493. if (!query_info->queries)
  494. return -ENOMEM;
  495. offsets = u64_to_user_ptr(copy.offsets);
  496. syncs = u64_to_user_ptr(copy.syncs);
  497. for (i = 0; i < copy.count; i++) {
  498. u32 offset, sync;
  499. if (get_user(offset, offsets++)) {
  500. err = -EFAULT;
  501. goto error;
  502. }
  503. query_info->queries[i].offset = offset;
  504. if (get_user(sync, syncs++)) {
  505. err = -EFAULT;
  506. goto error;
  507. }
  508. query_info->queries[i].syncobj = drm_syncobj_find(file_priv,
  509. sync);
  510. if (!query_info->queries[i].syncobj) {
  511. err = -ENOENT;
  512. goto error;
  513. }
  514. }
  515. query_info->count = copy.count;
  516. job->copy.do_64bit = copy.do_64bit;
  517. job->copy.do_partial = copy.do_partial;
  518. job->copy.availability_bit = copy.availability_bit;
  519. job->copy.offset = copy.offset;
  520. job->copy.stride = copy.stride;
  521. return 0;
  522. error:
  523. v3d_timestamp_query_info_free(&job->timestamp_query, i);
  524. return err;
  525. }
  526. static int
  527. v3d_copy_query_info(struct v3d_performance_query_info *query_info,
  528. unsigned int count,
  529. unsigned int nperfmons,
  530. u32 __user *syncs,
  531. u64 __user *kperfmon_ids,
  532. struct drm_file *file_priv)
  533. {
  534. unsigned int i, j;
  535. int err;
  536. for (i = 0; i < count; i++) {
  537. struct v3d_performance_query *query = &query_info->queries[i];
  538. u32 __user *ids_pointer;
  539. u32 sync, id;
  540. u64 ids;
  541. if (get_user(sync, syncs++)) {
  542. err = -EFAULT;
  543. goto error;
  544. }
  545. if (get_user(ids, kperfmon_ids++)) {
  546. err = -EFAULT;
  547. goto error;
  548. }
  549. query->kperfmon_ids =
  550. kvmalloc_array(nperfmons,
  551. sizeof(struct v3d_performance_query *),
  552. GFP_KERNEL);
  553. if (!query->kperfmon_ids) {
  554. err = -ENOMEM;
  555. goto error;
  556. }
  557. ids_pointer = u64_to_user_ptr(ids);
  558. for (j = 0; j < nperfmons; j++) {
  559. if (get_user(id, ids_pointer++)) {
  560. kvfree(query->kperfmon_ids);
  561. err = -EFAULT;
  562. goto error;
  563. }
  564. query->kperfmon_ids[j] = id;
  565. }
  566. query->syncobj = drm_syncobj_find(file_priv, sync);
  567. if (!query->syncobj) {
  568. kvfree(query->kperfmon_ids);
  569. err = -ENOENT;
  570. goto error;
  571. }
  572. }
  573. return 0;
  574. error:
  575. v3d_performance_query_info_free(query_info, i);
  576. return err;
  577. }
  578. static int
  579. v3d_get_cpu_reset_performance_params(struct drm_file *file_priv,
  580. struct drm_v3d_extension __user *ext,
  581. struct v3d_cpu_job *job)
  582. {
  583. struct v3d_performance_query_info *query_info = &job->performance_query;
  584. struct drm_v3d_reset_performance_query reset;
  585. int err;
  586. if (!job) {
  587. DRM_DEBUG("CPU job extension was attached to a GPU job.\n");
  588. return -EINVAL;
  589. }
  590. if (job->job_type) {
  591. DRM_DEBUG("Two CPU job extensions were added to the same CPU job.\n");
  592. return -EINVAL;
  593. }
  594. if (copy_from_user(&reset, ext, sizeof(reset)))
  595. return -EFAULT;
  596. job->job_type = V3D_CPU_JOB_TYPE_RESET_PERFORMANCE_QUERY;
  597. query_info->queries =
  598. kvmalloc_array(reset.count,
  599. sizeof(struct v3d_performance_query),
  600. GFP_KERNEL);
  601. if (!query_info->queries)
  602. return -ENOMEM;
  603. err = v3d_copy_query_info(query_info,
  604. reset.count,
  605. reset.nperfmons,
  606. u64_to_user_ptr(reset.syncs),
  607. u64_to_user_ptr(reset.kperfmon_ids),
  608. file_priv);
  609. if (err)
  610. return err;
  611. query_info->count = reset.count;
  612. query_info->nperfmons = reset.nperfmons;
  613. return 0;
  614. }
  615. static int
  616. v3d_get_cpu_copy_performance_query_params(struct drm_file *file_priv,
  617. struct drm_v3d_extension __user *ext,
  618. struct v3d_cpu_job *job)
  619. {
  620. struct v3d_performance_query_info *query_info = &job->performance_query;
  621. struct drm_v3d_copy_performance_query copy;
  622. int err;
  623. if (!job) {
  624. DRM_DEBUG("CPU job extension was attached to a GPU job.\n");
  625. return -EINVAL;
  626. }
  627. if (job->job_type) {
  628. DRM_DEBUG("Two CPU job extensions were added to the same CPU job.\n");
  629. return -EINVAL;
  630. }
  631. if (copy_from_user(&copy, ext, sizeof(copy)))
  632. return -EFAULT;
  633. if (copy.pad)
  634. return -EINVAL;
  635. job->job_type = V3D_CPU_JOB_TYPE_COPY_PERFORMANCE_QUERY;
  636. query_info->queries =
  637. kvmalloc_array(copy.count,
  638. sizeof(struct v3d_performance_query),
  639. GFP_KERNEL);
  640. if (!query_info->queries)
  641. return -ENOMEM;
  642. err = v3d_copy_query_info(query_info,
  643. copy.count,
  644. copy.nperfmons,
  645. u64_to_user_ptr(copy.syncs),
  646. u64_to_user_ptr(copy.kperfmon_ids),
  647. file_priv);
  648. if (err)
  649. return err;
  650. query_info->count = copy.count;
  651. query_info->nperfmons = copy.nperfmons;
  652. query_info->ncounters = copy.ncounters;
  653. job->copy.do_64bit = copy.do_64bit;
  654. job->copy.do_partial = copy.do_partial;
  655. job->copy.availability_bit = copy.availability_bit;
  656. job->copy.offset = copy.offset;
  657. job->copy.stride = copy.stride;
  658. return 0;
  659. }
  660. /* Whenever userspace sets ioctl extensions, v3d_get_extensions parses data
  661. * according to the extension id (name).
  662. */
static int
v3d_get_extensions(struct drm_file *file_priv,
		   u64 ext_handles,
		   struct v3d_submit_ext *se,
		   struct v3d_cpu_job *job)
{
	struct drm_v3d_extension __user *user_ext;
	int ret;

	user_ext = u64_to_user_ptr(ext_handles);
	/* Walk the userspace-provided singly-linked extension chain.
	 * NOTE(review): the chain is fully trusted for termination — a
	 * self-referencing ext.next would loop forever; confirm this is
	 * acceptable or bounded elsewhere.
	 */
	while (user_ext) {
		struct drm_v3d_extension ext;

		if (copy_from_user(&ext, user_ext, sizeof(ext))) {
			DRM_DEBUG("Failed to copy submit extension\n");
			return -EFAULT;
		}

		switch (ext.id) {
		case DRM_V3D_EXT_ID_MULTI_SYNC:
			ret = v3d_get_multisync_submit_deps(file_priv, user_ext, se);
			break;
		case DRM_V3D_EXT_ID_CPU_INDIRECT_CSD:
			ret = v3d_get_cpu_indirect_csd_params(file_priv, user_ext, job);
			break;
		case DRM_V3D_EXT_ID_CPU_TIMESTAMP_QUERY:
			ret = v3d_get_cpu_timestamp_query_params(file_priv, user_ext, job);
			break;
		case DRM_V3D_EXT_ID_CPU_RESET_TIMESTAMP_QUERY:
			ret = v3d_get_cpu_reset_timestamp_params(file_priv, user_ext, job);
			break;
		case DRM_V3D_EXT_ID_CPU_COPY_TIMESTAMP_QUERY:
			ret = v3d_get_cpu_copy_query_results_params(file_priv, user_ext, job);
			break;
		case DRM_V3D_EXT_ID_CPU_RESET_PERFORMANCE_QUERY:
			ret = v3d_get_cpu_reset_performance_params(file_priv, user_ext, job);
			break;
		case DRM_V3D_EXT_ID_CPU_COPY_PERFORMANCE_QUERY:
			ret = v3d_get_cpu_copy_performance_query_params(file_priv, user_ext, job);
			break;
		default:
			DRM_DEBUG_DRIVER("Unknown extension id: %d\n", ext.id);
			return -EINVAL;
		}

		if (ret)
			return ret;

		user_ext = u64_to_user_ptr(ext.next);
	}

	return 0;
}
  710. /**
  711. * v3d_submit_cl_ioctl() - Submits a job (frame) to the V3D.
  712. * @dev: DRM device
  713. * @data: ioctl argument
  714. * @file_priv: DRM file for this fd
  715. *
  716. * This is the main entrypoint for userspace to submit a 3D frame to
  717. * the GPU. Userspace provides the binner command list (if
  718. * applicable), and the kernel sets up the render command list to draw
  719. * to the framebuffer described in the ioctl, using the command lists
  720. * that the 3D engine's binner will produce.
  721. */
int
v3d_submit_cl_ioctl(struct drm_device *dev, void *data,
		    struct drm_file *file_priv)
{
	struct v3d_dev *v3d = to_v3d_dev(dev);
	struct v3d_file_priv *v3d_priv = file_priv->driver_priv;
	struct drm_v3d_submit_cl *args = data;
	struct v3d_submit_ext se = {0};
	struct v3d_bin_job *bin = NULL;
	struct v3d_render_job *render = NULL;
	struct v3d_job *clean_job = NULL;
	struct v3d_job *last_job;
	struct ww_acquire_ctx acquire_ctx;
	int ret = 0;

	trace_v3d_submit_cl_ioctl(&v3d->drm, args->rcl_start, args->rcl_end);

	if (args->pad)
		return -EINVAL;

	if (args->flags &&
	    args->flags & ~(DRM_V3D_SUBMIT_CL_FLUSH_CACHE |
			    DRM_V3D_SUBMIT_EXTENSION)) {
		DRM_INFO("invalid flags: %d\n", args->flags);
		return -EINVAL;
	}

	if (args->flags & DRM_V3D_SUBMIT_EXTENSION) {
		ret = v3d_get_extensions(file_priv, args->extensions, &se, NULL);
		if (ret) {
			DRM_DEBUG("Failed to get extensions.\n");
			return ret;
		}
	}

	/* The render job always exists; bin and clean jobs are optional. */
	ret = v3d_job_allocate((void *)&render, sizeof(*render));
	if (ret)
		return ret;

	ret = v3d_job_init(v3d, file_priv, &render->base,
			   v3d_render_job_free, args->in_sync_rcl, &se, V3D_RENDER);
	if (ret) {
		/* v3d_job_init() failed before kref_init(); free directly. */
		v3d_job_deallocate((void *)&render);
		goto fail;
	}

	render->start = args->rcl_start;
	render->end = args->rcl_end;
	INIT_LIST_HEAD(&render->unref_list);

	/* An empty binner command list means no bin job is needed. */
	if (args->bcl_start != args->bcl_end) {
		ret = v3d_job_allocate((void *)&bin, sizeof(*bin));
		if (ret)
			goto fail;

		ret = v3d_job_init(v3d, file_priv, &bin->base,
				   v3d_job_free, args->in_sync_bcl, &se, V3D_BIN);
		if (ret) {
			v3d_job_deallocate((void *)&bin);
			goto fail;
		}

		bin->start = args->bcl_start;
		bin->end = args->bcl_end;
		bin->qma = args->qma;
		bin->qms = args->qms;
		bin->qts = args->qts;
		bin->render = render;
	}

	if (args->flags & DRM_V3D_SUBMIT_CL_FLUSH_CACHE) {
		ret = v3d_job_allocate((void *)&clean_job, sizeof(*clean_job));
		if (ret)
			goto fail;

		ret = v3d_job_init(v3d, file_priv, clean_job,
				   v3d_job_free, 0, NULL, V3D_CACHE_CLEAN);
		if (ret) {
			v3d_job_deallocate((void *)&clean_job);
			goto fail;
		}

		last_job = clean_job;
	} else {
		last_job = &render->base;
	}

	/* The BO list and reservations live on whichever job runs last. */
	ret = v3d_lookup_bos(dev, file_priv, last_job,
			     args->bo_handles, args->bo_handle_count);
	if (ret)
		goto fail;

	ret = v3d_lock_bo_reservations(last_job, &acquire_ctx);
	if (ret)
		goto fail;

	if (args->perfmon_id) {
		render->base.perfmon = v3d_perfmon_find(v3d_priv,
							args->perfmon_id);

		if (!render->base.perfmon) {
			ret = -ENOENT;
			goto fail_perfmon;
		}
	}

	/* sched_lock keeps bin -> render -> clean pushed as one unit so
	 * their dependencies are in place before any of them can run.
	 */
	mutex_lock(&v3d->sched_lock);
	if (bin) {
		bin->base.perfmon = render->base.perfmon;
		v3d_perfmon_get(bin->base.perfmon);
		v3d_push_job(&bin->base);

		/* Render must wait for the bin job's done fence. */
		ret = drm_sched_job_add_dependency(&render->base.base,
						   dma_fence_get(bin->base.done_fence));
		if (ret)
			goto fail_unreserve;
	}

	v3d_push_job(&render->base);

	if (clean_job) {
		struct dma_fence *render_fence =
			dma_fence_get(render->base.done_fence);
		ret = drm_sched_job_add_dependency(&clean_job->base,
						   render_fence);
		if (ret)
			goto fail_unreserve;
		clean_job->perfmon = render->base.perfmon;
		v3d_perfmon_get(clean_job->perfmon);
		v3d_push_job(clean_job);
	}

	mutex_unlock(&v3d->sched_lock);

	v3d_attach_fences_and_unlock_reservation(file_priv,
						 last_job,
						 &acquire_ctx,
						 args->out_sync,
						 &se,
						 last_job->done_fence);

	/* Drop the ioctl's references; the scheduler holds its own.
	 * NOTE(review): &bin->base with bin == NULL relies on base being
	 * the first member (so the result is NULL and v3d_job_put()'s
	 * NULL check catches it) — confirm against struct v3d_bin_job.
	 */
	v3d_job_put(&bin->base);
	v3d_job_put(&render->base);
	v3d_job_put(clean_job);

	return 0;

fail_unreserve:
	mutex_unlock(&v3d->sched_lock);
fail_perfmon:
	drm_gem_unlock_reservations(last_job->bo,
				    last_job->bo_count, &acquire_ctx);
fail:
	/* v3d_job_cleanup() tolerates NULL for the jobs never created. */
	v3d_job_cleanup((void *)bin);
	v3d_job_cleanup((void *)render);
	v3d_job_cleanup(clean_job);
	v3d_put_multisync_post_deps(&se);

	return ret;
}
/**
 * v3d_submit_tfu_ioctl() - Submits a TFU (texture formatting) job to the V3D.
 * @dev: DRM device
 * @data: ioctl argument
 * @file_priv: DRM file for this fd
 *
 * Userspace provides the register setup for the TFU, which we don't
 * need to validate since the TFU is behind the MMU.
 */
int
v3d_submit_tfu_ioctl(struct drm_device *dev, void *data,
		     struct drm_file *file_priv)
{
	struct v3d_dev *v3d = to_v3d_dev(dev);
	struct drm_v3d_submit_tfu *args = data;
	struct v3d_submit_ext se = {0};
	struct v3d_tfu_job *job = NULL;
	struct ww_acquire_ctx acquire_ctx;
	int ret = 0;

	trace_v3d_submit_tfu_ioctl(&v3d->drm, args->iia);

	/* The only flag understood here selects the extension chain. */
	if (args->flags && !(args->flags & DRM_V3D_SUBMIT_EXTENSION)) {
		DRM_DEBUG("invalid flags: %d\n", args->flags);
		return -EINVAL;
	}

	if (args->flags & DRM_V3D_SUBMIT_EXTENSION) {
		ret = v3d_get_extensions(file_priv, args->extensions, &se, NULL);
		if (ret) {
			DRM_DEBUG("Failed to get extensions.\n");
			return ret;
		}
	}

	ret = v3d_job_allocate((void *)&job, sizeof(*job));
	if (ret)
		return ret;

	ret = v3d_job_init(v3d, file_priv, &job->base,
			   v3d_job_free, args->in_sync, &se, V3D_TFU);
	if (ret) {
		/* Init failed, so only the bare allocation is undone here. */
		v3d_job_deallocate((void *)&job);
		goto fail;
	}

	/*
	 * args->bo_handles is a fixed-size array; reserve one BO slot per
	 * possible handle and stop at the first zero handle in the loop
	 * below.
	 */
	job->base.bo = kcalloc(ARRAY_SIZE(args->bo_handles),
			       sizeof(*job->base.bo), GFP_KERNEL);
	if (!job->base.bo) {
		ret = -ENOMEM;
		goto fail;
	}

	job->args = *args;

	for (job->base.bo_count = 0;
	     job->base.bo_count < ARRAY_SIZE(args->bo_handles);
	     job->base.bo_count++) {
		struct drm_gem_object *bo;

		if (!args->bo_handles[job->base.bo_count])
			break;

		bo = drm_gem_object_lookup(file_priv, args->bo_handles[job->base.bo_count]);
		if (!bo) {
			DRM_DEBUG("Failed to look up GEM BO %d: %d\n",
				  job->base.bo_count,
				  args->bo_handles[job->base.bo_count]);
			ret = -ENOENT;
			goto fail;
		}
		job->base.bo[job->base.bo_count] = bo;
	}

	ret = v3d_lock_bo_reservations(&job->base, &acquire_ctx);
	if (ret)
		goto fail;

	/* sched_lock serializes pushes so jobs enter the queue in order. */
	mutex_lock(&v3d->sched_lock);
	v3d_push_job(&job->base);
	mutex_unlock(&v3d->sched_lock);

	v3d_attach_fences_and_unlock_reservation(file_priv,
						 &job->base, &acquire_ctx,
						 args->out_sync,
						 &se,
						 job->base.done_fence);

	/* Drop this ioctl's reference to the job. */
	v3d_job_put(&job->base);

	return 0;

fail:
	v3d_job_cleanup((void *)job);
	v3d_put_multisync_post_deps(&se);

	return ret;
}
/**
 * v3d_submit_csd_ioctl() - Submits a CSD (compute shader) job to the V3D.
 * @dev: DRM device
 * @data: ioctl argument
 * @file_priv: DRM file for this fd
 *
 * Userspace provides the register setup for the CSD, which we don't
 * need to validate since the CSD is behind the MMU.
 */
int
v3d_submit_csd_ioctl(struct drm_device *dev, void *data,
		     struct drm_file *file_priv)
{
	struct v3d_dev *v3d = to_v3d_dev(dev);
	struct v3d_file_priv *v3d_priv = file_priv->driver_priv;
	struct drm_v3d_submit_csd *args = data;
	struct v3d_submit_ext se = {0};
	struct v3d_csd_job *job = NULL;
	struct v3d_job *clean_job = NULL;
	struct ww_acquire_ctx acquire_ctx;
	int ret;

	trace_v3d_submit_csd_ioctl(&v3d->drm, args->cfg[5], args->cfg[6]);

	if (args->pad)
		return -EINVAL;

	if (!v3d_has_csd(v3d)) {
		DRM_DEBUG("Attempting CSD submit on non-CSD hardware\n");
		return -EINVAL;
	}

	/* The only flag understood here selects the extension chain. */
	if (args->flags && !(args->flags & DRM_V3D_SUBMIT_EXTENSION)) {
		DRM_INFO("invalid flags: %d\n", args->flags);
		return -EINVAL;
	}

	if (args->flags & DRM_V3D_SUBMIT_EXTENSION) {
		ret = v3d_get_extensions(file_priv, args->extensions, &se, NULL);
		if (ret) {
			DRM_DEBUG("Failed to get extensions.\n");
			return ret;
		}
	}

	/*
	 * Sets up both the CSD job and its trailing cache-clean job, and
	 * leaves the BO reservations locked on success (fail_perfmon below
	 * unlocks them).
	 */
	ret = v3d_setup_csd_jobs_and_bos(file_priv, v3d, args,
					 &job, &clean_job, &se,
					 &acquire_ctx);
	if (ret)
		goto fail;

	if (args->perfmon_id) {
		job->base.perfmon = v3d_perfmon_find(v3d_priv,
						     args->perfmon_id);
		if (!job->base.perfmon) {
			ret = -ENOENT;
			goto fail_perfmon;
		}
	}

	mutex_lock(&v3d->sched_lock);
	v3d_push_job(&job->base);

	/* The clean job must only run once the CSD job has completed. */
	ret = drm_sched_job_add_dependency(&clean_job->base,
					   dma_fence_get(job->base.done_fence));
	if (ret)
		goto fail_unreserve;

	v3d_push_job(clean_job);
	mutex_unlock(&v3d->sched_lock);

	/*
	 * The clean job is the last of the chain, so its done_fence marks
	 * completion of the whole submission.
	 */
	v3d_attach_fences_and_unlock_reservation(file_priv,
						 clean_job,
						 &acquire_ctx,
						 args->out_sync,
						 &se,
						 clean_job->done_fence);

	v3d_job_put(&job->base);
	v3d_job_put(clean_job);

	return 0;

fail_unreserve:
	mutex_unlock(&v3d->sched_lock);
fail_perfmon:
	drm_gem_unlock_reservations(clean_job->bo, clean_job->bo_count,
				    &acquire_ctx);
fail:
	v3d_job_cleanup((void *)job);
	v3d_job_cleanup(clean_job);
	v3d_put_multisync_post_deps(&se);

	return ret;
}
/*
 * Number of BO handles each CPU job type must be submitted with;
 * v3d_submit_cpu_ioctl() rejects a submission whose bo_handle_count
 * differs from the entry for its job type.
 */
static const unsigned int cpu_job_bo_handle_count[] = {
	[V3D_CPU_JOB_TYPE_INDIRECT_CSD] = 1,
	[V3D_CPU_JOB_TYPE_TIMESTAMP_QUERY] = 1,
	[V3D_CPU_JOB_TYPE_RESET_TIMESTAMP_QUERY] = 1,
	[V3D_CPU_JOB_TYPE_COPY_TIMESTAMP_QUERY] = 2,
	[V3D_CPU_JOB_TYPE_RESET_PERFORMANCE_QUERY] = 0,
	[V3D_CPU_JOB_TYPE_COPY_PERFORMANCE_QUERY] = 1,
};
  1024. /**
  1025. * v3d_submit_cpu_ioctl() - Submits a CPU job to the V3D.
  1026. * @dev: DRM device
  1027. * @data: ioctl argument
  1028. * @file_priv: DRM file for this fd
  1029. *
  1030. * Userspace specifies the CPU job type and data required to perform its
  1031. * operations through the drm_v3d_extension struct.
  1032. */
  1033. int
  1034. v3d_submit_cpu_ioctl(struct drm_device *dev, void *data,
  1035. struct drm_file *file_priv)
  1036. {
  1037. struct v3d_dev *v3d = to_v3d_dev(dev);
  1038. struct drm_v3d_submit_cpu *args = data;
  1039. struct v3d_submit_ext se = {0};
  1040. struct v3d_submit_ext *out_se = NULL;
  1041. struct v3d_cpu_job *cpu_job = NULL;
  1042. struct v3d_csd_job *csd_job = NULL;
  1043. struct v3d_job *clean_job = NULL;
  1044. struct ww_acquire_ctx acquire_ctx;
  1045. int ret;
  1046. if (args->flags && !(args->flags & DRM_V3D_SUBMIT_EXTENSION)) {
  1047. DRM_INFO("Invalid flags: %d\n", args->flags);
  1048. return -EINVAL;
  1049. }
  1050. ret = v3d_job_allocate((void *)&cpu_job, sizeof(*cpu_job));
  1051. if (ret)
  1052. return ret;
  1053. if (args->flags & DRM_V3D_SUBMIT_EXTENSION) {
  1054. ret = v3d_get_extensions(file_priv, args->extensions, &se, cpu_job);
  1055. if (ret) {
  1056. DRM_DEBUG("Failed to get extensions.\n");
  1057. goto fail;
  1058. }
  1059. }
  1060. /* Every CPU job must have a CPU job user extension */
  1061. if (!cpu_job->job_type) {
  1062. DRM_DEBUG("CPU job must have a CPU job user extension.\n");
  1063. ret = -EINVAL;
  1064. goto fail;
  1065. }
  1066. if (args->bo_handle_count != cpu_job_bo_handle_count[cpu_job->job_type]) {
  1067. DRM_DEBUG("This CPU job was not submitted with the proper number of BOs.\n");
  1068. ret = -EINVAL;
  1069. goto fail;
  1070. }
  1071. trace_v3d_submit_cpu_ioctl(&v3d->drm, cpu_job->job_type);
  1072. ret = v3d_job_init(v3d, file_priv, &cpu_job->base,
  1073. v3d_job_free, 0, &se, V3D_CPU);
  1074. if (ret) {
  1075. v3d_job_deallocate((void *)&cpu_job);
  1076. goto fail;
  1077. }
  1078. clean_job = cpu_job->indirect_csd.clean_job;
  1079. csd_job = cpu_job->indirect_csd.job;
  1080. if (args->bo_handle_count) {
  1081. ret = v3d_lookup_bos(dev, file_priv, &cpu_job->base,
  1082. args->bo_handles, args->bo_handle_count);
  1083. if (ret)
  1084. goto fail;
  1085. ret = v3d_lock_bo_reservations(&cpu_job->base, &acquire_ctx);
  1086. if (ret)
  1087. goto fail;
  1088. }
  1089. mutex_lock(&v3d->sched_lock);
  1090. v3d_push_job(&cpu_job->base);
  1091. switch (cpu_job->job_type) {
  1092. case V3D_CPU_JOB_TYPE_INDIRECT_CSD:
  1093. ret = drm_sched_job_add_dependency(&csd_job->base.base,
  1094. dma_fence_get(cpu_job->base.done_fence));
  1095. if (ret)
  1096. goto fail_unreserve;
  1097. v3d_push_job(&csd_job->base);
  1098. ret = drm_sched_job_add_dependency(&clean_job->base,
  1099. dma_fence_get(csd_job->base.done_fence));
  1100. if (ret)
  1101. goto fail_unreserve;
  1102. v3d_push_job(clean_job);
  1103. break;
  1104. default:
  1105. break;
  1106. }
  1107. mutex_unlock(&v3d->sched_lock);
  1108. out_se = (cpu_job->job_type == V3D_CPU_JOB_TYPE_INDIRECT_CSD) ? NULL : &se;
  1109. v3d_attach_fences_and_unlock_reservation(file_priv,
  1110. &cpu_job->base,
  1111. &acquire_ctx, 0,
  1112. out_se, cpu_job->base.done_fence);
  1113. switch (cpu_job->job_type) {
  1114. case V3D_CPU_JOB_TYPE_INDIRECT_CSD:
  1115. v3d_attach_fences_and_unlock_reservation(file_priv,
  1116. clean_job,
  1117. &cpu_job->indirect_csd.acquire_ctx,
  1118. 0, &se, clean_job->done_fence);
  1119. break;
  1120. default:
  1121. break;
  1122. }
  1123. v3d_job_put(&cpu_job->base);
  1124. v3d_job_put(&csd_job->base);
  1125. v3d_job_put(clean_job);
  1126. return 0;
  1127. fail_unreserve:
  1128. mutex_unlock(&v3d->sched_lock);
  1129. drm_gem_unlock_reservations(cpu_job->base.bo, cpu_job->base.bo_count,
  1130. &acquire_ctx);
  1131. drm_gem_unlock_reservations(clean_job->bo, clean_job->bo_count,
  1132. &cpu_job->indirect_csd.acquire_ctx);
  1133. fail:
  1134. v3d_job_cleanup((void *)cpu_job);
  1135. v3d_job_cleanup((void *)csd_job);
  1136. v3d_job_cleanup(clean_job);
  1137. v3d_put_multisync_post_deps(&se);
  1138. kvfree(cpu_job->timestamp_query.queries);
  1139. kvfree(cpu_job->performance_query.queries);
  1140. return ret;
  1141. }