submit.c

// SPDX-License-Identifier: GPL-2.0-only
/* Copyright (c) 2020 NVIDIA Corporation */

#include <linux/dma-fence-array.h>
#include <linux/dma-mapping.h>
#include <linux/file.h>
#include <linux/host1x.h>
#include <linux/iommu.h>
#include <linux/kref.h>
#include <linux/list.h>
#include <linux/nospec.h>
#include <linux/pm_runtime.h>
#include <linux/scatterlist.h>
#include <linux/slab.h>
#include <linux/sync_file.h>

#include <drm/drm_drv.h>
#include <drm/drm_file.h>
#include <drm/drm_syncobj.h>

#include "drm.h"
#include "gem.h"
#include "submit.h"
#include "uapi.h"

#define SUBMIT_ERR(context, fmt, ...) \
	dev_err_ratelimited(context->client->base.dev, \
			    "%s: job submission failed: " fmt "\n", \
			    current->comm, ##__VA_ARGS__)

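/*
 * Gather data copied from userspace, wrapped as a reference-counted host1x
 * buffer object backed by a DMA allocation on @dev.
 */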
struct gather_bo {
	struct host1x_bo base;
	struct kref ref;
	struct device *dev;
	u32 *gather_data;
	dma_addr_t gather_data_dma;
	size_t gather_data_words;
};

static struct host1x_bo *gather_bo_get(struct host1x_bo *host_bo)
{
	struct gather_bo *bo = container_of(host_bo, struct gather_bo, base);

	kref_get(&bo->ref);

	return host_bo;
}

static void gather_bo_release(struct kref *ref)
{
	struct gather_bo *bo = container_of(ref, struct gather_bo, ref);

	dma_free_attrs(bo->dev, bo->gather_data_words * 4, bo->gather_data, bo->gather_data_dma,
		       0);
	kfree(bo);
}

static void gather_bo_put(struct host1x_bo *host_bo)
{
	struct gather_bo *bo = container_of(host_bo, struct gather_bo, base);

	kref_put(&bo->ref, gather_bo_release);
}

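/*
 * Build a host1x_bo_mapping for the gather data: get an SG table for the DMA
 * allocation and map it for DMA on the requesting device.
 */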
static struct host1x_bo_mapping *
gather_bo_pin(struct device *dev, struct host1x_bo *bo, enum dma_data_direction direction)
{
	struct gather_bo *gather = container_of(bo, struct gather_bo, base);
	struct host1x_bo_mapping *map;
	int err;

	map = kzalloc(sizeof(*map), GFP_KERNEL);
	if (!map)
		return ERR_PTR(-ENOMEM);

	kref_init(&map->ref);
	map->bo = host1x_bo_get(bo);
	map->direction = direction;
	map->dev = dev;

	map->sgt = kzalloc(sizeof(*map->sgt), GFP_KERNEL);
	if (!map->sgt) {
		err = -ENOMEM;
		goto free;
	}

	err = dma_get_sgtable(gather->dev, map->sgt, gather->gather_data, gather->gather_data_dma,
			      gather->gather_data_words * 4);
	if (err)
		goto free_sgt;

	err = dma_map_sgtable(dev, map->sgt, direction, 0);
	if (err)
		goto free_sgt;

	map->phys = sg_dma_address(map->sgt->sgl);
	map->size = gather->gather_data_words * 4;
	map->chunks = err;

	return map;

free_sgt:
	sg_free_table(map->sgt);
	kfree(map->sgt);
free:
	kfree(map);
	return ERR_PTR(err);
}

static void gather_bo_unpin(struct host1x_bo_mapping *map)
{
	if (!map)
		return;

	dma_unmap_sgtable(map->dev, map->sgt, map->direction, 0);
	sg_free_table(map->sgt);
	kfree(map->sgt);
	host1x_bo_put(map->bo);

	kfree(map);
}

static void *gather_bo_mmap(struct host1x_bo *host_bo)
{
	struct gather_bo *bo = container_of(host_bo, struct gather_bo, base);

	return bo->gather_data;
}

static void gather_bo_munmap(struct host1x_bo *host_bo, void *addr)
{
}

static const struct host1x_bo_ops gather_bo_ops = {
	.get = gather_bo_get,
	.put = gather_bo_put,
	.pin = gather_bo_pin,
	.unpin = gather_bo_unpin,
	.mmap = gather_bo_mmap,
	.munmap = gather_bo_munmap,
};

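/* Look up a mapping by ID under the xarray lock and take a reference to it. */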
static struct tegra_drm_mapping *
tegra_drm_mapping_get(struct tegra_drm_context *context, u32 id)
{
	struct tegra_drm_mapping *mapping;

	xa_lock(&context->mappings);
	mapping = xa_load(&context->mappings, id);
	if (mapping)
		kref_get(&mapping->ref);
	xa_unlock(&context->mappings);

	return mapping;
}

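/*
 * Duplicate a userspace array of @count elements of @size bytes each,
 * rejecting multiplication overflow and copies larger than 16 KiB.
 */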
static void *alloc_copy_user_array(void __user *from, size_t count, size_t size)
{
	size_t copy_len;
	void *data;

	if (check_mul_overflow(count, size, &copy_len))
		return ERR_PTR(-EINVAL);

	if (copy_len > 0x4000)
		return ERR_PTR(-E2BIG);

	data = vmemdup_user(from, copy_len);
	if (IS_ERR(data))
		return ERR_CAST(data);

	return data;
}

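/*
 * Allocate a DMA buffer for the gather words and copy them in from userspace,
 * returning the result to the caller as a reference-counted gather_bo.
 */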
static int submit_copy_gather_data(struct gather_bo **pbo, struct device *dev,
				   struct tegra_drm_context *context,
				   struct drm_tegra_channel_submit *args)
{
	struct gather_bo *bo;
	size_t copy_len;

	if (args->gather_data_words == 0) {
		SUBMIT_ERR(context, "gather_data_words cannot be zero");
		return -EINVAL;
	}

	if (check_mul_overflow((size_t)args->gather_data_words, (size_t)4, &copy_len)) {
		SUBMIT_ERR(context, "gather_data_words is too large");
		return -EINVAL;
	}

	bo = kzalloc(sizeof(*bo), GFP_KERNEL);
	if (!bo) {
		SUBMIT_ERR(context, "failed to allocate memory for bo info");
		return -ENOMEM;
	}

	host1x_bo_init(&bo->base, &gather_bo_ops);
	kref_init(&bo->ref);
	bo->dev = dev;

	bo->gather_data = dma_alloc_attrs(dev, copy_len, &bo->gather_data_dma,
					  GFP_KERNEL | __GFP_NOWARN, 0);
	if (!bo->gather_data) {
		SUBMIT_ERR(context, "failed to allocate memory for gather data");
		kfree(bo);
		return -ENOMEM;
	}

	if (copy_from_user(bo->gather_data, u64_to_user_ptr(args->gather_data_ptr), copy_len)) {
		SUBMIT_ERR(context, "failed to copy gather data from userspace");
		dma_free_attrs(dev, copy_len, bo->gather_data, bo->gather_data_dma, 0);
		kfree(bo);
		return -EFAULT;
	}

	bo->gather_data_words = args->gather_data_words;

	*pbo = bo;

	return 0;
}

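/*
 * Patch a single relocation: bounds-check the gather offset and write the
 * shifted IOVA of the target mapping into the gather data.
 */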
static int submit_write_reloc(struct tegra_drm_context *context, struct gather_bo *bo,
			      struct drm_tegra_submit_buf *buf, struct tegra_drm_mapping *mapping)
{
	/* TODO check that target_offset is within bounds */
	dma_addr_t iova = mapping->iova + buf->reloc.target_offset;
	u32 written_ptr;

#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
	if (buf->flags & DRM_TEGRA_SUBMIT_RELOC_SECTOR_LAYOUT)
		iova |= BIT_ULL(39);
#endif

	written_ptr = iova >> buf->reloc.shift;

	if (buf->reloc.gather_offset_words >= bo->gather_data_words) {
		SUBMIT_ERR(context,
			   "relocation has too large gather offset (%u vs gather length %zu)",
			   buf->reloc.gather_offset_words, bo->gather_data_words);
		return -EINVAL;
	}

	buf->reloc.gather_offset_words = array_index_nospec(buf->reloc.gather_offset_words,
							    bo->gather_data_words);

	bo->gather_data[buf->reloc.gather_offset_words] = written_ptr;

	return 0;
}

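/*
 * Copy the buffer table from userspace, resolve each mapping ID, apply the
 * relocations and record the mappings in @job_data so that references are
 * held until the job is released.
 */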
static int submit_process_bufs(struct tegra_drm_context *context, struct gather_bo *bo,
			       struct drm_tegra_channel_submit *args,
			       struct tegra_drm_submit_data *job_data)
{
	struct tegra_drm_used_mapping *mappings;
	struct drm_tegra_submit_buf *bufs;
	int err;
	u32 i;

	bufs = alloc_copy_user_array(u64_to_user_ptr(args->bufs_ptr), args->num_bufs,
				     sizeof(*bufs));
	if (IS_ERR(bufs)) {
		SUBMIT_ERR(context, "failed to copy bufs array from userspace");
		return PTR_ERR(bufs);
	}

	mappings = kcalloc(args->num_bufs, sizeof(*mappings), GFP_KERNEL);
	if (!mappings) {
		SUBMIT_ERR(context, "failed to allocate memory for mapping info");
		err = -ENOMEM;
		goto done;
	}

	for (i = 0; i < args->num_bufs; i++) {
		struct drm_tegra_submit_buf *buf = &bufs[i];
		struct tegra_drm_mapping *mapping;

		if (buf->flags & ~DRM_TEGRA_SUBMIT_RELOC_SECTOR_LAYOUT) {
			SUBMIT_ERR(context, "invalid flag specified for buffer");
			err = -EINVAL;
			goto drop_refs;
		}

		mapping = tegra_drm_mapping_get(context, buf->mapping);
		if (!mapping) {
			SUBMIT_ERR(context, "invalid mapping ID '%u' for buffer", buf->mapping);
			err = -EINVAL;
			goto drop_refs;
		}

		err = submit_write_reloc(context, bo, buf, mapping);
		if (err) {
			tegra_drm_mapping_put(mapping);
			goto drop_refs;
		}

		mappings[i].mapping = mapping;
		mappings[i].flags = buf->flags;
	}

	job_data->used_mappings = mappings;
	job_data->num_used_mappings = i;

	err = 0;

	goto done;

drop_refs:
	while (i--)
		tegra_drm_mapping_put(mappings[i].mapping);

	kfree(mappings);
	job_data->used_mappings = NULL;

done:
	kvfree(bufs);

	return err;
}

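/* Resolve the job's syncpoint from the per-file syncpoint xarray. */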
static int submit_get_syncpt(struct tegra_drm_context *context, struct host1x_job *job,
			     struct xarray *syncpoints, struct drm_tegra_channel_submit *args)
{
	struct host1x_syncpt *sp;

	if (args->syncpt.flags) {
		SUBMIT_ERR(context, "invalid flag specified for syncpt");
		return -EINVAL;
	}

	/* Syncpt ref will be dropped on job release */
	sp = xa_load(syncpoints, args->syncpt.id);
	if (!sp) {
		SUBMIT_ERR(context, "syncpoint specified in syncpt was not allocated");
		return -EINVAL;
	}

	job->syncpt = host1x_syncpt_get(sp);
	job->syncpt_incrs = args->syncpt.increments;

	return 0;
}

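/*
 * Validate a GATHER_UPTR command (reserved fields, size limits, firewall)
 * and add the corresponding gather to the host1x job.
 */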
static int submit_job_add_gather(struct host1x_job *job, struct tegra_drm_context *context,
				 struct drm_tegra_submit_cmd_gather_uptr *cmd,
				 struct gather_bo *bo, u32 *offset,
				 struct tegra_drm_submit_data *job_data,
				 u32 *class)
{
	u32 next_offset;

	if (cmd->reserved[0] || cmd->reserved[1] || cmd->reserved[2]) {
		SUBMIT_ERR(context, "non-zero reserved field in GATHER_UPTR command");
		return -EINVAL;
	}

	/* Check for maximum gather size */
	if (cmd->words > 16383) {
		SUBMIT_ERR(context, "too many words in GATHER_UPTR command");
		return -EINVAL;
	}

	if (check_add_overflow(*offset, cmd->words, &next_offset)) {
		SUBMIT_ERR(context, "too many total words in job");
		return -EINVAL;
	}

	if (next_offset > bo->gather_data_words) {
		SUBMIT_ERR(context, "GATHER_UPTR command overflows gather data");
		return -EINVAL;
	}

	if (tegra_drm_fw_validate(context->client, bo->gather_data, *offset,
				  cmd->words, job_data, class)) {
		SUBMIT_ERR(context, "job was rejected by firewall");
		return -EINVAL;
	}

	host1x_job_add_gather(job, &bo->base, cmd->words, *offset * 4);

	*offset = next_offset;

	return 0;
}

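/*
 * Copy the command array from userspace and translate it into a host1x job
 * containing the requested gathers and syncpoint waits.
 */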
static struct host1x_job *
submit_create_job(struct tegra_drm_context *context, struct gather_bo *bo,
		  struct drm_tegra_channel_submit *args, struct tegra_drm_submit_data *job_data,
		  struct xarray *syncpoints)
{
	struct drm_tegra_submit_cmd *cmds;
	u32 i, gather_offset = 0, class;
	struct host1x_job *job;
	int err;

	/* Set initial class for firewall. */
	class = context->client->base.class;

	cmds = alloc_copy_user_array(u64_to_user_ptr(args->cmds_ptr), args->num_cmds,
				     sizeof(*cmds));
	if (IS_ERR(cmds)) {
		SUBMIT_ERR(context, "failed to copy cmds array from userspace");
		return ERR_CAST(cmds);
	}

	job = host1x_job_alloc(context->channel, args->num_cmds, 0, true);
	if (!job) {
		SUBMIT_ERR(context, "failed to allocate memory for job");
		job = ERR_PTR(-ENOMEM);
		goto done;
	}

	err = submit_get_syncpt(context, job, syncpoints, args);
	if (err < 0)
		goto free_job;

	job->client = &context->client->base;
	job->class = context->client->base.class;
	job->serialize = true;

	for (i = 0; i < args->num_cmds; i++) {
		struct drm_tegra_submit_cmd *cmd = &cmds[i];

		if (cmd->flags) {
			SUBMIT_ERR(context, "unknown flags given for cmd");
			err = -EINVAL;
			goto free_job;
		}

		if (cmd->type == DRM_TEGRA_SUBMIT_CMD_GATHER_UPTR) {
			err = submit_job_add_gather(job, context, &cmd->gather_uptr, bo,
						    &gather_offset, job_data, &class);
			if (err)
				goto free_job;
		} else if (cmd->type == DRM_TEGRA_SUBMIT_CMD_WAIT_SYNCPT) {
			if (cmd->wait_syncpt.reserved[0] || cmd->wait_syncpt.reserved[1]) {
				SUBMIT_ERR(context, "non-zero reserved value");
				err = -EINVAL;
				goto free_job;
			}

			host1x_job_add_wait(job, cmd->wait_syncpt.id, cmd->wait_syncpt.value,
					    false, class);
		} else if (cmd->type == DRM_TEGRA_SUBMIT_CMD_WAIT_SYNCPT_RELATIVE) {
			if (cmd->wait_syncpt.reserved[0] || cmd->wait_syncpt.reserved[1]) {
				SUBMIT_ERR(context, "non-zero reserved value");
				err = -EINVAL;
				goto free_job;
			}

			if (cmd->wait_syncpt.id != args->syncpt.id) {
				SUBMIT_ERR(context, "syncpoint ID in CMD_WAIT_SYNCPT_RELATIVE is not used by the job");
				err = -EINVAL;
				goto free_job;
			}

			host1x_job_add_wait(job, cmd->wait_syncpt.id, cmd->wait_syncpt.value,
					    true, class);
		} else {
			SUBMIT_ERR(context, "unknown cmd type");
			err = -EINVAL;
			goto free_job;
		}
	}

	if (gather_offset == 0) {
		SUBMIT_ERR(context, "job must have at least one gather");
		err = -EINVAL;
		goto free_job;
	}

	goto done;

free_job:
	host1x_job_put(job);
	job = ERR_PTR(err);

done:
	kvfree(cmds);

	return job;
}

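/*
 * Job release callback: drop the memory context and mapping references held
 * by the job and allow the engine to runtime-suspend again.
 */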
static void release_job(struct host1x_job *job)
{
	struct tegra_drm_client *client = container_of(job->client, struct tegra_drm_client, base);
	struct tegra_drm_submit_data *job_data = job->user_data;
	u32 i;

	if (job->memory_context)
		host1x_memory_context_put(job->memory_context);

	for (i = 0; i < job_data->num_used_mappings; i++)
		tegra_drm_mapping_put(job_data->used_mappings[i].mapping);

	kfree(job_data->used_mappings);
	kfree(job_data);

	pm_runtime_mark_last_busy(client->base.dev);
	pm_runtime_put_autosuspend(client->base.dev);
}

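/*
 * IOCTL handler for channel job submission: waits for an optional input
 * syncobj, copies in gather data, buffer mappings and commands, builds and
 * pins a host1x job, powers up the engine, submits the job and returns the
 * postfence through args->syncpt.value and the optional output syncobj.
 */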
int tegra_drm_ioctl_channel_submit(struct drm_device *drm, void *data,
				   struct drm_file *file)
{
	struct tegra_drm_file *fpriv = file->driver_priv;
	struct drm_tegra_channel_submit *args = data;
	struct tegra_drm_submit_data *job_data;
	struct drm_syncobj *syncobj = NULL;
	struct tegra_drm_context *context;
	struct host1x_job *job;
	struct gather_bo *bo;
	u32 i;
	int err;

	mutex_lock(&fpriv->lock);

	context = xa_load(&fpriv->contexts, args->context);
	if (!context) {
		mutex_unlock(&fpriv->lock);
		pr_err_ratelimited("%s: %s: invalid channel context '%#x'", __func__,
				   current->comm, args->context);
		return -EINVAL;
	}

	if (args->syncobj_in) {
		struct dma_fence *fence;

		err = drm_syncobj_find_fence(file, args->syncobj_in, 0, 0, &fence);
		if (err) {
			SUBMIT_ERR(context, "invalid syncobj_in '%#x'", args->syncobj_in);
			goto unlock;
		}

		err = dma_fence_wait_timeout(fence, true, msecs_to_jiffies(10000));
		dma_fence_put(fence);
		if (err) {
			SUBMIT_ERR(context, "wait for syncobj_in timed out");
			goto unlock;
		}
	}

	if (args->syncobj_out) {
		syncobj = drm_syncobj_find(file, args->syncobj_out);
		if (!syncobj) {
			SUBMIT_ERR(context, "invalid syncobj_out '%#x'", args->syncobj_out);
			err = -ENOENT;
			goto unlock;
		}
	}

	/* Allocate gather BO and copy gather words in. */
	err = submit_copy_gather_data(&bo, drm->dev, context, args);
	if (err)
		goto unlock;

	job_data = kzalloc(sizeof(*job_data), GFP_KERNEL);
	if (!job_data) {
		SUBMIT_ERR(context, "failed to allocate memory for job data");
		err = -ENOMEM;
		goto put_bo;
	}

	/* Get data buffer mappings and do relocation patching. */
	err = submit_process_bufs(context, bo, args, job_data);
	if (err)
		goto free_job_data;

	/* Allocate host1x_job and add gathers and waits to it. */
	job = submit_create_job(context, bo, args, job_data, &fpriv->syncpoints);
	if (IS_ERR(job)) {
		err = PTR_ERR(job);
		goto free_job_data;
	}

	/* Map gather data for Host1x. */
	err = host1x_job_pin(job, context->client->base.dev);
	if (err) {
		SUBMIT_ERR(context, "failed to pin job: %d", err);
		goto put_job;
	}

	if (context->client->ops->get_streamid_offset) {
		err = context->client->ops->get_streamid_offset(
			context->client, &job->engine_streamid_offset);
		if (err) {
			SUBMIT_ERR(context, "failed to get streamid offset: %d", err);
			goto unpin_job;
		}
	}

	if (context->memory_context && context->client->ops->can_use_memory_ctx) {
		bool supported;

		err = context->client->ops->can_use_memory_ctx(context->client, &supported);
		if (err) {
			SUBMIT_ERR(context, "failed to detect if engine can use memory context: %d", err);
			goto unpin_job;
		}

		if (supported) {
			job->memory_context = context->memory_context;
			host1x_memory_context_get(job->memory_context);
		}
	} else if (context->client->ops->get_streamid_offset) {
		/*
		 * Job submission will need to temporarily change stream ID,
		 * so need to tell it what to change it back to.
		 */
		if (!tegra_dev_iommu_get_stream_id(context->client->base.dev,
						   &job->engine_fallback_streamid))
			job->engine_fallback_streamid = TEGRA_STREAM_ID_BYPASS;
	}

	/* Boot engine. */
	err = pm_runtime_resume_and_get(context->client->base.dev);
	if (err < 0) {
		SUBMIT_ERR(context, "could not power up engine: %d", err);
		goto put_memory_context;
	}

	job->user_data = job_data;
	job->release = release_job;
	job->timeout = 10000;

	/*
	 * job_data is now part of job reference counting, so don't release
	 * it from here.
	 */
	job_data = NULL;

	/* Submit job to hardware. */
	err = host1x_job_submit(job);
	if (err) {
		SUBMIT_ERR(context, "host1x job submission failed: %d", err);
		goto unpin_job;
	}

	/* Return postfences to userspace and add fences to DMA reservations. */
	args->syncpt.value = job->syncpt_end;

	if (syncobj) {
		struct dma_fence *fence = host1x_fence_create(job->syncpt, job->syncpt_end, true);
		if (IS_ERR(fence)) {
			err = PTR_ERR(fence);
			SUBMIT_ERR(context, "failed to create postfence: %d", err);
		}

		drm_syncobj_replace_fence(syncobj, fence);
	}

	goto put_job;

put_memory_context:
	if (job->memory_context)
		host1x_memory_context_put(job->memory_context);
unpin_job:
	host1x_job_unpin(job);
put_job:
	host1x_job_put(job);
free_job_data:
	if (job_data && job_data->used_mappings) {
		for (i = 0; i < job_data->num_used_mappings; i++)
			tegra_drm_mapping_put(job_data->used_mappings[i].mapping);

		kfree(job_data->used_mappings);
	}

	kfree(job_data);
put_bo:
	gather_bo_put(&bo->base);
unlock:
	if (syncobj)
		drm_syncobj_put(syncobj);

	mutex_unlock(&fpriv->lock);
	return err;
}