nvdec.c 13 KB

123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163164165166167168169170171172173174175176177178179180181182183184185186187188189190191192193194195196197198199200201202203204205206207208209210211212213214215216217218219220221222223224225226227228229230231232233234235236237238239240241242243244245246247248249250251252253254255256257258259260261262263264265266267268269270271272273274275276277278279280281282283284285286287288289290291292293294295296297298299300301302303304305306307308309310311312313314315316317318319320321322323324325326327328329330331332333334335336337338339340341342343344345346347348349350351352353354355356357358359360361362363364365366367368369370371372373374375376377378379380381382383384385386387388389390391392393394395396397398399400401402403404405406407408409410411412413414415416417418419420421422423424425426427428429430431432433434435436437438439440441442443444445446447448449450451452453454455456457458459460461462463464465466467468469470471472473474475476477478479480481482483484485486487488489490491492493494495496497498499500501502503504505506507508509510511512513514515516517518519520521522523524525526527528529530531532533534535536537538539540541542543544545546547548549550551552553554555556557558559560561562563564565566567568569570571572573574575576577578
  1. // SPDX-License-Identifier: GPL-2.0-only
  2. /*
  3. * Copyright (c) 2015-2022, NVIDIA Corporation.
  4. */
  5. #include <linux/clk.h>
  6. #include <linux/delay.h>
  7. #include <linux/dma-mapping.h>
  8. #include <linux/host1x.h>
  9. #include <linux/iommu.h>
  10. #include <linux/iopoll.h>
  11. #include <linux/module.h>
  12. #include <linux/of.h>
  13. #include <linux/platform_device.h>
  14. #include <linux/pm_runtime.h>
  15. #include <linux/reset.h>
  16. #include <soc/tegra/mc.h>
  17. #include "drm.h"
  18. #include "falcon.h"
  19. #include "riscv.h"
  20. #include "vic.h"
  21. #define NVDEC_FALCON_DEBUGINFO 0x1094
  22. #define NVDEC_TFBIF_TRANSCFG 0x2c44
/*
 * Per-SoC configuration for the NVDEC video decoder engine, selected via
 * the OF match table.
 */
struct nvdec_config {
	/* Falcon firmware image name (unset on RISC-V based parts) */
	const char *firmware;
	/* version reported through the tegra_drm client */
	unsigned int version;
	/* true if THI stream ID / TFBIF registers must be programmed */
	bool supports_sid;
	/* true if the engine is driven by a RISC-V core instead of Falcon */
	bool has_riscv;
	/* true if the "fuse" and "tsec_pka" clocks are needed as well */
	bool has_extra_clocks;
};
/* Per-device driver state for one NVDEC instance. */
struct nvdec {
	/* Falcon microcontroller state (unused on RISC-V based parts) */
	struct falcon falcon;

	void __iomem *regs;
	struct tegra_drm_client client;
	struct host1x_channel *channel;
	struct device *dev;
	/* up to three clocks: "nvdec" plus optional "fuse"/"tsec_pka" */
	struct clk_bulk_data clks[3];
	unsigned int num_clks;
	struct reset_control *reset;

	/* Platform configuration */
	const struct nvdec_config *config;

	/* RISC-V specific data */
	struct tegra_drm_riscv riscv;
	/* base of the secure carveout the RISC-V firmware boots from */
	phys_addr_t carveout_base;
};
/* Resolve an embedded tegra_drm_client back to its containing nvdec. */
static inline struct nvdec *to_nvdec(struct tegra_drm_client *client)
{
	return container_of(client, struct nvdec, client);
}
/* Write a 32-bit value to an NVDEC register at the given byte offset. */
static inline void nvdec_writel(struct nvdec *nvdec, u32 value,
				unsigned int offset)
{
	writel(value, nvdec->regs + offset);
}
/*
 * Boot the Falcon microcontroller and wait for it to go idle.
 *
 * If the SoC supports stream IDs and an IOMMU stream ID is available for
 * the device, the TFBIF transfer configuration and both THI stream ID
 * registers are programmed first so that DMA issued by the firmware is
 * translated through the SMMU.
 *
 * Returns 0 on success or a negative error code on failure.
 */
static int nvdec_boot_falcon(struct nvdec *nvdec)
{
	u32 stream_id;
	int err;

	if (nvdec->config->supports_sid && tegra_dev_iommu_get_stream_id(nvdec->dev, &stream_id)) {
		u32 value;

		/* slot 1 carries the Falcon stream ID, slot 0 the HW stream ID */
		value = TRANSCFG_ATT(1, TRANSCFG_SID_FALCON) | TRANSCFG_ATT(0, TRANSCFG_SID_HW);
		nvdec_writel(nvdec, value, NVDEC_TFBIF_TRANSCFG);

		nvdec_writel(nvdec, stream_id, VIC_THI_STREAMID0);
		nvdec_writel(nvdec, stream_id, VIC_THI_STREAMID1);
	}

	err = falcon_boot(&nvdec->falcon);
	if (err < 0)
		return err;

	err = falcon_wait_idle(&nvdec->falcon);
	if (err < 0) {
		dev_err(nvdec->dev, "falcon boot timed out\n");
		return err;
	}

	return 0;
}
  75. static int nvdec_wait_debuginfo(struct nvdec *nvdec, const char *phase)
  76. {
  77. int err;
  78. u32 val;
  79. err = readl_poll_timeout(nvdec->regs + NVDEC_FALCON_DEBUGINFO, val, val == 0x0, 10, 100000);
  80. if (err) {
  81. dev_err(nvdec->dev, "failed to boot %s, debuginfo=0x%x\n", phase, val);
  82. return err;
  83. }
  84. return 0;
  85. }
/*
 * Boot the RISC-V core in two stages, both started through the BootROM
 * from the secure carveout: first the bootloader, then the OS firmware.
 *
 * Before each stage a magic value is written into the DEBUGINFO scratch
 * register; the stage is considered booted once it has cleared that
 * register back to zero (polled by nvdec_wait_debuginfo()). The engine
 * is reset between the two stages.
 *
 * The reset control is held acquired for the whole sequence and released
 * on every exit path — on success err is 0 and control simply falls
 * through to the release label.
 */
static int nvdec_boot_riscv(struct nvdec *nvdec)
{
	int err;

	err = reset_control_acquire(nvdec->reset);
	if (err)
		return err;

	nvdec_writel(nvdec, 0xabcd1234, NVDEC_FALCON_DEBUGINFO);

	err = tegra_drm_riscv_boot_bootrom(&nvdec->riscv, nvdec->carveout_base, 1,
					   &nvdec->riscv.bl_desc);
	if (err) {
		dev_err(nvdec->dev, "failed to execute bootloader\n");
		goto release_reset;
	}

	err = nvdec_wait_debuginfo(nvdec, "bootloader");
	if (err)
		goto release_reset;

	/* reset the engine before handing control to the OS firmware */
	err = reset_control_reset(nvdec->reset);
	if (err)
		goto release_reset;

	nvdec_writel(nvdec, 0xabcd1234, NVDEC_FALCON_DEBUGINFO);

	err = tegra_drm_riscv_boot_bootrom(&nvdec->riscv, nvdec->carveout_base, 1,
					   &nvdec->riscv.os_desc);
	if (err) {
		dev_err(nvdec->dev, "failed to execute firmware\n");
		goto release_reset;
	}

	err = nvdec_wait_debuginfo(nvdec, "firmware");
	if (err)
		goto release_reset;

release_reset:
	reset_control_release(nvdec->reset);

	return err;
}
/*
 * host1x client .init callback: attach to the IOMMU domain, request a
 * channel and a syncpoint, and register with the Tegra DRM core.
 *
 * Resources are unwound in reverse order via the goto chain on failure.
 * Returns 0 on success or a negative error code.
 */
static int nvdec_init(struct host1x_client *client)
{
	struct tegra_drm_client *drm = host1x_to_drm_client(client);
	struct drm_device *dev = dev_get_drvdata(client->host);
	struct tegra_drm *tegra = dev->dev_private;
	struct nvdec *nvdec = to_nvdec(drm);
	int err;

	/* -ENODEV means no IOMMU is available, which is tolerated */
	err = host1x_client_iommu_attach(client);
	if (err < 0 && err != -ENODEV) {
		dev_err(nvdec->dev, "failed to attach to domain: %d\n", err);
		return err;
	}

	nvdec->channel = host1x_channel_request(client);
	if (!nvdec->channel) {
		err = -ENOMEM;
		goto detach;
	}

	client->syncpts[0] = host1x_syncpt_request(client, 0);
	if (!client->syncpts[0]) {
		err = -ENOMEM;
		goto free_channel;
	}

	err = tegra_drm_register_client(tegra, drm);
	if (err < 0)
		goto free_syncpt;

	/*
	 * Inherit the DMA parameters (such as maximum segment size) from the
	 * parent host1x device.
	 */
	client->dev->dma_parms = client->host->dma_parms;

	return 0;

free_syncpt:
	host1x_syncpt_put(client->syncpts[0]);
free_channel:
	host1x_channel_put(nvdec->channel);
detach:
	host1x_client_iommu_detach(client);

	return err;
}
/*
 * host1x client .exit callback: mirror image of nvdec_init() plus
 * firmware teardown. Unregisters from the Tegra DRM core, forces the
 * device into suspend, releases the syncpoint/channel/IOMMU attachment
 * and frees the Falcon firmware buffer using whichever allocator
 * nvdec_load_falcon_firmware() used (shared IOMMU group vs. coherent DMA).
 */
static int nvdec_exit(struct host1x_client *client)
{
	struct tegra_drm_client *drm = host1x_to_drm_client(client);
	struct drm_device *dev = dev_get_drvdata(client->host);
	struct tegra_drm *tegra = dev->dev_private;
	struct nvdec *nvdec = to_nvdec(drm);
	int err;

	/* avoid a dangling pointer just in case this disappears */
	client->dev->dma_parms = NULL;

	err = tegra_drm_unregister_client(tegra, drm);
	if (err < 0)
		return err;

	pm_runtime_dont_use_autosuspend(client->dev);
	pm_runtime_force_suspend(client->dev);

	host1x_syncpt_put(client->syncpts[0]);
	host1x_channel_put(nvdec->channel);
	host1x_client_iommu_detach(client);

	nvdec->channel = NULL;

	if (client->group) {
		/* shared domain: firmware was additionally DMA-mapped for cache maintenance */
		dma_unmap_single(nvdec->dev, nvdec->falcon.firmware.phys,
				 nvdec->falcon.firmware.size, DMA_TO_DEVICE);
		tegra_drm_free(tegra, nvdec->falcon.firmware.size,
			       nvdec->falcon.firmware.virt,
			       nvdec->falcon.firmware.iova);
	} else {
		dma_free_coherent(nvdec->dev, nvdec->falcon.firmware.size,
				  nvdec->falcon.firmware.virt,
				  nvdec->falcon.firmware.iova);
	}

	return 0;
}
/* host1x client lifecycle hooks, invoked by the host1x bus core. */
static const struct host1x_client_ops nvdec_client_ops = {
	.init = nvdec_init,
	.exit = nvdec_exit,
};
/*
 * Read the Falcon firmware image from the filesystem and place it into a
 * DMA-able buffer. Idempotent: returns immediately if firmware was
 * already loaded.
 *
 * Two allocation strategies are used: when the client is part of a
 * shared IOMMU group the buffer comes from the Tegra DRM allocator
 * (IOVA in the shared domain), otherwise plain coherent DMA memory is
 * allocated directly from the device.
 *
 * Returns 0 on success or a negative error code.
 */
static int nvdec_load_falcon_firmware(struct nvdec *nvdec)
{
	struct host1x_client *client = &nvdec->client.base;
	struct tegra_drm *tegra = nvdec->client.drm;
	dma_addr_t iova;
	size_t size;
	void *virt;
	int err;

	/* already loaded on a previous resume */
	if (nvdec->falcon.firmware.virt)
		return 0;

	err = falcon_read_firmware(&nvdec->falcon, nvdec->config->firmware);
	if (err < 0)
		return err;

	size = nvdec->falcon.firmware.size;

	if (!client->group) {
		virt = dma_alloc_coherent(nvdec->dev, size, &iova, GFP_KERNEL);
		if (!virt)
			return -ENOMEM;
	} else {
		virt = tegra_drm_alloc(tegra, size, &iova);
		if (IS_ERR(virt))
			return PTR_ERR(virt);
	}

	nvdec->falcon.firmware.virt = virt;
	nvdec->falcon.firmware.iova = iova;

	err = falcon_load_firmware(&nvdec->falcon);
	if (err < 0)
		goto cleanup;

	/*
	 * In this case we have received an IOVA from the shared domain, so we
	 * need to make sure to get the physical address so that the DMA API
	 * knows what memory pages to flush the cache for.
	 */
	if (client->group) {
		dma_addr_t phys;

		phys = dma_map_single(nvdec->dev, virt, size, DMA_TO_DEVICE);

		err = dma_mapping_error(nvdec->dev, phys);
		if (err < 0)
			goto cleanup;

		nvdec->falcon.firmware.phys = phys;
	}

	return 0;

cleanup:
	/* free with the same allocator that was used above */
	if (!client->group)
		dma_free_coherent(nvdec->dev, size, virt, iova);
	else
		tegra_drm_free(tegra, size, virt, iova);

	return err;
}
/*
 * Runtime PM resume: enable the clocks and (re)boot the engine firmware —
 * either the two-stage RISC-V boot or the Falcon load-and-boot path,
 * depending on the SoC configuration. Clocks are disabled again if the
 * boot fails.
 */
static __maybe_unused int nvdec_runtime_resume(struct device *dev)
{
	struct nvdec *nvdec = dev_get_drvdata(dev);
	int err;

	err = clk_bulk_prepare_enable(nvdec->num_clks, nvdec->clks);
	if (err < 0)
		return err;

	/* brief settle time after enabling clocks before touching the engine */
	usleep_range(10, 20);

	if (nvdec->config->has_riscv) {
		err = nvdec_boot_riscv(nvdec);
		if (err < 0)
			goto disable;
	} else {
		err = nvdec_load_falcon_firmware(nvdec);
		if (err < 0)
			goto disable;

		err = nvdec_boot_falcon(nvdec);
		if (err < 0)
			goto disable;
	}

	return 0;

disable:
	clk_bulk_disable_unprepare(nvdec->num_clks, nvdec->clks);
	return err;
}
/*
 * Runtime PM suspend: stop the host1x channel before gating the clocks so
 * no work is in flight when the engine loses its clock.
 */
static __maybe_unused int nvdec_runtime_suspend(struct device *dev)
{
	struct nvdec *nvdec = dev_get_drvdata(dev);

	host1x_channel_stop(nvdec->channel);

	clk_bulk_disable_unprepare(nvdec->num_clks, nvdec->clks);

	return 0;
}
  274. static int nvdec_open_channel(struct tegra_drm_client *client,
  275. struct tegra_drm_context *context)
  276. {
  277. struct nvdec *nvdec = to_nvdec(client);
  278. context->channel = host1x_channel_get(nvdec->channel);
  279. if (!context->channel)
  280. return -ENOMEM;
  281. return 0;
  282. }
/* Tegra DRM client .close_channel callback: drop the context's channel reference. */
static void nvdec_close_channel(struct tegra_drm_context *context)
{
	host1x_channel_put(context->channel);
}
/* NVDEC always supports memory contexts, regardless of configuration. */
static int nvdec_can_use_memory_ctx(struct tegra_drm_client *client, bool *supported)
{
	*supported = true;

	return 0;
}
/* Tegra DRM client operations; job submission uses the generic path. */
static const struct tegra_drm_client_ops nvdec_ops = {
	.open_channel = nvdec_open_channel,
	.close_channel = nvdec_close_channel,
	.submit = tegra_drm_submit,
	.get_streamid_offset = tegra_drm_get_streamid_offset_thi,
	.can_use_memory_ctx = nvdec_can_use_memory_ctx,
};
#define NVIDIA_TEGRA_210_NVDEC_FIRMWARE "nvidia/tegra210/nvdec.bin"

/* Tegra210: Falcon-based, no stream ID support */
static const struct nvdec_config nvdec_t210_config = {
	.firmware = NVIDIA_TEGRA_210_NVDEC_FIRMWARE,
	.version = 0x21,
	.supports_sid = false,
};

#define NVIDIA_TEGRA_186_NVDEC_FIRMWARE "nvidia/tegra186/nvdec.bin"

/* Tegra186: Falcon-based with stream ID support */
static const struct nvdec_config nvdec_t186_config = {
	.firmware = NVIDIA_TEGRA_186_NVDEC_FIRMWARE,
	.version = 0x18,
	.supports_sid = true,
};

#define NVIDIA_TEGRA_194_NVDEC_FIRMWARE "nvidia/tegra194/nvdec.bin"

/* Tegra194: Falcon-based with stream ID support */
static const struct nvdec_config nvdec_t194_config = {
	.firmware = NVIDIA_TEGRA_194_NVDEC_FIRMWARE,
	.version = 0x19,
	.supports_sid = true,
};

/*
 * Tegra234: RISC-V based. No .firmware entry — the firmware is located
 * through descriptors read by tegra_drm_riscv_read_descriptors() at
 * probe time instead of a named Falcon image.
 */
static const struct nvdec_config nvdec_t234_config = {
	.version = 0x23,
	.supports_sid = true,
	.has_riscv = true,
	.has_extra_clocks = true,
};

static const struct of_device_id tegra_nvdec_of_match[] = {
	{ .compatible = "nvidia,tegra210-nvdec", .data = &nvdec_t210_config },
	{ .compatible = "nvidia,tegra186-nvdec", .data = &nvdec_t186_config },
	{ .compatible = "nvidia,tegra194-nvdec", .data = &nvdec_t194_config },
	{ .compatible = "nvidia,tegra234-nvdec", .data = &nvdec_t234_config },
	{ },
};
MODULE_DEVICE_TABLE(of, tegra_nvdec_of_match);
  331. static int nvdec_probe(struct platform_device *pdev)
  332. {
  333. struct device *dev = &pdev->dev;
  334. struct host1x_syncpt **syncpts;
  335. struct nvdec *nvdec;
  336. u32 host_class;
  337. int err;
  338. /* inherit DMA mask from host1x parent */
  339. err = dma_coerce_mask_and_coherent(dev, *dev->parent->dma_mask);
  340. if (err < 0) {
  341. dev_err(&pdev->dev, "failed to set DMA mask: %d\n", err);
  342. return err;
  343. }
  344. nvdec = devm_kzalloc(dev, sizeof(*nvdec), GFP_KERNEL);
  345. if (!nvdec)
  346. return -ENOMEM;
  347. nvdec->config = of_device_get_match_data(dev);
  348. syncpts = devm_kzalloc(dev, sizeof(*syncpts), GFP_KERNEL);
  349. if (!syncpts)
  350. return -ENOMEM;
  351. nvdec->regs = devm_platform_get_and_ioremap_resource(pdev, 0, NULL);
  352. if (IS_ERR(nvdec->regs))
  353. return PTR_ERR(nvdec->regs);
  354. nvdec->clks[0].id = "nvdec";
  355. nvdec->num_clks = 1;
  356. if (nvdec->config->has_extra_clocks) {
  357. nvdec->num_clks = 3;
  358. nvdec->clks[1].id = "fuse";
  359. nvdec->clks[2].id = "tsec_pka";
  360. }
  361. err = devm_clk_bulk_get(dev, nvdec->num_clks, nvdec->clks);
  362. if (err) {
  363. dev_err(&pdev->dev, "failed to get clock(s)\n");
  364. return err;
  365. }
  366. err = clk_set_rate(nvdec->clks[0].clk, ULONG_MAX);
  367. if (err < 0) {
  368. dev_err(&pdev->dev, "failed to set clock rate\n");
  369. return err;
  370. }
  371. err = of_property_read_u32(dev->of_node, "nvidia,host1x-class", &host_class);
  372. if (err < 0)
  373. host_class = HOST1X_CLASS_NVDEC;
  374. if (nvdec->config->has_riscv) {
  375. struct tegra_mc *mc;
  376. mc = devm_tegra_memory_controller_get(dev);
  377. if (IS_ERR(mc)) {
  378. dev_err_probe(dev, PTR_ERR(mc),
  379. "failed to get memory controller handle\n");
  380. return PTR_ERR(mc);
  381. }
  382. err = tegra_mc_get_carveout_info(mc, 1, &nvdec->carveout_base, NULL);
  383. if (err) {
  384. dev_err(dev, "failed to get carveout info: %d\n", err);
  385. return err;
  386. }
  387. nvdec->reset = devm_reset_control_get_exclusive_released(dev, "nvdec");
  388. if (IS_ERR(nvdec->reset)) {
  389. dev_err_probe(dev, PTR_ERR(nvdec->reset), "failed to get reset\n");
  390. return PTR_ERR(nvdec->reset);
  391. }
  392. nvdec->riscv.dev = dev;
  393. nvdec->riscv.regs = nvdec->regs;
  394. err = tegra_drm_riscv_read_descriptors(&nvdec->riscv);
  395. if (err < 0)
  396. return err;
  397. } else {
  398. nvdec->falcon.dev = dev;
  399. nvdec->falcon.regs = nvdec->regs;
  400. err = falcon_init(&nvdec->falcon);
  401. if (err < 0)
  402. return err;
  403. }
  404. platform_set_drvdata(pdev, nvdec);
  405. INIT_LIST_HEAD(&nvdec->client.base.list);
  406. nvdec->client.base.ops = &nvdec_client_ops;
  407. nvdec->client.base.dev = dev;
  408. nvdec->client.base.class = host_class;
  409. nvdec->client.base.syncpts = syncpts;
  410. nvdec->client.base.num_syncpts = 1;
  411. nvdec->dev = dev;
  412. INIT_LIST_HEAD(&nvdec->client.list);
  413. nvdec->client.version = nvdec->config->version;
  414. nvdec->client.ops = &nvdec_ops;
  415. err = host1x_client_register(&nvdec->client.base);
  416. if (err < 0) {
  417. dev_err(dev, "failed to register host1x client: %d\n", err);
  418. goto exit_falcon;
  419. }
  420. pm_runtime_enable(dev);
  421. pm_runtime_use_autosuspend(dev);
  422. pm_runtime_set_autosuspend_delay(dev, 500);
  423. return 0;
  424. exit_falcon:
  425. falcon_exit(&nvdec->falcon);
  426. return err;
  427. }
/*
 * Unbind: disable runtime PM, then unregister the host1x client (which
 * triggers nvdec_exit()) and tear down the Falcon state.
 *
 * NOTE(review): falcon_exit() is called even on RISC-V parts where
 * falcon_init() never ran — presumably a no-op on a zero-initialized
 * falcon; confirm against falcon_exit()'s implementation.
 */
static void nvdec_remove(struct platform_device *pdev)
{
	struct nvdec *nvdec = platform_get_drvdata(pdev);

	pm_runtime_disable(&pdev->dev);
	host1x_client_unregister(&nvdec->client.base);
	falcon_exit(&nvdec->falcon);
}
/* Runtime PM drives firmware boot/teardown; system sleep defers to it. */
static const struct dev_pm_ops nvdec_pm_ops = {
	SET_RUNTIME_PM_OPS(nvdec_runtime_suspend, nvdec_runtime_resume, NULL)
	SET_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend,
				pm_runtime_force_resume)
};
/* Platform driver; non-static so the Tegra DRM core can register it. */
struct platform_driver tegra_nvdec_driver = {
	.driver = {
		.name = "tegra-nvdec",
		.of_match_table = tegra_nvdec_of_match,
		.pm = &nvdec_pm_ops
	},
	.probe = nvdec_probe,
	.remove_new = nvdec_remove,
};
  449. #if IS_ENABLED(CONFIG_ARCH_TEGRA_210_SOC)
  450. MODULE_FIRMWARE(NVIDIA_TEGRA_210_NVDEC_FIRMWARE);
  451. #endif
  452. #if IS_ENABLED(CONFIG_ARCH_TEGRA_186_SOC)
  453. MODULE_FIRMWARE(NVIDIA_TEGRA_186_NVDEC_FIRMWARE);
  454. #endif
  455. #if IS_ENABLED(CONFIG_ARCH_TEGRA_194_SOC)
  456. MODULE_FIRMWARE(NVIDIA_TEGRA_194_NVDEC_FIRMWARE);
  457. #endif