ivpu_jsm_msg.c

// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2020-2024 Intel Corporation
 */

#include "ivpu_drv.h"
#include "ivpu_hw.h"
#include "ivpu_ipc.h"
#include "ivpu_jsm_msg.h"
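
/* Map a JSM message type to a human-readable enum name. */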
const char *ivpu_jsm_msg_type_to_str(enum vpu_ipc_msg_type type)
{
	#define IVPU_CASE_TO_STR(x) case x: return #x
	switch (type) {
	IVPU_CASE_TO_STR(VPU_JSM_MSG_UNKNOWN);
	IVPU_CASE_TO_STR(VPU_JSM_MSG_ENGINE_RESET);
	IVPU_CASE_TO_STR(VPU_JSM_MSG_ENGINE_PREEMPT);
	IVPU_CASE_TO_STR(VPU_JSM_MSG_REGISTER_DB);
	IVPU_CASE_TO_STR(VPU_JSM_MSG_UNREGISTER_DB);
	IVPU_CASE_TO_STR(VPU_JSM_MSG_QUERY_ENGINE_HB);
	IVPU_CASE_TO_STR(VPU_JSM_MSG_GET_POWER_LEVEL_COUNT);
	IVPU_CASE_TO_STR(VPU_JSM_MSG_GET_POWER_LEVEL);
	IVPU_CASE_TO_STR(VPU_JSM_MSG_SET_POWER_LEVEL);
	IVPU_CASE_TO_STR(VPU_JSM_MSG_METRIC_STREAMER_OPEN);
	IVPU_CASE_TO_STR(VPU_JSM_MSG_METRIC_STREAMER_CLOSE);
	IVPU_CASE_TO_STR(VPU_JSM_MSG_TRACE_SET_CONFIG);
	IVPU_CASE_TO_STR(VPU_JSM_MSG_TRACE_GET_CONFIG);
	IVPU_CASE_TO_STR(VPU_JSM_MSG_TRACE_GET_CAPABILITY);
	IVPU_CASE_TO_STR(VPU_JSM_MSG_TRACE_GET_NAME);
	IVPU_CASE_TO_STR(VPU_JSM_MSG_SSID_RELEASE);
	IVPU_CASE_TO_STR(VPU_JSM_MSG_METRIC_STREAMER_START);
	IVPU_CASE_TO_STR(VPU_JSM_MSG_METRIC_STREAMER_STOP);
	IVPU_CASE_TO_STR(VPU_JSM_MSG_METRIC_STREAMER_UPDATE);
	IVPU_CASE_TO_STR(VPU_JSM_MSG_METRIC_STREAMER_INFO);
	IVPU_CASE_TO_STR(VPU_JSM_MSG_SET_PRIORITY_BAND_SETUP);
	IVPU_CASE_TO_STR(VPU_JSM_MSG_CREATE_CMD_QUEUE);
	IVPU_CASE_TO_STR(VPU_JSM_MSG_DESTROY_CMD_QUEUE);
	IVPU_CASE_TO_STR(VPU_JSM_MSG_SET_CONTEXT_SCHED_PROPERTIES);
	IVPU_CASE_TO_STR(VPU_JSM_MSG_HWS_REGISTER_DB);
	IVPU_CASE_TO_STR(VPU_JSM_MSG_HWS_RESUME_CMDQ);
	IVPU_CASE_TO_STR(VPU_JSM_MSG_HWS_SUSPEND_CMDQ);
	IVPU_CASE_TO_STR(VPU_JSM_MSG_HWS_RESUME_CMDQ_RSP);
	IVPU_CASE_TO_STR(VPU_JSM_MSG_HWS_SUSPEND_CMDQ_DONE);
	IVPU_CASE_TO_STR(VPU_JSM_MSG_HWS_SET_SCHEDULING_LOG);
	IVPU_CASE_TO_STR(VPU_JSM_MSG_HWS_SET_SCHEDULING_LOG_RSP);
	IVPU_CASE_TO_STR(VPU_JSM_MSG_HWS_SCHEDULING_LOG_NOTIFICATION);
	IVPU_CASE_TO_STR(VPU_JSM_MSG_HWS_ENGINE_RESUME);
	IVPU_CASE_TO_STR(VPU_JSM_MSG_HWS_RESUME_ENGINE_DONE);
	IVPU_CASE_TO_STR(VPU_JSM_MSG_STATE_DUMP);
	IVPU_CASE_TO_STR(VPU_JSM_MSG_STATE_DUMP_RSP);
	IVPU_CASE_TO_STR(VPU_JSM_MSG_BLOB_DEINIT);
	IVPU_CASE_TO_STR(VPU_JSM_MSG_DYNDBG_CONTROL);
	IVPU_CASE_TO_STR(VPU_JSM_MSG_JOB_DONE);
	IVPU_CASE_TO_STR(VPU_JSM_MSG_ENGINE_RESET_DONE);
	IVPU_CASE_TO_STR(VPU_JSM_MSG_ENGINE_PREEMPT_DONE);
	IVPU_CASE_TO_STR(VPU_JSM_MSG_REGISTER_DB_DONE);
	IVPU_CASE_TO_STR(VPU_JSM_MSG_UNREGISTER_DB_DONE);
	IVPU_CASE_TO_STR(VPU_JSM_MSG_QUERY_ENGINE_HB_DONE);
	IVPU_CASE_TO_STR(VPU_JSM_MSG_GET_POWER_LEVEL_COUNT_DONE);
	IVPU_CASE_TO_STR(VPU_JSM_MSG_GET_POWER_LEVEL_DONE);
	IVPU_CASE_TO_STR(VPU_JSM_MSG_SET_POWER_LEVEL_DONE);
	IVPU_CASE_TO_STR(VPU_JSM_MSG_METRIC_STREAMER_OPEN_DONE);
	IVPU_CASE_TO_STR(VPU_JSM_MSG_METRIC_STREAMER_CLOSE_DONE);
	IVPU_CASE_TO_STR(VPU_JSM_MSG_TRACE_SET_CONFIG_RSP);
	IVPU_CASE_TO_STR(VPU_JSM_MSG_TRACE_GET_CONFIG_RSP);
	IVPU_CASE_TO_STR(VPU_JSM_MSG_TRACE_GET_CAPABILITY_RSP);
	IVPU_CASE_TO_STR(VPU_JSM_MSG_TRACE_GET_NAME_RSP);
	IVPU_CASE_TO_STR(VPU_JSM_MSG_SSID_RELEASE_DONE);
	IVPU_CASE_TO_STR(VPU_JSM_MSG_METRIC_STREAMER_START_DONE);
	IVPU_CASE_TO_STR(VPU_JSM_MSG_METRIC_STREAMER_STOP_DONE);
	IVPU_CASE_TO_STR(VPU_JSM_MSG_METRIC_STREAMER_UPDATE_DONE);
	IVPU_CASE_TO_STR(VPU_JSM_MSG_METRIC_STREAMER_INFO_DONE);
	IVPU_CASE_TO_STR(VPU_JSM_MSG_METRIC_STREAMER_NOTIFICATION);
	IVPU_CASE_TO_STR(VPU_JSM_MSG_SET_PRIORITY_BAND_SETUP_RSP);
	IVPU_CASE_TO_STR(VPU_JSM_MSG_CREATE_CMD_QUEUE_RSP);
	IVPU_CASE_TO_STR(VPU_JSM_MSG_DESTROY_CMD_QUEUE_RSP);
	IVPU_CASE_TO_STR(VPU_JSM_MSG_SET_CONTEXT_SCHED_PROPERTIES_RSP);
	IVPU_CASE_TO_STR(VPU_JSM_MSG_BLOB_DEINIT_DONE);
	IVPU_CASE_TO_STR(VPU_JSM_MSG_DYNDBG_CONTROL_RSP);
	IVPU_CASE_TO_STR(VPU_JSM_MSG_PWR_D0I3_ENTER);
	IVPU_CASE_TO_STR(VPU_JSM_MSG_PWR_D0I3_ENTER_DONE);
	IVPU_CASE_TO_STR(VPU_JSM_MSG_DCT_ENABLE);
	IVPU_CASE_TO_STR(VPU_JSM_MSG_DCT_ENABLE_DONE);
	IVPU_CASE_TO_STR(VPU_JSM_MSG_DCT_DISABLE);
	IVPU_CASE_TO_STR(VPU_JSM_MSG_DCT_DISABLE_DONE);
	}
	#undef IVPU_CASE_TO_STR

	return "Unknown JSM message type";
}
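
/* Register doorbell @db_id for context @ctx_id and bind it to the job queue at @jobq_base. */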
int ivpu_jsm_register_db(struct ivpu_device *vdev, u32 ctx_id, u32 db_id,
			 u64 jobq_base, u32 jobq_size)
{
	struct vpu_jsm_msg req = { .type = VPU_JSM_MSG_REGISTER_DB };
	struct vpu_jsm_msg resp;
	int ret = 0;

	req.payload.register_db.db_idx = db_id;
	req.payload.register_db.jobq_base = jobq_base;
	req.payload.register_db.jobq_size = jobq_size;
	req.payload.register_db.host_ssid = ctx_id;

	ret = ivpu_ipc_send_receive(vdev, &req, VPU_JSM_MSG_REGISTER_DB_DONE, &resp,
				    VPU_IPC_CHAN_ASYNC_CMD, vdev->timeout.jsm);
	if (ret)
		ivpu_err_ratelimited(vdev, "Failed to register doorbell %u: %d\n", db_id, ret);

	return ret;
}
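
/* Unregister a previously registered doorbell. */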
int ivpu_jsm_unregister_db(struct ivpu_device *vdev, u32 db_id)
{
	struct vpu_jsm_msg req = { .type = VPU_JSM_MSG_UNREGISTER_DB };
	struct vpu_jsm_msg resp;
	int ret = 0;

	req.payload.unregister_db.db_idx = db_id;

	ret = ivpu_ipc_send_receive(vdev, &req, VPU_JSM_MSG_UNREGISTER_DB_DONE, &resp,
				    VPU_IPC_CHAN_ASYNC_CMD, vdev->timeout.jsm);
	if (ret)
		ivpu_warn_ratelimited(vdev, "Failed to unregister doorbell %u: %d\n", db_id, ret);

	return ret;
}
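
/* Read the heartbeat counter of @engine from the FW reply into @heartbeat. */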
int ivpu_jsm_get_heartbeat(struct ivpu_device *vdev, u32 engine, u64 *heartbeat)
{
	struct vpu_jsm_msg req = { .type = VPU_JSM_MSG_QUERY_ENGINE_HB };
	struct vpu_jsm_msg resp;
	int ret;

	if (engine > VPU_ENGINE_COPY)
		return -EINVAL;

	req.payload.query_engine_hb.engine_idx = engine;

	ret = ivpu_ipc_send_receive(vdev, &req, VPU_JSM_MSG_QUERY_ENGINE_HB_DONE, &resp,
				    VPU_IPC_CHAN_ASYNC_CMD, vdev->timeout.jsm);
	if (ret) {
		ivpu_err_ratelimited(vdev, "Failed to get heartbeat from engine %d: %d\n",
				     engine, ret);
		return ret;
	}

	*heartbeat = resp.payload.query_engine_hb_done.heartbeat;
	return ret;
}
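
/* Request a reset of @engine and wait for the ENGINE_RESET_DONE reply. */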
int ivpu_jsm_reset_engine(struct ivpu_device *vdev, u32 engine)
{
	struct vpu_jsm_msg req = { .type = VPU_JSM_MSG_ENGINE_RESET };
	struct vpu_jsm_msg resp;
	int ret;

	if (engine > VPU_ENGINE_COPY)
		return -EINVAL;

	req.payload.engine_reset.engine_idx = engine;

	ret = ivpu_ipc_send_receive(vdev, &req, VPU_JSM_MSG_ENGINE_RESET_DONE, &resp,
				    VPU_IPC_CHAN_ASYNC_CMD, vdev->timeout.jsm);
	if (ret)
		ivpu_err_ratelimited(vdev, "Failed to reset engine %d: %d\n", engine, ret);

	return ret;
}
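
/* Preempt @engine; @preempt_id tags this preemption request. */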
int ivpu_jsm_preempt_engine(struct ivpu_device *vdev, u32 engine, u32 preempt_id)
{
	struct vpu_jsm_msg req = { .type = VPU_JSM_MSG_ENGINE_PREEMPT };
	struct vpu_jsm_msg resp;
	int ret;

	if (engine > VPU_ENGINE_COPY)
		return -EINVAL;

	req.payload.engine_preempt.engine_idx = engine;
	req.payload.engine_preempt.preempt_id = preempt_id;

	ret = ivpu_ipc_send_receive(vdev, &req, VPU_JSM_MSG_ENGINE_PREEMPT_DONE, &resp,
				    VPU_IPC_CHAN_ASYNC_CMD, vdev->timeout.jsm);
	if (ret)
		ivpu_err_ratelimited(vdev, "Failed to preempt engine %d: %d\n", engine, ret);

	return ret;
}
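
/* Pass a dynamic-debug command string to the FW; strscpy() truncates it to VPU_DYNDBG_CMD_MAX_LEN. */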
int ivpu_jsm_dyndbg_control(struct ivpu_device *vdev, char *command, size_t size)
{
	struct vpu_jsm_msg req = { .type = VPU_JSM_MSG_DYNDBG_CONTROL };
	struct vpu_jsm_msg resp;
	int ret;

	strscpy(req.payload.dyndbg_control.dyndbg_cmd, command, VPU_DYNDBG_CMD_MAX_LEN);

	ret = ivpu_ipc_send_receive(vdev, &req, VPU_JSM_MSG_DYNDBG_CONTROL_RSP, &resp,
				    VPU_IPC_CHAN_ASYNC_CMD, vdev->timeout.jsm);
	if (ret)
		ivpu_warn_ratelimited(vdev, "Failed to send command \"%s\": ret %d\n",
				      command, ret);

	return ret;
}
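
/* Query which trace destinations and HW components the FW supports. */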
int ivpu_jsm_trace_get_capability(struct ivpu_device *vdev, u32 *trace_destination_mask,
				  u64 *trace_hw_component_mask)
{
	struct vpu_jsm_msg req = { .type = VPU_JSM_MSG_TRACE_GET_CAPABILITY };
	struct vpu_jsm_msg resp;
	int ret;

	ret = ivpu_ipc_send_receive(vdev, &req, VPU_JSM_MSG_TRACE_GET_CAPABILITY_RSP, &resp,
				    VPU_IPC_CHAN_ASYNC_CMD, vdev->timeout.jsm);
	if (ret) {
		ivpu_warn_ratelimited(vdev, "Failed to get trace capability: %d\n", ret);
		return ret;
	}

	*trace_destination_mask = resp.payload.trace_capability.trace_destination_mask;
	*trace_hw_component_mask = resp.payload.trace_capability.trace_hw_component_mask;
	return ret;
}
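
/* Set the FW trace level, destination mask and HW component mask. */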
int ivpu_jsm_trace_set_config(struct ivpu_device *vdev, u32 trace_level, u32 trace_destination_mask,
			      u64 trace_hw_component_mask)
{
	struct vpu_jsm_msg req = { .type = VPU_JSM_MSG_TRACE_SET_CONFIG };
	struct vpu_jsm_msg resp;
	int ret;

	req.payload.trace_config.trace_level = trace_level;
	req.payload.trace_config.trace_destination_mask = trace_destination_mask;
	req.payload.trace_config.trace_hw_component_mask = trace_hw_component_mask;

	ret = ivpu_ipc_send_receive(vdev, &req, VPU_JSM_MSG_TRACE_SET_CONFIG_RSP, &resp,
				    VPU_IPC_CHAN_ASYNC_CMD, vdev->timeout.jsm);
	if (ret)
		ivpu_warn_ratelimited(vdev, "Failed to set config: %d\n", ret);

	return ret;
}
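
/* Ask the FW to release all resources tied to @host_ssid. */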
int ivpu_jsm_context_release(struct ivpu_device *vdev, u32 host_ssid)
{
	struct vpu_jsm_msg req = { .type = VPU_JSM_MSG_SSID_RELEASE };
	struct vpu_jsm_msg resp;
	int ret;

	req.payload.ssid_release.host_ssid = host_ssid;

	ret = ivpu_ipc_send_receive(vdev, &req, VPU_JSM_MSG_SSID_RELEASE_DONE, &resp,
				    VPU_IPC_CHAN_ASYNC_CMD, vdev->timeout.jsm);
	if (ret)
		ivpu_warn_ratelimited(vdev, "Failed to release context: %d\n", ret);

	return ret;
}
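
/*
 * Request D0i3 entry. This is a no-op when the disable_d0i3_msg workaround is
 * active; otherwise wait for the FW response and then for the HW to go idle.
 */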
int ivpu_jsm_pwr_d0i3_enter(struct ivpu_device *vdev)
{
	struct vpu_jsm_msg req = { .type = VPU_JSM_MSG_PWR_D0I3_ENTER };
	struct vpu_jsm_msg resp;
	int ret;

	if (IVPU_WA(disable_d0i3_msg))
		return 0;

	req.payload.pwr_d0i3_enter.send_response = 1;

	ret = ivpu_ipc_send_receive_internal(vdev, &req, VPU_JSM_MSG_PWR_D0I3_ENTER_DONE, &resp,
					     VPU_IPC_CHAN_GEN_CMD, vdev->timeout.d0i3_entry_msg);
	if (ret)
		return ret;

	return ivpu_hw_wait_for_idle(vdev);
}
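
/* Create a command queue for the HW scheduler (HWS). */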
int ivpu_jsm_hws_create_cmdq(struct ivpu_device *vdev, u32 ctx_id, u32 cmdq_group, u32 cmdq_id,
			     u32 pid, u32 engine, u64 cmdq_base, u32 cmdq_size)
{
	struct vpu_jsm_msg req = { .type = VPU_JSM_MSG_CREATE_CMD_QUEUE };
	struct vpu_jsm_msg resp;
	int ret;

	req.payload.hws_create_cmdq.host_ssid = ctx_id;
	req.payload.hws_create_cmdq.process_id = pid;
	req.payload.hws_create_cmdq.engine_idx = engine;
	req.payload.hws_create_cmdq.cmdq_group = cmdq_group;
	req.payload.hws_create_cmdq.cmdq_id = cmdq_id;
	req.payload.hws_create_cmdq.cmdq_base = cmdq_base;
	req.payload.hws_create_cmdq.cmdq_size = cmdq_size;

	ret = ivpu_ipc_send_receive(vdev, &req, VPU_JSM_MSG_CREATE_CMD_QUEUE_RSP, &resp,
				    VPU_IPC_CHAN_ASYNC_CMD, vdev->timeout.jsm);
	if (ret)
		ivpu_warn_ratelimited(vdev, "Failed to create command queue: %d\n", ret);

	return ret;
}
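
/* Destroy a HW scheduler command queue. */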
int ivpu_jsm_hws_destroy_cmdq(struct ivpu_device *vdev, u32 ctx_id, u32 cmdq_id)
{
	struct vpu_jsm_msg req = { .type = VPU_JSM_MSG_DESTROY_CMD_QUEUE };
	struct vpu_jsm_msg resp;
	int ret;

	req.payload.hws_destroy_cmdq.host_ssid = ctx_id;
	req.payload.hws_destroy_cmdq.cmdq_id = cmdq_id;

	ret = ivpu_ipc_send_receive(vdev, &req, VPU_JSM_MSG_DESTROY_CMD_QUEUE_RSP, &resp,
				    VPU_IPC_CHAN_ASYNC_CMD, vdev->timeout.jsm);
	if (ret)
		ivpu_warn_ratelimited(vdev, "Failed to destroy command queue: %d\n", ret);

	return ret;
}
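
/*
 * Register a doorbell for a HWS command queue. The FW acknowledges this with
 * the generic VPU_JSM_MSG_REGISTER_DB_DONE message, not a HWS-specific one.
 */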
int ivpu_jsm_hws_register_db(struct ivpu_device *vdev, u32 ctx_id, u32 cmdq_id, u32 db_id,
			     u64 cmdq_base, u32 cmdq_size)
{
	struct vpu_jsm_msg req = { .type = VPU_JSM_MSG_HWS_REGISTER_DB };
	struct vpu_jsm_msg resp;
	int ret = 0;

	req.payload.hws_register_db.db_id = db_id;
	req.payload.hws_register_db.host_ssid = ctx_id;
	req.payload.hws_register_db.cmdq_id = cmdq_id;
	req.payload.hws_register_db.cmdq_base = cmdq_base;
	req.payload.hws_register_db.cmdq_size = cmdq_size;

	ret = ivpu_ipc_send_receive(vdev, &req, VPU_JSM_MSG_REGISTER_DB_DONE, &resp,
				    VPU_IPC_CHAN_ASYNC_CMD, vdev->timeout.jsm);
	if (ret)
		ivpu_err_ratelimited(vdev, "Failed to register doorbell %u: %d\n", db_id, ret);

	return ret;
}
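
/* Resume @engine under the HW scheduler. */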
int ivpu_jsm_hws_resume_engine(struct ivpu_device *vdev, u32 engine)
{
	struct vpu_jsm_msg req = { .type = VPU_JSM_MSG_HWS_ENGINE_RESUME };
	struct vpu_jsm_msg resp;
	int ret;

	if (engine >= VPU_ENGINE_NB)
		return -EINVAL;

	req.payload.hws_resume_engine.engine_idx = engine;

	ret = ivpu_ipc_send_receive(vdev, &req, VPU_JSM_MSG_HWS_RESUME_ENGINE_DONE, &resp,
				    VPU_IPC_CHAN_ASYNC_CMD, vdev->timeout.jsm);
	if (ret)
		ivpu_err_ratelimited(vdev, "Failed to resume engine %d: %d\n", engine, ret);

	return ret;
}
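
/*
 * Set scheduling properties for one command queue. Only the priority band is
 * caller-controlled; the remaining fields use fixed driver defaults.
 */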
int ivpu_jsm_hws_set_context_sched_properties(struct ivpu_device *vdev, u32 ctx_id, u32 cmdq_id,
					      u32 priority)
{
	struct vpu_jsm_msg req = { .type = VPU_JSM_MSG_SET_CONTEXT_SCHED_PROPERTIES };
	struct vpu_jsm_msg resp;
	int ret;

	req.payload.hws_set_context_sched_properties.host_ssid = ctx_id;
	req.payload.hws_set_context_sched_properties.cmdq_id = cmdq_id;
	req.payload.hws_set_context_sched_properties.priority_band = priority;
	req.payload.hws_set_context_sched_properties.realtime_priority_level = 0;
	req.payload.hws_set_context_sched_properties.in_process_priority = 0;
	req.payload.hws_set_context_sched_properties.context_quantum = 20000;
	req.payload.hws_set_context_sched_properties.grace_period_same_priority = 10000;
	req.payload.hws_set_context_sched_properties.grace_period_lower_priority = 0;

	ret = ivpu_ipc_send_receive(vdev, &req, VPU_JSM_MSG_SET_CONTEXT_SCHED_PROPERTIES_RSP, &resp,
				    VPU_IPC_CHAN_ASYNC_CMD, vdev->timeout.jsm);
	if (ret)
		ivpu_warn_ratelimited(vdev, "Failed to set context sched properties: %d\n", ret);

	return ret;
}
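
/*
 * Give the FW a scheduling log buffer; extra log events are enabled only when
 * the IVPU_TEST_MODE_HWS_EXTRA_EVENTS test mode is set.
 */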
int ivpu_jsm_hws_set_scheduling_log(struct ivpu_device *vdev, u32 engine_idx, u32 host_ssid,
				    u64 vpu_log_buffer_va)
{
	struct vpu_jsm_msg req = { .type = VPU_JSM_MSG_HWS_SET_SCHEDULING_LOG };
	struct vpu_jsm_msg resp;
	int ret;

	req.payload.hws_set_scheduling_log.engine_idx = engine_idx;
	req.payload.hws_set_scheduling_log.host_ssid = host_ssid;
	req.payload.hws_set_scheduling_log.vpu_log_buffer_va = vpu_log_buffer_va;
	req.payload.hws_set_scheduling_log.notify_index = 0;
	req.payload.hws_set_scheduling_log.enable_extra_events =
		ivpu_test_mode & IVPU_TEST_MODE_HWS_EXTRA_EVENTS;

	ret = ivpu_ipc_send_receive(vdev, &req, VPU_JSM_MSG_HWS_SET_SCHEDULING_LOG_RSP, &resp,
				    VPU_IPC_CHAN_ASYNC_CMD, vdev->timeout.jsm);
	if (ret)
		ivpu_warn_ratelimited(vdev, "Failed to set scheduling log: %d\n", ret);

	return ret;
}
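
/*
 * Program the four HWS priority bands (idle, normal, focus, realtime) with
 * the driver's default grace periods and process quanta.
 */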
int ivpu_jsm_hws_setup_priority_bands(struct ivpu_device *vdev)
{
	struct vpu_jsm_msg req = { .type = VPU_JSM_MSG_SET_PRIORITY_BAND_SETUP };
	struct vpu_jsm_msg resp;
	int ret;

	/* Idle */
	req.payload.hws_priority_band_setup.grace_period[0] = 0;
	req.payload.hws_priority_band_setup.process_grace_period[0] = 50000;
	req.payload.hws_priority_band_setup.process_quantum[0] = 160000;
	/* Normal */
	req.payload.hws_priority_band_setup.grace_period[1] = 50000;
	req.payload.hws_priority_band_setup.process_grace_period[1] = 50000;
	req.payload.hws_priority_band_setup.process_quantum[1] = 300000;
	/* Focus */
	req.payload.hws_priority_band_setup.grace_period[2] = 50000;
	req.payload.hws_priority_band_setup.process_grace_period[2] = 50000;
	req.payload.hws_priority_band_setup.process_quantum[2] = 200000;
	/* Realtime */
	req.payload.hws_priority_band_setup.grace_period[3] = 0;
	req.payload.hws_priority_band_setup.process_grace_period[3] = 50000;
	req.payload.hws_priority_band_setup.process_quantum[3] = 200000;

	req.payload.hws_priority_band_setup.normal_band_percentage = 10;

	ret = ivpu_ipc_send_receive_internal(vdev, &req, VPU_JSM_MSG_SET_PRIORITY_BAND_SETUP_RSP,
					     &resp, VPU_IPC_CHAN_ASYNC_CMD, vdev->timeout.jsm);
	if (ret)
		ivpu_warn_ratelimited(vdev, "Failed to set priority bands: %d\n", ret);

	return ret;
}
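
/* Start the metric streamer for the groups selected in @metric_group_mask. */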
int ivpu_jsm_metric_streamer_start(struct ivpu_device *vdev, u64 metric_group_mask,
				   u64 sampling_rate, u64 buffer_addr, u64 buffer_size)
{
	struct vpu_jsm_msg req = { .type = VPU_JSM_MSG_METRIC_STREAMER_START };
	struct vpu_jsm_msg resp;
	int ret;

	req.payload.metric_streamer_start.metric_group_mask = metric_group_mask;
	req.payload.metric_streamer_start.sampling_rate = sampling_rate;
	req.payload.metric_streamer_start.buffer_addr = buffer_addr;
	req.payload.metric_streamer_start.buffer_size = buffer_size;

	ret = ivpu_ipc_send_receive(vdev, &req, VPU_JSM_MSG_METRIC_STREAMER_START_DONE, &resp,
				    VPU_IPC_CHAN_ASYNC_CMD, vdev->timeout.jsm);
	if (ret) {
		ivpu_warn_ratelimited(vdev, "Failed to start metric streamer: ret %d\n", ret);
		return ret;
	}

	return ret;
}
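
/* Stop the metric streamer for the given metric groups. */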
int ivpu_jsm_metric_streamer_stop(struct ivpu_device *vdev, u64 metric_group_mask)
{
	struct vpu_jsm_msg req = { .type = VPU_JSM_MSG_METRIC_STREAMER_STOP };
	struct vpu_jsm_msg resp;
	int ret;

	req.payload.metric_streamer_stop.metric_group_mask = metric_group_mask;

	ret = ivpu_ipc_send_receive(vdev, &req, VPU_JSM_MSG_METRIC_STREAMER_STOP_DONE, &resp,
				    VPU_IPC_CHAN_ASYNC_CMD, vdev->timeout.jsm);
	if (ret)
		ivpu_warn_ratelimited(vdev, "Failed to stop metric streamer: ret %d\n", ret);

	return ret;
}
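
/*
 * Collect metric streamer data into the supplied buffer. Returns -EOVERFLOW
 * if the FW claims to have written more bytes than the buffer can hold.
 */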
int ivpu_jsm_metric_streamer_update(struct ivpu_device *vdev, u64 metric_group_mask,
				    u64 buffer_addr, u64 buffer_size, u64 *bytes_written)
{
	struct vpu_jsm_msg req = { .type = VPU_JSM_MSG_METRIC_STREAMER_UPDATE };
	struct vpu_jsm_msg resp;
	int ret;

	req.payload.metric_streamer_update.metric_group_mask = metric_group_mask;
	req.payload.metric_streamer_update.buffer_addr = buffer_addr;
	req.payload.metric_streamer_update.buffer_size = buffer_size;

	ret = ivpu_ipc_send_receive(vdev, &req, VPU_JSM_MSG_METRIC_STREAMER_UPDATE_DONE, &resp,
				    VPU_IPC_CHAN_ASYNC_CMD, vdev->timeout.jsm);
	if (ret) {
		ivpu_warn_ratelimited(vdev, "Failed to update metric streamer: ret %d\n", ret);
		return ret;
	}

	if (buffer_size && resp.payload.metric_streamer_done.bytes_written > buffer_size) {
		ivpu_warn_ratelimited(vdev, "MS buffer overflow: bytes_written %#llx > buffer_size %#llx\n",
				      resp.payload.metric_streamer_done.bytes_written, buffer_size);
		return -EOVERFLOW;
	}

	*bytes_written = resp.payload.metric_streamer_done.bytes_written;
	return ret;
}
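
/*
 * Query metric streamer metadata. A zero sample size in the reply is treated
 * as a malformed message (-EBADMSG); the output pointers are optional.
 */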
int ivpu_jsm_metric_streamer_info(struct ivpu_device *vdev, u64 metric_group_mask, u64 buffer_addr,
				  u64 buffer_size, u32 *sample_size, u64 *info_size)
{
	struct vpu_jsm_msg req = { .type = VPU_JSM_MSG_METRIC_STREAMER_INFO };
	struct vpu_jsm_msg resp;
	int ret;

	req.payload.metric_streamer_start.metric_group_mask = metric_group_mask;
	req.payload.metric_streamer_start.buffer_addr = buffer_addr;
	req.payload.metric_streamer_start.buffer_size = buffer_size;

	ret = ivpu_ipc_send_receive(vdev, &req, VPU_JSM_MSG_METRIC_STREAMER_INFO_DONE, &resp,
				    VPU_IPC_CHAN_ASYNC_CMD, vdev->timeout.jsm);
	if (ret) {
		ivpu_warn_ratelimited(vdev, "Failed to get metric streamer info: ret %d\n", ret);
		return ret;
	}

	if (!resp.payload.metric_streamer_done.sample_size) {
		ivpu_warn_ratelimited(vdev, "Invalid sample size\n");
		return -EBADMSG;
	}

	if (sample_size)
		*sample_size = resp.payload.metric_streamer_done.sample_size;
	if (info_size)
		*info_size = resp.payload.metric_streamer_done.bytes_written;

	return ret;
}
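
/* Enable duty cycle throttling (DCT) with the given active/inactive times in microseconds. */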
int ivpu_jsm_dct_enable(struct ivpu_device *vdev, u32 active_us, u32 inactive_us)
{
	struct vpu_jsm_msg req = { .type = VPU_JSM_MSG_DCT_ENABLE };
	struct vpu_jsm_msg resp;

	req.payload.pwr_dct_control.dct_active_us = active_us;
	req.payload.pwr_dct_control.dct_inactive_us = inactive_us;

	return ivpu_ipc_send_receive_internal(vdev, &req, VPU_JSM_MSG_DCT_ENABLE_DONE, &resp,
					      VPU_IPC_CHAN_ASYNC_CMD, vdev->timeout.jsm);
}
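
/* Disable duty cycle throttling. */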
int ivpu_jsm_dct_disable(struct ivpu_device *vdev)
{
	struct vpu_jsm_msg req = { .type = VPU_JSM_MSG_DCT_DISABLE };
	struct vpu_jsm_msg resp;

	return ivpu_ipc_send_receive_internal(vdev, &req, VPU_JSM_MSG_DCT_DISABLE_DONE, &resp,
					      VPU_IPC_CHAN_ASYNC_CMD, vdev->timeout.jsm);
}