/* mtk-cmdq-helper.c */
  1. // SPDX-License-Identifier: GPL-2.0
  2. //
  3. // Copyright (c) 2018 MediaTek Inc.
  4. #include <linux/completion.h>
  5. #include <linux/errno.h>
  6. #include <linux/dma-mapping.h>
  7. #include <linux/module.h>
  8. #include <linux/mailbox_controller.h>
  9. #include <linux/of.h>
  10. #include <linux/soc/mediatek/mtk-cmdq.h>
  11. #define CMDQ_WRITE_ENABLE_MASK BIT(0)
  12. #define CMDQ_POLL_ENABLE_MASK BIT(0)
  13. /* dedicate the last GPR_R15 to assign the register address to be poll */
  14. #define CMDQ_POLL_ADDR_GPR (15)
  15. #define CMDQ_EOC_IRQ_EN BIT(0)
  16. #define CMDQ_IMMEDIATE_VALUE 0
  17. #define CMDQ_REG_TYPE 1
  18. #define CMDQ_JUMP_RELATIVE 0
  19. #define CMDQ_JUMP_ABSOLUTE 1
/*
 * In-memory layout of one 64-bit GCE command.
 *
 * The unions mirror the argument fields of the hardware instruction word:
 * the low 32 bits hold either an immediate value / inverted mask or a pair
 * of 16-bit register arguments, followed by a 16-bit offset/event/register
 * field, the subsys/type byte, and the opcode byte.
 */
struct cmdq_instruction {
	union {
		u32 value;		/* immediate value (WRITE/POLL/WFE) */
		u32 mask;		/* inverted mask for CMDQ_CODE_MASK */
		struct {
			u16 arg_c;	/* right operand (LOGIC ops) */
			u16 src_reg;	/* source register index / addr low */
		};
	};
	union {
		u16 offset;		/* register offset within a subsys */
		u16 event;		/* event id for WFE-family ops */
		u16 reg_dst;		/* destination register index */
	};
	union {
		u8 subsys;		/* subsys id for legacy addressing */
		struct {
			u8 sop:5;	/* high-addr reg idx or logic sub-op */
			u8 arg_c_t:1;	/* arg_c is register (1) or value (0) */
			u8 src_t:1;	/* src is register (1) or value (0) */
			u8 dst_t:1;	/* dst is register (1) or value (0) */
		};
	};
	u8 op;				/* CMDQ_CODE_* opcode */
};
  45. static inline u8 cmdq_operand_get_type(struct cmdq_operand *op)
  46. {
  47. return op->reg ? CMDQ_REG_TYPE : CMDQ_IMMEDIATE_VALUE;
  48. }
  49. static inline u16 cmdq_operand_get_idx_value(struct cmdq_operand *op)
  50. {
  51. return op->reg ? op->idx : op->value;
  52. }
  53. int cmdq_dev_get_client_reg(struct device *dev,
  54. struct cmdq_client_reg *client_reg, int idx)
  55. {
  56. struct of_phandle_args spec;
  57. int err;
  58. if (!client_reg)
  59. return -ENOENT;
  60. err = of_parse_phandle_with_fixed_args(dev->of_node,
  61. "mediatek,gce-client-reg",
  62. 3, idx, &spec);
  63. if (err < 0) {
  64. dev_warn(dev,
  65. "error %d can't parse gce-client-reg property (%d)",
  66. err, idx);
  67. return err;
  68. }
  69. client_reg->subsys = (u8)spec.args[0];
  70. client_reg->offset = (u16)spec.args[1];
  71. client_reg->size = (u16)spec.args[2];
  72. of_node_put(spec.np);
  73. return 0;
  74. }
  75. EXPORT_SYMBOL(cmdq_dev_get_client_reg);
  76. struct cmdq_client *cmdq_mbox_create(struct device *dev, int index)
  77. {
  78. struct cmdq_client *client;
  79. client = kzalloc(sizeof(*client), GFP_KERNEL);
  80. if (!client)
  81. return (struct cmdq_client *)-ENOMEM;
  82. client->client.dev = dev;
  83. client->client.tx_block = false;
  84. client->client.knows_txdone = true;
  85. client->chan = mbox_request_channel(&client->client, index);
  86. if (IS_ERR(client->chan)) {
  87. long err;
  88. dev_err(dev, "failed to request channel\n");
  89. err = PTR_ERR(client->chan);
  90. kfree(client);
  91. return ERR_PTR(err);
  92. }
  93. return client;
  94. }
  95. EXPORT_SYMBOL(cmdq_mbox_create);
/**
 * cmdq_mbox_destroy() - release the mailbox channel and free the client
 * @client: the client returned by cmdq_mbox_create()
 */
void cmdq_mbox_destroy(struct cmdq_client *client)
{
	mbox_free_channel(client->chan);
	kfree(client);
}
EXPORT_SYMBOL(cmdq_mbox_destroy);
  102. int cmdq_pkt_create(struct cmdq_client *client, struct cmdq_pkt *pkt, size_t size)
  103. {
  104. struct device *dev;
  105. dma_addr_t dma_addr;
  106. pkt->va_base = kzalloc(size, GFP_KERNEL);
  107. if (!pkt->va_base)
  108. return -ENOMEM;
  109. pkt->buf_size = size;
  110. dev = client->chan->mbox->dev;
  111. dma_addr = dma_map_single(dev, pkt->va_base, pkt->buf_size,
  112. DMA_TO_DEVICE);
  113. if (dma_mapping_error(dev, dma_addr)) {
  114. dev_err(dev, "dma map failed, size=%u\n", (u32)(u64)size);
  115. kfree(pkt->va_base);
  116. return -ENOMEM;
  117. }
  118. pkt->pa_base = dma_addr;
  119. return 0;
  120. }
  121. EXPORT_SYMBOL(cmdq_pkt_create);
/**
 * cmdq_pkt_destroy() - unmap and free a packet's command buffer
 * @client: the client used to create the packet (supplies the mapping device)
 * @pkt: the packet created by cmdq_pkt_create()
 */
void cmdq_pkt_destroy(struct cmdq_client *client, struct cmdq_pkt *pkt)
{
	dma_unmap_single(client->chan->mbox->dev, pkt->pa_base, pkt->buf_size,
			 DMA_TO_DEVICE);
	kfree(pkt->va_base);
}
EXPORT_SYMBOL(cmdq_pkt_destroy);
/*
 * cmdq_pkt_append_command() - copy one instruction into the packet's
 * command buffer and advance the used-size counter.
 *
 * Returns 0 on success or -ENOMEM when the buffer is already full.
 */
static int cmdq_pkt_append_command(struct cmdq_pkt *pkt,
				   struct cmdq_instruction inst)
{
	struct cmdq_instruction *cmd_ptr;

	if (unlikely(pkt->cmd_buf_size + CMDQ_INST_SIZE > pkt->buf_size)) {
		/*
		 * In the case the allocated buffer size (pkt->buf_size) is
		 * used up, the real required size (pkt->cmd_buf_size) is
		 * still increased, so that the user knows how much memory
		 * should be ultimately allocated after appending all commands
		 * and flushing the command packet. Therefore, the user can
		 * call cmdq_pkt_create() again with the real required buffer
		 * size.
		 */
		pkt->cmd_buf_size += CMDQ_INST_SIZE;
		WARN_ONCE(1, "%s: buffer size %u is too small !\n",
			__func__, (u32)pkt->buf_size);
		return -ENOMEM;
	}

	cmd_ptr = pkt->va_base + pkt->cmd_buf_size;
	*cmd_ptr = inst;
	pkt->cmd_buf_size += CMDQ_INST_SIZE;

	return 0;
}
  152. int cmdq_pkt_write(struct cmdq_pkt *pkt, u8 subsys, u16 offset, u32 value)
  153. {
  154. struct cmdq_instruction inst;
  155. inst.op = CMDQ_CODE_WRITE;
  156. inst.value = value;
  157. inst.offset = offset;
  158. inst.subsys = subsys;
  159. return cmdq_pkt_append_command(pkt, inst);
  160. }
  161. EXPORT_SYMBOL(cmdq_pkt_write);
  162. int cmdq_pkt_write_mask(struct cmdq_pkt *pkt, u8 subsys,
  163. u16 offset, u32 value, u32 mask)
  164. {
  165. struct cmdq_instruction inst = { {0} };
  166. u16 offset_mask = offset;
  167. int err;
  168. if (mask != 0xffffffff) {
  169. inst.op = CMDQ_CODE_MASK;
  170. inst.mask = ~mask;
  171. err = cmdq_pkt_append_command(pkt, inst);
  172. if (err < 0)
  173. return err;
  174. offset_mask |= CMDQ_WRITE_ENABLE_MASK;
  175. }
  176. err = cmdq_pkt_write(pkt, subsys, offset_mask, value);
  177. return err;
  178. }
  179. EXPORT_SYMBOL(cmdq_pkt_write_mask);
  180. int cmdq_pkt_read_s(struct cmdq_pkt *pkt, u16 high_addr_reg_idx, u16 addr_low,
  181. u16 reg_idx)
  182. {
  183. struct cmdq_instruction inst = {};
  184. inst.op = CMDQ_CODE_READ_S;
  185. inst.dst_t = CMDQ_REG_TYPE;
  186. inst.sop = high_addr_reg_idx;
  187. inst.reg_dst = reg_idx;
  188. inst.src_reg = addr_low;
  189. return cmdq_pkt_append_command(pkt, inst);
  190. }
  191. EXPORT_SYMBOL(cmdq_pkt_read_s);
  192. int cmdq_pkt_write_s(struct cmdq_pkt *pkt, u16 high_addr_reg_idx,
  193. u16 addr_low, u16 src_reg_idx)
  194. {
  195. struct cmdq_instruction inst = {};
  196. inst.op = CMDQ_CODE_WRITE_S;
  197. inst.src_t = CMDQ_REG_TYPE;
  198. inst.sop = high_addr_reg_idx;
  199. inst.offset = addr_low;
  200. inst.src_reg = src_reg_idx;
  201. return cmdq_pkt_append_command(pkt, inst);
  202. }
  203. EXPORT_SYMBOL(cmdq_pkt_write_s);
  204. int cmdq_pkt_write_s_mask(struct cmdq_pkt *pkt, u16 high_addr_reg_idx,
  205. u16 addr_low, u16 src_reg_idx, u32 mask)
  206. {
  207. struct cmdq_instruction inst = {};
  208. int err;
  209. inst.op = CMDQ_CODE_MASK;
  210. inst.mask = ~mask;
  211. err = cmdq_pkt_append_command(pkt, inst);
  212. if (err < 0)
  213. return err;
  214. inst.mask = 0;
  215. inst.op = CMDQ_CODE_WRITE_S_MASK;
  216. inst.src_t = CMDQ_REG_TYPE;
  217. inst.sop = high_addr_reg_idx;
  218. inst.offset = addr_low;
  219. inst.src_reg = src_reg_idx;
  220. return cmdq_pkt_append_command(pkt, inst);
  221. }
  222. EXPORT_SYMBOL(cmdq_pkt_write_s_mask);
  223. int cmdq_pkt_write_s_value(struct cmdq_pkt *pkt, u8 high_addr_reg_idx,
  224. u16 addr_low, u32 value)
  225. {
  226. struct cmdq_instruction inst = {};
  227. inst.op = CMDQ_CODE_WRITE_S;
  228. inst.sop = high_addr_reg_idx;
  229. inst.offset = addr_low;
  230. inst.value = value;
  231. return cmdq_pkt_append_command(pkt, inst);
  232. }
  233. EXPORT_SYMBOL(cmdq_pkt_write_s_value);
  234. int cmdq_pkt_write_s_mask_value(struct cmdq_pkt *pkt, u8 high_addr_reg_idx,
  235. u16 addr_low, u32 value, u32 mask)
  236. {
  237. struct cmdq_instruction inst = {};
  238. int err;
  239. inst.op = CMDQ_CODE_MASK;
  240. inst.mask = ~mask;
  241. err = cmdq_pkt_append_command(pkt, inst);
  242. if (err < 0)
  243. return err;
  244. inst.op = CMDQ_CODE_WRITE_S_MASK;
  245. inst.sop = high_addr_reg_idx;
  246. inst.offset = addr_low;
  247. inst.value = value;
  248. return cmdq_pkt_append_command(pkt, inst);
  249. }
  250. EXPORT_SYMBOL(cmdq_pkt_write_s_mask_value);
  251. int cmdq_pkt_mem_move(struct cmdq_pkt *pkt, dma_addr_t src_addr, dma_addr_t dst_addr)
  252. {
  253. const u16 high_addr_reg_idx = CMDQ_THR_SPR_IDX0;
  254. const u16 value_reg_idx = CMDQ_THR_SPR_IDX1;
  255. int ret;
  256. /* read the value of src_addr into high_addr_reg_idx */
  257. ret = cmdq_pkt_assign(pkt, high_addr_reg_idx, CMDQ_ADDR_HIGH(src_addr));
  258. if (ret < 0)
  259. return ret;
  260. ret = cmdq_pkt_read_s(pkt, high_addr_reg_idx, CMDQ_ADDR_LOW(src_addr), value_reg_idx);
  261. if (ret < 0)
  262. return ret;
  263. /* write the value of value_reg_idx into dst_addr */
  264. ret = cmdq_pkt_assign(pkt, high_addr_reg_idx, CMDQ_ADDR_HIGH(dst_addr));
  265. if (ret < 0)
  266. return ret;
  267. ret = cmdq_pkt_write_s(pkt, high_addr_reg_idx, CMDQ_ADDR_LOW(dst_addr), value_reg_idx);
  268. if (ret < 0)
  269. return ret;
  270. return 0;
  271. }
  272. EXPORT_SYMBOL(cmdq_pkt_mem_move);
  273. int cmdq_pkt_wfe(struct cmdq_pkt *pkt, u16 event, bool clear)
  274. {
  275. struct cmdq_instruction inst = { {0} };
  276. u32 clear_option = clear ? CMDQ_WFE_UPDATE : 0;
  277. if (event >= CMDQ_MAX_EVENT)
  278. return -EINVAL;
  279. inst.op = CMDQ_CODE_WFE;
  280. inst.value = CMDQ_WFE_OPTION | clear_option;
  281. inst.event = event;
  282. return cmdq_pkt_append_command(pkt, inst);
  283. }
  284. EXPORT_SYMBOL(cmdq_pkt_wfe);
  285. int cmdq_pkt_acquire_event(struct cmdq_pkt *pkt, u16 event)
  286. {
  287. struct cmdq_instruction inst = {};
  288. if (event >= CMDQ_MAX_EVENT)
  289. return -EINVAL;
  290. inst.op = CMDQ_CODE_WFE;
  291. inst.value = CMDQ_WFE_UPDATE | CMDQ_WFE_UPDATE_VALUE | CMDQ_WFE_WAIT;
  292. inst.event = event;
  293. return cmdq_pkt_append_command(pkt, inst);
  294. }
  295. EXPORT_SYMBOL(cmdq_pkt_acquire_event);
  296. int cmdq_pkt_clear_event(struct cmdq_pkt *pkt, u16 event)
  297. {
  298. struct cmdq_instruction inst = { {0} };
  299. if (event >= CMDQ_MAX_EVENT)
  300. return -EINVAL;
  301. inst.op = CMDQ_CODE_WFE;
  302. inst.value = CMDQ_WFE_UPDATE;
  303. inst.event = event;
  304. return cmdq_pkt_append_command(pkt, inst);
  305. }
  306. EXPORT_SYMBOL(cmdq_pkt_clear_event);
  307. int cmdq_pkt_set_event(struct cmdq_pkt *pkt, u16 event)
  308. {
  309. struct cmdq_instruction inst = {};
  310. if (event >= CMDQ_MAX_EVENT)
  311. return -EINVAL;
  312. inst.op = CMDQ_CODE_WFE;
  313. inst.value = CMDQ_WFE_UPDATE | CMDQ_WFE_UPDATE_VALUE;
  314. inst.event = event;
  315. return cmdq_pkt_append_command(pkt, inst);
  316. }
  317. EXPORT_SYMBOL(cmdq_pkt_set_event);
  318. int cmdq_pkt_poll(struct cmdq_pkt *pkt, u8 subsys,
  319. u16 offset, u32 value)
  320. {
  321. struct cmdq_instruction inst = { {0} };
  322. int err;
  323. inst.op = CMDQ_CODE_POLL;
  324. inst.value = value;
  325. inst.offset = offset;
  326. inst.subsys = subsys;
  327. err = cmdq_pkt_append_command(pkt, inst);
  328. return err;
  329. }
  330. EXPORT_SYMBOL(cmdq_pkt_poll);
  331. int cmdq_pkt_poll_mask(struct cmdq_pkt *pkt, u8 subsys,
  332. u16 offset, u32 value, u32 mask)
  333. {
  334. struct cmdq_instruction inst = { {0} };
  335. int err;
  336. inst.op = CMDQ_CODE_MASK;
  337. inst.mask = ~mask;
  338. err = cmdq_pkt_append_command(pkt, inst);
  339. if (err < 0)
  340. return err;
  341. offset = offset | CMDQ_POLL_ENABLE_MASK;
  342. err = cmdq_pkt_poll(pkt, subsys, offset, value);
  343. return err;
  344. }
  345. EXPORT_SYMBOL(cmdq_pkt_poll_mask);
/*
 * cmdq_pkt_poll_addr() - poll an absolute register address (no subsys id)
 * until it reads back @value under @mask.
 *
 * Emits up to three instructions: an optional MASK prefix, a MASK-op that
 * moves @addr into the dedicated GPR, and the POLL itself.
 */
int cmdq_pkt_poll_addr(struct cmdq_pkt *pkt, dma_addr_t addr, u32 value, u32 mask)
{
	struct cmdq_instruction inst = { {0} };
	u8 use_mask = 0;
	int ret;

	/*
	 * Append a MASK instruction to set the mask for the following POLL
	 * instruction, and remember (use_mask) to set its enable bit.
	 * A full 32-bit mask needs no prefix.
	 */
	if (mask != GENMASK(31, 0)) {
		inst.op = CMDQ_CODE_MASK;
		inst.mask = ~mask;
		ret = cmdq_pkt_append_command(pkt, inst);
		if (ret < 0)
			return ret;
		use_mask = CMDQ_POLL_ENABLE_MASK;
	}
	/*
	 * POLL is a legacy operation in GCE and it does not support SPR and
	 * CMDQ_CODE_LOGIC, so it cannot use cmdq_pkt_assign to keep the
	 * polling register address in an SPR. To poll a register address
	 * which doesn't have a subsys id, the address is moved into a GPR
	 * with CMDQ_CODE_MASK instead.
	 */
	inst.op = CMDQ_CODE_MASK;
	inst.dst_t = CMDQ_REG_TYPE;
	inst.sop = CMDQ_POLL_ADDR_GPR;
	inst.value = addr;
	ret = cmdq_pkt_append_command(pkt, inst);
	if (ret < 0)
		return ret;
	/* Append a POLL that reads through the GPR assigned above. */
	inst.op = CMDQ_CODE_POLL;
	inst.dst_t = CMDQ_REG_TYPE;
	inst.sop = CMDQ_POLL_ADDR_GPR;
	inst.offset = use_mask;
	inst.value = value;
	ret = cmdq_pkt_append_command(pkt, inst);
	if (ret < 0)
		return ret;
	return 0;
}
EXPORT_SYMBOL(cmdq_pkt_poll_addr);
  388. int cmdq_pkt_logic_command(struct cmdq_pkt *pkt, u16 result_reg_idx,
  389. struct cmdq_operand *left_operand,
  390. enum cmdq_logic_op s_op,
  391. struct cmdq_operand *right_operand)
  392. {
  393. struct cmdq_instruction inst = { {0} };
  394. if (!left_operand || !right_operand || s_op >= CMDQ_LOGIC_MAX)
  395. return -EINVAL;
  396. inst.op = CMDQ_CODE_LOGIC;
  397. inst.dst_t = CMDQ_REG_TYPE;
  398. inst.src_t = cmdq_operand_get_type(left_operand);
  399. inst.arg_c_t = cmdq_operand_get_type(right_operand);
  400. inst.sop = s_op;
  401. inst.reg_dst = result_reg_idx;
  402. inst.src_reg = cmdq_operand_get_idx_value(left_operand);
  403. inst.arg_c = cmdq_operand_get_idx_value(right_operand);
  404. return cmdq_pkt_append_command(pkt, inst);
  405. }
  406. EXPORT_SYMBOL(cmdq_pkt_logic_command);
  407. int cmdq_pkt_assign(struct cmdq_pkt *pkt, u16 reg_idx, u32 value)
  408. {
  409. struct cmdq_instruction inst = {};
  410. inst.op = CMDQ_CODE_LOGIC;
  411. inst.dst_t = CMDQ_REG_TYPE;
  412. inst.reg_dst = reg_idx;
  413. inst.value = value;
  414. return cmdq_pkt_append_command(pkt, inst);
  415. }
  416. EXPORT_SYMBOL(cmdq_pkt_assign);
  417. int cmdq_pkt_jump_abs(struct cmdq_pkt *pkt, dma_addr_t addr, u8 shift_pa)
  418. {
  419. struct cmdq_instruction inst = {};
  420. inst.op = CMDQ_CODE_JUMP;
  421. inst.offset = CMDQ_JUMP_ABSOLUTE;
  422. inst.value = addr >> shift_pa;
  423. return cmdq_pkt_append_command(pkt, inst);
  424. }
  425. EXPORT_SYMBOL(cmdq_pkt_jump_abs);
  426. int cmdq_pkt_jump_rel(struct cmdq_pkt *pkt, s32 offset, u8 shift_pa)
  427. {
  428. struct cmdq_instruction inst = { {0} };
  429. inst.op = CMDQ_CODE_JUMP;
  430. inst.value = (u32)offset >> shift_pa;
  431. return cmdq_pkt_append_command(pkt, inst);
  432. }
  433. EXPORT_SYMBOL(cmdq_pkt_jump_rel);
  434. int cmdq_pkt_eoc(struct cmdq_pkt *pkt)
  435. {
  436. struct cmdq_instruction inst = { {0} };
  437. inst.op = CMDQ_CODE_EOC;
  438. inst.value = CMDQ_EOC_IRQ_EN;
  439. return cmdq_pkt_append_command(pkt, inst);
  440. }
  441. EXPORT_SYMBOL(cmdq_pkt_eoc);
  442. int cmdq_pkt_finalize(struct cmdq_pkt *pkt)
  443. {
  444. struct cmdq_instruction inst = { {0} };
  445. int err;
  446. /* insert EOC and generate IRQ for each command iteration */
  447. inst.op = CMDQ_CODE_EOC;
  448. inst.value = CMDQ_EOC_IRQ_EN;
  449. err = cmdq_pkt_append_command(pkt, inst);
  450. if (err < 0)
  451. return err;
  452. /* JUMP to end */
  453. inst.op = CMDQ_CODE_JUMP;
  454. inst.value = CMDQ_JUMP_PASS >>
  455. cmdq_get_shift_pa(((struct cmdq_client *)pkt->cl)->chan);
  456. err = cmdq_pkt_append_command(pkt, inst);
  457. return err;
  458. }
  459. EXPORT_SYMBOL(cmdq_pkt_finalize);
  460. MODULE_DESCRIPTION("MediaTek Command Queue (CMDQ) driver");
  461. MODULE_LICENSE("GPL v2");