// SPDX-License-Identifier: GPL-2.0
//
// Copyright (c) 2018 MediaTek Inc.

#include <linux/bitops.h>
#include <linux/clk.h>
#include <linux/clk-provider.h>
#include <linux/dma-mapping.h>
#include <linux/errno.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/iopoll.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/mailbox_controller.h>
#include <linux/mailbox/mtk-cmdq-mailbox.h>
#include <linux/of.h>

#define CMDQ_MBOX_AUTOSUSPEND_DELAY_MS	100

#define CMDQ_OP_CODE_MASK		(0xff << CMDQ_OP_CODE_SHIFT)
#define CMDQ_NUM_CMD(t)			(t->cmd_buf_size / CMDQ_INST_SIZE)

#define CMDQ_CURR_IRQ_STATUS		0x10
#define CMDQ_SYNC_TOKEN_UPDATE		0x68
#define CMDQ_THR_SLOT_CYCLES		0x30
#define CMDQ_THR_BASE			0x100
#define CMDQ_THR_SIZE			0x80
#define CMDQ_THR_WARM_RESET		0x00
#define CMDQ_THR_ENABLE_TASK		0x04
#define CMDQ_THR_SUSPEND_TASK		0x08
#define CMDQ_THR_CURR_STATUS		0x0c
#define CMDQ_THR_IRQ_STATUS		0x10
#define CMDQ_THR_IRQ_ENABLE		0x14
#define CMDQ_THR_CURR_ADDR		0x20
#define CMDQ_THR_END_ADDR		0x24
#define CMDQ_THR_WAIT_TOKEN		0x30
#define CMDQ_THR_PRIORITY		0x40
#define GCE_GCTL_VALUE			0x48
#define GCE_CTRL_BY_SW			GENMASK(2, 0)
#define GCE_DDR_EN			GENMASK(18, 16)

#define CMDQ_THR_ACTIVE_SLOT_CYCLES	0x3200
#define CMDQ_THR_ENABLED		0x1
#define CMDQ_THR_DISABLED		0x0
#define CMDQ_THR_SUSPEND		0x1
#define CMDQ_THR_RESUME			0x0
#define CMDQ_THR_STATUS_SUSPENDED	BIT(1)
#define CMDQ_THR_DO_WARM_RESET		BIT(0)
#define CMDQ_THR_IRQ_DONE		0x1
#define CMDQ_THR_IRQ_ERROR		0x12
#define CMDQ_THR_IRQ_EN			(CMDQ_THR_IRQ_ERROR | CMDQ_THR_IRQ_DONE)
#define CMDQ_THR_IS_WAITING		BIT(31)

#define CMDQ_JUMP_BY_OFFSET		0x10000000
#define CMDQ_JUMP_BY_PA			0x10000001

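/* Per hardware-thread state; each GCE thread backs one mailbox channel. */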
struct cmdq_thread {
	struct mbox_chan	*chan;
	void __iomem		*base;
	struct list_head	task_busy_list;
	u32			priority;
};

struct cmdq_task {
	struct cmdq		*cmdq;
	struct list_head	list_entry;
	dma_addr_t		pa_base;
	struct cmdq_thread	*thread;
	struct cmdq_pkt		*pkt; /* the packet sent from mailbox client */
};

struct cmdq {
	struct mbox_controller	mbox;
	void __iomem		*base;
	int			irq;
	u32			irq_mask;
	const struct gce_plat	*pdata;
	struct cmdq_thread	*thread;
	struct clk_bulk_data	*clocks;
	bool			suspended;
};

struct gce_plat {
	u32 thread_nr;
	u8 shift;
	bool control_by_sw;
	bool sw_ddr_en;
	u32 gce_num;
};

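/*
 * Exported helper: report how many bits this SoC's GCE right-shifts
 * command-buffer physical addresses (see gce_plat.shift).
 */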
u8 cmdq_get_shift_pa(struct mbox_chan *chan)
{
	struct cmdq *cmdq = container_of(chan->mbox, struct cmdq, mbox);

	return cmdq->pdata->shift;
}
EXPORT_SYMBOL(cmdq_get_shift_pa);

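/*
 * Program GCE_GCTL_VALUE: hand clock control to software where the platform
 * requires it, and drop the GCE_DDR_EN bits when the DDR path is going down.
 */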
static void cmdq_gctl_value_toggle(struct cmdq *cmdq, bool ddr_enable)
{
	u32 val = cmdq->pdata->control_by_sw ? GCE_CTRL_BY_SW : 0;

	if (!cmdq->pdata->control_by_sw && !cmdq->pdata->sw_ddr_en)
		return;

	if (cmdq->pdata->sw_ddr_en && ddr_enable)
		val |= GCE_DDR_EN;

	writel(val, cmdq->base + GCE_GCTL_VALUE);
}

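/*
 * Ask a GCE thread to suspend and busy-wait (up to 10us) for the hardware to
 * acknowledge. A thread that is already disabled counts as suspended.
 */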
static int cmdq_thread_suspend(struct cmdq *cmdq, struct cmdq_thread *thread)
{
	u32 status;

	writel(CMDQ_THR_SUSPEND, thread->base + CMDQ_THR_SUSPEND_TASK);

	/* If the thread is already disabled, treat it as suspended. */
	if (!(readl(thread->base + CMDQ_THR_ENABLE_TASK) & CMDQ_THR_ENABLED))
		return 0;

	if (readl_poll_timeout_atomic(thread->base + CMDQ_THR_CURR_STATUS,
			status, status & CMDQ_THR_STATUS_SUSPENDED, 0, 10)) {
		dev_err(cmdq->mbox.dev, "suspend GCE thread 0x%x failed\n",
			(u32)(thread->base - cmdq->base));
		return -EFAULT;
	}

	return 0;
}

static void cmdq_thread_resume(struct cmdq_thread *thread)
{
	writel(CMDQ_THR_RESUME, thread->base + CMDQ_THR_SUSPEND_TASK);
}

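/*
 * One-time hardware init: set the active slot cycle budget and walk every
 * event ID through CMDQ_SYNC_TOKEN_UPDATE to reset the sync tokens. The
 * caller must have prepared the clocks; they are enabled only for the
 * duration of these writes.
 */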
static void cmdq_init(struct cmdq *cmdq)
{
	int i;

	WARN_ON(clk_bulk_enable(cmdq->pdata->gce_num, cmdq->clocks));

	cmdq_gctl_value_toggle(cmdq, true);

	writel(CMDQ_THR_ACTIVE_SLOT_CYCLES, cmdq->base + CMDQ_THR_SLOT_CYCLES);
	for (i = 0; i <= CMDQ_MAX_EVENT; i++)
		writel(i, cmdq->base + CMDQ_SYNC_TOKEN_UPDATE);

	clk_bulk_disable(cmdq->pdata->gce_num, cmdq->clocks);
}

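/* Warm-reset one GCE thread and wait (up to 10us) for the bit to self-clear. */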
static int cmdq_thread_reset(struct cmdq *cmdq, struct cmdq_thread *thread)
{
	u32 warm_reset;

	writel(CMDQ_THR_DO_WARM_RESET, thread->base + CMDQ_THR_WARM_RESET);
	if (readl_poll_timeout_atomic(thread->base + CMDQ_THR_WARM_RESET,
			warm_reset, !(warm_reset & CMDQ_THR_DO_WARM_RESET),
			0, 10)) {
		dev_err(cmdq->mbox.dev, "reset GCE thread 0x%x failed\n",
			(u32)(thread->base - cmdq->base));
		return -EFAULT;
	}

	return 0;
}

static void cmdq_thread_disable(struct cmdq *cmdq, struct cmdq_thread *thread)
{
	cmdq_thread_reset(cmdq, thread);
	writel(CMDQ_THR_DISABLED, thread->base + CMDQ_THR_ENABLE_TASK);
}

/* notify GCE to re-fetch commands by setting GCE thread PC */
static void cmdq_thread_invalidate_fetched_data(struct cmdq_thread *thread)
{
	writel(readl(thread->base + CMDQ_THR_CURR_ADDR),
	       thread->base + CMDQ_THR_CURR_ADDR);
}

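/*
 * Append a task to a busy thread: patch the previous task's final
 * instruction into a JUMP to the new task's PA, then rewrite the thread PC
 * so the GCE discards any prefetched copy of the old instruction.
 */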
static void cmdq_task_insert_into_thread(struct cmdq_task *task)
{
	struct device *dev = task->cmdq->mbox.dev;
	struct cmdq_thread *thread = task->thread;
	struct cmdq_task *prev_task = list_last_entry(
			&thread->task_busy_list, typeof(*task), list_entry);
	u64 *prev_task_base = prev_task->pkt->va_base;

	/* let previous task jump to this task */
	dma_sync_single_for_cpu(dev, prev_task->pa_base,
				prev_task->pkt->cmd_buf_size, DMA_TO_DEVICE);
	prev_task_base[CMDQ_NUM_CMD(prev_task->pkt) - 1] =
		(u64)CMDQ_JUMP_BY_PA << 32 |
		(task->pa_base >> task->cmdq->pdata->shift);
	dma_sync_single_for_device(dev, prev_task->pa_base,
				   prev_task->pkt->cmd_buf_size, DMA_TO_DEVICE);

	cmdq_thread_invalidate_fetched_data(thread);
}

static bool cmdq_thread_is_in_wfe(struct cmdq_thread *thread)
{
	return readl(thread->base + CMDQ_THR_WAIT_TOKEN) & CMDQ_THR_IS_WAITING;
}

static void cmdq_task_exec_done(struct cmdq_task *task, int sta)
{
	struct cmdq_cb_data data;

	data.sta = sta;
	data.pkt = task->pkt;
	mbox_chan_received_data(task->thread->chan, &data);

	list_del(&task->list_entry);
}

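/*
 * Error path: suspend the thread, point its PC at the next pending task (if
 * any), then resume execution.
 */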
static void cmdq_task_handle_error(struct cmdq_task *task)
{
	struct cmdq_thread *thread = task->thread;
	struct cmdq_task *next_task;
	struct cmdq *cmdq = task->cmdq;

	dev_err(cmdq->mbox.dev, "task 0x%p error\n", task);
	WARN_ON(cmdq_thread_suspend(cmdq, thread) < 0);

	next_task = list_first_entry_or_null(&thread->task_busy_list,
			struct cmdq_task, list_entry);
	if (next_task)
		writel(next_task->pa_base >> cmdq->pdata->shift,
		       thread->base + CMDQ_THR_CURR_ADDR);

	cmdq_thread_resume(thread);
}

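/*
 * Per-thread IRQ work, called under chan->lock: acknowledge the thread's IRQ
 * flags, then walk task_busy_list completing every task the PC has passed;
 * on an error IRQ the current task completes with -ENOEXEC.
 */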
static void cmdq_thread_irq_handler(struct cmdq *cmdq,
				    struct cmdq_thread *thread)
{
	struct cmdq_task *task, *tmp, *curr_task = NULL;
	u32 curr_pa, irq_flag, task_end_pa;
	bool err;

	irq_flag = readl(thread->base + CMDQ_THR_IRQ_STATUS);
	writel(~irq_flag, thread->base + CMDQ_THR_IRQ_STATUS);

	/*
	 * When the ISR calls this function, another CPU core could run
	 * "release task" right before we acquire the spin lock, and thus
	 * reset / disable this GCE thread, so we need to check the enable
	 * bit of this GCE thread.
	 */
	if (!(readl(thread->base + CMDQ_THR_ENABLE_TASK) & CMDQ_THR_ENABLED))
		return;

	if (irq_flag & CMDQ_THR_IRQ_ERROR)
		err = true;
	else if (irq_flag & CMDQ_THR_IRQ_DONE)
		err = false;
	else
		return;

	curr_pa = readl(thread->base + CMDQ_THR_CURR_ADDR) << cmdq->pdata->shift;

	list_for_each_entry_safe(task, tmp, &thread->task_busy_list,
				 list_entry) {
		task_end_pa = task->pa_base + task->pkt->cmd_buf_size;
		if (curr_pa >= task->pa_base && curr_pa < task_end_pa)
			curr_task = task;

		if (!curr_task || curr_pa == task_end_pa - CMDQ_INST_SIZE) {
			cmdq_task_exec_done(task, 0);
			kfree(task);
		} else if (err) {
			cmdq_task_exec_done(task, -ENOEXEC);
			cmdq_task_handle_error(curr_task);
			kfree(task);
		}

		if (curr_task)
			break;
	}

	if (list_empty(&thread->task_busy_list))
		cmdq_thread_disable(cmdq, thread);
}

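/*
 * Top-level ISR. CMDQ_CURR_IRQ_STATUS is active-low (a cleared bit means the
 * corresponding thread is signalling), hence the XOR-against-mask check and
 * the for_each_clear_bit() walk below.
 */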
static irqreturn_t cmdq_irq_handler(int irq, void *dev)
{
	struct cmdq *cmdq = dev;
	unsigned long irq_status, flags = 0L;
	int bit;

	irq_status = readl(cmdq->base + CMDQ_CURR_IRQ_STATUS) & cmdq->irq_mask;
	if (!(irq_status ^ cmdq->irq_mask))
		return IRQ_NONE;

	for_each_clear_bit(bit, &irq_status, cmdq->pdata->thread_nr) {
		struct cmdq_thread *thread = &cmdq->thread[bit];

		spin_lock_irqsave(&thread->chan->lock, flags);
		cmdq_thread_irq_handler(cmdq, thread);
		spin_unlock_irqrestore(&thread->chan->lock, flags);
	}

	pm_runtime_mark_last_busy(cmdq->mbox.dev);

	return IRQ_HANDLED;
}

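/* Runtime PM: gate the GCE clocks and the GCTL DDR enable around idle periods. */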
static int cmdq_runtime_resume(struct device *dev)
{
	struct cmdq *cmdq = dev_get_drvdata(dev);
	int ret;

	ret = clk_bulk_enable(cmdq->pdata->gce_num, cmdq->clocks);
	if (ret)
		return ret;

	cmdq_gctl_value_toggle(cmdq, true);
	return 0;
}

static int cmdq_runtime_suspend(struct device *dev)
{
	struct cmdq *cmdq = dev_get_drvdata(dev);

	cmdq_gctl_value_toggle(cmdq, false);
	clk_bulk_disable(cmdq->pdata->gce_num, cmdq->clocks);
	return 0;
}

static int cmdq_suspend(struct device *dev)
{
	struct cmdq *cmdq = dev_get_drvdata(dev);
	struct cmdq_thread *thread;
	int i;
	bool task_running = false;

	cmdq->suspended = true;

	for (i = 0; i < cmdq->pdata->thread_nr; i++) {
		thread = &cmdq->thread[i];
		if (!list_empty(&thread->task_busy_list)) {
			task_running = true;
			break;
		}
	}

	if (task_running)
		dev_warn(dev, "exist running task(s) in suspend\n");

	return pm_runtime_force_suspend(dev);
}

static int cmdq_resume(struct device *dev)
{
	struct cmdq *cmdq = dev_get_drvdata(dev);

	WARN_ON(pm_runtime_force_resume(dev));
	cmdq->suspended = false;
	return 0;
}

static void cmdq_remove(struct platform_device *pdev)
{
	struct cmdq *cmdq = platform_get_drvdata(pdev);

	if (!IS_ENABLED(CONFIG_PM))
		cmdq_runtime_suspend(&pdev->dev);

	clk_bulk_unprepare(cmdq->pdata->gce_num, cmdq->clocks);
}

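/*
 * mbox .send_data: queue one cmdq_pkt on the channel's GCE thread. If the
 * thread is idle it is reset and started at the packet; otherwise the thread
 * is briefly suspended so the packet can be chained after the current queue.
 * Called with chan->lock held by the mailbox core, hence GFP_ATOMIC.
 */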
static int cmdq_mbox_send_data(struct mbox_chan *chan, void *data)
{
	struct cmdq_pkt *pkt = (struct cmdq_pkt *)data;
	struct cmdq_thread *thread = (struct cmdq_thread *)chan->con_priv;
	struct cmdq *cmdq = dev_get_drvdata(chan->mbox->dev);
	struct cmdq_task *task;
	unsigned long curr_pa, end_pa;

	/* Client should not flush new tasks if suspended. */
	WARN_ON(cmdq->suspended);

	task = kzalloc(sizeof(*task), GFP_ATOMIC);
	if (!task)
		return -ENOMEM;

	task->cmdq = cmdq;
	INIT_LIST_HEAD(&task->list_entry);
	task->pa_base = pkt->pa_base;
	task->thread = thread;
	task->pkt = pkt;

	if (list_empty(&thread->task_busy_list)) {
		/*
		 * A thread reset clears the thread's registers (pc, end,
		 * priority, irq, suspend and enable) to 0. Writing
		 * CMDQ_THR_ENABLED to CMDQ_THR_ENABLE_TASK afterwards enables
		 * the thread and starts it running.
		 */
		WARN_ON(cmdq_thread_reset(cmdq, thread) < 0);

		writel(task->pa_base >> cmdq->pdata->shift,
		       thread->base + CMDQ_THR_CURR_ADDR);
		writel((task->pa_base + pkt->cmd_buf_size) >> cmdq->pdata->shift,
		       thread->base + CMDQ_THR_END_ADDR);

		writel(thread->priority, thread->base + CMDQ_THR_PRIORITY);
		writel(CMDQ_THR_IRQ_EN, thread->base + CMDQ_THR_IRQ_ENABLE);
		writel(CMDQ_THR_ENABLED, thread->base + CMDQ_THR_ENABLE_TASK);
	} else {
		WARN_ON(cmdq_thread_suspend(cmdq, thread) < 0);
		curr_pa = readl(thread->base + CMDQ_THR_CURR_ADDR) <<
			cmdq->pdata->shift;
		end_pa = readl(thread->base + CMDQ_THR_END_ADDR) <<
			cmdq->pdata->shift;
		/* check boundary */
		if (curr_pa == end_pa - CMDQ_INST_SIZE ||
		    curr_pa == end_pa) {
			/* set to this task directly */
			writel(task->pa_base >> cmdq->pdata->shift,
			       thread->base + CMDQ_THR_CURR_ADDR);
		} else {
			cmdq_task_insert_into_thread(task);
			smp_mb(); /* modify jump before enable thread */
		}
		writel((task->pa_base + pkt->cmd_buf_size) >> cmdq->pdata->shift,
		       thread->base + CMDQ_THR_END_ADDR);
		cmdq_thread_resume(thread);
	}
	list_move_tail(&task->list_entry, &thread->task_busy_list);

	return 0;
}

static int cmdq_mbox_startup(struct mbox_chan *chan)
{
	return 0;
}

static void cmdq_mbox_shutdown(struct mbox_chan *chan)
{
	struct cmdq_thread *thread = (struct cmdq_thread *)chan->con_priv;
	struct cmdq *cmdq = dev_get_drvdata(chan->mbox->dev);
	struct cmdq_task *task, *tmp;
	unsigned long flags;

	WARN_ON(pm_runtime_get_sync(cmdq->mbox.dev) < 0);

	spin_lock_irqsave(&thread->chan->lock, flags);
	if (list_empty(&thread->task_busy_list))
		goto done;

	WARN_ON(cmdq_thread_suspend(cmdq, thread) < 0);

	/* make sure executed tasks have success callback */
	cmdq_thread_irq_handler(cmdq, thread);
	if (list_empty(&thread->task_busy_list))
		goto done;

	list_for_each_entry_safe(task, tmp, &thread->task_busy_list,
				 list_entry) {
		cmdq_task_exec_done(task, -ECONNABORTED);
		kfree(task);
	}

	cmdq_thread_disable(cmdq, thread);

done:
	/*
	 * An empty thread->task_busy_list means the thread is already
	 * disabled. cmdq_mbox_send_data() always resets the thread, clearing
	 * the disable and suspend state when the first packet is sent to the
	 * channel, so there is nothing more to do here; just unlock and
	 * leave.
	 */
	spin_unlock_irqrestore(&thread->chan->lock, flags);
	pm_runtime_mark_last_busy(cmdq->mbox.dev);
	pm_runtime_put_autosuspend(cmdq->mbox.dev);
}

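/*
 * mbox .flush: if the thread is parked in a wait-for-event, abort every
 * queued task with -ECONNABORTED and disable the thread; otherwise let it
 * run and poll (for up to @timeout microseconds) until the thread disables
 * itself after draining its queue.
 */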
static int cmdq_mbox_flush(struct mbox_chan *chan, unsigned long timeout)
{
	struct cmdq_thread *thread = (struct cmdq_thread *)chan->con_priv;
	struct cmdq_cb_data data;
	struct cmdq *cmdq = dev_get_drvdata(chan->mbox->dev);
	struct cmdq_task *task, *tmp;
	unsigned long flags;
	u32 enable;
	int ret;

	ret = pm_runtime_get_sync(cmdq->mbox.dev);
	if (ret < 0)
		return ret;

	spin_lock_irqsave(&thread->chan->lock, flags);
	if (list_empty(&thread->task_busy_list))
		goto out;

	WARN_ON(cmdq_thread_suspend(cmdq, thread) < 0);
	if (!cmdq_thread_is_in_wfe(thread))
		goto wait;

	list_for_each_entry_safe(task, tmp, &thread->task_busy_list,
				 list_entry) {
		data.sta = -ECONNABORTED;
		data.pkt = task->pkt;
		mbox_chan_received_data(task->thread->chan, &data);
		list_del(&task->list_entry);
		kfree(task);
	}

	cmdq_thread_resume(thread);
	cmdq_thread_disable(cmdq, thread);

out:
	spin_unlock_irqrestore(&thread->chan->lock, flags);
	pm_runtime_mark_last_busy(cmdq->mbox.dev);
	pm_runtime_put_autosuspend(cmdq->mbox.dev);

	return 0;

wait:
	cmdq_thread_resume(thread);
	spin_unlock_irqrestore(&thread->chan->lock, flags);
	if (readl_poll_timeout_atomic(thread->base + CMDQ_THR_ENABLE_TASK,
				      enable, enable == 0, 1, timeout)) {
		dev_err(cmdq->mbox.dev, "Fail to wait GCE thread 0x%x done\n",
			(u32)(thread->base - cmdq->base));

		return -EFAULT;
	}
	pm_runtime_mark_last_busy(cmdq->mbox.dev);
	pm_runtime_put_autosuspend(cmdq->mbox.dev);
	return 0;
}

static const struct mbox_chan_ops cmdq_mbox_chan_ops = {
	.send_data = cmdq_mbox_send_data,
	.startup = cmdq_mbox_startup,
	.shutdown = cmdq_mbox_shutdown,
	.flush = cmdq_mbox_flush,
};

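/* of_xlate: args[0] selects the GCE thread (channel), args[1] sets its priority. */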
static struct mbox_chan *cmdq_xlate(struct mbox_controller *mbox,
				    const struct of_phandle_args *sp)
{
	int ind = sp->args[0];
	struct cmdq_thread *thread;

	if (ind >= mbox->num_chans)
		return ERR_PTR(-EINVAL);

	thread = (struct cmdq_thread *)mbox->chans[ind].con_priv;
	thread->priority = sp->args[1];
	thread->chan = &mbox->chans[ind];

	return &mbox->chans[ind];
}

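/*
 * Look up the GCE clock(s): a single "gce" clock for one-GCE SoCs, or one
 * clock per GCE instance (matched by "gce" alias id) on multi-GCE SoCs.
 */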
static int cmdq_get_clocks(struct device *dev, struct cmdq *cmdq)
{
	static const char * const gce_name = "gce";
	struct device_node *node, *parent = dev->of_node->parent;
	struct clk_bulk_data *clks;

	cmdq->clocks = devm_kcalloc(dev, cmdq->pdata->gce_num,
				    sizeof(*cmdq->clocks), GFP_KERNEL);
	if (!cmdq->clocks)
		return -ENOMEM;

	if (cmdq->pdata->gce_num == 1) {
		clks = &cmdq->clocks[0];

		clks->id = gce_name;
		clks->clk = devm_clk_get(dev, NULL);
		if (IS_ERR(clks->clk))
			return dev_err_probe(dev, PTR_ERR(clks->clk),
					     "failed to get gce clock\n");

		return 0;
	}

	/*
	 * If there is more than one GCE, get the clocks for the others too,
	 * as the clock of the main GCE must be enabled for additional IPs
	 * to be reachable.
	 */
	for_each_child_of_node(parent, node) {
		int alias_id = of_alias_get_id(node, gce_name);

		if (alias_id < 0 || alias_id >= cmdq->pdata->gce_num)
			continue;

		clks = &cmdq->clocks[alias_id];

		clks->id = devm_kasprintf(dev, GFP_KERNEL, "gce%d", alias_id);
		if (!clks->id) {
			of_node_put(node);
			return -ENOMEM;
		}

		clks->clk = of_clk_get(node, 0);
		if (IS_ERR(clks->clk)) {
			of_node_put(node);
			return dev_err_probe(dev, PTR_ERR(clks->clk),
					     "failed to get gce%d clock\n", alias_id);
		}
	}

	return 0;
}

static int cmdq_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct cmdq *cmdq;
	int err, i;

	cmdq = devm_kzalloc(dev, sizeof(*cmdq), GFP_KERNEL);
	if (!cmdq)
		return -ENOMEM;

	cmdq->base = devm_platform_ioremap_resource(pdev, 0);
	if (IS_ERR(cmdq->base))
		return PTR_ERR(cmdq->base);

	cmdq->irq = platform_get_irq(pdev, 0);
	if (cmdq->irq < 0)
		return cmdq->irq;

	cmdq->pdata = device_get_match_data(dev);
	if (!cmdq->pdata) {
		dev_err(dev, "failed to get match data\n");
		return -EINVAL;
	}

	cmdq->irq_mask = GENMASK(cmdq->pdata->thread_nr - 1, 0);

	dev_dbg(dev, "cmdq device: addr:0x%p, va:0x%p, irq:%d\n",
		dev, cmdq->base, cmdq->irq);

	err = cmdq_get_clocks(dev, cmdq);
	if (err)
		return err;

	cmdq->mbox.dev = dev;
	cmdq->mbox.chans = devm_kcalloc(dev, cmdq->pdata->thread_nr,
					sizeof(*cmdq->mbox.chans), GFP_KERNEL);
	if (!cmdq->mbox.chans)
		return -ENOMEM;

	cmdq->mbox.num_chans = cmdq->pdata->thread_nr;
	cmdq->mbox.ops = &cmdq_mbox_chan_ops;
	cmdq->mbox.of_xlate = cmdq_xlate;

	/* make use of TXDONE_BY_ACK */
	cmdq->mbox.txdone_irq = false;
	cmdq->mbox.txdone_poll = false;

	cmdq->thread = devm_kcalloc(dev, cmdq->pdata->thread_nr,
				    sizeof(*cmdq->thread), GFP_KERNEL);
	if (!cmdq->thread)
		return -ENOMEM;

	for (i = 0; i < cmdq->pdata->thread_nr; i++) {
		cmdq->thread[i].base = cmdq->base + CMDQ_THR_BASE +
				CMDQ_THR_SIZE * i;
		INIT_LIST_HEAD(&cmdq->thread[i].task_busy_list);
		cmdq->mbox.chans[i].con_priv = (void *)&cmdq->thread[i];
	}

	platform_set_drvdata(pdev, cmdq);

	WARN_ON(clk_bulk_prepare(cmdq->pdata->gce_num, cmdq->clocks));

	cmdq_init(cmdq);

	err = devm_request_irq(dev, cmdq->irq, cmdq_irq_handler, IRQF_SHARED,
			       "mtk_cmdq", cmdq);
	if (err < 0) {
		dev_err(dev, "failed to register ISR (%d)\n", err);
		return err;
	}

	/* If Runtime PM is not available, enable the clocks now. */
	if (!IS_ENABLED(CONFIG_PM)) {
		err = cmdq_runtime_resume(dev);
		if (err)
			return err;
	}

	err = devm_pm_runtime_enable(dev);
	if (err)
		return err;

	pm_runtime_set_autosuspend_delay(dev, CMDQ_MBOX_AUTOSUSPEND_DELAY_MS);
	pm_runtime_use_autosuspend(dev);

	err = devm_mbox_controller_register(dev, &cmdq->mbox);
	if (err < 0) {
		dev_err(dev, "failed to register mailbox: %d\n", err);
		return err;
	}

	return 0;
}

static const struct dev_pm_ops cmdq_pm_ops = {
	.suspend = cmdq_suspend,
	.resume = cmdq_resume,
	SET_RUNTIME_PM_OPS(cmdq_runtime_suspend,
			   cmdq_runtime_resume, NULL)
};

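/*
 * Per-SoC GCE parameters: thread count, the shift applied to command-buffer
 * physical addresses, software clock/DDR control, and the number of GCE
 * hardware instances.
 */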
static const struct gce_plat gce_plat_mt6779 = {
	.thread_nr = 24,
	.shift = 3,
	.control_by_sw = false,
	.gce_num = 1
};

static const struct gce_plat gce_plat_mt8173 = {
	.thread_nr = 16,
	.shift = 0,
	.control_by_sw = false,
	.gce_num = 1
};

static const struct gce_plat gce_plat_mt8183 = {
	.thread_nr = 24,
	.shift = 0,
	.control_by_sw = false,
	.gce_num = 1
};

static const struct gce_plat gce_plat_mt8186 = {
	.thread_nr = 24,
	.shift = 3,
	.control_by_sw = true,
	.sw_ddr_en = true,
	.gce_num = 1
};

static const struct gce_plat gce_plat_mt8188 = {
	.thread_nr = 32,
	.shift = 3,
	.control_by_sw = true,
	.gce_num = 2
};

static const struct gce_plat gce_plat_mt8192 = {
	.thread_nr = 24,
	.shift = 3,
	.control_by_sw = true,
	.gce_num = 1
};

static const struct gce_plat gce_plat_mt8195 = {
	.thread_nr = 24,
	.shift = 3,
	.control_by_sw = true,
	.gce_num = 2
};

static const struct of_device_id cmdq_of_ids[] = {
	{.compatible = "mediatek,mt6779-gce", .data = (void *)&gce_plat_mt6779},
	{.compatible = "mediatek,mt8173-gce", .data = (void *)&gce_plat_mt8173},
	{.compatible = "mediatek,mt8183-gce", .data = (void *)&gce_plat_mt8183},
	{.compatible = "mediatek,mt8186-gce", .data = (void *)&gce_plat_mt8186},
	{.compatible = "mediatek,mt8188-gce", .data = (void *)&gce_plat_mt8188},
	{.compatible = "mediatek,mt8192-gce", .data = (void *)&gce_plat_mt8192},
	{.compatible = "mediatek,mt8195-gce", .data = (void *)&gce_plat_mt8195},
	{}
};
MODULE_DEVICE_TABLE(of, cmdq_of_ids);

static struct platform_driver cmdq_drv = {
	.probe = cmdq_probe,
	.remove_new = cmdq_remove,
	.driver = {
		.name = "mtk_cmdq",
		.pm = &cmdq_pm_ops,
		.of_match_table = cmdq_of_ids,
	}
};

static int __init cmdq_drv_init(void)
{
	return platform_driver_register(&cmdq_drv);
}

static void __exit cmdq_drv_exit(void)
{
	platform_driver_unregister(&cmdq_drv);
}

subsys_initcall(cmdq_drv_init);
module_exit(cmdq_drv_exit);

MODULE_DESCRIPTION("MediaTek Command Queue (CMDQ) Mailbox driver");
MODULE_LICENSE("GPL v2");