// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2017-2018 The Linux Foundation. All rights reserved. */

#include <linux/clk.h>
#include <linux/iopoll.h>
#include <linux/pm_opp.h>
#include <soc/qcom/cmd-db.h>

#include "a6xx_gpu.h"
#include "a6xx_gmu.xml.h"

static irqreturn_t a6xx_gmu_irq(int irq, void *data)
{
	struct a6xx_gmu *gmu = data;
	u32 status;

	status = gmu_read(gmu, REG_A6XX_GMU_AO_HOST_INTERRUPT_STATUS);
	gmu_write(gmu, REG_A6XX_GMU_AO_HOST_INTERRUPT_CLR, status);

	if (status & A6XX_GMU_AO_HOST_INTERRUPT_STATUS_WDOG_BITE) {
		dev_err_ratelimited(gmu->dev, "GMU watchdog expired\n");

		/* Temporary until we can recover safely */
		BUG();
	}

	if (status & A6XX_GMU_AO_HOST_INTERRUPT_STATUS_HOST_AHB_BUS_ERROR)
		dev_err_ratelimited(gmu->dev, "GMU AHB bus error\n");

	if (status & A6XX_GMU_AO_HOST_INTERRUPT_STATUS_FENCE_ERR)
		dev_err_ratelimited(gmu->dev, "GMU fence error: 0x%x\n",
			gmu_read(gmu, REG_A6XX_GMU_AHB_FENCE_STATUS));

	return IRQ_HANDLED;
}
static irqreturn_t a6xx_hfi_irq(int irq, void *data)
{
	struct a6xx_gmu *gmu = data;
	u32 status;

	status = gmu_read(gmu, REG_A6XX_GMU_GMU2HOST_INTR_INFO);
	gmu_write(gmu, REG_A6XX_GMU_GMU2HOST_INTR_CLR, status);

	if (status & A6XX_GMU_GMU2HOST_INTR_INFO_MSGQ)
		tasklet_schedule(&gmu->hfi_tasklet);

	if (status & A6XX_GMU_GMU2HOST_INTR_INFO_CM3_FAULT) {
		dev_err_ratelimited(gmu->dev, "GMU firmware fault\n");

		/* Temporary until we can recover safely */
		BUG();
	}

	return IRQ_HANDLED;
}
/* Check to see if the GX rail is still powered */
static bool a6xx_gmu_gx_is_on(struct a6xx_gmu *gmu)
{
	u32 val = gmu_read(gmu, REG_A6XX_GMU_SPTPRAC_PWR_CLK_STATUS);

	return !(val &
		(A6XX_GMU_SPTPRAC_PWR_CLK_STATUS_GX_HM_GDSC_POWER_OFF |
		A6XX_GMU_SPTPRAC_PWR_CLK_STATUS_GX_HM_CLK_OFF));
}
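
/*
 * Note: 'index' selects an entry in the GPU frequency table that is shared
 * with the GMU over HFI during initialization; the GMU performs the actual
 * clock and voltage switch (DCVS) on our behalf.
 */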
static int a6xx_gmu_set_freq(struct a6xx_gmu *gmu, int index)
{
	gmu_write(gmu, REG_A6XX_GMU_DCVS_ACK_OPTION, 0);

	gmu_write(gmu, REG_A6XX_GMU_DCVS_PERF_SETTING,
		((index << 24) & 0xff) | (3 & 0xf));

	/*
	 * Send an invalid index as a vote for the bus bandwidth and let the
	 * firmware decide on the right vote
	 */
	gmu_write(gmu, REG_A6XX_GMU_DCVS_BW_SETTING, 0xff);

	/* Set and clear the OOB for DCVS to trigger the GMU */
	a6xx_gmu_set_oob(gmu, GMU_OOB_DCVS_SET);
	a6xx_gmu_clear_oob(gmu, GMU_OOB_DCVS_SET);

	return gmu_read(gmu, REG_A6XX_GMU_DCVS_RETURN);
}
static bool a6xx_gmu_check_idle_level(struct a6xx_gmu *gmu)
{
	u32 val;
	int local = gmu->idle_level;

	/* SPTP and IFPC both report as IFPC */
	if (gmu->idle_level == GMU_IDLE_STATE_SPTP)
		local = GMU_IDLE_STATE_IFPC;

	val = gmu_read(gmu, REG_A6XX_GPU_GMU_CX_GMU_RPMH_POWER_STATE);

	if (val == local) {
		if (gmu->idle_level != GMU_IDLE_STATE_IFPC ||
			!a6xx_gmu_gx_is_on(gmu))
			return true;
	}

	return false;
}

/* Wait for the GMU to get to its most idle state */
int a6xx_gmu_wait_for_idle(struct a6xx_gpu *a6xx_gpu)
{
	struct a6xx_gmu *gmu = &a6xx_gpu->gmu;

	return spin_until(a6xx_gmu_check_idle_level(gmu));
}
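
/*
 * Boot completion is reported differently depending on the GMU firmware
 * version read from DTCM: firmware up to 0x20010004 writes the full
 * 0xbabeface cookie to FW_INIT_RESULT, while newer firmware only sets a
 * status value in the low nine bits, hence the two mask/reset value pairs.
 */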
static int a6xx_gmu_start(struct a6xx_gmu *gmu)
{
	int ret;
	u32 val;
	u32 mask, reset_val;

	val = gmu_read(gmu, REG_A6XX_GMU_CM3_DTCM_START + 0xff8);
	if (val <= 0x20010004) {
		mask = 0xffffffff;
		reset_val = 0xbabeface;
	} else {
		mask = 0x1ff;
		reset_val = 0x100;
	}

	gmu_write(gmu, REG_A6XX_GMU_CM3_SYSRESET, 1);
	gmu_write(gmu, REG_A6XX_GMU_CM3_SYSRESET, 0);

	ret = gmu_poll_timeout(gmu, REG_A6XX_GMU_CM3_FW_INIT_RESULT, val,
		(val & mask) == reset_val, 100, 10000);

	if (ret)
		dev_err(gmu->dev, "GMU firmware initialization timed out\n");

	return ret;
}
static int a6xx_gmu_hfi_start(struct a6xx_gmu *gmu)
{
	u32 val;
	int ret;

	gmu_rmw(gmu, REG_A6XX_GMU_GMU2HOST_INTR_MASK,
		A6XX_GMU_GMU2HOST_INTR_INFO_MSGQ, 0);

	gmu_write(gmu, REG_A6XX_GMU_HFI_CTRL_INIT, 1);

	ret = gmu_poll_timeout(gmu, REG_A6XX_GMU_HFI_CTRL_STATUS, val,
		val & 1, 100, 10000);
	if (ret)
		dev_err(gmu->dev, "Unable to start the HFI queues\n");

	return ret;
}
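
/*
 * Each OOB target uses a trio of bits: a request bit written to
 * HOST2GMU_INTR_SET, an ack bit the GMU raises in GMU2HOST_INTR_INFO, and a
 * clear bit written back to HOST2GMU_INTR_SET when the host releases the
 * request (see a6xx_gmu_clear_oob() below).
 */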
/* Trigger an OOB (out of band) request to the GMU */
int a6xx_gmu_set_oob(struct a6xx_gmu *gmu, enum a6xx_gmu_oob_state state)
{
	int ret;
	u32 val;
	int request, ack;
	const char *name;

	switch (state) {
	case GMU_OOB_GPU_SET:
		request = GMU_OOB_GPU_SET_REQUEST;
		ack = GMU_OOB_GPU_SET_ACK;
		name = "GPU_SET";
		break;
	case GMU_OOB_BOOT_SLUMBER:
		request = GMU_OOB_BOOT_SLUMBER_REQUEST;
		ack = GMU_OOB_BOOT_SLUMBER_ACK;
		name = "BOOT_SLUMBER";
		break;
	case GMU_OOB_DCVS_SET:
		request = GMU_OOB_DCVS_REQUEST;
		ack = GMU_OOB_DCVS_ACK;
		name = "GPU_DCVS";
		break;
	default:
		return -EINVAL;
	}

	/* Trigger the requested OOB operation */
	gmu_write(gmu, REG_A6XX_GMU_HOST2GMU_INTR_SET, 1 << request);

	/* Wait for the acknowledge interrupt */
	ret = gmu_poll_timeout(gmu, REG_A6XX_GMU_GMU2HOST_INTR_INFO, val,
		val & (1 << ack), 100, 10000);

	if (ret)
		dev_err(gmu->dev,
			"Timeout waiting for GMU OOB set %s: 0x%x\n",
			name,
			gmu_read(gmu, REG_A6XX_GMU_GMU2HOST_INTR_INFO));

	/* Clear the acknowledge interrupt */
	gmu_write(gmu, REG_A6XX_GMU_GMU2HOST_INTR_CLR, 1 << ack);

	return ret;
}
/* Clear a pending OOB state in the GMU */
void a6xx_gmu_clear_oob(struct a6xx_gmu *gmu, enum a6xx_gmu_oob_state state)
{
	switch (state) {
	case GMU_OOB_GPU_SET:
		gmu_write(gmu, REG_A6XX_GMU_HOST2GMU_INTR_SET,
			1 << GMU_OOB_GPU_SET_CLEAR);
		break;
	case GMU_OOB_BOOT_SLUMBER:
		gmu_write(gmu, REG_A6XX_GMU_HOST2GMU_INTR_SET,
			1 << GMU_OOB_BOOT_SLUMBER_CLEAR);
		break;
	case GMU_OOB_DCVS_SET:
		gmu_write(gmu, REG_A6XX_GMU_HOST2GMU_INTR_SET,
			1 << GMU_OOB_DCVS_CLEAR);
		break;
	}
}
/* Enable CPU control of SPTP power collapse */
static int a6xx_sptprac_enable(struct a6xx_gmu *gmu)
{
	int ret;
	u32 val;

	gmu_write(gmu, REG_A6XX_GMU_GX_SPTPRAC_POWER_CONTROL, 0x778000);

	ret = gmu_poll_timeout(gmu, REG_A6XX_GMU_SPTPRAC_PWR_CLK_STATUS, val,
		(val & 0x38) == 0x28, 1, 100);

	if (ret) {
		dev_err(gmu->dev, "Unable to power on SPTPRAC: 0x%x\n",
			gmu_read(gmu, REG_A6XX_GMU_SPTPRAC_PWR_CLK_STATUS));
	}

	return ret;
}
/* Disable CPU control of SPTP power collapse */
static void a6xx_sptprac_disable(struct a6xx_gmu *gmu)
{
	u32 val;
	int ret;

	/* Make sure retention is on */
	gmu_rmw(gmu, REG_A6XX_GPU_CC_GX_GDSCR, 0, (1 << 11));

	gmu_write(gmu, REG_A6XX_GMU_GX_SPTPRAC_POWER_CONTROL, 0x778001);

	ret = gmu_poll_timeout(gmu, REG_A6XX_GMU_SPTPRAC_PWR_CLK_STATUS, val,
		(val & 0x04), 100, 10000);

	if (ret)
		dev_err(gmu->dev, "failed to power off SPTPRAC: 0x%x\n",
			gmu_read(gmu, REG_A6XX_GMU_SPTPRAC_PWR_CLK_STATUS));
}
/* Let the GMU know we are starting a boot sequence */
static int a6xx_gmu_gfx_rail_on(struct a6xx_gmu *gmu)
{
	u32 vote;

	/* Let the GMU know we are getting ready for boot */
	gmu_write(gmu, REG_A6XX_GMU_BOOT_SLUMBER_OPTION, 0);

	/* Choose the "default" power level as the highest available */
	vote = gmu->gx_arc_votes[gmu->nr_gpu_freqs - 1];

	gmu_write(gmu, REG_A6XX_GMU_GX_VOTE_IDX, vote & 0xff);
	gmu_write(gmu, REG_A6XX_GMU_MX_VOTE_IDX, (vote >> 8) & 0xff);

	/* Let the GMU know the boot sequence has started */
	return a6xx_gmu_set_oob(gmu, GMU_OOB_BOOT_SLUMBER);
}
/* Let the GMU know that we are about to go into slumber */
static int a6xx_gmu_notify_slumber(struct a6xx_gmu *gmu)
{
	int ret;

	/* Disable the power counter so the GMU isn't busy */
	gmu_write(gmu, REG_A6XX_GMU_CX_GMU_POWER_COUNTER_ENABLE, 0);

	/* Disable SPTP_PC if the CPU is responsible for it */
	if (gmu->idle_level < GMU_IDLE_STATE_SPTP)
		a6xx_sptprac_disable(gmu);

	/* Tell the GMU to get ready to slumber */
	gmu_write(gmu, REG_A6XX_GMU_BOOT_SLUMBER_OPTION, 1);

	ret = a6xx_gmu_set_oob(gmu, GMU_OOB_BOOT_SLUMBER);
	a6xx_gmu_clear_oob(gmu, GMU_OOB_BOOT_SLUMBER);

	if (!ret) {
		/* Check to see if the GMU really did slumber */
		if (gmu_read(gmu, REG_A6XX_GPU_GMU_CX_GMU_RPMH_POWER_STATE)
			!= 0x0f) {
			dev_err(gmu->dev, "The GMU did not go into slumber\n");
			ret = -ETIMEDOUT;
		}
	}

	/* Put fence into allow mode */
	gmu_write(gmu, REG_A6XX_GMU_AO_AHB_FENCE_CTRL, 0);
	return ret;
}
static int a6xx_rpmh_start(struct a6xx_gmu *gmu)
{
	int ret;
	u32 val;

	gmu_write(gmu, REG_A6XX_GMU_RSCC_CONTROL_REQ, 1 << 1);
	/* Wait for the register to finish posting */
	wmb();

	ret = gmu_poll_timeout(gmu, REG_A6XX_GMU_RSCC_CONTROL_ACK, val,
		val & (1 << 1), 100, 10000);
	if (ret) {
		dev_err(gmu->dev, "Unable to power on the GPU RSC\n");
		return ret;
	}

	ret = gmu_poll_timeout(gmu, REG_A6XX_RSCC_SEQ_BUSY_DRV0, val,
		!val, 100, 10000);

	if (!ret) {
		gmu_write(gmu, REG_A6XX_GMU_RSCC_CONTROL_REQ, 0);

		/* Re-enable the power counter */
		gmu_write(gmu, REG_A6XX_GMU_CX_GMU_POWER_COUNTER_ENABLE, 1);
		return 0;
	}

	dev_err(gmu->dev, "GPU RSC sequence stuck while waking up the GPU\n");
	return ret;
}
static void a6xx_rpmh_stop(struct a6xx_gmu *gmu)
{
	int ret;
	u32 val;

	gmu_write(gmu, REG_A6XX_GMU_RSCC_CONTROL_REQ, 1);

	ret = gmu_poll_timeout(gmu, REG_A6XX_GPU_RSCC_RSC_STATUS0_DRV0,
		val, val & (1 << 16), 100, 10000);
	if (ret)
		dev_err(gmu->dev, "Unable to power off the GPU RSC\n");

	gmu_write(gmu, REG_A6XX_GMU_RSCC_CONTROL_REQ, 0);
}
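
/*
 * One-time programming of the RSC and PDC sequencers with the microcode and
 * TCS commands used for the GPU sleep/wakeup handshake. This only needs to
 * happen once per boot (see the rpmh_init flag in a6xx_gmu_fw_start()).
 */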
static void a6xx_gmu_rpmh_init(struct a6xx_gmu *gmu)
{
	/* Disable SDE clock gating */
	gmu_write(gmu, REG_A6XX_GPU_RSCC_RSC_STATUS0_DRV0, BIT(24));

	/* Setup RSC PDC handshake for sleep and wakeup */
	gmu_write(gmu, REG_A6XX_RSCC_PDC_SLAVE_ID_DRV0, 1);
	gmu_write(gmu, REG_A6XX_RSCC_HIDDEN_TCS_CMD0_DATA, 0);
	gmu_write(gmu, REG_A6XX_RSCC_HIDDEN_TCS_CMD0_ADDR, 0);
	gmu_write(gmu, REG_A6XX_RSCC_HIDDEN_TCS_CMD0_DATA + 2, 0);
	gmu_write(gmu, REG_A6XX_RSCC_HIDDEN_TCS_CMD0_ADDR + 2, 0);
	gmu_write(gmu, REG_A6XX_RSCC_HIDDEN_TCS_CMD0_DATA + 4, 0x80000000);
	gmu_write(gmu, REG_A6XX_RSCC_HIDDEN_TCS_CMD0_ADDR + 4, 0);
	gmu_write(gmu, REG_A6XX_RSCC_OVERRIDE_START_ADDR, 0);
	gmu_write(gmu, REG_A6XX_RSCC_PDC_SEQ_START_ADDR, 0x4520);
	gmu_write(gmu, REG_A6XX_RSCC_PDC_MATCH_VALUE_LO, 0x4510);
	gmu_write(gmu, REG_A6XX_RSCC_PDC_MATCH_VALUE_HI, 0x4514);

	/* Load RSC sequencer uCode for sleep and wakeup */
	gmu_write(gmu, REG_A6XX_RSCC_SEQ_MEM_0_DRV0, 0xa7a506a0);
	gmu_write(gmu, REG_A6XX_RSCC_SEQ_MEM_0_DRV0 + 1, 0xa1e6a6e7);
	gmu_write(gmu, REG_A6XX_RSCC_SEQ_MEM_0_DRV0 + 2, 0xa2e081e1);
	gmu_write(gmu, REG_A6XX_RSCC_SEQ_MEM_0_DRV0 + 3, 0xe9a982e2);
	gmu_write(gmu, REG_A6XX_RSCC_SEQ_MEM_0_DRV0 + 4, 0x0020e8a8);

	/* Load PDC sequencer uCode for power up and power down sequence */
	pdc_write(gmu, REG_A6XX_PDC_GPU_SEQ_MEM_0, 0xfebea1e1);
	pdc_write(gmu, REG_A6XX_PDC_GPU_SEQ_MEM_0 + 1, 0xa5a4a3a2);
	pdc_write(gmu, REG_A6XX_PDC_GPU_SEQ_MEM_0 + 2, 0x8382a6e0);
	pdc_write(gmu, REG_A6XX_PDC_GPU_SEQ_MEM_0 + 3, 0xbce3e284);
	pdc_write(gmu, REG_A6XX_PDC_GPU_SEQ_MEM_0 + 4, 0x002081fc);

	/* Set TCS commands used by PDC sequence for low power modes */
	pdc_write(gmu, REG_A6XX_PDC_GPU_TCS1_CMD_ENABLE_BANK, 7);
	pdc_write(gmu, REG_A6XX_PDC_GPU_TCS1_CMD_WAIT_FOR_CMPL_BANK, 0);
	pdc_write(gmu, REG_A6XX_PDC_GPU_TCS1_CONTROL, 0);
	pdc_write(gmu, REG_A6XX_PDC_GPU_TCS1_CMD0_MSGID, 0x10108);
	pdc_write(gmu, REG_A6XX_PDC_GPU_TCS1_CMD0_ADDR, 0x30010);
	pdc_write(gmu, REG_A6XX_PDC_GPU_TCS1_CMD0_DATA, 1);
	pdc_write(gmu, REG_A6XX_PDC_GPU_TCS1_CMD0_MSGID + 4, 0x10108);
	pdc_write(gmu, REG_A6XX_PDC_GPU_TCS1_CMD0_ADDR + 4, 0x30000);
	pdc_write(gmu, REG_A6XX_PDC_GPU_TCS1_CMD0_DATA + 4, 0x0);
	pdc_write(gmu, REG_A6XX_PDC_GPU_TCS1_CMD0_MSGID + 8, 0x10108);
	pdc_write(gmu, REG_A6XX_PDC_GPU_TCS1_CMD0_ADDR + 8, 0x30080);
	pdc_write(gmu, REG_A6XX_PDC_GPU_TCS1_CMD0_DATA + 8, 0x0);
	pdc_write(gmu, REG_A6XX_PDC_GPU_TCS3_CMD_ENABLE_BANK, 7);
	pdc_write(gmu, REG_A6XX_PDC_GPU_TCS3_CMD_WAIT_FOR_CMPL_BANK, 0);
	pdc_write(gmu, REG_A6XX_PDC_GPU_TCS3_CONTROL, 0);
	pdc_write(gmu, REG_A6XX_PDC_GPU_TCS3_CMD0_MSGID, 0x10108);
	pdc_write(gmu, REG_A6XX_PDC_GPU_TCS3_CMD0_ADDR, 0x30010);
	pdc_write(gmu, REG_A6XX_PDC_GPU_TCS3_CMD0_DATA, 2);
	pdc_write(gmu, REG_A6XX_PDC_GPU_TCS3_CMD0_MSGID + 4, 0x10108);
	pdc_write(gmu, REG_A6XX_PDC_GPU_TCS3_CMD0_ADDR + 4, 0x30000);
	pdc_write(gmu, REG_A6XX_PDC_GPU_TCS3_CMD0_DATA + 4, 0x3);
	pdc_write(gmu, REG_A6XX_PDC_GPU_TCS3_CMD0_MSGID + 8, 0x10108);
	pdc_write(gmu, REG_A6XX_PDC_GPU_TCS3_CMD0_ADDR + 8, 0x30080);
	pdc_write(gmu, REG_A6XX_PDC_GPU_TCS3_CMD0_DATA + 8, 0x3);

	/* Setup GPU PDC */
	pdc_write(gmu, REG_A6XX_PDC_GPU_SEQ_START_ADDR, 0);
	pdc_write(gmu, REG_A6XX_PDC_GPU_ENABLE_PDC, 0x80000001);

	/* ensure no writes happen before the uCode is fully written */
	wmb();
}
/*
 * The lowest 16 bits of this value are the number of XO clock cycles for main
 * hysteresis which is set at 0x1680 cycles (300 us). The higher 16 bits are
 * for the shorter hysteresis that happens after main - this is 0xa (.5 us)
 */
#define GMU_PWR_COL_HYST 0x000a1680
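/*
 * Assuming the usual 19.2 MHz XO clock: 0x1680 = 5760 cycles / 19.2 MHz
 * = 300 us, and 0xa = 10 cycles ~= 0.5 us.
 */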

/* Set up the idle state for the GMU */
static void a6xx_gmu_power_config(struct a6xx_gmu *gmu)
{
	/* Disable GMU WB/RB buffer */
	gmu_write(gmu, REG_A6XX_GMU_SYS_BUS_CONFIG, 0x1);

	gmu_write(gmu, REG_A6XX_GMU_PWR_COL_INTER_FRAME_CTRL, 0x9c40400);

	switch (gmu->idle_level) {
	case GMU_IDLE_STATE_IFPC:
		gmu_write(gmu, REG_A6XX_GMU_PWR_COL_INTER_FRAME_HYST,
			GMU_PWR_COL_HYST);
		gmu_rmw(gmu, REG_A6XX_GMU_PWR_COL_INTER_FRAME_CTRL, 0,
			A6XX_GMU_PWR_COL_INTER_FRAME_CTRL_IFPC_ENABLE |
			A6XX_GMU_PWR_COL_INTER_FRAME_CTRL_HM_POWER_COLLAPSE_ENABLE);
		/* Fall through */
	case GMU_IDLE_STATE_SPTP:
		gmu_write(gmu, REG_A6XX_GMU_PWR_COL_SPTPRAC_HYST,
			GMU_PWR_COL_HYST);
		gmu_rmw(gmu, REG_A6XX_GMU_PWR_COL_INTER_FRAME_CTRL, 0,
			A6XX_GMU_PWR_COL_INTER_FRAME_CTRL_IFPC_ENABLE |
			A6XX_GMU_PWR_COL_INTER_FRAME_CTRL_SPTPRAC_POWER_CONTROL_ENABLE);
	}

	/* Enable RPMh GPU client */
	gmu_rmw(gmu, REG_A6XX_GMU_RPMH_CTRL, 0,
		A6XX_GMU_RPMH_CTRL_RPMH_INTERFACE_ENABLE |
		A6XX_GMU_RPMH_CTRL_LLC_VOTE_ENABLE |
		A6XX_GMU_RPMH_CTRL_DDR_VOTE_ENABLE |
		A6XX_GMU_RPMH_CTRL_MX_VOTE_ENABLE |
		A6XX_GMU_RPMH_CTRL_CX_VOTE_ENABLE |
		A6XX_GMU_RPMH_CTRL_GFX_VOTE_ENABLE);
}
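
/*
 * Boot modes: GMU_WARM_BOOT restarts the RSC and reuses the firmware already
 * resident in ITCM; a cold boot (and GMU_RESET) reloads the CM3 firmware,
 * with GMU_RESET additionally skipping the RPMh restart since the sequencers
 * are already programmed.
 */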
static int a6xx_gmu_fw_start(struct a6xx_gmu *gmu, unsigned int state)
{
	static bool rpmh_init;
	struct a6xx_gpu *a6xx_gpu = container_of(gmu, struct a6xx_gpu, gmu);
	struct adreno_gpu *adreno_gpu = &a6xx_gpu->base;
	int i, ret;
	u32 chipid;
	u32 *image;

	if (state == GMU_WARM_BOOT) {
		ret = a6xx_rpmh_start(gmu);
		if (ret)
			return ret;
	} else {
		if (WARN(!adreno_gpu->fw[ADRENO_FW_GMU],
			"GMU firmware is not loaded\n"))
			return -ENOENT;

		/* Sanity check the size of the firmware that was loaded */
		if (adreno_gpu->fw[ADRENO_FW_GMU]->size > 0x8000) {
			dev_err(gmu->dev,
				"GMU firmware is bigger than the available region\n");
			return -EINVAL;
		}

		/* Turn on register retention */
		gmu_write(gmu, REG_A6XX_GMU_GENERAL_7, 1);

		/* We only need to load the RPMh microcode once */
		if (!rpmh_init) {
			a6xx_gmu_rpmh_init(gmu);
			rpmh_init = true;
		} else if (state != GMU_RESET) {
			ret = a6xx_rpmh_start(gmu);
			if (ret)
				return ret;
		}

		image = (u32 *) adreno_gpu->fw[ADRENO_FW_GMU]->data;

		for (i = 0; i < adreno_gpu->fw[ADRENO_FW_GMU]->size >> 2; i++)
			gmu_write(gmu, REG_A6XX_GMU_CM3_ITCM_START + i,
				image[i]);
	}

	gmu_write(gmu, REG_A6XX_GMU_CM3_FW_INIT_RESULT, 0);
	gmu_write(gmu, REG_A6XX_GMU_CM3_BOOT_CONFIG, 0x02);

	/* Write the iova of the HFI table */
	gmu_write(gmu, REG_A6XX_GMU_HFI_QTBL_ADDR, gmu->hfi->iova);
	gmu_write(gmu, REG_A6XX_GMU_HFI_QTBL_INFO, 1);

	gmu_write(gmu, REG_A6XX_GMU_AHB_FENCE_RANGE_0,
		(1 << 31) | (0xa << 18) | (0xa0));
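
	/*
	 * Pack the chip id for the GMU: core in bits [31:24], major in
	 * [23:16], minor shifted to bit 12 and patchid to bit 8.
	 */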
	chipid = adreno_gpu->rev.core << 24;
	chipid |= adreno_gpu->rev.major << 16;
	chipid |= adreno_gpu->rev.minor << 12;
	chipid |= adreno_gpu->rev.patchid << 8;

	gmu_write(gmu, REG_A6XX_GMU_HFI_SFR_ADDR, chipid);

	/* Set up the lowest idle level on the GMU */
	a6xx_gmu_power_config(gmu);

	ret = a6xx_gmu_start(gmu);
	if (ret)
		return ret;

	ret = a6xx_gmu_gfx_rail_on(gmu);
	if (ret)
		return ret;

	/* Enable SPTP_PC if the CPU is responsible for it */
	if (gmu->idle_level < GMU_IDLE_STATE_SPTP) {
		ret = a6xx_sptprac_enable(gmu);
		if (ret)
			return ret;
	}

	ret = a6xx_gmu_hfi_start(gmu);
	if (ret)
		return ret;

	/* FIXME: Do we need this wmb() here? */
	wmb();

	return 0;
}
#define A6XX_HFI_IRQ_MASK \
	(A6XX_GMU_GMU2HOST_INTR_INFO_MSGQ | \
	 A6XX_GMU_GMU2HOST_INTR_INFO_CM3_FAULT)

#define A6XX_GMU_IRQ_MASK \
	(A6XX_GMU_AO_HOST_INTERRUPT_STATUS_WDOG_BITE | \
	 A6XX_GMU_AO_HOST_INTERRUPT_STATUS_HOST_AHB_BUS_ERROR | \
	 A6XX_GMU_AO_HOST_INTERRUPT_STATUS_FENCE_ERR)

static void a6xx_gmu_irq_enable(struct a6xx_gmu *gmu)
{
	gmu_write(gmu, REG_A6XX_GMU_AO_HOST_INTERRUPT_CLR, ~0);
	gmu_write(gmu, REG_A6XX_GMU_GMU2HOST_INTR_CLR, ~0);

	gmu_write(gmu, REG_A6XX_GMU_AO_HOST_INTERRUPT_MASK,
		~A6XX_GMU_IRQ_MASK);
	gmu_write(gmu, REG_A6XX_GMU_GMU2HOST_INTR_MASK,
		~A6XX_HFI_IRQ_MASK);

	enable_irq(gmu->gmu_irq);
	enable_irq(gmu->hfi_irq);
}

static void a6xx_gmu_irq_disable(struct a6xx_gmu *gmu)
{
	disable_irq(gmu->gmu_irq);
	disable_irq(gmu->hfi_irq);

	gmu_write(gmu, REG_A6XX_GMU_AO_HOST_INTERRUPT_MASK, ~0);
	gmu_write(gmu, REG_A6XX_GMU_GMU2HOST_INTR_MASK, ~0);
}
int a6xx_gmu_reset(struct a6xx_gpu *a6xx_gpu)
{
	struct a6xx_gmu *gmu = &a6xx_gpu->gmu;
	int ret;
	u32 val;

	/* Flush all the queues */
	a6xx_hfi_stop(gmu);

	/* Stop the interrupts */
	a6xx_gmu_irq_disable(gmu);

	/* Force off SPTP in case the GMU is managing it */
	a6xx_sptprac_disable(gmu);

	/* Make sure there are no outstanding RPMh votes */
	gmu_poll_timeout(gmu, REG_A6XX_RSCC_TCS0_DRV0_STATUS, val,
		(val & 1), 100, 10000);
	gmu_poll_timeout(gmu, REG_A6XX_RSCC_TCS1_DRV0_STATUS, val,
		(val & 1), 100, 10000);
	gmu_poll_timeout(gmu, REG_A6XX_RSCC_TCS2_DRV0_STATUS, val,
		(val & 1), 100, 10000);
	gmu_poll_timeout(gmu, REG_A6XX_RSCC_TCS3_DRV0_STATUS, val,
		(val & 1), 100, 1000);
	/* Force off the GX GDSC */
	regulator_force_disable(gmu->gx);

	/* Disable the resources */
	clk_bulk_disable_unprepare(gmu->nr_clocks, gmu->clocks);
	pm_runtime_put_sync(gmu->dev);

	/* Re-enable the resources */
	pm_runtime_get_sync(gmu->dev);

	/* Use a known rate to bring up the GMU */
	clk_set_rate(gmu->core_clk, 200000000);
	ret = clk_bulk_prepare_enable(gmu->nr_clocks, gmu->clocks);
	if (ret)
		goto out;

	a6xx_gmu_irq_enable(gmu);

	ret = a6xx_gmu_fw_start(gmu, GMU_RESET);
	if (!ret)
		ret = a6xx_hfi_start(gmu, GMU_COLD_BOOT);

	/* Set the GPU back to the highest power frequency */
	a6xx_gmu_set_freq(gmu, gmu->nr_gpu_freqs - 1);

out:
	if (ret)
		a6xx_gmu_clear_oob(gmu, GMU_OOB_BOOT_SLUMBER);

	return ret;
}
int a6xx_gmu_resume(struct a6xx_gpu *a6xx_gpu)
{
	struct a6xx_gmu *gmu = &a6xx_gpu->gmu;
	int status, ret;

	if (WARN(!gmu->mmio, "The GMU is not set up yet\n"))
		return 0;

	/* Turn on the resources */
	pm_runtime_get_sync(gmu->dev);

	/* Use a known rate to bring up the GMU */
	clk_set_rate(gmu->core_clk, 200000000);
	ret = clk_bulk_prepare_enable(gmu->nr_clocks, gmu->clocks);
	if (ret)
		goto out;

	a6xx_gmu_irq_enable(gmu);

	/* Check to see if we are doing a cold or warm boot */
	status = gmu_read(gmu, REG_A6XX_GMU_GENERAL_7) == 1 ?
		GMU_WARM_BOOT : GMU_COLD_BOOT;

	ret = a6xx_gmu_fw_start(gmu, status);
	if (ret)
		goto out;

	ret = a6xx_hfi_start(gmu, status);

	/* Set the GPU to the highest power frequency */
	a6xx_gmu_set_freq(gmu, gmu->nr_gpu_freqs - 1);

out:
	/* Make sure to turn off the boot OOB request on error */
	if (ret)
		a6xx_gmu_clear_oob(gmu, GMU_OOB_BOOT_SLUMBER);

	return ret;
}
bool a6xx_gmu_isidle(struct a6xx_gmu *gmu)
{
	u32 reg;

	if (!gmu->mmio)
		return true;

	reg = gmu_read(gmu, REG_A6XX_GPU_GMU_AO_GPU_CX_BUSY_STATUS);

	if (reg & A6XX_GPU_GMU_AO_GPU_CX_BUSY_STATUS_GPUBUSYIGNAHB)
		return false;

	return true;
}
int a6xx_gmu_stop(struct a6xx_gpu *a6xx_gpu)
{
	struct a6xx_gmu *gmu = &a6xx_gpu->gmu;
	u32 val;

	/*
	 * If the GPU was never started the GMU may still be in slumber, so
	 * check first and skip putting it back into slumber if so
	 */
	val = gmu_read(gmu, REG_A6XX_GPU_GMU_CX_GMU_RPMH_POWER_STATE);

	if (val != 0xf) {
		int ret = a6xx_gmu_wait_for_idle(a6xx_gpu);

		/* Temporary until we can recover safely */
		BUG_ON(ret);

		/* tell the GMU we want to slumber */
		a6xx_gmu_notify_slumber(gmu);

		ret = gmu_poll_timeout(gmu,
			REG_A6XX_GPU_GMU_AO_GPU_CX_BUSY_STATUS, val,
			!(val & A6XX_GPU_GMU_AO_GPU_CX_BUSY_STATUS_GPUBUSYIGNAHB),
			100, 10000);

		/*
		 * Let the user know we failed to slumber but don't worry too
		 * much because we are powering down anyway
		 */
		if (ret)
			dev_err(gmu->dev,
				"Unable to slumber GMU: status = 0x%x/0x%x\n",
				gmu_read(gmu,
					REG_A6XX_GPU_GMU_AO_GPU_CX_BUSY_STATUS),
				gmu_read(gmu,
					REG_A6XX_GPU_GMU_AO_GPU_CX_BUSY_STATUS2));
	}

	/* Turn off HFI */
	a6xx_hfi_stop(gmu);

	/* Stop the interrupts and mask the hardware */
	a6xx_gmu_irq_disable(gmu);

	/* Tell RPMh to power off the GPU */
	a6xx_rpmh_stop(gmu);

	clk_bulk_disable_unprepare(gmu->nr_clocks, gmu->clocks);

	pm_runtime_put_sync(gmu->dev);

	return 0;
}
static void a6xx_gmu_memory_free(struct a6xx_gmu *gmu, struct a6xx_gmu_bo *bo)
{
	int count, i;
	u64 iova;

	if (IS_ERR_OR_NULL(bo))
		return;

	count = bo->size >> PAGE_SHIFT;
	iova = bo->iova;

	for (i = 0; i < count; i++, iova += PAGE_SIZE) {
		iommu_unmap(gmu->domain, iova, PAGE_SIZE);
		__free_pages(bo->pages[i], 0);
	}

	kfree(bo->pages);
	kfree(bo);
}
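
/*
 * Allocate individual pages and map them one at a time into the GMU's
 * uncached IOVA range; the backing pages do not need to be physically
 * contiguous since the GMU reaches them through its own IOMMU domain.
 */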
static struct a6xx_gmu_bo *a6xx_gmu_memory_alloc(struct a6xx_gmu *gmu,
		size_t size)
{
	struct a6xx_gmu_bo *bo;
	int ret, count, i;

	bo = kzalloc(sizeof(*bo), GFP_KERNEL);
	if (!bo)
		return ERR_PTR(-ENOMEM);

	bo->size = PAGE_ALIGN(size);

	count = bo->size >> PAGE_SHIFT;

	bo->pages = kcalloc(count, sizeof(struct page *), GFP_KERNEL);
	if (!bo->pages) {
		kfree(bo);
		return ERR_PTR(-ENOMEM);
	}

	for (i = 0; i < count; i++) {
		bo->pages[i] = alloc_page(GFP_KERNEL);
		if (!bo->pages[i])
			goto err;
	}

	bo->iova = gmu->uncached_iova_base;

	for (i = 0; i < count; i++) {
		ret = iommu_map(gmu->domain,
			bo->iova + (PAGE_SIZE * i),
			page_to_phys(bo->pages[i]), PAGE_SIZE,
			IOMMU_READ | IOMMU_WRITE);

		if (ret) {
			dev_err(gmu->dev, "Unable to map GMU buffer object\n");

			for (i = i - 1 ; i >= 0; i--)
				iommu_unmap(gmu->domain,
					bo->iova + (PAGE_SIZE * i),
					PAGE_SIZE);

			goto err;
		}
	}

	bo->virt = vmap(bo->pages, count, VM_IOREMAP,
		pgprot_writecombine(PAGE_KERNEL));
	if (!bo->virt)
		goto err;

	/* Align future IOVA addresses on 1MB boundaries */
	gmu->uncached_iova_base += ALIGN(size, SZ_1M);

	return bo;

err:
	for (i = 0; i < count; i++) {
		if (bo->pages[i])
			__free_pages(bo->pages[i], 0);
	}

	kfree(bo->pages);
	kfree(bo);

	return ERR_PTR(-ENOMEM);
}
static int a6xx_gmu_memory_probe(struct a6xx_gmu *gmu)
{
	int ret;

	/*
	 * The GMU address space is hardcoded to treat the range
	 * 0x60000000 - 0x80000000 as un-cached memory. All buffers shared
	 * between the GMU and the CPU will live in this space
	 */
	gmu->uncached_iova_base = 0x60000000;

	gmu->domain = iommu_domain_alloc(&platform_bus_type);
	if (!gmu->domain)
		return -ENODEV;

	ret = iommu_attach_device(gmu->domain, gmu->dev);

	if (ret) {
		iommu_domain_free(gmu->domain);
		gmu->domain = NULL;
	}

	return ret;
}
/* Get the list of RPMh voltage levels from cmd-db */
static int a6xx_gmu_rpmh_arc_cmds(const char *id, void *vals, int size)
{
	u32 len = cmd_db_read_aux_data_len(id);

	if (!len)
		return 0;

	if (WARN_ON(len > size))
		return -EINVAL;

	cmd_db_read_aux_data(id, vals, len);

	/*
	 * The data comes back as an array of unsigned shorts so adjust the
	 * count accordingly
	 */
	return len >> 1;
}
/* Return the 'arc-level' for the given frequency */
static u32 a6xx_gmu_get_arc_level(struct device *dev, unsigned long freq)
{
	struct dev_pm_opp *opp;
	struct device_node *np;
	u32 val = 0;

	if (!freq)
		return 0;

	opp = dev_pm_opp_find_freq_exact(dev, freq, true);
	if (IS_ERR(opp))
		return 0;

	np = dev_pm_opp_get_of_node(opp);

	if (np) {
		of_property_read_u32(np, "opp-level", &val);
		of_node_put(np);
	}

	dev_pm_opp_put(opp);

	return val;
}
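
/*
 * Each constructed vote is a packed u32: the primary arc level value lands
 * in bits [31:16], the index into the secondary (MX) table in bits [15:8]
 * and the primary index in bits [7:0]. The GX/MX_VOTE_IDX registers
 * programmed in a6xx_gmu_gfx_rail_on() consume the two index bytes.
 */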
static int a6xx_gmu_rpmh_arc_votes_init(struct device *dev, u32 *votes,
		unsigned long *freqs, int freqs_count,
		u16 *pri, int pri_count,
		u16 *sec, int sec_count)
{
	int i, j;

	/* Construct a vote for each frequency */
	for (i = 0; i < freqs_count; i++) {
		u8 pindex = 0, sindex = 0;
		u32 level = a6xx_gmu_get_arc_level(dev, freqs[i]);

		/* Get the primary index that matches the arc level */
		for (j = 0; j < pri_count; j++) {
			if (pri[j] >= level) {
				pindex = j;
				break;
			}
		}

		if (j == pri_count) {
			dev_err(dev,
				"Level %u not found in the RPMh list\n",
				level);
			dev_err(dev, "Available levels:\n");
			for (j = 0; j < pri_count; j++)
				dev_err(dev, " %u\n", pri[j]);

			return -EINVAL;
		}

		/*
		 * Look for a level in the secondary list that matches. If
		 * nothing fits, use the maximum non zero vote
		 */
		for (j = 0; j < sec_count; j++) {
			if (sec[j] >= level) {
				sindex = j;
				break;
			} else if (sec[j]) {
				sindex = j;
			}
		}

		/* Construct the vote */
		votes[i] = ((pri[pindex] & 0xffff) << 16) |
			(sindex << 8) | pindex;
	}

	return 0;
}
/*
 * The GMU votes with the RPMh for itself and on behalf of the GPU but we need
 * to construct the list of votes on the CPU and send it over. Query the RPMh
 * voltage levels and build the votes
 */
static int a6xx_gmu_rpmh_votes_init(struct a6xx_gmu *gmu)
{
	struct a6xx_gpu *a6xx_gpu = container_of(gmu, struct a6xx_gpu, gmu);
	struct adreno_gpu *adreno_gpu = &a6xx_gpu->base;
	struct msm_gpu *gpu = &adreno_gpu->base;

	u16 gx[16], cx[16], mx[16];
	u32 gxcount, cxcount, mxcount;
	int ret;

	/* Get the list of available voltage levels for each component */
	gxcount = a6xx_gmu_rpmh_arc_cmds("gfx.lvl", gx, sizeof(gx));
	cxcount = a6xx_gmu_rpmh_arc_cmds("cx.lvl", cx, sizeof(cx));
	mxcount = a6xx_gmu_rpmh_arc_cmds("mx.lvl", mx, sizeof(mx));

	/* Build the GX votes */
	ret = a6xx_gmu_rpmh_arc_votes_init(&gpu->pdev->dev, gmu->gx_arc_votes,
		gmu->gpu_freqs, gmu->nr_gpu_freqs,
		gx, gxcount, mx, mxcount);

	/* Build the CX votes */
	ret |= a6xx_gmu_rpmh_arc_votes_init(gmu->dev, gmu->cx_arc_votes,
		gmu->gmu_freqs, gmu->nr_gmu_freqs,
		cx, cxcount, mx, mxcount);

	return ret;
}
static int a6xx_gmu_build_freq_table(struct device *dev, unsigned long *freqs,
		u32 size)
{
	int count = dev_pm_opp_get_opp_count(dev);
	struct dev_pm_opp *opp;
	int i, index = 0;
	unsigned long freq = 1;

	/*
	 * The OPP table doesn't contain the "off" frequency level so we need to
	 * add 1 to the table size to account for it
	 */
	if (WARN(count + 1 > size,
		"The GMU frequency table is being truncated\n"))
		count = size - 1;

	/* Set the "off" frequency */
	freqs[index++] = 0;

	for (i = 0; i < count; i++) {
		opp = dev_pm_opp_find_freq_ceil(dev, &freq);
		if (IS_ERR(opp))
			break;

		dev_pm_opp_put(opp);
		freqs[index++] = freq++;
	}

	return index;
}
static int a6xx_gmu_pwrlevels_probe(struct a6xx_gmu *gmu)
{
	struct a6xx_gpu *a6xx_gpu = container_of(gmu, struct a6xx_gpu, gmu);
	struct adreno_gpu *adreno_gpu = &a6xx_gpu->base;
	struct msm_gpu *gpu = &adreno_gpu->base;

	int ret = 0;

	/*
	 * The GMU handles its own frequency switching so build a list of
	 * available frequencies to send during initialization
	 */
	ret = dev_pm_opp_of_add_table(gmu->dev);
	if (ret) {
		dev_err(gmu->dev, "Unable to set the OPP table for the GMU\n");
		return ret;
	}

	gmu->nr_gmu_freqs = a6xx_gmu_build_freq_table(gmu->dev,
		gmu->gmu_freqs, ARRAY_SIZE(gmu->gmu_freqs));

	/*
	 * The GMU also handles GPU frequency switching so build a list
	 * from the GPU OPP table
	 */
	gmu->nr_gpu_freqs = a6xx_gmu_build_freq_table(&gpu->pdev->dev,
		gmu->gpu_freqs, ARRAY_SIZE(gmu->gpu_freqs));

	/* Build the list of RPMh votes that we'll send to the GMU */
	return a6xx_gmu_rpmh_votes_init(gmu);
}
static int a6xx_gmu_clocks_probe(struct a6xx_gmu *gmu)
{
	int ret = msm_clk_bulk_get(gmu->dev, &gmu->clocks);

	if (ret < 1)
		return ret;

	gmu->nr_clocks = ret;

	gmu->core_clk = msm_clk_bulk_get_clock(gmu->clocks,
		gmu->nr_clocks, "gmu");

	return 0;
}
static void __iomem *a6xx_gmu_get_mmio(struct platform_device *pdev,
		const char *name)
{
	void __iomem *ret;
	struct resource *res = platform_get_resource_byname(pdev,
			IORESOURCE_MEM, name);

	if (!res) {
		dev_err(&pdev->dev, "Unable to find the %s registers\n", name);
		return ERR_PTR(-EINVAL);
	}

	ret = devm_ioremap(&pdev->dev, res->start, resource_size(res));
	if (!ret) {
		dev_err(&pdev->dev, "Unable to map the %s registers\n", name);
		return ERR_PTR(-EINVAL);
	}

	return ret;
}
static int a6xx_gmu_get_irq(struct a6xx_gmu *gmu, struct platform_device *pdev,
		const char *name, irq_handler_t handler)
{
	int irq, ret;

	irq = platform_get_irq_byname(pdev, name);

	ret = devm_request_irq(&pdev->dev, irq, handler, IRQF_TRIGGER_HIGH,
		name, gmu);
	if (ret) {
		dev_err(&pdev->dev, "Unable to get interrupt %s\n", name);
		return ret;
	}

	disable_irq(irq);

	return irq;
}
void a6xx_gmu_remove(struct a6xx_gpu *a6xx_gpu)
{
	struct a6xx_gmu *gmu = &a6xx_gpu->gmu;

	if (IS_ERR_OR_NULL(gmu->mmio))
		return;

	pm_runtime_disable(gmu->dev);
	a6xx_gmu_stop(a6xx_gpu);

	a6xx_gmu_irq_disable(gmu);
	a6xx_gmu_memory_free(gmu, gmu->hfi);

	iommu_detach_device(gmu->domain, gmu->dev);

	iommu_domain_free(gmu->domain);
}
int a6xx_gmu_probe(struct a6xx_gpu *a6xx_gpu, struct device_node *node)
{
	struct a6xx_gmu *gmu = &a6xx_gpu->gmu;
	struct platform_device *pdev = of_find_device_by_node(node);
	int ret;

	if (!pdev)
		return -ENODEV;

	gmu->dev = &pdev->dev;

	of_dma_configure(gmu->dev, node, true);

	/* For now, don't do anything fancy until we get our feet under us */
	gmu->idle_level = GMU_IDLE_STATE_ACTIVE;

	pm_runtime_enable(gmu->dev);
	gmu->gx = devm_regulator_get(gmu->dev, "vdd");

	/* Get the list of clocks */
	ret = a6xx_gmu_clocks_probe(gmu);
	if (ret)
		return ret;

	/* Set up the IOMMU context bank */
	ret = a6xx_gmu_memory_probe(gmu);
	if (ret)
		return ret;

	/* Allocate memory for the HFI queues */
	gmu->hfi = a6xx_gmu_memory_alloc(gmu, SZ_16K);
	if (IS_ERR(gmu->hfi))
		goto err;

	/* Allocate memory for the GMU debug region */
	gmu->debug = a6xx_gmu_memory_alloc(gmu, SZ_16K);
	if (IS_ERR(gmu->debug))
		goto err;

	/* Map the GMU registers */
	gmu->mmio = a6xx_gmu_get_mmio(pdev, "gmu");

	/* Map the GPU power domain controller registers */
	gmu->pdc_mmio = a6xx_gmu_get_mmio(pdev, "gmu_pdc");

	if (IS_ERR(gmu->mmio) || IS_ERR(gmu->pdc_mmio))
		goto err;

	/* Get the HFI and GMU interrupts */
	gmu->hfi_irq = a6xx_gmu_get_irq(gmu, pdev, "hfi", a6xx_hfi_irq);
	gmu->gmu_irq = a6xx_gmu_get_irq(gmu, pdev, "gmu", a6xx_gmu_irq);

	if (gmu->hfi_irq < 0 || gmu->gmu_irq < 0)
		goto err;

	/* Set up a tasklet to handle GMU HFI responses */
	tasklet_init(&gmu->hfi_tasklet, a6xx_hfi_task, (unsigned long) gmu);

	/* Get the power levels for the GMU and GPU */
	a6xx_gmu_pwrlevels_probe(gmu);

	/* Set up the HFI queues */
	a6xx_hfi_init(gmu);

	return 0;

err:
	a6xx_gmu_memory_free(gmu, gmu->hfi);

	if (gmu->domain) {
		iommu_detach_device(gmu->domain, gmu->dev);

		iommu_domain_free(gmu->domain);
	}

	return -ENODEV;
}