ivpu_mmu.c

// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2020-2024 Intel Corporation
 */

#include <linux/circ_buf.h>
#include <linux/highmem.h>

#include "ivpu_drv.h"
#include "ivpu_hw.h"
#include "ivpu_hw_reg_io.h"
#include "ivpu_mmu.h"
#include "ivpu_mmu_context.h"
#include "ivpu_pm.h"

#define IVPU_MMU_REG_IDR0		0x00200000u
#define IVPU_MMU_REG_IDR1		0x00200004u
#define IVPU_MMU_REG_IDR3		0x0020000cu
#define IVPU_MMU_REG_IDR5		0x00200014u
#define IVPU_MMU_REG_CR0		0x00200020u
#define IVPU_MMU_REG_CR0ACK		0x00200024u
#define IVPU_MMU_REG_CR0ACK_VAL_MASK	GENMASK(31, 0)
#define IVPU_MMU_REG_CR1		0x00200028u
#define IVPU_MMU_REG_CR2		0x0020002cu
#define IVPU_MMU_REG_IRQ_CTRL		0x00200050u
#define IVPU_MMU_REG_IRQ_CTRLACK	0x00200054u
#define IVPU_MMU_REG_IRQ_CTRLACK_VAL_MASK GENMASK(31, 0)

#define IVPU_MMU_REG_GERROR		0x00200060u
#define IVPU_MMU_REG_GERROR_CMDQ_MASK		BIT_MASK(0)
#define IVPU_MMU_REG_GERROR_EVTQ_ABT_MASK	BIT_MASK(2)
#define IVPU_MMU_REG_GERROR_PRIQ_ABT_MASK	BIT_MASK(3)
#define IVPU_MMU_REG_GERROR_MSI_CMDQ_ABT_MASK	BIT_MASK(4)
#define IVPU_MMU_REG_GERROR_MSI_EVTQ_ABT_MASK	BIT_MASK(5)
#define IVPU_MMU_REG_GERROR_MSI_PRIQ_ABT_MASK	BIT_MASK(6)
#define IVPU_MMU_REG_GERROR_MSI_ABT_MASK	BIT_MASK(7)
#define IVPU_MMU_REG_GERRORN		0x00200064u

#define IVPU_MMU_REG_STRTAB_BASE	0x00200080u
#define IVPU_MMU_REG_STRTAB_BASE_CFG	0x00200088u
#define IVPU_MMU_REG_CMDQ_BASE		0x00200090u
#define IVPU_MMU_REG_CMDQ_PROD		0x00200098u
#define IVPU_MMU_REG_CMDQ_CONS		0x0020009cu
#define IVPU_MMU_REG_CMDQ_CONS_VAL_MASK	GENMASK(23, 0)
#define IVPU_MMU_REG_CMDQ_CONS_ERR_MASK	GENMASK(30, 24)
#define IVPU_MMU_REG_EVTQ_BASE		0x002000a0u
#define IVPU_MMU_REG_EVTQ_PROD		0x002000a8u
#define IVPU_MMU_REG_EVTQ_CONS		0x002000acu
#define IVPU_MMU_REG_EVTQ_PROD_SEC	(0x002000a8u + SZ_64K)
#define IVPU_MMU_REG_EVTQ_CONS_SEC	(0x002000acu + SZ_64K)

#define IVPU_MMU_IDR0_REF		0x080f3e0f
#define IVPU_MMU_IDR0_REF_SIMICS	0x080f3e1f
#define IVPU_MMU_IDR1_REF		0x0e739d18
#define IVPU_MMU_IDR3_REF		0x0000003c
#define IVPU_MMU_IDR5_REF		0x00040070
#define IVPU_MMU_IDR5_REF_SIMICS	0x00000075
#define IVPU_MMU_IDR5_REF_FPGA		0x00800075

#define IVPU_MMU_CDTAB_ENT_SIZE		64
#define IVPU_MMU_CDTAB_ENT_COUNT_LOG2	8 /* 256 entries */
#define IVPU_MMU_CDTAB_ENT_COUNT	((u32)1 << IVPU_MMU_CDTAB_ENT_COUNT_LOG2)

#define IVPU_MMU_STREAM_ID0		0
#define IVPU_MMU_STREAM_ID3		3

#define IVPU_MMU_STRTAB_ENT_SIZE	64
#define IVPU_MMU_STRTAB_ENT_COUNT	4
#define IVPU_MMU_STRTAB_CFG_LOG2SIZE	2
#define IVPU_MMU_STRTAB_CFG		IVPU_MMU_STRTAB_CFG_LOG2SIZE

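/*
 * Queue geometry. Producer/consumer values encode both an entry index and
 * a wrap bit (the standard SMMUv3 circular-queue convention): with
 * IVPU_MMU_Q_COUNT_LOG2 == 4, bits 3:0 hold the index and bit 4 toggles on
 * every wrap-around. A queue is full when the indices match but the wrap
 * bits differ, and empty when both match. For example, prod == 0x12 and
 * cons == 0x02 reference the same slot (2) with opposite wrap bits, so the
 * queue is full.
 */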
#define IVPU_MMU_Q_COUNT_LOG2		4 /* 16 entries */
#define IVPU_MMU_Q_COUNT		((u32)1 << IVPU_MMU_Q_COUNT_LOG2)
#define IVPU_MMU_Q_WRAP_MASK		GENMASK(IVPU_MMU_Q_COUNT_LOG2, 0)
#define IVPU_MMU_Q_IDX_MASK		(IVPU_MMU_Q_COUNT - 1)
#define IVPU_MMU_Q_IDX(val)		((val) & IVPU_MMU_Q_IDX_MASK)
#define IVPU_MMU_Q_WRP(val)		((val) & IVPU_MMU_Q_COUNT)

#define IVPU_MMU_CMDQ_CMD_SIZE		16
#define IVPU_MMU_CMDQ_SIZE		(IVPU_MMU_Q_COUNT * IVPU_MMU_CMDQ_CMD_SIZE)
#define IVPU_MMU_EVTQ_CMD_SIZE		32
#define IVPU_MMU_EVTQ_SIZE		(IVPU_MMU_Q_COUNT * IVPU_MMU_EVTQ_CMD_SIZE)

#define IVPU_MMU_CMD_OPCODE		GENMASK(7, 0)
#define IVPU_MMU_CMD_SYNC_0_CS		GENMASK(13, 12)
#define IVPU_MMU_CMD_SYNC_0_MSH		GENMASK(23, 22)
#define IVPU_MMU_CMD_SYNC_0_MSI_ATTR	GENMASK(27, 24)
#define IVPU_MMU_CMD_SYNC_0_MSI_DATA	GENMASK(63, 32)

#define IVPU_MMU_CMD_CFGI_0_SSEC	BIT(10)
#define IVPU_MMU_CMD_CFGI_0_SSV		BIT(11)
#define IVPU_MMU_CMD_CFGI_0_SSID	GENMASK(31, 12)
#define IVPU_MMU_CMD_CFGI_0_SID		GENMASK(63, 32)
#define IVPU_MMU_CMD_CFGI_1_RANGE	GENMASK(4, 0)

#define IVPU_MMU_CMD_TLBI_0_ASID	GENMASK(63, 48)
#define IVPU_MMU_CMD_TLBI_0_VMID	GENMASK(47, 32)

#define CMD_PREFETCH_CFG		0x1
#define CMD_CFGI_STE			0x3
#define CMD_CFGI_ALL			0x4
#define CMD_CFGI_CD			0x5
#define CMD_CFGI_CD_ALL			0x6
#define CMD_TLBI_NH_ASID		0x11
#define CMD_TLBI_EL2_ALL		0x20
#define CMD_TLBI_NSNH_ALL		0x30
#define CMD_SYNC			0x46

#define IVPU_MMU_EVT_F_UUT		0x01
#define IVPU_MMU_EVT_C_BAD_STREAMID	0x02
#define IVPU_MMU_EVT_F_STE_FETCH	0x03
#define IVPU_MMU_EVT_C_BAD_STE		0x04
#define IVPU_MMU_EVT_F_BAD_ATS_TREQ	0x05
#define IVPU_MMU_EVT_F_STREAM_DISABLED	0x06
#define IVPU_MMU_EVT_F_TRANSL_FORBIDDEN	0x07
#define IVPU_MMU_EVT_C_BAD_SUBSTREAMID	0x08
#define IVPU_MMU_EVT_F_CD_FETCH		0x09
#define IVPU_MMU_EVT_C_BAD_CD		0x0a
#define IVPU_MMU_EVT_F_WALK_EABT	0x0b
#define IVPU_MMU_EVT_F_TRANSLATION	0x10
#define IVPU_MMU_EVT_F_ADDR_SIZE	0x11
#define IVPU_MMU_EVT_F_ACCESS		0x12
#define IVPU_MMU_EVT_F_PERMISSION	0x13
#define IVPU_MMU_EVT_F_TLB_CONFLICT	0x20
#define IVPU_MMU_EVT_F_CFG_CONFLICT	0x21
#define IVPU_MMU_EVT_E_PAGE_REQUEST	0x24
#define IVPU_MMU_EVT_F_VMS_FETCH	0x25

#define IVPU_MMU_EVT_OP_MASK		GENMASK_ULL(7, 0)
#define IVPU_MMU_EVT_SSID_MASK		GENMASK_ULL(31, 12)

#define IVPU_MMU_Q_BASE_RWA		BIT(62)
#define IVPU_MMU_Q_BASE_ADDR_MASK	GENMASK_ULL(51, 5)
#define IVPU_MMU_STRTAB_BASE_RA		BIT(62)
#define IVPU_MMU_STRTAB_BASE_ADDR_MASK	GENMASK_ULL(51, 6)

#define IVPU_MMU_IRQ_EVTQ_EN		BIT(2)
#define IVPU_MMU_IRQ_GERROR_EN		BIT(0)

#define IVPU_MMU_CR0_ATSCHK		BIT(4)
#define IVPU_MMU_CR0_CMDQEN		BIT(3)
#define IVPU_MMU_CR0_EVTQEN		BIT(2)
#define IVPU_MMU_CR0_PRIQEN		BIT(1)
#define IVPU_MMU_CR0_SMMUEN		BIT(0)

#define IVPU_MMU_CR1_TABLE_SH		GENMASK(11, 10)
#define IVPU_MMU_CR1_TABLE_OC		GENMASK(9, 8)
#define IVPU_MMU_CR1_TABLE_IC		GENMASK(7, 6)
#define IVPU_MMU_CR1_QUEUE_SH		GENMASK(5, 4)
#define IVPU_MMU_CR1_QUEUE_OC		GENMASK(3, 2)
#define IVPU_MMU_CR1_QUEUE_IC		GENMASK(1, 0)

#define IVPU_MMU_CACHE_NC		0
#define IVPU_MMU_CACHE_WB		1
#define IVPU_MMU_CACHE_WT		2

#define IVPU_MMU_SH_NSH			0
#define IVPU_MMU_SH_OSH			2
#define IVPU_MMU_SH_ISH			3

#define IVPU_MMU_CMDQ_OP		GENMASK_ULL(7, 0)

#define IVPU_MMU_CD_0_TCR_T0SZ		GENMASK_ULL(5, 0)
#define IVPU_MMU_CD_0_TCR_TG0		GENMASK_ULL(7, 6)
#define IVPU_MMU_CD_0_TCR_IRGN0		GENMASK_ULL(9, 8)
#define IVPU_MMU_CD_0_TCR_ORGN0		GENMASK_ULL(11, 10)
#define IVPU_MMU_CD_0_TCR_SH0		GENMASK_ULL(13, 12)
#define IVPU_MMU_CD_0_TCR_EPD0		BIT_ULL(14)
#define IVPU_MMU_CD_0_TCR_EPD1		BIT_ULL(30)
#define IVPU_MMU_CD_0_ENDI		BIT(15)
#define IVPU_MMU_CD_0_V			BIT(31)
#define IVPU_MMU_CD_0_TCR_IPS		GENMASK_ULL(34, 32)
#define IVPU_MMU_CD_0_TCR_TBI0		BIT_ULL(38)
#define IVPU_MMU_CD_0_AA64		BIT(41)
#define IVPU_MMU_CD_0_S			BIT(44)
#define IVPU_MMU_CD_0_R			BIT(45)
#define IVPU_MMU_CD_0_A			BIT(46)
#define IVPU_MMU_CD_0_ASET		BIT(47)
#define IVPU_MMU_CD_0_ASID		GENMASK_ULL(63, 48)

#define IVPU_MMU_T0SZ_48BIT		16
#define IVPU_MMU_T0SZ_38BIT		26

#define IVPU_MMU_IPS_48BIT		5
#define IVPU_MMU_IPS_44BIT		4
#define IVPU_MMU_IPS_42BIT		3
#define IVPU_MMU_IPS_40BIT		2
#define IVPU_MMU_IPS_36BIT		1
#define IVPU_MMU_IPS_32BIT		0

#define IVPU_MMU_CD_1_TTB0_MASK		GENMASK_ULL(51, 4)

#define IVPU_MMU_STE_0_S1CDMAX		GENMASK_ULL(63, 59)
#define IVPU_MMU_STE_0_S1FMT		GENMASK_ULL(5, 4)
#define IVPU_MMU_STE_0_S1FMT_LINEAR	0
#define IVPU_MMU_STE_DWORDS		8
#define IVPU_MMU_STE_0_CFG_S1_TRANS	5
#define IVPU_MMU_STE_0_CFG		GENMASK_ULL(3, 1)
#define IVPU_MMU_STE_0_S1CTXPTR_MASK	GENMASK_ULL(51, 6)
#define IVPU_MMU_STE_0_V		BIT(0)

#define IVPU_MMU_STE_1_STRW_NSEL1	0ul
#define IVPU_MMU_STE_1_CONT		GENMASK_ULL(16, 13)
#define IVPU_MMU_STE_1_STRW		GENMASK_ULL(31, 30)
#define IVPU_MMU_STE_1_PRIVCFG		GENMASK_ULL(49, 48)
#define IVPU_MMU_STE_1_PRIVCFG_UNPRIV	2ul
#define IVPU_MMU_STE_1_INSTCFG		GENMASK_ULL(51, 50)
#define IVPU_MMU_STE_1_INSTCFG_DATA	2ul
#define IVPU_MMU_STE_1_MEV		BIT(19)
#define IVPU_MMU_STE_1_S1STALLD		BIT(27)
#define IVPU_MMU_STE_1_S1C_CACHE_NC	0ul
#define IVPU_MMU_STE_1_S1C_CACHE_WBRA	1ul
#define IVPU_MMU_STE_1_S1C_CACHE_WT	2ul
#define IVPU_MMU_STE_1_S1C_CACHE_WB	3ul
#define IVPU_MMU_STE_1_S1CIR		GENMASK_ULL(3, 2)
#define IVPU_MMU_STE_1_S1COR		GENMASK_ULL(5, 4)
#define IVPU_MMU_STE_1_S1CSH		GENMASK_ULL(7, 6)
#define IVPU_MMU_STE_1_S1DSS		GENMASK_ULL(1, 0)
#define IVPU_MMU_STE_1_S1DSS_TERMINATE	0x0

#define IVPU_MMU_REG_TIMEOUT_US		(10 * USEC_PER_MSEC)
#define IVPU_MMU_QUEUE_TIMEOUT_US	(100 * USEC_PER_MSEC)

#define IVPU_MMU_GERROR_ERR_MASK ((REG_FLD(IVPU_MMU_REG_GERROR, CMDQ)) | \
				  (REG_FLD(IVPU_MMU_REG_GERROR, EVTQ_ABT)) | \
				  (REG_FLD(IVPU_MMU_REG_GERROR, PRIQ_ABT)) | \
				  (REG_FLD(IVPU_MMU_REG_GERROR, MSI_CMDQ_ABT)) | \
				  (REG_FLD(IVPU_MMU_REG_GERROR, MSI_EVTQ_ABT)) | \
				  (REG_FLD(IVPU_MMU_REG_GERROR, MSI_PRIQ_ABT)) | \
				  (REG_FLD(IVPU_MMU_REG_GERROR, MSI_ABT)))

#define IVPU_MMU_CERROR_NONE		0x0
#define IVPU_MMU_CERROR_ILL		0x1
#define IVPU_MMU_CERROR_ABT		0x2
#define IVPU_MMU_CERROR_ATC_INV_SYNC	0x3

static const char *ivpu_mmu_event_to_str(u32 cmd)
{
	switch (cmd) {
	case IVPU_MMU_EVT_F_UUT:
		return "Unsupported Upstream Transaction";
	case IVPU_MMU_EVT_C_BAD_STREAMID:
		return "Transaction StreamID out of range";
	case IVPU_MMU_EVT_F_STE_FETCH:
		return "Fetch of STE caused external abort";
	case IVPU_MMU_EVT_C_BAD_STE:
		return "Used STE invalid";
	case IVPU_MMU_EVT_F_BAD_ATS_TREQ:
		return "Address Request disallowed for a StreamID";
	case IVPU_MMU_EVT_F_STREAM_DISABLED:
		return "Transaction marks non-substream disabled";
	case IVPU_MMU_EVT_F_TRANSL_FORBIDDEN:
		return "MMU bypass is disallowed for this StreamID";
	case IVPU_MMU_EVT_C_BAD_SUBSTREAMID:
		return "Invalid SubstreamID";
	case IVPU_MMU_EVT_F_CD_FETCH:
		return "Fetch of CD caused external abort";
	case IVPU_MMU_EVT_C_BAD_CD:
		return "Fetched CD invalid";
	case IVPU_MMU_EVT_F_WALK_EABT:
		return "An external abort occurred fetching a TLB";
	case IVPU_MMU_EVT_F_TRANSLATION:
		return "Translation fault";
	case IVPU_MMU_EVT_F_ADDR_SIZE:
		return "Output address caused address size fault";
	case IVPU_MMU_EVT_F_ACCESS:
		return "Access flag fault";
	case IVPU_MMU_EVT_F_PERMISSION:
		return "Permission fault occurred on page access";
	case IVPU_MMU_EVT_F_TLB_CONFLICT:
		return "A TLB conflict";
	case IVPU_MMU_EVT_F_CFG_CONFLICT:
		return "A configuration cache conflict";
	case IVPU_MMU_EVT_E_PAGE_REQUEST:
		return "Page request hint from a client device";
	case IVPU_MMU_EVT_F_VMS_FETCH:
		return "Fetch of VMS caused external abort";
	default:
		return "Unknown event";
	}
}

static const char *ivpu_mmu_cmdq_err_to_str(u32 err)
{
	switch (err) {
	case IVPU_MMU_CERROR_NONE:
		return "No error";
	case IVPU_MMU_CERROR_ILL:
		return "Illegal command";
	case IVPU_MMU_CERROR_ABT:
		return "External abort on command queue read";
	case IVPU_MMU_CERROR_ATC_INV_SYNC:
		return "Sync failed to complete ATS invalidation";
	default:
		return "Unknown error";
	}
}

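/*
 * Compare the MMU ID registers against the values expected on silicon,
 * Simics simulation and FPGA platforms. Mismatches are only logged at
 * debug level and do not fail initialization.
 */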
static void ivpu_mmu_config_check(struct ivpu_device *vdev)
{
	u32 val_ref;
	u32 val;

	if (ivpu_is_simics(vdev))
		val_ref = IVPU_MMU_IDR0_REF_SIMICS;
	else
		val_ref = IVPU_MMU_IDR0_REF;

	val = REGV_RD32(IVPU_MMU_REG_IDR0);
	if (val != val_ref)
		ivpu_dbg(vdev, MMU, "IDR0 0x%x != IDR0_REF 0x%x\n", val, val_ref);

	val = REGV_RD32(IVPU_MMU_REG_IDR1);
	if (val != IVPU_MMU_IDR1_REF)
		ivpu_dbg(vdev, MMU, "IDR1 0x%x != IDR1_REF 0x%x\n", val, IVPU_MMU_IDR1_REF);

	val = REGV_RD32(IVPU_MMU_REG_IDR3);
	if (val != IVPU_MMU_IDR3_REF)
		ivpu_dbg(vdev, MMU, "IDR3 0x%x != IDR3_REF 0x%x\n", val, IVPU_MMU_IDR3_REF);

	if (ivpu_is_simics(vdev))
		val_ref = IVPU_MMU_IDR5_REF_SIMICS;
	else if (ivpu_is_fpga(vdev))
		val_ref = IVPU_MMU_IDR5_REF_FPGA;
	else
		val_ref = IVPU_MMU_IDR5_REF;

	val = REGV_RD32(IVPU_MMU_REG_IDR5);
	if (val != val_ref)
		ivpu_dbg(vdev, MMU, "IDR5 0x%x != IDR5_REF 0x%x\n", val, val_ref);
}

static int ivpu_mmu_cdtab_alloc(struct ivpu_device *vdev)
{
	struct ivpu_mmu_info *mmu = vdev->mmu;
	struct ivpu_mmu_cdtab *cdtab = &mmu->cdtab;
	size_t size = IVPU_MMU_CDTAB_ENT_COUNT * IVPU_MMU_CDTAB_ENT_SIZE;

	cdtab->base = dmam_alloc_coherent(vdev->drm.dev, size, &cdtab->dma, GFP_KERNEL);
	if (!cdtab->base)
		return -ENOMEM;

	ivpu_dbg(vdev, MMU, "CDTAB alloc: dma=%pad size=%zu\n", &cdtab->dma, size);

	return 0;
}

static int ivpu_mmu_strtab_alloc(struct ivpu_device *vdev)
{
	struct ivpu_mmu_info *mmu = vdev->mmu;
	struct ivpu_mmu_strtab *strtab = &mmu->strtab;
	size_t size = IVPU_MMU_STRTAB_ENT_COUNT * IVPU_MMU_STRTAB_ENT_SIZE;

	strtab->base = dmam_alloc_coherent(vdev->drm.dev, size, &strtab->dma, GFP_KERNEL);
	if (!strtab->base)
		return -ENOMEM;

	strtab->base_cfg = IVPU_MMU_STRTAB_CFG;
	strtab->dma_q = IVPU_MMU_STRTAB_BASE_RA;
	strtab->dma_q |= strtab->dma & IVPU_MMU_STRTAB_BASE_ADDR_MASK;

	ivpu_dbg(vdev, MMU, "STRTAB alloc: dma=%pad dma_q=%pad size=%zu\n",
		 &strtab->dma, &strtab->dma_q, size);

	return 0;
}

static int ivpu_mmu_cmdq_alloc(struct ivpu_device *vdev)
{
	struct ivpu_mmu_info *mmu = vdev->mmu;
	struct ivpu_mmu_queue *q = &mmu->cmdq;

	q->base = dmam_alloc_coherent(vdev->drm.dev, IVPU_MMU_CMDQ_SIZE, &q->dma, GFP_KERNEL);
	if (!q->base)
		return -ENOMEM;

	q->dma_q = IVPU_MMU_Q_BASE_RWA;
	q->dma_q |= q->dma & IVPU_MMU_Q_BASE_ADDR_MASK;
	q->dma_q |= IVPU_MMU_Q_COUNT_LOG2;

	ivpu_dbg(vdev, MMU, "CMDQ alloc: dma=%pad dma_q=%pad size=%u\n",
		 &q->dma, &q->dma_q, IVPU_MMU_CMDQ_SIZE);

	return 0;
}

static int ivpu_mmu_evtq_alloc(struct ivpu_device *vdev)
{
	struct ivpu_mmu_info *mmu = vdev->mmu;
	struct ivpu_mmu_queue *q = &mmu->evtq;

	q->base = dmam_alloc_coherent(vdev->drm.dev, IVPU_MMU_EVTQ_SIZE, &q->dma, GFP_KERNEL);
	if (!q->base)
		return -ENOMEM;

	q->dma_q = IVPU_MMU_Q_BASE_RWA;
	q->dma_q |= q->dma & IVPU_MMU_Q_BASE_ADDR_MASK;
	q->dma_q |= IVPU_MMU_Q_COUNT_LOG2;

	ivpu_dbg(vdev, MMU, "EVTQ alloc: dma=%pad dma_q=%pad size=%u\n",
		 &q->dma, &q->dma_q, IVPU_MMU_EVTQ_SIZE);

	return 0;
}

static int ivpu_mmu_structs_alloc(struct ivpu_device *vdev)
{
	int ret;

	ret = ivpu_mmu_cdtab_alloc(vdev);
	if (ret) {
		ivpu_err(vdev, "Failed to allocate cdtab: %d\n", ret);
		return ret;
	}

	ret = ivpu_mmu_strtab_alloc(vdev);
	if (ret) {
		ivpu_err(vdev, "Failed to allocate strtab: %d\n", ret);
		return ret;
	}

	ret = ivpu_mmu_cmdq_alloc(vdev);
	if (ret) {
		ivpu_err(vdev, "Failed to allocate cmdq: %d\n", ret);
		return ret;
	}

	ret = ivpu_mmu_evtq_alloc(vdev);
	if (ret)
		ivpu_err(vdev, "Failed to allocate evtq: %d\n", ret);

	return ret;
}

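/*
 * CR0 and IRQ_CTRL updates must be acknowledged by the SMMU: write the new
 * value, then poll the corresponding *ACK register until it reflects that
 * value, or give up after IVPU_MMU_REG_TIMEOUT_US.
 */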
static int ivpu_mmu_reg_write_cr0(struct ivpu_device *vdev, u32 val)
{
	REGV_WR32(IVPU_MMU_REG_CR0, val);

	return REGV_POLL_FLD(IVPU_MMU_REG_CR0ACK, VAL, val, IVPU_MMU_REG_TIMEOUT_US);
}

static int ivpu_mmu_reg_write_irq_ctrl(struct ivpu_device *vdev, u32 val)
{
	REGV_WR32(IVPU_MMU_REG_IRQ_CTRL, val);

	return REGV_POLL_FLD(IVPU_MMU_REG_IRQ_CTRLACK, VAL, val, IVPU_MMU_REG_TIMEOUT_US);
}

static int ivpu_mmu_irqs_setup(struct ivpu_device *vdev)
{
	u32 irq_ctrl = IVPU_MMU_IRQ_EVTQ_EN | IVPU_MMU_IRQ_GERROR_EN;
	int ret;

	ret = ivpu_mmu_reg_write_irq_ctrl(vdev, 0);
	if (ret)
		return ret;

	return ivpu_mmu_reg_write_irq_ctrl(vdev, irq_ctrl);
}

static int ivpu_mmu_cmdq_wait_for_cons(struct ivpu_device *vdev)
{
	struct ivpu_mmu_queue *cmdq = &vdev->mmu->cmdq;
	int ret;

	ret = REGV_POLL_FLD(IVPU_MMU_REG_CMDQ_CONS, VAL, cmdq->prod,
			    IVPU_MMU_QUEUE_TIMEOUT_US);
	if (ret)
		return ret;

	cmdq->cons = cmdq->prod;

	return 0;
}

static bool ivpu_mmu_queue_is_full(struct ivpu_mmu_queue *q)
{
	return ((IVPU_MMU_Q_IDX(q->prod) == IVPU_MMU_Q_IDX(q->cons)) &&
		(IVPU_MMU_Q_WRP(q->prod) != IVPU_MMU_Q_WRP(q->cons)));
}

static bool ivpu_mmu_queue_is_empty(struct ivpu_mmu_queue *q)
{
	return ((IVPU_MMU_Q_IDX(q->prod) == IVPU_MMU_Q_IDX(q->cons)) &&
		(IVPU_MMU_Q_WRP(q->prod) == IVPU_MMU_Q_WRP(q->cons)));
}

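/*
 * Write one 16-byte command at the current producer slot. The producer
 * pointer advances modulo 2 * IVPU_MMU_Q_COUNT (IVPU_MMU_Q_WRAP_MASK),
 * which maintains the wrap bit; the CMDQ_PROD doorbell register is only
 * written later, from ivpu_mmu_cmdq_sync().
 */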
static int ivpu_mmu_cmdq_cmd_write(struct ivpu_device *vdev, const char *name, u64 data0, u64 data1)
{
	struct ivpu_mmu_queue *cmdq = &vdev->mmu->cmdq;
	u64 *queue_buffer = cmdq->base;
	int idx = IVPU_MMU_Q_IDX(cmdq->prod) * (IVPU_MMU_CMDQ_CMD_SIZE / sizeof(*queue_buffer));

	if (ivpu_mmu_queue_is_full(cmdq)) {
		ivpu_err(vdev, "Failed to write MMU CMD %s\n", name);
		return -EBUSY;
	}

	queue_buffer[idx] = data0;
	queue_buffer[idx + 1] = data1;
	cmdq->prod = (cmdq->prod + 1) & IVPU_MMU_Q_WRAP_MASK;

	ivpu_dbg(vdev, MMU, "CMD write: %s data: 0x%llx 0x%llx\n", name, data0, data1);

	return 0;
}

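/*
 * Queue a CMD_SYNC, flush the command queue from CPU caches unless the
 * device snoops them, ring the CMDQ_PROD doorbell and wait for the hardware
 * consumer to catch up with the producer. On timeout, the CMDQ_CONS error
 * field is decoded for diagnostics.
 */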
static int ivpu_mmu_cmdq_sync(struct ivpu_device *vdev)
{
	struct ivpu_mmu_queue *q = &vdev->mmu->cmdq;
	u64 val;
	int ret;

	val = FIELD_PREP(IVPU_MMU_CMD_OPCODE, CMD_SYNC);

	ret = ivpu_mmu_cmdq_cmd_write(vdev, "SYNC", val, 0);
	if (ret)
		return ret;

	if (!ivpu_is_force_snoop_enabled(vdev))
		clflush_cache_range(q->base, IVPU_MMU_CMDQ_SIZE);
	REGV_WR32(IVPU_MMU_REG_CMDQ_PROD, q->prod);

	ret = ivpu_mmu_cmdq_wait_for_cons(vdev);
	if (ret) {
		u32 err;

		val = REGV_RD32(IVPU_MMU_REG_CMDQ_CONS);
		err = REG_GET_FLD(IVPU_MMU_REG_CMDQ_CONS, ERR, val);

		ivpu_err(vdev, "Timed out waiting for MMU consumer: %d, error: %s\n", ret,
			 ivpu_mmu_cmdq_err_to_str(err));
		ivpu_hw_diagnose_failure(vdev);
	}

	return ret;
}

static int ivpu_mmu_cmdq_write_cfgi_all(struct ivpu_device *vdev)
{
	u64 data0 = FIELD_PREP(IVPU_MMU_CMD_OPCODE, CMD_CFGI_ALL);
	u64 data1 = FIELD_PREP(IVPU_MMU_CMD_CFGI_1_RANGE, 0x1f);

	return ivpu_mmu_cmdq_cmd_write(vdev, "CFGI_ALL", data0, data1);
}

static int ivpu_mmu_cmdq_write_tlbi_nh_asid(struct ivpu_device *vdev, u16 ssid)
{
	u64 val = FIELD_PREP(IVPU_MMU_CMD_OPCODE, CMD_TLBI_NH_ASID) |
		  FIELD_PREP(IVPU_MMU_CMD_TLBI_0_ASID, ssid);

	return ivpu_mmu_cmdq_cmd_write(vdev, "TLBI_NH_ASID", val, 0);
}

static int ivpu_mmu_cmdq_write_tlbi_nsnh_all(struct ivpu_device *vdev)
{
	u64 val = FIELD_PREP(IVPU_MMU_CMD_OPCODE, CMD_TLBI_NSNH_ALL);

	return ivpu_mmu_cmdq_cmd_write(vdev, "TLBI_NSNH_ALL", val, 0);
}

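/*
 * Full SMMU bring-up sequence: zero the in-memory queues, disable the SMMU
 * through CR0, program CR1 cacheability/shareability and the stream table
 * and command queue base registers, then incrementally enable the command
 * queue, invalidate all cached configuration and TLB entries, enable the
 * event queue, ATS checks and interrupts, and finally set SMMUEN. Every
 * CR0 update is confirmed through CR0ACK before the next step.
 */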
static int ivpu_mmu_reset(struct ivpu_device *vdev)
{
	struct ivpu_mmu_info *mmu = vdev->mmu;
	u32 val;
	int ret;

	memset(mmu->cmdq.base, 0, IVPU_MMU_CMDQ_SIZE);
	if (!ivpu_is_force_snoop_enabled(vdev))
		clflush_cache_range(mmu->cmdq.base, IVPU_MMU_CMDQ_SIZE);
	mmu->cmdq.prod = 0;
	mmu->cmdq.cons = 0;

	memset(mmu->evtq.base, 0, IVPU_MMU_EVTQ_SIZE);
	mmu->evtq.prod = 0;
	mmu->evtq.cons = 0;

	ret = ivpu_mmu_reg_write_cr0(vdev, 0);
	if (ret)
		return ret;

	val = FIELD_PREP(IVPU_MMU_CR1_TABLE_SH, IVPU_MMU_SH_ISH) |
	      FIELD_PREP(IVPU_MMU_CR1_TABLE_OC, IVPU_MMU_CACHE_WB) |
	      FIELD_PREP(IVPU_MMU_CR1_TABLE_IC, IVPU_MMU_CACHE_WB) |
	      FIELD_PREP(IVPU_MMU_CR1_QUEUE_SH, IVPU_MMU_SH_ISH) |
	      FIELD_PREP(IVPU_MMU_CR1_QUEUE_OC, IVPU_MMU_CACHE_WB) |
	      FIELD_PREP(IVPU_MMU_CR1_QUEUE_IC, IVPU_MMU_CACHE_WB);
	REGV_WR32(IVPU_MMU_REG_CR1, val);

	REGV_WR64(IVPU_MMU_REG_STRTAB_BASE, mmu->strtab.dma_q);
	REGV_WR32(IVPU_MMU_REG_STRTAB_BASE_CFG, mmu->strtab.base_cfg);

	REGV_WR64(IVPU_MMU_REG_CMDQ_BASE, mmu->cmdq.dma_q);
	REGV_WR32(IVPU_MMU_REG_CMDQ_PROD, 0);
	REGV_WR32(IVPU_MMU_REG_CMDQ_CONS, 0);

	val = IVPU_MMU_CR0_CMDQEN;
	ret = ivpu_mmu_reg_write_cr0(vdev, val);
	if (ret)
		return ret;

	ret = ivpu_mmu_cmdq_write_cfgi_all(vdev);
	if (ret)
		return ret;

	ret = ivpu_mmu_cmdq_write_tlbi_nsnh_all(vdev);
	if (ret)
		return ret;

	ret = ivpu_mmu_cmdq_sync(vdev);
	if (ret)
		return ret;

	REGV_WR64(IVPU_MMU_REG_EVTQ_BASE, mmu->evtq.dma_q);
	REGV_WR32(IVPU_MMU_REG_EVTQ_PROD_SEC, 0);
	REGV_WR32(IVPU_MMU_REG_EVTQ_CONS_SEC, 0);

	val |= IVPU_MMU_CR0_EVTQEN;
	ret = ivpu_mmu_reg_write_cr0(vdev, val);
	if (ret)
		return ret;

	val |= IVPU_MMU_CR0_ATSCHK;
	ret = ivpu_mmu_reg_write_cr0(vdev, val);
	if (ret)
		return ret;

	ret = ivpu_mmu_irqs_setup(vdev);
	if (ret)
		return ret;

	val |= IVPU_MMU_CR0_SMMUEN;
	return ivpu_mmu_reg_write_cr0(vdev, val);
}

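/*
 * Point the stream table entry for @sid at the shared context descriptor
 * table (stage 1 translation, linear CD table format). The second
 * doubleword is written before the first, which carries the valid bit, so
 * the SMMU never observes a half-initialized STE.
 */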
static void ivpu_mmu_strtab_link_cd(struct ivpu_device *vdev, u32 sid)
{
	struct ivpu_mmu_info *mmu = vdev->mmu;
	struct ivpu_mmu_strtab *strtab = &mmu->strtab;
	struct ivpu_mmu_cdtab *cdtab = &mmu->cdtab;
	u64 *entry = strtab->base + (sid * IVPU_MMU_STRTAB_ENT_SIZE);
	u64 str[2];

	str[0] = FIELD_PREP(IVPU_MMU_STE_0_CFG, IVPU_MMU_STE_0_CFG_S1_TRANS) |
		 FIELD_PREP(IVPU_MMU_STE_0_S1CDMAX, IVPU_MMU_CDTAB_ENT_COUNT_LOG2) |
		 FIELD_PREP(IVPU_MMU_STE_0_S1FMT, IVPU_MMU_STE_0_S1FMT_LINEAR) |
		 IVPU_MMU_STE_0_V |
		 (cdtab->dma & IVPU_MMU_STE_0_S1CTXPTR_MASK);

	str[1] = FIELD_PREP(IVPU_MMU_STE_1_S1DSS, IVPU_MMU_STE_1_S1DSS_TERMINATE) |
		 FIELD_PREP(IVPU_MMU_STE_1_S1CIR, IVPU_MMU_STE_1_S1C_CACHE_NC) |
		 FIELD_PREP(IVPU_MMU_STE_1_S1COR, IVPU_MMU_STE_1_S1C_CACHE_NC) |
		 FIELD_PREP(IVPU_MMU_STE_1_S1CSH, IVPU_MMU_SH_NSH) |
		 FIELD_PREP(IVPU_MMU_STE_1_PRIVCFG, IVPU_MMU_STE_1_PRIVCFG_UNPRIV) |
		 FIELD_PREP(IVPU_MMU_STE_1_INSTCFG, IVPU_MMU_STE_1_INSTCFG_DATA) |
		 FIELD_PREP(IVPU_MMU_STE_1_STRW, IVPU_MMU_STE_1_STRW_NSEL1) |
		 FIELD_PREP(IVPU_MMU_STE_1_CONT, IVPU_MMU_STRTAB_CFG_LOG2SIZE) |
		 IVPU_MMU_STE_1_MEV |
		 IVPU_MMU_STE_1_S1STALLD;

	WRITE_ONCE(entry[1], str[1]);
	WRITE_ONCE(entry[0], str[0]);

	if (!ivpu_is_force_snoop_enabled(vdev))
		clflush_cache_range(entry, IVPU_MMU_STRTAB_ENT_SIZE);

	ivpu_dbg(vdev, MMU, "STRTAB write entry (SID=%u): 0x%llx, 0x%llx\n", sid, str[0], str[1]);
}

static int ivpu_mmu_strtab_init(struct ivpu_device *vdev)
{
	ivpu_mmu_strtab_link_cd(vdev, IVPU_MMU_STREAM_ID0);
	ivpu_mmu_strtab_link_cd(vdev, IVPU_MMU_STREAM_ID3);

	return 0;
}

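/*
 * Invalidate all non-secure EL1 TLB entries tagged with @ssid; the SSID
 * doubles as the ASID programmed into the context descriptor. This is a
 * no-op while the MMU is logically off, since the queues are
 * re-initialized on the next enable.
 */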
int ivpu_mmu_invalidate_tlb(struct ivpu_device *vdev, u16 ssid)
{
	struct ivpu_mmu_info *mmu = vdev->mmu;
	int ret = 0;

	mutex_lock(&mmu->lock);
	if (!mmu->on)
		goto unlock;

	ret = ivpu_mmu_cmdq_write_tlbi_nh_asid(vdev, ssid);
	if (ret)
		goto unlock;

	ret = ivpu_mmu_cmdq_sync(vdev);
unlock:
	mutex_unlock(&mmu->lock);
	return ret;
}

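/*
 * Install or clear the context descriptor for @ssid. A non-zero @cd_dma
 * programs a stage 1 translation with 48-bit input and output address
 * ranges and a 4KB granule, rooted at @cd_dma; a zero @cd_dma clears the
 * entry. cd[0], which holds the valid bit, is deliberately written last.
 */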
static int ivpu_mmu_cd_add(struct ivpu_device *vdev, u32 ssid, u64 cd_dma)
{
	struct ivpu_mmu_info *mmu = vdev->mmu;
	struct ivpu_mmu_cdtab *cdtab = &mmu->cdtab;
	u64 *entry;
	u64 cd[4];
	int ret = 0;

	if (ssid >= IVPU_MMU_CDTAB_ENT_COUNT)
		return -EINVAL;

	entry = cdtab->base + (ssid * IVPU_MMU_CDTAB_ENT_SIZE);

	if (cd_dma != 0) {
		cd[0] = FIELD_PREP(IVPU_MMU_CD_0_TCR_T0SZ, IVPU_MMU_T0SZ_48BIT) |
			FIELD_PREP(IVPU_MMU_CD_0_TCR_TG0, 0) |
			FIELD_PREP(IVPU_MMU_CD_0_TCR_IRGN0, 0) |
			FIELD_PREP(IVPU_MMU_CD_0_TCR_ORGN0, 0) |
			FIELD_PREP(IVPU_MMU_CD_0_TCR_SH0, 0) |
			FIELD_PREP(IVPU_MMU_CD_0_TCR_IPS, IVPU_MMU_IPS_48BIT) |
			FIELD_PREP(IVPU_MMU_CD_0_ASID, ssid) |
			IVPU_MMU_CD_0_TCR_EPD1 |
			IVPU_MMU_CD_0_AA64 |
			IVPU_MMU_CD_0_R |
			IVPU_MMU_CD_0_ASET |
			IVPU_MMU_CD_0_V;
		cd[1] = cd_dma & IVPU_MMU_CD_1_TTB0_MASK;
		cd[2] = 0;
		cd[3] = 0x0000000000007444;

		/* For global context generate memory fault on VPU */
		if (ssid == IVPU_GLOBAL_CONTEXT_MMU_SSID)
			cd[0] |= IVPU_MMU_CD_0_A;
	} else {
		memset(cd, 0, sizeof(cd));
	}

	WRITE_ONCE(entry[1], cd[1]);
	WRITE_ONCE(entry[2], cd[2]);
	WRITE_ONCE(entry[3], cd[3]);
	WRITE_ONCE(entry[0], cd[0]);

	if (!ivpu_is_force_snoop_enabled(vdev))
		clflush_cache_range(entry, IVPU_MMU_CDTAB_ENT_SIZE);

	ivpu_dbg(vdev, MMU, "CDTAB %s entry (SSID=%u, dma=%pad): 0x%llx, 0x%llx, 0x%llx, 0x%llx\n",
		 cd_dma ? "write" : "clear", ssid, &cd_dma, cd[0], cd[1], cd[2], cd[3]);

	mutex_lock(&mmu->lock);
	if (!mmu->on)
		goto unlock;

	ret = ivpu_mmu_cmdq_write_cfgi_all(vdev);
	if (ret)
		goto unlock;

	ret = ivpu_mmu_cmdq_sync(vdev);
unlock:
	mutex_unlock(&mmu->lock);
	return ret;
}

static int ivpu_mmu_cd_add_gbl(struct ivpu_device *vdev)
{
	int ret;

	ret = ivpu_mmu_cd_add(vdev, 0, vdev->gctx.pgtable.pgd_dma);
	if (ret)
		ivpu_err(vdev, "Failed to add global CD entry: %d\n", ret);

	return ret;
}

static int ivpu_mmu_cd_add_user(struct ivpu_device *vdev, u32 ssid, dma_addr_t cd_dma)
{
	int ret;

	if (ssid == 0) {
		ivpu_err(vdev, "Invalid SSID: %u\n", ssid);
		return -EINVAL;
	}

	ret = ivpu_mmu_cd_add(vdev, ssid, cd_dma);
	if (ret)
		ivpu_err(vdev, "Failed to add CD entry SSID=%u: %d\n", ssid, ret);

	return ret;
}

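/*
 * One-time MMU initialization: allocate the context descriptor table,
 * stream table and queues, link both stream IDs to the CD table, install
 * the global context descriptor and bring the SMMU up.
 */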
int ivpu_mmu_init(struct ivpu_device *vdev)
{
	struct ivpu_mmu_info *mmu = vdev->mmu;
	int ret;

	ivpu_dbg(vdev, MMU, "Init..\n");

	ivpu_mmu_config_check(vdev);

	ret = drmm_mutex_init(&vdev->drm, &mmu->lock);
	if (ret)
		return ret;

	ret = ivpu_mmu_structs_alloc(vdev);
	if (ret)
		return ret;

	ret = ivpu_mmu_strtab_init(vdev);
	if (ret) {
		ivpu_err(vdev, "Failed to initialize strtab: %d\n", ret);
		return ret;
	}

	ret = ivpu_mmu_cd_add_gbl(vdev);
	if (ret) {
		ivpu_err(vdev, "Failed to add global CD: %d\n", ret);
		return ret;
	}

	ret = ivpu_mmu_enable(vdev);
	if (ret) {
		ivpu_err(vdev, "Failed to enable MMU: %d\n", ret);
		return ret;
	}

	ivpu_dbg(vdev, MMU, "Init done\n");

	return 0;
}

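/*
 * (Re-)enable the MMU, e.g. after power-up or resume: mark it logically
 * on, run the full reset/bring-up sequence and invalidate any stale cached
 * configuration and TLB entries. On failure the MMU is marked off again.
 */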
int ivpu_mmu_enable(struct ivpu_device *vdev)
{
	struct ivpu_mmu_info *mmu = vdev->mmu;
	int ret;

	mutex_lock(&mmu->lock);

	mmu->on = true;

	ret = ivpu_mmu_reset(vdev);
	if (ret) {
		ivpu_err(vdev, "Failed to reset MMU: %d\n", ret);
		goto err;
	}

	ret = ivpu_mmu_cmdq_write_cfgi_all(vdev);
	if (ret)
		goto err;

	ret = ivpu_mmu_cmdq_write_tlbi_nsnh_all(vdev);
	if (ret)
		goto err;

	ret = ivpu_mmu_cmdq_sync(vdev);
	if (ret)
		goto err;

	mutex_unlock(&mmu->lock);

	return 0;
err:
	mmu->on = false;
	mutex_unlock(&mmu->lock);
	return ret;
}

void ivpu_mmu_disable(struct ivpu_device *vdev)
{
	struct ivpu_mmu_info *mmu = vdev->mmu;

	mutex_lock(&mmu->lock);
	mmu->on = false;
	mutex_unlock(&mmu->lock);
}

static void ivpu_mmu_dump_event(struct ivpu_device *vdev, u32 *event)
{
	u32 ssid = FIELD_GET(IVPU_MMU_EVT_SSID_MASK, event[0]);
	u32 op = FIELD_GET(IVPU_MMU_EVT_OP_MASK, event[0]);
	u64 fetch_addr = ((u64)event[7]) << 32 | event[6];
	u64 in_addr = ((u64)event[5]) << 32 | event[4];
	u32 sid = event[1];

	ivpu_err_ratelimited(vdev, "MMU EVTQ: 0x%x (%s) SSID: %d SID: %d, e[2] %08x, e[3] %08x, in addr: 0x%llx, fetch addr: 0x%llx\n",
			     op, ivpu_mmu_event_to_str(op), ssid, sid,
			     event[2], event[3], in_addr, fetch_addr);
}

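/*
 * Pop one 32-byte event from the event queue, or return NULL if it is
 * empty. The producer pointer is re-read from the (secure) EVTQ_PROD
 * register on every call; the consumer pointer is advanced locally and
 * only written back to the hardware by the caller (see
 * ivpu_mmu_irq_evtq_handler()).
 */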
static u32 *ivpu_mmu_get_event(struct ivpu_device *vdev)
{
	struct ivpu_mmu_queue *evtq = &vdev->mmu->evtq;
	u32 idx = IVPU_MMU_Q_IDX(evtq->cons);
	u32 *evt = evtq->base + (idx * IVPU_MMU_EVTQ_CMD_SIZE);

	evtq->prod = REGV_RD32(IVPU_MMU_REG_EVTQ_PROD_SEC);
	if (ivpu_mmu_queue_is_empty(evtq))
		return NULL;

	evtq->cons = (evtq->cons + 1) & IVPU_MMU_Q_WRAP_MASK;
	return evt;
}

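/*
 * Event queue interrupt handler. A fault in the global context triggers
 * device recovery; faults in user contexts mark the context invalid and
 * acknowledge the event via EVTQ_CONS_SEC. Once the queue is drained, the
 * source is pushed onto the IRQ FIFO for bottom-half processing.
 */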
void ivpu_mmu_irq_evtq_handler(struct ivpu_device *vdev)
{
	u32 *event;
	u32 ssid;

	ivpu_dbg(vdev, IRQ, "MMU event queue\n");

	while ((event = ivpu_mmu_get_event(vdev)) != NULL) {
		ivpu_mmu_dump_event(vdev, event);

		ssid = FIELD_GET(IVPU_MMU_EVT_SSID_MASK, event[0]);
		if (ssid == IVPU_GLOBAL_CONTEXT_MMU_SSID) {
			ivpu_pm_trigger_recovery(vdev, "MMU event");
			return;
		}

		ivpu_mmu_user_context_mark_invalid(vdev, ssid);
		REGV_WR32(IVPU_MMU_REG_EVTQ_CONS_SEC, vdev->mmu->evtq.cons);
	}

	if (!kfifo_put(&vdev->hw->irq.fifo, IVPU_HW_IRQ_SRC_MMU_EVTQ))
		ivpu_err_ratelimited(vdev, "IRQ FIFO full\n");
}

void ivpu_mmu_evtq_dump(struct ivpu_device *vdev)
{
	u32 *event;

	while ((event = ivpu_mmu_get_event(vdev)) != NULL)
		ivpu_mmu_dump_event(vdev, event);
}

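/*
 * Global error interrupt handler. Per the SMMUv3 scheme, the active error
 * bits are those that differ between GERROR and GERRORN; writing the
 * current GERROR value back to GERRORN acknowledges everything reported
 * so far.
 */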
void ivpu_mmu_irq_gerr_handler(struct ivpu_device *vdev)
{
	u32 gerror_val, gerrorn_val, active;

	ivpu_dbg(vdev, IRQ, "MMU error\n");

	gerror_val = REGV_RD32(IVPU_MMU_REG_GERROR);
	gerrorn_val = REGV_RD32(IVPU_MMU_REG_GERRORN);

	active = gerror_val ^ gerrorn_val;
	if (!(active & IVPU_MMU_GERROR_ERR_MASK))
		return;

	if (REG_TEST_FLD(IVPU_MMU_REG_GERROR, MSI_ABT, active))
		ivpu_warn_ratelimited(vdev, "MMU MSI ABT write aborted\n");

	if (REG_TEST_FLD(IVPU_MMU_REG_GERROR, MSI_PRIQ_ABT, active))
		ivpu_warn_ratelimited(vdev, "MMU PRIQ MSI ABT write aborted\n");

	if (REG_TEST_FLD(IVPU_MMU_REG_GERROR, MSI_EVTQ_ABT, active))
		ivpu_warn_ratelimited(vdev, "MMU EVTQ MSI ABT write aborted\n");

	if (REG_TEST_FLD(IVPU_MMU_REG_GERROR, MSI_CMDQ_ABT, active))
		ivpu_warn_ratelimited(vdev, "MMU CMDQ MSI ABT write aborted\n");

	if (REG_TEST_FLD(IVPU_MMU_REG_GERROR, PRIQ_ABT, active))
		ivpu_err_ratelimited(vdev, "MMU PRIQ write aborted\n");

	if (REG_TEST_FLD(IVPU_MMU_REG_GERROR, EVTQ_ABT, active))
		ivpu_err_ratelimited(vdev, "MMU EVTQ write aborted\n");

	if (REG_TEST_FLD(IVPU_MMU_REG_GERROR, CMDQ, active))
		ivpu_err_ratelimited(vdev, "MMU CMDQ write aborted\n");

	REGV_WR32(IVPU_MMU_REG_GERRORN, gerror_val);
}

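/*
 * Bind or unbind a per-context page table. Programming a context
 * descriptor with the page directory address enables translation for
 * @ssid; clearing it (cd_dma == 0) makes subsequent accesses with that
 * SSID fault.
 */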
int ivpu_mmu_set_pgtable(struct ivpu_device *vdev, int ssid, struct ivpu_mmu_pgtable *pgtable)
{
	return ivpu_mmu_cd_add_user(vdev, ssid, pgtable->pgd_dma);
}

void ivpu_mmu_clear_pgtable(struct ivpu_device *vdev, int ssid)
{
	ivpu_mmu_cd_add_user(vdev, ssid, 0); /* 0 will clear CD entry */
}