/* arch/loongarch/mm/tlbex.S — LoongArch TLB exception handlers */
  1. /* SPDX-License-Identifier: GPL-2.0 */
  2. /*
  3. * Copyright (C) 2020-2022 Loongson Technology Corporation Limited
  4. */
  5. #include <asm/asm.h>
  6. #include <asm/loongarch.h>
  7. #include <asm/page.h>
  8. #include <asm/pgtable.h>
  9. #include <asm/regdef.h>
  10. #include <asm/stackframe.h>
  11. #define INVTLB_ADDR_GFALSE_AND_ASID 5
  12. #define PTRS_PER_PGD_BITS (PAGE_SHIFT - 3)
  13. #define PTRS_PER_PUD_BITS (PAGE_SHIFT - 3)
  14. #define PTRS_PER_PMD_BITS (PAGE_SHIFT - 3)
  15. #define PTRS_PER_PTE_BITS (PAGE_SHIFT - 3)
/*
 * tlb_do_page_fault_0 / tlb_do_page_fault_1: common C fallback paths.
 *
 * Build a full pt_regs frame on the kernel stack, record the faulting
 * virtual address, then call do_page_fault(regs, write, badvaddr).
 * \write selects the access-type flag passed in a1 (0 for the load/read
 * fault path, 1 for the store/write fault path).
 */
.macro tlb_do_page_fault, write
	SYM_CODE_START(tlb_do_page_fault_\write)
	UNWIND_HINT_UNDEFINED
	SAVE_ALL
	csrrd	a2, LOONGARCH_CSR_BADV		/* a2 = faulting virtual address */
	move	a0, sp				/* a0 = struct pt_regs * */
	REG_S	a2, sp, PT_BVADDR		/* record badvaddr in pt_regs */
	li.w	a1, \write			/* a1 = write flag (0 or 1) */
	bl	do_page_fault
	RESTORE_ALL_AND_RET
	SYM_CODE_END(tlb_do_page_fault_\write)
.endm

tlb_do_page_fault	0
tlb_do_page_fault	1
/*
 * TLB protection exception handler: hand the fault to C code.
 *
 * Unlike the load/store/modify handlers below there is no assembly
 * fast path; we save state and call do_page_fault(regs, 0, badvaddr)
 * directly (second argument — the write flag — is passed as zero).
 */
SYM_CODE_START(handle_tlb_protect)
	UNWIND_HINT_UNDEFINED
	BACKUP_T0T1				/* stash t0/t1 before SAVE_ALL uses them */
	SAVE_ALL
	move	a0, sp				/* a0 = struct pt_regs * */
	move	a1, zero			/* a1 = write flag = 0 */
	csrrd	a2, LOONGARCH_CSR_BADV		/* a2 = faulting virtual address */
	REG_S	a2, sp, PT_BVADDR		/* record badvaddr in pt_regs */
	la_abs	t0, do_page_fault		/* absolute address: reachable from any mapping */
	jirl	ra, t0, 0
	RESTORE_ALL_AND_RET
SYM_CODE_END(handle_tlb_protect)
/*
 * TLB load (page-invalid-on-read) exception handler.
 *
 * Fast path, entirely in assembly: walk the page tables by hand, and if
 * the PTE is present, set _PAGE_VALID in it and rewrite the matching TLB
 * entry. If the PTE is not present, fall back to tlb_do_page_fault_0 (C).
 * A huge-page PMD entry (leaf PTE at PMD level) is handled separately at
 * tlb_huge_update_load.
 *
 * Only t0, t1 and ra are used; they are preserved across the handler in
 * the EXCEPTION_KS0/1/2 scratch CSRs and restored before ertn.
 */
SYM_CODE_START(handle_tlb_load)
	UNWIND_HINT_UNDEFINED
	csrwr	t0, EXCEPTION_KS0		/* save the three scratch GPRs ... */
	csrwr	t1, EXCEPTION_KS1
	csrwr	ra, EXCEPTION_KS2		/* ... so the handler is transparent */

	/*
	 * The vmalloc handling is not in the hotpath.
	 */
	csrrd	t0, LOONGARCH_CSR_BADV		/* t0 = faulting virtual address */
	bltz	t0, vmalloc_load		/* sign bit set: kernel-half address */
	csrrd	t1, LOONGARCH_CSR_PGDL		/* t1 = user-space page table root */

vmalloc_done_load:
	/* Get PGD offset in bytes */
	bstrpick.d	ra, t0, PTRS_PER_PGD_BITS + PGDIR_SHIFT - 1, PGDIR_SHIFT
	alsl.d	t1, ra, t1, 3			/* t1 = &pgd[index] (8-byte entries) */
#if CONFIG_PGTABLE_LEVELS > 3
	ld.d	t1, t1, 0			/* descend into the PUD table */
	bstrpick.d	ra, t0, PTRS_PER_PUD_BITS + PUD_SHIFT - 1, PUD_SHIFT
	alsl.d	t1, ra, t1, 3			/* t1 = &pud[index] */
#endif
#if CONFIG_PGTABLE_LEVELS > 2
	ld.d	t1, t1, 0			/* descend into the PMD table */
	bstrpick.d	ra, t0, PTRS_PER_PMD_BITS + PMD_SHIFT - 1, PMD_SHIFT
	alsl.d	t1, ra, t1, 3			/* t1 = &pmd[index] */
#endif
	ld.d	ra, t1, 0			/* ra = PMD entry */

	/*
	 * For huge tlb entries, pmde doesn't contain an address but
	 * instead contains the tlb pte. Check the PAGE_HUGE bit and
	 * see if we need to jump to huge tlb processing.
	 */
	rotri.d	ra, ra, _PAGE_HUGE_SHIFT + 1	/* rotate PAGE_HUGE into the sign bit */
	bltz	ra, tlb_huge_update_load
	rotri.d	ra, ra, 64 - (_PAGE_HUGE_SHIFT + 1)	/* undo the rotation */

	bstrpick.d	t0, t0, PTRS_PER_PTE_BITS + PAGE_SHIFT - 1, PAGE_SHIFT
	alsl.d	t1, t0, ra, _PTE_T_LOG2		/* t1 = &pte_table[pte_index] */
#ifdef CONFIG_SMP
smp_pgtable_change_load:
	ll.d	t0, t1, 0			/* load-linked: PTE may change under us */
#else
	ld.d	t0, t1, 0
#endif
	andi	ra, t0, _PAGE_PRESENT
	beqz	ra, nopage_tlb_load		/* not present: take the C fault path */

	ori	t0, t0, _PAGE_VALID		/* mark the page valid (software-managed A bit) */
#ifdef CONFIG_SMP
	sc.d	t0, t1, 0
	beqz	t0, smp_pgtable_change_load	/* store-conditional failed: retry */
#else
	st.d	t0, t1, 0
#endif
	tlbsrch					/* locate the matching TLB entry (sets index) */
	bstrins.d	t1, zero, 3, 3		/* clear bit 3: point at the even PTE of the pair */
	ld.d	t0, t1, 0			/* even PTE -> ENTRYLO0 */
	ld.d	t1, t1, 8			/* odd PTE  -> ENTRYLO1 */
	csrwr	t0, LOONGARCH_CSR_TLBELO0
	csrwr	t1, LOONGARCH_CSR_TLBELO1
	tlbwr					/* rewrite the indexed TLB entry */

	csrrd	t0, EXCEPTION_KS0		/* restore scratch GPRs and return */
	csrrd	t1, EXCEPTION_KS1
	csrrd	ra, EXCEPTION_KS2
	ertn

#ifdef CONFIG_64BIT
vmalloc_load:
	la_abs	t1, swapper_pg_dir		/* kernel addresses walk the kernel PGD */
	b	vmalloc_done_load
#endif

	/* This is the entry point of a huge page. */
tlb_huge_update_load:
#ifdef CONFIG_SMP
	ll.d	ra, t1, 0			/* reload PMD-level leaf PTE atomically */
#else
	rotri.d	ra, ra, 64 - (_PAGE_HUGE_SHIFT + 1)	/* undo rotation done above */
#endif
	andi	t0, ra, _PAGE_PRESENT
	beqz	t0, nopage_tlb_load		/* not present: take the C fault path */

#ifdef CONFIG_SMP
	ori	t0, ra, _PAGE_VALID
	sc.d	t0, t1, 0
	beqz	t0, tlb_huge_update_load	/* store-conditional failed: retry */
	ori	t0, ra, _PAGE_VALID		/* sc.d consumed t0; rebuild the PTE value */
#else
	ori	t0, ra, _PAGE_VALID
	st.d	t0, t1, 0
#endif
	/* Flush any stale entry for this address+ASID before filling a huge one */
	csrrd	ra, LOONGARCH_CSR_ASID
	csrrd	t1, LOONGARCH_CSR_BADV
	andi	ra, ra, CSR_ASID_ASID
	invtlb	INVTLB_ADDR_GFALSE_AND_ASID, ra, t1

	/*
	 * A huge PTE describes an area the size of the
	 * configured huge page size. This is twice the
	 * size of the large TLB entry we intend to use.
	 * A TLB entry half the size of the configured
	 * huge page size is configured into entrylo0
	 * and entrylo1 to cover the contiguous huge PTE
	 * address space.
	 */
	/* Huge page: Move Global bit */
	xori	t0, t0, _PAGE_HUGE		/* clear the HUGE marker bit */
	lu12i.w	t1, _PAGE_HGLOBAL >> 12
	and	t1, t0, t1			/* isolate the HGLOBAL bit */
	srli.d	t1, t1, (_PAGE_HGLOBAL_SHIFT - _PAGE_GLOBAL_SHIFT)
	or	t0, t0, t1			/* mirror HGLOBAL into GLOBAL */

	move	ra, t0
	csrwr	ra, LOONGARCH_CSR_TLBELO0
	/* Convert to entrylo1: advance the PA by half the huge page size */
	addi.d	t1, zero, 1
	slli.d	t1, t1, (HPAGE_SHIFT - 1)
	add.d	t0, t0, t1
	csrwr	t0, LOONGARCH_CSR_TLBELO1

	/* Set huge page tlb entry size */
	addu16i.d	t0, zero, (CSR_TLBIDX_PS >> 16)
	addu16i.d	t1, zero, (PS_HUGE_SIZE << (CSR_TLBIDX_PS_SHIFT - 16))
	csrxchg	t1, t0, LOONGARCH_CSR_TLBIDX

	tlbfill					/* fill a (hardware-chosen) TLB entry */

	/* Reset default page size */
	addu16i.d	t0, zero, (CSR_TLBIDX_PS >> 16)
	addu16i.d	t1, zero, (PS_DEFAULT_SIZE << (CSR_TLBIDX_PS_SHIFT - 16))
	csrxchg	t1, t0, LOONGARCH_CSR_TLBIDX

	csrrd	t0, EXCEPTION_KS0		/* restore scratch GPRs and return */
	csrrd	t1, EXCEPTION_KS1
	csrrd	ra, EXCEPTION_KS2
	ertn

nopage_tlb_load:
	dbar	0x700				/* data barrier, hint 0x700 — NOTE(review): presumably a
						   lighter-weight sync than dbar 0; confirm vs ISA manual */
	csrrd	ra, EXCEPTION_KS2		/* restore ra; t0/t1 re-saved by the C path */
	la_abs	t0, tlb_do_page_fault_0
	jr	t0				/* tail-jump to the read-fault C path */
SYM_CODE_END(handle_tlb_load)
/*
 * PTW variant of the load handler: no assembly fast path, go straight to
 * the C read-fault path. t0/t1 are saved in LOONGARCH_CSR_KS0/KS1 —
 * NOTE(review): presumably the same CSRs BACKUP_T0T1/SAVE_ALL use to
 * restore them in the C entry; confirm against stackframe.h.
 */
SYM_CODE_START(handle_tlb_load_ptw)
	UNWIND_HINT_UNDEFINED
	csrwr	t0, LOONGARCH_CSR_KS0
	csrwr	t1, LOONGARCH_CSR_KS1
	la_abs	t0, tlb_do_page_fault_0
	jr	t0				/* tail-jump to the read-fault C path */
SYM_CODE_END(handle_tlb_load_ptw)
/*
 * TLB store (page-invalid-on-write) exception handler.
 *
 * Same structure as handle_tlb_load, but the PTE must have BOTH
 * _PAGE_PRESENT and _PAGE_WRITE set, and the fast path sets
 * _PAGE_VALID | _PAGE_DIRTY | _PAGE_MODIFIED before rewriting the TLB
 * entry. Failures fall back to tlb_do_page_fault_1 (write fault, C).
 *
 * Only t0, t1 and ra are used; preserved in EXCEPTION_KS0/1/2.
 */
SYM_CODE_START(handle_tlb_store)
	UNWIND_HINT_UNDEFINED
	csrwr	t0, EXCEPTION_KS0		/* save the three scratch GPRs */
	csrwr	t1, EXCEPTION_KS1
	csrwr	ra, EXCEPTION_KS2

	/*
	 * The vmalloc handling is not in the hotpath.
	 */
	csrrd	t0, LOONGARCH_CSR_BADV		/* t0 = faulting virtual address */
	bltz	t0, vmalloc_store		/* sign bit set: kernel-half address */
	csrrd	t1, LOONGARCH_CSR_PGDL		/* t1 = user-space page table root */

vmalloc_done_store:
	/* Get PGD offset in bytes */
	bstrpick.d	ra, t0, PTRS_PER_PGD_BITS + PGDIR_SHIFT - 1, PGDIR_SHIFT
	alsl.d	t1, ra, t1, 3			/* t1 = &pgd[index] (8-byte entries) */
#if CONFIG_PGTABLE_LEVELS > 3
	ld.d	t1, t1, 0			/* descend into the PUD table */
	bstrpick.d	ra, t0, PTRS_PER_PUD_BITS + PUD_SHIFT - 1, PUD_SHIFT
	alsl.d	t1, ra, t1, 3			/* t1 = &pud[index] */
#endif
#if CONFIG_PGTABLE_LEVELS > 2
	ld.d	t1, t1, 0			/* descend into the PMD table */
	bstrpick.d	ra, t0, PTRS_PER_PMD_BITS + PMD_SHIFT - 1, PMD_SHIFT
	alsl.d	t1, ra, t1, 3			/* t1 = &pmd[index] */
#endif
	ld.d	ra, t1, 0			/* ra = PMD entry */

	/*
	 * For huge tlb entries, pmde doesn't contain an address but
	 * instead contains the tlb pte. Check the PAGE_HUGE bit and
	 * see if we need to jump to huge tlb processing.
	 */
	rotri.d	ra, ra, _PAGE_HUGE_SHIFT + 1	/* rotate PAGE_HUGE into the sign bit */
	bltz	ra, tlb_huge_update_store
	rotri.d	ra, ra, 64 - (_PAGE_HUGE_SHIFT + 1)	/* undo the rotation */

	bstrpick.d	t0, t0, PTRS_PER_PTE_BITS + PAGE_SHIFT - 1, PAGE_SHIFT
	alsl.d	t1, t0, ra, _PTE_T_LOG2		/* t1 = &pte_table[pte_index] */
#ifdef CONFIG_SMP
smp_pgtable_change_store:
	ll.d	t0, t1, 0			/* load-linked: PTE may change under us */
#else
	ld.d	t0, t1, 0
#endif
	/* Require PRESENT and WRITE both set (and+xor leaves 0 only then) */
	andi	ra, t0, _PAGE_PRESENT | _PAGE_WRITE
	xori	ra, ra, _PAGE_PRESENT | _PAGE_WRITE
	bnez	ra, nopage_tlb_store		/* missing either bit: C fault path */

	ori	t0, t0, (_PAGE_VALID | _PAGE_DIRTY | _PAGE_MODIFIED)
#ifdef CONFIG_SMP
	sc.d	t0, t1, 0
	beqz	t0, smp_pgtable_change_store	/* store-conditional failed: retry */
#else
	st.d	t0, t1, 0
#endif
	tlbsrch					/* locate the matching TLB entry (sets index) */
	bstrins.d	t1, zero, 3, 3		/* clear bit 3: point at the even PTE of the pair */
	ld.d	t0, t1, 0			/* even PTE -> ENTRYLO0 */
	ld.d	t1, t1, 8			/* odd PTE  -> ENTRYLO1 */
	csrwr	t0, LOONGARCH_CSR_TLBELO0
	csrwr	t1, LOONGARCH_CSR_TLBELO1
	tlbwr					/* rewrite the indexed TLB entry */

	csrrd	t0, EXCEPTION_KS0		/* restore scratch GPRs and return */
	csrrd	t1, EXCEPTION_KS1
	csrrd	ra, EXCEPTION_KS2
	ertn

#ifdef CONFIG_64BIT
vmalloc_store:
	la_abs	t1, swapper_pg_dir		/* kernel addresses walk the kernel PGD */
	b	vmalloc_done_store
#endif

	/* This is the entry point of a huge page. */
tlb_huge_update_store:
#ifdef CONFIG_SMP
	ll.d	ra, t1, 0			/* reload PMD-level leaf PTE atomically */
#else
	rotri.d	ra, ra, 64 - (_PAGE_HUGE_SHIFT + 1)	/* undo rotation done above */
#endif
	/* Require PRESENT and WRITE both set */
	andi	t0, ra, _PAGE_PRESENT | _PAGE_WRITE
	xori	t0, t0, _PAGE_PRESENT | _PAGE_WRITE
	bnez	t0, nopage_tlb_store		/* missing either bit: C fault path */

#ifdef CONFIG_SMP
	ori	t0, ra, (_PAGE_VALID | _PAGE_DIRTY | _PAGE_MODIFIED)
	sc.d	t0, t1, 0
	beqz	t0, tlb_huge_update_store	/* store-conditional failed: retry */
	ori	t0, ra, (_PAGE_VALID | _PAGE_DIRTY | _PAGE_MODIFIED)	/* sc.d consumed t0; rebuild */
#else
	ori	t0, ra, (_PAGE_VALID | _PAGE_DIRTY | _PAGE_MODIFIED)
	st.d	t0, t1, 0
#endif
	/* Flush any stale entry for this address+ASID before filling a huge one */
	csrrd	ra, LOONGARCH_CSR_ASID
	csrrd	t1, LOONGARCH_CSR_BADV
	andi	ra, ra, CSR_ASID_ASID
	invtlb	INVTLB_ADDR_GFALSE_AND_ASID, ra, t1

	/*
	 * A huge PTE describes an area the size of the
	 * configured huge page size. This is twice the
	 * size of the large TLB entry we intend to use.
	 * A TLB entry half the size of the configured
	 * huge page size is configured into entrylo0
	 * and entrylo1 to cover the contiguous huge PTE
	 * address space.
	 */
	/* Huge page: Move Global bit */
	xori	t0, t0, _PAGE_HUGE		/* clear the HUGE marker bit */
	lu12i.w	t1, _PAGE_HGLOBAL >> 12
	and	t1, t0, t1			/* isolate the HGLOBAL bit */
	srli.d	t1, t1, (_PAGE_HGLOBAL_SHIFT - _PAGE_GLOBAL_SHIFT)
	or	t0, t0, t1			/* mirror HGLOBAL into GLOBAL */

	move	ra, t0
	csrwr	ra, LOONGARCH_CSR_TLBELO0
	/* Convert to entrylo1: advance the PA by half the huge page size */
	addi.d	t1, zero, 1
	slli.d	t1, t1, (HPAGE_SHIFT - 1)
	add.d	t0, t0, t1
	csrwr	t0, LOONGARCH_CSR_TLBELO1

	/* Set huge page tlb entry size */
	addu16i.d	t0, zero, (CSR_TLBIDX_PS >> 16)
	addu16i.d	t1, zero, (PS_HUGE_SIZE << (CSR_TLBIDX_PS_SHIFT - 16))
	csrxchg	t1, t0, LOONGARCH_CSR_TLBIDX

	tlbfill					/* fill a (hardware-chosen) TLB entry */

	/* Reset default page size */
	addu16i.d	t0, zero, (CSR_TLBIDX_PS >> 16)
	addu16i.d	t1, zero, (PS_DEFAULT_SIZE << (CSR_TLBIDX_PS_SHIFT - 16))
	csrxchg	t1, t0, LOONGARCH_CSR_TLBIDX

	csrrd	t0, EXCEPTION_KS0		/* restore scratch GPRs and return */
	csrrd	t1, EXCEPTION_KS1
	csrrd	ra, EXCEPTION_KS2
	ertn

nopage_tlb_store:
	dbar	0x700				/* data barrier, hint 0x700 — NOTE(review): presumably a
						   lighter-weight sync than dbar 0; confirm vs ISA manual */
	csrrd	ra, EXCEPTION_KS2		/* restore ra; t0/t1 re-saved by the C path */
	la_abs	t0, tlb_do_page_fault_1
	jr	t0				/* tail-jump to the write-fault C path */
SYM_CODE_END(handle_tlb_store)
/*
 * PTW variant of the store handler: no assembly fast path, go straight to
 * the C write-fault path. t0/t1 are saved in LOONGARCH_CSR_KS0/KS1 —
 * NOTE(review): presumably the same CSRs BACKUP_T0T1/SAVE_ALL use to
 * restore them in the C entry; confirm against stackframe.h.
 */
SYM_CODE_START(handle_tlb_store_ptw)
	UNWIND_HINT_UNDEFINED
	csrwr	t0, LOONGARCH_CSR_KS0
	csrwr	t1, LOONGARCH_CSR_KS1
	la_abs	t0, tlb_do_page_fault_1
	jr	t0				/* tail-jump to the write-fault C path */
SYM_CODE_END(handle_tlb_store_ptw)
/*
 * TLB modify (write to a clean/non-dirty page) exception handler.
 *
 * Same structure as handle_tlb_store, but only _PAGE_WRITE is checked
 * (the entry already matched, so presence is implied by the exception
 * type); the fast path sets _PAGE_VALID | _PAGE_DIRTY | _PAGE_MODIFIED.
 * Failures fall back to tlb_do_page_fault_1 (write fault, C).
 *
 * Only t0, t1 and ra are used; preserved in EXCEPTION_KS0/1/2.
 */
SYM_CODE_START(handle_tlb_modify)
	UNWIND_HINT_UNDEFINED
	csrwr	t0, EXCEPTION_KS0		/* save the three scratch GPRs */
	csrwr	t1, EXCEPTION_KS1
	csrwr	ra, EXCEPTION_KS2

	/*
	 * The vmalloc handling is not in the hotpath.
	 */
	csrrd	t0, LOONGARCH_CSR_BADV		/* t0 = faulting virtual address */
	bltz	t0, vmalloc_modify		/* sign bit set: kernel-half address */
	csrrd	t1, LOONGARCH_CSR_PGDL		/* t1 = user-space page table root */

vmalloc_done_modify:
	/* Get PGD offset in bytes */
	bstrpick.d	ra, t0, PTRS_PER_PGD_BITS + PGDIR_SHIFT - 1, PGDIR_SHIFT
	alsl.d	t1, ra, t1, 3			/* t1 = &pgd[index] (8-byte entries) */
#if CONFIG_PGTABLE_LEVELS > 3
	ld.d	t1, t1, 0			/* descend into the PUD table */
	bstrpick.d	ra, t0, PTRS_PER_PUD_BITS + PUD_SHIFT - 1, PUD_SHIFT
	alsl.d	t1, ra, t1, 3			/* t1 = &pud[index] */
#endif
#if CONFIG_PGTABLE_LEVELS > 2
	ld.d	t1, t1, 0			/* descend into the PMD table */
	bstrpick.d	ra, t0, PTRS_PER_PMD_BITS + PMD_SHIFT - 1, PMD_SHIFT
	alsl.d	t1, ra, t1, 3			/* t1 = &pmd[index] */
#endif
	ld.d	ra, t1, 0			/* ra = PMD entry */

	/*
	 * For huge tlb entries, pmde doesn't contain an address but
	 * instead contains the tlb pte. Check the PAGE_HUGE bit and
	 * see if we need to jump to huge tlb processing.
	 */
	rotri.d	ra, ra, _PAGE_HUGE_SHIFT + 1	/* rotate PAGE_HUGE into the sign bit */
	bltz	ra, tlb_huge_update_modify
	rotri.d	ra, ra, 64 - (_PAGE_HUGE_SHIFT + 1)	/* undo the rotation */

	bstrpick.d	t0, t0, PTRS_PER_PTE_BITS + PAGE_SHIFT - 1, PAGE_SHIFT
	alsl.d	t1, t0, ra, _PTE_T_LOG2		/* t1 = &pte_table[pte_index] */
#ifdef CONFIG_SMP
smp_pgtable_change_modify:
	ll.d	t0, t1, 0			/* load-linked: PTE may change under us */
#else
	ld.d	t0, t1, 0
#endif
	andi	ra, t0, _PAGE_WRITE
	beqz	ra, nopage_tlb_modify		/* not writable: take the C fault path */

	ori	t0, t0, (_PAGE_VALID | _PAGE_DIRTY | _PAGE_MODIFIED)
#ifdef CONFIG_SMP
	sc.d	t0, t1, 0
	beqz	t0, smp_pgtable_change_modify	/* store-conditional failed: retry */
#else
	st.d	t0, t1, 0
#endif
	tlbsrch					/* locate the matching TLB entry (sets index) */
	bstrins.d	t1, zero, 3, 3		/* clear bit 3: point at the even PTE of the pair */
	ld.d	t0, t1, 0			/* even PTE -> ENTRYLO0 */
	ld.d	t1, t1, 8			/* odd PTE  -> ENTRYLO1 */
	csrwr	t0, LOONGARCH_CSR_TLBELO0
	csrwr	t1, LOONGARCH_CSR_TLBELO1
	tlbwr					/* rewrite the indexed TLB entry */

	csrrd	t0, EXCEPTION_KS0		/* restore scratch GPRs and return */
	csrrd	t1, EXCEPTION_KS1
	csrrd	ra, EXCEPTION_KS2
	ertn

#ifdef CONFIG_64BIT
vmalloc_modify:
	la_abs	t1, swapper_pg_dir		/* kernel addresses walk the kernel PGD */
	b	vmalloc_done_modify
#endif

	/* This is the entry point of a huge page. */
tlb_huge_update_modify:
#ifdef CONFIG_SMP
	ll.d	ra, t1, 0			/* reload PMD-level leaf PTE atomically */
#else
	rotri.d	ra, ra, 64 - (_PAGE_HUGE_SHIFT + 1)	/* undo rotation done above */
#endif
	andi	t0, ra, _PAGE_WRITE
	beqz	t0, nopage_tlb_modify		/* not writable: take the C fault path */

#ifdef CONFIG_SMP
	ori	t0, ra, (_PAGE_VALID | _PAGE_DIRTY | _PAGE_MODIFIED)
	sc.d	t0, t1, 0
	beqz	t0, tlb_huge_update_modify	/* store-conditional failed: retry */
	ori	t0, ra, (_PAGE_VALID | _PAGE_DIRTY | _PAGE_MODIFIED)	/* sc.d consumed t0; rebuild */
#else
	ori	t0, ra, (_PAGE_VALID | _PAGE_DIRTY | _PAGE_MODIFIED)
	st.d	t0, t1, 0
#endif
	/* Flush any stale entry for this address+ASID before filling a huge one */
	csrrd	ra, LOONGARCH_CSR_ASID
	csrrd	t1, LOONGARCH_CSR_BADV
	andi	ra, ra, CSR_ASID_ASID
	invtlb	INVTLB_ADDR_GFALSE_AND_ASID, ra, t1

	/*
	 * A huge PTE describes an area the size of the
	 * configured huge page size. This is twice the
	 * size of the large TLB entry we intend to use.
	 * A TLB entry half the size of the configured
	 * huge page size is configured into entrylo0
	 * and entrylo1 to cover the contiguous huge PTE
	 * address space.
	 */
	/* Huge page: Move Global bit */
	xori	t0, t0, _PAGE_HUGE		/* clear the HUGE marker bit */
	lu12i.w	t1, _PAGE_HGLOBAL >> 12
	and	t1, t0, t1			/* isolate the HGLOBAL bit */
	srli.d	t1, t1, (_PAGE_HGLOBAL_SHIFT - _PAGE_GLOBAL_SHIFT)
	or	t0, t0, t1			/* mirror HGLOBAL into GLOBAL */

	move	ra, t0
	csrwr	ra, LOONGARCH_CSR_TLBELO0
	/* Convert to entrylo1: advance the PA by half the huge page size */
	addi.d	t1, zero, 1
	slli.d	t1, t1, (HPAGE_SHIFT - 1)
	add.d	t0, t0, t1
	csrwr	t0, LOONGARCH_CSR_TLBELO1

	/* Set huge page tlb entry size */
	addu16i.d	t0, zero, (CSR_TLBIDX_PS >> 16)
	addu16i.d	t1, zero, (PS_HUGE_SIZE << (CSR_TLBIDX_PS_SHIFT - 16))
	csrxchg	t1, t0, LOONGARCH_CSR_TLBIDX

	tlbfill					/* fill a (hardware-chosen) TLB entry */

	/* Reset default page size */
	addu16i.d	t0, zero, (CSR_TLBIDX_PS >> 16)
	addu16i.d	t1, zero, (PS_DEFAULT_SIZE << (CSR_TLBIDX_PS_SHIFT - 16))
	csrxchg	t1, t0, LOONGARCH_CSR_TLBIDX

	csrrd	t0, EXCEPTION_KS0		/* restore scratch GPRs and return */
	csrrd	t1, EXCEPTION_KS1
	csrrd	ra, EXCEPTION_KS2
	ertn

nopage_tlb_modify:
	dbar	0x700				/* data barrier, hint 0x700 — NOTE(review): presumably a
						   lighter-weight sync than dbar 0; confirm vs ISA manual */
	csrrd	ra, EXCEPTION_KS2		/* restore ra; t0/t1 re-saved by the C path */
	la_abs	t0, tlb_do_page_fault_1
	jr	t0				/* tail-jump to the write-fault C path */
SYM_CODE_END(handle_tlb_modify)
/*
 * PTW variant of the modify handler: no assembly fast path, go straight
 * to the C write-fault path. t0/t1 are saved in LOONGARCH_CSR_KS0/KS1 —
 * NOTE(review): presumably the same CSRs BACKUP_T0T1/SAVE_ALL use to
 * restore them in the C entry; confirm against stackframe.h.
 */
SYM_CODE_START(handle_tlb_modify_ptw)
	UNWIND_HINT_UNDEFINED
	csrwr	t0, LOONGARCH_CSR_KS0
	csrwr	t1, LOONGARCH_CSR_KS1
	la_abs	t0, tlb_do_page_fault_1
	jr	t0				/* tail-jump to the write-fault C path */
SYM_CODE_END(handle_tlb_modify_ptw)
/*
 * TLB refill exception handler (no matching TLB entry at all).
 *
 * Uses the hardware-assisted walk instructions: lddir descends one
 * directory level per call, ldpte loads the even/odd PTE of the pair
 * for the refill address, and tlbfill writes the new entry. Only t0 is
 * used, saved/restored via the dedicated TLBRSAVE scratch CSR.
 */
SYM_CODE_START(handle_tlb_refill)
	UNWIND_HINT_UNDEFINED
	csrwr	t0, LOONGARCH_CSR_TLBRSAVE	/* save t0, the only register touched */
	csrrd	t0, LOONGARCH_CSR_PGD		/* page table root for the refill address */
	lddir	t0, t0, 3			/* descend top directory level */
#if CONFIG_PGTABLE_LEVELS > 3
	lddir	t0, t0, 2			/* descend PUD level */
#endif
#if CONFIG_PGTABLE_LEVELS > 2
	lddir	t0, t0, 1			/* descend PMD level */
#endif
	ldpte	t0, 0				/* load even PTE of the pair */
	ldpte	t0, 1				/* load odd PTE of the pair */
	tlbfill					/* write the new TLB entry */
	csrrd	t0, LOONGARCH_CSR_TLBRSAVE	/* restore t0 */
	ertn
SYM_CODE_END(handle_tlb_refill)