/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Copyright (C) 2020-2023 Loongson Technology Corporation Limited
 */

#include <linux/linkage.h>
#include <asm/asm.h>
#include <asm/asmmacro.h>
#include <asm/loongarch.h>
#include <asm/regdef.h>
#include <asm/unwind_hints.h>
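
/*
 * HGPR_OFFSET(n)/GGPR_OFFSET(n) give the byte offset of GPR $rn within the
 * host pt_regs save area and the guest GPR array in kvm_vcpu_arch,
 * respectively (8 bytes per 64-bit register).
 */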
#define HGPR_OFFSET(x)		(PT_R0 + 8*x)
#define GGPR_OFFSET(x)		(KVM_ARCH_GGPR + 8*x)
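
/*
 * kvm_save_host_gpr/kvm_restore_host_gpr touch only ra, tp, sp and the
 * callee-saved registers (fp, s0-s8), i.e. the registers the LoongArch C
 * calling convention requires kvm_enter_guest to preserve across the call.
 */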
.macro kvm_save_host_gpr base
	.irp n,1,2,3,22,23,24,25,26,27,28,29,30,31
	st.d	$r\n, \base, HGPR_OFFSET(\n)
	.endr
.endm

.macro kvm_restore_host_gpr base
	.irp n,1,2,3,22,23,24,25,26,27,28,29,30,31
	ld.d	$r\n, \base, HGPR_OFFSET(\n)
	.endr
.endm

/*
 * Save and restore all GPRs except the base register,
 * whose default value is a2.
 */
.macro kvm_save_guest_gprs base
	.irp n,1,2,3,4,5,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31
	st.d	$r\n, \base, GGPR_OFFSET(\n)
	.endr
.endm

.macro kvm_restore_guest_gprs base
	.irp n,1,2,3,4,5,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31
	ld.d	$r\n, \base, GGPR_OFFSET(\n)
	.endr
.endm
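
/*
 * For illustration, each .irp iteration emits one access; e.g. for n = 4
 * ($a0), `kvm_save_guest_gprs a2` produces:
 *
 *	st.d	$r4, a2, (KVM_ARCH_GGPR + 8 * 4)
 *
 * n = 6 ($a2) is skipped since it is the base register itself.
 */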

/*
 * Prepare the switch to guest: save host regs and restore guest regs.
 * a2: kvm_vcpu_arch, must not be touched until 'ertn'
 * t0, t1: temporary registers
 */
.macro kvm_switch_to_guest
	/* Set host ECFG.VS=0, all exceptions share one exception entry */
	csrrd		t0, LOONGARCH_CSR_ECFG
	bstrins.w	t0, zero, CSR_ECFG_VS_SHIFT_END, CSR_ECFG_VS_SHIFT
	csrwr		t0, LOONGARCH_CSR_ECFG

	/* Load up the new EENTRY */
	ld.d	t0, a2, KVM_ARCH_GEENTRY
	csrwr	t0, LOONGARCH_CSR_EENTRY

	/* Set Guest ERA */
	ld.d	t0, a2, KVM_ARCH_GPC
	csrwr	t0, LOONGARCH_CSR_ERA

	/* Save host PGDL */
	csrrd	t0, LOONGARCH_CSR_PGDL
	st.d	t0, a2, KVM_ARCH_HPGD

	/* Switch to kvm */
	ld.d	t1, a2, KVM_VCPU_KVM - KVM_VCPU_ARCH

	/* Load guest PGDL */
	li.w	t0, KVM_GPGD
	ldx.d	t0, t1, t0
	csrwr	t0, LOONGARCH_CSR_PGDL

	/* Mix GID and RID */
	csrrd		t1, LOONGARCH_CSR_GSTAT
	bstrpick.w	t1, t1, CSR_GSTAT_GID_SHIFT_END, CSR_GSTAT_GID_SHIFT
	csrrd		t0, LOONGARCH_CSR_GTLBC
	bstrins.w	t0, t1, CSR_GTLBC_TGID_SHIFT_END, CSR_GTLBC_TGID_SHIFT
	csrwr		t0, LOONGARCH_CSR_GTLBC

	/*
	 * Enable interrupts in root mode for the coming ertn so that host
	 * interrupts can be serviced while the VM runs.
	 * Guest CRMD comes from the separate GCSR_CRMD register.
	 */
	ori	t0, zero, CSR_PRMD_PIE
	csrxchg	t0, t0, LOONGARCH_CSR_PRMD

	/* Set the PVM bit so that ertn switches to guest context */
	ori	t0, zero, CSR_GSTAT_PVM
	csrxchg	t0, t0, LOONGARCH_CSR_GSTAT

	/* Load guest GPRs */
	kvm_restore_guest_gprs a2

	/* Load KVM_ARCH register */
	ld.d	a2, a2, (KVM_ARCH_GGPR + 8 * REG_A2)

	ertn /* Switch to guest: GSTAT.PGM = 1, ERRCTL.ISERR = 0, TLBRPRMD.ISTLBR = 0 */
.endm
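
/*
 * kvm_switch_to_guest is instantiated twice below: at the tail of
 * kvm_exc_entry when the exit handler asks to re-enter the guest, and in
 * kvm_enter_guest for the initial entry from the host.
 */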

/*
 * Exception entry for general exceptions from guest mode
 * - IRQs are disabled
 * - kernel privilege in root mode
 * - page mode kept unchanged from the previous PRMD in root mode
 * - FIXME: TLB exceptions cannot happen here since the TLB-related
 *   registers (pgd table, vmid registers, etc.) still hold guest state;
 *   to be fixed once hardware page walk is enabled
 * Loads kvm_vcpu from the reserved CSR KVM_VCPU_KS and saves a2 to KVM_TEMP_KS.
 */
	.text
	.cfi_sections	.debug_frame
SYM_CODE_START(kvm_exc_entry)
	UNWIND_HINT_UNDEFINED
	csrwr	a2, KVM_TEMP_KS
	csrrd	a2, KVM_VCPU_KS
	addi.d	a2, a2, KVM_VCPU_ARCH

	/* After the GPRs are saved, any GPR is free to use */
	kvm_save_guest_gprs a2
	/* Save guest a2 */
	csrrd	t0, KVM_TEMP_KS
	st.d	t0, a2, (KVM_ARCH_GGPR + 8 * REG_A2)

	/* a2 is kvm_vcpu_arch, a1 is free to use */
	csrrd	s1, KVM_VCPU_KS
	ld.d	s0, s1, KVM_VCPU_RUN

	csrrd	t0, LOONGARCH_CSR_ESTAT
	st.d	t0, a2, KVM_ARCH_HESTAT
	csrrd	t0, LOONGARCH_CSR_ERA
	st.d	t0, a2, KVM_ARCH_GPC
	csrrd	t0, LOONGARCH_CSR_BADV
	st.d	t0, a2, KVM_ARCH_HBADV
	csrrd	t0, LOONGARCH_CSR_BADI
	st.d	t0, a2, KVM_ARCH_HBADI

	/* Restore host ECFG.VS */
	csrrd	t0, LOONGARCH_CSR_ECFG
	ld.d	t1, a2, KVM_ARCH_HECFG
	or	t0, t0, t1
	csrwr	t0, LOONGARCH_CSR_ECFG

	/* Restore host EENTRY */
	ld.d	t0, a2, KVM_ARCH_HEENTRY
	csrwr	t0, LOONGARCH_CSR_EENTRY

	/* Restore host pgd table */
	ld.d	t0, a2, KVM_ARCH_HPGD
	csrwr	t0, LOONGARCH_CSR_PGDL

	/* Clear the PVM bit so the next ertn stays in root mode */
	ori	t0, zero, CSR_GSTAT_PVM
	csrxchg	zero, t0, LOONGARCH_CSR_GSTAT

	/*
	 * Clear the GTLBC.TGID field
	 *   0: subsequent TLB instructions update the root TLB
	 *   other values: subsequent TLB instructions update the guest TLB
	 *   (e.g. GPA to HPA translations)
	 */
	csrrd		t0, LOONGARCH_CSR_GTLBC
	bstrins.w	t0, zero, CSR_GTLBC_TGID_SHIFT_END, CSR_GTLBC_TGID_SHIFT
	csrwr		t0, LOONGARCH_CSR_GTLBC

	ld.d	tp, a2, KVM_ARCH_HTP
	ld.d	sp, a2, KVM_ARCH_HSP
	/* Restore the per-CPU base register */
	ld.d	u0, a2, KVM_ARCH_HPERCPU
	addi.d	sp, sp, -PT_SIZE

	/* Prepare arguments and call the exit handler */
	or	a0, s0, zero
	or	a1, s1, zero
	ld.d	t8, a2, KVM_ARCH_HANDLE_EXIT
	jirl	ra, t8, 0

	or	a2, s1, zero
	addi.d	a2, a2, KVM_VCPU_ARCH

	/* Resume the host when the handler returns <= 0 */
	blez	a0, ret_to_host

	/*
	 * Return to the guest.
	 * Save the per-CPU base register again; we may have been migrated
	 * to another CPU while the exit handler ran.
	 */
	st.d	u0, a2, KVM_ARCH_HPERCPU

	/* Save kvm_vcpu to kscratch */
	csrwr	s1, KVM_VCPU_KS
	kvm_switch_to_guest

ret_to_host:
	ld.d	a2, a2, KVM_ARCH_HSP
	addi.d	a2, a2, -PT_SIZE
	kvm_restore_host_gpr a2
	jr	ra

SYM_INNER_LABEL(kvm_exc_entry_end, SYM_L_LOCAL)
SYM_CODE_END(kvm_exc_entry)
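
/*
 * On the ret_to_host path, the host GPRs are reloaded from the pt_regs
 * area that kvm_enter_guest carved out below its caller's stack pointer,
 * and `jr ra` then returns to kvm_enter_guest's caller with the handler's
 * result still in a0.
 */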

/*
 * int kvm_enter_guest(struct kvm_run *run, struct kvm_vcpu *vcpu)
 *
 * @register_param:
 *  a0: kvm_run* run
 *  a1: kvm_vcpu* vcpu
 */
SYM_FUNC_START(kvm_enter_guest)
	/* Allocate pt_regs space at the bottom of the stack */
	addi.d	a2, sp, -PT_SIZE

	/* Save host GPRs */
	kvm_save_host_gpr a2

	addi.d	a2, a1, KVM_VCPU_ARCH
	st.d	sp, a2, KVM_ARCH_HSP
	st.d	tp, a2, KVM_ARCH_HTP
	/* Save the per-CPU base register */
	st.d	u0, a2, KVM_ARCH_HPERCPU

	/* Save kvm_vcpu to kscratch */
	csrwr	a1, KVM_VCPU_KS
	kvm_switch_to_guest
SYM_INNER_LABEL(kvm_enter_guest_end, SYM_L_LOCAL)
SYM_FUNC_END(kvm_enter_guest)
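
/*
 * A minimal sketch of the C-side call, assuming the usual vcpu run loop
 * (surrounding names illustrative, not taken from this file):
 *
 *	int ret;
 *
 *	ret = kvm_enter_guest(vcpu->run, vcpu);
 *	// ret is whatever the exit handler returned when it decided
 *	// (ret <= 0) to hand control back to the host.
 */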

SYM_FUNC_START(kvm_save_fpu)
	fpu_save_csr	a0 t1
	fpu_save_double	a0 t1
	fpu_save_cc	a0 t1 t2
	jr	ra
SYM_FUNC_END(kvm_save_fpu)

SYM_FUNC_START(kvm_restore_fpu)
	fpu_restore_double	a0 t1
	fpu_restore_csr		a0 t1 t2
	fpu_restore_cc		a0 t1 t2
	jr	ra
SYM_FUNC_END(kvm_restore_fpu)
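
/*
 * The LSX/LASX variants below save/restore the full 128-/256-bit vector
 * registers instead of calling fpu_save_double/fpu_restore_double: on
 * LoongArch the scalar FPRs occupy the low 64 bits of the corresponding
 * vector registers, so the vector save/restore covers them as well.
 */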
#ifdef CONFIG_CPU_HAS_LSX
SYM_FUNC_START(kvm_save_lsx)
	fpu_save_csr	a0 t1
	fpu_save_cc	a0 t1 t2
	lsx_save_data	a0 t1
	jr	ra
SYM_FUNC_END(kvm_save_lsx)

SYM_FUNC_START(kvm_restore_lsx)
	lsx_restore_data	a0 t1
	fpu_restore_cc		a0 t1 t2
	fpu_restore_csr		a0 t1 t2
	jr	ra
SYM_FUNC_END(kvm_restore_lsx)
#endif

#ifdef CONFIG_CPU_HAS_LASX
SYM_FUNC_START(kvm_save_lasx)
	fpu_save_csr	a0 t1
	fpu_save_cc	a0 t1 t2
	lasx_save_data	a0 t1
	jr	ra
SYM_FUNC_END(kvm_save_lasx)

SYM_FUNC_START(kvm_restore_lasx)
	lasx_restore_data	a0 t1
	fpu_restore_cc		a0 t1 t2
	fpu_restore_csr		a0 t1 t2
	jr	ra
SYM_FUNC_END(kvm_restore_lasx)
#endif

	.section ".rodata"
SYM_DATA(kvm_exception_size, .quad kvm_exc_entry_end - kvm_exc_entry)
SYM_DATA(kvm_enter_guest_size, .quad kvm_enter_guest_end - kvm_enter_guest)
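
/*
 * These sizes are exported so the C side can copy the code between the
 * start and end labels when installing the guest exception entry; the
 * exact consumer lives outside this file.
 */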

#ifdef CONFIG_CPU_HAS_LBT
STACK_FRAME_NON_STANDARD kvm_restore_fpu
#ifdef CONFIG_CPU_HAS_LSX
STACK_FRAME_NON_STANDARD kvm_restore_lsx
#endif
#ifdef CONFIG_CPU_HAS_LASX
STACK_FRAME_NON_STANDARD kvm_restore_lasx
#endif
#endif