kvm_emul.S

/*
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2, as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
 *
 * Copyright SUSE Linux Products GmbH 2010
 * Copyright 2010-2011 Freescale Semiconductor, Inc.
 *
 * Authors: Alexander Graf <agraf@suse.de>
 */
#include <asm/ppc_asm.h>
#include <asm/kvm_asm.h>
#include <asm/reg.h>
#include <asm/page.h>
#include <asm/asm-offsets.h>
#include <asm/asm-compat.h>

#define KVM_MAGIC_PAGE		(-4096)
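
/*
 * The hypervisor maps a shared "magic" page at the top of the guest's
 * effective address space.  Since a zero base register in D-form
 * loads/stores means a literal 0, a displacement of -4096 + offset
 * sign-extends to that last page, so each access below reaches the
 * shared state in a single instruction.
 */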
#ifdef CONFIG_64BIT
#define LL64(reg, offs, reg2)	ld	reg, (offs)(reg2)
#define STL64(reg, offs, reg2)	std	reg, (offs)(reg2)
#else
#define LL64(reg, offs, reg2)	lwz	reg, (offs + 4)(reg2)
#define STL64(reg, offs, reg2)	stw	reg, (offs + 4)(reg2)
#endif
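
/*
 * The magic page stores the MSR as a 64-bit field.  A 32-bit
 * (big-endian) kernel only cares about the low word, so the 32-bit
 * LL64/STL64 access the single word at offs + 4 rather than both
 * halves.
 */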
#define SCRATCH_SAVE							\
	/* Enable critical section. We are critical if			\
	   shared->critical == r1 */					\
	STL64(r1, KVM_MAGIC_PAGE + KVM_MAGIC_CRITICAL, 0);		\
									\
	/* Save state */						\
	PPC_STL	r31, (KVM_MAGIC_PAGE + KVM_MAGIC_SCRATCH1)(0);		\
	PPC_STL	r30, (KVM_MAGIC_PAGE + KVM_MAGIC_SCRATCH2)(0);		\
	mfcr	r31;							\
	stw	r31, (KVM_MAGIC_PAGE + KVM_MAGIC_SCRATCH3)(0);
#define SCRATCH_RESTORE							\
	/* Restore state */						\
	PPC_LL	r31, (KVM_MAGIC_PAGE + KVM_MAGIC_SCRATCH1)(0);		\
	lwz	r30, (KVM_MAGIC_PAGE + KVM_MAGIC_SCRATCH3)(0);		\
	mtcr	r30;							\
	PPC_LL	r30, (KVM_MAGIC_PAGE + KVM_MAGIC_SCRATCH2)(0);		\
									\
	/* Disable critical section. We are critical if			\
	   shared->critical == r1 and r2 is always != r1 */		\
	STL64(r2, KVM_MAGIC_PAGE + KVM_MAGIC_CRITICAL, 0);
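
/*
 * While shared->critical == r1 the guest is inside one of these
 * templates, and the hypervisor is expected to hold back interrupt
 * injection so the scratch slots and the shared MSR copy stay
 * consistent.  Writing r2 ends the section because r2 is guaranteed
 * different from r1 (the stack pointer).
 */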
.global kvm_template_start
kvm_template_start:
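
/*
 * Everything between kvm_template_start and kvm_template_end is
 * position-independent template code.  The guest-side patching code
 * copies a template, rewrites the instruction at the exported
 * *_reg_offs word to use the register of the instruction it replaces,
 * stashes the original instruction in the *_orig_ins_offs slot where
 * one exists, and turns the "b ." placeholder into a branch back to
 * the patched site.  The *_len symbols give the template size in
 * words for the copy.
 */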
.global kvm_emulate_mtmsrd
kvm_emulate_mtmsrd:

	SCRATCH_SAVE

	/* Put MSR & ~(MSR_EE|MSR_RI) in r31 */
	LL64(r31, KVM_MAGIC_PAGE + KVM_MAGIC_MSR, 0)
	lis	r30, (~(MSR_EE | MSR_RI))@h
	ori	r30, r30, (~(MSR_EE | MSR_RI))@l
	and	r31, r31, r30

	/* OR the source register's (MSR_EE|MSR_RI) bits into MSR */
kvm_emulate_mtmsrd_reg:
	ori	r30, r0, 0
	andi.	r30, r30, (MSR_EE|MSR_RI)
	or	r31, r31, r30

	/* Put MSR back into magic page */
	STL64(r31, KVM_MAGIC_PAGE + KVM_MAGIC_MSR, 0)

	/* Check if we have to fetch an interrupt */
	lwz	r31, (KVM_MAGIC_PAGE + KVM_MAGIC_INT)(0)
	cmpwi	r31, 0
	beq+	no_check

	/* Check if we may trigger an interrupt */
	andi.	r30, r30, MSR_EE
	beq	no_check

	SCRATCH_RESTORE

	/* Nag hypervisor */
kvm_emulate_mtmsrd_orig_ins:
	tlbsync
	b	kvm_emulate_mtmsrd_branch

no_check:

	SCRATCH_RESTORE

	/* Go back to caller */
kvm_emulate_mtmsrd_branch:
	b	.
kvm_emulate_mtmsrd_end:

.global kvm_emulate_mtmsrd_branch_offs
kvm_emulate_mtmsrd_branch_offs:
	.long (kvm_emulate_mtmsrd_branch - kvm_emulate_mtmsrd) / 4

.global kvm_emulate_mtmsrd_reg_offs
kvm_emulate_mtmsrd_reg_offs:
	.long (kvm_emulate_mtmsrd_reg - kvm_emulate_mtmsrd) / 4

.global kvm_emulate_mtmsrd_orig_ins_offs
kvm_emulate_mtmsrd_orig_ins_offs:
	.long (kvm_emulate_mtmsrd_orig_ins - kvm_emulate_mtmsrd) / 4

.global kvm_emulate_mtmsrd_len
kvm_emulate_mtmsrd_len:
	.long (kvm_emulate_mtmsrd_end - kvm_emulate_mtmsrd) / 4
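
/*
 * Worked example (register choice hypothetical): a guest "mtmsrd r5, 1"
 * would be replaced by a branch to a private copy of the template above
 * whose _reg slot reads "ori r30, r5, 0" and whose final branch returns
 * to the instruction following the original mtmsrd, updating the shared
 * MSR[EE]/MSR[RI] without a trap unless an interrupt is pending.
 */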
#define MSR_SAFE_BITS		(MSR_EE | MSR_RI)
#define MSR_CRITICAL_BITS	~MSR_SAFE_BITS
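
/*
 * EE and RI are "safe" because they only affect interrupt delivery
 * and recoverability, which the shared MSR copy can track.  Any other
 * bit (IR/DR and friends) may change address translation, so a write
 * that flips one of those must fall back to the real, trapping mtmsr
 * below.
 */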
.global kvm_emulate_mtmsr
kvm_emulate_mtmsr:

	SCRATCH_SAVE

	/* Fetch old MSR in r31 */
	LL64(r31, KVM_MAGIC_PAGE + KVM_MAGIC_MSR, 0)

	/* Find the changed bits between old and new MSR */
kvm_emulate_mtmsr_reg1:
	ori	r30, r0, 0
	xor	r31, r30, r31

	/* Check if we really need to execute mtmsr */
	LOAD_REG_IMMEDIATE(r30, MSR_CRITICAL_BITS)
	and.	r31, r31, r30

	/* No critical bits changed? Maybe we can stay in the guest. */
	beq	maybe_stay_in_guest

do_mtmsr:

	SCRATCH_RESTORE

	/* Just fire off the mtmsr if it's critical */
kvm_emulate_mtmsr_orig_ins:
	mtmsr	r0
	b	kvm_emulate_mtmsr_branch

maybe_stay_in_guest:

	/* Get the target register in r30 */
kvm_emulate_mtmsr_reg2:
	ori	r30, r0, 0

	/* Put MSR into magic page because we don't call mtmsr */
	STL64(r30, KVM_MAGIC_PAGE + KVM_MAGIC_MSR, 0)

	/* Check if we have to fetch an interrupt */
	lwz	r31, (KVM_MAGIC_PAGE + KVM_MAGIC_INT)(0)
	cmpwi	r31, 0
	beq+	no_mtmsr

	/* Check if we may trigger an interrupt */
	andi.	r31, r30, MSR_EE
	bne	do_mtmsr

no_mtmsr:

	SCRATCH_RESTORE

	/* Go back to caller */
kvm_emulate_mtmsr_branch:
	b	.
kvm_emulate_mtmsr_end:

.global kvm_emulate_mtmsr_branch_offs
kvm_emulate_mtmsr_branch_offs:
	.long (kvm_emulate_mtmsr_branch - kvm_emulate_mtmsr) / 4

.global kvm_emulate_mtmsr_reg1_offs
kvm_emulate_mtmsr_reg1_offs:
	.long (kvm_emulate_mtmsr_reg1 - kvm_emulate_mtmsr) / 4

.global kvm_emulate_mtmsr_reg2_offs
kvm_emulate_mtmsr_reg2_offs:
	.long (kvm_emulate_mtmsr_reg2 - kvm_emulate_mtmsr) / 4

.global kvm_emulate_mtmsr_orig_ins_offs
kvm_emulate_mtmsr_orig_ins_offs:
	.long (kvm_emulate_mtmsr_orig_ins - kvm_emulate_mtmsr) / 4

.global kvm_emulate_mtmsr_len
kvm_emulate_mtmsr_len:
	.long (kvm_emulate_mtmsr_end - kvm_emulate_mtmsr) / 4
/* also used for wrteei 1 */
.global kvm_emulate_wrtee
kvm_emulate_wrtee:

	SCRATCH_SAVE

	/* Fetch old MSR in r31 */
	LL64(r31, KVM_MAGIC_PAGE + KVM_MAGIC_MSR, 0)

	/* Insert new MSR[EE] */
kvm_emulate_wrtee_reg:
	ori	r30, r0, 0
	rlwimi	r31, r30, 0, MSR_EE

	/*
	 * If MSR[EE] is now set, check for a pending interrupt.
	 * We could skip this if MSR[EE] was already on, but that
	 * should be rare, so don't bother.
	 */
	andi.	r30, r30, MSR_EE

	/* Put MSR into magic page because we don't call wrtee */
	STL64(r31, KVM_MAGIC_PAGE + KVM_MAGIC_MSR, 0)

	beq	no_wrtee

	/* Check if we have to fetch an interrupt */
	lwz	r30, (KVM_MAGIC_PAGE + KVM_MAGIC_INT)(0)
	cmpwi	r30, 0
	bne	do_wrtee

no_wrtee:

	SCRATCH_RESTORE

	/* Go back to caller */
kvm_emulate_wrtee_branch:
	b	.

do_wrtee:

	SCRATCH_RESTORE

	/* Just fire off the wrtee if it's critical */
kvm_emulate_wrtee_orig_ins:
	wrtee	r0
	b	kvm_emulate_wrtee_branch

kvm_emulate_wrtee_end:

.global kvm_emulate_wrtee_branch_offs
kvm_emulate_wrtee_branch_offs:
	.long (kvm_emulate_wrtee_branch - kvm_emulate_wrtee) / 4

.global kvm_emulate_wrtee_reg_offs
kvm_emulate_wrtee_reg_offs:
	.long (kvm_emulate_wrtee_reg - kvm_emulate_wrtee) / 4

.global kvm_emulate_wrtee_orig_ins_offs
kvm_emulate_wrtee_orig_ins_offs:
	.long (kvm_emulate_wrtee_orig_ins - kvm_emulate_wrtee) / 4

.global kvm_emulate_wrtee_len
kvm_emulate_wrtee_len:
	.long (kvm_emulate_wrtee_end - kvm_emulate_wrtee) / 4
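
/*
 * wrteei 0 clears MSR[EE], takes no register operand and can never
 * unmask a pending interrupt, so the template below is a straight-line
 * update of the shared MSR with no interrupt check.
 */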
.global kvm_emulate_wrteei_0
kvm_emulate_wrteei_0:

	SCRATCH_SAVE

	/* Fetch old MSR in r31 */
	LL64(r31, KVM_MAGIC_PAGE + KVM_MAGIC_MSR, 0)

	/* Remove MSR_EE from old MSR */
	rlwinm	r31, r31, 0, ~MSR_EE

	/* Write new MSR value back */
	STL64(r31, KVM_MAGIC_PAGE + KVM_MAGIC_MSR, 0)

	SCRATCH_RESTORE

	/* Go back to caller */
kvm_emulate_wrteei_0_branch:
	b	.
kvm_emulate_wrteei_0_end:

.global kvm_emulate_wrteei_0_branch_offs
kvm_emulate_wrteei_0_branch_offs:
	.long (kvm_emulate_wrteei_0_branch - kvm_emulate_wrteei_0) / 4

.global kvm_emulate_wrteei_0_len
kvm_emulate_wrteei_0_len:
	.long (kvm_emulate_wrteei_0_end - kvm_emulate_wrteei_0) / 4
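
/*
 * mtsrin writes a segment register.  With translation off the value
 * can simply be shadowed in the magic page's SR array; once IR/DR are
 * set, the original (trapping) mtsrin patched into the orig_ins slot
 * must run instead.
 */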
.global kvm_emulate_mtsrin
kvm_emulate_mtsrin:

	SCRATCH_SAVE

	/* Shadow the SR only if translation is off */
	LL64(r31, KVM_MAGIC_PAGE + KVM_MAGIC_MSR, 0)
	andi.	r31, r31, MSR_DR | MSR_IR
	beq	kvm_emulate_mtsrin_reg1

	SCRATCH_RESTORE

kvm_emulate_mtsrin_orig_ins:
	nop
	b	kvm_emulate_mtsrin_branch

kvm_emulate_mtsrin_reg1:
	/* r30 = (rX >> 26) & ~3: the SR number from rX's top 4 bits,
	   scaled by 4 for the word offset into the shadow SR array */
	rlwinm	r30, r0, 6, 26, 29

kvm_emulate_mtsrin_reg2:
	stw	r0, (KVM_MAGIC_PAGE + KVM_MAGIC_SR)(r30)

	SCRATCH_RESTORE

	/* Go back to caller */
kvm_emulate_mtsrin_branch:
	b	.
kvm_emulate_mtsrin_end:

.global kvm_emulate_mtsrin_branch_offs
kvm_emulate_mtsrin_branch_offs:
	.long (kvm_emulate_mtsrin_branch - kvm_emulate_mtsrin) / 4

.global kvm_emulate_mtsrin_reg1_offs
kvm_emulate_mtsrin_reg1_offs:
	.long (kvm_emulate_mtsrin_reg1 - kvm_emulate_mtsrin) / 4

.global kvm_emulate_mtsrin_reg2_offs
kvm_emulate_mtsrin_reg2_offs:
	.long (kvm_emulate_mtsrin_reg2 - kvm_emulate_mtsrin) / 4

.global kvm_emulate_mtsrin_orig_ins_offs
kvm_emulate_mtsrin_orig_ins_offs:
	.long (kvm_emulate_mtsrin_orig_ins - kvm_emulate_mtsrin) / 4

.global kvm_emulate_mtsrin_len
kvm_emulate_mtsrin_len:
	.long (kvm_emulate_mtsrin_end - kvm_emulate_mtsrin) / 4
.global kvm_template_end
kvm_template_end: