inject_fault.c 6.7 KB

123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163164165166167168169170171172173174175176177178179180181182183184185186187188189190191192193194195196197198199200201202203204205206207208209210211212213214215216217218219220221222223224225226227228229230231232233234235236237238239240241242243244245246247248249250251252253254
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Fault injection for both 32 and 64bit guests.
 *
 * Copyright (C) 2012,2013 - ARM Ltd
 * Author: Marc Zyngier <marc.zyngier@arm.com>
 *
 * Based on arch/arm/kvm/emulate.c
 * Copyright (C) 2012 - Virtual Open Systems and Columbia University
 * Author: Christoffer Dall <c.dall@virtualopensystems.com>
 */
  12. #include <linux/kvm_host.h>
  13. #include <asm/kvm_emulate.h>
  14. #include <asm/kvm_nested.h>
  15. #include <asm/esr.h>
  16. static void pend_sync_exception(struct kvm_vcpu *vcpu)
  17. {
  18. /* If not nesting, EL1 is the only possible exception target */
  19. if (likely(!vcpu_has_nv(vcpu))) {
  20. kvm_pend_exception(vcpu, EXCEPT_AA64_EL1_SYNC);
  21. return;
  22. }
  23. /*
  24. * With NV, we need to pick between EL1 and EL2. Note that we
  25. * never deal with a nesting exception here, hence never
  26. * changing context, and the exception itself can be delayed
  27. * until the next entry.
  28. */
  29. switch(*vcpu_cpsr(vcpu) & PSR_MODE_MASK) {
  30. case PSR_MODE_EL2h:
  31. case PSR_MODE_EL2t:
  32. kvm_pend_exception(vcpu, EXCEPT_AA64_EL2_SYNC);
  33. break;
  34. case PSR_MODE_EL1h:
  35. case PSR_MODE_EL1t:
  36. kvm_pend_exception(vcpu, EXCEPT_AA64_EL1_SYNC);
  37. break;
  38. case PSR_MODE_EL0t:
  39. if (vcpu_el2_tge_is_set(vcpu))
  40. kvm_pend_exception(vcpu, EXCEPT_AA64_EL2_SYNC);
  41. else
  42. kvm_pend_exception(vcpu, EXCEPT_AA64_EL1_SYNC);
  43. break;
  44. default:
  45. BUG();
  46. }
  47. }
  48. static bool match_target_el(struct kvm_vcpu *vcpu, unsigned long target)
  49. {
  50. return (vcpu_get_flag(vcpu, EXCEPT_MASK) == target);
  51. }
/*
 * Inject a synchronous external {instruction,data} abort into an AArch64
 * guest, filling FAR_ELx/ESR_ELx at the exception's target EL.
 *
 * @vcpu:    vcpu receiving the abort
 * @is_iabt: true for an instruction abort, false for a data abort
 * @addr:    faulting address to expose in FAR_ELx
 */
static void inject_abt64(struct kvm_vcpu *vcpu, bool is_iabt, unsigned long addr)
{
	unsigned long cpsr = *vcpu_cpsr(vcpu);
	bool is_aarch32 = vcpu_mode_is_32bit(vcpu);
	u64 esr = 0;

	/* Decide on the target EL (EL1 vs EL2) before building the ESR. */
	pend_sync_exception(vcpu);

	/*
	 * Build an {i,d}abort, depending on the level and the
	 * instruction set. Report an external synchronous abort.
	 */
	if (kvm_vcpu_trap_il_is32bit(vcpu))
		esr |= ESR_ELx_IL;

	/*
	 * Here, the guest runs in AArch64 mode when in EL1. If we get
	 * an AArch32 fault, it means we managed to trap an EL0 fault.
	 */
	if (is_aarch32 || (cpsr & PSR_MODE_MASK) == PSR_MODE_EL0t)
		esr |= (ESR_ELx_EC_IABT_LOW << ESR_ELx_EC_SHIFT);
	else
		esr |= (ESR_ELx_EC_IABT_CUR << ESR_ELx_EC_SHIFT);

	/*
	 * NOTE: this relies on the ESR_ELx EC encoding, where the DABT
	 * exception classes are the corresponding IABT ones with an extra
	 * bit set (DABT_LOW == IABT_LOW | 4, DABT_CUR == IABT_CUR | 4),
	 * so OR-ing DABT_LOW converts either IABT EC computed above into
	 * the matching DABT EC.
	 */
	if (!is_iabt)
		esr |= ESR_ELx_EC_DABT_LOW << ESR_ELx_EC_SHIFT;

	/* Report a synchronous external abort as the fault status code. */
	esr |= ESR_ELx_FSC_EXTABT;

	/* Write FAR/ESR at whichever EL pend_sync_exception() chose. */
	if (match_target_el(vcpu, unpack_vcpu_flag(EXCEPT_AA64_EL1_SYNC))) {
		vcpu_write_sys_reg(vcpu, addr, FAR_EL1);
		vcpu_write_sys_reg(vcpu, esr, ESR_EL1);
	} else {
		vcpu_write_sys_reg(vcpu, addr, FAR_EL2);
		vcpu_write_sys_reg(vcpu, esr, ESR_EL2);
	}
}
  83. static void inject_undef64(struct kvm_vcpu *vcpu)
  84. {
  85. u64 esr = (ESR_ELx_EC_UNKNOWN << ESR_ELx_EC_SHIFT);
  86. pend_sync_exception(vcpu);
  87. /*
  88. * Build an unknown exception, depending on the instruction
  89. * set.
  90. */
  91. if (kvm_vcpu_trap_il_is32bit(vcpu))
  92. esr |= ESR_ELx_IL;
  93. if (match_target_el(vcpu, unpack_vcpu_flag(EXCEPT_AA64_EL1_SYNC)))
  94. vcpu_write_sys_reg(vcpu, esr, ESR_EL1);
  95. else
  96. vcpu_write_sys_reg(vcpu, esr, ESR_EL2);
  97. }
  98. #define DFSR_FSC_EXTABT_LPAE 0x10
  99. #define DFSR_FSC_EXTABT_nLPAE 0x08
  100. #define DFSR_LPAE BIT(9)
  101. #define TTBCR_EAE BIT(31)
/* Pend an Undefined Instruction exception for an AArch32 guest. */
static void inject_undef32(struct kvm_vcpu *vcpu)
{
	kvm_pend_exception(vcpu, EXCEPT_AA32_UND);
}
/*
 * Modelled after TakeDataAbortException() and TakePrefetchAbortException
 * pseudocode.
 *
 * Inject a {prefetch,data} abort into an AArch32 guest.
 *
 * @vcpu:    vcpu receiving the abort
 * @is_pabt: true for a prefetch abort, false for a data abort
 * @addr:    faulting address, reported in the IFAR (pabt) or DFAR (dabt)
 */
static void inject_abt32(struct kvm_vcpu *vcpu, bool is_pabt, u32 addr)
{
	u64 far;
	u32 fsr;

	/*
	 * Give the guest an IMPLEMENTATION DEFINED exception.
	 * Pick the FSR format based on TTBCR.EAE: LPAE (long-descriptor)
	 * or the classic short-descriptor encoding.
	 */
	if (vcpu_read_sys_reg(vcpu, TCR_EL1) & TTBCR_EAE) {
		fsr = DFSR_LPAE | DFSR_FSC_EXTABT_LPAE;
	} else {
		/* no need to shuffle FS[4] into DFSR[10] as it's 0 */
		fsr = DFSR_FSC_EXTABT_nLPAE;
	}

	/*
	 * The AArch32 DFAR/IFAR map onto the low/high halves of FAR_EL1,
	 * so read-modify-write FAR_EL1 to update only the relevant half.
	 * NOTE(review): the DFSR is written through ESR_EL1 (its AArch64
	 * mapping), while the IFSR has its own dedicated IFSR32_EL2 slot.
	 */
	far = vcpu_read_sys_reg(vcpu, FAR_EL1);

	if (is_pabt) {
		kvm_pend_exception(vcpu, EXCEPT_AA32_IABT);
		/* IFAR lives in FAR_EL1[63:32]; keep the DFAR half. */
		far &= GENMASK(31, 0);
		far |= (u64)addr << 32;
		vcpu_write_sys_reg(vcpu, fsr, IFSR32_EL2);
	} else { /* !iabt */
		kvm_pend_exception(vcpu, EXCEPT_AA32_DABT);
		/* DFAR lives in FAR_EL1[31:0]; keep the IFAR half. */
		far &= GENMASK(63, 32);
		far |= addr;
		vcpu_write_sys_reg(vcpu, fsr, ESR_EL1);
	}

	vcpu_write_sys_reg(vcpu, far, FAR_EL1);
}
  135. /**
  136. * kvm_inject_dabt - inject a data abort into the guest
  137. * @vcpu: The VCPU to receive the data abort
  138. * @addr: The address to report in the DFAR
  139. *
  140. * It is assumed that this code is called from the VCPU thread and that the
  141. * VCPU therefore is not currently executing guest code.
  142. */
  143. void kvm_inject_dabt(struct kvm_vcpu *vcpu, unsigned long addr)
  144. {
  145. if (vcpu_el1_is_32bit(vcpu))
  146. inject_abt32(vcpu, false, addr);
  147. else
  148. inject_abt64(vcpu, false, addr);
  149. }
  150. /**
  151. * kvm_inject_pabt - inject a prefetch abort into the guest
  152. * @vcpu: The VCPU to receive the prefetch abort
  153. * @addr: The address to report in the DFAR
  154. *
  155. * It is assumed that this code is called from the VCPU thread and that the
  156. * VCPU therefore is not currently executing guest code.
  157. */
  158. void kvm_inject_pabt(struct kvm_vcpu *vcpu, unsigned long addr)
  159. {
  160. if (vcpu_el1_is_32bit(vcpu))
  161. inject_abt32(vcpu, true, addr);
  162. else
  163. inject_abt64(vcpu, true, addr);
  164. }
  165. void kvm_inject_size_fault(struct kvm_vcpu *vcpu)
  166. {
  167. unsigned long addr, esr;
  168. addr = kvm_vcpu_get_fault_ipa(vcpu);
  169. addr |= kvm_vcpu_get_hfar(vcpu) & GENMASK(11, 0);
  170. if (kvm_vcpu_trap_is_iabt(vcpu))
  171. kvm_inject_pabt(vcpu, addr);
  172. else
  173. kvm_inject_dabt(vcpu, addr);
  174. /*
  175. * If AArch64 or LPAE, set FSC to 0 to indicate an Address
  176. * Size Fault at level 0, as if exceeding PARange.
  177. *
  178. * Non-LPAE guests will only get the external abort, as there
  179. * is no way to describe the ASF.
  180. */
  181. if (vcpu_el1_is_32bit(vcpu) &&
  182. !(vcpu_read_sys_reg(vcpu, TCR_EL1) & TTBCR_EAE))
  183. return;
  184. esr = vcpu_read_sys_reg(vcpu, ESR_EL1);
  185. esr &= ~GENMASK_ULL(5, 0);
  186. vcpu_write_sys_reg(vcpu, esr, ESR_EL1);
  187. }
  188. /**
  189. * kvm_inject_undefined - inject an undefined instruction into the guest
  190. * @vcpu: The vCPU in which to inject the exception
  191. *
  192. * It is assumed that this code is called from the VCPU thread and that the
  193. * VCPU therefore is not currently executing guest code.
  194. */
  195. void kvm_inject_undefined(struct kvm_vcpu *vcpu)
  196. {
  197. if (vcpu_el1_is_32bit(vcpu))
  198. inject_undef32(vcpu);
  199. else
  200. inject_undef64(vcpu);
  201. }
/*
 * Record the ISS bits of @esr as the virtual SError syndrome and set
 * HCR_EL2.VSE so the SError is pended for the guest.
 */
void kvm_set_sei_esr(struct kvm_vcpu *vcpu, u64 esr)
{
	vcpu_set_vsesr(vcpu, esr & ESR_ELx_ISS_MASK);
	*vcpu_hcr(vcpu) |= HCR_VSE;
}
/**
 * kvm_inject_vabt - inject an async abort / SError into the guest
 * @vcpu: The VCPU to receive the exception
 *
 * It is assumed that this code is called from the VCPU thread and that the
 * VCPU therefore is not currently executing guest code.
 *
 * Systems with the RAS Extensions specify an imp-def ESR (ISV/IDS = 1) with
 * the remaining ISS all-zeros so that this error is not interpreted as an
 * uncategorized RAS error. Without the RAS Extensions we can't specify an ESR
 * value, so the CPU generates an imp-def value.
 */
void kvm_inject_vabt(struct kvm_vcpu *vcpu)
{
	/* ISV/IDS set, all other ISS bits clear — see comment above. */
	kvm_set_sei_esr(vcpu, ESR_ELx_ISV);
}