  1. /*
  2. * Fault injection for both 32 and 64bit guests.
  3. *
  4. * Copyright (C) 2012,2013 - ARM Ltd
  5. * Author: Marc Zyngier <marc.zyngier@arm.com>
  6. *
  7. * Based on arch/arm/kvm/emulate.c
  8. * Copyright (C) 2012 - Virtual Open Systems and Columbia University
  9. * Author: Christoffer Dall <c.dall@virtualopensystems.com>
  10. *
  11. * This program is free software: you can redistribute it and/or modify
  12. * it under the terms of the GNU General Public License version 2 as
  13. * published by the Free Software Foundation.
  14. *
  15. * This program is distributed in the hope that it will be useful,
  16. * but WITHOUT ANY WARRANTY; without even the implied warranty of
  17. * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
  18. * GNU General Public License for more details.
  19. *
  20. * You should have received a copy of the GNU General Public License
  21. * along with this program. If not, see <http://www.gnu.org/licenses/>.
  22. */
  23. #include <linux/kvm_host.h>
  24. #include <asm/kvm_emulate.h>
  25. #include <asm/esr.h>
/*
 * Offsets of the four vector-table quadrants from VBAR_EL1, selected by
 * which EL/stack-pointer/register-width the exception is taken from.
 */
#define CURRENT_EL_SP_EL0_VECTOR	0x0
#define CURRENT_EL_SP_ELx_VECTOR	0x200
#define LOWER_EL_AArch64_VECTOR		0x400
#define LOWER_EL_AArch32_VECTOR		0x600

/* Offset of each exception type within its vector-table quadrant. */
enum exception_type {
	except_type_sync	= 0,
	except_type_irq		= 0x80,
	except_type_fiq		= 0x100,
	except_type_serror	= 0x180,
};
  36. static u64 get_except_vector(struct kvm_vcpu *vcpu, enum exception_type type)
  37. {
  38. u64 exc_offset;
  39. switch (*vcpu_cpsr(vcpu) & (PSR_MODE_MASK | PSR_MODE32_BIT)) {
  40. case PSR_MODE_EL1t:
  41. exc_offset = CURRENT_EL_SP_EL0_VECTOR;
  42. break;
  43. case PSR_MODE_EL1h:
  44. exc_offset = CURRENT_EL_SP_ELx_VECTOR;
  45. break;
  46. case PSR_MODE_EL0t:
  47. exc_offset = LOWER_EL_AArch64_VECTOR;
  48. break;
  49. default:
  50. exc_offset = LOWER_EL_AArch32_VECTOR;
  51. }
  52. return vcpu_read_sys_reg(vcpu, VBAR_EL1) + exc_offset + type;
  53. }
  54. /*
  55. * When an exception is taken, most PSTATE fields are left unchanged in the
  56. * handler. However, some are explicitly overridden (e.g. M[4:0]). Luckily all
  57. * of the inherited bits have the same position in the AArch64/AArch32 SPSR_ELx
  58. * layouts, so we don't need to shuffle these for exceptions from AArch32 EL0.
  59. *
  60. * For the SPSR_ELx layout for AArch64, see ARM DDI 0487E.a page C5-429.
  61. * For the SPSR_ELx layout for AArch32, see ARM DDI 0487E.a page C5-426.
  62. *
  63. * Here we manipulate the fields in order of the AArch64 SPSR_ELx layout, from
  64. * MSB to LSB.
  65. */
  66. static unsigned long get_except64_pstate(struct kvm_vcpu *vcpu)
  67. {
  68. unsigned long sctlr = vcpu_read_sys_reg(vcpu, SCTLR_EL1);
  69. unsigned long old, new;
  70. old = *vcpu_cpsr(vcpu);
  71. new = 0;
  72. new |= (old & PSR_N_BIT);
  73. new |= (old & PSR_Z_BIT);
  74. new |= (old & PSR_C_BIT);
  75. new |= (old & PSR_V_BIT);
  76. // TODO: TCO (if/when ARMv8.5-MemTag is exposed to guests)
  77. new |= (old & PSR_DIT_BIT);
  78. // PSTATE.UAO is set to zero upon any exception to AArch64
  79. // See ARM DDI 0487E.a, page D5-2579.
  80. // PSTATE.PAN is unchanged unless SCTLR_ELx.SPAN == 0b0
  81. // SCTLR_ELx.SPAN is RES1 when ARMv8.1-PAN is not implemented
  82. // See ARM DDI 0487E.a, page D5-2578.
  83. new |= (old & PSR_PAN_BIT);
  84. if (!(sctlr & SCTLR_EL1_SPAN))
  85. new |= PSR_PAN_BIT;
  86. // PSTATE.SS is set to zero upon any exception to AArch64
  87. // See ARM DDI 0487E.a, page D2-2452.
  88. // PSTATE.IL is set to zero upon any exception to AArch64
  89. // See ARM DDI 0487E.a, page D1-2306.
  90. // PSTATE.SSBS is set to SCTLR_ELx.DSSBS upon any exception to AArch64
  91. // See ARM DDI 0487E.a, page D13-3258
  92. if (sctlr & SCTLR_ELx_DSSBS)
  93. new |= PSR_SSBS_BIT;
  94. // PSTATE.BTYPE is set to zero upon any exception to AArch64
  95. // See ARM DDI 0487E.a, pages D1-2293 to D1-2294.
  96. new |= PSR_D_BIT;
  97. new |= PSR_A_BIT;
  98. new |= PSR_I_BIT;
  99. new |= PSR_F_BIT;
  100. new |= PSR_MODE_EL1h;
  101. return new;
  102. }
/*
 * inject_abt64 - pend a synchronous external abort for a guest running at
 * AArch64 EL1.
 * @vcpu:    vCPU to fault
 * @is_iabt: true for an instruction abort, false for a data abort
 * @addr:    faulting address, reported to the guest in FAR_EL1
 *
 * Emulates taking a synchronous exception to EL1: saves the return state
 * (ELR_EL1/SPSR_EL1), redirects the PC to the sync entry of the guest's
 * vector table, builds the new PSTATE, and synthesizes an ESR_EL1 that
 * describes a synchronous external abort.
 */
static void inject_abt64(struct kvm_vcpu *vcpu, bool is_iabt, unsigned long addr)
{
	unsigned long cpsr = *vcpu_cpsr(vcpu);
	bool is_aarch32 = vcpu_mode_is_32bit(vcpu);
	u32 esr = 0;

	/* Preferred return address, then vector into the guest's sync handler. */
	vcpu_write_elr_el1(vcpu, *vcpu_pc(vcpu));
	*vcpu_pc(vcpu) = get_except_vector(vcpu, except_type_sync);

	/* New PSTATE for the handler; old PSTATE saved for the guest's ERET. */
	*vcpu_cpsr(vcpu) = get_except64_pstate(vcpu);
	vcpu_write_spsr(vcpu, cpsr);

	vcpu_write_sys_reg(vcpu, addr, FAR_EL1);

	/*
	 * Build an {i,d}abort, depending on the level and the
	 * instruction set. Report an external synchronous abort.
	 */
	if (kvm_vcpu_trap_il_is32bit(vcpu))
		esr |= ESR_ELx_IL;

	/*
	 * Here, the guest runs in AArch64 mode when in EL1. If we get
	 * an AArch32 fault, it means we managed to trap an EL0 fault.
	 */
	if (is_aarch32 || (cpsr & PSR_MODE_MASK) == PSR_MODE_EL0t)
		esr |= (ESR_ELx_EC_IABT_LOW << ESR_ELx_EC_SHIFT);
	else
		esr |= (ESR_ELx_EC_IABT_CUR << ESR_ELx_EC_SHIFT);

	/*
	 * NOTE(review): this OR converts the instruction-abort EC chosen above
	 * into the matching data-abort EC; it presumably relies on the
	 * ESR_ELx_EC_DABT_{LOW,CUR} encodings being bitwise supersets of the
	 * corresponding ESR_ELx_EC_IABT_* values — confirm against asm/esr.h.
	 */
	if (!is_iabt)
		esr |= ESR_ELx_EC_DABT_LOW << ESR_ELx_EC_SHIFT;

	vcpu_write_sys_reg(vcpu, esr | ESR_ELx_FSC_EXTABT, ESR_EL1);
}
  131. static void inject_undef64(struct kvm_vcpu *vcpu)
  132. {
  133. unsigned long cpsr = *vcpu_cpsr(vcpu);
  134. u32 esr = (ESR_ELx_EC_UNKNOWN << ESR_ELx_EC_SHIFT);
  135. vcpu_write_elr_el1(vcpu, *vcpu_pc(vcpu));
  136. *vcpu_pc(vcpu) = get_except_vector(vcpu, except_type_sync);
  137. *vcpu_cpsr(vcpu) = get_except64_pstate(vcpu);
  138. vcpu_write_spsr(vcpu, cpsr);
  139. /*
  140. * Build an unknown exception, depending on the instruction
  141. * set.
  142. */
  143. if (kvm_vcpu_trap_il_is32bit(vcpu))
  144. esr |= ESR_ELx_IL;
  145. vcpu_write_sys_reg(vcpu, esr, ESR_EL1);
  146. }
  147. /**
  148. * kvm_inject_dabt - inject a data abort into the guest
  149. * @vcpu: The VCPU to receive the undefined exception
  150. * @addr: The address to report in the DFAR
  151. *
  152. * It is assumed that this code is called from the VCPU thread and that the
  153. * VCPU therefore is not currently executing guest code.
  154. */
  155. void kvm_inject_dabt(struct kvm_vcpu *vcpu, unsigned long addr)
  156. {
  157. if (vcpu_el1_is_32bit(vcpu))
  158. kvm_inject_dabt32(vcpu, addr);
  159. else
  160. inject_abt64(vcpu, false, addr);
  161. }
  162. /**
  163. * kvm_inject_pabt - inject a prefetch abort into the guest
  164. * @vcpu: The VCPU to receive the undefined exception
  165. * @addr: The address to report in the DFAR
  166. *
  167. * It is assumed that this code is called from the VCPU thread and that the
  168. * VCPU therefore is not currently executing guest code.
  169. */
  170. void kvm_inject_pabt(struct kvm_vcpu *vcpu, unsigned long addr)
  171. {
  172. if (vcpu_el1_is_32bit(vcpu))
  173. kvm_inject_pabt32(vcpu, addr);
  174. else
  175. inject_abt64(vcpu, true, addr);
  176. }
  177. /**
  178. * kvm_inject_undefined - inject an undefined instruction into the guest
  179. *
  180. * It is assumed that this code is called from the VCPU thread and that the
  181. * VCPU therefore is not currently executing guest code.
  182. */
  183. void kvm_inject_undefined(struct kvm_vcpu *vcpu)
  184. {
  185. if (vcpu_el1_is_32bit(vcpu))
  186. kvm_inject_undef32(vcpu);
  187. else
  188. inject_undef64(vcpu);
  189. }
/*
 * kvm_set_sei_esr - set the virtual SError syndrome and mark it pending.
 * @vcpu: vCPU to receive the SError
 * @esr:  syndrome value; only the ISS bits are kept for VSESR_EL2
 *
 * Setting HCR_VSE makes the virtual SError pending for the guest.
 */
void kvm_set_sei_esr(struct kvm_vcpu *vcpu, u64 esr)
{
	vcpu_set_vsesr(vcpu, esr & ESR_ELx_ISS_MASK);
	*vcpu_hcr(vcpu) |= HCR_VSE;
}
/**
 * kvm_inject_vabt - inject an async abort / SError into the guest
 * @vcpu: The VCPU to receive the exception
 *
 * It is assumed that this code is called from the VCPU thread and that the
 * VCPU therefore is not currently executing guest code.
 *
 * Systems with the RAS Extensions specify an imp-def ESR (ISV/IDS = 1) with
 * the remaining ISS all-zeros so that this error is not interpreted as an
 * uncategorized RAS error. Without the RAS Extensions we can't specify an ESR
 * value, so the CPU generates an imp-def value.
 */
void kvm_inject_vabt(struct kvm_vcpu *vcpu)
{
	/* ESR_ELx_ISV alone yields the ISV/IDS=1, ISS=0 encoding described above. */
	kvm_set_sei_esr(vcpu, ESR_ELx_ISV);
}