aarch32.c 6.6 KB

123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163164165166167168169170171172173174175176177178179180181182183184185186187188189190191192193194195196197198199200201202203204205206207208209210211212213214215216217218219220221222223224225226227228229230231232233234235236237238239240241242243
  1. /*
  2. * (not much of an) Emulation layer for 32bit guests.
  3. *
  4. * Copyright (C) 2012,2013 - ARM Ltd
  5. * Author: Marc Zyngier <marc.zyngier@arm.com>
  6. *
  7. * based on arch/arm/kvm/emulate.c
  8. * Copyright (C) 2012 - Virtual Open Systems and Columbia University
  9. * Author: Christoffer Dall <c.dall@virtualopensystems.com>
  10. *
  11. * This program is free software: you can redistribute it and/or modify
  12. * it under the terms of the GNU General Public License version 2 as
  13. * published by the Free Software Foundation.
  14. *
  15. * This program is distributed in the hope that it will be useful,
  16. * but WITHOUT ANY WARRANTY; without even the implied warranty of
  17. * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
  18. * GNU General Public License for more details.
  19. *
  20. * You should have received a copy of the GNU General Public License
  21. * along with this program. If not, see <http://www.gnu.org/licenses/>.
  22. */
  23. #include <linux/bits.h>
  24. #include <linux/kvm_host.h>
  25. #include <asm/kvm_emulate.h>
  26. #include <asm/kvm_hyp.h>
/* DFSR fault status codes for a synchronous external abort, as reported
 * to the guest in inject_abt32(); LPAE vs. non-LPAE encodings differ. */
#define DFSR_FSC_EXTABT_LPAE	0x10
#define DFSR_FSC_EXTABT_nLPAE	0x08
/* DFSR bit flagging that the FSR uses the long-descriptor (LPAE) format */
#define DFSR_LPAE		BIT(9)
/*
 * Table taken from ARMv8 ARM DDI0487B-B, table G1-10.
 *
 * Amount added to the faulting PC to form the exception return address
 * (stored in LR by prepare_fault32()), indexed by
 * [vector offset / 4][0 = ARM state, 1 = Thumb state].
 */
static const u8 return_offsets[8][2] = {
	[0] = { 0, 0 },		/* Reset, unused */
	[1] = { 4, 2 },		/* Undefined */
	[2] = { 0, 0 },		/* SVC, unused */
	[3] = { 4, 4 },		/* Prefetch abort */
	[4] = { 8, 8 },		/* Data abort */
	[5] = { 0, 0 },		/* HVC, unused */
	[6] = { 4, 4 },		/* IRQ, unused */
	[7] = { 4, 4 },		/* FIQ, unused */
};
  43. static bool pre_fault_synchronize(struct kvm_vcpu *vcpu)
  44. {
  45. preempt_disable();
  46. if (kvm_arm_vcpu_loaded(vcpu)) {
  47. kvm_arch_vcpu_put(vcpu);
  48. return true;
  49. }
  50. preempt_enable();
  51. return false;
  52. }
  53. static void post_fault_synchronize(struct kvm_vcpu *vcpu, bool loaded)
  54. {
  55. if (loaded) {
  56. kvm_arch_vcpu_load(vcpu, smp_processor_id());
  57. preempt_enable();
  58. }
  59. }
  60. /*
  61. * When an exception is taken, most CPSR fields are left unchanged in the
  62. * handler. However, some are explicitly overridden (e.g. M[4:0]).
  63. *
  64. * The SPSR/SPSR_ELx layouts differ, and the below is intended to work with
  65. * either format. Note: SPSR.J bit doesn't exist in SPSR_ELx, but this bit was
  66. * obsoleted by the ARMv7 virtualization extensions and is RES0.
  67. *
  68. * For the SPSR layout seen from AArch32, see:
  69. * - ARM DDI 0406C.d, page B1-1148
  70. * - ARM DDI 0487E.a, page G8-6264
  71. *
  72. * For the SPSR_ELx layout for AArch32 seen from AArch64, see:
  73. * - ARM DDI 0487E.a, page C5-426
  74. *
  75. * Here we manipulate the fields in order of the AArch32 SPSR_ELx layout, from
  76. * MSB to LSB.
  77. */
/*
 * Build the CPSR value the guest observes on entry to an AArch32
 * exception taken to @mode: condition flags and a few other bits are
 * inherited from the current CPSR, the rest are forced per the rules
 * spelled out (with ARM ARM references) below.
 */
static unsigned long get_except32_cpsr(struct kvm_vcpu *vcpu, u32 mode)
{
	u32 sctlr = vcpu_cp15(vcpu, c1_SCTLR);
	unsigned long old, new;

	old = *vcpu_cpsr(vcpu);
	new = 0;

	new |= (old & PSR_AA32_N_BIT);
	new |= (old & PSR_AA32_Z_BIT);
	new |= (old & PSR_AA32_C_BIT);
	new |= (old & PSR_AA32_V_BIT);
	new |= (old & PSR_AA32_Q_BIT);

	// CPSR.IT[7:0] are set to zero upon any exception
	// (cleared here simply by never copying them into 'new')
	// See ARM DDI 0487E.a, section G1.12.3
	// See ARM DDI 0406C.d, section B1.8.3

	new |= (old & PSR_AA32_DIT_BIT);

	// CPSR.SSBS is set to SCTLR.DSSBS upon any exception
	// See ARM DDI 0487E.a, page G8-6244
	if (sctlr & BIT(31))	/* SCTLR.DSSBS */
		new |= PSR_AA32_SSBS_BIT;

	// CPSR.PAN is unchanged unless SCTLR.SPAN == 0b0
	// SCTLR.SPAN is RES1 when ARMv8.1-PAN is not implemented
	// See ARM DDI 0487E.a, page G8-6246
	new |= (old & PSR_AA32_PAN_BIT);
	if (!(sctlr & BIT(23)))	/* SCTLR.SPAN */
		new |= PSR_AA32_PAN_BIT;

	// SS does not exist in AArch32, so ignore

	// CPSR.IL is set to zero upon any exception
	// See ARM DDI 0487E.a, page G1-5527

	new |= (old & PSR_AA32_GE_MASK);

	// CPSR.IT[7:0] are set to zero upon any exception
	// See prior comment above

	// CPSR.E is set to SCTLR.EE upon any exception
	// See ARM DDI 0487E.a, page G8-6245
	// See ARM DDI 0406C.d, page B4-1701
	if (sctlr & BIT(25))	/* SCTLR.EE */
		new |= PSR_AA32_E_BIT;

	// CPSR.A is unchanged upon an exception to Undefined, Supervisor
	// CPSR.A is set upon an exception to other modes
	// See ARM DDI 0487E.a, pages G1-5515 to G1-5516
	// See ARM DDI 0406C.d, page B1-1182
	new |= (old & PSR_AA32_A_BIT);
	if (mode != PSR_AA32_MODE_UND && mode != PSR_AA32_MODE_SVC)
		new |= PSR_AA32_A_BIT;

	// CPSR.I is set upon any exception
	// See ARM DDI 0487E.a, pages G1-5515 to G1-5516
	// See ARM DDI 0406C.d, page B1-1182
	new |= PSR_AA32_I_BIT;

	// CPSR.F is set upon an exception to FIQ
	// CPSR.F is unchanged upon an exception to other modes
	// See ARM DDI 0487E.a, pages G1-5515 to G1-5516
	// See ARM DDI 0406C.d, page B1-1182
	new |= (old & PSR_AA32_F_BIT);
	if (mode == PSR_AA32_MODE_FIQ)
		new |= PSR_AA32_F_BIT;

	// CPSR.T is set to SCTLR.TE upon any exception
	// See ARM DDI 0487E.a, page G8-5514
	// See ARM DDI 0406C.d, page B1-1181
	if (sctlr & BIT(30))	/* SCTLR.TE */
		new |= PSR_AA32_T_BIT;

	new |= mode;

	return new;
}
  140. static void prepare_fault32(struct kvm_vcpu *vcpu, u32 mode, u32 vect_offset)
  141. {
  142. unsigned long spsr = *vcpu_cpsr(vcpu);
  143. bool is_thumb = (spsr & PSR_AA32_T_BIT);
  144. u32 return_offset = return_offsets[vect_offset >> 2][is_thumb];
  145. u32 sctlr = vcpu_cp15(vcpu, c1_SCTLR);
  146. *vcpu_cpsr(vcpu) = get_except32_cpsr(vcpu, mode);
  147. /* Note: These now point to the banked copies */
  148. vcpu_write_spsr(vcpu, host_spsr_to_spsr32(spsr));
  149. *vcpu_reg32(vcpu, 14) = *vcpu_pc(vcpu) + return_offset;
  150. /* Branch to exception vector */
  151. if (sctlr & (1 << 13))
  152. vect_offset += 0xffff0000;
  153. else /* always have security exceptions */
  154. vect_offset += vcpu_cp15(vcpu, c12_VBAR);
  155. *vcpu_pc(vcpu) = vect_offset;
  156. }
  157. void kvm_inject_undef32(struct kvm_vcpu *vcpu)
  158. {
  159. bool loaded = pre_fault_synchronize(vcpu);
  160. prepare_fault32(vcpu, PSR_AA32_MODE_UND, 4);
  161. post_fault_synchronize(vcpu, loaded);
  162. }
  163. /*
  164. * Modelled after TakeDataAbortException() and TakePrefetchAbortException
  165. * pseudocode.
  166. */
  167. static void inject_abt32(struct kvm_vcpu *vcpu, bool is_pabt,
  168. unsigned long addr)
  169. {
  170. u32 vect_offset;
  171. u32 *far, *fsr;
  172. bool is_lpae;
  173. bool loaded;
  174. loaded = pre_fault_synchronize(vcpu);
  175. if (is_pabt) {
  176. vect_offset = 12;
  177. far = &vcpu_cp15(vcpu, c6_IFAR);
  178. fsr = &vcpu_cp15(vcpu, c5_IFSR);
  179. } else { /* !iabt */
  180. vect_offset = 16;
  181. far = &vcpu_cp15(vcpu, c6_DFAR);
  182. fsr = &vcpu_cp15(vcpu, c5_DFSR);
  183. }
  184. prepare_fault32(vcpu, PSR_AA32_MODE_ABT, vect_offset);
  185. *far = addr;
  186. /* Give the guest an IMPLEMENTATION DEFINED exception */
  187. is_lpae = (vcpu_cp15(vcpu, c2_TTBCR) >> 31);
  188. if (is_lpae) {
  189. *fsr = DFSR_LPAE | DFSR_FSC_EXTABT_LPAE;
  190. } else {
  191. /* no need to shuffle FS[4] into DFSR[10] as its 0 */
  192. *fsr = DFSR_FSC_EXTABT_nLPAE;
  193. }
  194. post_fault_synchronize(vcpu, loaded);
  195. }
/* Inject a data abort at fault address @addr into a 32bit guest. */
void kvm_inject_dabt32(struct kvm_vcpu *vcpu, unsigned long addr)
{
	inject_abt32(vcpu, false, addr);
}
/* Inject a prefetch abort at fault address @addr into a 32bit guest. */
void kvm_inject_pabt32(struct kvm_vcpu *vcpu, unsigned long addr)
{
	inject_abt32(vcpu, true, addr);
}