entry.S

/*
 * Copyright (C) 2015 - ARM Ltd
 * Author: Marc Zyngier <marc.zyngier@arm.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */
#include <linux/linkage.h>

#include <asm/alternative.h>
#include <asm/asm-offsets.h>
#include <asm/assembler.h>
#include <asm/fpsimdmacros.h>
#include <asm/kvm.h>
#include <asm/kvm_arm.h>
#include <asm/kvm_asm.h>
#include <asm/kvm_mmu.h>

#define CPU_GP_REG_OFFSET(x)	(CPU_GP_REGS + x)
#define CPU_XREG_OFFSET(x)	CPU_GP_REG_OFFSET(CPU_USER_PT_REGS + 8*x)
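// Each general-purpose register is 8 bytes wide, so xN lives 8*N bytes
// into the user_pt_regs block embedded in the CPU context.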

	.text
	.pushsection	.hyp.text, "ax"

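// AAPCS64 marks x19-x28 as callee-saved, with x29 as the frame pointer
// and x30 (lr) as the link register; a caller's x0-x18 may legally be
// clobbered, which is why only this subset is saved and restored here.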
.macro save_callee_saved_regs ctxt
	stp	x19, x20, [\ctxt, #CPU_XREG_OFFSET(19)]
	stp	x21, x22, [\ctxt, #CPU_XREG_OFFSET(21)]
	stp	x23, x24, [\ctxt, #CPU_XREG_OFFSET(23)]
	stp	x25, x26, [\ctxt, #CPU_XREG_OFFSET(25)]
	stp	x27, x28, [\ctxt, #CPU_XREG_OFFSET(27)]
	stp	x29, lr,  [\ctxt, #CPU_XREG_OFFSET(29)]
.endm

.macro restore_callee_saved_regs ctxt
	ldp	x19, x20, [\ctxt, #CPU_XREG_OFFSET(19)]
	ldp	x21, x22, [\ctxt, #CPU_XREG_OFFSET(21)]
	ldp	x23, x24, [\ctxt, #CPU_XREG_OFFSET(23)]
	ldp	x25, x26, [\ctxt, #CPU_XREG_OFFSET(25)]
	ldp	x27, x28, [\ctxt, #CPU_XREG_OFFSET(27)]
	ldp	x29, lr,  [\ctxt, #CPU_XREG_OFFSET(29)]
.endm

/*
 * u64 __guest_enter(struct kvm_vcpu *vcpu,
 *		     struct kvm_cpu_context *host_ctxt);
 */
ENTRY(__guest_enter)
	// x0: vcpu
	// x1: host context
	// x2-x17: clobbered by macros
	// x18: guest context

	// Store the host regs
	save_callee_saved_regs x1

	// Now that the host state is stored, a pending RAS SError must
	// affect the host. If any asynchronous exception is pending we
	// defer the guest entry. The DSB isn't necessary before v8.2 as
	// any SError would be fatal.
alternative_if ARM64_HAS_RAS_EXTN
	dsb	nshst
	isb
alternative_else_nop_endif
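	// ISR_EL1 reads as non-zero while an IRQ, FIQ or SError is
	// pending; if so, return to the host with ARM_EXCEPTION_IRQ so it
	// can deal with the exception before we enter the guest.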
	mrs	x1, isr_el1
	cbz	x1, 1f
	mov	x0, #ARM_EXCEPTION_IRQ
	ret

1:
	add	x18, x0, #VCPU_CONTEXT

	// Restore guest regs x0-x17
	ldp	x0, x1,   [x18, #CPU_XREG_OFFSET(0)]
	ldp	x2, x3,   [x18, #CPU_XREG_OFFSET(2)]
	ldp	x4, x5,   [x18, #CPU_XREG_OFFSET(4)]
	ldp	x6, x7,   [x18, #CPU_XREG_OFFSET(6)]
	ldp	x8, x9,   [x18, #CPU_XREG_OFFSET(8)]
	ldp	x10, x11, [x18, #CPU_XREG_OFFSET(10)]
	ldp	x12, x13, [x18, #CPU_XREG_OFFSET(12)]
	ldp	x14, x15, [x18, #CPU_XREG_OFFSET(14)]
	ldp	x16, x17, [x18, #CPU_XREG_OFFSET(16)]
	// Restore guest regs x19-x29, lr
	restore_callee_saved_regs x18

	// Restore guest reg x18
	ldr	x18, [x18, #CPU_XREG_OFFSET(18)]

	// Do not touch any register after this!
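	// eret jumps into the guest: the PC comes from ELR_EL2 and PSTATE
	// from SPSR_EL2, both of which were set up before __guest_enter
	// was called.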
	eret
ENDPROC(__guest_enter)

ENTRY(__guest_exit)
	// x0: return code
	// x1: vcpu
	// x2-x29,lr: vcpu regs
	// vcpu x0-x1 on the stack
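	// (The hyp exception vector is expected to have stashed the
	// guest's x0/x1 on the stack and loaded the vcpu pointer into x1
	// before branching here.)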

	add	x1, x1, #VCPU_CONTEXT

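	// Set PSTATE.PAN again if the CPU has it; taking the exception
	// from the guest does not necessarily set it for us.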
	ALTERNATIVE(nop, SET_PSTATE_PAN(1), ARM64_HAS_PAN, CONFIG_ARM64_PAN)

	// Store the guest regs x2 and x3
	stp	x2, x3,   [x1, #CPU_XREG_OFFSET(2)]

	// Retrieve the guest regs x0-x1 from the stack
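	// (post-index addressing: this also pops the 16-byte slot the
	// vector pushed)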
	ldp	x2, x3, [sp], #16	// x0, x1

	// Store the guest regs x0-x1 and x4-x18
	stp	x2, x3,   [x1, #CPU_XREG_OFFSET(0)]
	stp	x4, x5,   [x1, #CPU_XREG_OFFSET(4)]
	stp	x6, x7,   [x1, #CPU_XREG_OFFSET(6)]
	stp	x8, x9,   [x1, #CPU_XREG_OFFSET(8)]
	stp	x10, x11, [x1, #CPU_XREG_OFFSET(10)]
	stp	x12, x13, [x1, #CPU_XREG_OFFSET(12)]
	stp	x14, x15, [x1, #CPU_XREG_OFFSET(14)]
	stp	x16, x17, [x1, #CPU_XREG_OFFSET(16)]
	str	x18,      [x1, #CPU_XREG_OFFSET(18)]

	// Store the guest regs x19-x29, lr
	save_callee_saved_regs x1

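	// get_host_ctxt (asm/kvm_asm.h) locates this CPU's host context,
	// using the per-CPU offset kept in tpidr_el2.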
	get_host_ctxt	x2, x3

	// Now restore the host regs
	restore_callee_saved_regs x2

alternative_if ARM64_HAS_RAS_EXTN
	// If we have the RAS extensions we can consume a pending error
	// without having to unmask SErrors and issue an isb.
	esb
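	// The ESB defers any pending SError, recording its syndrome in
	// DISR_EL1; zero there means nothing was pending.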
	mrs_s	x2, SYS_DISR_EL1
	str	x2, [x1, #(VCPU_FAULT_DISR - VCPU_CONTEXT)]
	cbz	x2, 1f
	msr_s	SYS_DISR_EL1, xzr
	orr	x0, x0, #(1<<ARM_EXIT_WITH_SERROR_BIT)
1:	ret
alternative_else
	// If we have a pending asynchronous abort, now is the
	// time to find out. From your VAXorcist book, page 666:
	// "Threaten me not, oh Evil one! For I speak with
	// the power of DEC, and I command thee to show thyself!"
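	// Stash the EL2 exception registers and the return code: a
	// pending SError taken in the window below will overwrite
	// ELR_EL2, ESR_EL2 and SPSR_EL2.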
	mrs	x2, elr_el2
	mrs	x3, esr_el2
	mrs	x4, spsr_el2
	mov	x5, x0

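	// Complete any outstanding memory accesses first, so that an
	// SError they raise is already pending when the window opens.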
	dsb	sy		// Synchronize against in-flight ld/st
	nop
	msr	daifclr, #4	// Unmask aborts
alternative_endif

	// This is our single instruction exception window. A pending
	// SError is guaranteed to occur at the earliest when we unmask
	// it, and at the latest just after the ISB.
abort_guest_exit_start:

	isb

abort_guest_exit_end:

	msr	daifset, #4	// Mask aborts
	ret

	_kvm_extable	abort_guest_exit_start, 9997f
	_kvm_extable	abort_guest_exit_end, 9997f
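	// If the SError fires inside the window, the hyp exception table
	// entries above divert the abort handler to the 9997 fixup below.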
9997:
	msr	daifset, #4	// Mask aborts
	mov	x0, #(1 << ARM_EXIT_WITH_SERROR_BIT)

	// Restore the EL2 exception context (clobbered by the SError) so
	// that we can report some information. Merge the exception code
	// with the SError pending bit.
	msr	elr_el2, x2
	msr	esr_el2, x3
	msr	spsr_el2, x4
	orr	x0, x0, x5
1:	ret
ENDPROC(__guest_exit)