// SPDX-License-Identifier: GPL-2.0
/*
 * arch/arm64/kvm/fpsimd.c: Guest/host FPSIMD context coordination helpers
 *
 * Copyright 2018 Arm Limited
 * Author: Dave Martin <Dave.Martin@arm.com>
 */

#include <linux/irqflags.h>
#include <linux/sched.h>
#include <linux/kvm_host.h>
#include <asm/fpsimd.h>
#include <asm/kvm_asm.h>
#include <asm/kvm_hyp.h>
#include <asm/kvm_mmu.h>
#include <asm/sysreg.h>

/*
 * Called on entry to KVM_RUN unless this vcpu previously ran at least
 * once and the most recent prior KVM_RUN for this vcpu was called from
 * the same task as current (highly likely).
 *
 * This is guaranteed to execute before kvm_arch_vcpu_load_fp(vcpu),
 * such that on entering hyp the relevant parts of current are already
 * mapped.
 */
int kvm_arch_vcpu_run_map_fp(struct kvm_vcpu *vcpu)
{
	struct user_fpsimd_state *fpsimd = &current->thread.uw.fpsimd_state;
	int ret;

	/* pKVM has its own tracking of the host fpsimd state. */
	if (is_protected_kvm_enabled())
		return 0;

	/* Make sure the host task fpsimd state is visible to hyp: */
	ret = kvm_share_hyp(fpsimd, fpsimd + 1);
	if (ret)
		return ret;

	return 0;
}
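
/*
 * Rough sketch of where the helpers in this file sit in the KVM_RUN
 * path (simplified; exact call sites differ between kernel versions):
 *
 *	kvm_arch_vcpu_run_map_fp(vcpu);    // share current's FP state with hyp
 *	kvm_arch_vcpu_load_fp(vcpu);       // vcpu_load: host owns the FP regs
 *	...
 *	kvm_arch_vcpu_ctxflush_fp(vcpu);   // IRQs off, just before guest entry
 *	// guest runs; hyp loads guest FP state on an FPSIMD/SVE access trap
 *	kvm_arch_vcpu_ctxsync_fp(vcpu);    // just after guest exit
 *	...
 *	kvm_arch_vcpu_put_fp(vcpu);        // vcpu_put: write back, restore traps
 */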

/*
 * Prepare vcpu for saving the host's FPSIMD state and loading the guest's.
 * The actual loading is done by the FPSIMD access trap taken to hyp.
 *
 * Here, we just set the correct metadata to indicate that the FPSIMD
 * state in the cpu regs (if any) belongs to current on the host.
 */
void kvm_arch_vcpu_load_fp(struct kvm_vcpu *vcpu)
{
	BUG_ON(!current->mm);

	if (!system_supports_fpsimd())
		return;

	fpsimd_kvm_prepare();

	/*
	 * We will check TIF_FOREIGN_FPSTATE just before entering the
	 * guest in kvm_arch_vcpu_ctxflush_fp() and override this to
	 * FP_STATE_FREE if the flag is set.
	 */
	*host_data_ptr(fp_owner) = FP_STATE_HOST_OWNED;
	*host_data_ptr(fpsimd_state) = kern_hyp_va(&current->thread.uw.fpsimd_state);
	*host_data_ptr(fpmr_ptr) = kern_hyp_va(&current->thread.uw.fpmr);

	vcpu_clear_flag(vcpu, HOST_SVE_ENABLED);
	if (read_sysreg(cpacr_el1) & CPACR_EL1_ZEN_EL0EN)
		vcpu_set_flag(vcpu, HOST_SVE_ENABLED);

	if (system_supports_sme()) {
		vcpu_clear_flag(vcpu, HOST_SME_ENABLED);
		if (read_sysreg(cpacr_el1) & CPACR_EL1_SMEN_EL0EN)
			vcpu_set_flag(vcpu, HOST_SME_ENABLED);

		/*
		 * If PSTATE.SM is enabled then save any pending FP
		 * state and disable PSTATE.SM. If we leave PSTATE.SM
		 * enabled and the guest does not enable SME via
		 * CPACR_EL1.SMEN then operations that should be valid
		 * may generate SME traps from EL1 to EL1 which we
		 * can't intercept and which would confuse the guest.
		 *
		 * Do the same for PSTATE.ZA in the case where there
		 * is state in the registers which has not already
		 * been saved; this is very unlikely to happen.
		 */
		if (read_sysreg_s(SYS_SVCR) & (SVCR_SM_MASK | SVCR_ZA_MASK)) {
			*host_data_ptr(fp_owner) = FP_STATE_FREE;
			fpsimd_save_and_flush_cpu_state();
		}
	}

	/*
	 * If normal guests gain SME support, maintain this behavior for pKVM
	 * guests, which don't support SME.
	 */
	WARN_ON(is_protected_kvm_enabled() && system_supports_sme() &&
		read_sysreg_s(SYS_SVCR));
}
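
/*
 * Note on the fp_owner values used above (as implied by this file and by
 * guest_owns_fp_regs(); the enum itself lives in the KVM host data
 * definitions):
 *
 *	FP_STATE_HOST_OWNED:  the CPU FP regs hold current's state, so hyp
 *			      must save them before loading the guest's.
 *	FP_STATE_FREE:        nobody owns the regs (e.g. they were already
 *			      saved and invalidated); hyp may load the guest
 *			      state without saving anything first.
 *	FP_STATE_GUEST_OWNED: set by hyp once the guest's FPSIMD/SVE state
 *			      has been loaded on an FP access trap.
 */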

/*
 * Called just before entering the guest once we are no longer preemptible
 * and interrupts are disabled. If we have managed to run anything using
 * FP while we were preemptible (such as off the back of an interrupt),
 * then neither the host nor the guest owns the FP hardware (and it was the
 * responsibility of the code that used FP to save the existing state).
 */
void kvm_arch_vcpu_ctxflush_fp(struct kvm_vcpu *vcpu)
{
	if (test_thread_flag(TIF_FOREIGN_FPSTATE))
		*host_data_ptr(fp_owner) = FP_STATE_FREE;
}
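
/*
 * Put differently (a simplification of the comment above): if kernel-mode
 * NEON ran after kvm_arch_vcpu_load_fp(), it will already have saved
 * current's FP state and set TIF_FOREIGN_FPSTATE, so there is nothing
 * left for hyp to save and the owner can be downgraded to FP_STATE_FREE.
 */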

/*
 * Called just after exiting the guest. If the guest FPSIMD state
 * was loaded, update the host's context tracking data to mark the CPU
 * FPSIMD regs as dirty and belonging to vcpu so that they will be
 * written back if the kernel clobbers them due to kernel-mode NEON
 * before re-entry into the guest.
 */
void kvm_arch_vcpu_ctxsync_fp(struct kvm_vcpu *vcpu)
{
	struct cpu_fp_state fp_state;

	WARN_ON_ONCE(!irqs_disabled());

	if (guest_owns_fp_regs()) {
		/*
		 * Currently we do not support SME guests so SVCR is
		 * always 0 and we just need a variable to point to.
		 */
		fp_state.st = &vcpu->arch.ctxt.fp_regs;
		fp_state.sve_state = vcpu->arch.sve_state;
		fp_state.sve_vl = vcpu->arch.sve_max_vl;
		fp_state.sme_state = NULL;
		fp_state.svcr = &__vcpu_sys_reg(vcpu, SVCR);
		fp_state.fpmr = &__vcpu_sys_reg(vcpu, FPMR);
		fp_state.fp_type = &vcpu->arch.fp_type;

		if (vcpu_has_sve(vcpu))
			fp_state.to_save = FP_STATE_SVE;
		else
			fp_state.to_save = FP_STATE_FPSIMD;

		fpsimd_bind_state_to_cpu(&fp_state);

		clear_thread_flag(TIF_FOREIGN_FPSTATE);
	}
}
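
/*
 * A loose sketch of what the binding above achieves (based on the comment
 * block above and the usual behaviour of the arm64 lazy-FP code): once
 * fpsimd_bind_state_to_cpu() has run, the host treats the vcpu's register
 * file as the current owner of the CPU FP regs, e.g.:
 *
 *	kernel_neon_begin();	// saves the guest regs via fp_state.st /
 *				// sve_state before clobbering the hardware
 *
 * Clearing TIF_FOREIGN_FPSTATE records that the hardware contents match
 * the bound state, so nothing is reloaded unnecessarily before re-entry.
 */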

/*
 * Write back the vcpu FPSIMD regs if they are dirty, and invalidate the
 * cpu FPSIMD regs so that they can't be spuriously reused if this vcpu
 * disappears and another task or vcpu appears that recycles the same
 * struct fpsimd_state.
 */
void kvm_arch_vcpu_put_fp(struct kvm_vcpu *vcpu)
{
	unsigned long flags;

	local_irq_save(flags);

	/*
	 * If we have VHE then the Hyp code will reset CPACR_EL1 to
	 * the default value and we need to reenable SME.
	 */
	if (has_vhe() && system_supports_sme()) {
		/* Also restore EL0 state seen on entry */
		if (vcpu_get_flag(vcpu, HOST_SME_ENABLED))
			sysreg_clear_set(CPACR_EL1, 0, CPACR_ELx_SMEN);
		else
			sysreg_clear_set(CPACR_EL1,
					 CPACR_EL1_SMEN_EL0EN,
					 CPACR_EL1_SMEN_EL1EN);
		isb();
	}

	if (guest_owns_fp_regs()) {
		if (vcpu_has_sve(vcpu)) {
			u64 zcr = read_sysreg_el1(SYS_ZCR);

			/*
			 * If the vCPU is in the hyp context then ZCR_EL1 is
			 * loaded with its vEL2 counterpart.
			 */
			__vcpu_sys_reg(vcpu, vcpu_sve_zcr_elx(vcpu)) = zcr;

			/*
			 * Restore the VL that was saved when bound to the CPU,
			 * which is the maximum VL for the guest. Because the
			 * layout of the data when saving the sve state depends
			 * on the VL, we need to use a consistent (i.e., the
			 * maximum) VL.
			 * Note that this means that at guest exit ZCR_EL1 is
			 * not necessarily the same as on guest entry.
			 *
			 * ZCR_EL2 holds the guest hypervisor's VL when running
			 * a nested guest, which could be smaller than the
			 * max for the vCPU. Similar to above, we first need to
			 * switch to a VL consistent with the layout of the
			 * vCPU's SVE state. KVM support for NV implies VHE, so
			 * using the ZCR_EL1 alias is safe.
			 */
			if (!has_vhe() || (vcpu_has_nv(vcpu) && !is_hyp_ctxt(vcpu)))
				sve_cond_update_zcr_vq(vcpu_sve_max_vq(vcpu) - 1,
						       SYS_ZCR_EL1);
		}

		/*
		 * Flush (save and invalidate) the fpsimd/sve state so that if
		 * the host tries to use fpsimd/sve, it's not using stale data
		 * from the guest.
		 *
		 * Flushing the state sets the TIF_FOREIGN_FPSTATE bit for the
		 * context unconditionally, in both nVHE and VHE. This allows
		 * the kernel to restore the fpsimd/sve state, including ZCR_EL1
		 * when needed.
		 */
		fpsimd_save_and_flush_cpu_state();
	} else if (has_vhe() && system_supports_sve()) {
		/*
		 * The FPSIMD/SVE state in the CPU has not been touched, and we
		 * have SVE (and VHE): CPACR_EL1 (alias CPTR_EL2) has been
		 * reset by kvm_reset_cptr_el2() in the Hyp code, disabling SVE
		 * for EL0. To avoid spurious traps, restore the trap state
		 * seen by kvm_arch_vcpu_load_fp():
		 */
		if (vcpu_get_flag(vcpu, HOST_SVE_ENABLED))
			sysreg_clear_set(CPACR_EL1, 0, CPACR_EL1_ZEN_EL0EN);
		else
			sysreg_clear_set(CPACR_EL1, CPACR_EL1_ZEN_EL0EN, 0);
	}

	local_irq_restore(flags);
}
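
/*
 * Summary of the exit paths in kvm_arch_vcpu_put_fp() (paraphrasing the
 * comments above):
 *
 *	guest owns the FP regs:	save the guest state and invalidate the CPU
 *				regs, so the host reloads its own state
 *				lazily via TIF_FOREIGN_FPSTATE.
 *	VHE + SVE, FP untouched: only the EL0 SVE enable bit in CPACR_EL1
 *				needs restoring, since hyp reset CPACR_EL1
 *				on exit.
 *	otherwise:		nothing to do beyond the SME trap restore at
 *				the top of the function.
 */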