pauth.c

// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2024 - Google LLC
 * Author: Marc Zyngier <maz@kernel.org>
 *
 * Primitive PAuth emulation for ERETAA/ERETAB.
 *
 * This code assumes that it is run from EL2, and that it is part of
 * the emulation of ERETAx for a guest hypervisor. That's a lot of
 * baked-in assumptions and shortcuts.
 *
 * Do not reuse for anything else!
 */
#include <linux/kvm_host.h>

#include <asm/gpr-num.h>
#include <asm/kvm_emulate.h>
#include <asm/pointer_auth.h>
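
/*
 * The macro below hand-assembles a PACGA instruction with .inst,
 * OR'ing the operand register numbers into the Rd (bit 0), Rn (bit 5)
 * and Rm (bit 16) fields of the base opcode.
 */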
/* PACGA Xd, Xn, Xm */
#define PACGA(d, n, m)						\
	asm volatile(__DEFINE_ASM_GPR_NUMS			\
		     ".inst 0x9AC03000 |"			\
		     "(.L__gpr_num_%[Rd] << 0) |"		\
		     "(.L__gpr_num_%[Rn] << 5) |"		\
		     "(.L__gpr_num_%[Rm] << 16)\n"		\
		     : [Rd] "=r" ((d))				\
		     : [Rn] "r" ((n)), [Rm] "r" ((m)))
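
/*
 * Compute PACGA(ptr, mod) using the vcpu's instruction key @ikey and
 * its SP_EL2 as the modifier: the APGA key currently live on the CPU
 * is saved, the vcpu key temporarily installed for the PACGA
 * instruction, and the saved key restored before re-enabling
 * preemption.
 */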
static u64 compute_pac(struct kvm_vcpu *vcpu, u64 ptr,
		       struct ptrauth_key ikey)
{
	struct ptrauth_key gkey;
	u64 mod, pac = 0;

	preempt_disable();
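
	/*
	 * ERETAx uses the current stack pointer as the modifier. For a
	 * vEL2 guest this is its SP_EL2, which lives either in the
	 * in-memory sysreg file or, when the sysregs are loaded on the
	 * CPU, in the hardware SP_EL1 register (vEL2 runs at EL1).
	 */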
	if (!vcpu_get_flag(vcpu, SYSREGS_ON_CPU))
		mod = __vcpu_sys_reg(vcpu, SP_EL2);
	else
		mod = read_sysreg(sp_el1);

	gkey.lo = read_sysreg_s(SYS_APGAKEYLO_EL1);
	gkey.hi = read_sysreg_s(SYS_APGAKEYHI_EL1);

	__ptrauth_key_install_nosync(APGA, ikey);
	isb();

	PACGA(pac, ptr, mod);
	isb();

	__ptrauth_key_install_nosync(APGA, gkey);

	preempt_enable();

	/* PAC in the top 32 bits */
	return pac;
}
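
/*
 * Whether the top byte of the pointer is excluded from the PAC: true
 * when TBI is enabled for the relevant half of the address space and
 * TBID does not exempt instruction addresses from it.
 */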
static bool effective_tbi(struct kvm_vcpu *vcpu, bool bit55)
{
	u64 tcr = vcpu_read_sys_reg(vcpu, TCR_EL2);
	bool tbi, tbid;

	/*
	 * Since we are authenticating an instruction address, we have
	 * to take TBID into account. If E2H==0, ignore VA[55], as
	 * TCR_EL2 only has a single TBI/TBID. If VA[55] was set in
	 * this case, this is likely a guest bug...
	 */
	if (!vcpu_el2_e2h_is_set(vcpu)) {
		tbi = tcr & BIT(20);
		tbid = tcr & BIT(29);
	} else if (bit55) {
		tbi = tcr & TCR_TBI1;
		tbid = tcr & TCR_TBID1;
	} else {
		tbi = tcr & TCR_TBI0;
		tbid = tcr & TCR_TBID0;
	}

	return tbi && !tbid;
}
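
/*
 * Lowest bit of the PAC field: one above the top bit of the VA range,
 * i.e. 64 - TxSZ for the half of the address space selected by bit
 * 55, with TxSZ clamped to the 16..39 range supported here.
 */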
static int compute_bottom_pac(struct kvm_vcpu *vcpu, bool bit55)
{
	static const int maxtxsz = 39; // Revisit these two values once
	static const int mintxsz = 16; // (if) we support TTST/LVA/LVA2
	u64 tcr = vcpu_read_sys_reg(vcpu, TCR_EL2);
	int txsz;

	if (!vcpu_el2_e2h_is_set(vcpu) || !bit55)
		txsz = FIELD_GET(TCR_T0SZ_MASK, tcr);
	else
		txsz = FIELD_GET(TCR_T1SZ_MASK, tcr);

	return 64 - clamp(txsz, mintxsz, maxtxsz);
}
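
/*
 * Mask of the bits that hold the PAC: everything between bit 54 and
 * the top of the VA range, plus the top byte when it is not ignored
 * by TBI.
 */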
static u64 compute_pac_mask(struct kvm_vcpu *vcpu, bool bit55)
{
	int bottom_pac;
	u64 mask;

	bottom_pac = compute_bottom_pac(vcpu, bit55);

	mask = GENMASK(54, bottom_pac);
	if (!effective_tbi(vcpu, bit55))
		mask |= GENMASK(63, 56);

	return mask;
}
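
/*
 * Strip the PAC by extending bit 55 over the whole PAC field, giving
 * back the canonical address.
 */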
static u64 to_canonical_addr(struct kvm_vcpu *vcpu, u64 ptr, u64 mask)
{
	bool bit55 = !!(ptr & BIT(55));

	if (bit55)
		return ptr | mask;

	return ptr & ~mask;
}
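
/*
 * Pre-PAuth2 behaviour on a failed authentication: plant an error
 * code (1 for the A key, 2 for the B key) in two well-defined bits of
 * the address, just below bit 55 when the top byte is ignored and
 * just below bit 63 otherwise, making the result non-canonical.
 */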
static u64 corrupt_addr(struct kvm_vcpu *vcpu, u64 ptr)
{
	bool bit55 = !!(ptr & BIT(55));
	u64 mask, error_code;
	int shift;

	if (effective_tbi(vcpu, bit55)) {
		mask = GENMASK(54, 53);
		shift = 53;
	} else {
		mask = GENMASK(62, 61);
		shift = 61;
	}

	if (esr_iss_is_eretab(kvm_vcpu_get_esr(vcpu)))
		error_code = 2ULL << shift;
	else
		error_code = 1ULL << shift;

	ptr &= ~mask;
	ptr |= error_code;

	return ptr;
}

/*
 * Authenticate an ERETAA/ERETAB instruction, returning true if the
 * authentication succeeded and false otherwise. In all cases, *elr
 * contains the VA to ERET to. Potential exception injection is left
 * to the caller.
 */
bool kvm_auth_eretax(struct kvm_vcpu *vcpu, u64 *elr)
{
	u64 sctlr = vcpu_read_sys_reg(vcpu, SCTLR_EL2);
	u64 esr = kvm_vcpu_get_esr(vcpu);
	u64 ptr, cptr, pac, mask;
	struct ptrauth_key ikey;

	*elr = ptr = vcpu_read_sys_reg(vcpu, ELR_EL2);

	/* We assume we're already in the context of an ERETAx */
	if (esr_iss_is_eretab(esr)) {
		if (!(sctlr & SCTLR_EL1_EnIB))
			return true;

		ikey.lo = __vcpu_sys_reg(vcpu, APIBKEYLO_EL1);
		ikey.hi = __vcpu_sys_reg(vcpu, APIBKEYHI_EL1);
	} else {
		if (!(sctlr & SCTLR_EL1_EnIA))
			return true;

		ikey.lo = __vcpu_sys_reg(vcpu, APIAKEYLO_EL1);
		ikey.hi = __vcpu_sys_reg(vcpu, APIAKEYHI_EL1);
	}
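
	/*
	 * Strip the PAC from the target address and recompute it over
	 * the canonical address with the selected key.
	 */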
	mask = compute_pac_mask(vcpu, !!(ptr & BIT(55)));
	cptr = to_canonical_addr(vcpu, ptr, mask);

	pac = compute_pac(vcpu, cptr, ikey);

	/*
	 * Slightly deviate from the pseudocode: if we have a PAC
	 * match with the signed pointer, then it must be good.
	 * Anything after this point is pure error handling.
	 */
	if ((pac & mask) == (ptr & mask)) {
		*elr = cptr;
		return true;
	}

	/*
	 * Authentication failed: corrupt the canonical address if
	 * PAuth2 isn't implemented, or XOR the computed PAC into the
	 * pointer if it is.
	 */
	if (!kvm_has_pauth(vcpu->kvm, PAuth2))
		cptr = corrupt_addr(vcpu, cptr);
	else
		cptr = ptr ^ (pac & mask);

	*elr = cptr;

	return false;
}