kvm_cache_regs.h

/* SPDX-License-Identifier: GPL-2.0 */
#ifndef ASM_KVM_CACHE_REGS_H
#define ASM_KVM_CACHE_REGS_H

#include <linux/kvm_host.h>

#define KVM_POSSIBLE_CR0_GUEST_BITS	(X86_CR0_TS | X86_CR0_WP)
#define KVM_POSSIBLE_CR4_GUEST_BITS				  \
	(X86_CR4_PVI | X86_CR4_DE | X86_CR4_PCE | X86_CR4_OSFXSR \
	 | X86_CR4_OSXMMEXCPT | X86_CR4_PGE | X86_CR4_TSD | X86_CR4_FSGSBASE)

#define X86_CR0_PDPTR_BITS    (X86_CR0_CD | X86_CR0_NW | X86_CR0_PG)
#define X86_CR4_TLBFLUSH_BITS (X86_CR4_PGE | X86_CR4_PCIDE | X86_CR4_PAE | X86_CR4_SMEP)
#define X86_CR4_PDPTR_BITS    (X86_CR4_PGE | X86_CR4_PSE | X86_CR4_PAE | X86_CR4_SMEP)

static_assert(!(KVM_POSSIBLE_CR0_GUEST_BITS & X86_CR0_PDPTR_BITS));

#define BUILD_KVM_GPR_ACCESSORS(lname, uname)				       \
static __always_inline unsigned long kvm_##lname##_read(struct kvm_vcpu *vcpu)\
{									       \
	return vcpu->arch.regs[VCPU_REGS_##uname];			       \
}									       \
static __always_inline void kvm_##lname##_write(struct kvm_vcpu *vcpu,	       \
						unsigned long val)	       \
{									       \
	vcpu->arch.regs[VCPU_REGS_##uname] = val;			       \
}
BUILD_KVM_GPR_ACCESSORS(rax, RAX)
BUILD_KVM_GPR_ACCESSORS(rbx, RBX)
BUILD_KVM_GPR_ACCESSORS(rcx, RCX)
BUILD_KVM_GPR_ACCESSORS(rdx, RDX)
BUILD_KVM_GPR_ACCESSORS(rbp, RBP)
BUILD_KVM_GPR_ACCESSORS(rsi, RSI)
BUILD_KVM_GPR_ACCESSORS(rdi, RDI)
#ifdef CONFIG_X86_64
BUILD_KVM_GPR_ACCESSORS(r8,  R8)
BUILD_KVM_GPR_ACCESSORS(r9,  R9)
BUILD_KVM_GPR_ACCESSORS(r10, R10)
BUILD_KVM_GPR_ACCESSORS(r11, R11)
BUILD_KVM_GPR_ACCESSORS(r12, R12)
BUILD_KVM_GPR_ACCESSORS(r13, R13)
BUILD_KVM_GPR_ACCESSORS(r14, R14)
BUILD_KVM_GPR_ACCESSORS(r15, R15)
#endif
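
/*
 * For reference, BUILD_KVM_GPR_ACCESSORS(rax, RAX) expands to:
 *
 *	static __always_inline unsigned long kvm_rax_read(struct kvm_vcpu *vcpu)
 *	{
 *		return vcpu->arch.regs[VCPU_REGS_RAX];
 *	}
 *	static __always_inline void kvm_rax_write(struct kvm_vcpu *vcpu,
 *						  unsigned long val)
 *	{
 *		vcpu->arch.regs[VCPU_REGS_RAX] = val;
 *	}
 *
 * The GPR accessors read and write vcpu->arch.regs directly, without the
 * regs_avail/regs_dirty tracking used by the helpers below.
 */
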
/*
 * avail  dirty
 *   0      0	  register in VMCS/VMCB
 *   0      1	  *INVALID*
 *   1      0	  register in vcpu->arch
 *   1      1	  register in vcpu->arch, needs to be stored back
 */
static inline bool kvm_register_is_available(struct kvm_vcpu *vcpu,
					     enum kvm_reg reg)
{
	return test_bit(reg, (unsigned long *)&vcpu->arch.regs_avail);
}

static inline bool kvm_register_is_dirty(struct kvm_vcpu *vcpu,
					 enum kvm_reg reg)
{
	return test_bit(reg, (unsigned long *)&vcpu->arch.regs_dirty);
}

static inline void kvm_register_mark_available(struct kvm_vcpu *vcpu,
					       enum kvm_reg reg)
{
	__set_bit(reg, (unsigned long *)&vcpu->arch.regs_avail);
}

static inline void kvm_register_mark_dirty(struct kvm_vcpu *vcpu,
					   enum kvm_reg reg)
{
	__set_bit(reg, (unsigned long *)&vcpu->arch.regs_avail);
	__set_bit(reg, (unsigned long *)&vcpu->arch.regs_dirty);
}
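
/*
 * Illustrative life cycle (a sketch of typical vendor-module behavior): the
 * avail/dirty bits of lazily handled registers are cleared after VM-Exit,
 * putting each such register in state 0/0, i.e. it lives only in the
 * VMCS/VMCB.  A read through a caching accessor triggers ->cache_reg() and
 * moves it to 1/0; a write moves it to 1/1 so the vendor module knows to
 * propagate the value back to the VMCS/VMCB before the next VM-Enter.
 */
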
/*
 * kvm_register_test_and_mark_available() is a special snowflake that uses an
 * arch bitop directly to avoid the explicit instrumentation that comes with
 * the generic bitops.  This allows code that cannot be instrumented (noinstr
 * functions), e.g. the low level VM-Enter/VM-Exit paths, to cache registers.
 */
static __always_inline bool kvm_register_test_and_mark_available(struct kvm_vcpu *vcpu,
								 enum kvm_reg reg)
{
	return arch___test_and_set_bit(reg, (unsigned long *)&vcpu->arch.regs_avail);
}
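
/*
 * Usage sketch, modeled on how VMX caches the exit qualification (field and
 * struct names assumed for illustration):
 *
 *	if (!kvm_register_test_and_mark_available(vcpu, VCPU_EXREG_EXIT_INFO_1))
 *		vmx->exit_qualification = vmcs_readl(EXIT_QUALIFICATION);
 *	return vmx->exit_qualification;
 */
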
/*
 * The "raw" register helpers are only for cases where the full 64 bits of a
 * register are read/written irrespective of current vCPU mode.  In other
 * words, odds are good you shouldn't be using the raw variants.
 */
static inline unsigned long kvm_register_read_raw(struct kvm_vcpu *vcpu, int reg)
{
	if (WARN_ON_ONCE((unsigned int)reg >= NR_VCPU_REGS))
		return 0;

	if (!kvm_register_is_available(vcpu, reg))
		kvm_x86_call(cache_reg)(vcpu, reg);

	return vcpu->arch.regs[reg];
}

static inline void kvm_register_write_raw(struct kvm_vcpu *vcpu, int reg,
					  unsigned long val)
{
	if (WARN_ON_ONCE((unsigned int)reg >= NR_VCPU_REGS))
		return;

	vcpu->arch.regs[reg] = val;
	kvm_register_mark_dirty(vcpu, reg);
}
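
/*
 * For comparison: the mode-aware kvm_register_read()/kvm_register_write()
 * helpers (defined in x86.h, an assumption about the surrounding tree) wrap
 * the raw variants and truncate the value to 32 bits when the vCPU is not in
 * 64-bit mode.  RIP and RSP below are legitimate raw users, as they are
 * always accessed at full width.
 */
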
static inline unsigned long kvm_rip_read(struct kvm_vcpu *vcpu)
{
	return kvm_register_read_raw(vcpu, VCPU_REGS_RIP);
}

static inline void kvm_rip_write(struct kvm_vcpu *vcpu, unsigned long val)
{
	kvm_register_write_raw(vcpu, VCPU_REGS_RIP, val);
}

static inline unsigned long kvm_rsp_read(struct kvm_vcpu *vcpu)
{
	return kvm_register_read_raw(vcpu, VCPU_REGS_RSP);
}

static inline void kvm_rsp_write(struct kvm_vcpu *vcpu, unsigned long val)
{
	kvm_register_write_raw(vcpu, VCPU_REGS_RSP, val);
}
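
/*
 * The PDPTRs are the four page-directory-pointer-table entries used by PAE
 * paging; architecturally they are loaded from memory when CR3 (or a paging
 * control in CR0/CR4) is written.  Decaching them on SVM may require reading
 * guest memory, hence the might_sleep() annotation below.
 */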
static inline u64 kvm_pdptr_read(struct kvm_vcpu *vcpu, int index)
{
	might_sleep();  /* on svm */
	if (!kvm_register_is_available(vcpu, VCPU_EXREG_PDPTR))
		kvm_x86_call(cache_reg)(vcpu, VCPU_EXREG_PDPTR);

	return vcpu->arch.walk_mmu->pdptrs[index];
}

static inline void kvm_pdptr_write(struct kvm_vcpu *vcpu, int index, u64 value)
{
	vcpu->arch.walk_mmu->pdptrs[index] = value;
}
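
/*
 * CR0/CR4 reads are filtered through the "possible guest bits" masks: a bit
 * the guest may own can change without a VM-Exit, so its shadow in vcpu->arch
 * can be stale and must be decached from the VMCS/VMCB before it is consumed.
 * Bits KVM always intercepts are served straight from vcpu->arch.
 */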
static inline ulong kvm_read_cr0_bits(struct kvm_vcpu *vcpu, ulong mask)
{
	ulong tmask = mask & KVM_POSSIBLE_CR0_GUEST_BITS;

	if ((tmask & vcpu->arch.cr0_guest_owned_bits) &&
	    !kvm_register_is_available(vcpu, VCPU_EXREG_CR0))
		kvm_x86_call(cache_reg)(vcpu, VCPU_EXREG_CR0);

	return vcpu->arch.cr0 & mask;
}

static __always_inline bool kvm_is_cr0_bit_set(struct kvm_vcpu *vcpu,
					       unsigned long cr0_bit)
{
	BUILD_BUG_ON(!is_power_of_2(cr0_bit));

	return !!kvm_read_cr0_bits(vcpu, cr0_bit);
}

static inline ulong kvm_read_cr0(struct kvm_vcpu *vcpu)
{
	return kvm_read_cr0_bits(vcpu, ~0UL);
}
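
/*
 * Example (hypothetical caller): testing whether the guest has paging
 * enabled:
 *
 *	if (kvm_is_cr0_bit_set(vcpu, X86_CR0_PG))
 *		...
 *
 * X86_CR0_PG is never a guest-owned bit, so the read is served from
 * vcpu->arch.cr0 without a ->cache_reg() round trip; the BUILD_BUG_ON()
 * rejects multi-bit masks at compile time.
 */
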
static inline ulong kvm_read_cr4_bits(struct kvm_vcpu *vcpu, ulong mask)
{
	ulong tmask = mask & KVM_POSSIBLE_CR4_GUEST_BITS;

	if ((tmask & vcpu->arch.cr4_guest_owned_bits) &&
	    !kvm_register_is_available(vcpu, VCPU_EXREG_CR4))
		kvm_x86_call(cache_reg)(vcpu, VCPU_EXREG_CR4);

	return vcpu->arch.cr4 & mask;
}

static __always_inline bool kvm_is_cr4_bit_set(struct kvm_vcpu *vcpu,
					       unsigned long cr4_bit)
{
	BUILD_BUG_ON(!is_power_of_2(cr4_bit));

	return !!kvm_read_cr4_bits(vcpu, cr4_bit);
}

static inline ulong kvm_read_cr3(struct kvm_vcpu *vcpu)
{
	if (!kvm_register_is_available(vcpu, VCPU_EXREG_CR3))
		kvm_x86_call(cache_reg)(vcpu, VCPU_EXREG_CR3);

	return vcpu->arch.cr3;
}

static inline ulong kvm_read_cr4(struct kvm_vcpu *vcpu)
{
	return kvm_read_cr4_bits(vcpu, ~0UL);
}
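
/*
 * Reads the guest's EDX:EAX pair as a single 64-bit value, e.g. the data
 * operand of an emulated WRMSR.  The "& -1u" masks each GPR down to its low
 * 32 bits before the halves are combined.
 */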
static inline u64 kvm_read_edx_eax(struct kvm_vcpu *vcpu)
{
	return (kvm_rax_read(vcpu) & -1u)
		| ((u64)(kvm_rdx_read(vcpu) & -1u) << 32);
}
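
/*
 * "Guest mode" below refers to nested virtualization: the flag is set while
 * the vCPU is executing a nested (L2) guest on behalf of its L1 hypervisor.
 */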
static inline void enter_guest_mode(struct kvm_vcpu *vcpu)
{
	vcpu->arch.hflags |= HF_GUEST_MASK;
	vcpu->stat.guest_mode = 1;
}

static inline void leave_guest_mode(struct kvm_vcpu *vcpu)
{
	vcpu->arch.hflags &= ~HF_GUEST_MASK;

	if (vcpu->arch.load_eoi_exitmap_pending) {
		vcpu->arch.load_eoi_exitmap_pending = false;
		kvm_make_request(KVM_REQ_LOAD_EOI_EXITMAP, vcpu);
	}

	vcpu->stat.guest_mode = 0;
}

static inline bool is_guest_mode(struct kvm_vcpu *vcpu)
{
	return vcpu->arch.hflags & HF_GUEST_MASK;
}

#endif