/*
 * gtests/tests/vmx_tsc_adjust_test.c
 *
 * Copyright (C) 2018, Google LLC.
 *
 * This work is licensed under the terms of the GNU GPL, version 2.
 *
 * IA32_TSC_ADJUST test
 *
 * According to the SDM, "if an execution of WRMSR to the
 * IA32_TIME_STAMP_COUNTER MSR adds (or subtracts) value X from the TSC,
 * the logical processor also adds (or subtracts) value X from the
 * IA32_TSC_ADJUST MSR."
 *
 * Note that when L1 doesn't intercept writes to IA32_TSC, a
 * WRMSR(IA32_TSC) from L2 sets L1's TSC value, not L2's perceived TSC
 * value.
 *
 * This test verifies that this unusual case is handled correctly.
 */
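/*
 * Concretely, the invariant under test: after
 *
 *	wrmsr(MSR_IA32_TSC, rdtsc() - X);
 *
 * IA32_TSC_ADJUST should read approximately -X. Only approximately,
 * because the TSC keeps ticking between the rdtsc() and the wrmsr();
 * this is why check_ia32_tsc_adjust() below asserts an upper bound
 * (adjust <= max) rather than exact equality.
 */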
#include "test_util.h"
#include "kvm_util.h"
#include "x86.h"
#include "vmx.h"

#include <string.h>
#include <sys/ioctl.h>

#include "../kselftest.h"
#ifndef MSR_IA32_TSC_ADJUST
#define MSR_IA32_TSC_ADJUST 0x3b
#endif

#define PAGE_SIZE	4096
#define VCPU_ID		5

#define TSC_ADJUST_VALUE (1ll << 32)
#define TSC_OFFSET_VALUE -(1ll << 48)
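/*
 * Note: the two enums and struct kvm_single_msr below are not
 * referenced anywhere else in this file; main() uses the GUEST_PORT_*
 * values from the selftest framework instead. They appear to be
 * leftovers from an earlier revision of the test.
 */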
enum {
	PORT_ABORT = 0x1000,
	PORT_REPORT,
	PORT_DONE,
};

enum {
	VMXON_PAGE = 0,
	VMCS_PAGE,
	MSR_BITMAP_PAGE,
	NUM_VMX_PAGES,
};
struct kvm_single_msr {
	struct kvm_msrs header;
	struct kvm_msr_entry entry;
} __attribute__((packed));

/* The virtual machine object. */
static struct kvm_vm *vm;
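/*
 * Read IA32_TSC_ADJUST, report the value to the host via GUEST_SYNC,
 * and assert that it does not exceed the expected maximum. It may fall
 * slightly short of -max, since the TSC advances between the rdtsc()
 * and the wrmsr() in the callers.
 */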
static void check_ia32_tsc_adjust(int64_t max)
{
	int64_t adjust;

	adjust = rdmsr(MSR_IA32_TSC_ADJUST);
	GUEST_SYNC(adjust);
	GUEST_ASSERT(adjust <= max);
}
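/*
 * L2 reconstructs L1's TSC by undoing the TSC offset L1 programmed
 * (L2's TSC = L1's TSC + TSC_OFFSET_VALUE), then writes IA32_TSC.
 * Because L1 does not intercept the write (the MSR bitmap set up in
 * l1_guest_code() is empty), the write lands in L1's timebase, so
 * L1's IA32_TSC_ADJUST should absorb another -TSC_ADJUST_VALUE.
 */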
static void l2_guest_code(void)
{
	uint64_t l1_tsc = rdtsc() - TSC_OFFSET_VALUE;

	wrmsr(MSR_IA32_TSC, l1_tsc - TSC_ADJUST_VALUE);
	check_ia32_tsc_adjust(-2 * TSC_ADJUST_VALUE);

	/* Exit to L1 */
	__asm__ __volatile__("vmcall");
}
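/*
 * L1 first moves its own TSC back by TSC_ADJUST_VALUE and checks that
 * IA32_TSC_ADJUST followed, then launches L2 with a TSC offset and
 * with MSR intercepts disabled. Along the way it verifies that a
 * failed VM-entry (bad GUEST_CR3) neither runs L2 nor perturbs
 * IA32_TSC_ADJUST, and re-checks the MSR after L2's write to IA32_TSC.
 */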
static void l1_guest_code(struct vmx_pages *vmx_pages)
{
#define L2_GUEST_STACK_SIZE 64
	unsigned long l2_guest_stack[L2_GUEST_STACK_SIZE];
	uint32_t control;
	uintptr_t save_cr3;

	GUEST_ASSERT(rdtsc() < TSC_ADJUST_VALUE);
	wrmsr(MSR_IA32_TSC, rdtsc() - TSC_ADJUST_VALUE);
	check_ia32_tsc_adjust(-1 * TSC_ADJUST_VALUE);

	GUEST_ASSERT(prepare_for_vmx_operation(vmx_pages));

	/* Prepare the VMCS for L2 execution. */
	prepare_vmcs(vmx_pages, l2_guest_code,
		     &l2_guest_stack[L2_GUEST_STACK_SIZE]);
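	/*
	 * Enable MSR bitmaps (assuming the framework hands back a
	 * zeroed bitmap page, no MSR access is intercepted, so L2's
	 * WRMSR(IA32_TSC) goes straight through to L1's timebase) and
	 * give L2 a negative TSC offset.
	 */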
	control = vmreadz(CPU_BASED_VM_EXEC_CONTROL);
	control |= CPU_BASED_USE_MSR_BITMAPS | CPU_BASED_USE_TSC_OFFSETING;
	vmwrite(CPU_BASED_VM_EXEC_CONTROL, control);
	vmwrite(TSC_OFFSET, TSC_OFFSET_VALUE);
	/* Jump into L2.  First, test failure to load guest CR3.  */
	save_cr3 = vmreadz(GUEST_CR3);
	vmwrite(GUEST_CR3, -1ull);
	GUEST_ASSERT(!vmlaunch());
	GUEST_ASSERT(vmreadz(VM_EXIT_REASON) ==
		     (EXIT_REASON_FAILED_VMENTRY | EXIT_REASON_INVALID_STATE));
	check_ia32_tsc_adjust(-1 * TSC_ADJUST_VALUE);
	vmwrite(GUEST_CR3, save_cr3);

	GUEST_ASSERT(!vmlaunch());
	GUEST_ASSERT(vmreadz(VM_EXIT_REASON) == EXIT_REASON_VMCALL);

	check_ia32_tsc_adjust(-2 * TSC_ADJUST_VALUE);

	GUEST_DONE();
}
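/*
 * Host-side pretty-printer for the IA32_TSC_ADJUST samples the guest
 * sends up via GUEST_SYNC, expressed as a multiple of TSC_ADJUST_VALUE
 * plus a remainder.
 */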
void report(int64_t val)
{
	printf("IA32_TSC_ADJUST is %ld (%lld * TSC_ADJUST_VALUE + %lld).\n",
	       val, val / TSC_ADJUST_VALUE, val % TSC_ADJUST_VALUE);
}
int main(int argc, char *argv[])
{
	struct vmx_pages *vmx_pages;
	vm_vaddr_t vmx_pages_gva;
	struct kvm_cpuid_entry2 *entry = kvm_get_supported_cpuid_entry(1);

	if (!(entry->ecx & CPUID_VMX)) {
		fprintf(stderr, "nested VMX not enabled, skipping test\n");
		exit(KSFT_SKIP);
	}

	vm = vm_create_default(VCPU_ID, 0, (void *) l1_guest_code);
	vcpu_set_cpuid(vm, VCPU_ID, kvm_get_supported_cpuid());

	/* Allocate VMX pages and shared descriptors (vmx_pages). */
	vmx_pages = vcpu_alloc_vmx(vm, &vmx_pages_gva);
	vcpu_args_set(vm, VCPU_ID, 1, vmx_pages_gva);
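	/*
	 * Service the guest's port I/O until it signals completion:
	 * GUEST_PORT_SYNC carries an IA32_TSC_ADJUST sample to report,
	 * GUEST_PORT_ABORT carries a failed guest assertion, and
	 * GUEST_PORT_DONE ends the test.
	 */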
	for (;;) {
		volatile struct kvm_run *run = vcpu_state(vm, VCPU_ID);
		struct guest_args args;

		vcpu_run(vm, VCPU_ID);
		guest_args_read(vm, VCPU_ID, &args);

		TEST_ASSERT(run->exit_reason == KVM_EXIT_IO,
			    "Got exit_reason other than KVM_EXIT_IO: %u (%s)\n",
			    run->exit_reason,
			    exit_reason_str(run->exit_reason));

		switch (args.port) {
		case GUEST_PORT_ABORT:
			TEST_ASSERT(false, "%s", (const char *) args.arg0);
			/* NOT REACHED */
		case GUEST_PORT_SYNC:
			report(args.arg1);
			break;
		case GUEST_PORT_DONE:
			goto done;
		default:
			TEST_ASSERT(false, "Unknown port 0x%x.", args.port);
		}
	}
done:
	/*
	 * Free the VM after the label: the loop above only exits via
	 * "goto done", so a free placed before the label would be
	 * unreachable and the VM would leak.
	 */
	kvm_vm_free(vm);

	return 0;
}