ptp_kvm_x86.c

// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Virtual PTP 1588 clock for use with KVM guests
 *
 * Copyright (C) 2017 Red Hat Inc.
 */

#include <linux/device.h>
#include <linux/kernel.h>
#include <asm/pvclock.h>
#include <asm/kvmclock.h>
#include <linux/module.h>
#include <uapi/asm/kvm_para.h>
#include <uapi/linux/kvm_para.h>
#include <linux/ptp_clock_kernel.h>
#include <linux/ptp_kvm.h>
#include <linux/set_memory.h>

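/*
 * Clock pairing state shared with the host: the KVM_HC_CLOCK_PAIRING
 * hypercall makes the host fill a struct kvm_clock_pairing at the guest
 * physical address in clock_pair_gpa. With guest memory encryption the
 * structure lives in a dedicated decrypted page; otherwise the static
 * clock_pair_glbl below is used directly.
 */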
static phys_addr_t clock_pair_gpa;
static struct kvm_clock_pairing clock_pair_glbl;
static struct kvm_clock_pairing *clock_pair;

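/*
 * Probe for host clock pairing support: set up the shared buffer (in a
 * decrypted page when the guest runs with memory encryption) and issue a
 * first KVM_HC_CLOCK_PAIRING hypercall. Returns 0 on success or a negative
 * error code when the hypercall or the kvmclock pvti mapping is unavailable.
 */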
int kvm_arch_ptp_init(void)
{
	struct page *p;
	long ret;

	if (!kvm_para_available())
		return -EOPNOTSUPP;

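	/*
	 * With guest memory encryption the host cannot write into ordinary
	 * (encrypted) guest pages, so place the pairing structure in a page
	 * that is explicitly marked decrypted/shared with the host.
	 */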
	if (cc_platform_has(CC_ATTR_GUEST_MEM_ENCRYPT)) {
		p = alloc_page(GFP_KERNEL | __GFP_ZERO);
		if (!p)
			return -ENOMEM;

		clock_pair = page_address(p);
		ret = set_memory_decrypted((unsigned long)clock_pair, 1);
		if (ret) {
			__free_page(p);
			clock_pair = NULL;
			goto nofree;
		}
	} else {
		clock_pair = &clock_pair_glbl;
	}

	clock_pair_gpa = slow_virt_to_phys(clock_pair);
	if (!pvclock_get_pvti_cpu0_va()) {
		ret = -EOPNOTSUPP;
		goto err;
	}

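	/*
	 * Probe once for host support: a host without KVM_HC_CLOCK_PAIRING
	 * returns -KVM_ENOSYS, which is reported to the caller as
	 * -EOPNOTSUPP.
	 */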
	ret = kvm_hypercall2(KVM_HC_CLOCK_PAIRING, clock_pair_gpa,
			     KVM_CLOCK_PAIRING_WALLCLOCK);
	if (ret == -KVM_ENOSYS) {
		ret = -EOPNOTSUPP;
		goto err;
	}

	return ret;

err:
	kvm_arch_ptp_exit();
nofree:
	return ret;
}

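/*
 * Undo kvm_arch_ptp_init(): with guest memory encryption the dedicated page
 * is re-encrypted and freed; in the unencrypted case the static buffer needs
 * no cleanup.
 */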
void kvm_arch_ptp_exit(void)
{
	if (cc_platform_has(CC_ATTR_GUEST_MEM_ENCRYPT)) {
		WARN_ON(set_memory_encrypted((unsigned long)clock_pair, 1));
		free_page((unsigned long)clock_pair);
		clock_pair = NULL;
	}
}

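/*
 * Read the host wall clock time: issue a KVM_HC_CLOCK_PAIRING hypercall and
 * copy the host-provided seconds/nanoseconds pair into the caller's
 * timespec64.
 */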
int kvm_arch_ptp_get_clock(struct timespec64 *ts)
{
	long ret;

	ret = kvm_hypercall2(KVM_HC_CLOCK_PAIRING,
			     clock_pair_gpa,
			     KVM_CLOCK_PAIRING_WALLCLOCK);
	if (ret != 0) {
		pr_err_ratelimited("clock offset hypercall ret %ld\n", ret);
		return -EOPNOTSUPP;
	}

	ts->tv_sec = clock_pair->sec;
	ts->tv_nsec = clock_pair->nsec;

	return 0;
}

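/*
 * Return a paired sample: the host wall clock time in tspec and the matching
 * kvmclock cycle value in cycle, with cs_id naming the clocksource the cycle
 * value belongs to. The pvclock version/retry loop below discards samples
 * taken while the pvclock parameters were being updated.
 */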
int kvm_arch_ptp_get_crosststamp(u64 *cycle, struct timespec64 *tspec,
				 enum clocksource_ids *cs_id)
{
	struct pvclock_vcpu_time_info *src;
	unsigned int version;
	long ret;

	src = this_cpu_pvti();

	do {
		/*
		 * We are using a TSC value read in the host's
		 * kvm_hc_clock_pairing handling.
		 * So any changes to tsc_to_system_mul
		 * and tsc_shift or any other pvclock
		 * data invalidate that measurement.
		 */
		version = pvclock_read_begin(src);

		ret = kvm_hypercall2(KVM_HC_CLOCK_PAIRING,
				     clock_pair_gpa,
				     KVM_CLOCK_PAIRING_WALLCLOCK);
		if (ret != 0) {
			pr_err_ratelimited("clock pairing hypercall ret %ld\n", ret);
			return -EOPNOTSUPP;
		}

		tspec->tv_sec = clock_pair->sec;
		tspec->tv_nsec = clock_pair->nsec;
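		/*
		 * Convert the host-side TSC snapshot into a kvmclock cycle
		 * value using the current pvclock scaling parameters; the
		 * retry loop ensures those parameters did not change while
		 * the sample was taken.
		 */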
		*cycle = __pvclock_read_cycles(src, clock_pair->tsc);
	} while (pvclock_read_retry(src, version));

	*cs_id = CSID_X86_KVM_CLK;

	return 0;
}