  1. // SPDX-License-Identifier: GPL-2.0-only
  2. /*
  3. * KVM L1 hypervisor optimizations on Hyper-V.
  4. */
  5. #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
  6. #include <linux/kvm_host.h>
  7. #include <asm/mshyperv.h>
  8. #include "hyperv.h"
  9. #include "kvm_onhyperv.h"
/*
 * GFN range handed to hv_remote_flush_root_tdp() and copied into the
 * Hyper-V flush list by kvm_fill_hv_flush_list_func().
 */
struct kvm_hv_tlb_range {
	u64 start_gfn;	/* first guest frame number to flush */
	u64 pages;	/* number of pages, starting at start_gfn */
};
  14. static int kvm_fill_hv_flush_list_func(struct hv_guest_mapping_flush_list *flush,
  15. void *data)
  16. {
  17. struct kvm_hv_tlb_range *range = data;
  18. return hyperv_fill_flush_guest_mapping_list(flush, range->start_gfn,
  19. range->pages);
  20. }
  21. static inline int hv_remote_flush_root_tdp(hpa_t root_tdp,
  22. struct kvm_hv_tlb_range *range)
  23. {
  24. if (range)
  25. return hyperv_flush_guest_mapping_range(root_tdp,
  26. kvm_fill_hv_flush_list_func, (void *)range);
  27. else
  28. return hyperv_flush_guest_mapping(root_tdp);
  29. }
/*
 * Flush guest TLB entries on all pCPUs via Hyper-V hypercalls, for the
 * given GFN range or, if @range is NULL, for all mappings.
 *
 * kvm_arch->hv_root_tdp caches the common root when all vCPUs share a
 * single valid TDP root; while it is valid only one hypercall is issued
 * instead of one per unique per-vCPU root.  Returns 0 on success or the
 * first hypercall error otherwise.
 */
static int __hv_flush_remote_tlbs_range(struct kvm *kvm,
					struct kvm_hv_tlb_range *range)
{
	struct kvm_arch *kvm_arch = &kvm->arch;
	struct kvm_vcpu *vcpu;
	int ret = 0, nr_unique_valid_roots;
	unsigned long i;
	hpa_t root;

	/* Serializes against hv_track_root_tdp() updating the cached root. */
	spin_lock(&kvm_arch->hv_root_tdp_lock);

	if (!VALID_PAGE(kvm_arch->hv_root_tdp)) {
		nr_unique_valid_roots = 0;

		/*
		 * Flush all valid roots, and see if all vCPUs have converged
		 * on a common root, in which case future flushes can skip the
		 * loop and flush the common root.
		 */
		kvm_for_each_vcpu(i, vcpu, kvm) {
			root = vcpu->arch.hv_root_tdp;
			/* Skip invalid roots and duplicates of the tracked root. */
			if (!VALID_PAGE(root) || root == kvm_arch->hv_root_tdp)
				continue;

			/*
			 * Set the tracked root to the first valid root.  Keep
			 * this root for the entirety of the loop even if more
			 * roots are encountered as a low effort optimization
			 * to avoid flushing the same (first) root again.
			 */
			if (++nr_unique_valid_roots == 1)
				kvm_arch->hv_root_tdp = root;

			/* Keep counting roots after a failure, but stop flushing. */
			if (!ret)
				ret = hv_remote_flush_root_tdp(root, range);

			/*
			 * Stop processing roots if a failure occurred and
			 * multiple valid roots have already been detected.
			 */
			if (ret && nr_unique_valid_roots > 1)
				break;
		}

		/*
		 * The optimized flush of a single root can't be used if there
		 * are multiple valid roots (obviously).
		 */
		if (nr_unique_valid_roots > 1)
			kvm_arch->hv_root_tdp = INVALID_PAGE;
	} else {
		/* Fast path: all vCPUs share one valid root, flush it once. */
		ret = hv_remote_flush_root_tdp(kvm_arch->hv_root_tdp, range);
	}

	spin_unlock(&kvm_arch->hv_root_tdp_lock);
	return ret;
}
  79. int hv_flush_remote_tlbs_range(struct kvm *kvm, gfn_t start_gfn, gfn_t nr_pages)
  80. {
  81. struct kvm_hv_tlb_range range = {
  82. .start_gfn = start_gfn,
  83. .pages = nr_pages,
  84. };
  85. return __hv_flush_remote_tlbs_range(kvm, &range);
  86. }
  87. EXPORT_SYMBOL_GPL(hv_flush_remote_tlbs_range);
  88. int hv_flush_remote_tlbs(struct kvm *kvm)
  89. {
  90. return __hv_flush_remote_tlbs_range(kvm, NULL);
  91. }
  92. EXPORT_SYMBOL_GPL(hv_flush_remote_tlbs);
  93. void hv_track_root_tdp(struct kvm_vcpu *vcpu, hpa_t root_tdp)
  94. {
  95. struct kvm_arch *kvm_arch = &vcpu->kvm->arch;
  96. if (kvm_x86_ops.flush_remote_tlbs == hv_flush_remote_tlbs) {
  97. spin_lock(&kvm_arch->hv_root_tdp_lock);
  98. vcpu->arch.hv_root_tdp = root_tdp;
  99. if (root_tdp != kvm_arch->hv_root_tdp)
  100. kvm_arch->hv_root_tdp = INVALID_PAGE;
  101. spin_unlock(&kvm_arch->hv_root_tdp_lock);
  102. }
  103. }
  104. EXPORT_SYMBOL_GPL(hv_track_root_tdp);