// nested.c
// SPDX-License-Identifier: GPL-2.0
/*
 * Hyper-V nested virtualization code.
 *
 * Copyright (C) 2018, Microsoft, Inc.
 *
 * Author : Lan Tianyu <Tianyu.Lan@microsoft.com>
 */
#define pr_fmt(fmt) "Hyper-V: " fmt

#include <linux/types.h>
#include <asm/hyperv-tlfs.h>
#include <asm/mshyperv.h>
#include <asm/tlbflush.h>
#include <asm/trace/hyperv.h>
  15. int hyperv_flush_guest_mapping(u64 as)
  16. {
  17. struct hv_guest_mapping_flush *flush;
  18. u64 status;
  19. unsigned long flags;
  20. int ret = -ENOTSUPP;
  21. if (!hv_hypercall_pg)
  22. goto fault;
  23. local_irq_save(flags);
  24. flush = *this_cpu_ptr(hyperv_pcpu_input_arg);
  25. if (unlikely(!flush)) {
  26. local_irq_restore(flags);
  27. goto fault;
  28. }
  29. flush->address_space = as;
  30. flush->flags = 0;
  31. status = hv_do_hypercall(HVCALL_FLUSH_GUEST_PHYSICAL_ADDRESS_SPACE,
  32. flush, NULL);
  33. local_irq_restore(flags);
  34. if (hv_result_success(status))
  35. ret = 0;
  36. fault:
  37. trace_hyperv_nested_flush_guest_mapping(as, ret);
  38. return ret;
  39. }
  40. EXPORT_SYMBOL_GPL(hyperv_flush_guest_mapping);
  41. int hyperv_fill_flush_guest_mapping_list(
  42. struct hv_guest_mapping_flush_list *flush,
  43. u64 start_gfn, u64 pages)
  44. {
  45. u64 cur = start_gfn;
  46. u64 additional_pages;
  47. int gpa_n = 0;
  48. do {
  49. /*
  50. * If flush requests exceed max flush count, go back to
  51. * flush tlbs without range.
  52. */
  53. if (gpa_n >= HV_MAX_FLUSH_REP_COUNT)
  54. return -ENOSPC;
  55. additional_pages = min_t(u64, pages, HV_MAX_FLUSH_PAGES) - 1;
  56. flush->gpa_list[gpa_n].page.additional_pages = additional_pages;
  57. flush->gpa_list[gpa_n].page.largepage = false;
  58. flush->gpa_list[gpa_n].page.basepfn = cur;
  59. pages -= additional_pages + 1;
  60. cur += additional_pages + 1;
  61. gpa_n++;
  62. } while (pages > 0);
  63. return gpa_n;
  64. }
  65. EXPORT_SYMBOL_GPL(hyperv_fill_flush_guest_mapping_list);
  66. int hyperv_flush_guest_mapping_range(u64 as,
  67. hyperv_fill_flush_list_func fill_flush_list_func, void *data)
  68. {
  69. struct hv_guest_mapping_flush_list *flush;
  70. u64 status;
  71. unsigned long flags;
  72. int ret = -ENOTSUPP;
  73. int gpa_n = 0;
  74. if (!hv_hypercall_pg || !fill_flush_list_func)
  75. goto fault;
  76. local_irq_save(flags);
  77. flush = *this_cpu_ptr(hyperv_pcpu_input_arg);
  78. if (unlikely(!flush)) {
  79. local_irq_restore(flags);
  80. goto fault;
  81. }
  82. flush->address_space = as;
  83. flush->flags = 0;
  84. gpa_n = fill_flush_list_func(flush, data);
  85. if (gpa_n < 0) {
  86. local_irq_restore(flags);
  87. goto fault;
  88. }
  89. status = hv_do_rep_hypercall(HVCALL_FLUSH_GUEST_PHYSICAL_ADDRESS_LIST,
  90. gpa_n, 0, flush, NULL);
  91. local_irq_restore(flags);
  92. if (hv_result_success(status))
  93. ret = 0;
  94. else
  95. ret = hv_result(status);
  96. fault:
  97. trace_hyperv_nested_flush_guest_mapping_range(as, ret);
  98. return ret;
  99. }
  100. EXPORT_SYMBOL_GPL(hyperv_flush_guest_mapping_range);