// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2019 Western Digital Corporation or its affiliates.
 *
 * Authors:
 *	Anup Patel <anup.patel@wdc.com>
 */

#include <linux/errno.h>
#include <linux/err.h>
#include <linux/module.h>
#include <linux/uaccess.h>
#include <linux/kvm_host.h>
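
/*
 * VM-wide statistics exposed to userspace through the binary stats
 * interface (KVM_GET_STATS_FD); the header describes the layout of the
 * descriptor and data blocks that follow it.
 */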
const struct _kvm_stats_desc kvm_vm_stats_desc[] = {
	KVM_GENERIC_VM_STATS()
};
static_assert(ARRAY_SIZE(kvm_vm_stats_desc) ==
		sizeof(struct kvm_vm_stat) / sizeof(u64));

const struct kvm_stats_header kvm_vm_stats_header = {
	.name_size = KVM_STATS_NAME_SIZE,
	.num_desc = ARRAY_SIZE(kvm_vm_stats_desc),
	.id_offset = sizeof(struct kvm_stats_header),
	.desc_offset = sizeof(struct kvm_stats_header) + KVM_STATS_NAME_SIZE,
	.data_offset = sizeof(struct kvm_stats_header) + KVM_STATS_NAME_SIZE +
		       sizeof(kvm_vm_stats_desc),
};
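
/*
 * Called during KVM_CREATE_VM: allocate the g-stage page table, set up
 * the VMID allocator, and initialize the in-kernel AIA and guest timer.
 * Any failure after the page table is allocated must free it again.
 */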
int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
{
	int r;

	r = kvm_riscv_gstage_alloc_pgd(kvm);
	if (r)
		return r;

	r = kvm_riscv_gstage_vmid_init(kvm);
	if (r) {
		kvm_riscv_gstage_free_pgd(kvm);
		return r;
	}

	kvm_riscv_aia_init_vm(kvm);

	kvm_riscv_guest_timer_init(kvm);

	return 0;
}

void kvm_arch_destroy_vm(struct kvm *kvm)
{
	kvm_destroy_vcpus(kvm);
	kvm_riscv_aia_destroy_vm(kvm);
}
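
/*
 * KVM_IRQ_LINE: assert or deassert a wired interrupt line. This only
 * works when the AIA irqchip is emulated in the kernel.
 */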
int kvm_vm_ioctl_irq_line(struct kvm *kvm, struct kvm_irq_level *irql,
			  bool line_status)
{
	if (!irqchip_in_kernel(kvm))
		return -ENXIO;

	return kvm_riscv_aia_inject_irq(kvm, irql->irq, irql->level);
}
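
/*
 * Deliver an MSI described by a routing entry to the in-kernel AIA.
 * MSIs are edge-triggered, so a "low" level is not a deliverable event.
 */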
int kvm_set_msi(struct kvm_kernel_irq_routing_entry *e,
		struct kvm *kvm, int irq_source_id,
		int level, bool line_status)
{
	struct kvm_msi msi;

	if (!level)
		return -1;

	msi.address_lo = e->msi.address_lo;
	msi.address_hi = e->msi.address_hi;
	msi.data = e->msi.data;
	msi.flags = e->msi.flags;
	msi.devid = e->msi.devid;

	return kvm_riscv_aia_inject_msi(kvm, &msi);
}

static int kvm_riscv_set_irq(struct kvm_kernel_irq_routing_entry *e,
			     struct kvm *kvm, int irq_source_id,
			     int level, bool line_status)
{
	return kvm_riscv_aia_inject_irq(kvm, e->irqchip.pin, level);
}
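
/*
 * Install a 1:1 GSI-to-pin routing table covering the first "lines"
 * interrupt lines of the in-kernel irqchip.
 */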
int kvm_riscv_setup_default_irq_routing(struct kvm *kvm, u32 lines)
{
	struct kvm_irq_routing_entry *ents;
	int i, rc;

	ents = kcalloc(lines, sizeof(*ents), GFP_KERNEL);
	if (!ents)
		return -ENOMEM;

	for (i = 0; i < lines; i++) {
		ents[i].gsi = i;
		ents[i].type = KVM_IRQ_ROUTING_IRQCHIP;
		ents[i].u.irqchip.irqchip = 0;
		ents[i].u.irqchip.pin = i;
	}
	rc = kvm_set_irq_routing(kvm, ents, lines, 0);
	kfree(ents);

	return rc;
}

bool kvm_arch_can_set_irq_routing(struct kvm *kvm)
{
	return irqchip_in_kernel(kvm);
}
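
/*
 * KVM_SET_GSI_ROUTING: translate one userspace routing entry into its
 * in-kernel form, validating irqchip and pin numbers along the way.
 */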
int kvm_set_routing_entry(struct kvm *kvm,
			  struct kvm_kernel_irq_routing_entry *e,
			  const struct kvm_irq_routing_entry *ue)
{
	int r = -EINVAL;

	switch (ue->type) {
	case KVM_IRQ_ROUTING_IRQCHIP:
		e->set = kvm_riscv_set_irq;
		e->irqchip.irqchip = ue->u.irqchip.irqchip;
		e->irqchip.pin = ue->u.irqchip.pin;
		if ((e->irqchip.pin >= KVM_IRQCHIP_NUM_PINS) ||
		    (e->irqchip.irqchip >= KVM_NR_IRQCHIPS))
			goto out;
		break;
	case KVM_IRQ_ROUTING_MSI:
		e->set = kvm_set_msi;
		e->msi.address_lo = ue->u.msi.address_lo;
		e->msi.address_hi = ue->u.msi.address_hi;
		e->msi.data = ue->u.msi.data;
		e->msi.flags = ue->flags;
		e->msi.devid = ue->u.msi.devid;
		break;
	default:
		goto out;
	}
	r = 0;
out:
	return r;
}
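
/*
 * Atomic-context injection path used by irqfd. Returning -EWOULDBLOCK
 * pushes the injection to the non-atomic path instead.
 */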
int kvm_arch_set_irq_inatomic(struct kvm_kernel_irq_routing_entry *e,
			      struct kvm *kvm, int irq_source_id, int level,
			      bool line_status)
{
	if (!level)
		return -EWOULDBLOCK;

	switch (e->type) {
	case KVM_IRQ_ROUTING_MSI:
		return kvm_set_msi(e, kvm, irq_source_id, level, line_status);
	case KVM_IRQ_ROUTING_IRQCHIP:
		return kvm_riscv_set_irq(e, kvm, irq_source_id,
					 level, line_status);
	}

	return -EWOULDBLOCK;
}

bool kvm_arch_irqchip_in_kernel(struct kvm *kvm)
{
	return irqchip_in_kernel(kvm);
}
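
/*
 * KVM_CHECK_EXTENSION: report which capabilities this VM supports.
 * KVM_CAP_IRQCHIP depends on the AIA being available on the host.
 */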
int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext)
{
	int r;

	switch (ext) {
	case KVM_CAP_IRQCHIP:
		r = kvm_riscv_aia_available();
		break;
	case KVM_CAP_IOEVENTFD:
	case KVM_CAP_USER_MEMORY:
	case KVM_CAP_SYNC_MMU:
	case KVM_CAP_DESTROY_MEMORY_REGION_WORKS:
	case KVM_CAP_ONE_REG:
	case KVM_CAP_READONLY_MEM:
	case KVM_CAP_MP_STATE:
	case KVM_CAP_IMMEDIATE_EXIT:
	case KVM_CAP_SET_GUEST_DEBUG:
		r = 1;
		break;
	case KVM_CAP_NR_VCPUS:
		r = min_t(unsigned int, num_online_cpus(), KVM_MAX_VCPUS);
		break;
	case KVM_CAP_MAX_VCPUS:
		r = KVM_MAX_VCPUS;
		break;
	case KVM_CAP_NR_MEMSLOTS:
		r = KVM_USER_MEM_SLOTS;
		break;
	case KVM_CAP_VM_GPA_BITS:
		r = kvm_riscv_gstage_gpa_bits();
		break;
	default:
		r = 0;
		break;
	}

	return r;
}

int kvm_arch_vm_ioctl(struct file *filp, unsigned int ioctl, unsigned long arg)
{
	return -EINVAL;
}