  1. // SPDX-License-Identifier: GPL-2.0
  2. /*
  3. * Copyright (C) 2019 Western Digital Corporation or its affiliates.
  4. *
  5. * Authors:
  6. * Anup Patel <anup.patel@wdc.com>
  7. */
  8. #include <linux/errno.h>
  9. #include <linux/err.h>
  10. #include <linux/module.h>
  11. #include <linux/kvm_host.h>
  12. #include <asm/csr.h>
  13. #include <asm/cpufeature.h>
  14. #include <asm/sbi.h>
  15. long kvm_arch_dev_ioctl(struct file *filp,
  16. unsigned int ioctl, unsigned long arg)
  17. {
  18. return -EINVAL;
  19. }
/*
 * Per-CPU enable path for virtualization: program the hypervisor
 * exception/interrupt delegation CSRs, restrict direct guest counter
 * access, clear pending virtual interrupts, and enable the AIA.
 *
 * Always returns 0.
 */
int kvm_arch_enable_virtualization_cpu(void)
{
	/* Delegate the default set of exceptions/interrupts to VS-mode. */
	csr_write(CSR_HEDELEG, KVM_HEDELEG_DEFAULT);
	csr_write(CSR_HIDELEG, KVM_HIDELEG_DEFAULT);

	/* VS should access only the time counter directly. Everything else should trap */
	csr_write(CSR_HCOUNTEREN, 0x02);	/* bit 1 = TM (time counter) */

	/* Start with no virtual interrupts pending for the guest. */
	csr_write(CSR_HVIP, 0);

	kvm_riscv_aia_enable();

	return 0;
}
/*
 * Per-CPU disable path for virtualization: quiesce the AIA, then clear
 * the virtual-interrupt and delegation CSRs. The clear order below is
 * mandatory — see the comment in the body.
 */
void kvm_arch_disable_virtualization_cpu(void)
{
	kvm_riscv_aia_disable();

	/*
	 * After clearing the hideleg CSR, the host kernel will receive
	 * spurious interrupts if hvip CSR has pending interrupts and the
	 * corresponding enable bits in vsie CSR are asserted. To avoid it,
	 * hvip CSR and vsie CSR must be cleared before clearing hideleg CSR.
	 */
	csr_write(CSR_VSIE, 0);
	csr_write(CSR_HVIP, 0);
	csr_write(CSR_HEDELEG, 0);
	csr_write(CSR_HIDELEG, 0);
}
  44. static int __init riscv_kvm_init(void)
  45. {
  46. int rc;
  47. const char *str;
  48. if (!riscv_isa_extension_available(NULL, h)) {
  49. kvm_info("hypervisor extension not available\n");
  50. return -ENODEV;
  51. }
  52. if (sbi_spec_is_0_1()) {
  53. kvm_info("require SBI v0.2 or higher\n");
  54. return -ENODEV;
  55. }
  56. if (!sbi_probe_extension(SBI_EXT_RFENCE)) {
  57. kvm_info("require SBI RFENCE extension\n");
  58. return -ENODEV;
  59. }
  60. kvm_riscv_gstage_mode_detect();
  61. kvm_riscv_gstage_vmid_detect();
  62. rc = kvm_riscv_aia_init();
  63. if (rc && rc != -ENODEV)
  64. return rc;
  65. kvm_info("hypervisor extension available\n");
  66. switch (kvm_riscv_gstage_mode()) {
  67. case HGATP_MODE_SV32X4:
  68. str = "Sv32x4";
  69. break;
  70. case HGATP_MODE_SV39X4:
  71. str = "Sv39x4";
  72. break;
  73. case HGATP_MODE_SV48X4:
  74. str = "Sv48x4";
  75. break;
  76. case HGATP_MODE_SV57X4:
  77. str = "Sv57x4";
  78. break;
  79. default:
  80. return -ENODEV;
  81. }
  82. kvm_info("using %s G-stage page table format\n", str);
  83. kvm_info("VMID %ld bits available\n", kvm_riscv_gstage_vmid_bits());
  84. if (kvm_riscv_aia_available())
  85. kvm_info("AIA available with %d guest external interrupts\n",
  86. kvm_riscv_aia_nr_hgei);
  87. rc = kvm_init(sizeof(struct kvm_vcpu), 0, THIS_MODULE);
  88. if (rc) {
  89. kvm_riscv_aia_exit();
  90. return rc;
  91. }
  92. return 0;
  93. }
  94. module_init(riscv_kvm_init);
/*
 * Module teardown: release the AIA resources acquired in
 * riscv_kvm_init(), then unregister from the KVM core.
 */
static void __exit riscv_kvm_exit(void)
{
	kvm_riscv_aia_exit();
	kvm_exit();
}
module_exit(riscv_kvm_exit);