  1. // SPDX-License-Identifier: GPL-2.0
  2. /*
  3. * Copyright (C) 2020-2023 Loongson Technology Corporation Limited
  4. */
  5. #include <linux/err.h>
  6. #include <linux/module.h>
  7. #include <linux/kvm_host.h>
  8. #include <asm/cacheflush.h>
  9. #include <asm/cpufeature.h>
  10. #include <asm/kvm_csr.h>
  11. #include "trace.h"
/* Mask of the usable hardware VPID bits, derived from CSR.GSTAT.GIDBIT */
unsigned long vpid_mask;
/* World-switch entry points (exception entry / guest enter), copied to DMW */
struct kvm_world_switch *kvm_loongarch_ops;

/* Per-guest-CSR emulation flags (SW_GCSR/HW_GCSR), see kvm_init_gcsr_flag() */
static int gcsr_flag[CSR_MAX_NUMS];
/* Per-CPU guest context: VPID allocator cache and last-run vCPU */
static struct kvm_context __percpu *vmcs;
  16. int get_gcsr_flag(int csr)
  17. {
  18. if (csr < CSR_MAX_NUMS)
  19. return gcsr_flag[csr];
  20. return INVALID_GCSR;
  21. }
  22. static inline void set_gcsr_sw_flag(int csr)
  23. {
  24. if (csr < CSR_MAX_NUMS)
  25. gcsr_flag[csr] |= SW_GCSR;
  26. }
  27. static inline void set_gcsr_hw_flag(int csr)
  28. {
  29. if (csr < CSR_MAX_NUMS)
  30. gcsr_flag[csr] |= HW_GCSR;
  31. }
  32. /*
  33. * The default value of gcsr_flag[CSR] is 0, and we use this
  34. * function to set the flag to 1 (SW_GCSR) or 2 (HW_GCSR) if the
  35. * gcsr is software or hardware. It will be used by get/set_gcsr,
  36. * if gcsr_flag is HW we should use gcsrrd/gcsrwr to access it,
  37. * else use software csr to emulate it.
  38. */
  39. static void kvm_init_gcsr_flag(void)
  40. {
  41. set_gcsr_hw_flag(LOONGARCH_CSR_CRMD);
  42. set_gcsr_hw_flag(LOONGARCH_CSR_PRMD);
  43. set_gcsr_hw_flag(LOONGARCH_CSR_EUEN);
  44. set_gcsr_hw_flag(LOONGARCH_CSR_MISC);
  45. set_gcsr_hw_flag(LOONGARCH_CSR_ECFG);
  46. set_gcsr_hw_flag(LOONGARCH_CSR_ESTAT);
  47. set_gcsr_hw_flag(LOONGARCH_CSR_ERA);
  48. set_gcsr_hw_flag(LOONGARCH_CSR_BADV);
  49. set_gcsr_hw_flag(LOONGARCH_CSR_BADI);
  50. set_gcsr_hw_flag(LOONGARCH_CSR_EENTRY);
  51. set_gcsr_hw_flag(LOONGARCH_CSR_TLBIDX);
  52. set_gcsr_hw_flag(LOONGARCH_CSR_TLBEHI);
  53. set_gcsr_hw_flag(LOONGARCH_CSR_TLBELO0);
  54. set_gcsr_hw_flag(LOONGARCH_CSR_TLBELO1);
  55. set_gcsr_hw_flag(LOONGARCH_CSR_ASID);
  56. set_gcsr_hw_flag(LOONGARCH_CSR_PGDL);
  57. set_gcsr_hw_flag(LOONGARCH_CSR_PGDH);
  58. set_gcsr_hw_flag(LOONGARCH_CSR_PGD);
  59. set_gcsr_hw_flag(LOONGARCH_CSR_PWCTL0);
  60. set_gcsr_hw_flag(LOONGARCH_CSR_PWCTL1);
  61. set_gcsr_hw_flag(LOONGARCH_CSR_STLBPGSIZE);
  62. set_gcsr_hw_flag(LOONGARCH_CSR_RVACFG);
  63. set_gcsr_hw_flag(LOONGARCH_CSR_CPUID);
  64. set_gcsr_hw_flag(LOONGARCH_CSR_PRCFG1);
  65. set_gcsr_hw_flag(LOONGARCH_CSR_PRCFG2);
  66. set_gcsr_hw_flag(LOONGARCH_CSR_PRCFG3);
  67. set_gcsr_hw_flag(LOONGARCH_CSR_KS0);
  68. set_gcsr_hw_flag(LOONGARCH_CSR_KS1);
  69. set_gcsr_hw_flag(LOONGARCH_CSR_KS2);
  70. set_gcsr_hw_flag(LOONGARCH_CSR_KS3);
  71. set_gcsr_hw_flag(LOONGARCH_CSR_KS4);
  72. set_gcsr_hw_flag(LOONGARCH_CSR_KS5);
  73. set_gcsr_hw_flag(LOONGARCH_CSR_KS6);
  74. set_gcsr_hw_flag(LOONGARCH_CSR_KS7);
  75. set_gcsr_hw_flag(LOONGARCH_CSR_TMID);
  76. set_gcsr_hw_flag(LOONGARCH_CSR_TCFG);
  77. set_gcsr_hw_flag(LOONGARCH_CSR_TVAL);
  78. set_gcsr_hw_flag(LOONGARCH_CSR_TINTCLR);
  79. set_gcsr_hw_flag(LOONGARCH_CSR_CNTC);
  80. set_gcsr_hw_flag(LOONGARCH_CSR_LLBCTL);
  81. set_gcsr_hw_flag(LOONGARCH_CSR_TLBRENTRY);
  82. set_gcsr_hw_flag(LOONGARCH_CSR_TLBRBADV);
  83. set_gcsr_hw_flag(LOONGARCH_CSR_TLBRERA);
  84. set_gcsr_hw_flag(LOONGARCH_CSR_TLBRSAVE);
  85. set_gcsr_hw_flag(LOONGARCH_CSR_TLBRELO0);
  86. set_gcsr_hw_flag(LOONGARCH_CSR_TLBRELO1);
  87. set_gcsr_hw_flag(LOONGARCH_CSR_TLBREHI);
  88. set_gcsr_hw_flag(LOONGARCH_CSR_TLBRPRMD);
  89. set_gcsr_hw_flag(LOONGARCH_CSR_DMWIN0);
  90. set_gcsr_hw_flag(LOONGARCH_CSR_DMWIN1);
  91. set_gcsr_hw_flag(LOONGARCH_CSR_DMWIN2);
  92. set_gcsr_hw_flag(LOONGARCH_CSR_DMWIN3);
  93. set_gcsr_sw_flag(LOONGARCH_CSR_IMPCTL1);
  94. set_gcsr_sw_flag(LOONGARCH_CSR_IMPCTL2);
  95. set_gcsr_sw_flag(LOONGARCH_CSR_MERRCTL);
  96. set_gcsr_sw_flag(LOONGARCH_CSR_MERRINFO1);
  97. set_gcsr_sw_flag(LOONGARCH_CSR_MERRINFO2);
  98. set_gcsr_sw_flag(LOONGARCH_CSR_MERRENTRY);
  99. set_gcsr_sw_flag(LOONGARCH_CSR_MERRERA);
  100. set_gcsr_sw_flag(LOONGARCH_CSR_MERRSAVE);
  101. set_gcsr_sw_flag(LOONGARCH_CSR_CTAG);
  102. set_gcsr_sw_flag(LOONGARCH_CSR_DEBUG);
  103. set_gcsr_sw_flag(LOONGARCH_CSR_DERA);
  104. set_gcsr_sw_flag(LOONGARCH_CSR_DESAVE);
  105. set_gcsr_sw_flag(LOONGARCH_CSR_FWPC);
  106. set_gcsr_sw_flag(LOONGARCH_CSR_FWPS);
  107. set_gcsr_sw_flag(LOONGARCH_CSR_MWPC);
  108. set_gcsr_sw_flag(LOONGARCH_CSR_MWPS);
  109. set_gcsr_sw_flag(LOONGARCH_CSR_DB0ADDR);
  110. set_gcsr_sw_flag(LOONGARCH_CSR_DB0MASK);
  111. set_gcsr_sw_flag(LOONGARCH_CSR_DB0CTRL);
  112. set_gcsr_sw_flag(LOONGARCH_CSR_DB0ASID);
  113. set_gcsr_sw_flag(LOONGARCH_CSR_DB1ADDR);
  114. set_gcsr_sw_flag(LOONGARCH_CSR_DB1MASK);
  115. set_gcsr_sw_flag(LOONGARCH_CSR_DB1CTRL);
  116. set_gcsr_sw_flag(LOONGARCH_CSR_DB1ASID);
  117. set_gcsr_sw_flag(LOONGARCH_CSR_DB2ADDR);
  118. set_gcsr_sw_flag(LOONGARCH_CSR_DB2MASK);
  119. set_gcsr_sw_flag(LOONGARCH_CSR_DB2CTRL);
  120. set_gcsr_sw_flag(LOONGARCH_CSR_DB2ASID);
  121. set_gcsr_sw_flag(LOONGARCH_CSR_DB3ADDR);
  122. set_gcsr_sw_flag(LOONGARCH_CSR_DB3MASK);
  123. set_gcsr_sw_flag(LOONGARCH_CSR_DB3CTRL);
  124. set_gcsr_sw_flag(LOONGARCH_CSR_DB3ASID);
  125. set_gcsr_sw_flag(LOONGARCH_CSR_DB4ADDR);
  126. set_gcsr_sw_flag(LOONGARCH_CSR_DB4MASK);
  127. set_gcsr_sw_flag(LOONGARCH_CSR_DB4CTRL);
  128. set_gcsr_sw_flag(LOONGARCH_CSR_DB4ASID);
  129. set_gcsr_sw_flag(LOONGARCH_CSR_DB5ADDR);
  130. set_gcsr_sw_flag(LOONGARCH_CSR_DB5MASK);
  131. set_gcsr_sw_flag(LOONGARCH_CSR_DB5CTRL);
  132. set_gcsr_sw_flag(LOONGARCH_CSR_DB5ASID);
  133. set_gcsr_sw_flag(LOONGARCH_CSR_DB6ADDR);
  134. set_gcsr_sw_flag(LOONGARCH_CSR_DB6MASK);
  135. set_gcsr_sw_flag(LOONGARCH_CSR_DB6CTRL);
  136. set_gcsr_sw_flag(LOONGARCH_CSR_DB6ASID);
  137. set_gcsr_sw_flag(LOONGARCH_CSR_DB7ADDR);
  138. set_gcsr_sw_flag(LOONGARCH_CSR_DB7MASK);
  139. set_gcsr_sw_flag(LOONGARCH_CSR_DB7CTRL);
  140. set_gcsr_sw_flag(LOONGARCH_CSR_DB7ASID);
  141. set_gcsr_sw_flag(LOONGARCH_CSR_IB0ADDR);
  142. set_gcsr_sw_flag(LOONGARCH_CSR_IB0MASK);
  143. set_gcsr_sw_flag(LOONGARCH_CSR_IB0CTRL);
  144. set_gcsr_sw_flag(LOONGARCH_CSR_IB0ASID);
  145. set_gcsr_sw_flag(LOONGARCH_CSR_IB1ADDR);
  146. set_gcsr_sw_flag(LOONGARCH_CSR_IB1MASK);
  147. set_gcsr_sw_flag(LOONGARCH_CSR_IB1CTRL);
  148. set_gcsr_sw_flag(LOONGARCH_CSR_IB1ASID);
  149. set_gcsr_sw_flag(LOONGARCH_CSR_IB2ADDR);
  150. set_gcsr_sw_flag(LOONGARCH_CSR_IB2MASK);
  151. set_gcsr_sw_flag(LOONGARCH_CSR_IB2CTRL);
  152. set_gcsr_sw_flag(LOONGARCH_CSR_IB2ASID);
  153. set_gcsr_sw_flag(LOONGARCH_CSR_IB3ADDR);
  154. set_gcsr_sw_flag(LOONGARCH_CSR_IB3MASK);
  155. set_gcsr_sw_flag(LOONGARCH_CSR_IB3CTRL);
  156. set_gcsr_sw_flag(LOONGARCH_CSR_IB3ASID);
  157. set_gcsr_sw_flag(LOONGARCH_CSR_IB4ADDR);
  158. set_gcsr_sw_flag(LOONGARCH_CSR_IB4MASK);
  159. set_gcsr_sw_flag(LOONGARCH_CSR_IB4CTRL);
  160. set_gcsr_sw_flag(LOONGARCH_CSR_IB4ASID);
  161. set_gcsr_sw_flag(LOONGARCH_CSR_IB5ADDR);
  162. set_gcsr_sw_flag(LOONGARCH_CSR_IB5MASK);
  163. set_gcsr_sw_flag(LOONGARCH_CSR_IB5CTRL);
  164. set_gcsr_sw_flag(LOONGARCH_CSR_IB5ASID);
  165. set_gcsr_sw_flag(LOONGARCH_CSR_IB6ADDR);
  166. set_gcsr_sw_flag(LOONGARCH_CSR_IB6MASK);
  167. set_gcsr_sw_flag(LOONGARCH_CSR_IB6CTRL);
  168. set_gcsr_sw_flag(LOONGARCH_CSR_IB6ASID);
  169. set_gcsr_sw_flag(LOONGARCH_CSR_IB7ADDR);
  170. set_gcsr_sw_flag(LOONGARCH_CSR_IB7MASK);
  171. set_gcsr_sw_flag(LOONGARCH_CSR_IB7CTRL);
  172. set_gcsr_sw_flag(LOONGARCH_CSR_IB7ASID);
  173. set_gcsr_sw_flag(LOONGARCH_CSR_PERFCTRL0);
  174. set_gcsr_sw_flag(LOONGARCH_CSR_PERFCNTR0);
  175. set_gcsr_sw_flag(LOONGARCH_CSR_PERFCTRL1);
  176. set_gcsr_sw_flag(LOONGARCH_CSR_PERFCNTR1);
  177. set_gcsr_sw_flag(LOONGARCH_CSR_PERFCTRL2);
  178. set_gcsr_sw_flag(LOONGARCH_CSR_PERFCNTR2);
  179. set_gcsr_sw_flag(LOONGARCH_CSR_PERFCTRL3);
  180. set_gcsr_sw_flag(LOONGARCH_CSR_PERFCNTR3);
  181. }
/*
 * Allocate a fresh VPID for @vcpu from @cpu's per-CPU allocator.
 *
 * The low bits (vpid_mask) form the hardware VPID; the bits above act as
 * a generation counter (see kvm_check_vpid(), which compares them as
 * "ver").  When the low bits wrap to zero a new generation begins and
 * the whole guest TLB is flushed, since recycled VPID values could
 * otherwise alias stale entries.
 */
static void kvm_update_vpid(struct kvm_vcpu *vcpu, int cpu)
{
	unsigned long vpid;
	struct kvm_context *context;

	context = per_cpu_ptr(vcpu->kvm->arch.vmcs, cpu);
	vpid = context->vpid_cache + 1;
	if (!(vpid & vpid_mask)) {
		/* finish round of vpid loop */
		if (unlikely(!vpid))
			/* full counter wrapped: restart at generation 1 */
			vpid = vpid_mask + 1;

		++vpid; /* vpid 0 reserved for root */

		/* start new vpid cycle */
		kvm_flush_tlb_all();
	}

	/* Publish the new VPID both to the per-CPU cache and the vCPU */
	context->vpid_cache = vpid;
	vcpu->arch.vpid = vpid;
}
/*
 * Ensure @vcpu holds a VPID valid for the current CPU before guest entry,
 * allocating a new one if needed, then load it into CSR.GSTAT's GID field.
 */
void kvm_check_vpid(struct kvm_vcpu *vcpu)
{
	int cpu;
	bool migrated;
	unsigned long ver, old, vpid;
	struct kvm_context *context;

	cpu = smp_processor_id();
	/*
	 * Are we entering guest context on a different CPU to last time?
	 * If so, the vCPU's guest TLB state on this CPU may be stale.
	 */
	context = per_cpu_ptr(vcpu->kvm->arch.vmcs, cpu);
	migrated = (vcpu->cpu != cpu);

	/*
	 * Check if our vpid is of an older version
	 *
	 * We also discard the stored vpid if we've executed on
	 * another CPU, as the guest mappings may have changed without
	 * hypervisor knowledge.
	 */
	ver = vcpu->arch.vpid & ~vpid_mask;	/* generation bits only */
	old = context->vpid_cache & ~vpid_mask;
	if (migrated || (ver != old)) {
		kvm_update_vpid(vcpu, cpu);
		trace_kvm_vpid_change(vcpu, vcpu->arch.vpid);
		vcpu->cpu = cpu;
		/*
		 * NOTE(review): the pending GPA-flush request appears to be
		 * superseded by taking a fresh VPID (kvm_update_vpid flushes
		 * on generation rollover) — confirm against request handling.
		 */
		kvm_clear_request(KVM_REQ_TLB_FLUSH_GPA, vcpu);
	}

	/* Restore GSTAT(0x50).vpid */
	vpid = (vcpu->arch.vpid & vpid_mask) << CSR_GSTAT_GID_SHIFT;
	change_csr_gstat(vpid_mask << CSR_GSTAT_GID_SHIFT, vpid);
}
/* Point a newly-created VM at the module-wide per-CPU guest context array. */
void kvm_init_vmcs(struct kvm *kvm)
{
	kvm->arch.vmcs = vmcs;
}
/* No arch-specific /dev/kvm ioctls are implemented on LoongArch. */
long kvm_arch_dev_ioctl(struct file *filp,
			unsigned int ioctl, unsigned long arg)
{
	return -ENOIOCTLCMD;
}
/*
 * Per-CPU virtualization bring-up: reset the guest-control CSRs (GCFG,
 * GSTAT, GINTC, GTLBC), program the trap configuration in GCFG, flush
 * any stale guest TLB entries, and enable TLB guest-ID tagging.
 */
int kvm_arch_enable_virtualization_cpu(void)
{
	unsigned long env, gcfg = 0;

	/* Capture the hardware's GCFG capability bits before clearing it */
	env = read_csr_gcfg();

	/* First init gcfg, gstat, gintc, gtlbc. All guest use the same config */
	write_csr_gcfg(0);
	write_csr_gstat(0);
	write_csr_gintc(0);
	clear_csr_gtlbc(CSR_GTLBC_USETGID | CSR_GTLBC_TOTI);

	/*
	 * Enable virtualization features granting guest direct control of
	 * certain features:
	 * GCI=2:       Trap on init or unimplement cache instruction.
	 * TORU=0:      Trap on Root Unimplement.
	 * CACTRL=1:    Root control cache.
	 * TOP=0:       Trap on Previlege.
	 * TOE=0:       Trap on Exception.
	 * TIT=0:       Trap on Timer.
	 */
	if (env & CSR_GCFG_GCIP_SECURE)
		gcfg |= CSR_GCFG_GCI_SECURE;
	/* Mirror the MATP_ROOT capability bit into the MATC_ROOT control bit */
	if (env & CSR_GCFG_MATP_ROOT)
		gcfg |= CSR_GCFG_MATC_ROOT;
	write_csr_gcfg(gcfg);

	/* Drop any guest TLB entries left over from a previous enable cycle */
	kvm_flush_tlb_all();

	/* Enable using TGID */
	set_csr_gtlbc(CSR_GTLBC_USETGID);
	kvm_debug("GCFG:%lx GSTAT:%lx GINTC:%lx GTLBC:%lx",
		  read_csr_gcfg(), read_csr_gstat(), read_csr_gintc(), read_csr_gtlbc());

	return 0;
}
/*
 * Per-CPU virtualization tear-down: clear all guest-control CSRs and
 * flush guest TLB entries, undoing kvm_arch_enable_virtualization_cpu().
 */
void kvm_arch_disable_virtualization_cpu(void)
{
	write_csr_gcfg(0);
	write_csr_gstat(0);
	write_csr_gintc(0);
	clear_csr_gtlbc(CSR_GTLBC_USETGID | CSR_GTLBC_TOTI);

	/* Flush any remaining guest TLB entries */
	kvm_flush_tlb_all();
}
  280. static int kvm_loongarch_env_init(void)
  281. {
  282. int cpu, order;
  283. void *addr;
  284. struct kvm_context *context;
  285. vmcs = alloc_percpu(struct kvm_context);
  286. if (!vmcs) {
  287. pr_err("kvm: failed to allocate percpu kvm_context\n");
  288. return -ENOMEM;
  289. }
  290. kvm_loongarch_ops = kzalloc(sizeof(*kvm_loongarch_ops), GFP_KERNEL);
  291. if (!kvm_loongarch_ops) {
  292. free_percpu(vmcs);
  293. vmcs = NULL;
  294. return -ENOMEM;
  295. }
  296. /*
  297. * PGD register is shared between root kernel and kvm hypervisor.
  298. * So world switch entry should be in DMW area rather than TLB area
  299. * to avoid page fault reenter.
  300. *
  301. * In future if hardware pagetable walking is supported, we won't
  302. * need to copy world switch code to DMW area.
  303. */
  304. order = get_order(kvm_exception_size + kvm_enter_guest_size);
  305. addr = (void *)__get_free_pages(GFP_KERNEL, order);
  306. if (!addr) {
  307. free_percpu(vmcs);
  308. vmcs = NULL;
  309. kfree(kvm_loongarch_ops);
  310. kvm_loongarch_ops = NULL;
  311. return -ENOMEM;
  312. }
  313. memcpy(addr, kvm_exc_entry, kvm_exception_size);
  314. memcpy(addr + kvm_exception_size, kvm_enter_guest, kvm_enter_guest_size);
  315. flush_icache_range((unsigned long)addr, (unsigned long)addr + kvm_exception_size + kvm_enter_guest_size);
  316. kvm_loongarch_ops->exc_entry = addr;
  317. kvm_loongarch_ops->enter_guest = addr + kvm_exception_size;
  318. kvm_loongarch_ops->page_order = order;
  319. vpid_mask = read_csr_gstat();
  320. vpid_mask = (vpid_mask & CSR_GSTAT_GIDBIT) >> CSR_GSTAT_GIDBIT_SHIFT;
  321. if (vpid_mask)
  322. vpid_mask = GENMASK(vpid_mask - 1, 0);
  323. for_each_possible_cpu(cpu) {
  324. context = per_cpu_ptr(vmcs, cpu);
  325. context->vpid_cache = vpid_mask + 1;
  326. context->last_vcpu = NULL;
  327. }
  328. kvm_init_gcsr_flag();
  329. return 0;
  330. }
  331. static void kvm_loongarch_env_exit(void)
  332. {
  333. unsigned long addr;
  334. if (vmcs)
  335. free_percpu(vmcs);
  336. if (kvm_loongarch_ops) {
  337. if (kvm_loongarch_ops->exc_entry) {
  338. addr = (unsigned long)kvm_loongarch_ops->exc_entry;
  339. free_pages(addr, kvm_loongarch_ops->page_order);
  340. }
  341. kfree(kvm_loongarch_ops);
  342. }
  343. }
  344. static int kvm_loongarch_init(void)
  345. {
  346. int r;
  347. if (!cpu_has_lvz) {
  348. kvm_info("Hardware virtualization not available\n");
  349. return -ENODEV;
  350. }
  351. r = kvm_loongarch_env_init();
  352. if (r)
  353. return r;
  354. return kvm_init(sizeof(struct kvm_vcpu), 0, THIS_MODULE);
  355. }
/* Module exit: unregister from the KVM core first, then free global state. */
static void kvm_loongarch_exit(void)
{
	kvm_exit();
	kvm_loongarch_env_exit();
}
module_init(kvm_loongarch_init);
module_exit(kvm_loongarch_exit);

#ifdef MODULE
/* Autoload this module on CPUs that advertise the LVZ feature */
static const struct cpu_feature kvm_feature[] = {
	{ .feature = cpu_feature(LOONGARCH_LVZ) },
	{},
};
MODULE_DEVICE_TABLE(cpu, kvm_feature);
#endif