vcpu_sbi.c

// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2019 Western Digital Corporation or its affiliates.
 *
 * Authors:
 *     Atish Patra <atish.patra@wdc.com>
 */

#include <linux/errno.h>
#include <linux/err.h>
#include <linux/kvm_host.h>
#include <asm/sbi.h>
#include <asm/kvm_vcpu_sbi.h>

#ifndef CONFIG_RISCV_SBI_V01
static const struct kvm_vcpu_sbi_extension vcpu_sbi_ext_v01 = {
        .extid_start = -1UL,
        .extid_end = -1UL,
        .handler = NULL,
};
#endif

#ifndef CONFIG_RISCV_PMU_SBI
static const struct kvm_vcpu_sbi_extension vcpu_sbi_ext_pmu = {
        .extid_start = -1UL,
        .extid_end = -1UL,
        .handler = NULL,
};
#endif

struct kvm_riscv_sbi_extension_entry {
        enum KVM_RISCV_SBI_EXT_ID ext_idx;
        const struct kvm_vcpu_sbi_extension *ext_ptr;
};

static const struct kvm_riscv_sbi_extension_entry sbi_ext[] = {
        {
                .ext_idx = KVM_RISCV_SBI_EXT_V01,
                .ext_ptr = &vcpu_sbi_ext_v01,
        },
        {
                .ext_idx = KVM_RISCV_SBI_EXT_MAX, /* Can't be disabled */
                .ext_ptr = &vcpu_sbi_ext_base,
        },
        {
                .ext_idx = KVM_RISCV_SBI_EXT_TIME,
                .ext_ptr = &vcpu_sbi_ext_time,
        },
        {
                .ext_idx = KVM_RISCV_SBI_EXT_IPI,
                .ext_ptr = &vcpu_sbi_ext_ipi,
        },
        {
                .ext_idx = KVM_RISCV_SBI_EXT_RFENCE,
                .ext_ptr = &vcpu_sbi_ext_rfence,
        },
        {
                .ext_idx = KVM_RISCV_SBI_EXT_SRST,
                .ext_ptr = &vcpu_sbi_ext_srst,
        },
        {
                .ext_idx = KVM_RISCV_SBI_EXT_HSM,
                .ext_ptr = &vcpu_sbi_ext_hsm,
        },
        {
                .ext_idx = KVM_RISCV_SBI_EXT_PMU,
                .ext_ptr = &vcpu_sbi_ext_pmu,
        },
        {
                .ext_idx = KVM_RISCV_SBI_EXT_DBCN,
                .ext_ptr = &vcpu_sbi_ext_dbcn,
        },
        {
                .ext_idx = KVM_RISCV_SBI_EXT_STA,
                .ext_ptr = &vcpu_sbi_ext_sta,
        },
        {
                .ext_idx = KVM_RISCV_SBI_EXT_EXPERIMENTAL,
                .ext_ptr = &vcpu_sbi_ext_experimental,
        },
        {
                .ext_idx = KVM_RISCV_SBI_EXT_VENDOR,
                .ext_ptr = &vcpu_sbi_ext_vendor,
        },
};
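
/*
 * Look up the sbi_ext[] entry for a KVM_RISCV_SBI_EXT_* index.
 * Returns NULL if the index is out of range or not registered.
 */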
static const struct kvm_riscv_sbi_extension_entry *
riscv_vcpu_get_sbi_ext(struct kvm_vcpu *vcpu, unsigned long idx)
{
        const struct kvm_riscv_sbi_extension_entry *sext = NULL;

        if (idx >= KVM_RISCV_SBI_EXT_MAX)
                return NULL;

        for (int i = 0; i < ARRAY_SIZE(sbi_ext); i++) {
                if (sbi_ext[i].ext_idx == idx) {
                        sext = &sbi_ext[i];
                        break;
                }
        }

        return sext;
}

bool riscv_vcpu_supports_sbi_ext(struct kvm_vcpu *vcpu, int idx)
{
        struct kvm_vcpu_sbi_context *scontext = &vcpu->arch.sbi_context;
        const struct kvm_riscv_sbi_extension_entry *sext;

        sext = riscv_vcpu_get_sbi_ext(vcpu, idx);

        return sext && scontext->ext_status[sext->ext_idx] != KVM_RISCV_SBI_EXT_STATUS_UNAVAILABLE;
}
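
/*
 * Forward an SBI call to userspace via a KVM_EXIT_RISCV_SBI exit.
 * The return values default to SBI_ERR_NOT_SUPPORTED until userspace
 * fills them in and resumes the vCPU.
 */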
void kvm_riscv_vcpu_sbi_forward(struct kvm_vcpu *vcpu, struct kvm_run *run)
{
        struct kvm_cpu_context *cp = &vcpu->arch.guest_context;

        vcpu->arch.sbi_context.return_handled = 0;
        vcpu->stat.ecall_exit_stat++;
        run->exit_reason = KVM_EXIT_RISCV_SBI;
        run->riscv_sbi.extension_id = cp->a7;
        run->riscv_sbi.function_id = cp->a6;
        run->riscv_sbi.args[0] = cp->a0;
        run->riscv_sbi.args[1] = cp->a1;
        run->riscv_sbi.args[2] = cp->a2;
        run->riscv_sbi.args[3] = cp->a3;
        run->riscv_sbi.args[4] = cp->a4;
        run->riscv_sbi.args[5] = cp->a5;
        run->riscv_sbi.ret[0] = SBI_ERR_NOT_SUPPORTED;
        run->riscv_sbi.ret[1] = 0;
}
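
/*
 * Stop all vCPUs of the VM and exit to userspace with a
 * KVM_EXIT_SYSTEM_EVENT of the given type and reason.
 */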
void kvm_riscv_vcpu_sbi_system_reset(struct kvm_vcpu *vcpu,
                                     struct kvm_run *run,
                                     u32 type, u64 reason)
{
        unsigned long i;
        struct kvm_vcpu *tmp;

        kvm_for_each_vcpu(i, tmp, vcpu->kvm) {
                /* Take each target vCPU's own lock while updating its state */
                spin_lock(&tmp->arch.mp_state_lock);
                WRITE_ONCE(tmp->arch.mp_state.mp_state, KVM_MP_STATE_STOPPED);
                spin_unlock(&tmp->arch.mp_state_lock);
        }
        kvm_make_all_cpus_request(vcpu->kvm, KVM_REQ_SLEEP);

        memset(&run->system_event, 0, sizeof(run->system_event));
        run->system_event.type = type;
        run->system_event.ndata = 1;
        run->system_event.data[0] = reason;
        run->exit_reason = KVM_EXIT_SYSTEM_EVENT;
}
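
/*
 * Complete a userspace-handled SBI call: copy the return values from
 * the run struct back into a0/a1 and step sepc past the ecall.
 */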
int kvm_riscv_vcpu_sbi_return(struct kvm_vcpu *vcpu, struct kvm_run *run)
{
        struct kvm_cpu_context *cp = &vcpu->arch.guest_context;

        /* Handle SBI return only once */
        if (vcpu->arch.sbi_context.return_handled)
                return 0;
        vcpu->arch.sbi_context.return_handled = 1;

        /* Update return values */
        cp->a0 = run->riscv_sbi.ret[0];
        cp->a1 = run->riscv_sbi.ret[1];

        /* Move to next instruction */
        vcpu->arch.guest_context.sepc += 4;

        return 0;
}
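
/* ONE_REG helpers for enabling/disabling individual SBI extensions. */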
static int riscv_vcpu_set_sbi_ext_single(struct kvm_vcpu *vcpu,
                                         unsigned long reg_num,
                                         unsigned long reg_val)
{
        struct kvm_vcpu_sbi_context *scontext = &vcpu->arch.sbi_context;
        const struct kvm_riscv_sbi_extension_entry *sext;

        if (reg_val != 1 && reg_val != 0)
                return -EINVAL;

        sext = riscv_vcpu_get_sbi_ext(vcpu, reg_num);
        if (!sext || scontext->ext_status[sext->ext_idx] == KVM_RISCV_SBI_EXT_STATUS_UNAVAILABLE)
                return -ENOENT;

        scontext->ext_status[sext->ext_idx] = (reg_val) ?
                        KVM_RISCV_SBI_EXT_STATUS_ENABLED :
                        KVM_RISCV_SBI_EXT_STATUS_DISABLED;

        return 0;
}

static int riscv_vcpu_get_sbi_ext_single(struct kvm_vcpu *vcpu,
                                         unsigned long reg_num,
                                         unsigned long *reg_val)
{
        struct kvm_vcpu_sbi_context *scontext = &vcpu->arch.sbi_context;
        const struct kvm_riscv_sbi_extension_entry *sext;

        sext = riscv_vcpu_get_sbi_ext(vcpu, reg_num);
        if (!sext || scontext->ext_status[sext->ext_idx] == KVM_RISCV_SBI_EXT_STATUS_UNAVAILABLE)
                return -ENOENT;

        *reg_val = scontext->ext_status[sext->ext_idx] ==
                                KVM_RISCV_SBI_EXT_STATUS_ENABLED;

        return 0;
}

static int riscv_vcpu_set_sbi_ext_multi(struct kvm_vcpu *vcpu,
                                        unsigned long reg_num,
                                        unsigned long reg_val, bool enable)
{
        unsigned long i, ext_id;

        if (reg_num > KVM_REG_RISCV_SBI_MULTI_REG_LAST)
                return -ENOENT;

        for_each_set_bit(i, &reg_val, BITS_PER_LONG) {
                ext_id = i + reg_num * BITS_PER_LONG;
                if (ext_id >= KVM_RISCV_SBI_EXT_MAX)
                        break;

                riscv_vcpu_set_sbi_ext_single(vcpu, ext_id, enable);
        }

        return 0;
}

static int riscv_vcpu_get_sbi_ext_multi(struct kvm_vcpu *vcpu,
                                        unsigned long reg_num,
                                        unsigned long *reg_val)
{
        unsigned long i, ext_id, ext_val;

        if (reg_num > KVM_REG_RISCV_SBI_MULTI_REG_LAST)
                return -ENOENT;

        for (i = 0; i < BITS_PER_LONG; i++) {
                ext_id = i + reg_num * BITS_PER_LONG;
                if (ext_id >= KVM_RISCV_SBI_EXT_MAX)
                        break;

                ext_val = 0;
                riscv_vcpu_get_sbi_ext_single(vcpu, ext_id, &ext_val);
                if (ext_val)
                        *reg_val |= KVM_REG_RISCV_SBI_MULTI_MASK(ext_id);
        }

        return 0;
}
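
/*
 * ONE_REG interface for the SBI extension enable/disable registers.
 * Writes are only permitted before the vCPU has run for the first time.
 */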
int kvm_riscv_vcpu_set_reg_sbi_ext(struct kvm_vcpu *vcpu,
                                   const struct kvm_one_reg *reg)
{
        unsigned long __user *uaddr =
                        (unsigned long __user *)(unsigned long)reg->addr;
        unsigned long reg_num = reg->id & ~(KVM_REG_ARCH_MASK |
                                            KVM_REG_SIZE_MASK |
                                            KVM_REG_RISCV_SBI_EXT);
        unsigned long reg_val, reg_subtype;

        if (KVM_REG_SIZE(reg->id) != sizeof(unsigned long))
                return -EINVAL;

        if (vcpu->arch.ran_atleast_once)
                return -EBUSY;

        reg_subtype = reg_num & KVM_REG_RISCV_SUBTYPE_MASK;
        reg_num &= ~KVM_REG_RISCV_SUBTYPE_MASK;

        if (copy_from_user(&reg_val, uaddr, KVM_REG_SIZE(reg->id)))
                return -EFAULT;

        switch (reg_subtype) {
        case KVM_REG_RISCV_SBI_SINGLE:
                return riscv_vcpu_set_sbi_ext_single(vcpu, reg_num, reg_val);
        case KVM_REG_RISCV_SBI_MULTI_EN:
                return riscv_vcpu_set_sbi_ext_multi(vcpu, reg_num, reg_val, true);
        case KVM_REG_RISCV_SBI_MULTI_DIS:
                return riscv_vcpu_set_sbi_ext_multi(vcpu, reg_num, reg_val, false);
        default:
                return -ENOENT;
        }

        return 0;
}

int kvm_riscv_vcpu_get_reg_sbi_ext(struct kvm_vcpu *vcpu,
                                   const struct kvm_one_reg *reg)
{
        int rc;
        unsigned long __user *uaddr =
                        (unsigned long __user *)(unsigned long)reg->addr;
        unsigned long reg_num = reg->id & ~(KVM_REG_ARCH_MASK |
                                            KVM_REG_SIZE_MASK |
                                            KVM_REG_RISCV_SBI_EXT);
        unsigned long reg_val, reg_subtype;

        if (KVM_REG_SIZE(reg->id) != sizeof(unsigned long))
                return -EINVAL;

        reg_subtype = reg_num & KVM_REG_RISCV_SUBTYPE_MASK;
        reg_num &= ~KVM_REG_RISCV_SUBTYPE_MASK;

        reg_val = 0;
        switch (reg_subtype) {
        case KVM_REG_RISCV_SBI_SINGLE:
                rc = riscv_vcpu_get_sbi_ext_single(vcpu, reg_num, &reg_val);
                break;
        case KVM_REG_RISCV_SBI_MULTI_EN:
        case KVM_REG_RISCV_SBI_MULTI_DIS:
                rc = riscv_vcpu_get_sbi_ext_multi(vcpu, reg_num, &reg_val);
                if (!rc && reg_subtype == KVM_REG_RISCV_SBI_MULTI_DIS)
                        reg_val = ~reg_val;
                break;
        default:
                rc = -ENOENT;
        }
        if (rc)
                return rc;

        if (copy_to_user(uaddr, &reg_val, KVM_REG_SIZE(reg->id)))
                return -EFAULT;

        return 0;
}
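
/* ONE_REG access to per-vCPU SBI state (currently only the STA extension). */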
int kvm_riscv_vcpu_set_reg_sbi(struct kvm_vcpu *vcpu,
                               const struct kvm_one_reg *reg)
{
        unsigned long __user *uaddr =
                        (unsigned long __user *)(unsigned long)reg->addr;
        unsigned long reg_num = reg->id & ~(KVM_REG_ARCH_MASK |
                                            KVM_REG_SIZE_MASK |
                                            KVM_REG_RISCV_SBI_STATE);
        unsigned long reg_subtype, reg_val;

        if (KVM_REG_SIZE(reg->id) != sizeof(unsigned long))
                return -EINVAL;

        if (copy_from_user(&reg_val, uaddr, KVM_REG_SIZE(reg->id)))
                return -EFAULT;

        reg_subtype = reg_num & KVM_REG_RISCV_SUBTYPE_MASK;
        reg_num &= ~KVM_REG_RISCV_SUBTYPE_MASK;

        switch (reg_subtype) {
        case KVM_REG_RISCV_SBI_STA:
                return kvm_riscv_vcpu_set_reg_sbi_sta(vcpu, reg_num, reg_val);
        default:
                return -EINVAL;
        }

        return 0;
}

int kvm_riscv_vcpu_get_reg_sbi(struct kvm_vcpu *vcpu,
                               const struct kvm_one_reg *reg)
{
        unsigned long __user *uaddr =
                        (unsigned long __user *)(unsigned long)reg->addr;
        unsigned long reg_num = reg->id & ~(KVM_REG_ARCH_MASK |
                                            KVM_REG_SIZE_MASK |
                                            KVM_REG_RISCV_SBI_STATE);
        unsigned long reg_subtype, reg_val;
        int ret;

        if (KVM_REG_SIZE(reg->id) != sizeof(unsigned long))
                return -EINVAL;

        reg_subtype = reg_num & KVM_REG_RISCV_SUBTYPE_MASK;
        reg_num &= ~KVM_REG_RISCV_SUBTYPE_MASK;

        switch (reg_subtype) {
        case KVM_REG_RISCV_SBI_STA:
                ret = kvm_riscv_vcpu_get_reg_sbi_sta(vcpu, reg_num, &reg_val);
                break;
        default:
                return -EINVAL;
        }
        if (ret)
                return ret;

        if (copy_to_user(uaddr, &reg_val, KVM_REG_SIZE(reg->id)))
                return -EFAULT;

        return 0;
}
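
/*
 * Find the registered SBI extension that handles @extid, or NULL if none
 * matches or the matching extension is disabled for this vCPU. Entries with
 * ext_idx >= KVM_RISCV_SBI_EXT_MAX (the base extension) cannot be disabled
 * and skip the status check.
 */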
const struct kvm_vcpu_sbi_extension *kvm_vcpu_sbi_find_ext(
                                struct kvm_vcpu *vcpu, unsigned long extid)
{
        struct kvm_vcpu_sbi_context *scontext = &vcpu->arch.sbi_context;
        const struct kvm_riscv_sbi_extension_entry *entry;
        const struct kvm_vcpu_sbi_extension *ext;
        int i;

        for (i = 0; i < ARRAY_SIZE(sbi_ext); i++) {
                entry = &sbi_ext[i];
                ext = entry->ext_ptr;

                if (ext->extid_start <= extid && ext->extid_end >= extid) {
                        if (entry->ext_idx >= KVM_RISCV_SBI_EXT_MAX ||
                            scontext->ext_status[entry->ext_idx] ==
                                                KVM_RISCV_SBI_EXT_STATUS_ENABLED)
                                return ext;

                        return NULL;
                }
        }

        return NULL;
}
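
/*
 * Top-level SBI ecall dispatcher. Returns < 0 on error, 0 to exit to
 * userspace, or 1 to continue running the guest.
 */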
int kvm_riscv_vcpu_sbi_ecall(struct kvm_vcpu *vcpu, struct kvm_run *run)
{
        int ret = 1;
        bool next_sepc = true;
        struct kvm_cpu_context *cp = &vcpu->arch.guest_context;
        const struct kvm_vcpu_sbi_extension *sbi_ext;
        struct kvm_cpu_trap utrap = {0};
        struct kvm_vcpu_sbi_return sbi_ret = {
                .out_val = 0,
                .err_val = 0,
                .utrap = &utrap,
        };
        bool ext_is_v01 = false;

        sbi_ext = kvm_vcpu_sbi_find_ext(vcpu, cp->a7);
        if (sbi_ext && sbi_ext->handler) {
#ifdef CONFIG_RISCV_SBI_V01
                if (cp->a7 >= SBI_EXT_0_1_SET_TIMER &&
                    cp->a7 <= SBI_EXT_0_1_SHUTDOWN)
                        ext_is_v01 = true;
#endif
                ret = sbi_ext->handler(vcpu, run, &sbi_ret);
        } else {
                /* Return error for unsupported SBI calls */
                cp->a0 = SBI_ERR_NOT_SUPPORTED;
                goto ecall_done;
        }

        /*
         * When the SBI extension returns a Linux error code, it exits the
         * ioctl loop and forwards the error to userspace.
         */
        if (ret < 0) {
                next_sepc = false;
                goto ecall_done;
        }

        /* Handle special error cases, i.e. trap, exit or userspace forward */
        if (sbi_ret.utrap->scause) {
                /* No need to increment sepc or exit ioctl loop */
                ret = 1;
                sbi_ret.utrap->sepc = cp->sepc;
                kvm_riscv_vcpu_trap_redirect(vcpu, sbi_ret.utrap);
                next_sepc = false;
                goto ecall_done;
        }

        /* Exit the ioctl loop or propagate the error code to the guest */
        if (sbi_ret.uexit) {
                next_sepc = false;
                ret = 0;
        } else {
                cp->a0 = sbi_ret.err_val;
                ret = 1;
        }

ecall_done:
        if (next_sepc)
                cp->sepc += 4;
        /* a1 should only be updated when we continue the ioctl loop */
        if (!ext_is_v01 && ret == 1)
                cp->a1 = sbi_ret.out_val;

        return ret;
}
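
/*
 * Probe each registered SBI extension for this vCPU and record its
 * initial status: unavailable, disabled, or enabled.
 */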
void kvm_riscv_vcpu_sbi_init(struct kvm_vcpu *vcpu)
{
        struct kvm_vcpu_sbi_context *scontext = &vcpu->arch.sbi_context;
        const struct kvm_riscv_sbi_extension_entry *entry;
        const struct kvm_vcpu_sbi_extension *ext;
        int idx, i;

        for (i = 0; i < ARRAY_SIZE(sbi_ext); i++) {
                entry = &sbi_ext[i];
                ext = entry->ext_ptr;
                idx = entry->ext_idx;

                if (idx < 0 || idx >= ARRAY_SIZE(scontext->ext_status))
                        continue;

                if (ext->probe && !ext->probe(vcpu)) {
                        scontext->ext_status[idx] = KVM_RISCV_SBI_EXT_STATUS_UNAVAILABLE;
                        continue;
                }

                scontext->ext_status[idx] = ext->default_disabled ?
                                        KVM_RISCV_SBI_EXT_STATUS_DISABLED :
                                        KVM_RISCV_SBI_EXT_STATUS_ENABLED;
        }
}