vcpu_onereg.c

// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2019 Western Digital Corporation or its affiliates.
 * Copyright (C) 2023 Ventana Micro Systems Inc.
 *
 * Authors:
 *     Anup Patel <apatel@ventanamicro.com>
 */

#include <linux/bitops.h>
#include <linux/errno.h>
#include <linux/err.h>
#include <linux/uaccess.h>
#include <linux/kvm_host.h>
#include <asm/cacheflush.h>
#include <asm/cpufeature.h>
#include <asm/kvm_vcpu_vector.h>
#include <asm/vector.h>

#define KVM_RISCV_BASE_ISA_MASK        GENMASK(25, 0)

#define KVM_ISA_EXT_ARR(ext)        \
[KVM_RISCV_ISA_EXT_##ext] = RISCV_ISA_EXT_##ext

/* Mapping between KVM ISA Extension ID & Host ISA extension ID */
static const unsigned long kvm_isa_ext_arr[] = {
        /* Single letter extensions (alphabetically sorted) */
        [KVM_RISCV_ISA_EXT_A] = RISCV_ISA_EXT_a,
        [KVM_RISCV_ISA_EXT_C] = RISCV_ISA_EXT_c,
        [KVM_RISCV_ISA_EXT_D] = RISCV_ISA_EXT_d,
        [KVM_RISCV_ISA_EXT_F] = RISCV_ISA_EXT_f,
        [KVM_RISCV_ISA_EXT_H] = RISCV_ISA_EXT_h,
        [KVM_RISCV_ISA_EXT_I] = RISCV_ISA_EXT_i,
        [KVM_RISCV_ISA_EXT_M] = RISCV_ISA_EXT_m,
        [KVM_RISCV_ISA_EXT_V] = RISCV_ISA_EXT_v,
        /* Multi letter extensions (alphabetically sorted) */
        KVM_ISA_EXT_ARR(SMSTATEEN),
        KVM_ISA_EXT_ARR(SSAIA),
        KVM_ISA_EXT_ARR(SSCOFPMF),
        KVM_ISA_EXT_ARR(SSTC),
        KVM_ISA_EXT_ARR(SVINVAL),
        KVM_ISA_EXT_ARR(SVNAPOT),
        KVM_ISA_EXT_ARR(SVPBMT),
        KVM_ISA_EXT_ARR(ZACAS),
        KVM_ISA_EXT_ARR(ZAWRS),
        KVM_ISA_EXT_ARR(ZBA),
        KVM_ISA_EXT_ARR(ZBB),
        KVM_ISA_EXT_ARR(ZBC),
        KVM_ISA_EXT_ARR(ZBKB),
        KVM_ISA_EXT_ARR(ZBKC),
        KVM_ISA_EXT_ARR(ZBKX),
        KVM_ISA_EXT_ARR(ZBS),
        KVM_ISA_EXT_ARR(ZCA),
        KVM_ISA_EXT_ARR(ZCB),
        KVM_ISA_EXT_ARR(ZCD),
        KVM_ISA_EXT_ARR(ZCF),
        KVM_ISA_EXT_ARR(ZCMOP),
        KVM_ISA_EXT_ARR(ZFA),
        KVM_ISA_EXT_ARR(ZFH),
        KVM_ISA_EXT_ARR(ZFHMIN),
        KVM_ISA_EXT_ARR(ZICBOM),
        KVM_ISA_EXT_ARR(ZICBOZ),
        KVM_ISA_EXT_ARR(ZICNTR),
        KVM_ISA_EXT_ARR(ZICOND),
        KVM_ISA_EXT_ARR(ZICSR),
        KVM_ISA_EXT_ARR(ZIFENCEI),
        KVM_ISA_EXT_ARR(ZIHINTNTL),
        KVM_ISA_EXT_ARR(ZIHINTPAUSE),
        KVM_ISA_EXT_ARR(ZIHPM),
        KVM_ISA_EXT_ARR(ZIMOP),
        KVM_ISA_EXT_ARR(ZKND),
        KVM_ISA_EXT_ARR(ZKNE),
        KVM_ISA_EXT_ARR(ZKNH),
        KVM_ISA_EXT_ARR(ZKR),
        KVM_ISA_EXT_ARR(ZKSED),
        KVM_ISA_EXT_ARR(ZKSH),
        KVM_ISA_EXT_ARR(ZKT),
        KVM_ISA_EXT_ARR(ZTSO),
        KVM_ISA_EXT_ARR(ZVBB),
        KVM_ISA_EXT_ARR(ZVBC),
        KVM_ISA_EXT_ARR(ZVFH),
        KVM_ISA_EXT_ARR(ZVFHMIN),
        KVM_ISA_EXT_ARR(ZVKB),
        KVM_ISA_EXT_ARR(ZVKG),
        KVM_ISA_EXT_ARR(ZVKNED),
        KVM_ISA_EXT_ARR(ZVKNHA),
        KVM_ISA_EXT_ARR(ZVKNHB),
        KVM_ISA_EXT_ARR(ZVKSED),
        KVM_ISA_EXT_ARR(ZVKSH),
        KVM_ISA_EXT_ARR(ZVKT),
};
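
/*
 * Translate a host ISA extension ID (RISCV_ISA_EXT_*) into the matching
 * KVM_RISCV_ISA_EXT_* ID; returns KVM_RISCV_ISA_EXT_MAX when there is no
 * mapping.
 */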
static unsigned long kvm_riscv_vcpu_base2isa_ext(unsigned long base_ext)
{
        unsigned long i;

        for (i = 0; i < KVM_RISCV_ISA_EXT_MAX; i++) {
                if (kvm_isa_ext_arr[i] == base_ext)
                        return i;
        }

        return KVM_RISCV_ISA_EXT_MAX;
}
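
/*
 * Returns true if userspace may enable the given KVM ISA extension for a
 * VCPU: H is never allowed, Sscofpmf needs Ssaia on the host, and V is
 * subject to riscv_v_vstate_ctrl_user_allowed().
 */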
static bool kvm_riscv_vcpu_isa_enable_allowed(unsigned long ext)
{
        switch (ext) {
        case KVM_RISCV_ISA_EXT_H:
                return false;
        case KVM_RISCV_ISA_EXT_SSCOFPMF:
                /* Sscofpmf depends on interrupt filtering defined in ssaia */
                return __riscv_isa_extension_available(NULL, RISCV_ISA_EXT_SSAIA);
        case KVM_RISCV_ISA_EXT_V:
                return riscv_v_vstate_ctrl_user_allowed();
        default:
                break;
        }

        return true;
}
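
/*
 * Returns true if userspace may disable the given KVM ISA extension for a
 * VCPU. The extensions listed below cannot be turned off for a guest;
 * Ssaia can only be hidden when the host has Smstateen.
 */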
static bool kvm_riscv_vcpu_isa_disable_allowed(unsigned long ext)
{
        switch (ext) {
        /* Extensions which don't have any mechanism to disable */
        case KVM_RISCV_ISA_EXT_A:
        case KVM_RISCV_ISA_EXT_C:
        case KVM_RISCV_ISA_EXT_I:
        case KVM_RISCV_ISA_EXT_M:
        /* There is no architectural config bit to disable sscofpmf completely */
        case KVM_RISCV_ISA_EXT_SSCOFPMF:
        case KVM_RISCV_ISA_EXT_SSTC:
        case KVM_RISCV_ISA_EXT_SVINVAL:
        case KVM_RISCV_ISA_EXT_SVNAPOT:
        case KVM_RISCV_ISA_EXT_ZACAS:
        case KVM_RISCV_ISA_EXT_ZAWRS:
        case KVM_RISCV_ISA_EXT_ZBA:
        case KVM_RISCV_ISA_EXT_ZBB:
        case KVM_RISCV_ISA_EXT_ZBC:
        case KVM_RISCV_ISA_EXT_ZBKB:
        case KVM_RISCV_ISA_EXT_ZBKC:
        case KVM_RISCV_ISA_EXT_ZBKX:
        case KVM_RISCV_ISA_EXT_ZBS:
        case KVM_RISCV_ISA_EXT_ZCA:
        case KVM_RISCV_ISA_EXT_ZCB:
        case KVM_RISCV_ISA_EXT_ZCD:
        case KVM_RISCV_ISA_EXT_ZCF:
        case KVM_RISCV_ISA_EXT_ZCMOP:
        case KVM_RISCV_ISA_EXT_ZFA:
        case KVM_RISCV_ISA_EXT_ZFH:
        case KVM_RISCV_ISA_EXT_ZFHMIN:
        case KVM_RISCV_ISA_EXT_ZICNTR:
        case KVM_RISCV_ISA_EXT_ZICOND:
        case KVM_RISCV_ISA_EXT_ZICSR:
        case KVM_RISCV_ISA_EXT_ZIFENCEI:
        case KVM_RISCV_ISA_EXT_ZIHINTNTL:
        case KVM_RISCV_ISA_EXT_ZIHINTPAUSE:
        case KVM_RISCV_ISA_EXT_ZIHPM:
        case KVM_RISCV_ISA_EXT_ZIMOP:
        case KVM_RISCV_ISA_EXT_ZKND:
        case KVM_RISCV_ISA_EXT_ZKNE:
        case KVM_RISCV_ISA_EXT_ZKNH:
        case KVM_RISCV_ISA_EXT_ZKR:
        case KVM_RISCV_ISA_EXT_ZKSED:
        case KVM_RISCV_ISA_EXT_ZKSH:
        case KVM_RISCV_ISA_EXT_ZKT:
        case KVM_RISCV_ISA_EXT_ZTSO:
        case KVM_RISCV_ISA_EXT_ZVBB:
        case KVM_RISCV_ISA_EXT_ZVBC:
        case KVM_RISCV_ISA_EXT_ZVFH:
        case KVM_RISCV_ISA_EXT_ZVFHMIN:
        case KVM_RISCV_ISA_EXT_ZVKB:
        case KVM_RISCV_ISA_EXT_ZVKG:
        case KVM_RISCV_ISA_EXT_ZVKNED:
        case KVM_RISCV_ISA_EXT_ZVKNHA:
        case KVM_RISCV_ISA_EXT_ZVKNHB:
        case KVM_RISCV_ISA_EXT_ZVKSED:
        case KVM_RISCV_ISA_EXT_ZVKSH:
        case KVM_RISCV_ISA_EXT_ZVKT:
                return false;
        /* Extensions which can be disabled using Smstateen */
        case KVM_RISCV_ISA_EXT_SSAIA:
                return riscv_has_extension_unlikely(RISCV_ISA_EXT_SMSTATEEN);
        default:
                break;
        }

        return true;
}
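
/* Enable all host ISA extensions which KVM is allowed to expose to the VCPU. */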
void kvm_riscv_vcpu_setup_isa(struct kvm_vcpu *vcpu)
{
        unsigned long host_isa, i;

        for (i = 0; i < ARRAY_SIZE(kvm_isa_ext_arr); i++) {
                host_isa = kvm_isa_ext_arr[i];
                if (__riscv_isa_extension_available(NULL, host_isa) &&
                    kvm_riscv_vcpu_isa_enable_allowed(i))
                        set_bit(host_isa, vcpu->arch.isa);
        }
}
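
/* KVM_GET_ONE_REG handler for the KVM_REG_RISCV_CONFIG register group. */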
static int kvm_riscv_vcpu_get_reg_config(struct kvm_vcpu *vcpu,
                                         const struct kvm_one_reg *reg)
{
        unsigned long __user *uaddr =
                        (unsigned long __user *)(unsigned long)reg->addr;
        unsigned long reg_num = reg->id & ~(KVM_REG_ARCH_MASK |
                                            KVM_REG_SIZE_MASK |
                                            KVM_REG_RISCV_CONFIG);
        unsigned long reg_val;

        if (KVM_REG_SIZE(reg->id) != sizeof(unsigned long))
                return -EINVAL;

        switch (reg_num) {
        case KVM_REG_RISCV_CONFIG_REG(isa):
                reg_val = vcpu->arch.isa[0] & KVM_RISCV_BASE_ISA_MASK;
                break;
        case KVM_REG_RISCV_CONFIG_REG(zicbom_block_size):
                if (!riscv_isa_extension_available(vcpu->arch.isa, ZICBOM))
                        return -ENOENT;
                reg_val = riscv_cbom_block_size;
                break;
        case KVM_REG_RISCV_CONFIG_REG(zicboz_block_size):
                if (!riscv_isa_extension_available(vcpu->arch.isa, ZICBOZ))
                        return -ENOENT;
                reg_val = riscv_cboz_block_size;
                break;
        case KVM_REG_RISCV_CONFIG_REG(mvendorid):
                reg_val = vcpu->arch.mvendorid;
                break;
        case KVM_REG_RISCV_CONFIG_REG(marchid):
                reg_val = vcpu->arch.marchid;
                break;
        case KVM_REG_RISCV_CONFIG_REG(mimpid):
                reg_val = vcpu->arch.mimpid;
                break;
        case KVM_REG_RISCV_CONFIG_REG(satp_mode):
                reg_val = satp_mode >> SATP_MODE_SHIFT;
                break;
        default:
                return -ENOENT;
        }

        if (copy_to_user(uaddr, &reg_val, KVM_REG_SIZE(reg->id)))
                return -EFAULT;

        return 0;
}
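
/*
 * KVM_SET_ONE_REG handler for the KVM_REG_RISCV_CONFIG register group.
 * The isa, mvendorid, marchid and mimpid registers can only be changed
 * before the VCPU first runs; the block-size and satp_mode registers are
 * fixed and writes are merely validated against the current values.
 */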
static int kvm_riscv_vcpu_set_reg_config(struct kvm_vcpu *vcpu,
                                         const struct kvm_one_reg *reg)
{
        unsigned long __user *uaddr =
                        (unsigned long __user *)(unsigned long)reg->addr;
        unsigned long reg_num = reg->id & ~(KVM_REG_ARCH_MASK |
                                            KVM_REG_SIZE_MASK |
                                            KVM_REG_RISCV_CONFIG);
        unsigned long i, isa_ext, reg_val;

        if (KVM_REG_SIZE(reg->id) != sizeof(unsigned long))
                return -EINVAL;

        if (copy_from_user(&reg_val, uaddr, KVM_REG_SIZE(reg->id)))
                return -EFAULT;

        switch (reg_num) {
        case KVM_REG_RISCV_CONFIG_REG(isa):
                /*
                 * This ONE REG interface is only defined for
                 * single letter extensions.
                 */
                if (fls(reg_val) >= RISCV_ISA_EXT_BASE)
                        return -EINVAL;

                /*
                 * Return early (i.e. do nothing) if reg_val is the same
                 * value retrievable via kvm_riscv_vcpu_get_reg_config().
                 */
                if (reg_val == (vcpu->arch.isa[0] & KVM_RISCV_BASE_ISA_MASK))
                        break;

                if (!vcpu->arch.ran_atleast_once) {
                        /* Ignore the enable/disable request for certain extensions */
                        for (i = 0; i < RISCV_ISA_EXT_BASE; i++) {
                                isa_ext = kvm_riscv_vcpu_base2isa_ext(i);
                                if (isa_ext >= KVM_RISCV_ISA_EXT_MAX) {
                                        reg_val &= ~BIT(i);
                                        continue;
                                }
                                if (!kvm_riscv_vcpu_isa_enable_allowed(isa_ext))
                                        if (reg_val & BIT(i))
                                                reg_val &= ~BIT(i);
                                if (!kvm_riscv_vcpu_isa_disable_allowed(isa_ext))
                                        if (!(reg_val & BIT(i)))
                                                reg_val |= BIT(i);
                        }
                        reg_val &= riscv_isa_extension_base(NULL);
                        /* Do not modify anything beyond single letter extensions */
                        reg_val = (vcpu->arch.isa[0] & ~KVM_RISCV_BASE_ISA_MASK) |
                                  (reg_val & KVM_RISCV_BASE_ISA_MASK);
                        vcpu->arch.isa[0] = reg_val;
                        kvm_riscv_vcpu_fp_reset(vcpu);
                } else {
                        return -EBUSY;
                }
                break;
        case KVM_REG_RISCV_CONFIG_REG(zicbom_block_size):
                if (!riscv_isa_extension_available(vcpu->arch.isa, ZICBOM))
                        return -ENOENT;
                if (reg_val != riscv_cbom_block_size)
                        return -EINVAL;
                break;
        case KVM_REG_RISCV_CONFIG_REG(zicboz_block_size):
                if (!riscv_isa_extension_available(vcpu->arch.isa, ZICBOZ))
                        return -ENOENT;
                if (reg_val != riscv_cboz_block_size)
                        return -EINVAL;
                break;
        case KVM_REG_RISCV_CONFIG_REG(mvendorid):
                if (reg_val == vcpu->arch.mvendorid)
                        break;
                if (!vcpu->arch.ran_atleast_once)
                        vcpu->arch.mvendorid = reg_val;
                else
                        return -EBUSY;
                break;
        case KVM_REG_RISCV_CONFIG_REG(marchid):
                if (reg_val == vcpu->arch.marchid)
                        break;
                if (!vcpu->arch.ran_atleast_once)
                        vcpu->arch.marchid = reg_val;
                else
                        return -EBUSY;
                break;
        case KVM_REG_RISCV_CONFIG_REG(mimpid):
                if (reg_val == vcpu->arch.mimpid)
                        break;
                if (!vcpu->arch.ran_atleast_once)
                        vcpu->arch.mimpid = reg_val;
                else
                        return -EBUSY;
                break;
        case KVM_REG_RISCV_CONFIG_REG(satp_mode):
                if (reg_val != (satp_mode >> SATP_MODE_SHIFT))
                        return -EINVAL;
                break;
        default:
                return -ENOENT;
        }

        return 0;
}
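
/*
 * KVM_GET/SET_ONE_REG handlers for the KVM_REG_RISCV_CORE registers:
 * guest GPRs, pc (backed by sepc) and the privilege mode derived from
 * the SPP bit of sstatus.
 */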
static int kvm_riscv_vcpu_get_reg_core(struct kvm_vcpu *vcpu,
                                       const struct kvm_one_reg *reg)
{
        struct kvm_cpu_context *cntx = &vcpu->arch.guest_context;
        unsigned long __user *uaddr =
                        (unsigned long __user *)(unsigned long)reg->addr;
        unsigned long reg_num = reg->id & ~(KVM_REG_ARCH_MASK |
                                            KVM_REG_SIZE_MASK |
                                            KVM_REG_RISCV_CORE);
        unsigned long reg_val;

        if (KVM_REG_SIZE(reg->id) != sizeof(unsigned long))
                return -EINVAL;
        if (reg_num >= sizeof(struct kvm_riscv_core) / sizeof(unsigned long))
                return -ENOENT;

        if (reg_num == KVM_REG_RISCV_CORE_REG(regs.pc))
                reg_val = cntx->sepc;
        else if (KVM_REG_RISCV_CORE_REG(regs.pc) < reg_num &&
                 reg_num <= KVM_REG_RISCV_CORE_REG(regs.t6))
                reg_val = ((unsigned long *)cntx)[reg_num];
        else if (reg_num == KVM_REG_RISCV_CORE_REG(mode))
                reg_val = (cntx->sstatus & SR_SPP) ?
                          KVM_RISCV_MODE_S : KVM_RISCV_MODE_U;
        else
                return -ENOENT;

        if (copy_to_user(uaddr, &reg_val, KVM_REG_SIZE(reg->id)))
                return -EFAULT;

        return 0;
}

static int kvm_riscv_vcpu_set_reg_core(struct kvm_vcpu *vcpu,
                                       const struct kvm_one_reg *reg)
{
        struct kvm_cpu_context *cntx = &vcpu->arch.guest_context;
        unsigned long __user *uaddr =
                        (unsigned long __user *)(unsigned long)reg->addr;
        unsigned long reg_num = reg->id & ~(KVM_REG_ARCH_MASK |
                                            KVM_REG_SIZE_MASK |
                                            KVM_REG_RISCV_CORE);
        unsigned long reg_val;

        if (KVM_REG_SIZE(reg->id) != sizeof(unsigned long))
                return -EINVAL;
        if (reg_num >= sizeof(struct kvm_riscv_core) / sizeof(unsigned long))
                return -ENOENT;

        if (copy_from_user(&reg_val, uaddr, KVM_REG_SIZE(reg->id)))
                return -EFAULT;

        if (reg_num == KVM_REG_RISCV_CORE_REG(regs.pc))
                cntx->sepc = reg_val;
        else if (KVM_REG_RISCV_CORE_REG(regs.pc) < reg_num &&
                 reg_num <= KVM_REG_RISCV_CORE_REG(regs.t6))
                ((unsigned long *)cntx)[reg_num] = reg_val;
        else if (reg_num == KVM_REG_RISCV_CORE_REG(mode)) {
                if (reg_val == KVM_RISCV_MODE_S)
                        cntx->sstatus |= SR_SPP;
                else
                        cntx->sstatus &= ~SR_SPP;
        } else
                return -ENOENT;

        return 0;
}
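
/*
 * Helpers for the KVM_REG_RISCV_CSR register group. General CSRs map
 * directly onto struct kvm_vcpu_csr (with sip synthesized from hvip),
 * while the Smstateen CSRs map onto struct kvm_vcpu_smstateen_csr.
 */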
static int kvm_riscv_vcpu_general_get_csr(struct kvm_vcpu *vcpu,
                                          unsigned long reg_num,
                                          unsigned long *out_val)
{
        struct kvm_vcpu_csr *csr = &vcpu->arch.guest_csr;

        if (reg_num >= sizeof(struct kvm_riscv_csr) / sizeof(unsigned long))
                return -ENOENT;

        if (reg_num == KVM_REG_RISCV_CSR_REG(sip)) {
                kvm_riscv_vcpu_flush_interrupts(vcpu);
                *out_val = (csr->hvip >> VSIP_TO_HVIP_SHIFT) & VSIP_VALID_MASK;
                *out_val |= csr->hvip & ~IRQ_LOCAL_MASK;
        } else
                *out_val = ((unsigned long *)csr)[reg_num];

        return 0;
}

static int kvm_riscv_vcpu_general_set_csr(struct kvm_vcpu *vcpu,
                                          unsigned long reg_num,
                                          unsigned long reg_val)
{
        struct kvm_vcpu_csr *csr = &vcpu->arch.guest_csr;

        if (reg_num >= sizeof(struct kvm_riscv_csr) / sizeof(unsigned long))
                return -ENOENT;

        if (reg_num == KVM_REG_RISCV_CSR_REG(sip)) {
                reg_val &= VSIP_VALID_MASK;
                reg_val <<= VSIP_TO_HVIP_SHIFT;
        }

        ((unsigned long *)csr)[reg_num] = reg_val;

        if (reg_num == KVM_REG_RISCV_CSR_REG(sip))
                WRITE_ONCE(vcpu->arch.irqs_pending_mask[0], 0);

        return 0;
}

static inline int kvm_riscv_vcpu_smstateen_set_csr(struct kvm_vcpu *vcpu,
                                                   unsigned long reg_num,
                                                   unsigned long reg_val)
{
        struct kvm_vcpu_smstateen_csr *csr = &vcpu->arch.smstateen_csr;

        if (reg_num >= sizeof(struct kvm_riscv_smstateen_csr) /
                       sizeof(unsigned long))
                return -EINVAL;

        ((unsigned long *)csr)[reg_num] = reg_val;
        return 0;
}

static int kvm_riscv_vcpu_smstateen_get_csr(struct kvm_vcpu *vcpu,
                                            unsigned long reg_num,
                                            unsigned long *out_val)
{
        struct kvm_vcpu_smstateen_csr *csr = &vcpu->arch.smstateen_csr;

        if (reg_num >= sizeof(struct kvm_riscv_smstateen_csr) /
                       sizeof(unsigned long))
                return -EINVAL;

        *out_val = ((unsigned long *)csr)[reg_num];
        return 0;
}
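
/* KVM_GET/SET_ONE_REG handlers dispatching on the CSR register subtype. */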
static int kvm_riscv_vcpu_get_reg_csr(struct kvm_vcpu *vcpu,
                                      const struct kvm_one_reg *reg)
{
        int rc;
        unsigned long __user *uaddr =
                        (unsigned long __user *)(unsigned long)reg->addr;
        unsigned long reg_num = reg->id & ~(KVM_REG_ARCH_MASK |
                                            KVM_REG_SIZE_MASK |
                                            KVM_REG_RISCV_CSR);
        unsigned long reg_val, reg_subtype;

        if (KVM_REG_SIZE(reg->id) != sizeof(unsigned long))
                return -EINVAL;

        reg_subtype = reg_num & KVM_REG_RISCV_SUBTYPE_MASK;
        reg_num &= ~KVM_REG_RISCV_SUBTYPE_MASK;
        switch (reg_subtype) {
        case KVM_REG_RISCV_CSR_GENERAL:
                rc = kvm_riscv_vcpu_general_get_csr(vcpu, reg_num, &reg_val);
                break;
        case KVM_REG_RISCV_CSR_AIA:
                rc = kvm_riscv_vcpu_aia_get_csr(vcpu, reg_num, &reg_val);
                break;
        case KVM_REG_RISCV_CSR_SMSTATEEN:
                rc = -EINVAL;
                if (riscv_has_extension_unlikely(RISCV_ISA_EXT_SMSTATEEN))
                        rc = kvm_riscv_vcpu_smstateen_get_csr(vcpu, reg_num,
                                                              &reg_val);
                break;
        default:
                rc = -ENOENT;
                break;
        }
        if (rc)
                return rc;

        if (copy_to_user(uaddr, &reg_val, KVM_REG_SIZE(reg->id)))
                return -EFAULT;

        return 0;
}

static int kvm_riscv_vcpu_set_reg_csr(struct kvm_vcpu *vcpu,
                                      const struct kvm_one_reg *reg)
{
        int rc;
        unsigned long __user *uaddr =
                        (unsigned long __user *)(unsigned long)reg->addr;
        unsigned long reg_num = reg->id & ~(KVM_REG_ARCH_MASK |
                                            KVM_REG_SIZE_MASK |
                                            KVM_REG_RISCV_CSR);
        unsigned long reg_val, reg_subtype;

        if (KVM_REG_SIZE(reg->id) != sizeof(unsigned long))
                return -EINVAL;

        if (copy_from_user(&reg_val, uaddr, KVM_REG_SIZE(reg->id)))
                return -EFAULT;

        reg_subtype = reg_num & KVM_REG_RISCV_SUBTYPE_MASK;
        reg_num &= ~KVM_REG_RISCV_SUBTYPE_MASK;
        switch (reg_subtype) {
        case KVM_REG_RISCV_CSR_GENERAL:
                rc = kvm_riscv_vcpu_general_set_csr(vcpu, reg_num, reg_val);
                break;
        case KVM_REG_RISCV_CSR_AIA:
                rc = kvm_riscv_vcpu_aia_set_csr(vcpu, reg_num, reg_val);
                break;
        case KVM_REG_RISCV_CSR_SMSTATEEN:
                rc = -EINVAL;
                if (riscv_has_extension_unlikely(RISCV_ISA_EXT_SMSTATEEN))
                        rc = kvm_riscv_vcpu_smstateen_set_csr(vcpu, reg_num,
                                                              reg_val);
                break;
        default:
                rc = -ENOENT;
                break;
        }
        if (rc)
                return rc;

        return 0;
}
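
/*
 * Single ISA extension registers read back 0 or 1 and may only be
 * toggled before the VCPU first runs, subject to the enable/disable
 * rules above.
 */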
static int riscv_vcpu_get_isa_ext_single(struct kvm_vcpu *vcpu,
                                         unsigned long reg_num,
                                         unsigned long *reg_val)
{
        unsigned long host_isa_ext;

        if (reg_num >= KVM_RISCV_ISA_EXT_MAX ||
            reg_num >= ARRAY_SIZE(kvm_isa_ext_arr))
                return -ENOENT;

        host_isa_ext = kvm_isa_ext_arr[reg_num];
        if (!__riscv_isa_extension_available(NULL, host_isa_ext))
                return -ENOENT;

        *reg_val = 0;
        if (__riscv_isa_extension_available(vcpu->arch.isa, host_isa_ext))
                *reg_val = 1; /* Mark the given extension as available */

        return 0;
}

static int riscv_vcpu_set_isa_ext_single(struct kvm_vcpu *vcpu,
                                         unsigned long reg_num,
                                         unsigned long reg_val)
{
        unsigned long host_isa_ext;

        if (reg_num >= KVM_RISCV_ISA_EXT_MAX ||
            reg_num >= ARRAY_SIZE(kvm_isa_ext_arr))
                return -ENOENT;

        host_isa_ext = kvm_isa_ext_arr[reg_num];
        if (!__riscv_isa_extension_available(NULL, host_isa_ext))
                return -ENOENT;

        if (reg_val == test_bit(host_isa_ext, vcpu->arch.isa))
                return 0;

        if (!vcpu->arch.ran_atleast_once) {
                /*
                 * All multi-letter extensions and a few single-letter
                 * extensions can be disabled
                 */
                if (reg_val == 1 &&
                    kvm_riscv_vcpu_isa_enable_allowed(reg_num))
                        set_bit(host_isa_ext, vcpu->arch.isa);
                else if (!reg_val &&
                         kvm_riscv_vcpu_isa_disable_allowed(reg_num))
                        clear_bit(host_isa_ext, vcpu->arch.isa);
                else
                        return -EINVAL;
                kvm_riscv_vcpu_fp_reset(vcpu);
        } else {
                return -EBUSY;
        }

        return 0;
}
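
/*
 * Multi ISA extension registers are bitmaps covering BITS_PER_LONG
 * extension IDs each, implemented on top of the single-register helpers
 * above.
 */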
static int riscv_vcpu_get_isa_ext_multi(struct kvm_vcpu *vcpu,
                                        unsigned long reg_num,
                                        unsigned long *reg_val)
{
        unsigned long i, ext_id, ext_val;

        if (reg_num > KVM_REG_RISCV_ISA_MULTI_REG_LAST)
                return -ENOENT;

        for (i = 0; i < BITS_PER_LONG; i++) {
                ext_id = i + reg_num * BITS_PER_LONG;
                if (ext_id >= KVM_RISCV_ISA_EXT_MAX)
                        break;

                ext_val = 0;
                riscv_vcpu_get_isa_ext_single(vcpu, ext_id, &ext_val);
                if (ext_val)
                        *reg_val |= KVM_REG_RISCV_ISA_MULTI_MASK(ext_id);
        }

        return 0;
}

static int riscv_vcpu_set_isa_ext_multi(struct kvm_vcpu *vcpu,
                                        unsigned long reg_num,
                                        unsigned long reg_val, bool enable)
{
        unsigned long i, ext_id;

        if (reg_num > KVM_REG_RISCV_ISA_MULTI_REG_LAST)
                return -ENOENT;

        for_each_set_bit(i, &reg_val, BITS_PER_LONG) {
                ext_id = i + reg_num * BITS_PER_LONG;
                if (ext_id >= KVM_RISCV_ISA_EXT_MAX)
                        break;

                riscv_vcpu_set_isa_ext_single(vcpu, ext_id, enable);
        }

        return 0;
}
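
/* KVM_GET/SET_ONE_REG handlers dispatching on the ISA_EXT register subtype. */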
static int kvm_riscv_vcpu_get_reg_isa_ext(struct kvm_vcpu *vcpu,
                                          const struct kvm_one_reg *reg)
{
        int rc;
        unsigned long __user *uaddr =
                        (unsigned long __user *)(unsigned long)reg->addr;
        unsigned long reg_num = reg->id & ~(KVM_REG_ARCH_MASK |
                                            KVM_REG_SIZE_MASK |
                                            KVM_REG_RISCV_ISA_EXT);
        unsigned long reg_val, reg_subtype;

        if (KVM_REG_SIZE(reg->id) != sizeof(unsigned long))
                return -EINVAL;

        reg_subtype = reg_num & KVM_REG_RISCV_SUBTYPE_MASK;
        reg_num &= ~KVM_REG_RISCV_SUBTYPE_MASK;

        reg_val = 0;
        switch (reg_subtype) {
        case KVM_REG_RISCV_ISA_SINGLE:
                rc = riscv_vcpu_get_isa_ext_single(vcpu, reg_num, &reg_val);
                break;
        case KVM_REG_RISCV_ISA_MULTI_EN:
        case KVM_REG_RISCV_ISA_MULTI_DIS:
                rc = riscv_vcpu_get_isa_ext_multi(vcpu, reg_num, &reg_val);
                if (!rc && reg_subtype == KVM_REG_RISCV_ISA_MULTI_DIS)
                        reg_val = ~reg_val;
                break;
        default:
                rc = -ENOENT;
        }
        if (rc)
                return rc;

        if (copy_to_user(uaddr, &reg_val, KVM_REG_SIZE(reg->id)))
                return -EFAULT;

        return 0;
}

static int kvm_riscv_vcpu_set_reg_isa_ext(struct kvm_vcpu *vcpu,
                                          const struct kvm_one_reg *reg)
{
        unsigned long __user *uaddr =
                        (unsigned long __user *)(unsigned long)reg->addr;
        unsigned long reg_num = reg->id & ~(KVM_REG_ARCH_MASK |
                                            KVM_REG_SIZE_MASK |
                                            KVM_REG_RISCV_ISA_EXT);
        unsigned long reg_val, reg_subtype;

        if (KVM_REG_SIZE(reg->id) != sizeof(unsigned long))
                return -EINVAL;

        reg_subtype = reg_num & KVM_REG_RISCV_SUBTYPE_MASK;
        reg_num &= ~KVM_REG_RISCV_SUBTYPE_MASK;

        if (copy_from_user(&reg_val, uaddr, KVM_REG_SIZE(reg->id)))
                return -EFAULT;

        switch (reg_subtype) {
        case KVM_REG_RISCV_ISA_SINGLE:
                return riscv_vcpu_set_isa_ext_single(vcpu, reg_num, reg_val);
        case KVM_REG_RISCV_ISA_MULTI_EN:
                return riscv_vcpu_set_isa_ext_multi(vcpu, reg_num, reg_val, true);
        case KVM_REG_RISCV_ISA_MULTI_DIS:
                return riscv_vcpu_set_isa_ext_multi(vcpu, reg_num, reg_val, false);
        default:
                return -ENOENT;
        }

        return 0;
}
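
/*
 * The copy_*_reg_indices() helpers below serve double duty: called with a
 * NULL uindices pointer they only count registers, otherwise they also
 * copy the register indices out to userspace.
 */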
static int copy_config_reg_indices(const struct kvm_vcpu *vcpu,
                                   u64 __user *uindices)
{
        int n = 0;

        for (int i = 0; i < sizeof(struct kvm_riscv_config)/sizeof(unsigned long);
                 i++) {
                u64 size;
                u64 reg;

                /*
                 * Avoid reporting config reg if the corresponding extension
                 * was not available.
                 */
                if (i == KVM_REG_RISCV_CONFIG_REG(zicbom_block_size) &&
                    !riscv_isa_extension_available(vcpu->arch.isa, ZICBOM))
                        continue;
                else if (i == KVM_REG_RISCV_CONFIG_REG(zicboz_block_size) &&
                         !riscv_isa_extension_available(vcpu->arch.isa, ZICBOZ))
                        continue;

                size = IS_ENABLED(CONFIG_32BIT) ? KVM_REG_SIZE_U32 : KVM_REG_SIZE_U64;
                reg = KVM_REG_RISCV | size | KVM_REG_RISCV_CONFIG | i;

                if (uindices) {
                        if (put_user(reg, uindices))
                                return -EFAULT;
                        uindices++;
                }

                n++;
        }

        return n;
}

static unsigned long num_config_regs(const struct kvm_vcpu *vcpu)
{
        return copy_config_reg_indices(vcpu, NULL);
}

static inline unsigned long num_core_regs(void)
{
        return sizeof(struct kvm_riscv_core) / sizeof(unsigned long);
}

static int copy_core_reg_indices(u64 __user *uindices)
{
        int n = num_core_regs();

        for (int i = 0; i < n; i++) {
                u64 size = IS_ENABLED(CONFIG_32BIT) ?
                           KVM_REG_SIZE_U32 : KVM_REG_SIZE_U64;
                u64 reg = KVM_REG_RISCV | size | KVM_REG_RISCV_CORE | i;

                if (uindices) {
                        if (put_user(reg, uindices))
                                return -EFAULT;
                        uindices++;
                }
        }

        return n;
}

static inline unsigned long num_csr_regs(const struct kvm_vcpu *vcpu)
{
        unsigned long n = sizeof(struct kvm_riscv_csr) / sizeof(unsigned long);

        if (riscv_isa_extension_available(vcpu->arch.isa, SSAIA))
                n += sizeof(struct kvm_riscv_aia_csr) / sizeof(unsigned long);
        if (riscv_isa_extension_available(vcpu->arch.isa, SMSTATEEN))
                n += sizeof(struct kvm_riscv_smstateen_csr) / sizeof(unsigned long);

        return n;
}

static int copy_csr_reg_indices(const struct kvm_vcpu *vcpu,
                                u64 __user *uindices)
{
        int n1 = sizeof(struct kvm_riscv_csr) / sizeof(unsigned long);
        int n2 = 0, n3 = 0;

        /* copy general csr regs */
        for (int i = 0; i < n1; i++) {
                u64 size = IS_ENABLED(CONFIG_32BIT) ?
                           KVM_REG_SIZE_U32 : KVM_REG_SIZE_U64;
                u64 reg = KVM_REG_RISCV | size | KVM_REG_RISCV_CSR |
                          KVM_REG_RISCV_CSR_GENERAL | i;

                if (uindices) {
                        if (put_user(reg, uindices))
                                return -EFAULT;
                        uindices++;
                }
        }

        /* copy AIA csr regs */
        if (riscv_isa_extension_available(vcpu->arch.isa, SSAIA)) {
                n2 = sizeof(struct kvm_riscv_aia_csr) / sizeof(unsigned long);

                for (int i = 0; i < n2; i++) {
                        u64 size = IS_ENABLED(CONFIG_32BIT) ?
                                   KVM_REG_SIZE_U32 : KVM_REG_SIZE_U64;
                        u64 reg = KVM_REG_RISCV | size | KVM_REG_RISCV_CSR |
                                  KVM_REG_RISCV_CSR_AIA | i;

                        if (uindices) {
                                if (put_user(reg, uindices))
                                        return -EFAULT;
                                uindices++;
                        }
                }
        }

        /* copy Smstateen csr regs */
        if (riscv_isa_extension_available(vcpu->arch.isa, SMSTATEEN)) {
                n3 = sizeof(struct kvm_riscv_smstateen_csr) / sizeof(unsigned long);

                for (int i = 0; i < n3; i++) {
                        u64 size = IS_ENABLED(CONFIG_32BIT) ?
                                   KVM_REG_SIZE_U32 : KVM_REG_SIZE_U64;
                        u64 reg = KVM_REG_RISCV | size | KVM_REG_RISCV_CSR |
                                  KVM_REG_RISCV_CSR_SMSTATEEN | i;

                        if (uindices) {
                                if (put_user(reg, uindices))
                                        return -EFAULT;
                                uindices++;
                        }
                }
        }

        return n1 + n2 + n3;
}

static inline unsigned long num_timer_regs(void)
{
        return sizeof(struct kvm_riscv_timer) / sizeof(u64);
}

static int copy_timer_reg_indices(u64 __user *uindices)
{
        int n = num_timer_regs();

        for (int i = 0; i < n; i++) {
                u64 reg = KVM_REG_RISCV | KVM_REG_SIZE_U64 |
                          KVM_REG_RISCV_TIMER | i;

                if (uindices) {
                        if (put_user(reg, uindices))
                                return -EFAULT;
                        uindices++;
                }
        }

        return n;
}

static inline unsigned long num_fp_f_regs(const struct kvm_vcpu *vcpu)
{
        const struct kvm_cpu_context *cntx = &vcpu->arch.guest_context;

        if (riscv_isa_extension_available(vcpu->arch.isa, f))
                return sizeof(cntx->fp.f) / sizeof(u32);
        else
                return 0;
}

static int copy_fp_f_reg_indices(const struct kvm_vcpu *vcpu,
                                 u64 __user *uindices)
{
        int n = num_fp_f_regs(vcpu);

        for (int i = 0; i < n; i++) {
                u64 reg = KVM_REG_RISCV | KVM_REG_SIZE_U32 |
                          KVM_REG_RISCV_FP_F | i;

                if (uindices) {
                        if (put_user(reg, uindices))
                                return -EFAULT;
                        uindices++;
                }
        }

        return n;
}

static inline unsigned long num_fp_d_regs(const struct kvm_vcpu *vcpu)
{
        const struct kvm_cpu_context *cntx = &vcpu->arch.guest_context;

        if (riscv_isa_extension_available(vcpu->arch.isa, d))
                return sizeof(cntx->fp.d.f) / sizeof(u64) + 1;
        else
                return 0;
}

static int copy_fp_d_reg_indices(const struct kvm_vcpu *vcpu,
                                 u64 __user *uindices)
{
        int i;
        int n = num_fp_d_regs(vcpu);
        u64 reg;

        /* copy fp.d.f indices */
        for (i = 0; i < n-1; i++) {
                reg = KVM_REG_RISCV | KVM_REG_SIZE_U64 |
                      KVM_REG_RISCV_FP_D | i;

                if (uindices) {
                        if (put_user(reg, uindices))
                                return -EFAULT;
                        uindices++;
                }
        }

        /* copy fp.d.fcsr indices */
        reg = KVM_REG_RISCV | KVM_REG_SIZE_U32 | KVM_REG_RISCV_FP_D | i;
        if (uindices) {
                if (put_user(reg, uindices))
                        return -EFAULT;
                uindices++;
        }

        return n;
}

static int copy_isa_ext_reg_indices(const struct kvm_vcpu *vcpu,
                                    u64 __user *uindices)
{
        unsigned int n = 0;
        unsigned long isa_ext;

        for (int i = 0; i < KVM_RISCV_ISA_EXT_MAX; i++) {
                u64 size = IS_ENABLED(CONFIG_32BIT) ?
                           KVM_REG_SIZE_U32 : KVM_REG_SIZE_U64;
                u64 reg = KVM_REG_RISCV | size | KVM_REG_RISCV_ISA_EXT | i;

                isa_ext = kvm_isa_ext_arr[i];
                if (!__riscv_isa_extension_available(NULL, isa_ext))
                        continue;

                if (uindices) {
                        if (put_user(reg, uindices))
                                return -EFAULT;
                        uindices++;
                }

                n++;
        }

        return n;
}

static inline unsigned long num_isa_ext_regs(const struct kvm_vcpu *vcpu)
{
        return copy_isa_ext_reg_indices(vcpu, NULL);
}

static int copy_sbi_ext_reg_indices(struct kvm_vcpu *vcpu, u64 __user *uindices)
{
        unsigned int n = 0;

        for (int i = 0; i < KVM_RISCV_SBI_EXT_MAX; i++) {
                u64 size = IS_ENABLED(CONFIG_32BIT) ?
                           KVM_REG_SIZE_U32 : KVM_REG_SIZE_U64;
                u64 reg = KVM_REG_RISCV | size | KVM_REG_RISCV_SBI_EXT |
                          KVM_REG_RISCV_SBI_SINGLE | i;

                if (!riscv_vcpu_supports_sbi_ext(vcpu, i))
                        continue;

                if (uindices) {
                        if (put_user(reg, uindices))
                                return -EFAULT;
                        uindices++;
                }

                n++;
        }

        return n;
}

static unsigned long num_sbi_ext_regs(struct kvm_vcpu *vcpu)
{
        return copy_sbi_ext_reg_indices(vcpu, NULL);
}

static int copy_sbi_reg_indices(struct kvm_vcpu *vcpu, u64 __user *uindices)
{
        struct kvm_vcpu_sbi_context *scontext = &vcpu->arch.sbi_context;
        int total = 0;

        if (scontext->ext_status[KVM_RISCV_SBI_EXT_STA] == KVM_RISCV_SBI_EXT_STATUS_ENABLED) {
                u64 size = IS_ENABLED(CONFIG_32BIT) ? KVM_REG_SIZE_U32 : KVM_REG_SIZE_U64;
                int n = sizeof(struct kvm_riscv_sbi_sta) / sizeof(unsigned long);

                for (int i = 0; i < n; i++) {
                        u64 reg = KVM_REG_RISCV | size |
                                  KVM_REG_RISCV_SBI_STATE |
                                  KVM_REG_RISCV_SBI_STA | i;

                        if (uindices) {
                                if (put_user(reg, uindices))
                                        return -EFAULT;
                                uindices++;
                        }
                }

                total += n;
        }

        return total;
}

static inline unsigned long num_sbi_regs(struct kvm_vcpu *vcpu)
{
        return copy_sbi_reg_indices(vcpu, NULL);
}
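
/*
 * Vector registers: vstart, vl, vtype, vcsr and vlenb plus the 32 vector
 * registers, whose register size in the index encoding is derived from
 * vlenb.
 */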
static inline unsigned long num_vector_regs(const struct kvm_vcpu *vcpu)
{
        if (!riscv_isa_extension_available(vcpu->arch.isa, v))
                return 0;

        /* vstart, vl, vtype, vcsr, vlenb and 32 vector regs */
        return 37;
}

static int copy_vector_reg_indices(const struct kvm_vcpu *vcpu,
                                   u64 __user *uindices)
{
        const struct kvm_cpu_context *cntx = &vcpu->arch.guest_context;
        int n = num_vector_regs(vcpu);
        u64 reg, size;
        int i;

        if (n == 0)
                return 0;

        /* copy vstart, vl, vtype, vcsr and vlenb */
        size = IS_ENABLED(CONFIG_32BIT) ? KVM_REG_SIZE_U32 : KVM_REG_SIZE_U64;
        for (i = 0; i < 5; i++) {
                reg = KVM_REG_RISCV | size | KVM_REG_RISCV_VECTOR | i;

                if (uindices) {
                        if (put_user(reg, uindices))
                                return -EFAULT;
                        uindices++;
                }
        }

        /* vector_regs have a variable 'vlenb' size */
        size = __builtin_ctzl(cntx->vector.vlenb);
        size <<= KVM_REG_SIZE_SHIFT;
        for (i = 0; i < 32; i++) {
                reg = KVM_REG_RISCV | KVM_REG_RISCV_VECTOR | size |
                      KVM_REG_RISCV_VECTOR_REG(i);

                if (uindices) {
                        if (put_user(reg, uindices))
                                return -EFAULT;
                        uindices++;
                }
        }

        return n;
}

/*
 * kvm_riscv_vcpu_num_regs - how many registers do we present via KVM_GET/SET_ONE_REG
 *
 * This is for all registers.
 */
unsigned long kvm_riscv_vcpu_num_regs(struct kvm_vcpu *vcpu)
{
        unsigned long res = 0;

        res += num_config_regs(vcpu);
        res += num_core_regs();
        res += num_csr_regs(vcpu);
        res += num_timer_regs();
        res += num_fp_f_regs(vcpu);
        res += num_fp_d_regs(vcpu);
        res += num_vector_regs(vcpu);
        res += num_isa_ext_regs(vcpu);
        res += num_sbi_ext_regs(vcpu);
        res += num_sbi_regs(vcpu);

        return res;
}

/*
 * kvm_riscv_vcpu_copy_reg_indices - get indices of all registers.
 */
int kvm_riscv_vcpu_copy_reg_indices(struct kvm_vcpu *vcpu,
                                    u64 __user *uindices)
{
        int ret;

        ret = copy_config_reg_indices(vcpu, uindices);
        if (ret < 0)
                return ret;
        uindices += ret;

        ret = copy_core_reg_indices(uindices);
        if (ret < 0)
                return ret;
        uindices += ret;

        ret = copy_csr_reg_indices(vcpu, uindices);
        if (ret < 0)
                return ret;
        uindices += ret;

        ret = copy_timer_reg_indices(uindices);
        if (ret < 0)
                return ret;
        uindices += ret;

        ret = copy_fp_f_reg_indices(vcpu, uindices);
        if (ret < 0)
                return ret;
        uindices += ret;

        ret = copy_fp_d_reg_indices(vcpu, uindices);
        if (ret < 0)
                return ret;
        uindices += ret;

        ret = copy_vector_reg_indices(vcpu, uindices);
        if (ret < 0)
                return ret;
        uindices += ret;

        ret = copy_isa_ext_reg_indices(vcpu, uindices);
        if (ret < 0)
                return ret;
        uindices += ret;

        ret = copy_sbi_ext_reg_indices(vcpu, uindices);
        if (ret < 0)
                return ret;
        uindices += ret;

        ret = copy_sbi_reg_indices(vcpu, uindices);
        if (ret < 0)
                return ret;
        uindices += ret;

        return 0;
}
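
/*
 * Top-level KVM_SET_ONE_REG/KVM_GET_ONE_REG dispatchers keyed on the
 * register type encoded in reg->id.
 *
 * Illustrative userspace sketch (an assumption for documentation only,
 * not part of this file): on a 64-bit host, reading the base ISA config
 * register through a hypothetical VCPU file descriptor vcpu_fd would
 * look roughly like:
 *
 *     unsigned long isa;
 *     struct kvm_one_reg reg = {
 *             .id   = KVM_REG_RISCV | KVM_REG_SIZE_U64 |
 *                     KVM_REG_RISCV_CONFIG | KVM_REG_RISCV_CONFIG_REG(isa),
 *             .addr = (unsigned long)&isa,
 *     };
 *     ioctl(vcpu_fd, KVM_GET_ONE_REG, &reg);
 */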
int kvm_riscv_vcpu_set_reg(struct kvm_vcpu *vcpu,
                           const struct kvm_one_reg *reg)
{
        switch (reg->id & KVM_REG_RISCV_TYPE_MASK) {
        case KVM_REG_RISCV_CONFIG:
                return kvm_riscv_vcpu_set_reg_config(vcpu, reg);
        case KVM_REG_RISCV_CORE:
                return kvm_riscv_vcpu_set_reg_core(vcpu, reg);
        case KVM_REG_RISCV_CSR:
                return kvm_riscv_vcpu_set_reg_csr(vcpu, reg);
        case KVM_REG_RISCV_TIMER:
                return kvm_riscv_vcpu_set_reg_timer(vcpu, reg);
        case KVM_REG_RISCV_FP_F:
                return kvm_riscv_vcpu_set_reg_fp(vcpu, reg,
                                                 KVM_REG_RISCV_FP_F);
        case KVM_REG_RISCV_FP_D:
                return kvm_riscv_vcpu_set_reg_fp(vcpu, reg,
                                                 KVM_REG_RISCV_FP_D);
        case KVM_REG_RISCV_VECTOR:
                return kvm_riscv_vcpu_set_reg_vector(vcpu, reg);
        case KVM_REG_RISCV_ISA_EXT:
                return kvm_riscv_vcpu_set_reg_isa_ext(vcpu, reg);
        case KVM_REG_RISCV_SBI_EXT:
                return kvm_riscv_vcpu_set_reg_sbi_ext(vcpu, reg);
        case KVM_REG_RISCV_SBI_STATE:
                return kvm_riscv_vcpu_set_reg_sbi(vcpu, reg);
        default:
                break;
        }

        return -ENOENT;
}

int kvm_riscv_vcpu_get_reg(struct kvm_vcpu *vcpu,
                           const struct kvm_one_reg *reg)
{
        switch (reg->id & KVM_REG_RISCV_TYPE_MASK) {
        case KVM_REG_RISCV_CONFIG:
                return kvm_riscv_vcpu_get_reg_config(vcpu, reg);
        case KVM_REG_RISCV_CORE:
                return kvm_riscv_vcpu_get_reg_core(vcpu, reg);
        case KVM_REG_RISCV_CSR:
                return kvm_riscv_vcpu_get_reg_csr(vcpu, reg);
        case KVM_REG_RISCV_TIMER:
                return kvm_riscv_vcpu_get_reg_timer(vcpu, reg);
        case KVM_REG_RISCV_FP_F:
                return kvm_riscv_vcpu_get_reg_fp(vcpu, reg,
                                                 KVM_REG_RISCV_FP_F);
        case KVM_REG_RISCV_FP_D:
                return kvm_riscv_vcpu_get_reg_fp(vcpu, reg,
                                                 KVM_REG_RISCV_FP_D);
        case KVM_REG_RISCV_VECTOR:
                return kvm_riscv_vcpu_get_reg_vector(vcpu, reg);
        case KVM_REG_RISCV_ISA_EXT:
                return kvm_riscv_vcpu_get_reg_isa_ext(vcpu, reg);
        case KVM_REG_RISCV_SBI_EXT:
                return kvm_riscv_vcpu_get_reg_sbi_ext(vcpu, reg);
        case KVM_REG_RISCV_SBI_STATE:
                return kvm_riscv_vcpu_get_reg_sbi(vcpu, reg);
        default:
                break;
        }

        return -ENOENT;
}