// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright 2023 Jordan Niethe, IBM Corp. <jniethe5@gmail.com>
 *
 * Authors:
 *    Jordan Niethe <jniethe5@gmail.com>
 *
 * Description: KVM functions specific to running on Book 3S
 * processors as a NESTEDv2 guest.
 *
 */
  12. #include "linux/blk-mq.h"
  13. #include "linux/console.h"
  14. #include "linux/gfp_types.h"
  15. #include "linux/signal.h"
  16. #include <linux/kernel.h>
  17. #include <linux/kvm_host.h>
  18. #include <linux/pgtable.h>
  19. #include <asm/kvm_ppc.h>
  20. #include <asm/kvm_book3s.h>
  21. #include <asm/hvcall.h>
  22. #include <asm/pgalloc.h>
  23. #include <asm/reg.h>
  24. #include <asm/plpar_wrappers.h>
  25. #include <asm/guest-state-buffer.h>
  26. #include "trace_hv.h"
/*
 * Static-branch key read by kvmhv_is_nestedv2() fast paths.
 * NOTE(review): set elsewhere (not in this file) when running as a
 * NESTEDv2 guest — semantics inferred from the name; confirm at the
 * enable site.
 */
struct static_key_false __kvmhv_is_nestedv2 __read_mostly;
EXPORT_SYMBOL_GPL(__kvmhv_is_nestedv2);
  29. static size_t
  30. gs_msg_ops_kvmhv_nestedv2_config_get_size(struct kvmppc_gs_msg *gsm)
  31. {
  32. u16 ids[] = {
  33. KVMPPC_GSID_RUN_OUTPUT_MIN_SIZE,
  34. KVMPPC_GSID_RUN_INPUT,
  35. KVMPPC_GSID_RUN_OUTPUT,
  36. };
  37. size_t size = 0;
  38. for (int i = 0; i < ARRAY_SIZE(ids); i++)
  39. size += kvmppc_gse_total_size(kvmppc_gsid_size(ids[i]));
  40. return size;
  41. }
  42. static int
  43. gs_msg_ops_kvmhv_nestedv2_config_fill_info(struct kvmppc_gs_buff *gsb,
  44. struct kvmppc_gs_msg *gsm)
  45. {
  46. struct kvmhv_nestedv2_config *cfg;
  47. int rc;
  48. cfg = gsm->data;
  49. if (kvmppc_gsm_includes(gsm, KVMPPC_GSID_RUN_OUTPUT_MIN_SIZE)) {
  50. rc = kvmppc_gse_put_u64(gsb, KVMPPC_GSID_RUN_OUTPUT_MIN_SIZE,
  51. cfg->vcpu_run_output_size);
  52. if (rc < 0)
  53. return rc;
  54. }
  55. if (kvmppc_gsm_includes(gsm, KVMPPC_GSID_RUN_INPUT)) {
  56. rc = kvmppc_gse_put_buff_info(gsb, KVMPPC_GSID_RUN_INPUT,
  57. cfg->vcpu_run_input_cfg);
  58. if (rc < 0)
  59. return rc;
  60. }
  61. if (kvmppc_gsm_includes(gsm, KVMPPC_GSID_RUN_OUTPUT)) {
  62. rc = kvmppc_gse_put_buff_info(gsb, KVMPPC_GSID_RUN_OUTPUT,
  63. cfg->vcpu_run_output_cfg);
  64. if (rc < 0)
  65. return rc;
  66. }
  67. return 0;
  68. }
  69. static int
  70. gs_msg_ops_kvmhv_nestedv2_config_refresh_info(struct kvmppc_gs_msg *gsm,
  71. struct kvmppc_gs_buff *gsb)
  72. {
  73. struct kvmhv_nestedv2_config *cfg;
  74. struct kvmppc_gs_parser gsp = { 0 };
  75. struct kvmppc_gs_elem *gse;
  76. int rc;
  77. cfg = gsm->data;
  78. rc = kvmppc_gse_parse(&gsp, gsb);
  79. if (rc < 0)
  80. return rc;
  81. gse = kvmppc_gsp_lookup(&gsp, KVMPPC_GSID_RUN_OUTPUT_MIN_SIZE);
  82. if (gse)
  83. cfg->vcpu_run_output_size = kvmppc_gse_get_u64(gse);
  84. return 0;
  85. }
/* Message ops for the per-vcpu run-buffer configuration message. */
static struct kvmppc_gs_msg_ops config_msg_ops = {
	.get_size = gs_msg_ops_kvmhv_nestedv2_config_get_size,
	.fill_info = gs_msg_ops_kvmhv_nestedv2_config_fill_info,
	.refresh_info = gs_msg_ops_kvmhv_nestedv2_config_refresh_info,
};
  91. static size_t gs_msg_ops_vcpu_get_size(struct kvmppc_gs_msg *gsm)
  92. {
  93. struct kvmppc_gs_bitmap gsbm = { 0 };
  94. size_t size = 0;
  95. u16 iden;
  96. kvmppc_gsbm_fill(&gsbm);
  97. kvmppc_gsbm_for_each(&gsbm, iden)
  98. {
  99. switch (iden) {
  100. case KVMPPC_GSID_HOST_STATE_SIZE:
  101. case KVMPPC_GSID_RUN_OUTPUT_MIN_SIZE:
  102. case KVMPPC_GSID_PARTITION_TABLE:
  103. case KVMPPC_GSID_PROCESS_TABLE:
  104. case KVMPPC_GSID_RUN_INPUT:
  105. case KVMPPC_GSID_RUN_OUTPUT:
  106. break;
  107. default:
  108. size += kvmppc_gse_total_size(kvmppc_gsid_size(iden));
  109. }
  110. }
  111. return size;
  112. }
/*
 * Serialise vcpu register state into a guest state buffer.
 *
 * For every ID included in @gsm (and matching the message's wide/thread
 * scope), put the corresponding vcpu/vcore field into @gsb. Returns 0 on
 * success or the first negative error from a put helper.
 */
static int gs_msg_ops_vcpu_fill_info(struct kvmppc_gs_buff *gsb,
				     struct kvmppc_gs_msg *gsm)
{
	struct kvm_vcpu *vcpu;
	vector128 v;
	int rc, i;
	u16 iden;
	u32 arch_compat = 0;

	vcpu = gsm->data;

	kvmppc_gsm_for_each(gsm, iden)
	{
		rc = 0;

		/*
		 * Guest-wide (vcore) elements only go in wide messages and
		 * vice versa; skip IDs whose scope doesn't match.
		 */
		if ((gsm->flags & KVMPPC_GS_FLAGS_WIDE) !=
		    (kvmppc_gsid_flags(iden) & KVMPPC_GS_FLAGS_WIDE))
			continue;

		switch (iden) {
		case KVMPPC_GSID_DSCR:
			rc = kvmppc_gse_put_u64(gsb, iden, vcpu->arch.dscr);
			break;
		case KVMPPC_GSID_MMCRA:
			rc = kvmppc_gse_put_u64(gsb, iden, vcpu->arch.mmcra);
			break;
		case KVMPPC_GSID_HFSCR:
			rc = kvmppc_gse_put_u64(gsb, iden, vcpu->arch.hfscr);
			break;
		case KVMPPC_GSID_PURR:
			rc = kvmppc_gse_put_u64(gsb, iden, vcpu->arch.purr);
			break;
		case KVMPPC_GSID_SPURR:
			rc = kvmppc_gse_put_u64(gsb, iden, vcpu->arch.spurr);
			break;
		case KVMPPC_GSID_AMR:
			rc = kvmppc_gse_put_u64(gsb, iden, vcpu->arch.amr);
			break;
		case KVMPPC_GSID_UAMOR:
			rc = kvmppc_gse_put_u64(gsb, iden, vcpu->arch.uamor);
			break;
		case KVMPPC_GSID_SIAR:
			rc = kvmppc_gse_put_u64(gsb, iden, vcpu->arch.siar);
			break;
		case KVMPPC_GSID_SDAR:
			rc = kvmppc_gse_put_u64(gsb, iden, vcpu->arch.sdar);
			break;
		case KVMPPC_GSID_IAMR:
			rc = kvmppc_gse_put_u64(gsb, iden, vcpu->arch.iamr);
			break;
		case KVMPPC_GSID_DAWR0:
			rc = kvmppc_gse_put_u64(gsb, iden, vcpu->arch.dawr0);
			break;
		case KVMPPC_GSID_DAWR1:
			rc = kvmppc_gse_put_u64(gsb, iden, vcpu->arch.dawr1);
			break;
		case KVMPPC_GSID_DAWRX0:
			rc = kvmppc_gse_put_u32(gsb, iden, vcpu->arch.dawrx0);
			break;
		case KVMPPC_GSID_DAWRX1:
			rc = kvmppc_gse_put_u32(gsb, iden, vcpu->arch.dawrx1);
			break;
		case KVMPPC_GSID_DEXCR:
			rc = kvmppc_gse_put_u64(gsb, iden, vcpu->arch.dexcr);
			break;
		case KVMPPC_GSID_HASHKEYR:
			rc = kvmppc_gse_put_u64(gsb, iden, vcpu->arch.hashkeyr);
			break;
		case KVMPPC_GSID_HASHPKEYR:
			rc = kvmppc_gse_put_u64(gsb, iden, vcpu->arch.hashpkeyr);
			break;
		case KVMPPC_GSID_CIABR:
			rc = kvmppc_gse_put_u64(gsb, iden, vcpu->arch.ciabr);
			break;
		case KVMPPC_GSID_WORT:
			rc = kvmppc_gse_put_u32(gsb, iden, vcpu->arch.wort);
			break;
		case KVMPPC_GSID_PPR:
			rc = kvmppc_gse_put_u64(gsb, iden, vcpu->arch.ppr);
			break;
		case KVMPPC_GSID_PSPB:
			rc = kvmppc_gse_put_u32(gsb, iden, vcpu->arch.pspb);
			break;
		case KVMPPC_GSID_TAR:
			rc = kvmppc_gse_put_u64(gsb, iden, vcpu->arch.tar);
			break;
		case KVMPPC_GSID_FSCR:
			rc = kvmppc_gse_put_u64(gsb, iden, vcpu->arch.fscr);
			break;
		case KVMPPC_GSID_EBBHR:
			rc = kvmppc_gse_put_u64(gsb, iden, vcpu->arch.ebbhr);
			break;
		case KVMPPC_GSID_EBBRR:
			rc = kvmppc_gse_put_u64(gsb, iden, vcpu->arch.ebbrr);
			break;
		case KVMPPC_GSID_BESCR:
			rc = kvmppc_gse_put_u64(gsb, iden, vcpu->arch.bescr);
			break;
		case KVMPPC_GSID_IC:
			rc = kvmppc_gse_put_u64(gsb, iden, vcpu->arch.ic);
			break;
		case KVMPPC_GSID_CTRL:
			rc = kvmppc_gse_put_u64(gsb, iden, vcpu->arch.ctrl);
			break;
		case KVMPPC_GSID_PIDR:
			rc = kvmppc_gse_put_u32(gsb, iden, vcpu->arch.pid);
			break;
		case KVMPPC_GSID_AMOR: {
			/* AMOR is always presented as all-ones to the L0. */
			u64 amor = ~0;

			rc = kvmppc_gse_put_u64(gsb, iden, amor);
			break;
		}
		case KVMPPC_GSID_VRSAVE:
			rc = kvmppc_gse_put_u32(gsb, iden, vcpu->arch.vrsave);
			break;
		case KVMPPC_GSID_MMCR(0)... KVMPPC_GSID_MMCR(3):
			i = iden - KVMPPC_GSID_MMCR(0);
			rc = kvmppc_gse_put_u64(gsb, iden, vcpu->arch.mmcr[i]);
			break;
		case KVMPPC_GSID_SIER(0)... KVMPPC_GSID_SIER(2):
			i = iden - KVMPPC_GSID_SIER(0);
			rc = kvmppc_gse_put_u64(gsb, iden, vcpu->arch.sier[i]);
			break;
		case KVMPPC_GSID_PMC(0)... KVMPPC_GSID_PMC(5):
			i = iden - KVMPPC_GSID_PMC(0);
			rc = kvmppc_gse_put_u32(gsb, iden, vcpu->arch.pmc[i]);
			break;
		case KVMPPC_GSID_GPR(0)... KVMPPC_GSID_GPR(31):
			i = iden - KVMPPC_GSID_GPR(0);
			rc = kvmppc_gse_put_u64(gsb, iden,
						vcpu->arch.regs.gpr[i]);
			break;
		case KVMPPC_GSID_CR:
			rc = kvmppc_gse_put_u32(gsb, iden, vcpu->arch.regs.ccr);
			break;
		case KVMPPC_GSID_XER:
			rc = kvmppc_gse_put_u64(gsb, iden, vcpu->arch.regs.xer);
			break;
		case KVMPPC_GSID_CTR:
			rc = kvmppc_gse_put_u64(gsb, iden, vcpu->arch.regs.ctr);
			break;
		case KVMPPC_GSID_LR:
			rc = kvmppc_gse_put_u64(gsb, iden,
						vcpu->arch.regs.link);
			break;
		case KVMPPC_GSID_NIA:
			rc = kvmppc_gse_put_u64(gsb, iden, vcpu->arch.regs.nip);
			break;
		case KVMPPC_GSID_SRR0:
			rc = kvmppc_gse_put_u64(gsb, iden,
						vcpu->arch.shregs.srr0);
			break;
		case KVMPPC_GSID_SRR1:
			rc = kvmppc_gse_put_u64(gsb, iden,
						vcpu->arch.shregs.srr1);
			break;
		case KVMPPC_GSID_SPRG0:
			rc = kvmppc_gse_put_u64(gsb, iden,
						vcpu->arch.shregs.sprg0);
			break;
		case KVMPPC_GSID_SPRG1:
			rc = kvmppc_gse_put_u64(gsb, iden,
						vcpu->arch.shregs.sprg1);
			break;
		case KVMPPC_GSID_SPRG2:
			rc = kvmppc_gse_put_u64(gsb, iden,
						vcpu->arch.shregs.sprg2);
			break;
		case KVMPPC_GSID_SPRG3:
			rc = kvmppc_gse_put_u64(gsb, iden,
						vcpu->arch.shregs.sprg3);
			break;
		case KVMPPC_GSID_DAR:
			rc = kvmppc_gse_put_u64(gsb, iden,
						vcpu->arch.shregs.dar);
			break;
		case KVMPPC_GSID_DSISR:
			rc = kvmppc_gse_put_u32(gsb, iden,
						vcpu->arch.shregs.dsisr);
			break;
		case KVMPPC_GSID_MSR:
			rc = kvmppc_gse_put_u64(gsb, iden,
						vcpu->arch.shregs.msr);
			break;
		/* Guest-wide (vcore) values. */
		case KVMPPC_GSID_VTB:
			rc = kvmppc_gse_put_u64(gsb, iden,
						vcpu->arch.vcore->vtb);
			break;
		case KVMPPC_GSID_DPDES:
			rc = kvmppc_gse_put_u64(gsb, iden,
						vcpu->arch.vcore->dpdes);
			break;
		case KVMPPC_GSID_LPCR:
			rc = kvmppc_gse_put_u64(gsb, iden,
						vcpu->arch.vcore->lpcr);
			break;
		case KVMPPC_GSID_TB_OFFSET:
			rc = kvmppc_gse_put_u64(gsb, iden,
						vcpu->arch.vcore->tb_offset);
			break;
		case KVMPPC_GSID_FPSCR:
			rc = kvmppc_gse_put_u64(gsb, iden, vcpu->arch.fp.fpscr);
			break;
		/* FP registers are sent as 128-bit vector elements. */
		case KVMPPC_GSID_VSRS(0)... KVMPPC_GSID_VSRS(31):
			i = iden - KVMPPC_GSID_VSRS(0);
			memcpy(&v, &vcpu->arch.fp.fpr[i],
			       sizeof(vcpu->arch.fp.fpr[i]));
			rc = kvmppc_gse_put_vector128(gsb, iden, &v);
			break;
#ifdef CONFIG_VSX
		case KVMPPC_GSID_VSCR:
			rc = kvmppc_gse_put_u32(gsb, iden,
						vcpu->arch.vr.vscr.u[3]);
			break;
		case KVMPPC_GSID_VSRS(32)... KVMPPC_GSID_VSRS(63):
			i = iden - KVMPPC_GSID_VSRS(32);
			rc = kvmppc_gse_put_vector128(gsb, iden,
						      &vcpu->arch.vr.vr[i]);
			break;
#endif
		case KVMPPC_GSID_DEC_EXPIRY_TB: {
			/* Convert from guest to host timebase. */
			u64 dw;

			dw = vcpu->arch.dec_expires -
				vcpu->arch.vcore->tb_offset;
			rc = kvmppc_gse_put_u64(gsb, iden, dw);
			break;
		}
		case KVMPPC_GSID_LOGICAL_PVR:
			/*
			 * Though 'arch_compat == 0' would mean the default
			 * compatibility, arch_compat, being a Guest Wide
			 * Element, cannot be filled with a value of 0 in GSB
			 * as this would result into a kernel trap.
			 * Hence, when `arch_compat == 0`, arch_compat should
			 * default to L1's PVR.
			 */
			if (!vcpu->arch.vcore->arch_compat) {
				if (cpu_has_feature(CPU_FTR_ARCH_31))
					arch_compat = PVR_ARCH_31;
				else if (cpu_has_feature(CPU_FTR_ARCH_300))
					arch_compat = PVR_ARCH_300;
			} else {
				arch_compat = vcpu->arch.vcore->arch_compat;
			}
			rc = kvmppc_gse_put_u32(gsb, iden, arch_compat);
			break;
		}

		if (rc < 0)
			return rc;
	}

	return 0;
}
/*
 * Deserialise vcpu register state from a guest state buffer.
 *
 * Parse @gsb and copy every recognised element back into the vcpu/vcore,
 * marking each refreshed ID valid in the io->valids cache so a later
 * __kvmhv_nestedv2_cached_reload() can skip the round trip to the L0.
 * Unknown IDs are skipped (and not marked valid).
 */
static int gs_msg_ops_vcpu_refresh_info(struct kvmppc_gs_msg *gsm,
					struct kvmppc_gs_buff *gsb)
{
	struct kvmppc_gs_parser gsp = { 0 };
	struct kvmhv_nestedv2_io *io;
	struct kvmppc_gs_bitmap *valids;
	struct kvm_vcpu *vcpu;
	struct kvmppc_gs_elem *gse;
	vector128 v;
	int rc, i;
	u16 iden;

	vcpu = gsm->data;

	rc = kvmppc_gse_parse(&gsp, gsb);
	if (rc < 0)
		return rc;

	io = &vcpu->arch.nestedv2_io;
	valids = &io->valids;

	kvmppc_gsp_for_each(&gsp, iden, gse)
	{
		switch (iden) {
		case KVMPPC_GSID_DSCR:
			vcpu->arch.dscr = kvmppc_gse_get_u64(gse);
			break;
		case KVMPPC_GSID_MMCRA:
			vcpu->arch.mmcra = kvmppc_gse_get_u64(gse);
			break;
		case KVMPPC_GSID_HFSCR:
			vcpu->arch.hfscr = kvmppc_gse_get_u64(gse);
			break;
		case KVMPPC_GSID_PURR:
			vcpu->arch.purr = kvmppc_gse_get_u64(gse);
			break;
		case KVMPPC_GSID_SPURR:
			vcpu->arch.spurr = kvmppc_gse_get_u64(gse);
			break;
		case KVMPPC_GSID_AMR:
			vcpu->arch.amr = kvmppc_gse_get_u64(gse);
			break;
		case KVMPPC_GSID_UAMOR:
			vcpu->arch.uamor = kvmppc_gse_get_u64(gse);
			break;
		case KVMPPC_GSID_SIAR:
			vcpu->arch.siar = kvmppc_gse_get_u64(gse);
			break;
		case KVMPPC_GSID_SDAR:
			vcpu->arch.sdar = kvmppc_gse_get_u64(gse);
			break;
		case KVMPPC_GSID_IAMR:
			vcpu->arch.iamr = kvmppc_gse_get_u64(gse);
			break;
		case KVMPPC_GSID_DAWR0:
			vcpu->arch.dawr0 = kvmppc_gse_get_u64(gse);
			break;
		case KVMPPC_GSID_DAWR1:
			vcpu->arch.dawr1 = kvmppc_gse_get_u64(gse);
			break;
		case KVMPPC_GSID_DAWRX0:
			vcpu->arch.dawrx0 = kvmppc_gse_get_u32(gse);
			break;
		case KVMPPC_GSID_DAWRX1:
			vcpu->arch.dawrx1 = kvmppc_gse_get_u32(gse);
			break;
		case KVMPPC_GSID_DEXCR:
			vcpu->arch.dexcr = kvmppc_gse_get_u64(gse);
			break;
		case KVMPPC_GSID_HASHKEYR:
			vcpu->arch.hashkeyr = kvmppc_gse_get_u64(gse);
			break;
		case KVMPPC_GSID_HASHPKEYR:
			vcpu->arch.hashpkeyr = kvmppc_gse_get_u64(gse);
			break;
		case KVMPPC_GSID_CIABR:
			vcpu->arch.ciabr = kvmppc_gse_get_u64(gse);
			break;
		case KVMPPC_GSID_WORT:
			vcpu->arch.wort = kvmppc_gse_get_u32(gse);
			break;
		case KVMPPC_GSID_PPR:
			vcpu->arch.ppr = kvmppc_gse_get_u64(gse);
			break;
		case KVMPPC_GSID_PSPB:
			vcpu->arch.pspb = kvmppc_gse_get_u32(gse);
			break;
		case KVMPPC_GSID_TAR:
			vcpu->arch.tar = kvmppc_gse_get_u64(gse);
			break;
		case KVMPPC_GSID_FSCR:
			vcpu->arch.fscr = kvmppc_gse_get_u64(gse);
			break;
		case KVMPPC_GSID_EBBHR:
			vcpu->arch.ebbhr = kvmppc_gse_get_u64(gse);
			break;
		case KVMPPC_GSID_EBBRR:
			vcpu->arch.ebbrr = kvmppc_gse_get_u64(gse);
			break;
		case KVMPPC_GSID_BESCR:
			vcpu->arch.bescr = kvmppc_gse_get_u64(gse);
			break;
		case KVMPPC_GSID_IC:
			vcpu->arch.ic = kvmppc_gse_get_u64(gse);
			break;
		case KVMPPC_GSID_CTRL:
			vcpu->arch.ctrl = kvmppc_gse_get_u64(gse);
			break;
		case KVMPPC_GSID_PIDR:
			vcpu->arch.pid = kvmppc_gse_get_u32(gse);
			break;
		case KVMPPC_GSID_AMOR:
			/* AMOR is write-only from our side; nothing cached. */
			break;
		case KVMPPC_GSID_VRSAVE:
			vcpu->arch.vrsave = kvmppc_gse_get_u32(gse);
			break;
		case KVMPPC_GSID_MMCR(0)... KVMPPC_GSID_MMCR(3):
			i = iden - KVMPPC_GSID_MMCR(0);
			vcpu->arch.mmcr[i] = kvmppc_gse_get_u64(gse);
			break;
		case KVMPPC_GSID_SIER(0)... KVMPPC_GSID_SIER(2):
			i = iden - KVMPPC_GSID_SIER(0);
			vcpu->arch.sier[i] = kvmppc_gse_get_u64(gse);
			break;
		case KVMPPC_GSID_PMC(0)... KVMPPC_GSID_PMC(5):
			i = iden - KVMPPC_GSID_PMC(0);
			vcpu->arch.pmc[i] = kvmppc_gse_get_u32(gse);
			break;
		case KVMPPC_GSID_GPR(0)... KVMPPC_GSID_GPR(31):
			i = iden - KVMPPC_GSID_GPR(0);
			vcpu->arch.regs.gpr[i] = kvmppc_gse_get_u64(gse);
			break;
		case KVMPPC_GSID_CR:
			vcpu->arch.regs.ccr = kvmppc_gse_get_u32(gse);
			break;
		case KVMPPC_GSID_XER:
			vcpu->arch.regs.xer = kvmppc_gse_get_u64(gse);
			break;
		case KVMPPC_GSID_CTR:
			vcpu->arch.regs.ctr = kvmppc_gse_get_u64(gse);
			break;
		case KVMPPC_GSID_LR:
			vcpu->arch.regs.link = kvmppc_gse_get_u64(gse);
			break;
		case KVMPPC_GSID_NIA:
			vcpu->arch.regs.nip = kvmppc_gse_get_u64(gse);
			break;
		case KVMPPC_GSID_SRR0:
			vcpu->arch.shregs.srr0 = kvmppc_gse_get_u64(gse);
			break;
		case KVMPPC_GSID_SRR1:
			vcpu->arch.shregs.srr1 = kvmppc_gse_get_u64(gse);
			break;
		case KVMPPC_GSID_SPRG0:
			vcpu->arch.shregs.sprg0 = kvmppc_gse_get_u64(gse);
			break;
		case KVMPPC_GSID_SPRG1:
			vcpu->arch.shregs.sprg1 = kvmppc_gse_get_u64(gse);
			break;
		case KVMPPC_GSID_SPRG2:
			vcpu->arch.shregs.sprg2 = kvmppc_gse_get_u64(gse);
			break;
		case KVMPPC_GSID_SPRG3:
			vcpu->arch.shregs.sprg3 = kvmppc_gse_get_u64(gse);
			break;
		case KVMPPC_GSID_DAR:
			vcpu->arch.shregs.dar = kvmppc_gse_get_u64(gse);
			break;
		case KVMPPC_GSID_DSISR:
			vcpu->arch.shregs.dsisr = kvmppc_gse_get_u32(gse);
			break;
		case KVMPPC_GSID_MSR:
			vcpu->arch.shregs.msr = kvmppc_gse_get_u64(gse);
			break;
		/* Guest-wide (vcore) values. */
		case KVMPPC_GSID_VTB:
			vcpu->arch.vcore->vtb = kvmppc_gse_get_u64(gse);
			break;
		case KVMPPC_GSID_DPDES:
			vcpu->arch.vcore->dpdes = kvmppc_gse_get_u64(gse);
			break;
		case KVMPPC_GSID_LPCR:
			vcpu->arch.vcore->lpcr = kvmppc_gse_get_u64(gse);
			break;
		case KVMPPC_GSID_TB_OFFSET:
			vcpu->arch.vcore->tb_offset = kvmppc_gse_get_u64(gse);
			break;
		case KVMPPC_GSID_FPSCR:
			vcpu->arch.fp.fpscr = kvmppc_gse_get_u64(gse);
			break;
		/* FP registers travel as 128-bit vector elements. */
		case KVMPPC_GSID_VSRS(0)... KVMPPC_GSID_VSRS(31):
			kvmppc_gse_get_vector128(gse, &v);
			i = iden - KVMPPC_GSID_VSRS(0);
			memcpy(&vcpu->arch.fp.fpr[i], &v,
			       sizeof(vcpu->arch.fp.fpr[i]));
			break;
#ifdef CONFIG_VSX
		case KVMPPC_GSID_VSCR:
			vcpu->arch.vr.vscr.u[3] = kvmppc_gse_get_u32(gse);
			break;
		case KVMPPC_GSID_VSRS(32)... KVMPPC_GSID_VSRS(63):
			i = iden - KVMPPC_GSID_VSRS(32);
			kvmppc_gse_get_vector128(gse, &vcpu->arch.vr.vr[i]);
			break;
#endif
		/* Interrupt/fault information reported by the L0. */
		case KVMPPC_GSID_HDAR:
			vcpu->arch.fault_dar = kvmppc_gse_get_u64(gse);
			break;
		case KVMPPC_GSID_HDSISR:
			vcpu->arch.fault_dsisr = kvmppc_gse_get_u32(gse);
			break;
		case KVMPPC_GSID_ASDR:
			vcpu->arch.fault_gpa = kvmppc_gse_get_u64(gse);
			break;
		case KVMPPC_GSID_HEIR:
			vcpu->arch.emul_inst = kvmppc_gse_get_u64(gse);
			break;
		case KVMPPC_GSID_DEC_EXPIRY_TB: {
			/* Convert from host back to guest timebase. */
			u64 dw;

			dw = kvmppc_gse_get_u64(gse);
			vcpu->arch.dec_expires =
				dw + vcpu->arch.vcore->tb_offset;
			break;
		}
		case KVMPPC_GSID_LOGICAL_PVR:
			vcpu->arch.vcore->arch_compat = kvmppc_gse_get_u32(gse);
			break;
		default:
			/* Unknown ID: skip without marking it valid. */
			continue;
		}
		kvmppc_gsbm_set(valids, iden);
	}

	return 0;
}
/* Message ops for vcpu (and, with KVMPPC_GS_FLAGS_WIDE, vcore) state. */
static struct kvmppc_gs_msg_ops vcpu_message_ops = {
	.get_size = gs_msg_ops_vcpu_get_size,
	.fill_info = gs_msg_ops_vcpu_fill_info,
	.refresh_info = gs_msg_ops_vcpu_refresh_info,
};
/*
 * Allocate and register with the L0 everything a NESTEDv2 vcpu needs:
 * negotiate the minimum run-output buffer size, allocate and advertise the
 * run input/output buffers, and build the vcpu/vcore messages. On failure
 * the partially-constructed state is torn down via the goto chain; the
 * temporary config message and buffer are always freed.
 */
static int kvmhv_nestedv2_host_create(struct kvm_vcpu *vcpu,
				      struct kvmhv_nestedv2_io *io)
{
	struct kvmhv_nestedv2_config *cfg;
	struct kvmppc_gs_buff *gsb, *vcpu_run_output, *vcpu_run_input;
	unsigned long guest_id, vcpu_id;
	struct kvmppc_gs_msg *gsm, *vcpu_message, *vcore_message;
	int rc;

	cfg = &io->cfg;
	guest_id = vcpu->kvm->arch.lpid;
	vcpu_id = vcpu->vcpu_id;

	/* Temporary wide message/buffer used only for configuration. */
	gsm = kvmppc_gsm_new(&config_msg_ops, cfg, KVMPPC_GS_FLAGS_WIDE,
			     GFP_KERNEL);
	if (!gsm) {
		rc = -ENOMEM;
		goto err;
	}

	gsb = kvmppc_gsb_new(kvmppc_gsm_size(gsm), guest_id, vcpu_id,
			     GFP_KERNEL);
	if (!gsb) {
		rc = -ENOMEM;
		goto free_gsm;
	}

	/* Ask the L0 how big the run output buffer has to be at minimum. */
	rc = kvmppc_gsb_receive_datum(gsb, gsm,
				      KVMPPC_GSID_RUN_OUTPUT_MIN_SIZE);
	if (rc < 0) {
		pr_err("KVM-NESTEDv2: couldn't get vcpu run output buffer minimum size\n");
		goto free_gsb;
	}

	vcpu_run_output = kvmppc_gsb_new(cfg->vcpu_run_output_size, guest_id,
					 vcpu_id, GFP_KERNEL);
	if (!vcpu_run_output) {
		rc = -ENOMEM;
		goto free_gsb;
	}

	cfg->vcpu_run_output_cfg.address = kvmppc_gsb_paddress(vcpu_run_output);
	cfg->vcpu_run_output_cfg.size = kvmppc_gsb_capacity(vcpu_run_output);
	io->vcpu_run_output = vcpu_run_output;

	/* Buffer descriptors are thread scoped: drop the wide flag. */
	gsm->flags = 0;
	rc = kvmppc_gsb_send_datum(gsb, gsm, KVMPPC_GSID_RUN_OUTPUT);
	if (rc < 0) {
		pr_err("KVM-NESTEDv2: couldn't set vcpu run output buffer\n");
		goto free_gs_out;
	}

	vcpu_message = kvmppc_gsm_new(&vcpu_message_ops, vcpu, 0, GFP_KERNEL);
	if (!vcpu_message) {
		rc = -ENOMEM;
		goto free_gs_out;
	}
	kvmppc_gsm_include_all(vcpu_message);

	io->vcpu_message = vcpu_message;

	/* The run input buffer must be able to hold a full vcpu message. */
	vcpu_run_input = kvmppc_gsb_new(kvmppc_gsm_size(vcpu_message), guest_id,
					vcpu_id, GFP_KERNEL);
	if (!vcpu_run_input) {
		rc = -ENOMEM;
		goto free_vcpu_message;
	}

	io->vcpu_run_input = vcpu_run_input;
	cfg->vcpu_run_input_cfg.address = kvmppc_gsb_paddress(vcpu_run_input);
	cfg->vcpu_run_input_cfg.size = kvmppc_gsb_capacity(vcpu_run_input);
	rc = kvmppc_gsb_send_datum(gsb, gsm, KVMPPC_GSID_RUN_INPUT);
	if (rc < 0) {
		pr_err("KVM-NESTEDv2: couldn't set vcpu run input buffer\n");
		goto free_vcpu_run_input;
	}

	/* Guest-wide message; LOGICAL_PVR is handled separately. */
	vcore_message = kvmppc_gsm_new(&vcpu_message_ops, vcpu,
				       KVMPPC_GS_FLAGS_WIDE, GFP_KERNEL);
	if (!vcore_message) {
		rc = -ENOMEM;
		goto free_vcpu_run_input;
	}

	kvmppc_gsm_include_all(vcore_message);
	kvmppc_gsbm_clear(&vcore_message->bitmap, KVMPPC_GSID_LOGICAL_PVR);
	io->vcore_message = vcore_message;

	/* Everything starts out valid (cached) until marked dirty. */
	kvmppc_gsbm_fill(&io->valids);
	kvmppc_gsm_free(gsm);
	kvmppc_gsb_free(gsb);
	return 0;

free_vcpu_run_input:
	kvmppc_gsb_free(vcpu_run_input);
free_vcpu_message:
	kvmppc_gsm_free(vcpu_message);
free_gs_out:
	kvmppc_gsb_free(vcpu_run_output);
free_gsb:
	kvmppc_gsb_free(gsb);
free_gsm:
	kvmppc_gsm_free(gsm);
err:
	return rc;
}
  686. /**
  687. * __kvmhv_nestedv2_mark_dirty() - mark a Guest State ID to be sent to the host
  688. * @vcpu: vcpu
  689. * @iden: guest state ID
  690. *
  691. * Mark a guest state ID as having been changed by the L1 host and thus
  692. * the new value must be sent to the L0 hypervisor. See kvmhv_nestedv2_flush_vcpu()
  693. */
  694. int __kvmhv_nestedv2_mark_dirty(struct kvm_vcpu *vcpu, u16 iden)
  695. {
  696. struct kvmhv_nestedv2_io *io;
  697. struct kvmppc_gs_bitmap *valids;
  698. struct kvmppc_gs_msg *gsm;
  699. if (!iden)
  700. return 0;
  701. io = &vcpu->arch.nestedv2_io;
  702. valids = &io->valids;
  703. gsm = io->vcpu_message;
  704. kvmppc_gsm_include(gsm, iden);
  705. gsm = io->vcore_message;
  706. kvmppc_gsm_include(gsm, iden);
  707. kvmppc_gsbm_set(valids, iden);
  708. return 0;
  709. }
  710. EXPORT_SYMBOL_GPL(__kvmhv_nestedv2_mark_dirty);
  711. /**
  712. * __kvmhv_nestedv2_cached_reload() - reload a Guest State ID from the host
  713. * @vcpu: vcpu
  714. * @iden: guest state ID
  715. *
  716. * Reload the value for the guest state ID from the L0 host into the L1 host.
  717. * This is cached so that going out to the L0 host only happens if necessary.
  718. */
  719. int __kvmhv_nestedv2_cached_reload(struct kvm_vcpu *vcpu, u16 iden)
  720. {
  721. struct kvmhv_nestedv2_io *io;
  722. struct kvmppc_gs_bitmap *valids;
  723. struct kvmppc_gs_buff *gsb;
  724. struct kvmppc_gs_msg gsm;
  725. int rc;
  726. if (!iden)
  727. return 0;
  728. io = &vcpu->arch.nestedv2_io;
  729. valids = &io->valids;
  730. if (kvmppc_gsbm_test(valids, iden))
  731. return 0;
  732. gsb = io->vcpu_run_input;
  733. kvmppc_gsm_init(&gsm, &vcpu_message_ops, vcpu, kvmppc_gsid_flags(iden));
  734. rc = kvmppc_gsb_receive_datum(gsb, &gsm, iden);
  735. if (rc < 0) {
  736. pr_err("KVM-NESTEDv2: couldn't get GSID: 0x%x\n", iden);
  737. return rc;
  738. }
  739. return 0;
  740. }
  741. EXPORT_SYMBOL_GPL(__kvmhv_nestedv2_cached_reload);
  742. /**
  743. * kvmhv_nestedv2_flush_vcpu() - send modified Guest State IDs to the host
  744. * @vcpu: vcpu
  745. * @time_limit: hdec expiry tb
  746. *
  747. * Send the values marked by __kvmhv_nestedv2_mark_dirty() to the L0 host.
  748. * Thread wide values are copied to the H_GUEST_RUN_VCPU input buffer. Guest
  749. * wide values need to be sent with H_GUEST_SET first.
  750. *
  751. * The hdec tb offset is always sent to L0 host.
  752. */
  753. int kvmhv_nestedv2_flush_vcpu(struct kvm_vcpu *vcpu, u64 time_limit)
  754. {
  755. struct kvmhv_nestedv2_io *io;
  756. struct kvmppc_gs_buff *gsb;
  757. struct kvmppc_gs_msg *gsm;
  758. int rc;
  759. io = &vcpu->arch.nestedv2_io;
  760. gsb = io->vcpu_run_input;
  761. gsm = io->vcore_message;
  762. rc = kvmppc_gsb_send_data(gsb, gsm);
  763. if (rc < 0) {
  764. pr_err("KVM-NESTEDv2: couldn't set guest wide elements\n");
  765. return rc;
  766. }
  767. gsm = io->vcpu_message;
  768. kvmppc_gsb_reset(gsb);
  769. rc = kvmppc_gsm_fill_info(gsm, gsb);
  770. if (rc < 0) {
  771. pr_err("KVM-NESTEDv2: couldn't fill vcpu run input buffer\n");
  772. return rc;
  773. }
  774. rc = kvmppc_gse_put_u64(gsb, KVMPPC_GSID_HDEC_EXPIRY_TB, time_limit);
  775. if (rc < 0)
  776. return rc;
  777. return 0;
  778. }
  779. EXPORT_SYMBOL_GPL(kvmhv_nestedv2_flush_vcpu);
/**
 * kvmhv_nestedv2_set_ptbl_entry() - send partition and process table state to
 * L0 host
 * @lpid: guest id
 * @dw0: partition table double word
 * @dw1: process table double word
 */
int kvmhv_nestedv2_set_ptbl_entry(unsigned long lpid, u64 dw0, u64 dw1)
{
	struct kvmppc_gs_part_table patbl;
	struct kvmppc_gs_proc_table prtbl;
	struct kvmppc_gs_buff *gsb;
	size_t size;
	int rc;

	/* One buffer big enough for both table elements plus the header. */
	size = kvmppc_gse_total_size(
		       kvmppc_gsid_size(KVMPPC_GSID_PARTITION_TABLE)) +
	       kvmppc_gse_total_size(
		       kvmppc_gsid_size(KVMPPC_GSID_PROCESS_TABLE)) +
	       sizeof(struct kvmppc_gs_header);
	gsb = kvmppc_gsb_new(size, lpid, 0, GFP_KERNEL);
	if (!gsb)
		return -ENOMEM;

	/*
	 * Unpack PATE dword 0: radix tree base, effective-address bits
	 * (reassembled from the split RTS field, biased by 31) and the
	 * root page directory size (RPDS is a log2-minus-3 encoding).
	 */
	patbl.address = dw0 & RPDB_MASK;
	patbl.ea_bits = ((((dw0 & RTS1_MASK) >> (RTS1_SHIFT - 3)) |
			  ((dw0 & RTS2_MASK) >> RTS2_SHIFT)) +
			 31);
	patbl.gpd_size = 1ul << ((dw0 & RPDS_MASK) + 3);
	rc = kvmppc_gse_put_part_table(gsb, KVMPPC_GSID_PARTITION_TABLE, patbl);
	if (rc < 0)
		goto free_gsb;

	/* PATE dword 1: process table base and size (log2-minus-12). */
	prtbl.address = dw1 & PRTB_MASK;
	prtbl.gpd_size = 1ul << ((dw1 & PRTS_MASK) + 12);
	rc = kvmppc_gse_put_proc_table(gsb, KVMPPC_GSID_PROCESS_TABLE, prtbl);
	if (rc < 0)
		goto free_gsb;

	/* Both tables are guest wide state. */
	rc = kvmppc_gsb_send(gsb, KVMPPC_GS_FLAGS_WIDE);
	if (rc < 0) {
		pr_err("KVM-NESTEDv2: couldn't set the PATE\n");
		goto free_gsb;
	}

	kvmppc_gsb_free(gsb);
	return 0;

free_gsb:
	kvmppc_gsb_free(gsb);
	return rc;
}
EXPORT_SYMBOL_GPL(kvmhv_nestedv2_set_ptbl_entry);
  827. /**
  828. * kvmhv_nestedv2_set_vpa() - register L2 VPA with L0
  829. * @vcpu: vcpu
  830. * @vpa: L1 logical real address
  831. */
  832. int kvmhv_nestedv2_set_vpa(struct kvm_vcpu *vcpu, unsigned long vpa)
  833. {
  834. struct kvmhv_nestedv2_io *io;
  835. struct kvmppc_gs_buff *gsb;
  836. int rc = 0;
  837. io = &vcpu->arch.nestedv2_io;
  838. gsb = io->vcpu_run_input;
  839. kvmppc_gsb_reset(gsb);
  840. rc = kvmppc_gse_put_u64(gsb, KVMPPC_GSID_VPA, vpa);
  841. if (rc < 0)
  842. goto out;
  843. rc = kvmppc_gsb_send(gsb, 0);
  844. if (rc < 0)
  845. pr_err("KVM-NESTEDv2: couldn't register the L2 VPA (rc=%d)\n", rc);
  846. out:
  847. kvmppc_gsb_reset(gsb);
  848. return rc;
  849. }
  850. EXPORT_SYMBOL_GPL(kvmhv_nestedv2_set_vpa);
  851. /**
  852. * kvmhv_nestedv2_parse_output() - receive values from H_GUEST_RUN_VCPU output
  853. * @vcpu: vcpu
  854. *
  855. * Parse the output buffer from H_GUEST_RUN_VCPU to update vcpu.
  856. */
  857. int kvmhv_nestedv2_parse_output(struct kvm_vcpu *vcpu)
  858. {
  859. struct kvmhv_nestedv2_io *io;
  860. struct kvmppc_gs_buff *gsb;
  861. struct kvmppc_gs_msg gsm;
  862. io = &vcpu->arch.nestedv2_io;
  863. gsb = io->vcpu_run_output;
  864. vcpu->arch.fault_dar = 0;
  865. vcpu->arch.fault_dsisr = 0;
  866. vcpu->arch.fault_gpa = 0;
  867. vcpu->arch.emul_inst = KVM_INST_FETCH_FAILED;
  868. kvmppc_gsm_init(&gsm, &vcpu_message_ops, vcpu, 0);
  869. return kvmppc_gsm_refresh_info(&gsm, gsb);
  870. }
  871. EXPORT_SYMBOL_GPL(kvmhv_nestedv2_parse_output);
  872. static void kvmhv_nestedv2_host_free(struct kvm_vcpu *vcpu,
  873. struct kvmhv_nestedv2_io *io)
  874. {
  875. kvmppc_gsm_free(io->vcpu_message);
  876. kvmppc_gsm_free(io->vcore_message);
  877. kvmppc_gsb_free(io->vcpu_run_input);
  878. kvmppc_gsb_free(io->vcpu_run_output);
  879. }
  880. int __kvmhv_nestedv2_reload_ptregs(struct kvm_vcpu *vcpu, struct pt_regs *regs)
  881. {
  882. struct kvmhv_nestedv2_io *io;
  883. struct kvmppc_gs_bitmap *valids;
  884. struct kvmppc_gs_buff *gsb;
  885. struct kvmppc_gs_msg gsm;
  886. int rc = 0;
  887. io = &vcpu->arch.nestedv2_io;
  888. valids = &io->valids;
  889. gsb = io->vcpu_run_input;
  890. kvmppc_gsm_init(&gsm, &vcpu_message_ops, vcpu, 0);
  891. for (int i = 0; i < 32; i++) {
  892. if (!kvmppc_gsbm_test(valids, KVMPPC_GSID_GPR(i)))
  893. kvmppc_gsm_include(&gsm, KVMPPC_GSID_GPR(i));
  894. }
  895. if (!kvmppc_gsbm_test(valids, KVMPPC_GSID_CR))
  896. kvmppc_gsm_include(&gsm, KVMPPC_GSID_CR);
  897. if (!kvmppc_gsbm_test(valids, KVMPPC_GSID_XER))
  898. kvmppc_gsm_include(&gsm, KVMPPC_GSID_XER);
  899. if (!kvmppc_gsbm_test(valids, KVMPPC_GSID_CTR))
  900. kvmppc_gsm_include(&gsm, KVMPPC_GSID_CTR);
  901. if (!kvmppc_gsbm_test(valids, KVMPPC_GSID_LR))
  902. kvmppc_gsm_include(&gsm, KVMPPC_GSID_LR);
  903. if (!kvmppc_gsbm_test(valids, KVMPPC_GSID_NIA))
  904. kvmppc_gsm_include(&gsm, KVMPPC_GSID_NIA);
  905. rc = kvmppc_gsb_receive_data(gsb, &gsm);
  906. if (rc < 0)
  907. pr_err("KVM-NESTEDv2: couldn't reload ptregs\n");
  908. return rc;
  909. }
  910. EXPORT_SYMBOL_GPL(__kvmhv_nestedv2_reload_ptregs);
  911. int __kvmhv_nestedv2_mark_dirty_ptregs(struct kvm_vcpu *vcpu,
  912. struct pt_regs *regs)
  913. {
  914. for (int i = 0; i < 32; i++)
  915. kvmhv_nestedv2_mark_dirty(vcpu, KVMPPC_GSID_GPR(i));
  916. kvmhv_nestedv2_mark_dirty(vcpu, KVMPPC_GSID_CR);
  917. kvmhv_nestedv2_mark_dirty(vcpu, KVMPPC_GSID_XER);
  918. kvmhv_nestedv2_mark_dirty(vcpu, KVMPPC_GSID_CTR);
  919. kvmhv_nestedv2_mark_dirty(vcpu, KVMPPC_GSID_LR);
  920. kvmhv_nestedv2_mark_dirty(vcpu, KVMPPC_GSID_NIA);
  921. return 0;
  922. }
  923. EXPORT_SYMBOL_GPL(__kvmhv_nestedv2_mark_dirty_ptregs);
  924. /**
  925. * kvmhv_nestedv2_vcpu_create() - create nested vcpu for the NESTEDv2 API
  926. * @vcpu: vcpu
  927. * @io: NESTEDv2 nested io state
  928. *
  929. * Parse the output buffer from H_GUEST_RUN_VCPU to update vcpu.
  930. */
  931. int kvmhv_nestedv2_vcpu_create(struct kvm_vcpu *vcpu,
  932. struct kvmhv_nestedv2_io *io)
  933. {
  934. long rc;
  935. rc = plpar_guest_create_vcpu(0, vcpu->kvm->arch.lpid, vcpu->vcpu_id);
  936. if (rc != H_SUCCESS) {
  937. pr_err("KVM: Create Guest vcpu hcall failed, rc=%ld\n", rc);
  938. switch (rc) {
  939. case H_NOT_ENOUGH_RESOURCES:
  940. case H_ABORTED:
  941. return -ENOMEM;
  942. case H_AUTHORITY:
  943. return -EPERM;
  944. default:
  945. return -EINVAL;
  946. }
  947. }
  948. rc = kvmhv_nestedv2_host_create(vcpu, io);
  949. return rc;
  950. }
  951. EXPORT_SYMBOL_GPL(kvmhv_nestedv2_vcpu_create);
/**
 * kvmhv_nestedv2_vcpu_free() - free the NESTEDv2 state
 * @vcpu: vcpu
 * @io: NESTEDv2 nested io state
 *
 * Counterpart of kvmhv_nestedv2_vcpu_create(): releases the buffers and
 * messages allocated by kvmhv_nestedv2_host_create().
 */
void kvmhv_nestedv2_vcpu_free(struct kvm_vcpu *vcpu,
			      struct kvmhv_nestedv2_io *io)
{
	kvmhv_nestedv2_host_free(vcpu, io);
}
EXPORT_SYMBOL_GPL(kvmhv_nestedv2_vcpu_free);