nested.c 51 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
37047057067077087097107117127137147157167177187197207217227237247257267277287297307317327337347357367377387397407417427437447457467477487497507517527537547557567577587597607617627637647657667677687697707717727737747757767777787797807817827837847857867877887897907917927937947957967977987998008018028038048058068078088098108118128138148158168178188198208218228238248258268278288298308318328338348358368378388398408418428438448458468478488498508518528538548558568578588598608618628638648658668678688698708718728738748758768778788798808818828838848858868878888898908918928938948958968978988999009019029039049059069079089099109119129139149159169179189199209219229239249259269279289299309319329339349359369379389399409419429439449459469479489499509519529539549559569579589599609619629639649659669679689699709719729739749759769779789799809819829839849859869879889899909919929939949959969979989991000100110021003100410051006100710081009101010111012101310141015101610171018101910201021102210231024102510261027102810291030103110321033103410351036103710381039104010411042104310441045104610471048104910501051105210531054105510561057105810591060106110621063106410651066106710681069107010711072107310741075107610771078107910801081108210831084108510861087108810891090109110921093109410951096109710981099110011011102110311041105110611071108110911101111111211131114111511161117111811191120112111221123112411251126112711281129113011311132113311341135113611371138113911401141114211431144114511461147114811491150115111521153115411551156115711581159116011611162116311641165116611671168116911701171117211731174117511761177117811791180118111821183118411851186118711881189119011911192119311941195119611971198119912001201120212031204120512061207120812091210121112121213121412151216121712181219122012211222122312241225122612271228122912301231123212331234123512361237123812391240124112421243124412451246124712481249125012511252125312541255125612571258125912601261126212631264126512661267126812691270127112721273127412751276127
71278127912801281128212831284128512861287128812891290129112921293129412951296129712981299130013011302130313041305130613071308130913101311131213131314131513161317131813191320132113221323132413251326132713281329133013311332133313341335133613371338133913401341134213431344134513461347134813491350135113521353135413551356135713581359136013611362136313641365136613671368136913701371137213731374137513761377137813791380138113821383138413851386138713881389139013911392139313941395139613971398139914001401140214031404140514061407140814091410141114121413141414151416141714181419142014211422142314241425142614271428142914301431143214331434143514361437143814391440144114421443144414451446144714481449145014511452145314541455145614571458145914601461146214631464146514661467146814691470147114721473147414751476147714781479148014811482148314841485148614871488148914901491149214931494149514961497149814991500150115021503150415051506150715081509151015111512151315141515151615171518151915201521152215231524152515261527152815291530153115321533153415351536153715381539154015411542154315441545154615471548154915501551155215531554155515561557155815591560156115621563156415651566156715681569157015711572157315741575157615771578157915801581158215831584158515861587158815891590159115921593159415951596159715981599160016011602160316041605160616071608160916101611161216131614161516161617161816191620162116221623162416251626162716281629163016311632163316341635163616371638163916401641164216431644164516461647164816491650165116521653165416551656165716581659166016611662166316641665166616671668166916701671167216731674167516761677167816791680168116821683168416851686168716881689169016911692169316941695169616971698169917001701170217031704170517061707170817091710171117121713171417151716171717181719172017211722172317241725172617271728172917301731173217331734173517361737173817391740174117421743174417451746174717481749175017511752175317541755175617571758175917601761176217631764176517661767176817691770177117721773177417751776177
717781779178017811782178317841785178617871788178917901791179217931794179517961797179817991800180118021803180418051806180718081809181018111812181318141815181618171818181918201821182218231824
  1. // SPDX-License-Identifier: GPL-2.0-only
  2. /*
  3. * Kernel-based Virtual Machine driver for Linux
  4. *
  5. * AMD SVM support
  6. *
  7. * Copyright (C) 2006 Qumranet, Inc.
  8. * Copyright 2010 Red Hat, Inc. and/or its affiliates.
  9. *
  10. * Authors:
  11. * Yaniv Kamay <yaniv@qumranet.com>
  12. * Avi Kivity <avi@qumranet.com>
  13. */
  14. #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
  15. #include <linux/kvm_types.h>
  16. #include <linux/kvm_host.h>
  17. #include <linux/kernel.h>
  18. #include <asm/msr-index.h>
  19. #include <asm/debugreg.h>
  20. #include "kvm_emulate.h"
  21. #include "trace.h"
  22. #include "mmu.h"
  23. #include "x86.h"
  24. #include "smm.h"
  25. #include "cpuid.h"
  26. #include "lapic.h"
  27. #include "svm.h"
  28. #include "hyperv.h"
  29. #define CC KVM_NESTED_VMENTER_CONSISTENCY_CHECK
/*
 * Inject a nested page fault (#NPF) VM-Exit into L1 for a fault hit while
 * running L2.  The fault's error code is merged into the low 32 bits of
 * exit_info_1 and the faulting address is reported via exit_info_2.
 */
static void nested_svm_inject_npf_exit(struct kvm_vcpu *vcpu,
				       struct x86_exception *fault)
{
	struct vcpu_svm *svm = to_svm(vcpu);
	struct vmcb *vmcb = svm->vmcb;

	if (vmcb->control.exit_code != SVM_EXIT_NPF) {
		/*
		 * TODO: track the cause of the nested page fault, and
		 * correctly fill in the high bits of exit_info_1.
		 */
		vmcb->control.exit_code = SVM_EXIT_NPF;
		vmcb->control.exit_code_hi = 0;
		vmcb->control.exit_info_1 = (1ULL << 32);
		vmcb->control.exit_info_2 = fault->address;
	}

	/* Replace the low 32 bits of exit_info_1 with the fault's error code. */
	vmcb->control.exit_info_1 &= ~0xffffffffULL;
	vmcb->control.exit_info_1 |= fault->error_code;

	nested_svm_vmexit(svm);
}
/*
 * Read the @index'th PDPTE from L1's nested CR3 (nCR3) for the shadow NPT
 * MMU.  Returns 0, i.e. a not-present PDPTE, if the read from guest memory
 * fails.
 */
static u64 nested_svm_get_tdp_pdptr(struct kvm_vcpu *vcpu, int index)
{
	struct vcpu_svm *svm = to_svm(vcpu);
	u64 cr3 = svm->nested.ctl.nested_cr3;
	u64 pdpte;
	int ret;

	/*
	 * Note, nCR3 is "assumed" to be 32-byte aligned, i.e. the CPU ignores
	 * nCR3[4:0] when loading PDPTEs from memory.
	 */
	ret = kvm_vcpu_read_guest_page(vcpu, gpa_to_gfn(cr3), &pdpte,
				       (cr3 & GENMASK(11, 5)) + index * 8, 8);
	if (ret)
		return 0;
	return pdpte;
}
  65. static unsigned long nested_svm_get_tdp_cr3(struct kvm_vcpu *vcpu)
  66. {
  67. struct vcpu_svm *svm = to_svm(vcpu);
  68. return svm->nested.ctl.nested_cr3;
  69. }
/*
 * Switch the vCPU to the nested (shadow NPT) MMU so that L2 guest-physical
 * addresses are translated through L1's NPT tables rooted at nCR3.
 */
static void nested_svm_init_mmu_context(struct kvm_vcpu *vcpu)
{
	struct vcpu_svm *svm = to_svm(vcpu);

	WARN_ON(mmu_is_nested(vcpu));

	vcpu->arch.mmu = &vcpu->arch.guest_mmu;

	/*
	 * The NPT format depends on L1's CR4 and EFER, which is in vmcb01.  Note,
	 * when called via KVM_SET_NESTED_STATE, that state may _not_ match current
	 * vCPU state.  CR0.WP is explicitly ignored, while CR0.PG is required.
	 */
	kvm_init_shadow_npt_mmu(vcpu, X86_CR0_PG, svm->vmcb01.ptr->save.cr4,
				svm->vmcb01.ptr->save.efer,
				svm->nested.ctl.nested_cr3);

	/* Hook the page walk to L1's nCR3 and its PDPTE/#NPF semantics. */
	vcpu->arch.mmu->get_guest_pgd     = nested_svm_get_tdp_cr3;
	vcpu->arch.mmu->get_pdptr         = nested_svm_get_tdp_pdptr;
	vcpu->arch.mmu->inject_page_fault = nested_svm_inject_npf_exit;
	vcpu->arch.walk_mmu = &vcpu->arch.nested_mmu;
}
  88. static void nested_svm_uninit_mmu_context(struct kvm_vcpu *vcpu)
  89. {
  90. vcpu->arch.mmu = &vcpu->arch.root_mmu;
  91. vcpu->arch.walk_mmu = &vcpu->arch.root_mmu;
  92. }
  93. static bool nested_vmcb_needs_vls_intercept(struct vcpu_svm *svm)
  94. {
  95. if (!guest_can_use(&svm->vcpu, X86_FEATURE_V_VMSAVE_VMLOAD))
  96. return true;
  97. if (!nested_npt_enabled(svm))
  98. return true;
  99. if (!(svm->nested.ctl.virt_ext & VIRTUAL_VMLOAD_VMSAVE_ENABLE_MASK))
  100. return true;
  101. return false;
  102. }
/*
 * Recompute the active VMCB's intercept vectors.  When L2 is running, the
 * effective intercepts are the union of L0's (vmcb01) and L1's (cached
 * vmcb12) intercepts, with a few targeted adjustments applied to L0's set
 * before the merge.
 */
void recalc_intercepts(struct vcpu_svm *svm)
{
	struct vmcb_control_area *c, *h;
	struct vmcb_ctrl_area_cached *g;
	unsigned int i;

	vmcb_mark_dirty(svm->vmcb, VMCB_INTERCEPTS);

	if (!is_guest_mode(&svm->vcpu))
		return;

	c = &svm->vmcb->control;
	h = &svm->vmcb01.ptr->control;
	g = &svm->nested.ctl;

	/* Start from L0's (host) intercepts... */
	for (i = 0; i < MAX_INTERCEPT; i++)
		c->intercepts[i] = h->intercepts[i];

	if (g->int_ctl & V_INTR_MASKING_MASK) {
		/*
		 * If L2 is active and V_INTR_MASKING is enabled in vmcb12,
		 * disable intercept of CR8 writes as L2's CR8 does not affect
		 * any interrupt KVM may want to inject.
		 *
		 * Similarly, disable intercept of virtual interrupts (used to
		 * detect interrupt windows) if the saved RFLAGS.IF is '0', as
		 * the effective RFLAGS.IF for L1 interrupts will never be set
		 * while L2 is running (L2's RFLAGS.IF doesn't affect L1 IRQs).
		 */
		vmcb_clr_intercept(c, INTERCEPT_CR8_WRITE);
		if (!(svm->vmcb01.ptr->save.rflags & X86_EFLAGS_IF))
			vmcb_clr_intercept(c, INTERCEPT_VINTR);
	}

	/*
	 * We want to see VMMCALLs from a nested guest only when Hyper-V L2 TLB
	 * flush feature is enabled.
	 */
	if (!nested_svm_l2_tlb_flush_enabled(&svm->vcpu))
		vmcb_clr_intercept(c, INTERCEPT_VMMCALL);

	/* ...then OR in L1's intercepts; L1's wishes are always honored. */
	for (i = 0; i < MAX_INTERCEPT; i++)
		c->intercepts[i] |= g->intercepts[i];

	/* If SMI is not intercepted, ignore guest SMI intercept as well  */
	if (!intercept_smi)
		vmcb_clr_intercept(c, INTERCEPT_SMI);

	if (nested_vmcb_needs_vls_intercept(svm)) {
		/*
		 * If the virtual VMLOAD/VMSAVE is not enabled for the L2,
		 * we must intercept these instructions to correctly
		 * emulate them in case L1 doesn't intercept them.
		 */
		vmcb_set_intercept(c, INTERCEPT_VMLOAD);
		vmcb_set_intercept(c, INTERCEPT_VMSAVE);
	} else {
		WARN_ON(!(c->virt_ext & VIRTUAL_VMLOAD_VMSAVE_ENABLE_MASK));
	}
}
/*
 * Merge L0's (KVM) and L1's (Nested VMCB) MSR permission bitmaps. The function
 * is optimized in that it only merges the parts where KVM MSR permission bitmap
 * may contain zero bits.
 *
 * Returns false if L1's bitmap could not be read from guest memory, which
 * fails the nested VMRUN.
 */
static bool nested_svm_vmrun_msrpm(struct vcpu_svm *svm)
{
	int i;

	/*
	 * MSR bitmap update can be skipped when:
	 * - MSR bitmap for L1 hasn't changed.
	 * - Nested hypervisor (L1) is attempting to launch the same L2 as
	 *   before.
	 * - Nested hypervisor (L1) is using Hyper-V emulation interface and
	 *   tells KVM (L0) there were no changes in MSR bitmap for L2.
	 */
#ifdef CONFIG_KVM_HYPERV
	if (!svm->nested.force_msr_bitmap_recalc) {
		struct hv_vmcb_enlightenments *hve = &svm->nested.ctl.hv_enlightenments;

		if (kvm_hv_hypercall_enabled(&svm->vcpu) &&
		    hve->hv_enlightenments_control.msr_bitmap &&
		    (svm->nested.ctl.clean & BIT(HV_VMCB_NESTED_ENLIGHTENMENTS)))
			goto set_msrpm_base_pa;
	}
#endif

	/* Nothing to merge if L1 doesn't use an MSR permission bitmap. */
	if (!(vmcb12_is_intercept(&svm->nested.ctl, INTERCEPT_MSR_PROT)))
		return true;

	for (i = 0; i < MSRPM_OFFSETS; i++) {
		u32 value, p;
		u64 offset;

		/* 0xffffffff terminates the list of merge-worthy offsets. */
		if (msrpm_offsets[i] == 0xffffffff)
			break;

		p = msrpm_offsets[i];

		/* x2apic msrs are intercepted always for the nested guest */
		if (is_x2apic_msrpm_offset(p))
			continue;

		offset = svm->nested.ctl.msrpm_base_pa + (p * 4);

		if (kvm_vcpu_read_guest(&svm->vcpu, offset, &value, 4))
			return false;

		/* A '1' (intercept) from either L0 or L1 wins. */
		svm->nested.msrpm[p] = svm->msrpm[p] | value;
	}

	svm->nested.force_msr_bitmap_recalc = false;

#ifdef CONFIG_KVM_HYPERV
set_msrpm_base_pa:
#endif
	svm->vmcb->control.msrpm_base_pa = __sme_set(__pa(svm->nested.msrpm));

	return true;
}
  202. /*
  203. * Bits 11:0 of bitmap address are ignored by hardware
  204. */
  205. static bool nested_svm_check_bitmap_pa(struct kvm_vcpu *vcpu, u64 pa, u32 size)
  206. {
  207. u64 addr = PAGE_ALIGN(pa);
  208. return kvm_vcpu_is_legal_gpa(vcpu, addr) &&
  209. kvm_vcpu_is_legal_gpa(vcpu, addr + size - 1);
  210. }
/*
 * Validate the cached vmcb12 control area.  Returns false (flagging a
 * consistency-check failure via the CC macro) if L1 supplied illegal
 * controls for the nested VMRUN.
 */
static bool __nested_vmcb_check_controls(struct kvm_vcpu *vcpu,
					 struct vmcb_ctrl_area_cached *control)
{
	/* L1 must intercept VMRUN for nested VMRUN to be legal. */
	if (CC(!vmcb12_is_intercept(control, INTERCEPT_VMRUN)))
		return false;

	/* An ASID of 0 is illegal for the guest. */
	if (CC(control->asid == 0))
		return false;

	/* L1 can't enable NPT for L2 when NPT is disabled in L0 (KVM). */
	if (CC((control->nested_ctl & SVM_NESTED_CTL_NP_ENABLE) && !npt_enabled))
		return false;

	if (CC(!nested_svm_check_bitmap_pa(vcpu, control->msrpm_base_pa,
					   MSRPM_SIZE)))
		return false;

	if (CC(!nested_svm_check_bitmap_pa(vcpu, control->iopm_base_pa,
					   IOPM_SIZE)))
		return false;

	/* vNMI can only be enabled if NMIs are also intercepted. */
	if (CC((control->int_ctl & V_NMI_ENABLE_MASK) &&
	       !vmcb12_is_intercept(control, INTERCEPT_NMI))) {
		return false;
	}

	return true;
}
/* Common checks that apply to both L1 and L2 state.  */
static bool __nested_vmcb_check_save(struct kvm_vcpu *vcpu,
				     struct vmcb_save_area_cached *save)
{
	/* EFER.SVME must be set for VMRUN to be legal. */
	if (CC(!(save->efer & EFER_SVME)))
		return false;

	/* CR0.NW without CR0.CD is illegal; CR0[63:32] are reserved. */
	if (CC((save->cr0 & X86_CR0_CD) == 0 && (save->cr0 & X86_CR0_NW)) ||
	    CC(save->cr0 & ~0xffffffffULL))
		return false;

	if (CC(!kvm_dr6_valid(save->dr6)) || CC(!kvm_dr7_valid(save->dr7)))
		return false;

	/*
	 * These checks are also performed by KVM_SET_SREGS,
	 * except that EFER.LMA is not checked by SVM against
	 * CR0.PG && EFER.LME.
	 */
	if ((save->efer & EFER_LME) && (save->cr0 & X86_CR0_PG)) {
		if (CC(!(save->cr4 & X86_CR4_PAE)) ||
		    CC(!(save->cr0 & X86_CR0_PE)) ||
		    CC(!kvm_vcpu_is_legal_cr3(vcpu, save->cr3)))
			return false;
	}

	/* Note, SVM doesn't have any additional restrictions on CR4. */
	if (CC(!__kvm_is_valid_cr4(vcpu, save->cr4)))
		return false;

	if (CC(!kvm_valid_efer(vcpu, save->efer)))
		return false;

	return true;
}
  261. static bool nested_vmcb_check_save(struct kvm_vcpu *vcpu)
  262. {
  263. struct vcpu_svm *svm = to_svm(vcpu);
  264. struct vmcb_save_area_cached *save = &svm->nested.save;
  265. return __nested_vmcb_check_save(vcpu, save);
  266. }
  267. static bool nested_vmcb_check_controls(struct kvm_vcpu *vcpu)
  268. {
  269. struct vcpu_svm *svm = to_svm(vcpu);
  270. struct vmcb_ctrl_area_cached *ctl = &svm->nested.ctl;
  271. return __nested_vmcb_check_controls(vcpu, ctl);
  272. }
/*
 * Cache the vmcb12 control fields that KVM consumes while emulating the
 * nested VMRUN, so later code never re-reads guest memory (avoiding
 * TOC/TOU races with L1).  The bitmap base addresses are masked because
 * hardware ignores their low 12 bits.
 */
static
void __nested_copy_vmcb_control_to_cache(struct kvm_vcpu *vcpu,
					 struct vmcb_ctrl_area_cached *to,
					 struct vmcb_control_area *from)
{
	unsigned int i;

	for (i = 0; i < MAX_INTERCEPT; i++)
		to->intercepts[i] = from->intercepts[i];

	to->iopm_base_pa        = from->iopm_base_pa;
	to->msrpm_base_pa       = from->msrpm_base_pa;
	to->tsc_offset          = from->tsc_offset;
	to->tlb_ctl             = from->tlb_ctl;
	to->int_ctl             = from->int_ctl;
	to->int_vector          = from->int_vector;
	to->int_state           = from->int_state;
	to->exit_code           = from->exit_code;
	to->exit_code_hi        = from->exit_code_hi;
	to->exit_info_1         = from->exit_info_1;
	to->exit_info_2         = from->exit_info_2;
	to->exit_int_info       = from->exit_int_info;
	to->exit_int_info_err   = from->exit_int_info_err;
	to->nested_ctl          = from->nested_ctl;
	to->event_inj           = from->event_inj;
	to->event_inj_err       = from->event_inj_err;
	to->next_rip            = from->next_rip;
	to->nested_cr3          = from->nested_cr3;
	to->virt_ext            = from->virt_ext;
	to->pause_filter_count  = from->pause_filter_count;
	to->pause_filter_thresh = from->pause_filter_thresh;

	/* Copy asid here because nested_vmcb_check_controls will check it.  */
	to->asid           = from->asid;
	to->msrpm_base_pa &= ~0x0fffULL;
	to->iopm_base_pa  &= ~0x0fffULL;

#ifdef CONFIG_KVM_HYPERV
	/* Hyper-V extensions (Enlightened VMCB) */
	if (kvm_hv_hypercall_enabled(vcpu)) {
		to->clean = from->clean;
		memcpy(&to->hv_enlightenments, &from->hv_enlightenments,
		       sizeof(to->hv_enlightenments));
	}
#endif
}
  315. void nested_copy_vmcb_control_to_cache(struct vcpu_svm *svm,
  316. struct vmcb_control_area *control)
  317. {
  318. __nested_copy_vmcb_control_to_cache(&svm->vcpu, &svm->nested.ctl, control);
  319. }
/*
 * Cache the subset of vmcb12 save-area fields that KVM validates and then
 * consumes for the nested VM-Enter.
 */
static void __nested_copy_vmcb_save_to_cache(struct vmcb_save_area_cached *to,
					     struct vmcb_save_area *from)
{
	/*
	 * Copy only fields that are validated, as we need them
	 * to avoid TOC/TOU races.
	 */
	to->efer = from->efer;
	to->cr0 = from->cr0;
	to->cr3 = from->cr3;
	to->cr4 = from->cr4;

	to->dr6 = from->dr6;
	to->dr7 = from->dr7;
}
  334. void nested_copy_vmcb_save_to_cache(struct vcpu_svm *svm,
  335. struct vmcb_save_area *save)
  336. {
  337. __nested_copy_vmcb_save_to_cache(&svm->nested.save, save);
  338. }
/*
 * Synchronize fields that are written by the processor, so that
 * they can be copied back into the vmcb12.
 */
void nested_sync_control_from_vmcb02(struct vcpu_svm *svm)
{
	u32 mask;
	svm->nested.ctl.event_inj      = svm->vmcb->control.event_inj;
	svm->nested.ctl.event_inj_err  = svm->vmcb->control.event_inj_err;

	/* Only a few fields of int_ctl are written by the processor.  */
	mask = V_IRQ_MASK | V_TPR_MASK;
	/*
	 * Don't sync vmcb02 V_IRQ back to vmcb12 if KVM (L0) is intercepting
	 * virtual interrupts in order to request an interrupt window, as KVM
	 * has usurped vmcb02's int_ctl.  If an interrupt window opens before
	 * the next VM-Exit, svm_clear_vintr() will restore vmcb12's int_ctl.
	 * If no window opens, V_IRQ will be correctly preserved in vmcb12's
	 * int_ctl (because it was never recognized while L2 was running).
	 */
	if (svm_is_intercept(svm, INTERCEPT_VINTR) &&
	    !test_bit(INTERCEPT_VINTR, (unsigned long *)svm->nested.ctl.intercepts))
		mask &= ~V_IRQ_MASK;

	/* Only sync V_GIF/V_NMI state if the feature is exposed to L1. */
	if (nested_vgif_enabled(svm))
		mask |= V_GIF_MASK;

	if (nested_vnmi_enabled(svm))
		mask |= V_NMI_BLOCKING_MASK | V_NMI_PENDING_MASK;

	svm->nested.ctl.int_ctl        &= ~mask;
	svm->nested.ctl.int_ctl        |= svm->vmcb->control.int_ctl & mask;
}
/*
 * Transfer any event that L0 or L1 wanted to inject into L2 to
 * EXIT_INT_INFO.
 */
static void nested_save_pending_event_to_vmcb12(struct vcpu_svm *svm,
						struct vmcb *vmcb12)
{
	struct kvm_vcpu *vcpu = &svm->vcpu;
	u32 exit_int_info = 0;
	unsigned int nr;

	if (vcpu->arch.exception.injected) {
		nr = vcpu->arch.exception.vector;
		exit_int_info = nr | SVM_EVTINJ_VALID | SVM_EVTINJ_TYPE_EXEPT;

		if (vcpu->arch.exception.has_error_code) {
			exit_int_info |= SVM_EVTINJ_VALID_ERR;
			vmcb12->control.exit_int_info_err =
				vcpu->arch.exception.error_code;
		}

	} else if (vcpu->arch.nmi_injected) {
		exit_int_info = SVM_EVTINJ_VALID | SVM_EVTINJ_TYPE_NMI;

	} else if (vcpu->arch.interrupt.injected) {
		nr = vcpu->arch.interrupt.nr;
		exit_int_info = nr | SVM_EVTINJ_VALID;

		/* Soft interrupts (e.g. INTn) are reported with a soft type. */
		if (vcpu->arch.interrupt.soft)
			exit_int_info |= SVM_EVTINJ_TYPE_SOFT;
		else
			exit_int_info |= SVM_EVTINJ_TYPE_INTR;
	}

	vmcb12->control.exit_int_info = exit_int_info;
}
/*
 * Flush TLBs and sync MMUs on an L1 <=> L2 transition.  Currently an
 * unconditional flush+sync; see the TODO below for what must be fixed
 * before it can be made conditional.
 */
static void nested_svm_transition_tlb_flush(struct kvm_vcpu *vcpu)
{
	/* Handle pending Hyper-V TLB flush requests */
	kvm_hv_nested_transtion_tlb_flush(vcpu, npt_enabled);

	/*
	 * TODO: optimize unconditional TLB flush/MMU sync.  A partial list of
	 * things to fix before this can be conditional:
	 *
	 *  - Flush TLBs for both L1 and L2 remote TLB flush
	 *  - Honor L1's request to flush an ASID on nested VMRUN
	 *  - Sync nested NPT MMU on VMRUN that flushes L2's ASID[*]
	 *  - Don't crush a pending TLB flush in vmcb02 on nested VMRUN
	 *  - Flush L1's ASID on KVM_REQ_TLB_FLUSH_GUEST
	 *
	 * [*] Unlike nested EPT, SVM's ASID management can invalidate nested
	 *     NPT guest-physical mappings on VMRUN.
	 */
	kvm_make_request(KVM_REQ_MMU_SYNC, vcpu);
	kvm_make_request(KVM_REQ_TLB_FLUSH_CURRENT, vcpu);
}
/*
 * Load guest's/host's cr3 on nested vmentry or vmexit. @nested_npt is true
 * if we are emulating VM-Entry into a guest with NPT enabled.
 *
 * Returns -EINVAL on an illegal CR3 or failed PDPTR load, 0 on success.
 */
static int nested_svm_load_cr3(struct kvm_vcpu *vcpu, unsigned long cr3,
			       bool nested_npt, bool reload_pdptrs)
{
	if (CC(!kvm_vcpu_is_legal_cr3(vcpu, cr3)))
		return -EINVAL;

	/* PDPTRs only need (re)loading for legacy PAE paging without NPT. */
	if (reload_pdptrs && !nested_npt && is_pae_paging(vcpu) &&
	    CC(!load_pdptrs(vcpu, cr3)))
		return -EINVAL;

	vcpu->arch.cr3 = cr3;

	/* Re-initialize the MMU, e.g. to pick up CR4 MMU role changes. */
	kvm_init_mmu(vcpu);

	if (!nested_npt)
		kvm_mmu_new_pgd(vcpu, cr3);

	return 0;
}
  437. void nested_vmcb02_compute_g_pat(struct vcpu_svm *svm)
  438. {
  439. if (!svm->nested.vmcb02.ptr)
  440. return;
  441. /* FIXME: merge g_pat from vmcb01 and vmcb12. */
  442. svm->nested.vmcb02.ptr->save.g_pat = svm->vmcb01.ptr->save.g_pat;
  443. }
/*
 * Load L2's save-state from vmcb12 into vmcb02 and the vCPU.  Fields
 * covered by vmcb12's clean bits are skipped unless this is a different
 * vmcb12 than the one used on the last nested VMRUN.  Validated fields
 * (EFER/CR0/CR4/DR6/DR7) come from KVM's cache, not guest memory.
 */
static void nested_vmcb02_prepare_save(struct vcpu_svm *svm, struct vmcb *vmcb12)
{
	bool new_vmcb12 = false;
	struct vmcb *vmcb01 = svm->vmcb01.ptr;
	struct vmcb *vmcb02 = svm->nested.vmcb02.ptr;
	struct kvm_vcpu *vcpu = &svm->vcpu;

	nested_vmcb02_compute_g_pat(svm);

	/* Load the nested guest state */
	if (svm->nested.vmcb12_gpa != svm->nested.last_vmcb12_gpa) {
		new_vmcb12 = true;
		svm->nested.last_vmcb12_gpa = svm->nested.vmcb12_gpa;
		svm->nested.force_msr_bitmap_recalc = true;
	}

	if (unlikely(new_vmcb12 || vmcb_is_dirty(vmcb12, VMCB_SEG))) {
		vmcb02->save.es = vmcb12->save.es;
		vmcb02->save.cs = vmcb12->save.cs;
		vmcb02->save.ss = vmcb12->save.ss;
		vmcb02->save.ds = vmcb12->save.ds;
		vmcb02->save.cpl = vmcb12->save.cpl;
		vmcb_mark_dirty(vmcb02, VMCB_SEG);
	}

	if (unlikely(new_vmcb12 || vmcb_is_dirty(vmcb12, VMCB_DT))) {
		vmcb02->save.gdtr = vmcb12->save.gdtr;
		vmcb02->save.idtr = vmcb12->save.idtr;
		vmcb_mark_dirty(vmcb02, VMCB_DT);
	}

	kvm_set_rflags(vcpu, vmcb12->save.rflags | X86_EFLAGS_FIXED);

	svm_set_efer(vcpu, svm->nested.save.efer);

	svm_set_cr0(vcpu, svm->nested.save.cr0);
	svm_set_cr4(vcpu, svm->nested.save.cr4);

	svm->vcpu.arch.cr2 = vmcb12->save.cr2;

	kvm_rax_write(vcpu, vmcb12->save.rax);
	kvm_rsp_write(vcpu, vmcb12->save.rsp);
	kvm_rip_write(vcpu, vmcb12->save.rip);

	/* In case we don't even reach vcpu_run, the fields are not updated */
	vmcb02->save.rax = vmcb12->save.rax;
	vmcb02->save.rsp = vmcb12->save.rsp;
	vmcb02->save.rip = vmcb12->save.rip;

	/* These bits will be set properly on the first execution when new_vmc12 is true */
	if (unlikely(new_vmcb12 || vmcb_is_dirty(vmcb12, VMCB_DR))) {
		vmcb02->save.dr7 = svm->nested.save.dr7 | DR7_FIXED_1;
		svm->vcpu.arch.dr6  = svm->nested.save.dr6 | DR6_ACTIVE_LOW;
		vmcb_mark_dirty(vmcb02, VMCB_DR);
	}

	if (unlikely(guest_can_use(vcpu, X86_FEATURE_LBRV) &&
		     (svm->nested.ctl.virt_ext & LBR_CTL_ENABLE_MASK))) {
		/*
		 * Reserved bits of DEBUGCTL are ignored.  Be consistent with
		 * svm_set_msr's definition of reserved bits.
		 */
		svm_copy_lbrs(vmcb02, vmcb12);
		vmcb02->save.dbgctl &= ~DEBUGCTL_RESERVED_BITS;
		svm_update_lbrv(&svm->vcpu);

	} else if (unlikely(vmcb01->control.virt_ext & LBR_CTL_ENABLE_MASK)) {
		svm_copy_lbrs(vmcb02, vmcb01);
	}
}
  501. static inline bool is_evtinj_soft(u32 evtinj)
  502. {
  503. u32 type = evtinj & SVM_EVTINJ_TYPE_MASK;
  504. u8 vector = evtinj & SVM_EVTINJ_VEC_MASK;
  505. if (!(evtinj & SVM_EVTINJ_VALID))
  506. return false;
  507. if (type == SVM_EVTINJ_TYPE_SOFT)
  508. return true;
  509. return type == SVM_EVTINJ_TYPE_EXEPT && kvm_exception_is_soft(vector);
  510. }
  511. static bool is_evtinj_nmi(u32 evtinj)
  512. {
  513. u32 type = evtinj & SVM_EVTINJ_TYPE_MASK;
  514. if (!(evtinj & SVM_EVTINJ_VALID))
  515. return false;
  516. return type == SVM_EVTINJ_TYPE_NMI;
  517. }
/*
 * Build vmcb02's control area for running L2: merge L0 state from vmcb01
 * with L1 state from the cached vmcb12 controls.  Must be paired with
 * nested_vmcb02_prepare_save().  @vmcb12_rip and @vmcb12_csbase are the
 * vmcb12 values needed to emulate soft-event injection on !nrips CPUs.
 */
static void nested_vmcb02_prepare_control(struct vcpu_svm *svm,
					  unsigned long vmcb12_rip,
					  unsigned long vmcb12_csbase)
{
	/* int_ctl bits taken from vmcb01 vs. vmcb12, adjusted below. */
	u32 int_ctl_vmcb01_bits = V_INTR_MASKING_MASK;
	u32 int_ctl_vmcb12_bits = V_TPR_MASK | V_IRQ_INJECTION_BITS_MASK;

	struct kvm_vcpu *vcpu = &svm->vcpu;
	struct vmcb *vmcb01 = svm->vmcb01.ptr;
	struct vmcb *vmcb02 = svm->nested.vmcb02.ptr;
	u32 pause_count12;
	u32 pause_thresh12;

	nested_svm_transition_tlb_flush(vcpu);

	/* Enter Guest-Mode */
	enter_guest_mode(vcpu);

	/*
	 * Filled at exit: exit_code, exit_code_hi, exit_info_1, exit_info_2,
	 * exit_int_info, exit_int_info_err, next_rip, insn_len, insn_bytes.
	 */

	/* vGIF state comes from vmcb12 only if L1 exposed and enabled vGIF. */
	if (guest_can_use(vcpu, X86_FEATURE_VGIF) &&
	    (svm->nested.ctl.int_ctl & V_GIF_ENABLE_MASK))
		int_ctl_vmcb12_bits |= (V_GIF_MASK | V_GIF_ENABLE_MASK);
	else
		int_ctl_vmcb01_bits |= (V_GIF_MASK | V_GIF_ENABLE_MASK);

	if (vnmi) {
		/* An L1 NMI pending in vmcb01 must be re-raised as an event. */
		if (vmcb01->control.int_ctl & V_NMI_PENDING_MASK) {
			svm->vcpu.arch.nmi_pending++;
			kvm_make_request(KVM_REQ_EVENT, &svm->vcpu);
		}
		if (nested_vnmi_enabled(svm))
			int_ctl_vmcb12_bits |= (V_NMI_PENDING_MASK |
						V_NMI_ENABLE_MASK |
						V_NMI_BLOCKING_MASK);
	}

	/* Copied from vmcb01.  msrpm_base can be overwritten later.  */
	vmcb02->control.nested_ctl = vmcb01->control.nested_ctl;
	vmcb02->control.iopm_base_pa = vmcb01->control.iopm_base_pa;
	vmcb02->control.msrpm_base_pa = vmcb01->control.msrpm_base_pa;

	/* Done at vmrun: asid.  */

	/* Also overwritten later if necessary.  */
	vmcb02->control.tlb_ctl = TLB_CONTROL_DO_NOTHING;

	/* nested_cr3.  */
	if (nested_npt_enabled(svm))
		nested_svm_init_mmu_context(vcpu);

	/* L2's TSC offset is L1's offset combined with vmcb12's offset. */
	vcpu->arch.tsc_offset = kvm_calc_nested_tsc_offset(
			vcpu->arch.l1_tsc_offset,
			svm->nested.ctl.tsc_offset,
			svm->tsc_ratio_msr);

	vmcb02->control.tsc_offset = vcpu->arch.tsc_offset;

	if (guest_can_use(vcpu, X86_FEATURE_TSCRATEMSR) &&
	    svm->tsc_ratio_msr != kvm_caps.default_tsc_scaling_ratio)
		nested_svm_update_tsc_ratio_msr(vcpu);

	vmcb02->control.int_ctl =
		(svm->nested.ctl.int_ctl & int_ctl_vmcb12_bits) |
		(vmcb01->control.int_ctl & int_ctl_vmcb01_bits);

	vmcb02->control.int_vector = svm->nested.ctl.int_vector;
	vmcb02->control.int_state = svm->nested.ctl.int_state;
	vmcb02->control.event_inj = svm->nested.ctl.event_inj;
	vmcb02->control.event_inj_err = svm->nested.ctl.event_inj_err;

	/*
	 * next_rip is consumed on VMRUN as the return address pushed on the
	 * stack for injected soft exceptions/interrupts.  If nrips is exposed
	 * to L1, take it verbatim from vmcb12.  If nrips is supported in
	 * hardware but not exposed to L1, stuff the actual L2 RIP to emulate
	 * what a nrips=0 CPU would do (L1 is responsible for advancing RIP
	 * prior to injecting the event).
	 */
	if (guest_can_use(vcpu, X86_FEATURE_NRIPS))
		vmcb02->control.next_rip = svm->nested.ctl.next_rip;
	else if (boot_cpu_has(X86_FEATURE_NRIPS))
		vmcb02->control.next_rip = vmcb12_rip;

	svm->nmi_l1_to_l2 = is_evtinj_nmi(vmcb02->control.event_inj);
	if (is_evtinj_soft(vmcb02->control.event_inj)) {
		/* Record state needed to re-inject/unwind the soft event. */
		svm->soft_int_injected = true;
		svm->soft_int_csbase = vmcb12_csbase;
		svm->soft_int_old_rip = vmcb12_rip;
		if (guest_can_use(vcpu, X86_FEATURE_NRIPS))
			svm->soft_int_next_rip = svm->nested.ctl.next_rip;
		else
			svm->soft_int_next_rip = vmcb12_rip;
	}

	/* LBR virtualization: L1's setting, OR'd with L2's if exposed. */
	vmcb02->control.virt_ext = vmcb01->control.virt_ext &
				   LBR_CTL_ENABLE_MASK;
	if (guest_can_use(vcpu, X86_FEATURE_LBRV))
		vmcb02->control.virt_ext |=
			(svm->nested.ctl.virt_ext & LBR_CTL_ENABLE_MASK);

	if (!nested_vmcb_needs_vls_intercept(svm))
		vmcb02->control.virt_ext |= VIRTUAL_VMLOAD_VMSAVE_ENABLE_MASK;

	/* PAUSE filter values from vmcb12 count only if exposed to L1. */
	if (guest_can_use(vcpu, X86_FEATURE_PAUSEFILTER))
		pause_count12 = svm->nested.ctl.pause_filter_count;
	else
		pause_count12 = 0;
	if (guest_can_use(vcpu, X86_FEATURE_PFTHRESHOLD))
		pause_thresh12 = svm->nested.ctl.pause_filter_thresh;
	else
		pause_thresh12 = 0;
	if (kvm_pause_in_guest(svm->vcpu.kvm)) {
		/* use guest values since host doesn't intercept PAUSE */
		vmcb02->control.pause_filter_count = pause_count12;
		vmcb02->control.pause_filter_thresh = pause_thresh12;

	} else {
		/* start from host values otherwise */
		vmcb02->control.pause_filter_count = vmcb01->control.pause_filter_count;
		vmcb02->control.pause_filter_thresh = vmcb01->control.pause_filter_thresh;

		/* ... but ensure filtering is disabled if so requested.  */
		if (vmcb12_is_intercept(&svm->nested.ctl, INTERCEPT_PAUSE)) {
			if (!pause_count12)
				vmcb02->control.pause_filter_count = 0;
			if (!pause_thresh12)
				vmcb02->control.pause_filter_thresh = 0;
		}
	}

	/*
	 * Merge guest and host intercepts - must be called  with vcpu in
	 * guest-mode to take effect.
	 */
	recalc_intercepts(svm);
}
static void nested_svm_copy_common_state(struct vmcb *from_vmcb, struct vmcb *to_vmcb)
{
	/*
	 * Some VMCB state is shared between L1 and L2 and thus has to be
	 * moved at the time of nested vmrun and vmexit.
	 *
	 * VMLOAD/VMSAVE state would also belong in this category, but KVM
	 * always performs VMLOAD and VMSAVE from the VMCB01.
	 */
	to_vmcb->save.spec_ctrl = from_vmcb->save.spec_ctrl;
}
/*
 * Transition the vCPU from L1 into L2 ("guest mode"): move shared state to
 * vmcb02, make vmcb02 the active VMCB, load the cached vmcb12 control/save
 * state and finally load L2's CR3.
 *
 * @vmcb12_gpa: guest-physical address of L2's VMCB; remembered so the
 *              eventual #VMEXIT can write results back to it.
 * @vmcb12:     host mapping of that VMCB (used for tracing and save state).
 * @from_vmrun: true for an emulated VMRUN; false when restoring nested state
 *              from userspace, in which case loading the MSR permission
 *              bitmap is deferred via KVM_REQ_GET_NESTED_STATE_PAGES.
 *
 * Returns 0 on success, or the error from nested_svm_load_cr3().
 */
int enter_svm_guest_mode(struct kvm_vcpu *vcpu, u64 vmcb12_gpa,
                         struct vmcb *vmcb12, bool from_vmrun)
{
        struct vcpu_svm *svm = to_svm(vcpu);
        int ret;

        trace_kvm_nested_vmenter(svm->vmcb->save.rip,
                                 vmcb12_gpa,
                                 vmcb12->save.rip,
                                 vmcb12->control.int_ctl,
                                 vmcb12->control.event_inj,
                                 vmcb12->control.nested_ctl,
                                 vmcb12->control.nested_cr3,
                                 vmcb12->save.cr3,
                                 KVM_ISA_SVM);

        trace_kvm_nested_intercepts(vmcb12->control.intercepts[INTERCEPT_CR] & 0xffff,
                                    vmcb12->control.intercepts[INTERCEPT_CR] >> 16,
                                    vmcb12->control.intercepts[INTERCEPT_EXCEPTION],
                                    vmcb12->control.intercepts[INTERCEPT_WORD3],
                                    vmcb12->control.intercepts[INTERCEPT_WORD4],
                                    vmcb12->control.intercepts[INTERCEPT_WORD5]);

        svm->nested.vmcb12_gpa = vmcb12_gpa;

        /* vmcb02 must not already be the active VMCB at this point. */
        WARN_ON(svm->vmcb == svm->nested.vmcb02.ptr);

        /* Shared (L1/L2) state follows the active VMCB: vmcb01 -> vmcb02. */
        nested_svm_copy_common_state(svm->vmcb01.ptr, svm->nested.vmcb02.ptr);

        svm_switch_vmcb(svm, &svm->nested.vmcb02);
        nested_vmcb02_prepare_control(svm, vmcb12->save.rip, vmcb12->save.cs.base);
        nested_vmcb02_prepare_save(svm, vmcb12);

        ret = nested_svm_load_cr3(&svm->vcpu, svm->nested.save.cr3,
                                  nested_npt_enabled(svm), from_vmrun);
        if (ret)
                return ret;

        if (!from_vmrun)
                kvm_make_request(KVM_REQ_GET_NESTED_STATE_PAGES, vcpu);

        svm_set_gif(svm, true);

        /* Trigger re-evaluation of APICv activation for the new mode. */
        if (kvm_vcpu_apicv_active(vcpu))
                kvm_make_request(KVM_REQ_APICV_UPDATE, vcpu);

        nested_svm_hv_update_vm_vp_ids(vcpu);

        return 0;
}
  684. int nested_svm_vmrun(struct kvm_vcpu *vcpu)
  685. {
  686. struct vcpu_svm *svm = to_svm(vcpu);
  687. int ret;
  688. struct vmcb *vmcb12;
  689. struct kvm_host_map map;
  690. u64 vmcb12_gpa;
  691. struct vmcb *vmcb01 = svm->vmcb01.ptr;
  692. if (!svm->nested.hsave_msr) {
  693. kvm_inject_gp(vcpu, 0);
  694. return 1;
  695. }
  696. if (is_smm(vcpu)) {
  697. kvm_queue_exception(vcpu, UD_VECTOR);
  698. return 1;
  699. }
  700. /* This fails when VP assist page is enabled but the supplied GPA is bogus */
  701. ret = kvm_hv_verify_vp_assist(vcpu);
  702. if (ret) {
  703. kvm_inject_gp(vcpu, 0);
  704. return ret;
  705. }
  706. vmcb12_gpa = svm->vmcb->save.rax;
  707. ret = kvm_vcpu_map(vcpu, gpa_to_gfn(vmcb12_gpa), &map);
  708. if (ret == -EINVAL) {
  709. kvm_inject_gp(vcpu, 0);
  710. return 1;
  711. } else if (ret) {
  712. return kvm_skip_emulated_instruction(vcpu);
  713. }
  714. ret = kvm_skip_emulated_instruction(vcpu);
  715. vmcb12 = map.hva;
  716. if (WARN_ON_ONCE(!svm->nested.initialized))
  717. return -EINVAL;
  718. nested_copy_vmcb_control_to_cache(svm, &vmcb12->control);
  719. nested_copy_vmcb_save_to_cache(svm, &vmcb12->save);
  720. if (!nested_vmcb_check_save(vcpu) ||
  721. !nested_vmcb_check_controls(vcpu)) {
  722. vmcb12->control.exit_code = SVM_EXIT_ERR;
  723. vmcb12->control.exit_code_hi = 0;
  724. vmcb12->control.exit_info_1 = 0;
  725. vmcb12->control.exit_info_2 = 0;
  726. goto out;
  727. }
  728. /*
  729. * Since vmcb01 is not in use, we can use it to store some of the L1
  730. * state.
  731. */
  732. vmcb01->save.efer = vcpu->arch.efer;
  733. vmcb01->save.cr0 = kvm_read_cr0(vcpu);
  734. vmcb01->save.cr4 = vcpu->arch.cr4;
  735. vmcb01->save.rflags = kvm_get_rflags(vcpu);
  736. vmcb01->save.rip = kvm_rip_read(vcpu);
  737. if (!npt_enabled)
  738. vmcb01->save.cr3 = kvm_read_cr3(vcpu);
  739. svm->nested.nested_run_pending = 1;
  740. if (enter_svm_guest_mode(vcpu, vmcb12_gpa, vmcb12, true))
  741. goto out_exit_err;
  742. if (nested_svm_vmrun_msrpm(svm))
  743. goto out;
  744. out_exit_err:
  745. svm->nested.nested_run_pending = 0;
  746. svm->nmi_l1_to_l2 = false;
  747. svm->soft_int_injected = false;
  748. svm->vmcb->control.exit_code = SVM_EXIT_ERR;
  749. svm->vmcb->control.exit_code_hi = 0;
  750. svm->vmcb->control.exit_info_1 = 0;
  751. svm->vmcb->control.exit_info_2 = 0;
  752. nested_svm_vmexit(svm);
  753. out:
  754. kvm_vcpu_unmap(vcpu, &map, true);
  755. return ret;
  756. }
  757. /* Copy state save area fields which are handled by VMRUN */
  758. void svm_copy_vmrun_state(struct vmcb_save_area *to_save,
  759. struct vmcb_save_area *from_save)
  760. {
  761. to_save->es = from_save->es;
  762. to_save->cs = from_save->cs;
  763. to_save->ss = from_save->ss;
  764. to_save->ds = from_save->ds;
  765. to_save->gdtr = from_save->gdtr;
  766. to_save->idtr = from_save->idtr;
  767. to_save->rflags = from_save->rflags | X86_EFLAGS_FIXED;
  768. to_save->efer = from_save->efer;
  769. to_save->cr0 = from_save->cr0;
  770. to_save->cr3 = from_save->cr3;
  771. to_save->cr4 = from_save->cr4;
  772. to_save->rax = from_save->rax;
  773. to_save->rsp = from_save->rsp;
  774. to_save->rip = from_save->rip;
  775. to_save->cpl = 0;
  776. }
  777. void svm_copy_vmloadsave_state(struct vmcb *to_vmcb, struct vmcb *from_vmcb)
  778. {
  779. to_vmcb->save.fs = from_vmcb->save.fs;
  780. to_vmcb->save.gs = from_vmcb->save.gs;
  781. to_vmcb->save.tr = from_vmcb->save.tr;
  782. to_vmcb->save.ldtr = from_vmcb->save.ldtr;
  783. to_vmcb->save.kernel_gs_base = from_vmcb->save.kernel_gs_base;
  784. to_vmcb->save.star = from_vmcb->save.star;
  785. to_vmcb->save.lstar = from_vmcb->save.lstar;
  786. to_vmcb->save.cstar = from_vmcb->save.cstar;
  787. to_vmcb->save.sfmask = from_vmcb->save.sfmask;
  788. to_vmcb->save.sysenter_cs = from_vmcb->save.sysenter_cs;
  789. to_vmcb->save.sysenter_esp = from_vmcb->save.sysenter_esp;
  790. to_vmcb->save.sysenter_eip = from_vmcb->save.sysenter_eip;
  791. }
/*
 * Emulate #VMEXIT from L2 back to L1: write L2's exit state into the
 * guest's vmcb12, switch the vCPU back to vmcb01 and restore L1's
 * processor state.
 *
 * Returns 0 on success, 1 if vmcb12 could not be mapped or reloading
 * L1's CR3 failed.
 */
int nested_svm_vmexit(struct vcpu_svm *svm)
{
        struct kvm_vcpu *vcpu = &svm->vcpu;
        struct vmcb *vmcb01 = svm->vmcb01.ptr;
        struct vmcb *vmcb02 = svm->nested.vmcb02.ptr;
        struct vmcb *vmcb12;
        struct kvm_host_map map;
        int rc;

        rc = kvm_vcpu_map(vcpu, gpa_to_gfn(svm->nested.vmcb12_gpa), &map);
        if (rc) {
                if (rc == -EINVAL)
                        kvm_inject_gp(vcpu, 0);
                return 1;
        }

        vmcb12 = map.hva;

        /* Exit Guest-Mode */
        leave_guest_mode(vcpu);
        svm->nested.vmcb12_gpa = 0;
        WARN_ON_ONCE(svm->nested.nested_run_pending);

        kvm_clear_request(KVM_REQ_GET_NESTED_STATE_PAGES, vcpu);

        /* in case we halted in L2 */
        svm->vcpu.arch.mp_state = KVM_MP_STATE_RUNNABLE;

        /* Give the current vmcb to the guest */

        vmcb12->save.es = vmcb02->save.es;
        vmcb12->save.cs = vmcb02->save.cs;
        vmcb12->save.ss = vmcb02->save.ss;
        vmcb12->save.ds = vmcb02->save.ds;
        vmcb12->save.gdtr = vmcb02->save.gdtr;
        vmcb12->save.idtr = vmcb02->save.idtr;
        vmcb12->save.efer = svm->vcpu.arch.efer;
        vmcb12->save.cr0 = kvm_read_cr0(vcpu);
        vmcb12->save.cr3 = kvm_read_cr3(vcpu);
        vmcb12->save.cr2 = vmcb02->save.cr2;
        vmcb12->save.cr4 = svm->vcpu.arch.cr4;
        vmcb12->save.rflags = kvm_get_rflags(vcpu);
        vmcb12->save.rip = kvm_rip_read(vcpu);
        vmcb12->save.rsp = kvm_rsp_read(vcpu);
        vmcb12->save.rax = kvm_rax_read(vcpu);
        vmcb12->save.dr7 = vmcb02->save.dr7;
        vmcb12->save.dr6 = svm->vcpu.arch.dr6;
        vmcb12->save.cpl = vmcb02->save.cpl;

        /* Report why L2 exited to L1. */
        vmcb12->control.int_state = vmcb02->control.int_state;
        vmcb12->control.exit_code = vmcb02->control.exit_code;
        vmcb12->control.exit_code_hi = vmcb02->control.exit_code_hi;
        vmcb12->control.exit_info_1 = vmcb02->control.exit_info_1;
        vmcb12->control.exit_info_2 = vmcb02->control.exit_info_2;

        if (vmcb12->control.exit_code != SVM_EXIT_ERR)
                nested_save_pending_event_to_vmcb12(svm, vmcb12);

        if (guest_can_use(vcpu, X86_FEATURE_NRIPS))
                vmcb12->control.next_rip = vmcb02->control.next_rip;

        vmcb12->control.int_ctl = svm->nested.ctl.int_ctl;
        vmcb12->control.event_inj = svm->nested.ctl.event_inj;
        vmcb12->control.event_inj_err = svm->nested.ctl.event_inj_err;

        /* Propagate the pause-filter count L2 consumed back to vmcb01. */
        if (!kvm_pause_in_guest(vcpu->kvm)) {
                vmcb01->control.pause_filter_count = vmcb02->control.pause_filter_count;
                vmcb_mark_dirty(vmcb01, VMCB_INTERCEPTS);
        }

        nested_svm_copy_common_state(svm->nested.vmcb02.ptr, svm->vmcb01.ptr);

        svm_switch_vmcb(svm, &svm->vmcb01);

        /*
         * Rules for synchronizing int_ctl bits from vmcb02 to vmcb01:
         *
         * V_IRQ, V_IRQ_VECTOR, V_INTR_PRIO_MASK, V_IGN_TPR: If L1 doesn't
         * intercept interrupts, then KVM will use vmcb02's V_IRQ (and related
         * flags) to detect interrupt windows for L1 IRQs (even if L1 uses
         * virtual interrupt masking).  Raise KVM_REQ_EVENT to ensure that
         * KVM re-requests an interrupt window if necessary, which implicitly
         * copies these bits from vmcb02 to vmcb01.
         *
         * V_TPR: If L1 doesn't use virtual interrupt masking, then L1's vTPR
         * is stored in vmcb02, but its value doesn't need to be copied from/to
         * vmcb01 because it is copied from/to the virtual APIC's TPR register
         * on each VM entry/exit.
         *
         * V_GIF: If nested vGIF is not used, KVM uses vmcb02's V_GIF for L1's
         * V_GIF.  However, GIF is architecturally clear on each VM exit, thus
         * there is no need to copy V_GIF from vmcb02 to vmcb01.
         */
        if (!nested_exit_on_intr(svm))
                kvm_make_request(KVM_REQ_EVENT, &svm->vcpu);

        /* Sync LBR state back to wherever L1 expects to find it. */
        if (unlikely(guest_can_use(vcpu, X86_FEATURE_LBRV) &&
                     (svm->nested.ctl.virt_ext & LBR_CTL_ENABLE_MASK))) {
                svm_copy_lbrs(vmcb12, vmcb02);
                svm_update_lbrv(vcpu);
        } else if (unlikely(vmcb01->control.virt_ext & LBR_CTL_ENABLE_MASK)) {
                svm_copy_lbrs(vmcb01, vmcb02);
                svm_update_lbrv(vcpu);
        }

        /* Carry NMI blocking/pending state from vmcb02 over to vmcb01. */
        if (vnmi) {
                if (vmcb02->control.int_ctl & V_NMI_BLOCKING_MASK)
                        vmcb01->control.int_ctl |= V_NMI_BLOCKING_MASK;
                else
                        vmcb01->control.int_ctl &= ~V_NMI_BLOCKING_MASK;

                if (vcpu->arch.nmi_pending) {
                        vcpu->arch.nmi_pending--;
                        vmcb01->control.int_ctl |= V_NMI_PENDING_MASK;
                } else {
                        vmcb01->control.int_ctl &= ~V_NMI_PENDING_MASK;
                }
        }

        /*
         * On vmexit the GIF is set to false and
         * no event can be injected in L1.
         */
        svm_set_gif(svm, false);
        vmcb01->control.exit_int_info = 0;

        /* Drop the nested TSC offset/ratio and go back to L1's values. */
        svm->vcpu.arch.tsc_offset = svm->vcpu.arch.l1_tsc_offset;
        if (vmcb01->control.tsc_offset != svm->vcpu.arch.tsc_offset) {
                vmcb01->control.tsc_offset = svm->vcpu.arch.tsc_offset;
                vmcb_mark_dirty(vmcb01, VMCB_INTERCEPTS);
        }

        if (kvm_caps.has_tsc_control &&
            vcpu->arch.tsc_scaling_ratio != vcpu->arch.l1_tsc_scaling_ratio) {
                vcpu->arch.tsc_scaling_ratio = vcpu->arch.l1_tsc_scaling_ratio;
                svm_write_tsc_multiplier(vcpu);
        }

        svm->nested.ctl.nested_cr3 = 0;

        /*
         * Restore processor state that had been saved in vmcb01
         */
        kvm_set_rflags(vcpu, vmcb01->save.rflags);
        svm_set_efer(vcpu, vmcb01->save.efer);
        svm_set_cr0(vcpu, vmcb01->save.cr0 | X86_CR0_PE);
        svm_set_cr4(vcpu, vmcb01->save.cr4);
        kvm_rax_write(vcpu, vmcb01->save.rax);
        kvm_rsp_write(vcpu, vmcb01->save.rsp);
        kvm_rip_write(vcpu, vmcb01->save.rip);

        svm->vcpu.arch.dr7 = DR7_FIXED_1;
        kvm_update_dr7(&svm->vcpu);

        trace_kvm_nested_vmexit_inject(vmcb12->control.exit_code,
                                       vmcb12->control.exit_info_1,
                                       vmcb12->control.exit_info_2,
                                       vmcb12->control.exit_int_info,
                                       vmcb12->control.exit_int_info_err,
                                       KVM_ISA_SVM);

        kvm_vcpu_unmap(vcpu, &map, true);

        nested_svm_transition_tlb_flush(vcpu);

        nested_svm_uninit_mmu_context(vcpu);

        rc = nested_svm_load_cr3(vcpu, vmcb01->save.cr3, false, true);
        if (rc)
                return 1;

        /*
         * Drop what we picked up for L2 via svm_complete_interrupts() so it
         * doesn't end up in L1.
         */
        svm->vcpu.arch.nmi_injected = false;
        kvm_clear_exception_queue(vcpu);
        kvm_clear_interrupt_queue(vcpu);

        /*
         * If we are here following the completion of a VMRUN that
         * is being single-stepped, queue the pending #DB intercept
         * right now so that it an be accounted for before we execute
         * L1's next instruction.
         */
        if (unlikely(vmcb01->save.rflags & X86_EFLAGS_TF))
                kvm_queue_exception(&(svm->vcpu), DB_VECTOR);

        /*
         * Un-inhibit the AVIC right away, so that other vCPUs can start
         * to benefit from it right away.
         */
        if (kvm_apicv_activated(vcpu->kvm))
                __kvm_vcpu_update_apicv(vcpu);

        return 0;
}
  956. static void nested_svm_triple_fault(struct kvm_vcpu *vcpu)
  957. {
  958. struct vcpu_svm *svm = to_svm(vcpu);
  959. if (!vmcb12_is_intercept(&svm->nested.ctl, INTERCEPT_SHUTDOWN))
  960. return;
  961. kvm_clear_request(KVM_REQ_TRIPLE_FAULT, vcpu);
  962. nested_svm_simple_vmexit(to_svm(vcpu), SVM_EXIT_SHUTDOWN);
  963. }
  964. int svm_allocate_nested(struct vcpu_svm *svm)
  965. {
  966. struct page *vmcb02_page;
  967. if (svm->nested.initialized)
  968. return 0;
  969. vmcb02_page = snp_safe_alloc_page();
  970. if (!vmcb02_page)
  971. return -ENOMEM;
  972. svm->nested.vmcb02.ptr = page_address(vmcb02_page);
  973. svm->nested.vmcb02.pa = __sme_set(page_to_pfn(vmcb02_page) << PAGE_SHIFT);
  974. svm->nested.msrpm = svm_vcpu_alloc_msrpm();
  975. if (!svm->nested.msrpm)
  976. goto err_free_vmcb02;
  977. svm_vcpu_init_msrpm(&svm->vcpu, svm->nested.msrpm);
  978. svm->nested.initialized = true;
  979. return 0;
  980. err_free_vmcb02:
  981. __free_page(vmcb02_page);
  982. return -ENOMEM;
  983. }
/* Free the nested-virtualization resources allocated by svm_allocate_nested(). */
void svm_free_nested(struct vcpu_svm *svm)
{
        if (!svm->nested.initialized)
                return;

        /* vmcb02 must not be in use while it is being freed. */
        if (WARN_ON_ONCE(svm->vmcb != svm->vmcb01.ptr))
                svm_switch_vmcb(svm, &svm->vmcb01);

        svm_vcpu_free_msrpm(svm->nested.msrpm);
        svm->nested.msrpm = NULL;

        __free_page(virt_to_page(svm->nested.vmcb02.ptr));
        svm->nested.vmcb02.ptr = NULL;

        /*
         * When last_vmcb12_gpa matches the current vmcb12 gpa,
         * some vmcb12 fields are not loaded if they are marked clean
         * in the vmcb12, since in this case they are up to date already.
         *
         * When the vmcb02 is freed, this optimization becomes invalid.
         */
        svm->nested.last_vmcb12_gpa = INVALID_GPA;

        svm->nested.initialized = false;
}
/*
 * Forcibly leave guest mode without performing an architectural #VMEXIT,
 * e.g. on vCPU reset or when userspace clears the nested state.  No state
 * is written back to vmcb12.
 */
void svm_leave_nested(struct kvm_vcpu *vcpu)
{
        struct vcpu_svm *svm = to_svm(vcpu);

        if (is_guest_mode(vcpu)) {
                svm->nested.nested_run_pending = 0;
                svm->nested.vmcb12_gpa = INVALID_GPA;

                leave_guest_mode(vcpu);

                svm_switch_vmcb(svm, &svm->vmcb01);

                nested_svm_uninit_mmu_context(vcpu);
                /* vmcb01 may be stale relative to hardware; reload everything. */
                vmcb_mark_all_dirty(svm->vmcb);

                if (kvm_apicv_activated(vcpu->kvm))
                        kvm_make_request(KVM_REQ_APICV_UPDATE, vcpu);
        }

        kvm_clear_request(KVM_REQ_GET_NESTED_STATE_PAGES, vcpu);
}
/*
 * Consult L1's MSR permission bitmap to decide whether an MSR intercept
 * that fired while running L2 should be forwarded to L1
 * (NESTED_EXIT_DONE) or handled by KVM itself (NESTED_EXIT_HOST).
 */
static int nested_svm_exit_handled_msr(struct vcpu_svm *svm)
{
        u32 offset, msr, value;
        int write, mask;

        if (!(vmcb12_is_intercept(&svm->nested.ctl, INTERCEPT_MSR_PROT)))
                return NESTED_EXIT_HOST;

        msr = svm->vcpu.arch.regs[VCPU_REGS_RCX];
        offset = svm_msrpm_offset(msr);
        /* exit_info_1 bit 0: 1 = WRMSR, 0 = RDMSR. */
        write = svm->vmcb->control.exit_info_1 & 1;
        /* Two bits per MSR in the bitmap: read bit then write bit. */
        mask = 1 << ((2 * (msr & 0xf)) + write);

        /* MSR not covered by the bitmap: always reflect the exit to L1. */
        if (offset == MSR_INVALID)
                return NESTED_EXIT_DONE;

        /* Offset is in 32 bit units but need in 8 bit units */
        offset *= 4;

        /* Unreadable bitmap: be conservative and reflect the exit to L1. */
        if (kvm_vcpu_read_guest(&svm->vcpu, svm->nested.ctl.msrpm_base_pa + offset, &value, 4))
                return NESTED_EXIT_DONE;

        return (value & mask) ? NESTED_EXIT_DONE : NESTED_EXIT_HOST;
}
/*
 * Consult L1's I/O permission bitmap to decide whether an IOIO intercept
 * from L2 belongs to L1 (NESTED_EXIT_DONE) or to the host
 * (NESTED_EXIT_HOST).
 */
static int nested_svm_intercept_ioio(struct vcpu_svm *svm)
{
        unsigned port, size, iopm_len;
        u16 val, mask;
        u8 start_bit;
        u64 gpa;

        if (!(vmcb12_is_intercept(&svm->nested.ctl, INTERCEPT_IOIO_PROT)))
                return NESTED_EXIT_HOST;

        /* exit_info_1 bits 31:16 hold the port, size field holds the width. */
        port = svm->vmcb->control.exit_info_1 >> 16;
        size = (svm->vmcb->control.exit_info_1 & SVM_IOIO_SIZE_MASK) >>
                SVM_IOIO_SIZE_SHIFT;
        /* One permission bit per port; an access may straddle a byte. */
        gpa = svm->nested.ctl.iopm_base_pa + (port / 8);
        start_bit = port % 8;
        iopm_len = (start_bit + size > 8) ? 2 : 1;
        mask = (0xf >> (4 - size)) << start_bit;
        val = 0;

        /* Unreadable bitmap: be conservative and reflect the exit to L1. */
        if (kvm_vcpu_read_guest(&svm->vcpu, gpa, &val, iopm_len))
                return NESTED_EXIT_DONE;

        return (val & mask) ? NESTED_EXIT_DONE : NESTED_EXIT_HOST;
}
  1057. static int nested_svm_intercept(struct vcpu_svm *svm)
  1058. {
  1059. u32 exit_code = svm->vmcb->control.exit_code;
  1060. int vmexit = NESTED_EXIT_HOST;
  1061. switch (exit_code) {
  1062. case SVM_EXIT_MSR:
  1063. vmexit = nested_svm_exit_handled_msr(svm);
  1064. break;
  1065. case SVM_EXIT_IOIO:
  1066. vmexit = nested_svm_intercept_ioio(svm);
  1067. break;
  1068. case SVM_EXIT_READ_CR0 ... SVM_EXIT_WRITE_CR8: {
  1069. if (vmcb12_is_intercept(&svm->nested.ctl, exit_code))
  1070. vmexit = NESTED_EXIT_DONE;
  1071. break;
  1072. }
  1073. case SVM_EXIT_READ_DR0 ... SVM_EXIT_WRITE_DR7: {
  1074. if (vmcb12_is_intercept(&svm->nested.ctl, exit_code))
  1075. vmexit = NESTED_EXIT_DONE;
  1076. break;
  1077. }
  1078. case SVM_EXIT_EXCP_BASE ... SVM_EXIT_EXCP_BASE + 0x1f: {
  1079. /*
  1080. * Host-intercepted exceptions have been checked already in
  1081. * nested_svm_exit_special. There is nothing to do here,
  1082. * the vmexit is injected by svm_check_nested_events.
  1083. */
  1084. vmexit = NESTED_EXIT_DONE;
  1085. break;
  1086. }
  1087. case SVM_EXIT_ERR: {
  1088. vmexit = NESTED_EXIT_DONE;
  1089. break;
  1090. }
  1091. default: {
  1092. if (vmcb12_is_intercept(&svm->nested.ctl, exit_code))
  1093. vmexit = NESTED_EXIT_DONE;
  1094. }
  1095. }
  1096. return vmexit;
  1097. }
  1098. int nested_svm_exit_handled(struct vcpu_svm *svm)
  1099. {
  1100. int vmexit;
  1101. vmexit = nested_svm_intercept(svm);
  1102. if (vmexit == NESTED_EXIT_DONE)
  1103. nested_svm_vmexit(svm);
  1104. return vmexit;
  1105. }
  1106. int nested_svm_check_permissions(struct kvm_vcpu *vcpu)
  1107. {
  1108. if (!(vcpu->arch.efer & EFER_SVME) || !is_paging(vcpu)) {
  1109. kvm_queue_exception(vcpu, UD_VECTOR);
  1110. return 1;
  1111. }
  1112. if (to_svm(vcpu)->vmcb->save.cpl) {
  1113. kvm_inject_gp(vcpu, 0);
  1114. return 1;
  1115. }
  1116. return 0;
  1117. }
/*
 * Return true if an exception with @vector raised in L2 should cause a
 * nested #VMEXIT to L1.  On SVM interception is per-vector only, so
 * @error_code is not consulted.
 */
static bool nested_svm_is_exception_vmexit(struct kvm_vcpu *vcpu, u8 vector,
                                           u32 error_code)
{
        struct vcpu_svm *svm = to_svm(vcpu);

        return (svm->nested.ctl.intercepts[INTERCEPT_EXCEPTION] & BIT(vector));
}
/*
 * Deliver a pending exception-triggered nested #VMEXIT to L1: fill in the
 * exit code/info for the intercepted exception and perform the vmexit.
 */
static void nested_svm_inject_exception_vmexit(struct kvm_vcpu *vcpu)
{
        struct kvm_queued_exception *ex = &vcpu->arch.exception_vmexit;
        struct vcpu_svm *svm = to_svm(vcpu);
        struct vmcb *vmcb = svm->vmcb;

        vmcb->control.exit_code = SVM_EXIT_EXCP_BASE + ex->vector;
        vmcb->control.exit_code_hi = 0;

        if (ex->has_error_code)
                vmcb->control.exit_info_1 = ex->error_code;

        /*
         * EXITINFO2 is undefined for all exception intercepts other
         * than #PF.
         */
        if (ex->vector == PF_VECTOR) {
                /* For #PF, EXITINFO2 carries the faulting address (CR2). */
                if (ex->has_payload)
                        vmcb->control.exit_info_2 = ex->payload;
                else
                        vmcb->control.exit_info_2 = vcpu->arch.cr2;
        } else if (ex->vector == DB_VECTOR) {
                /* See kvm_check_and_inject_events(). */
                kvm_deliver_exception_payload(vcpu, ex);

                if (vcpu->arch.dr7 & DR7_GD) {
                        vcpu->arch.dr7 &= ~DR7_GD;
                        kvm_update_dr7(vcpu);
                }
        } else {
                WARN_ON(ex->has_payload);
        }

        nested_svm_vmexit(svm);
}
/* True if L1 intercepts INIT signals that arrive while L2 is running. */
static inline bool nested_exit_on_init(struct vcpu_svm *svm)
{
        return vmcb12_is_intercept(&svm->nested.ctl, INTERCEPT_INIT);
}
/*
 * Process pending events (INIT, exceptions, SMI, NMI, IRQ) in priority
 * order and synthesize the corresponding nested #VMEXIT when L1 has asked
 * to intercept the event.  Returns 0 when done, -EBUSY when the event must
 * wait (e.g. a nested VMRUN is still pending or an event needs
 * reinjection first).
 */
static int svm_check_nested_events(struct kvm_vcpu *vcpu)
{
        struct kvm_lapic *apic = vcpu->arch.apic;
        struct vcpu_svm *svm = to_svm(vcpu);
        /*
         * Only a pending nested run blocks a pending exception.  If there is a
         * previously injected event, the pending exception occurred while said
         * event was being delivered and thus needs to be handled.
         */
        bool block_nested_exceptions = svm->nested.nested_run_pending;
        /*
         * New events (not exceptions) are only recognized at instruction
         * boundaries.  If an event needs reinjection, then KVM is handling a
         * VM-Exit that occurred _during_ instruction execution; new events are
         * blocked until the instruction completes.
         */
        bool block_nested_events = block_nested_exceptions ||
                                   kvm_event_needs_reinjection(vcpu);

        if (lapic_in_kernel(vcpu) &&
            test_bit(KVM_APIC_INIT, &apic->pending_events)) {
                if (block_nested_events)
                        return -EBUSY;
                if (!nested_exit_on_init(svm))
                        return 0;
                nested_svm_simple_vmexit(svm, SVM_EXIT_INIT);
                return 0;
        }

        if (vcpu->arch.exception_vmexit.pending) {
                if (block_nested_exceptions)
                        return -EBUSY;
                nested_svm_inject_exception_vmexit(vcpu);
                return 0;
        }

        if (vcpu->arch.exception.pending) {
                if (block_nested_exceptions)
                        return -EBUSY;
                /* Exception destined for L2 itself; nothing to reflect. */
                return 0;
        }

#ifdef CONFIG_KVM_SMM
        if (vcpu->arch.smi_pending && !svm_smi_blocked(vcpu)) {
                if (block_nested_events)
                        return -EBUSY;
                if (!nested_exit_on_smi(svm))
                        return 0;
                nested_svm_simple_vmexit(svm, SVM_EXIT_SMI);
                return 0;
        }
#endif

        if (vcpu->arch.nmi_pending && !svm_nmi_blocked(vcpu)) {
                if (block_nested_events)
                        return -EBUSY;
                if (!nested_exit_on_nmi(svm))
                        return 0;
                nested_svm_simple_vmexit(svm, SVM_EXIT_NMI);
                return 0;
        }

        if (kvm_cpu_has_interrupt(vcpu) && !svm_interrupt_blocked(vcpu)) {
                if (block_nested_events)
                        return -EBUSY;
                if (!nested_exit_on_intr(svm))
                        return 0;
                trace_kvm_nested_intr_vmexit(svm->vmcb->save.rip);
                nested_svm_simple_vmexit(svm, SVM_EXIT_INTR);
                return 0;
        }

        return 0;
}
/*
 * First-pass filter for exits while in guest mode: classify exits that the
 * host must always handle (NESTED_EXIT_HOST) before the regular
 * L1-intercept checks run (NESTED_EXIT_CONTINUE).
 */
int nested_svm_exit_special(struct vcpu_svm *svm)
{
        u32 exit_code = svm->vmcb->control.exit_code;
        struct kvm_vcpu *vcpu = &svm->vcpu;

        switch (exit_code) {
        case SVM_EXIT_INTR:
        case SVM_EXIT_NMI:
        case SVM_EXIT_NPF:
                /* Physical interrupts, NMIs and NPT faults belong to L0. */
                return NESTED_EXIT_HOST;
        case SVM_EXIT_EXCP_BASE ... SVM_EXIT_EXCP_BASE + 0x1f: {
                u32 excp_bits = 1 << (exit_code - SVM_EXIT_EXCP_BASE);

                /* Exceptions that L0 (vmcb01) intercepts are handled here. */
                if (svm->vmcb01.ptr->control.intercepts[INTERCEPT_EXCEPTION] &
                    excp_bits)
                        return NESTED_EXIT_HOST;
                else if (exit_code == SVM_EXIT_EXCP_BASE + PF_VECTOR &&
                         svm->vcpu.arch.apf.host_apf_flags)
                        /* Trap async PF even if not shadowing */
                        return NESTED_EXIT_HOST;

                break;
        }
        case SVM_EXIT_VMMCALL:
                /* Hyper-V L2 TLB flush hypercall is handled by L0 */
                if (guest_hv_cpuid_has_l2_tlb_flush(vcpu) &&
                    nested_svm_l2_tlb_flush_enabled(vcpu) &&
                    kvm_hv_is_tlb_flush_hcall(vcpu))
                        return NESTED_EXIT_HOST;
                break;
        default:
                break;
        }

        return NESTED_EXIT_CONTINUE;
}
/*
 * Recompute the effective TSC multiplier from L1's scaling ratio and the
 * ratio L1 programmed for L2 (svm->tsc_ratio_msr), then propagate the
 * result to hardware.
 */
void nested_svm_update_tsc_ratio_msr(struct kvm_vcpu *vcpu)
{
        struct vcpu_svm *svm = to_svm(vcpu);

        vcpu->arch.tsc_scaling_ratio =
                kvm_calc_nested_tsc_multiplier(vcpu->arch.l1_tsc_scaling_ratio,
                                               svm->tsc_ratio_msr);

        svm_write_tsc_multiplier(vcpu);
}
  1265. /* Inverse operation of nested_copy_vmcb_control_to_cache(). asid is copied too. */
  1266. static void nested_copy_vmcb_cache_to_control(struct vmcb_control_area *dst,
  1267. struct vmcb_ctrl_area_cached *from)
  1268. {
  1269. unsigned int i;
  1270. memset(dst, 0, sizeof(struct vmcb_control_area));
  1271. for (i = 0; i < MAX_INTERCEPT; i++)
  1272. dst->intercepts[i] = from->intercepts[i];
  1273. dst->iopm_base_pa = from->iopm_base_pa;
  1274. dst->msrpm_base_pa = from->msrpm_base_pa;
  1275. dst->tsc_offset = from->tsc_offset;
  1276. dst->asid = from->asid;
  1277. dst->tlb_ctl = from->tlb_ctl;
  1278. dst->int_ctl = from->int_ctl;
  1279. dst->int_vector = from->int_vector;
  1280. dst->int_state = from->int_state;
  1281. dst->exit_code = from->exit_code;
  1282. dst->exit_code_hi = from->exit_code_hi;
  1283. dst->exit_info_1 = from->exit_info_1;
  1284. dst->exit_info_2 = from->exit_info_2;
  1285. dst->exit_int_info = from->exit_int_info;
  1286. dst->exit_int_info_err = from->exit_int_info_err;
  1287. dst->nested_ctl = from->nested_ctl;
  1288. dst->event_inj = from->event_inj;
  1289. dst->event_inj_err = from->event_inj_err;
  1290. dst->next_rip = from->next_rip;
  1291. dst->nested_cr3 = from->nested_cr3;
  1292. dst->virt_ext = from->virt_ext;
  1293. dst->pause_filter_count = from->pause_filter_count;
  1294. dst->pause_filter_thresh = from->pause_filter_thresh;
  1295. /* 'clean' and 'hv_enlightenments' are not changed by KVM */
  1296. }
/*
 * KVM_GET_NESTED_STATE implementation: copy the nested-SVM state (header,
 * cached vmcb12 control area, and L1 save state from vmcb01) to userspace.
 *
 * Called with a NULL @vcpu to query the required buffer size.  Returns the
 * number of bytes (header plus, in guest mode, one VMCB-sized blob), or a
 * negative errno on copy/allocation failure.
 */
static int svm_get_nested_state(struct kvm_vcpu *vcpu,
                                struct kvm_nested_state __user *user_kvm_nested_state,
                                u32 user_data_size)
{
        struct vcpu_svm *svm;
        struct vmcb_control_area *ctl;
        unsigned long r;
        struct kvm_nested_state kvm_state = {
                .flags = 0,
                .format = KVM_STATE_NESTED_FORMAT_SVM,
                .size = sizeof(kvm_state),
        };
        struct vmcb __user *user_vmcb = (struct vmcb __user *)
                &user_kvm_nested_state->data.svm[0];

        if (!vcpu)
                return kvm_state.size + KVM_STATE_NESTED_SVM_VMCB_SIZE;

        svm = to_svm(vcpu);

        if (user_data_size < kvm_state.size)
                goto out;

        /* First fill in the header and copy it out. */
        if (is_guest_mode(vcpu)) {
                kvm_state.hdr.svm.vmcb_pa = svm->nested.vmcb12_gpa;
                kvm_state.size += KVM_STATE_NESTED_SVM_VMCB_SIZE;
                kvm_state.flags |= KVM_STATE_NESTED_GUEST_MODE;

                if (svm->nested.nested_run_pending)
                        kvm_state.flags |= KVM_STATE_NESTED_RUN_PENDING;
        }

        if (gif_set(svm))
                kvm_state.flags |= KVM_STATE_NESTED_GIF_SET;

        if (copy_to_user(user_kvm_nested_state, &kvm_state, sizeof(kvm_state)))
                return -EFAULT;

        if (!is_guest_mode(vcpu))
                goto out;

        /*
         * Copy over the full size of the VMCB rather than just the size
         * of the structs.
         */
        if (clear_user(user_vmcb, KVM_STATE_NESTED_SVM_VMCB_SIZE))
                return -EFAULT;

        /* Reconstruct the architectural control area from the cache. */
        ctl = kzalloc(sizeof(*ctl), GFP_KERNEL);
        if (!ctl)
                return -ENOMEM;

        nested_copy_vmcb_cache_to_control(ctl, &svm->nested.ctl);
        r = copy_to_user(&user_vmcb->control, ctl,
                         sizeof(user_vmcb->control));
        kfree(ctl);
        if (r)
                return -EFAULT;

        /* L1's save state lives in vmcb01 while L2 is running. */
        if (copy_to_user(&user_vmcb->save, &svm->vmcb01.ptr->save,
                         sizeof(user_vmcb->save)))
                return -EFAULT;
out:
        return kvm_state.size;
}
/*
 * KVM_SET_NESTED_STATE implementation: validate and install nested-SVM
 * state supplied by userspace (typically after migration), re-entering
 * guest mode if the saved state says L2 was running.
 *
 * Returns 0 on success, -EINVAL/-EFAULT/-ENOMEM on invalid or unreadable
 * input.
 */
static int svm_set_nested_state(struct kvm_vcpu *vcpu,
                                struct kvm_nested_state __user *user_kvm_nested_state,
                                struct kvm_nested_state *kvm_state)
{
        struct vcpu_svm *svm = to_svm(vcpu);
        struct vmcb __user *user_vmcb = (struct vmcb __user *)
                &user_kvm_nested_state->data.svm[0];
        struct vmcb_control_area *ctl;
        struct vmcb_save_area *save;
        struct vmcb_save_area_cached save_cached;
        struct vmcb_ctrl_area_cached ctl_cached;
        unsigned long cr0;
        int ret;

        BUILD_BUG_ON(sizeof(struct vmcb_control_area) + sizeof(struct vmcb_save_area) >
                     KVM_STATE_NESTED_SVM_VMCB_SIZE);

        if (kvm_state->format != KVM_STATE_NESTED_FORMAT_SVM)
                return -EINVAL;

        if (kvm_state->flags & ~(KVM_STATE_NESTED_GUEST_MODE |
                                 KVM_STATE_NESTED_RUN_PENDING |
                                 KVM_STATE_NESTED_GIF_SET))
                return -EINVAL;

        /*
         * If in guest mode, vcpu->arch.efer actually refers to the L2 guest's
         * EFER.SVME, but EFER.SVME still has to be 1 for VMRUN to succeed.
         */
        if (!(vcpu->arch.efer & EFER_SVME)) {
                /* GIF=1 and no guest mode are required if SVME=0. */
                if (kvm_state->flags != KVM_STATE_NESTED_GIF_SET)
                        return -EINVAL;
        }

        /* SMM temporarily disables SVM, so we cannot be in guest mode. */
        if (is_smm(vcpu) && (kvm_state->flags & KVM_STATE_NESTED_GUEST_MODE))
                return -EINVAL;

        /* Not in guest mode: just record GIF and we are done. */
        if (!(kvm_state->flags & KVM_STATE_NESTED_GUEST_MODE)) {
                svm_leave_nested(vcpu);
                svm_set_gif(svm, !!(kvm_state->flags & KVM_STATE_NESTED_GIF_SET));
                return 0;
        }

        if (!page_address_valid(vcpu, kvm_state->hdr.svm.vmcb_pa))
                return -EINVAL;
        if (kvm_state->size < sizeof(*kvm_state) + KVM_STATE_NESTED_SVM_VMCB_SIZE)
                return -EINVAL;

        ret  = -ENOMEM;
        ctl  = kzalloc(sizeof(*ctl),  GFP_KERNEL);
        save = kzalloc(sizeof(*save), GFP_KERNEL);
        if (!ctl || !save)
                goto out_free;

        ret = -EFAULT;
        if (copy_from_user(ctl, &user_vmcb->control, sizeof(*ctl)))
                goto out_free;
        if (copy_from_user(save, &user_vmcb->save, sizeof(*save)))
                goto out_free;

        ret = -EINVAL;
        __nested_copy_vmcb_control_to_cache(vcpu, &ctl_cached, ctl);
        if (!__nested_vmcb_check_controls(vcpu, &ctl_cached))
                goto out_free;

        /*
         * Processor state contains L2 state.  Check that it is
         * valid for guest mode (see nested_vmcb_check_save).
         */
        cr0 = kvm_read_cr0(vcpu);
        if (((cr0 & X86_CR0_CD) == 0) && (cr0 & X86_CR0_NW))
                goto out_free;

        /*
         * Validate host state saved from before VMRUN (see
         * nested_svm_check_permissions).
         */
        __nested_copy_vmcb_save_to_cache(&save_cached, save);
        if (!(save->cr0 & X86_CR0_PG) ||
            !(save->cr0 & X86_CR0_PE) ||
            (save->rflags & X86_EFLAGS_VM) ||
            !__nested_vmcb_check_save(vcpu, &save_cached))
                goto out_free;

        /*
         * All checks done, we can enter guest mode.  Userspace provides
         * vmcb12.control, which will be combined with L1 and stored into
         * vmcb02, and the L1 save state which we store in vmcb01.
         * L2 registers if needed are moved from the current VMCB to VMCB02.
         */

        if (is_guest_mode(vcpu))
                svm_leave_nested(vcpu);
        else
                svm->nested.vmcb02.ptr->save = svm->vmcb01.ptr->save;

        svm_set_gif(svm, !!(kvm_state->flags & KVM_STATE_NESTED_GIF_SET));

        svm->nested.nested_run_pending =
                !!(kvm_state->flags & KVM_STATE_NESTED_RUN_PENDING);

        svm->nested.vmcb12_gpa = kvm_state->hdr.svm.vmcb_pa;

        svm_copy_vmrun_state(&svm->vmcb01.ptr->save, save);
        nested_copy_vmcb_control_to_cache(svm, ctl);

        svm_switch_vmcb(svm, &svm->nested.vmcb02);
        nested_vmcb02_prepare_control(svm, svm->vmcb->save.rip, svm->vmcb->save.cs.base);

        /*
         * While the nested guest CR3 is already checked and set by
         * KVM_SET_SREGS, it was set when nested state was yet loaded,
         * thus MMU might not be initialized correctly.
         * Set it again to fix this.
         */

        ret = nested_svm_load_cr3(&svm->vcpu, vcpu->arch.cr3,
                                  nested_npt_enabled(svm), false);
        if (WARN_ON_ONCE(ret))
                goto out_free;

        svm->nested.force_msr_bitmap_recalc = true;

        kvm_make_request(KVM_REQ_GET_NESTED_STATE_PAGES, vcpu);
        ret = 0;
out_free:
        kfree(save);
        kfree(ctl);

        return ret;
}
/*
 * Handle KVM_REQ_GET_NESTED_STATE_PAGES: finish the parts of nested entry
 * that need guest memory access (PDPTRs, MSR bitmap merge, Hyper-V VP
 * assist page).  Returns false to abort the nested entry on failure.
 */
static bool svm_get_nested_state_pages(struct kvm_vcpu *vcpu)
{
        struct vcpu_svm *svm = to_svm(vcpu);

        if (WARN_ON(!is_guest_mode(vcpu)))
                return true;

        if (!vcpu->arch.pdptrs_from_userspace &&
            !nested_npt_enabled(svm) && is_pae_paging(vcpu))
                /*
                 * Reload the guest's PDPTRs since after a migration
                 * the guest CR3 might be restored prior to setting the nested
                 * state which can lead to a load of wrong PDPTRs.
                 */
                if (CC(!load_pdptrs(vcpu, vcpu->arch.cr3)))
                        return false;

        if (!nested_svm_vmrun_msrpm(svm)) {
                vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
                vcpu->run->internal.suberror =
                        KVM_INTERNAL_ERROR_EMULATION;
                vcpu->run->internal.ndata = 0;
                return false;
        }

        if (kvm_hv_verify_vp_assist(vcpu))
                return false;

        return true;
}
/* SVM implementation of the arch-neutral nested-virtualization hooks. */
struct kvm_x86_nested_ops svm_nested_ops = {
        .leave_nested = svm_leave_nested,
        .is_exception_vmexit = nested_svm_is_exception_vmexit,
        .check_events = svm_check_nested_events,
        .triple_fault = nested_svm_triple_fault,
        .get_nested_state_pages = svm_get_nested_state_pages,
        .get_state = svm_get_nested_state,
        .set_state = svm_set_nested_state,
        .hv_inject_synthetic_vmexit_post_tlb_flush = svm_hv_inject_synthetic_vmexit_post_tlb_flush,
};