book3s_hv_p9_entry.c 26 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
3704705706707708709710711712713714715716717718719720721722723724725726727728729730731732733734735736737738739740741742743744745746747748749750751752753754755756757758759760761762763764765766767768769770771772773774775776777778779780781782783784785786787788789790791792793794795796797798799800801802803804805806807808809810811812813814815816817818819820821822823824825826827828829830831832833834835836837838839840841842843844845846847848849850851852853854855856857858859860861862863864865866867868869870871872873874875876877878879880881882883884885886887888889890891892893894895896897898899900901902903904905906907908909910911912913914915916917918919920921922923924925926927928929930
  1. // SPDX-License-Identifier: GPL-2.0-only
  2. #include <linux/kernel.h>
  3. #include <linux/kvm_host.h>
  4. #include <asm/asm-prototypes.h>
  5. #include <asm/dbell.h>
  6. #include <asm/ppc-opcode.h>
  7. #include "book3s_hv.h"
/*
 * Load guest SPR values from the vcpu into the CPU's registers.
 *
 * Most SPRs are written only when the guest value differs from the value
 * currently in the register (taken from current->thread or the saved
 * host_os_sprs), to avoid unnecessary mtspr cost.
 */
static void load_spr_state(struct kvm_vcpu *vcpu,
			   struct p9_host_os_sprs *host_os_sprs)
{
	/* TAR is very fast */
	mtspr(SPRN_TAR, vcpu->arch.tar);

#ifdef CONFIG_ALTIVEC
	if (cpu_has_feature(CPU_FTR_ALTIVEC) &&
	    current->thread.vrsave != vcpu->arch.vrsave)
		mtspr(SPRN_VRSAVE, vcpu->arch.vrsave);
#endif

	/* EBB SPRs are only loaded while the guest has the EBB facility */
	if (vcpu->arch.hfscr & HFSCR_EBB) {
		if (current->thread.ebbhr != vcpu->arch.ebbhr)
			mtspr(SPRN_EBBHR, vcpu->arch.ebbhr);
		if (current->thread.ebbrr != vcpu->arch.ebbrr)
			mtspr(SPRN_EBBRR, vcpu->arch.ebbrr);
		if (current->thread.bescr != vcpu->arch.bescr)
			mtspr(SPRN_BESCR, vcpu->arch.bescr);
	}

	if (cpu_has_feature(CPU_FTR_P9_TIDR) &&
	    current->thread.tidr != vcpu->arch.tid)
		mtspr(SPRN_TIDR, vcpu->arch.tid);
	if (host_os_sprs->iamr != vcpu->arch.iamr)
		mtspr(SPRN_IAMR, vcpu->arch.iamr);
	if (host_os_sprs->amr != vcpu->arch.amr)
		mtspr(SPRN_AMR, vcpu->arch.amr);
	/* UAMOR is 0 on the host side, so only write it if the guest's differs */
	if (vcpu->arch.uamor != 0)
		mtspr(SPRN_UAMOR, vcpu->arch.uamor);
	if (current->thread.fscr != vcpu->arch.fscr)
		mtspr(SPRN_FSCR, vcpu->arch.fscr);
	if (current->thread.dscr != vcpu->arch.dscr)
		mtspr(SPRN_DSCR, vcpu->arch.dscr);
	if (vcpu->arch.pspb != 0)
		mtspr(SPRN_PSPB, vcpu->arch.pspb);

	/*
	 * DAR, DSISR, and for nested HV, SPRGs must be set with MSR[RI]
	 * clear (or hstate set appropriately to catch those registers
	 * being clobbered if we take a MCE or SRESET), so those are done
	 * later.
	 */

	/* Clear the runlatch (CTRL[RUN]) if the guest's CTRL has it clear */
	if (!(vcpu->arch.ctrl & 1))
		mtspr(SPRN_CTRLT, 0);
}
/*
 * Save the guest's SPR values from the CPU registers back into the vcpu,
 * mirroring load_spr_state(). CTRL is read via its read form (CTRLF).
 */
static void store_spr_state(struct kvm_vcpu *vcpu)
{
	vcpu->arch.tar = mfspr(SPRN_TAR);

#ifdef CONFIG_ALTIVEC
	if (cpu_has_feature(CPU_FTR_ALTIVEC))
		vcpu->arch.vrsave = mfspr(SPRN_VRSAVE);
#endif

	/* EBB SPRs only hold guest state while the guest owns the facility */
	if (vcpu->arch.hfscr & HFSCR_EBB) {
		vcpu->arch.ebbhr = mfspr(SPRN_EBBHR);
		vcpu->arch.ebbrr = mfspr(SPRN_EBBRR);
		vcpu->arch.bescr = mfspr(SPRN_BESCR);
	}

	if (cpu_has_feature(CPU_FTR_P9_TIDR))
		vcpu->arch.tid = mfspr(SPRN_TIDR);
	vcpu->arch.iamr = mfspr(SPRN_IAMR);
	vcpu->arch.amr = mfspr(SPRN_AMR);
	vcpu->arch.uamor = mfspr(SPRN_UAMOR);
	vcpu->arch.fscr = mfspr(SPRN_FSCR);
	vcpu->arch.dscr = mfspr(SPRN_DSCR);
	vcpu->arch.pspb = mfspr(SPRN_PSPB);
	vcpu->arch.ctrl = mfspr(SPRN_CTRLF);
}
/*
 * Load guest FP/VEC/SPR (and TM if active) state onto the CPU.
 *
 * Returns true if current MSR and/or guest MSR may have changed (the TM
 * restore path can do this), so the caller knows to re-read MSR.
 */
bool load_vcpu_state(struct kvm_vcpu *vcpu,
		     struct p9_host_os_sprs *host_os_sprs)
{
	bool ret = false;

#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
	if (cpu_has_feature(CPU_FTR_TM) ||
	    cpu_has_feature(CPU_FTR_P9_TM_HV_ASSIST)) {
		unsigned long guest_msr = vcpu->arch.shregs.msr;
		if (MSR_TM_ACTIVE(guest_msr)) {
			/* Full TM restore; may change MSR, hence ret = true */
			kvmppc_restore_tm_hv(vcpu, guest_msr, true);
			ret = true;
		} else if (vcpu->arch.hfscr & HFSCR_TM) {
			/* TM inactive but facility available: just the TM SPRs */
			mtspr(SPRN_TEXASR, vcpu->arch.texasr);
			mtspr(SPRN_TFHAR, vcpu->arch.tfhar);
			mtspr(SPRN_TFIAR, vcpu->arch.tfiar);
		}
	}
#endif

	load_spr_state(vcpu, host_os_sprs);

	load_fp_state(&vcpu->arch.fp);
#ifdef CONFIG_ALTIVEC
	load_vr_state(&vcpu->arch.vr);
#endif

	return ret;
}
EXPORT_SYMBOL_GPL(load_vcpu_state);
/*
 * Save guest SPR/FP/VEC (and TM if active) state from the CPU into the vcpu.
 * Counterpart of load_vcpu_state().
 */
void store_vcpu_state(struct kvm_vcpu *vcpu)
{
	store_spr_state(vcpu);

	store_fp_state(&vcpu->arch.fp);
#ifdef CONFIG_ALTIVEC
	store_vr_state(&vcpu->arch.vr);
#endif

#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
	if (cpu_has_feature(CPU_FTR_TM) ||
	    cpu_has_feature(CPU_FTR_P9_TM_HV_ASSIST)) {
		unsigned long guest_msr = vcpu->arch.shregs.msr;
		if (MSR_TM_ACTIVE(guest_msr)) {
			kvmppc_save_tm_hv(vcpu, guest_msr, true);
		} else if (vcpu->arch.hfscr & HFSCR_TM) {
			vcpu->arch.texasr = mfspr(SPRN_TEXASR);
			vcpu->arch.tfhar = mfspr(SPRN_TFHAR);
			vcpu->arch.tfiar = mfspr(SPRN_TFIAR);

			if (!vcpu->arch.nested) {
				vcpu->arch.load_tm++; /* see load_ebb comment */
				if (!vcpu->arch.load_tm)
					vcpu->arch.hfscr &= ~HFSCR_TM;
			}
		}
	}
#endif
}
EXPORT_SYMBOL_GPL(store_vcpu_state);
/* Snapshot the host's IAMR/AMR so they can be compared and restored later. */
void save_p9_host_os_sprs(struct p9_host_os_sprs *host_os_sprs)
{
	host_os_sprs->iamr = mfspr(SPRN_IAMR);
	host_os_sprs->amr = mfspr(SPRN_AMR);
}
EXPORT_SYMBOL_GPL(save_p9_host_os_sprs);
/*
 * Restore host SPR values after the guest's have been saved.
 *
 * vcpu guest regs must already be saved. Writes are skipped when the
 * register already holds the desired host value (guest value == host value).
 */
void restore_p9_host_os_sprs(struct kvm_vcpu *vcpu,
			     struct p9_host_os_sprs *host_os_sprs)
{
	/*
	 * current->thread.xxx registers must all be restored to host
	 * values before a potential context switch, otherwise the context
	 * switch itself will overwrite current->thread.xxx with the values
	 * from the guest SPRs.
	 */

	mtspr(SPRN_SPRG_VDSO_WRITE, local_paca->sprg_vdso);

	if (cpu_has_feature(CPU_FTR_P9_TIDR) &&
	    current->thread.tidr != vcpu->arch.tid)
		mtspr(SPRN_TIDR, current->thread.tidr);
	if (host_os_sprs->iamr != vcpu->arch.iamr)
		mtspr(SPRN_IAMR, host_os_sprs->iamr);
	/* Host runs with UAMOR == 0; clear it only if the guest set it */
	if (vcpu->arch.uamor != 0)
		mtspr(SPRN_UAMOR, 0);
	if (host_os_sprs->amr != vcpu->arch.amr)
		mtspr(SPRN_AMR, host_os_sprs->amr);
	if (current->thread.fscr != vcpu->arch.fscr)
		mtspr(SPRN_FSCR, current->thread.fscr);
	if (current->thread.dscr != vcpu->arch.dscr)
		mtspr(SPRN_DSCR, current->thread.dscr);
	if (vcpu->arch.pspb != 0)
		mtspr(SPRN_PSPB, 0);

	/* Save guest CTRL register, set runlatch to 1 */
	if (!(vcpu->arch.ctrl & 1))
		mtspr(SPRN_CTRLT, 1);

#ifdef CONFIG_ALTIVEC
	if (cpu_has_feature(CPU_FTR_ALTIVEC) &&
	    vcpu->arch.vrsave != current->thread.vrsave)
		mtspr(SPRN_VRSAVE, current->thread.vrsave);
#endif

	if (vcpu->arch.hfscr & HFSCR_EBB) {
		if (vcpu->arch.bescr != current->thread.bescr)
			mtspr(SPRN_BESCR, current->thread.bescr);
		if (vcpu->arch.ebbhr != current->thread.ebbhr)
			mtspr(SPRN_EBBHR, current->thread.ebbhr);
		if (vcpu->arch.ebbrr != current->thread.ebbrr)
			mtspr(SPRN_EBBRR, current->thread.ebbrr);

		if (!vcpu->arch.nested) {
			/*
			 * This is like load_fp in context switching, turn off
			 * the facility after it wraps the u8 to try avoiding
			 * saving and restoring the registers each partition
			 * switch.
			 */
			vcpu->arch.load_ebb++;
			if (!vcpu->arch.load_ebb)
				vcpu->arch.hfscr &= ~HFSCR_EBB;
		}
	}

	if (vcpu->arch.tar != current->thread.tar)
		mtspr(SPRN_TAR, current->thread.tar);
}
EXPORT_SYMBOL_GPL(restore_p9_host_os_sprs);
#ifdef CONFIG_KVM_BOOK3S_HV_P9_TIMING
/*
 * Close the current timing activity and start accounting @next.
 *
 * Charges the timebase ticks spent since cur_tb_start to the accumulator
 * that was current (min/max/total). Updates are bracketed by odd/even
 * seqcount increments with smp_wmb(), so a concurrent reader can detect
 * an in-progress update (seqlock-style publication).
 */
void accumulate_time(struct kvm_vcpu *vcpu, struct kvmhv_tb_accumulator *next)
{
	struct kvmppc_vcore *vc = vcpu->arch.vcore;
	struct kvmhv_tb_accumulator *curr;
	/* Use guest-relative time: raw TB minus the applied guest offset */
	u64 tb = mftb() - vc->tb_offset_applied;
	u64 prev_tb;
	u64 delta;
	u64 seq;

	curr = vcpu->arch.cur_activity;
	vcpu->arch.cur_activity = next;
	prev_tb = vcpu->arch.cur_tb_start;
	vcpu->arch.cur_tb_start = tb;

	/* Nothing was being timed yet; just start the new activity */
	if (!curr)
		return;

	delta = tb - prev_tb;

	seq = curr->seqcount;
	curr->seqcount = seq + 1;	/* odd: update in progress */
	smp_wmb();
	curr->tb_total += delta;
	if (seq == 0 || delta < curr->tb_min)
		curr->tb_min = delta;
	if (delta > curr->tb_max)
		curr->tb_max = delta;
	smp_wmb();
	curr->seqcount = seq + 2;	/* even again: update complete */
}
EXPORT_SYMBOL_GPL(accumulate_time);
#endif
  218. static inline u64 mfslbv(unsigned int idx)
  219. {
  220. u64 slbev;
  221. asm volatile("slbmfev %0,%1" : "=r" (slbev) : "r" (idx));
  222. return slbev;
  223. }
  224. static inline u64 mfslbe(unsigned int idx)
  225. {
  226. u64 slbee;
  227. asm volatile("slbmfee %0,%1" : "=r" (slbee) : "r" (idx));
  228. return slbee;
  229. }
/*
 * Write one SLB entry. Note slbmte takes RS (slbev) first and RB (slbee)
 * second, the reverse of this function's argument order.
 */
static inline void mtslb(u64 slbee, u64 slbev)
{
	asm volatile("slbmte %0,%1" :: "r" (slbev), "r" (slbee));
}
/* Invalidate SLB entry @idx by writing it with slbev = 0 (valid bit clear). */
static inline void clear_slb_entry(unsigned int idx)
{
	mtslb(idx, 0);
}
/* Clear entry 0 then invalidate the whole SLB with slbia IH=6. */
static inline void slb_clear_invalidate_partition(void)
{
	clear_slb_entry(0);
	asm volatile(PPC_SLBIA(6));
}
  243. /*
  244. * Malicious or buggy radix guests may have inserted SLB entries
  245. * (only 0..3 because radix always runs with UPRT=1), so these must
  246. * be cleared here to avoid side-channels. slbmte is used rather
  247. * than slbia, as it won't clear cached translations.
  248. */
  249. static void radix_clear_slb(void)
  250. {
  251. int i;
  252. for (i = 0; i < 4; i++)
  253. clear_slb_entry(i);
  254. }
/*
 * Switch LPIDR/LPCR/PIDR from host to guest context for a radix guest.
 * Ordering of the barriers and SPR writes here is load-bearing; do not
 * reorder without consulting the comments.
 */
static void switch_mmu_to_guest_radix(struct kvm *kvm, struct kvm_vcpu *vcpu, u64 lpcr)
{
	struct kvm_nested_guest *nested = vcpu->arch.nested;
	u32 lpid;
	u32 pid;

	/* A nested guest runs under its shadow LPID rather than the L1's */
	lpid = nested ? nested->shadow_lpid : kvm->arch.lpid;
	pid = kvmppc_get_pid(vcpu);

	/*
	 * Prior memory accesses to host PID Q3 must be completed before we
	 * start switching, and stores must be drained to avoid not-my-LPAR
	 * logic (see switch_mmu_to_host).
	 */
	asm volatile("hwsync" ::: "memory");
	isync();
	mtspr(SPRN_LPID, lpid);
	mtspr(SPRN_LPCR, lpcr);
	mtspr(SPRN_PID, pid);
	/*
	 * isync not required here because we are HRFID'ing to guest before
	 * any guest context access, which is context synchronising.
	 */
}
/*
 * Switch LPIDR/LPCR/PIDR to guest context for a HPT guest, then reload
 * the guest's saved SLB entries (saved earlier by save_clear_guest_mmu).
 */
static void switch_mmu_to_guest_hpt(struct kvm *kvm, struct kvm_vcpu *vcpu, u64 lpcr)
{
	u32 lpid;
	u32 pid;
	int i;

	lpid = kvm->arch.lpid;
	pid = kvmppc_get_pid(vcpu);

	/*
	 * See switch_mmu_to_guest_radix. ptesync should not be required here
	 * even if the host is in HPT mode because speculative accesses would
	 * not cause RC updates (we are in real mode).
	 */
	asm volatile("hwsync" ::: "memory");
	isync();
	mtspr(SPRN_LPID, lpid);
	mtspr(SPRN_LPCR, lpcr);
	mtspr(SPRN_PID, pid);

	for (i = 0; i < vcpu->arch.slb_max; i++)
		mtslb(vcpu->arch.slb[i].orige, vcpu->arch.slb[i].origv);
	/*
	 * isync not required here, see switch_mmu_to_guest_radix.
	 */
}
/*
 * Switch PIDR/LPIDR/LPCR back to host context after guest exit, and
 * restore the host's bolted SLB entries if the host runs hash MMU.
 */
static void switch_mmu_to_host(struct kvm *kvm, u32 pid)
{
	u32 lpid = kvm->arch.host_lpid;
	u64 lpcr = kvm->arch.host_lpcr;

	/*
	 * The guest has exited, so guest MMU context is no longer being
	 * non-speculatively accessed, but a hwsync is needed before the
	 * mtLPIDR / mtPIDR switch, in order to ensure all stores are drained,
	 * so the not-my-LPAR tlbie logic does not overlook them.
	 */
	asm volatile("hwsync" ::: "memory");
	isync();
	mtspr(SPRN_PID, pid);
	mtspr(SPRN_LPID, lpid);
	mtspr(SPRN_LPCR, lpcr);
	/*
	 * isync is not required after the switch, because mtmsrd with L=0
	 * is performed after this switch, which is context synchronising.
	 */

	if (!radix_enabled())
		slb_restore_bolted_realmode();
}
/* Clear host MMU state (SLB) before switching to the guest context. */
static void save_clear_host_mmu(struct kvm *kvm)
{
	if (!radix_enabled()) {
		/*
		 * Hash host could save and restore host SLB entries to
		 * reduce SLB fault overheads of VM exits, but for now the
		 * existing code clears all entries and restores just the
		 * bolted ones when switching back to host.
		 */
		slb_clear_invalidate_partition();
	}
}
/*
 * Save (HPT guest) or simply clear (radix guest) the guest's SLB entries
 * on exit. For HPT, only valid entries are saved, compacted into
 * vcpu->arch.slb[0..slb_max-1] with the index folded into orige.
 */
static void save_clear_guest_mmu(struct kvm *kvm, struct kvm_vcpu *vcpu)
{
	if (kvm_is_radix(kvm)) {
		radix_clear_slb();
	} else {
		int i;
		int nr = 0;

		/*
		 * This must run before switching to host (radix host can't
		 * access all SLBs).
		 */
		for (i = 0; i < vcpu->arch.slb_nr; i++) {
			u64 slbee, slbev;

			slbee = mfslbe(i);
			if (slbee & SLB_ESID_V) {
				slbev = mfslbv(i);
				vcpu->arch.slb[nr].orige = slbee | i;
				vcpu->arch.slb[nr].origv = slbev;
				nr++;
			}
		}
		vcpu->arch.slb_max = nr;
		slb_clear_invalidate_partition();
	}
}
/*
 * Flush the guest's TLB on this core with per-set TLBIEL, then flush the
 * ERAT. Radix uses R=1 PRS=1 (first iteration RIC=2 to also flush the
 * PWC); hash uses R=0 PRS=0 RIC=0 for every set.
 */
static void flush_guest_tlb(struct kvm *kvm)
{
	unsigned long rb, set;

	rb = PPC_BIT(52);	/* IS = 2 */
	if (kvm_is_radix(kvm)) {
		/* R=1 PRS=1 RIC=2 */
		asm volatile(PPC_TLBIEL(%0, %4, %3, %2, %1)
			     : : "r" (rb), "i" (1), "i" (1), "i" (2),
			       "r" (0) : "memory");
		for (set = 1; set < kvm->arch.tlb_sets; ++set) {
			rb += PPC_BIT(51);	/* increment set number */
			/* R=1 PRS=1 RIC=0 */
			asm volatile(PPC_TLBIEL(%0, %4, %3, %2, %1)
				     : : "r" (rb), "i" (1), "i" (1), "i" (0),
				       "r" (0) : "memory");
		}
		asm volatile("ptesync": : :"memory");
		// POWER9 congruence-class TLBIEL leaves ERAT. Flush it now.
		asm volatile(PPC_RADIX_INVALIDATE_ERAT_GUEST : : :"memory");
	} else {
		for (set = 0; set < kvm->arch.tlb_sets; ++set) {
			/* R=0 PRS=0 RIC=0 */
			asm volatile(PPC_TLBIEL(%0, %4, %3, %2, %1)
				     : : "r" (rb), "i" (0), "i" (0), "i" (0),
				       "r" (0) : "memory");
			rb += PPC_BIT(51);	/* increment set number */
		}
		asm volatile("ptesync": : :"memory");
		// POWER9 congruence-class TLBIEL leaves ERAT. Flush it now.
		asm volatile(PPC_ISA_3_0_INVALIDATE_ERAT : : :"memory");
	}
}
/*
 * Flush the guest TLB on this core if this pcpu's bit is set in the
 * relevant need_tlb_flush mask (the nested guest's mask if @nested is
 * non-NULL, otherwise the kvm-wide one).
 */
static void check_need_tlb_flush(struct kvm *kvm, int pcpu,
				 struct kvm_nested_guest *nested)
{
	cpumask_t *need_tlb_flush;
	bool all_set = true;
	int i;

	if (nested)
		need_tlb_flush = &nested->need_tlb_flush;
	else
		need_tlb_flush = &kvm->arch.need_tlb_flush;

	if (likely(!cpumask_test_cpu(pcpu, need_tlb_flush)))
		return;

	/*
	 * Individual threads can come in here, but the TLB is shared between
	 * the 4 threads in a core, hence invalidating on one thread
	 * invalidates for all, so only invalidate the first time (if all bits
	 * were set). The others must still execute a ptesync.
	 *
	 * If a race occurs and two threads do the TLB flush, that is not a
	 * problem, just sub-optimal.
	 */
	for (i = cpu_first_tlb_thread_sibling(pcpu);
	     i <= cpu_last_tlb_thread_sibling(pcpu);
	     i += cpu_tlb_thread_sibling_step()) {
		if (!cpumask_test_cpu(i, need_tlb_flush)) {
			all_set = false;
			break;
		}
	}
	if (all_set)
		flush_guest_tlb(kvm);
	else
		asm volatile("ptesync" ::: "memory");

	/* Clear the bit after the TLB flush */
	cpumask_clear_cpu(pcpu, need_tlb_flush);
}
/*
 * Hard-disable interrupts (EE clear) while ensuring the MSR facility bits
 * required to save/restore guest state (FP/VEC/VSX, and TM when the guest
 * has the facility) are enabled. Returns the updated MSR value; the caller
 * uses it as the host MSR to restore later.
 */
unsigned long kvmppc_msr_hard_disable_set_facilities(struct kvm_vcpu *vcpu, unsigned long msr)
{
	unsigned long msr_needed = 0;

	msr &= ~MSR_EE;

	/* MSR bits may have been cleared by context switch so must recheck */
	if (IS_ENABLED(CONFIG_PPC_FPU))
		msr_needed |= MSR_FP;
	if (cpu_has_feature(CPU_FTR_ALTIVEC))
		msr_needed |= MSR_VEC;
	if (cpu_has_feature(CPU_FTR_VSX))
		msr_needed |= MSR_VSX;
	if ((cpu_has_feature(CPU_FTR_TM) ||
	    cpu_has_feature(CPU_FTR_P9_TM_HV_ASSIST)) &&
			(vcpu->arch.hfscr & HFSCR_TM))
		msr_needed |= MSR_TM;

	/*
	 * This could be combined with MSR[RI] clearing, but that expands
	 * the unrecoverable window. It would be better to cover unrecoverable
	 * with KVM bad interrupt handling rather than use MSR[RI] at all.
	 *
	 * Much more difficult and less worthwhile to combine with IR/DR
	 * disable.
	 */
	if ((msr & msr_needed) != msr_needed) {
		/* mtmsrd both sets the facilities and clears EE */
		msr |= msr_needed;
		__mtmsrd(msr, 0);
	} else {
		/* Facilities already on; just hard-disable */
		__hard_irq_disable();
	}
	local_paca->irq_happened |= PACA_IRQ_HARD_DIS;

	return msr;
}
EXPORT_SYMBOL_GPL(kvmppc_msr_hard_disable_set_facilities);
/*
 * Enter a guest vcpu on a POWER9/10 (P9 path) and run it until it exits
 * or @time_limit (timebase value) is reached.
 *
 * Saves host SPR/MMU state, loads guest state, switches the MMU to the
 * guest, HRFIDs into the guest via kvmppc_p9_enter_guest(), then undoes
 * everything in reverse order on exit. Statement order throughout is
 * significant (barriers, real-mode windows, MSR[RI] handling).
 *
 * Returns the exit trap number (0 if an interrupt was pending before
 * entry, BOOK3S_INTERRUPT_HV_DECREMENTER if the time limit had passed).
 */
int kvmhv_vcpu_entry_p9(struct kvm_vcpu *vcpu, u64 time_limit, unsigned long lpcr, u64 *tb)
{
	struct p9_host_os_sprs host_os_sprs;
	struct kvm *kvm = vcpu->kvm;
	struct kvm_nested_guest *nested = vcpu->arch.nested;
	struct kvmppc_vcore *vc = vcpu->arch.vcore;
	s64 hdec, dec;
	u64 purr, spurr;
	u64 *exsave;
	int trap;
	unsigned long msr;
	unsigned long host_hfscr;
	unsigned long host_ciabr;
	unsigned long host_dawr0;
	unsigned long host_dawrx0;
	unsigned long host_psscr;
	unsigned long host_hpsscr;
	unsigned long host_pidr;
	unsigned long host_dawr1;
	unsigned long host_dawrx1;
	unsigned long dpdes;

	hdec = time_limit - *tb;
	if (hdec < 0)
		return BOOK3S_INTERRUPT_HV_DECREMENTER;

	WARN_ON_ONCE(vcpu->arch.shregs.msr & MSR_HV);
	WARN_ON_ONCE(!(vcpu->arch.shregs.msr & MSR_ME));

	vcpu->arch.ceded = 0;

	/* Save MSR for restore, with EE clear. */
	msr = mfmsr() & ~MSR_EE;

	/* Snapshot host SPRs that the guest context will overwrite */
	host_hfscr = mfspr(SPRN_HFSCR);
	host_ciabr = mfspr(SPRN_CIABR);
	host_psscr = mfspr(SPRN_PSSCR_PR);
	if (cpu_has_feature(CPU_FTR_P9_TM_HV_ASSIST))
		host_hpsscr = mfspr(SPRN_PSSCR);
	host_pidr = mfspr(SPRN_PID);

	if (dawr_enabled()) {
		host_dawr0 = mfspr(SPRN_DAWR0);
		host_dawrx0 = mfspr(SPRN_DAWRX0);
		if (cpu_has_feature(CPU_FTR_DAWR1)) {
			host_dawr1 = mfspr(SPRN_DAWR1);
			host_dawrx1 = mfspr(SPRN_DAWRX1);
		}
	}

	local_paca->kvm_hstate.host_purr = mfspr(SPRN_PURR);
	local_paca->kvm_hstate.host_spurr = mfspr(SPRN_SPURR);

	save_p9_host_os_sprs(&host_os_sprs);

	msr = kvmppc_msr_hard_disable_set_facilities(vcpu, msr);
	/* Bail out before touching guest state if an interrupt is pending */
	if (lazy_irq_pending()) {
		trap = 0;
		goto out;
	}

	if (unlikely(load_vcpu_state(vcpu, &host_os_sprs)))
		msr = mfmsr(); /* MSR may have been updated */

	/* Apply the guest timebase offset (TBU40 granularity) */
	if (vc->tb_offset) {
		u64 new_tb = *tb + vc->tb_offset;
		mtspr(SPRN_TBU40, new_tb);
		/* Did the low 24 bits wrap across the TBU40 write? */
		if ((mftb() & 0xffffff) < (new_tb & 0xffffff)) {
			new_tb += 0x1000000;
			mtspr(SPRN_TBU40, new_tb);
		}
		*tb = new_tb;
		vc->tb_offset_applied = vc->tb_offset;
	}

	mtspr(SPRN_VTB, vc->vtb);
	mtspr(SPRN_PURR, vcpu->arch.purr);
	mtspr(SPRN_SPURR, vcpu->arch.spurr);

	if (vc->pcr)
		mtspr(SPRN_PCR, vc->pcr | PCR_MASK);
	/* Deliver a pending doorbell to the guest via DPDES */
	if (vcpu->arch.doorbell_request) {
		vcpu->arch.doorbell_request = 0;
		mtspr(SPRN_DPDES, 1);
	}

	if (dawr_enabled()) {
		if (vcpu->arch.dawr0 != host_dawr0)
			mtspr(SPRN_DAWR0, vcpu->arch.dawr0);
		if (vcpu->arch.dawrx0 != host_dawrx0)
			mtspr(SPRN_DAWRX0, vcpu->arch.dawrx0);
		if (cpu_has_feature(CPU_FTR_DAWR1)) {
			if (vcpu->arch.dawr1 != host_dawr1)
				mtspr(SPRN_DAWR1, vcpu->arch.dawr1);
			if (vcpu->arch.dawrx1 != host_dawrx1)
				mtspr(SPRN_DAWRX1, vcpu->arch.dawrx1);
		}
	}
	if (vcpu->arch.ciabr != host_ciabr)
		mtspr(SPRN_CIABR, vcpu->arch.ciabr);

	if (cpu_has_feature(CPU_FTR_P9_TM_HV_ASSIST)) {
		mtspr(SPRN_PSSCR, vcpu->arch.psscr | PSSCR_EC |
		      (local_paca->kvm_hstate.fake_suspend << PSSCR_FAKE_SUSPEND_LG));
	} else {
		if (vcpu->arch.psscr != host_psscr)
			mtspr(SPRN_PSSCR_PR, vcpu->arch.psscr);
	}

	mtspr(SPRN_HFSCR, vcpu->arch.hfscr);

	/* HSRR0/1 hold the guest NIP/MSR for the HRFID into the guest */
	mtspr(SPRN_HSRR0, vcpu->arch.regs.nip);
	mtspr(SPRN_HSRR1, (vcpu->arch.shregs.msr & ~MSR_HV) | MSR_ME);

	/*
	 * On POWER9 DD2.1 and below, sometimes on a Hypervisor Data Storage
	 * Interrupt (HDSI) the HDSISR is not updated at all.
	 *
	 * To work around this we put a canary value into the HDSISR before
	 * returning to a guest and then check for this canary when we take a
	 * HDSI. If we find the canary on a HDSI, we know the hardware didn't
	 * update the HDSISR. In this case we return to the guest to retake the
	 * HDSI which should correctly update the HDSISR the second time HDSI
	 * entry.
	 *
	 * The "radix prefetch bug" test can be used to test for this bug, as
	 * it also exists for DD2.1 and below.
	 */
	if (cpu_has_feature(CPU_FTR_P9_RADIX_PREFETCH_BUG))
		mtspr(SPRN_HDSISR, HDSISR_CANARY);

	mtspr(SPRN_SPRG0, vcpu->arch.shregs.sprg0);
	mtspr(SPRN_SPRG1, vcpu->arch.shregs.sprg1);
	mtspr(SPRN_SPRG2, vcpu->arch.shregs.sprg2);
	mtspr(SPRN_SPRG3, vcpu->arch.shregs.sprg3);

	/*
	 * It might be preferable to load_vcpu_state here, in order to get the
	 * GPR/FP register loads executing in parallel with the previous mtSPR
	 * instructions, but for now that can't be done because the TM handling
	 * in load_vcpu_state can change some SPRs and vcpu state (nip, msr).
	 * But TM could be split out if this would be a significant benefit.
	 */

	/*
	 * MSR[RI] does not need to be cleared (and is not, for radix guests
	 * with no prefetch bug), because in_guest is set. If we take a SRESET
	 * or MCE with in_guest set but still in HV mode, then
	 * kvmppc_p9_bad_interrupt handles the interrupt, which effectively
	 * clears MSR[RI] and doesn't return.
	 */
	WRITE_ONCE(local_paca->kvm_hstate.in_guest, KVM_GUEST_MODE_HV_P9);
	barrier(); /* Open in_guest critical section */

	/*
	 * Hash host, hash guest, or radix guest with prefetch bug, all have
	 * to disable the MMU before switching to guest MMU state.
	 */
	if (!radix_enabled() || !kvm_is_radix(kvm) ||
	    cpu_has_feature(CPU_FTR_P9_RADIX_PREFETCH_BUG))
		__mtmsrd(msr & ~(MSR_IR|MSR_DR|MSR_RI), 0);

	save_clear_host_mmu(kvm);

	if (kvm_is_radix(kvm))
		switch_mmu_to_guest_radix(kvm, vcpu, lpcr);
	else
		switch_mmu_to_guest_hpt(kvm, vcpu, lpcr);

	/* TLBIEL uses LPID=LPIDR, so run this after setting guest LPID */
	check_need_tlb_flush(kvm, vc->pcpu, nested);

	/*
	 * P9 suppresses the HDEC exception when LPCR[HDICE] = 0,
	 * so set guest LPCR (with HDICE) before writing HDEC.
	 */
	mtspr(SPRN_HDEC, hdec);

	mtspr(SPRN_DEC, vcpu->arch.dec_expires - *tb);

#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
tm_return_to_guest:
#endif
	mtspr(SPRN_DAR, vcpu->arch.shregs.dar);
	mtspr(SPRN_DSISR, vcpu->arch.shregs.dsisr);
	mtspr(SPRN_SRR0, vcpu->arch.shregs.srr0);
	mtspr(SPRN_SRR1, vcpu->arch.shregs.srr1);

	switch_pmu_to_guest(vcpu, &host_os_sprs);
	accumulate_time(vcpu, &vcpu->arch.in_guest);

	/* Actually enter (and later exit) the guest */
	kvmppc_p9_enter_guest(vcpu);

	accumulate_time(vcpu, &vcpu->arch.guest_exit);
	switch_pmu_to_host(vcpu, &host_os_sprs);

	/* XXX: Could get these from r11/12 and paca exsave instead */
	vcpu->arch.shregs.srr0 = mfspr(SPRN_SRR0);
	vcpu->arch.shregs.srr1 = mfspr(SPRN_SRR1);
	vcpu->arch.shregs.dar = mfspr(SPRN_DAR);
	vcpu->arch.shregs.dsisr = mfspr(SPRN_DSISR);

	/* 0x2 bit for HSRR is only used by PR and P7/8 HV paths, clear it */
	trap = local_paca->kvm_hstate.scratch0 & ~0x2;

	/* Pick the exception save area that the exit vector used */
	if (likely(trap > BOOK3S_INTERRUPT_MACHINE_CHECK))
		exsave = local_paca->exgen;
	else if (trap == BOOK3S_INTERRUPT_SYSTEM_RESET)
		exsave = local_paca->exnmi;
	else /* trap == 0x200 */
		exsave = local_paca->exmc;

	vcpu->arch.regs.gpr[1] = local_paca->kvm_hstate.scratch1;
	vcpu->arch.regs.gpr[3] = local_paca->kvm_hstate.scratch2;

	/*
	 * After reading machine check regs (DAR, DSISR, SRR0/1) and hstate
	 * scratch (which we need to move into exsave to make re-entrant vs
	 * SRESET/MCE), register state is protected from reentrancy. However
	 * timebase, MMU, among other state is still set to guest, so don't
	 * enable MSR[RI] here. It gets enabled at the end, after in_guest
	 * is cleared.
	 *
	 * It is possible an NMI could come in here, which is why it is
	 * important to save the above state early so it can be debugged.
	 */

	vcpu->arch.regs.gpr[9] = exsave[EX_R9/sizeof(u64)];
	vcpu->arch.regs.gpr[10] = exsave[EX_R10/sizeof(u64)];
	vcpu->arch.regs.gpr[11] = exsave[EX_R11/sizeof(u64)];
	vcpu->arch.regs.gpr[12] = exsave[EX_R12/sizeof(u64)];
	vcpu->arch.regs.gpr[13] = exsave[EX_R13/sizeof(u64)];
	vcpu->arch.ppr = exsave[EX_PPR/sizeof(u64)];
	vcpu->arch.cfar = exsave[EX_CFAR/sizeof(u64)];
	vcpu->arch.regs.ctr = exsave[EX_CTR/sizeof(u64)];

	vcpu->arch.last_inst = KVM_INST_FETCH_FAILED;

	/* Per-trap fixups that must run before leaving real mode */
	if (unlikely(trap == BOOK3S_INTERRUPT_MACHINE_CHECK)) {
		vcpu->arch.fault_dar = exsave[EX_DAR/sizeof(u64)];
		vcpu->arch.fault_dsisr = exsave[EX_DSISR/sizeof(u64)];
		kvmppc_realmode_machine_check(vcpu);

	} else if (unlikely(trap == BOOK3S_INTERRUPT_HMI)) {
		kvmppc_p9_realmode_hmi_handler(vcpu);

	} else if (trap == BOOK3S_INTERRUPT_H_EMUL_ASSIST) {
		vcpu->arch.emul_inst = mfspr(SPRN_HEIR);

	} else if (trap == BOOK3S_INTERRUPT_H_DATA_STORAGE) {
		vcpu->arch.fault_dar = exsave[EX_DAR/sizeof(u64)];
		vcpu->arch.fault_dsisr = exsave[EX_DSISR/sizeof(u64)];
		vcpu->arch.fault_gpa = mfspr(SPRN_ASDR);

	} else if (trap == BOOK3S_INTERRUPT_H_INST_STORAGE) {
		vcpu->arch.fault_gpa = mfspr(SPRN_ASDR);

	} else if (trap == BOOK3S_INTERRUPT_H_FAC_UNAVAIL) {
		vcpu->arch.hfscr = mfspr(SPRN_HFSCR);

#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
	/*
	 * Softpatch interrupt for transactional memory emulation cases
	 * on POWER9 DD2.2. This is early in the guest exit path - we
	 * haven't saved registers or done a treclaim yet.
	 */
	} else if (trap == BOOK3S_INTERRUPT_HV_SOFTPATCH) {
		vcpu->arch.emul_inst = mfspr(SPRN_HEIR);

		/*
		 * The cases we want to handle here are those where the guest
		 * is in real suspend mode and is trying to transition to
		 * transactional mode.
		 */
		if (!local_paca->kvm_hstate.fake_suspend &&
		    (vcpu->arch.shregs.msr & MSR_TS_S)) {
			if (kvmhv_p9_tm_emulation_early(vcpu)) {
				/*
				 * Go straight back into the guest with the
				 * new NIP/MSR as set by TM emulation.
				 */
				mtspr(SPRN_HSRR0, vcpu->arch.regs.nip);
				mtspr(SPRN_HSRR1, vcpu->arch.shregs.msr);
				goto tm_return_to_guest;
			}
		}
#endif
	}

	/* Advance host PURR/SPURR by the amount used by guest */
	purr = mfspr(SPRN_PURR);
	spurr = mfspr(SPRN_SPURR);
	local_paca->kvm_hstate.host_purr += purr - vcpu->arch.purr;
	local_paca->kvm_hstate.host_spurr += spurr - vcpu->arch.spurr;
	vcpu->arch.purr = purr;
	vcpu->arch.spurr = spurr;

	vcpu->arch.ic = mfspr(SPRN_IC);
	vcpu->arch.pid = mfspr(SPRN_PID);
	vcpu->arch.psscr = mfspr(SPRN_PSSCR_PR);

	vcpu->arch.shregs.sprg0 = mfspr(SPRN_SPRG0);
	vcpu->arch.shregs.sprg1 = mfspr(SPRN_SPRG1);
	vcpu->arch.shregs.sprg2 = mfspr(SPRN_SPRG2);
	vcpu->arch.shregs.sprg3 = mfspr(SPRN_SPRG3);

	/* Remember any doorbell the guest left pending */
	dpdes = mfspr(SPRN_DPDES);
	if (dpdes)
		vcpu->arch.doorbell_request = 1;

	vc->vtb = mfspr(SPRN_VTB);

	dec = mfspr(SPRN_DEC);
	if (!(lpcr & LPCR_LD)) /* Sign extend if not using large decrementer */
		dec = (s32) dec;
	*tb = mftb();
	vcpu->arch.dec_expires = dec + *tb;

	/* Undo the guest timebase offset, with the same wrap check as above */
	if (vc->tb_offset_applied) {
		u64 new_tb = *tb - vc->tb_offset_applied;
		mtspr(SPRN_TBU40, new_tb);
		if ((mftb() & 0xffffff) < (new_tb & 0xffffff)) {
			new_tb += 0x1000000;
			mtspr(SPRN_TBU40, new_tb);
		}
		*tb = new_tb;
		vc->tb_offset_applied = 0;
	}

	save_clear_guest_mmu(kvm, vcpu);
	switch_mmu_to_host(kvm, host_pidr);

	/*
	 * Enable MSR here in order to have facilities enabled to save
	 * guest registers. This enables MMU (if we were in realmode), so
	 * only switch MMU on after the MMU is switched to host, to avoid
	 * the P9_RADIX_PREFETCH_BUG or hash guest context.
	 */
	if (IS_ENABLED(CONFIG_PPC_TRANSACTIONAL_MEM) &&
	    vcpu->arch.shregs.msr & MSR_TS_MASK)
		msr |= MSR_TS_S;
	__mtmsrd(msr, 0);

	store_vcpu_state(vcpu);

	mtspr(SPRN_PURR, local_paca->kvm_hstate.host_purr);
	mtspr(SPRN_SPURR, local_paca->kvm_hstate.host_spurr);

	if (cpu_has_feature(CPU_FTR_P9_TM_HV_ASSIST)) {
		/* Preserve PSSCR[FAKE_SUSPEND] until we've called kvmppc_save_tm_hv */
		mtspr(SPRN_PSSCR, host_hpsscr |
		      (local_paca->kvm_hstate.fake_suspend << PSSCR_FAKE_SUSPEND_LG));
	}

	mtspr(SPRN_HFSCR, host_hfscr);
	if (vcpu->arch.ciabr != host_ciabr)
		mtspr(SPRN_CIABR, host_ciabr);

	if (dawr_enabled()) {
		if (vcpu->arch.dawr0 != host_dawr0)
			mtspr(SPRN_DAWR0, host_dawr0);
		if (vcpu->arch.dawrx0 != host_dawrx0)
			mtspr(SPRN_DAWRX0, host_dawrx0);
		if (cpu_has_feature(CPU_FTR_DAWR1)) {
			if (vcpu->arch.dawr1 != host_dawr1)
				mtspr(SPRN_DAWR1, host_dawr1);
			if (vcpu->arch.dawrx1 != host_dawrx1)
				mtspr(SPRN_DAWRX1, host_dawrx1);
		}
	}

	if (dpdes)
		mtspr(SPRN_DPDES, 0);
	if (vc->pcr)
		mtspr(SPRN_PCR, PCR_MASK);

	/* HDEC must be at least as large as DEC, so decrementer_max fits */
	mtspr(SPRN_HDEC, decrementer_max);

	timer_rearm_host_dec(*tb);

	restore_p9_host_os_sprs(vcpu, &host_os_sprs);

	barrier(); /* Close in_guest critical section */
	WRITE_ONCE(local_paca->kvm_hstate.in_guest, KVM_GUEST_MODE_NONE);
	/* Interrupts are recoverable at this point */

	/*
	 * cp_abort is required if the processor supports local copy-paste
	 * to clear the copy buffer that was under control of the guest.
	 */
	if (cpu_has_feature(CPU_FTR_ARCH_31))
		asm volatile(PPC_CP_ABORT);

out:
	return trap;
}
EXPORT_SYMBOL_GPL(kvmhv_vcpu_entry_p9);