book3s_hv_p9_perf.c

// SPDX-License-Identifier: GPL-2.0-only

#include <asm/kvm_ppc.h>
#include <asm/pmc.h>

#include "book3s_hv.h"
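
/*
 * Freeze the PMU unless it is already frozen in a safe state: counters
 * frozen, sampling disabled and, on ISA v3.1, PMCC extension set and
 * BHRB disabled.
 */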
static void freeze_pmu(unsigned long mmcr0, unsigned long mmcra)
{
	if (!(mmcr0 & MMCR0_FC))
		goto do_freeze;
	if (mmcra & MMCRA_SAMPLE_ENABLE)
		goto do_freeze;
	if (cpu_has_feature(CPU_FTR_ARCH_31)) {
		if (!(mmcr0 & MMCR0_PMCCEXT))
			goto do_freeze;
		if (!(mmcra & MMCRA_BHRB_DISABLE))
			goto do_freeze;
	}

	return;

do_freeze:
	mmcr0 = MMCR0_FC;
	mmcra = 0;
	if (cpu_has_feature(CPU_FTR_ARCH_31)) {
		mmcr0 |= MMCR0_PMCCEXT;
		mmcra = MMCRA_BHRB_DISABLE;
	}

	mtspr(SPRN_MMCR0, mmcr0);
	mtspr(SPRN_MMCRA, mmcra);
	isync();
}
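
/*
 * On guest entry: save and freeze the host PMU state if the host has the
 * PMU in use, publish the guest's pmcregs_in_use value when running under
 * a hypervisor, then load the guest PMU state if the guest's VPA says it
 * is in use or the guest has demand-faulted PMU access (HFSCR[PM]).
 */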
void switch_pmu_to_guest(struct kvm_vcpu *vcpu,
			 struct p9_host_os_sprs *host_os_sprs)
{
	struct lppaca *lp;
	int load_pmu = 1;

	lp = vcpu->arch.vpa.pinned_addr;
	if (lp)
		load_pmu = lp->pmcregs_in_use;

	/* Save host */
	if (ppc_get_pmu_inuse()) {
		/* POWER9, POWER10 do not implement HPMC or SPMC */

		host_os_sprs->mmcr0 = mfspr(SPRN_MMCR0);
		host_os_sprs->mmcra = mfspr(SPRN_MMCRA);

		freeze_pmu(host_os_sprs->mmcr0, host_os_sprs->mmcra);

		host_os_sprs->pmc1 = mfspr(SPRN_PMC1);
		host_os_sprs->pmc2 = mfspr(SPRN_PMC2);
		host_os_sprs->pmc3 = mfspr(SPRN_PMC3);
		host_os_sprs->pmc4 = mfspr(SPRN_PMC4);
		host_os_sprs->pmc5 = mfspr(SPRN_PMC5);
		host_os_sprs->pmc6 = mfspr(SPRN_PMC6);
		host_os_sprs->mmcr1 = mfspr(SPRN_MMCR1);
		host_os_sprs->mmcr2 = mfspr(SPRN_MMCR2);
		host_os_sprs->sdar = mfspr(SPRN_SDAR);
		host_os_sprs->siar = mfspr(SPRN_SIAR);
		host_os_sprs->sier1 = mfspr(SPRN_SIER);

		if (cpu_has_feature(CPU_FTR_ARCH_31)) {
			host_os_sprs->mmcr3 = mfspr(SPRN_MMCR3);
			host_os_sprs->sier2 = mfspr(SPRN_SIER2);
			host_os_sprs->sier3 = mfspr(SPRN_SIER3);
		}
	}

#ifdef CONFIG_PPC_PSERIES
	/* After saving PMU, before loading guest PMU, flip pmcregs_in_use */
	if (kvmhv_on_pseries()) {
		barrier();
		get_lppaca()->pmcregs_in_use = load_pmu;
		barrier();
	}
#endif

	/*
	 * Load guest. If the VPA said the PMCs are not in use but the guest
	 * tried to access them anyway, HFSCR[PM] will be set by the HFAC
	 * fault so we can make forward progress.
	 */
	if (load_pmu || (vcpu->arch.hfscr & HFSCR_PM)) {
		mtspr(SPRN_PMC1, vcpu->arch.pmc[0]);
		mtspr(SPRN_PMC2, vcpu->arch.pmc[1]);
		mtspr(SPRN_PMC3, vcpu->arch.pmc[2]);
		mtspr(SPRN_PMC4, vcpu->arch.pmc[3]);
		mtspr(SPRN_PMC5, vcpu->arch.pmc[4]);
		mtspr(SPRN_PMC6, vcpu->arch.pmc[5]);
		mtspr(SPRN_MMCR1, vcpu->arch.mmcr[1]);
		mtspr(SPRN_MMCR2, vcpu->arch.mmcr[2]);
		mtspr(SPRN_SDAR, vcpu->arch.sdar);
		mtspr(SPRN_SIAR, vcpu->arch.siar);
		mtspr(SPRN_SIER, vcpu->arch.sier[0]);

		if (cpu_has_feature(CPU_FTR_ARCH_31)) {
			mtspr(SPRN_MMCR3, vcpu->arch.mmcr[3]);
			mtspr(SPRN_SIER2, vcpu->arch.sier[1]);
			mtspr(SPRN_SIER3, vcpu->arch.sier[2]);
		}

		/* Set MMCRA then MMCR0 last */
		mtspr(SPRN_MMCRA, vcpu->arch.mmcra);
		mtspr(SPRN_MMCR0, vcpu->arch.mmcr[0]);

		/* No isync necessary because we're starting counters */

		if (!vcpu->arch.nested &&
		    (vcpu->arch.hfscr_permitted & HFSCR_PM))
			vcpu->arch.hfscr |= HFSCR_PM;
	}
}
EXPORT_SYMBOL_GPL(switch_pmu_to_guest);
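
/*
 * On guest exit: save the guest PMU state if the guest's VPA says it is
 * in use (or the nested-PMU workaround requires it), freeze the PMU, and
 * then restore the host PMU state if the host had the PMU in use.
 */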
void switch_pmu_to_host(struct kvm_vcpu *vcpu,
			struct p9_host_os_sprs *host_os_sprs)
{
	struct lppaca *lp;
	int save_pmu = 1;

	lp = vcpu->arch.vpa.pinned_addr;
	if (lp)
		save_pmu = lp->pmcregs_in_use;
	if (IS_ENABLED(CONFIG_KVM_BOOK3S_HV_NESTED_PMU_WORKAROUND)) {
		/*
		 * Save pmu if this guest is capable of running nested guests.
		 * This option is for old L1s that do not set their
		 * lppaca->pmcregs_in_use properly when entering their L2.
		 */
		save_pmu |= nesting_enabled(vcpu->kvm);
	}

	if (save_pmu) {
		vcpu->arch.mmcr[0] = mfspr(SPRN_MMCR0);
		vcpu->arch.mmcra = mfspr(SPRN_MMCRA);

		freeze_pmu(vcpu->arch.mmcr[0], vcpu->arch.mmcra);

		vcpu->arch.pmc[0] = mfspr(SPRN_PMC1);
		vcpu->arch.pmc[1] = mfspr(SPRN_PMC2);
		vcpu->arch.pmc[2] = mfspr(SPRN_PMC3);
		vcpu->arch.pmc[3] = mfspr(SPRN_PMC4);
		vcpu->arch.pmc[4] = mfspr(SPRN_PMC5);
		vcpu->arch.pmc[5] = mfspr(SPRN_PMC6);
		vcpu->arch.mmcr[1] = mfspr(SPRN_MMCR1);
		vcpu->arch.mmcr[2] = mfspr(SPRN_MMCR2);
		vcpu->arch.sdar = mfspr(SPRN_SDAR);
		vcpu->arch.siar = mfspr(SPRN_SIAR);
		vcpu->arch.sier[0] = mfspr(SPRN_SIER);

		if (cpu_has_feature(CPU_FTR_ARCH_31)) {
			vcpu->arch.mmcr[3] = mfspr(SPRN_MMCR3);
			vcpu->arch.sier[1] = mfspr(SPRN_SIER2);
			vcpu->arch.sier[2] = mfspr(SPRN_SIER3);
		}

	} else if (vcpu->arch.hfscr & HFSCR_PM) {
		/*
		 * The guest accessed PMC SPRs without specifying they should
		 * be preserved, or it cleared pmcregs_in_use after the last
		 * access. Just ensure they are frozen.
		 */
		freeze_pmu(mfspr(SPRN_MMCR0), mfspr(SPRN_MMCRA));

		/*
		 * Demand-fault PMU register access in the guest.
		 *
		 * This is used to grab the guest's VPA pmcregs_in_use value
		 * and reflect it into the host's VPA in the case of a nested
		 * hypervisor.
		 *
		 * It also avoids having to zero-out SPRs after each guest
		 * exit to avoid side-channels.
		 *
		 * This is cleared here when we exit the guest, so later HFSCR
		 * interrupt handling can add it back to run the guest with
		 * PM enabled next time.
		 */
		if (!vcpu->arch.nested)
			vcpu->arch.hfscr &= ~HFSCR_PM;

	} /* otherwise the PMU should still be frozen */
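
	/*
	 * When running as a nested hypervisor on pseries, publish the host's
	 * PMU-in-use state back to the lppaca shared with the hypervisor.
	 */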
#ifdef CONFIG_PPC_PSERIES
	if (kvmhv_on_pseries()) {
		barrier();
		get_lppaca()->pmcregs_in_use = ppc_get_pmu_inuse();
		barrier();
	}
#endif
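
	/* Reload the host PMU state saved in switch_pmu_to_guest() */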
	if (ppc_get_pmu_inuse()) {
		mtspr(SPRN_PMC1, host_os_sprs->pmc1);
		mtspr(SPRN_PMC2, host_os_sprs->pmc2);
		mtspr(SPRN_PMC3, host_os_sprs->pmc3);
		mtspr(SPRN_PMC4, host_os_sprs->pmc4);
		mtspr(SPRN_PMC5, host_os_sprs->pmc5);
		mtspr(SPRN_PMC6, host_os_sprs->pmc6);
		mtspr(SPRN_MMCR1, host_os_sprs->mmcr1);
		mtspr(SPRN_MMCR2, host_os_sprs->mmcr2);
		mtspr(SPRN_SDAR, host_os_sprs->sdar);
		mtspr(SPRN_SIAR, host_os_sprs->siar);
		mtspr(SPRN_SIER, host_os_sprs->sier1);

		if (cpu_has_feature(CPU_FTR_ARCH_31)) {
			mtspr(SPRN_MMCR3, host_os_sprs->mmcr3);
			mtspr(SPRN_SIER2, host_os_sprs->sier2);
			mtspr(SPRN_SIER3, host_os_sprs->sier3);
		}

		/* Set MMCRA then MMCR0 last */
		mtspr(SPRN_MMCRA, host_os_sprs->mmcra);
		mtspr(SPRN_MMCR0, host_os_sprs->mmcr0);
		isync();
	}
}
EXPORT_SYMBOL_GPL(switch_pmu_to_host);