pmu_intel.c

/*
 * KVM PMU support for Intel CPUs
 *
 * Copyright 2011 Red Hat, Inc. and/or its affiliates.
 *
 * Authors:
 *   Avi Kivity   <avi@redhat.com>
 *   Gleb Natapov <gleb@redhat.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2. See
 * the COPYING file in the top-level directory.
 *
 */

#include <linux/types.h>
#include <linux/kvm_host.h>
#include <linux/perf_event.h>
#include <asm/perf_event.h>
#include "x86.h"
#include "cpuid.h"
#include "lapic.h"
#include "pmu.h"
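
/*
 * Each entry pairs an Intel event select / unit mask encoding with the
 * generic perf_events hardware event it corresponds to.
 */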
static struct kvm_event_hw_type_mapping intel_arch_events[] = {
        /* Index must match CPUID 0x0A.EBX bit vector */
        [0] = { 0x3c, 0x00, PERF_COUNT_HW_CPU_CYCLES },
        [1] = { 0xc0, 0x00, PERF_COUNT_HW_INSTRUCTIONS },
        [2] = { 0x3c, 0x01, PERF_COUNT_HW_BUS_CYCLES },
        [3] = { 0x2e, 0x4f, PERF_COUNT_HW_CACHE_REFERENCES },
        [4] = { 0x2e, 0x41, PERF_COUNT_HW_CACHE_MISSES },
        [5] = { 0xc4, 0x00, PERF_COUNT_HW_BRANCH_INSTRUCTIONS },
        [6] = { 0xc5, 0x00, PERF_COUNT_HW_BRANCH_MISSES },
        [7] = { 0x00, 0x03, PERF_COUNT_HW_REF_CPU_CYCLES },
};

/* mapping between fixed pmc index and intel_arch_events array */
static int fixed_pmc_events[] = {1, 0, 7};
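
/*
 * Compare each fixed counter's control field in the new FIXED_CTR_CTRL
 * value against the cached one, reprogram any counter whose control bits
 * changed, then cache the new value.
 */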
static void reprogram_fixed_counters(struct kvm_pmu *pmu, u64 data)
{
        int i;

        for (i = 0; i < pmu->nr_arch_fixed_counters; i++) {
                u8 new_ctrl = fixed_ctrl_field(data, i);
                u8 old_ctrl = fixed_ctrl_field(pmu->fixed_ctr_ctrl, i);
                struct kvm_pmc *pmc;

                pmc = get_fixed_pmc(pmu, MSR_CORE_PERF_FIXED_CTR0 + i);

                if (old_ctrl == new_ctrl)
                        continue;

                reprogram_fixed_counter(pmc, new_ctrl, i);
        }

        pmu->fixed_ctr_ctrl = data;
}

/* Called when the global control register has been updated. */
static void global_ctrl_changed(struct kvm_pmu *pmu, u64 data)
{
        int bit;
        u64 diff = pmu->global_ctrl ^ data;

        pmu->global_ctrl = data;

        for_each_set_bit(bit, (unsigned long *)&diff, X86_PMC_IDX_MAX)
                reprogram_counter(pmu, bit);
}
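
/*
 * Translate a guest-programmed event select / unit mask pair into the
 * generic perf hardware event it encodes, returning PERF_COUNT_HW_MAX if
 * the pair is unknown or the architectural event is not advertised as
 * available to the guest.
 */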
static unsigned intel_find_arch_event(struct kvm_pmu *pmu,
                                      u8 event_select,
                                      u8 unit_mask)
{
        int i;

        for (i = 0; i < ARRAY_SIZE(intel_arch_events); i++)
                if (intel_arch_events[i].eventsel == event_select
                    && intel_arch_events[i].unit_mask == unit_mask
                    && (pmu->available_event_types & (1 << i)))
                        break;

        if (i == ARRAY_SIZE(intel_arch_events))
                return PERF_COUNT_HW_MAX;

        return intel_arch_events[i].event_type;
}
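
/*
 * Map a fixed counter index to its generic perf event via fixed_pmc_events,
 * with the index bounds-checked and sanitized against speculation.
 */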
static unsigned intel_find_fixed_event(int idx)
{
        u32 event;
        size_t size = ARRAY_SIZE(fixed_pmc_events);

        if (idx >= size)
                return PERF_COUNT_HW_MAX;

        event = fixed_pmc_events[array_index_nospec(idx, size)];
        return intel_arch_events[event].event_type;
}

/* check if a PMC is enabled by comparing it with global_ctrl bits. */
static bool intel_pmc_is_enabled(struct kvm_pmc *pmc)
{
        struct kvm_pmu *pmu = pmc_to_pmu(pmc);

        return test_bit(pmc->idx, (unsigned long *)&pmu->global_ctrl);
}
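
/*
 * Translate a global counter index into its kvm_pmc: general-purpose
 * counters come first, fixed counters start at INTEL_PMC_IDX_FIXED.
 */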
static struct kvm_pmc *intel_pmc_idx_to_pmc(struct kvm_pmu *pmu, int pmc_idx)
{
        if (pmc_idx < INTEL_PMC_IDX_FIXED)
                return get_gp_pmc(pmu, MSR_P6_EVNTSEL0 + pmc_idx,
                                  MSR_P6_EVNTSEL0);
        else {
                u32 idx = pmc_idx - INTEL_PMC_IDX_FIXED;

                return get_fixed_pmc(pmu, idx + MSR_CORE_PERF_FIXED_CTR0);
        }
}

/* returns 0 if idx's corresponding MSR exists; otherwise returns 1. */
static int intel_is_valid_msr_idx(struct kvm_vcpu *vcpu, unsigned idx)
{
        struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
        bool fixed = idx & (1u << 30);

        idx &= ~(3u << 30);

        return (!fixed && idx >= pmu->nr_arch_gp_counters) ||
                (fixed && idx >= pmu->nr_arch_fixed_counters);
}
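
/*
 * Resolve an RDPMC index (bit 30 selects fixed vs. general-purpose
 * counters) to its kvm_pmc and narrow *mask to the counter's bit width.
 */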
static struct kvm_pmc *intel_msr_idx_to_pmc(struct kvm_vcpu *vcpu,
                                            unsigned idx, u64 *mask)
{
        struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
        bool fixed = idx & (1u << 30);
        struct kvm_pmc *counters;
        unsigned int num_counters;

        idx &= ~(3u << 30);
        if (fixed) {
                counters = pmu->fixed_counters;
                num_counters = pmu->nr_arch_fixed_counters;
        } else {
                counters = pmu->gp_counters;
                num_counters = pmu->nr_arch_gp_counters;
        }
        if (idx >= num_counters)
                return NULL;
        *mask &= pmu->counter_bitmask[fixed ? KVM_PMC_FIXED : KVM_PMC_GP];
        return &counters[array_index_nospec(idx, num_counters)];
}
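
/*
 * An MSR belongs to the vPMU if it is one of the global control/status
 * registers (PMU version 2 or later only) or if it maps to a
 * general-purpose counter, its event select, or a fixed counter.
 */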
static bool intel_is_valid_msr(struct kvm_vcpu *vcpu, u32 msr)
{
        struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
        int ret;

        switch (msr) {
        case MSR_CORE_PERF_FIXED_CTR_CTRL:
        case MSR_CORE_PERF_GLOBAL_STATUS:
        case MSR_CORE_PERF_GLOBAL_CTRL:
        case MSR_CORE_PERF_GLOBAL_OVF_CTRL:
                ret = pmu->version > 1;
                break;
        default:
                ret = get_gp_pmc(pmu, msr, MSR_IA32_PERFCTR0) ||
                        get_gp_pmc(pmu, msr, MSR_P6_EVNTSEL0) ||
                        get_fixed_pmc(pmu, msr);
                break;
        }

        return ret;
}
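
/*
 * Read a PMU MSR: the global registers return cached state, counter MSRs
 * return the current count masked to the counter width, and event-select
 * MSRs return the cached event selector.  Returns 0 on success, 1 if the
 * MSR is not handled.
 */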
static int intel_pmu_get_msr(struct kvm_vcpu *vcpu, u32 msr, u64 *data)
{
        struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
        struct kvm_pmc *pmc;

        switch (msr) {
        case MSR_CORE_PERF_FIXED_CTR_CTRL:
                *data = pmu->fixed_ctr_ctrl;
                return 0;
        case MSR_CORE_PERF_GLOBAL_STATUS:
                *data = pmu->global_status;
                return 0;
        case MSR_CORE_PERF_GLOBAL_CTRL:
                *data = pmu->global_ctrl;
                return 0;
        case MSR_CORE_PERF_GLOBAL_OVF_CTRL:
                *data = pmu->global_ovf_ctrl;
                return 0;
        default:
                if ((pmc = get_gp_pmc(pmu, msr, MSR_IA32_PERFCTR0))) {
                        u64 val = pmc_read_counter(pmc);
                        *data = val & pmu->counter_bitmask[KVM_PMC_GP];
                        return 0;
                } else if ((pmc = get_fixed_pmc(pmu, msr))) {
                        u64 val = pmc_read_counter(pmc);
                        *data = val & pmu->counter_bitmask[KVM_PMC_FIXED];
                        return 0;
                } else if ((pmc = get_gp_pmc(pmu, msr, MSR_P6_EVNTSEL0))) {
                        *data = pmc->eventsel;
                        return 0;
                }
        }

        return 1;
}
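
/*
 * Write a PMU MSR.  Writes that set reserved bits are rejected;
 * GLOBAL_STATUS is writable only by the host.  Control and event-select
 * writes reprogram the backing perf events as needed.  Returns 0 on
 * success, 1 if the write is rejected or the MSR is not handled.
 */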
static int intel_pmu_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
{
        struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
        struct kvm_pmc *pmc;
        u32 msr = msr_info->index;
        u64 data = msr_info->data;

        switch (msr) {
        case MSR_CORE_PERF_FIXED_CTR_CTRL:
                if (pmu->fixed_ctr_ctrl == data)
                        return 0;
                if (!(data & 0xfffffffffffff444ull)) {
                        reprogram_fixed_counters(pmu, data);
                        return 0;
                }
                break;
        case MSR_CORE_PERF_GLOBAL_STATUS:
                if (msr_info->host_initiated) {
                        pmu->global_status = data;
                        return 0;
                }
                break; /* RO MSR */
        case MSR_CORE_PERF_GLOBAL_CTRL:
                if (pmu->global_ctrl == data)
                        return 0;
                if (!(data & pmu->global_ctrl_mask)) {
                        global_ctrl_changed(pmu, data);
                        return 0;
                }
                break;
        case MSR_CORE_PERF_GLOBAL_OVF_CTRL:
                if (!(data & (pmu->global_ctrl_mask & ~(3ull<<62)))) {
                        if (!msr_info->host_initiated)
                                pmu->global_status &= ~data;
                        pmu->global_ovf_ctrl = data;
                        return 0;
                }
                break;
        default:
                if ((pmc = get_gp_pmc(pmu, msr, MSR_IA32_PERFCTR0))) {
                        if (msr_info->host_initiated)
                                pmc->counter = data;
                        else
                                pmc->counter = (s32)data;
                        return 0;
                } else if ((pmc = get_fixed_pmc(pmu, msr))) {
                        pmc->counter = data;
                        return 0;
                } else if ((pmc = get_gp_pmc(pmu, msr, MSR_P6_EVNTSEL0))) {
                        if (data == pmc->eventsel)
                                return 0;
                        if (!(data & pmu->reserved_bits)) {
                                reprogram_gp_counter(pmc, data);
                                return 0;
                        }
                }
        }

        return 1;
}
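
/*
 * Rebuild the virtual PMU configuration (counter counts and widths,
 * version, available events, reserved bits) from the guest's CPUID
 * leaf 0xA; called when the guest CPUID is updated.
 */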
static void intel_pmu_refresh(struct kvm_vcpu *vcpu)
{
        struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
        struct kvm_cpuid_entry2 *entry;
        union cpuid10_eax eax;
        union cpuid10_edx edx;

        pmu->nr_arch_gp_counters = 0;
        pmu->nr_arch_fixed_counters = 0;
        pmu->counter_bitmask[KVM_PMC_GP] = 0;
        pmu->counter_bitmask[KVM_PMC_FIXED] = 0;
        pmu->version = 0;
        pmu->reserved_bits = 0xffffffff00200000ull;

        entry = kvm_find_cpuid_entry(vcpu, 0xa, 0);
        if (!entry)
                return;
        eax.full = entry->eax;
        edx.full = entry->edx;

        pmu->version = eax.split.version_id;
        if (!pmu->version)
                return;

        pmu->nr_arch_gp_counters = min_t(int, eax.split.num_counters,
                                         INTEL_PMC_MAX_GENERIC);
        pmu->counter_bitmask[KVM_PMC_GP] = ((u64)1 << eax.split.bit_width) - 1;
        pmu->available_event_types = ~entry->ebx &
                                     ((1ull << eax.split.mask_length) - 1);

        if (pmu->version == 1) {
                pmu->nr_arch_fixed_counters = 0;
        } else {
                pmu->nr_arch_fixed_counters =
                        min_t(int, edx.split.num_counters_fixed,
                              INTEL_PMC_MAX_FIXED);
                pmu->counter_bitmask[KVM_PMC_FIXED] =
                        ((u64)1 << edx.split.bit_width_fixed) - 1;
        }

        pmu->global_ctrl = ((1ull << pmu->nr_arch_gp_counters) - 1) |
                (((1ull << pmu->nr_arch_fixed_counters) - 1) << INTEL_PMC_IDX_FIXED);
        pmu->global_ctrl_mask = ~pmu->global_ctrl;

        entry = kvm_find_cpuid_entry(vcpu, 7, 0);
        if (entry &&
            (boot_cpu_has(X86_FEATURE_HLE) || boot_cpu_has(X86_FEATURE_RTM)) &&
            (entry->ebx & (X86_FEATURE_HLE|X86_FEATURE_RTM)))
                pmu->reserved_bits ^= HSW_IN_TX|HSW_IN_TX_CHECKPOINTED;
}
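
/*
 * One-time initialization: tag every general-purpose and fixed counter
 * slot with its type, owning vcpu, and global counter index.
 */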
static void intel_pmu_init(struct kvm_vcpu *vcpu)
{
        int i;
        struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);

        for (i = 0; i < INTEL_PMC_MAX_GENERIC; i++) {
                pmu->gp_counters[i].type = KVM_PMC_GP;
                pmu->gp_counters[i].vcpu = vcpu;
                pmu->gp_counters[i].idx = i;
        }

        for (i = 0; i < INTEL_PMC_MAX_FIXED; i++) {
                pmu->fixed_counters[i].type = KVM_PMC_FIXED;
                pmu->fixed_counters[i].vcpu = vcpu;
                pmu->fixed_counters[i].idx = i + INTEL_PMC_IDX_FIXED;
        }
}
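
/*
 * Stop all counters, clear their values and event selectors, and reset
 * the global control/status state.
 */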
static void intel_pmu_reset(struct kvm_vcpu *vcpu)
{
        struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
        int i;

        for (i = 0; i < INTEL_PMC_MAX_GENERIC; i++) {
                struct kvm_pmc *pmc = &pmu->gp_counters[i];

                pmc_stop_counter(pmc);
                pmc->counter = pmc->eventsel = 0;
        }

        for (i = 0; i < INTEL_PMC_MAX_FIXED; i++)
                pmc_stop_counter(&pmu->fixed_counters[i]);

        pmu->fixed_ctr_ctrl = pmu->global_ctrl = pmu->global_status =
                pmu->global_ovf_ctrl = 0;
}
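
/* Intel implementation of the kvm_pmu_ops callbacks used by the common PMU code. */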
struct kvm_pmu_ops intel_pmu_ops = {
        .find_arch_event = intel_find_arch_event,
        .find_fixed_event = intel_find_fixed_event,
        .pmc_is_enabled = intel_pmc_is_enabled,
        .pmc_idx_to_pmc = intel_pmc_idx_to_pmc,
        .msr_idx_to_pmc = intel_msr_idx_to_pmc,
        .is_valid_msr_idx = intel_is_valid_msr_idx,
        .is_valid_msr = intel_is_valid_msr,
        .get_msr = intel_pmu_get_msr,
        .set_msr = intel_pmu_set_msr,
        .refresh = intel_pmu_refresh,
        .init = intel_pmu_init,
        .reset = intel_pmu_reset,
};