  1. // SPDX-License-Identifier: GPL-2.0
  2. /*
  3. * local apic based NMI watchdog for various CPUs.
  4. *
  5. * This file also handles reservation of performance counters for coordination
  6. * with other users (like oprofile).
  7. *
  8. * Note that these events normally don't tick when the CPU idles. This means
  9. * the frequency varies with CPU load.
  10. *
  11. * Original code for K7/P6 written by Keith Owens
  12. *
  13. */
  14. #include <linux/percpu.h>
  15. #include <linux/export.h>
  16. #include <linux/kernel.h>
  17. #include <linux/bitops.h>
  18. #include <linux/smp.h>
  19. #include <asm/nmi.h>
  20. #include <linux/kprobes.h>
  21. #include <asm/apic.h>
  22. #include <asm/perf_event.h>
  23. /*
  24. * this number is calculated from Intel's MSR_P4_CRU_ESCR5 register and it's
  25. * offset from MSR_P4_BSU_ESCR0.
  26. *
  27. * It will be the max for all platforms (for now)
  28. */
  29. #define NMI_MAX_COUNTER_BITS 66
  30. /*
  31. * perfctr_nmi_owner tracks the ownership of the perfctr registers:
  32. * evtsel_nmi_owner tracks the ownership of the event selection
  33. * - different performance counters/ event selection may be reserved for
  34. * different subsystems this reservation system just tries to coordinate
  35. * things a little
  36. */
  37. static DECLARE_BITMAP(perfctr_nmi_owner, NMI_MAX_COUNTER_BITS);
  38. static DECLARE_BITMAP(evntsel_nmi_owner, NMI_MAX_COUNTER_BITS);
  39. /* converts an msr to an appropriate reservation bit */
  40. static inline unsigned int nmi_perfctr_msr_to_bit(unsigned int msr)
  41. {
  42. /* returns the bit offset of the performance counter register */
  43. switch (boot_cpu_data.x86_vendor) {
  44. case X86_VENDOR_AMD:
  45. if (msr >= MSR_F15H_PERF_CTR)
  46. return (msr - MSR_F15H_PERF_CTR) >> 1;
  47. return msr - MSR_K7_PERFCTR0;
  48. case X86_VENDOR_INTEL:
  49. if (cpu_has(&boot_cpu_data, X86_FEATURE_ARCH_PERFMON))
  50. return msr - MSR_ARCH_PERFMON_PERFCTR0;
  51. switch (boot_cpu_data.x86) {
  52. case 6:
  53. return msr - MSR_P6_PERFCTR0;
  54. case 11:
  55. return msr - MSR_KNC_PERFCTR0;
  56. case 15:
  57. return msr - MSR_P4_BPU_PERFCTR0;
  58. }
  59. }
  60. return 0;
  61. }
  62. /*
  63. * converts an msr to an appropriate reservation bit
  64. * returns the bit offset of the event selection register
  65. */
  66. static inline unsigned int nmi_evntsel_msr_to_bit(unsigned int msr)
  67. {
  68. /* returns the bit offset of the event selection register */
  69. switch (boot_cpu_data.x86_vendor) {
  70. case X86_VENDOR_AMD:
  71. if (msr >= MSR_F15H_PERF_CTL)
  72. return (msr - MSR_F15H_PERF_CTL) >> 1;
  73. return msr - MSR_K7_EVNTSEL0;
  74. case X86_VENDOR_INTEL:
  75. if (cpu_has(&boot_cpu_data, X86_FEATURE_ARCH_PERFMON))
  76. return msr - MSR_ARCH_PERFMON_EVENTSEL0;
  77. switch (boot_cpu_data.x86) {
  78. case 6:
  79. return msr - MSR_P6_EVNTSEL0;
  80. case 11:
  81. return msr - MSR_KNC_EVNTSEL0;
  82. case 15:
  83. return msr - MSR_P4_BSU_ESCR0;
  84. }
  85. }
  86. return 0;
  87. }
  88. /* checks for a bit availability (hack for oprofile) */
  89. int avail_to_resrv_perfctr_nmi_bit(unsigned int counter)
  90. {
  91. BUG_ON(counter > NMI_MAX_COUNTER_BITS);
  92. return !test_bit(counter, perfctr_nmi_owner);
  93. }
  94. EXPORT_SYMBOL(avail_to_resrv_perfctr_nmi_bit);
  95. int reserve_perfctr_nmi(unsigned int msr)
  96. {
  97. unsigned int counter;
  98. counter = nmi_perfctr_msr_to_bit(msr);
  99. /* register not managed by the allocator? */
  100. if (counter > NMI_MAX_COUNTER_BITS)
  101. return 1;
  102. if (!test_and_set_bit(counter, perfctr_nmi_owner))
  103. return 1;
  104. return 0;
  105. }
  106. EXPORT_SYMBOL(reserve_perfctr_nmi);
  107. void release_perfctr_nmi(unsigned int msr)
  108. {
  109. unsigned int counter;
  110. counter = nmi_perfctr_msr_to_bit(msr);
  111. /* register not managed by the allocator? */
  112. if (counter > NMI_MAX_COUNTER_BITS)
  113. return;
  114. clear_bit(counter, perfctr_nmi_owner);
  115. }
  116. EXPORT_SYMBOL(release_perfctr_nmi);
  117. int reserve_evntsel_nmi(unsigned int msr)
  118. {
  119. unsigned int counter;
  120. counter = nmi_evntsel_msr_to_bit(msr);
  121. /* register not managed by the allocator? */
  122. if (counter > NMI_MAX_COUNTER_BITS)
  123. return 1;
  124. if (!test_and_set_bit(counter, evntsel_nmi_owner))
  125. return 1;
  126. return 0;
  127. }
  128. EXPORT_SYMBOL(reserve_evntsel_nmi);
  129. void release_evntsel_nmi(unsigned int msr)
  130. {
  131. unsigned int counter;
  132. counter = nmi_evntsel_msr_to_bit(msr);
  133. /* register not managed by the allocator? */
  134. if (counter > NMI_MAX_COUNTER_BITS)
  135. return;
  136. clear_bit(counter, evntsel_nmi_owner);
  137. }
  138. EXPORT_SYMBOL(release_evntsel_nmi);