/*
 * PMU support
 *
 * Copyright (C) 2012 ARM Limited
 * Author: Will Deacon <will.deacon@arm.com>
 *
 * This code is based heavily on the ARMv7 perf event code.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program. If not, see <http://www.gnu.org/licenses/>.
 */
#include <asm/irq_regs.h>
#include <asm/perf_event.h>
#include <asm/sysreg.h>
#include <asm/virt.h>

#include <linux/acpi.h>
#include <linux/clocksource.h>
#include <linux/of.h>
#include <linux/perf/arm_pmu.h>
#include <linux/platform_device.h>
/*
 * ARMv8 PMUv3 Performance Events handling code.
 * Common event types (some are defined in asm/perf_event.h).
 */

/* At least one of the following is required. */
#define ARMV8_PMUV3_PERFCTR_INST_RETIRED 0x08
#define ARMV8_PMUV3_PERFCTR_INST_SPEC 0x1B

/* Common architectural events. */
#define ARMV8_PMUV3_PERFCTR_LD_RETIRED 0x06
#define ARMV8_PMUV3_PERFCTR_ST_RETIRED 0x07
#define ARMV8_PMUV3_PERFCTR_EXC_TAKEN 0x09
#define ARMV8_PMUV3_PERFCTR_EXC_RETURN 0x0A
#define ARMV8_PMUV3_PERFCTR_CID_WRITE_RETIRED 0x0B
#define ARMV8_PMUV3_PERFCTR_PC_WRITE_RETIRED 0x0C
#define ARMV8_PMUV3_PERFCTR_BR_IMMED_RETIRED 0x0D
#define ARMV8_PMUV3_PERFCTR_BR_RETURN_RETIRED 0x0E
#define ARMV8_PMUV3_PERFCTR_UNALIGNED_LDST_RETIRED 0x0F
#define ARMV8_PMUV3_PERFCTR_TTBR_WRITE_RETIRED 0x1C
#define ARMV8_PMUV3_PERFCTR_CHAIN 0x1E
#define ARMV8_PMUV3_PERFCTR_BR_RETIRED 0x21

/* Common microarchitectural events. */
#define ARMV8_PMUV3_PERFCTR_L1I_CACHE_REFILL 0x01
#define ARMV8_PMUV3_PERFCTR_L1I_TLB_REFILL 0x02
#define ARMV8_PMUV3_PERFCTR_L1D_TLB_REFILL 0x05
#define ARMV8_PMUV3_PERFCTR_MEM_ACCESS 0x13
#define ARMV8_PMUV3_PERFCTR_L1I_CACHE 0x14
#define ARMV8_PMUV3_PERFCTR_L1D_CACHE_WB 0x15
#define ARMV8_PMUV3_PERFCTR_L2D_CACHE 0x16
#define ARMV8_PMUV3_PERFCTR_L2D_CACHE_REFILL 0x17
#define ARMV8_PMUV3_PERFCTR_L2D_CACHE_WB 0x18
#define ARMV8_PMUV3_PERFCTR_BUS_ACCESS 0x19
#define ARMV8_PMUV3_PERFCTR_MEMORY_ERROR 0x1A
#define ARMV8_PMUV3_PERFCTR_BUS_CYCLES 0x1D
#define ARMV8_PMUV3_PERFCTR_L1D_CACHE_ALLOCATE 0x1F
#define ARMV8_PMUV3_PERFCTR_L2D_CACHE_ALLOCATE 0x20
#define ARMV8_PMUV3_PERFCTR_BR_MIS_PRED_RETIRED 0x22
#define ARMV8_PMUV3_PERFCTR_STALL_FRONTEND 0x23
#define ARMV8_PMUV3_PERFCTR_STALL_BACKEND 0x24
#define ARMV8_PMUV3_PERFCTR_L1D_TLB 0x25
#define ARMV8_PMUV3_PERFCTR_L1I_TLB 0x26
#define ARMV8_PMUV3_PERFCTR_L2I_CACHE 0x27
#define ARMV8_PMUV3_PERFCTR_L2I_CACHE_REFILL 0x28
#define ARMV8_PMUV3_PERFCTR_L3D_CACHE_ALLOCATE 0x29
#define ARMV8_PMUV3_PERFCTR_L3D_CACHE_REFILL 0x2A
#define ARMV8_PMUV3_PERFCTR_L3D_CACHE 0x2B
#define ARMV8_PMUV3_PERFCTR_L3D_CACHE_WB 0x2C
#define ARMV8_PMUV3_PERFCTR_L2D_TLB_REFILL 0x2D
#define ARMV8_PMUV3_PERFCTR_L2I_TLB_REFILL 0x2E
#define ARMV8_PMUV3_PERFCTR_L2D_TLB 0x2F
#define ARMV8_PMUV3_PERFCTR_L2I_TLB 0x30

/* ARMv8 recommended implementation defined event types */
#define ARMV8_IMPDEF_PERFCTR_L1D_CACHE_RD 0x40
#define ARMV8_IMPDEF_PERFCTR_L1D_CACHE_WR 0x41
#define ARMV8_IMPDEF_PERFCTR_L1D_CACHE_REFILL_RD 0x42
#define ARMV8_IMPDEF_PERFCTR_L1D_CACHE_REFILL_WR 0x43
#define ARMV8_IMPDEF_PERFCTR_L1D_CACHE_REFILL_INNER 0x44
#define ARMV8_IMPDEF_PERFCTR_L1D_CACHE_REFILL_OUTER 0x45
#define ARMV8_IMPDEF_PERFCTR_L1D_CACHE_WB_VICTIM 0x46
#define ARMV8_IMPDEF_PERFCTR_L1D_CACHE_WB_CLEAN 0x47
#define ARMV8_IMPDEF_PERFCTR_L1D_CACHE_INVAL 0x48
#define ARMV8_IMPDEF_PERFCTR_L1D_TLB_REFILL_RD 0x4C
#define ARMV8_IMPDEF_PERFCTR_L1D_TLB_REFILL_WR 0x4D
#define ARMV8_IMPDEF_PERFCTR_L1D_TLB_RD 0x4E
#define ARMV8_IMPDEF_PERFCTR_L1D_TLB_WR 0x4F
#define ARMV8_IMPDEF_PERFCTR_L2D_CACHE_RD 0x50
#define ARMV8_IMPDEF_PERFCTR_L2D_CACHE_WR 0x51
#define ARMV8_IMPDEF_PERFCTR_L2D_CACHE_REFILL_RD 0x52
#define ARMV8_IMPDEF_PERFCTR_L2D_CACHE_REFILL_WR 0x53
#define ARMV8_IMPDEF_PERFCTR_L2D_CACHE_WB_VICTIM 0x56
#define ARMV8_IMPDEF_PERFCTR_L2D_CACHE_WB_CLEAN 0x57
#define ARMV8_IMPDEF_PERFCTR_L2D_CACHE_INVAL 0x58
#define ARMV8_IMPDEF_PERFCTR_L2D_TLB_REFILL_RD 0x5C
#define ARMV8_IMPDEF_PERFCTR_L2D_TLB_REFILL_WR 0x5D
#define ARMV8_IMPDEF_PERFCTR_L2D_TLB_RD 0x5E
#define ARMV8_IMPDEF_PERFCTR_L2D_TLB_WR 0x5F
#define ARMV8_IMPDEF_PERFCTR_BUS_ACCESS_RD 0x60
#define ARMV8_IMPDEF_PERFCTR_BUS_ACCESS_WR 0x61
#define ARMV8_IMPDEF_PERFCTR_BUS_ACCESS_SHARED 0x62
#define ARMV8_IMPDEF_PERFCTR_BUS_ACCESS_NOT_SHARED 0x63
#define ARMV8_IMPDEF_PERFCTR_BUS_ACCESS_NORMAL 0x64
#define ARMV8_IMPDEF_PERFCTR_BUS_ACCESS_PERIPH 0x65
#define ARMV8_IMPDEF_PERFCTR_MEM_ACCESS_RD 0x66
#define ARMV8_IMPDEF_PERFCTR_MEM_ACCESS_WR 0x67
#define ARMV8_IMPDEF_PERFCTR_UNALIGNED_LD_SPEC 0x68
#define ARMV8_IMPDEF_PERFCTR_UNALIGNED_ST_SPEC 0x69
#define ARMV8_IMPDEF_PERFCTR_UNALIGNED_LDST_SPEC 0x6A
#define ARMV8_IMPDEF_PERFCTR_LDREX_SPEC 0x6C
#define ARMV8_IMPDEF_PERFCTR_STREX_PASS_SPEC 0x6D
#define ARMV8_IMPDEF_PERFCTR_STREX_FAIL_SPEC 0x6E
#define ARMV8_IMPDEF_PERFCTR_STREX_SPEC 0x6F
#define ARMV8_IMPDEF_PERFCTR_LD_SPEC 0x70
#define ARMV8_IMPDEF_PERFCTR_ST_SPEC 0x71
#define ARMV8_IMPDEF_PERFCTR_LDST_SPEC 0x72
#define ARMV8_IMPDEF_PERFCTR_DP_SPEC 0x73
#define ARMV8_IMPDEF_PERFCTR_ASE_SPEC 0x74
#define ARMV8_IMPDEF_PERFCTR_VFP_SPEC 0x75
#define ARMV8_IMPDEF_PERFCTR_PC_WRITE_SPEC 0x76
#define ARMV8_IMPDEF_PERFCTR_CRYPTO_SPEC 0x77
#define ARMV8_IMPDEF_PERFCTR_BR_IMMED_SPEC 0x78
#define ARMV8_IMPDEF_PERFCTR_BR_RETURN_SPEC 0x79
#define ARMV8_IMPDEF_PERFCTR_BR_INDIRECT_SPEC 0x7A
#define ARMV8_IMPDEF_PERFCTR_ISB_SPEC 0x7C
#define ARMV8_IMPDEF_PERFCTR_DSB_SPEC 0x7D
#define ARMV8_IMPDEF_PERFCTR_DMB_SPEC 0x7E
#define ARMV8_IMPDEF_PERFCTR_EXC_UNDEF 0x81
#define ARMV8_IMPDEF_PERFCTR_EXC_SVC 0x82
#define ARMV8_IMPDEF_PERFCTR_EXC_PABORT 0x83
#define ARMV8_IMPDEF_PERFCTR_EXC_DABORT 0x84
#define ARMV8_IMPDEF_PERFCTR_EXC_IRQ 0x86
#define ARMV8_IMPDEF_PERFCTR_EXC_FIQ 0x87
#define ARMV8_IMPDEF_PERFCTR_EXC_SMC 0x88
#define ARMV8_IMPDEF_PERFCTR_EXC_HVC 0x8A
#define ARMV8_IMPDEF_PERFCTR_EXC_TRAP_PABORT 0x8B
#define ARMV8_IMPDEF_PERFCTR_EXC_TRAP_DABORT 0x8C
#define ARMV8_IMPDEF_PERFCTR_EXC_TRAP_OTHER 0x8D
#define ARMV8_IMPDEF_PERFCTR_EXC_TRAP_IRQ 0x8E
#define ARMV8_IMPDEF_PERFCTR_EXC_TRAP_FIQ 0x8F
#define ARMV8_IMPDEF_PERFCTR_RC_LD_SPEC 0x90
#define ARMV8_IMPDEF_PERFCTR_RC_ST_SPEC 0x91
#define ARMV8_IMPDEF_PERFCTR_L3D_CACHE_RD 0xA0
#define ARMV8_IMPDEF_PERFCTR_L3D_CACHE_WR 0xA1
#define ARMV8_IMPDEF_PERFCTR_L3D_CACHE_REFILL_RD 0xA2
#define ARMV8_IMPDEF_PERFCTR_L3D_CACHE_REFILL_WR 0xA3
#define ARMV8_IMPDEF_PERFCTR_L3D_CACHE_WB_VICTIM 0xA6
#define ARMV8_IMPDEF_PERFCTR_L3D_CACHE_WB_CLEAN 0xA7
#define ARMV8_IMPDEF_PERFCTR_L3D_CACHE_INVAL 0xA8

/* ARMv8 Cortex-A53 specific event types. */
#define ARMV8_A53_PERFCTR_PREF_LINEFILL 0xC2

/* ARMv8 Cavium ThunderX specific event types. */
#define ARMV8_THUNDER_PERFCTR_L1D_CACHE_MISS_ST 0xE9
#define ARMV8_THUNDER_PERFCTR_L1D_CACHE_PREF_ACCESS 0xEA
#define ARMV8_THUNDER_PERFCTR_L1D_CACHE_PREF_MISS 0xEB
#define ARMV8_THUNDER_PERFCTR_L1I_CACHE_PREF_ACCESS 0xEC
#define ARMV8_THUNDER_PERFCTR_L1I_CACHE_PREF_MISS 0xED
/* PMUv3 HW events mapping. */

/*
 * ARMv8 Architectural defined events, not all of these may
 * be supported on any given implementation. Undefined events will
 * be disabled at run-time.
 */
static const unsigned armv8_pmuv3_perf_map[PERF_COUNT_HW_MAX] = {
	PERF_MAP_ALL_UNSUPPORTED,
	[PERF_COUNT_HW_CPU_CYCLES] = ARMV8_PMUV3_PERFCTR_CPU_CYCLES,
	[PERF_COUNT_HW_INSTRUCTIONS] = ARMV8_PMUV3_PERFCTR_INST_RETIRED,
	[PERF_COUNT_HW_CACHE_REFERENCES] = ARMV8_PMUV3_PERFCTR_L1D_CACHE,
	[PERF_COUNT_HW_CACHE_MISSES] = ARMV8_PMUV3_PERFCTR_L1D_CACHE_REFILL,
	[PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = ARMV8_PMUV3_PERFCTR_PC_WRITE_RETIRED,
	[PERF_COUNT_HW_BRANCH_MISSES] = ARMV8_PMUV3_PERFCTR_BR_MIS_PRED,
	[PERF_COUNT_HW_BUS_CYCLES] = ARMV8_PMUV3_PERFCTR_BUS_CYCLES,
	[PERF_COUNT_HW_STALLED_CYCLES_FRONTEND] = ARMV8_PMUV3_PERFCTR_STALL_FRONTEND,
	[PERF_COUNT_HW_STALLED_CYCLES_BACKEND] = ARMV8_PMUV3_PERFCTR_STALL_BACKEND,
};

static const unsigned armv8_pmuv3_perf_cache_map[PERF_COUNT_HW_CACHE_MAX]
						[PERF_COUNT_HW_CACHE_OP_MAX]
						[PERF_COUNT_HW_CACHE_RESULT_MAX] = {
	PERF_CACHE_MAP_ALL_UNSUPPORTED,

	[C(L1D)][C(OP_READ)][C(RESULT_ACCESS)] = ARMV8_PMUV3_PERFCTR_L1D_CACHE,
	[C(L1D)][C(OP_READ)][C(RESULT_MISS)] = ARMV8_PMUV3_PERFCTR_L1D_CACHE_REFILL,
	[C(L1D)][C(OP_WRITE)][C(RESULT_ACCESS)] = ARMV8_PMUV3_PERFCTR_L1D_CACHE,
	[C(L1D)][C(OP_WRITE)][C(RESULT_MISS)] = ARMV8_PMUV3_PERFCTR_L1D_CACHE_REFILL,

	[C(L1I)][C(OP_READ)][C(RESULT_ACCESS)] = ARMV8_PMUV3_PERFCTR_L1I_CACHE,
	[C(L1I)][C(OP_READ)][C(RESULT_MISS)] = ARMV8_PMUV3_PERFCTR_L1I_CACHE_REFILL,

	[C(DTLB)][C(OP_READ)][C(RESULT_MISS)] = ARMV8_PMUV3_PERFCTR_L1D_TLB_REFILL,
	[C(DTLB)][C(OP_READ)][C(RESULT_ACCESS)] = ARMV8_PMUV3_PERFCTR_L1D_TLB,

	[C(ITLB)][C(OP_READ)][C(RESULT_MISS)] = ARMV8_PMUV3_PERFCTR_L1I_TLB_REFILL,
	[C(ITLB)][C(OP_READ)][C(RESULT_ACCESS)] = ARMV8_PMUV3_PERFCTR_L1I_TLB,

	[C(BPU)][C(OP_READ)][C(RESULT_ACCESS)] = ARMV8_PMUV3_PERFCTR_BR_PRED,
	[C(BPU)][C(OP_READ)][C(RESULT_MISS)] = ARMV8_PMUV3_PERFCTR_BR_MIS_PRED,
	[C(BPU)][C(OP_WRITE)][C(RESULT_ACCESS)] = ARMV8_PMUV3_PERFCTR_BR_PRED,
	[C(BPU)][C(OP_WRITE)][C(RESULT_MISS)] = ARMV8_PMUV3_PERFCTR_BR_MIS_PRED,
};
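
/*
 * Worked example (editorial sketch, based on the generic perf cache-event
 * config encoding of cache_id | (op_id << 8) | (result_id << 16)): a
 * request for PERF_COUNT_HW_CACHE_L1D | (PERF_COUNT_HW_CACHE_OP_READ << 8)
 * | (PERF_COUNT_HW_CACHE_RESULT_MISS << 16) indexes
 * [C(L1D)][C(OP_READ)][C(RESULT_MISS)] above and therefore selects
 * ARMV8_PMUV3_PERFCTR_L1D_CACHE_REFILL (defined in asm/perf_event.h) as
 * the hardware event number programmed into PMEVTYPER.
 */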
static const unsigned armv8_a53_perf_cache_map[PERF_COUNT_HW_CACHE_MAX]
					      [PERF_COUNT_HW_CACHE_OP_MAX]
					      [PERF_COUNT_HW_CACHE_RESULT_MAX] = {
	PERF_CACHE_MAP_ALL_UNSUPPORTED,

	[C(L1D)][C(OP_PREFETCH)][C(RESULT_MISS)] = ARMV8_A53_PERFCTR_PREF_LINEFILL,

	[C(NODE)][C(OP_READ)][C(RESULT_ACCESS)] = ARMV8_IMPDEF_PERFCTR_BUS_ACCESS_RD,
	[C(NODE)][C(OP_WRITE)][C(RESULT_ACCESS)] = ARMV8_IMPDEF_PERFCTR_BUS_ACCESS_WR,
};

static const unsigned armv8_a57_perf_cache_map[PERF_COUNT_HW_CACHE_MAX]
					      [PERF_COUNT_HW_CACHE_OP_MAX]
					      [PERF_COUNT_HW_CACHE_RESULT_MAX] = {
	PERF_CACHE_MAP_ALL_UNSUPPORTED,

	[C(L1D)][C(OP_READ)][C(RESULT_ACCESS)] = ARMV8_IMPDEF_PERFCTR_L1D_CACHE_RD,
	[C(L1D)][C(OP_READ)][C(RESULT_MISS)] = ARMV8_IMPDEF_PERFCTR_L1D_CACHE_REFILL_RD,
	[C(L1D)][C(OP_WRITE)][C(RESULT_ACCESS)] = ARMV8_IMPDEF_PERFCTR_L1D_CACHE_WR,
	[C(L1D)][C(OP_WRITE)][C(RESULT_MISS)] = ARMV8_IMPDEF_PERFCTR_L1D_CACHE_REFILL_WR,

	[C(DTLB)][C(OP_READ)][C(RESULT_MISS)] = ARMV8_IMPDEF_PERFCTR_L1D_TLB_REFILL_RD,
	[C(DTLB)][C(OP_WRITE)][C(RESULT_MISS)] = ARMV8_IMPDEF_PERFCTR_L1D_TLB_REFILL_WR,

	[C(NODE)][C(OP_READ)][C(RESULT_ACCESS)] = ARMV8_IMPDEF_PERFCTR_BUS_ACCESS_RD,
	[C(NODE)][C(OP_WRITE)][C(RESULT_ACCESS)] = ARMV8_IMPDEF_PERFCTR_BUS_ACCESS_WR,
};

static const unsigned armv8_a73_perf_cache_map[PERF_COUNT_HW_CACHE_MAX]
					      [PERF_COUNT_HW_CACHE_OP_MAX]
					      [PERF_COUNT_HW_CACHE_RESULT_MAX] = {
	PERF_CACHE_MAP_ALL_UNSUPPORTED,

	[C(L1D)][C(OP_READ)][C(RESULT_ACCESS)] = ARMV8_IMPDEF_PERFCTR_L1D_CACHE_RD,
	[C(L1D)][C(OP_WRITE)][C(RESULT_ACCESS)] = ARMV8_IMPDEF_PERFCTR_L1D_CACHE_WR,
};

static const unsigned armv8_thunder_perf_cache_map[PERF_COUNT_HW_CACHE_MAX]
						  [PERF_COUNT_HW_CACHE_OP_MAX]
						  [PERF_COUNT_HW_CACHE_RESULT_MAX] = {
	PERF_CACHE_MAP_ALL_UNSUPPORTED,

	[C(L1D)][C(OP_READ)][C(RESULT_ACCESS)] = ARMV8_IMPDEF_PERFCTR_L1D_CACHE_RD,
	[C(L1D)][C(OP_READ)][C(RESULT_MISS)] = ARMV8_IMPDEF_PERFCTR_L1D_CACHE_REFILL_RD,
	[C(L1D)][C(OP_WRITE)][C(RESULT_ACCESS)] = ARMV8_IMPDEF_PERFCTR_L1D_CACHE_WR,
	[C(L1D)][C(OP_WRITE)][C(RESULT_MISS)] = ARMV8_THUNDER_PERFCTR_L1D_CACHE_MISS_ST,
	[C(L1D)][C(OP_PREFETCH)][C(RESULT_ACCESS)] = ARMV8_THUNDER_PERFCTR_L1D_CACHE_PREF_ACCESS,
	[C(L1D)][C(OP_PREFETCH)][C(RESULT_MISS)] = ARMV8_THUNDER_PERFCTR_L1D_CACHE_PREF_MISS,

	[C(L1I)][C(OP_PREFETCH)][C(RESULT_ACCESS)] = ARMV8_THUNDER_PERFCTR_L1I_CACHE_PREF_ACCESS,
	[C(L1I)][C(OP_PREFETCH)][C(RESULT_MISS)] = ARMV8_THUNDER_PERFCTR_L1I_CACHE_PREF_MISS,

	[C(DTLB)][C(OP_READ)][C(RESULT_ACCESS)] = ARMV8_IMPDEF_PERFCTR_L1D_TLB_RD,
	[C(DTLB)][C(OP_READ)][C(RESULT_MISS)] = ARMV8_IMPDEF_PERFCTR_L1D_TLB_REFILL_RD,
	[C(DTLB)][C(OP_WRITE)][C(RESULT_ACCESS)] = ARMV8_IMPDEF_PERFCTR_L1D_TLB_WR,
	[C(DTLB)][C(OP_WRITE)][C(RESULT_MISS)] = ARMV8_IMPDEF_PERFCTR_L1D_TLB_REFILL_WR,
};

static const unsigned armv8_vulcan_perf_cache_map[PERF_COUNT_HW_CACHE_MAX]
						 [PERF_COUNT_HW_CACHE_OP_MAX]
						 [PERF_COUNT_HW_CACHE_RESULT_MAX] = {
	PERF_CACHE_MAP_ALL_UNSUPPORTED,

	[C(L1D)][C(OP_READ)][C(RESULT_ACCESS)] = ARMV8_IMPDEF_PERFCTR_L1D_CACHE_RD,
	[C(L1D)][C(OP_READ)][C(RESULT_MISS)] = ARMV8_IMPDEF_PERFCTR_L1D_CACHE_REFILL_RD,
	[C(L1D)][C(OP_WRITE)][C(RESULT_ACCESS)] = ARMV8_IMPDEF_PERFCTR_L1D_CACHE_WR,
	[C(L1D)][C(OP_WRITE)][C(RESULT_MISS)] = ARMV8_IMPDEF_PERFCTR_L1D_CACHE_REFILL_WR,

	[C(DTLB)][C(OP_READ)][C(RESULT_ACCESS)] = ARMV8_IMPDEF_PERFCTR_L1D_TLB_RD,
	[C(DTLB)][C(OP_WRITE)][C(RESULT_ACCESS)] = ARMV8_IMPDEF_PERFCTR_L1D_TLB_WR,
	[C(DTLB)][C(OP_READ)][C(RESULT_MISS)] = ARMV8_IMPDEF_PERFCTR_L1D_TLB_REFILL_RD,
	[C(DTLB)][C(OP_WRITE)][C(RESULT_MISS)] = ARMV8_IMPDEF_PERFCTR_L1D_TLB_REFILL_WR,

	[C(NODE)][C(OP_READ)][C(RESULT_ACCESS)] = ARMV8_IMPDEF_PERFCTR_BUS_ACCESS_RD,
	[C(NODE)][C(OP_WRITE)][C(RESULT_ACCESS)] = ARMV8_IMPDEF_PERFCTR_BUS_ACCESS_WR,
};
static ssize_t
armv8pmu_events_sysfs_show(struct device *dev,
			   struct device_attribute *attr, char *page)
{
	struct perf_pmu_events_attr *pmu_attr;

	pmu_attr = container_of(attr, struct perf_pmu_events_attr, attr);

	return sprintf(page, "event=0x%03llx\n", pmu_attr->id);
}

#define ARMV8_EVENT_ATTR_RESOLVE(m) #m

#define ARMV8_EVENT_ATTR(name, config) \
	PMU_EVENT_ATTR(name, armv8_event_attr_##name, \
		       config, armv8pmu_events_sysfs_show)

ARMV8_EVENT_ATTR(sw_incr, ARMV8_PMUV3_PERFCTR_SW_INCR);
ARMV8_EVENT_ATTR(l1i_cache_refill, ARMV8_PMUV3_PERFCTR_L1I_CACHE_REFILL);
ARMV8_EVENT_ATTR(l1i_tlb_refill, ARMV8_PMUV3_PERFCTR_L1I_TLB_REFILL);
ARMV8_EVENT_ATTR(l1d_cache_refill, ARMV8_PMUV3_PERFCTR_L1D_CACHE_REFILL);
ARMV8_EVENT_ATTR(l1d_cache, ARMV8_PMUV3_PERFCTR_L1D_CACHE);
ARMV8_EVENT_ATTR(l1d_tlb_refill, ARMV8_PMUV3_PERFCTR_L1D_TLB_REFILL);
ARMV8_EVENT_ATTR(ld_retired, ARMV8_PMUV3_PERFCTR_LD_RETIRED);
ARMV8_EVENT_ATTR(st_retired, ARMV8_PMUV3_PERFCTR_ST_RETIRED);
ARMV8_EVENT_ATTR(inst_retired, ARMV8_PMUV3_PERFCTR_INST_RETIRED);
ARMV8_EVENT_ATTR(exc_taken, ARMV8_PMUV3_PERFCTR_EXC_TAKEN);
ARMV8_EVENT_ATTR(exc_return, ARMV8_PMUV3_PERFCTR_EXC_RETURN);
ARMV8_EVENT_ATTR(cid_write_retired, ARMV8_PMUV3_PERFCTR_CID_WRITE_RETIRED);
ARMV8_EVENT_ATTR(pc_write_retired, ARMV8_PMUV3_PERFCTR_PC_WRITE_RETIRED);
ARMV8_EVENT_ATTR(br_immed_retired, ARMV8_PMUV3_PERFCTR_BR_IMMED_RETIRED);
ARMV8_EVENT_ATTR(br_return_retired, ARMV8_PMUV3_PERFCTR_BR_RETURN_RETIRED);
ARMV8_EVENT_ATTR(unaligned_ldst_retired, ARMV8_PMUV3_PERFCTR_UNALIGNED_LDST_RETIRED);
ARMV8_EVENT_ATTR(br_mis_pred, ARMV8_PMUV3_PERFCTR_BR_MIS_PRED);
ARMV8_EVENT_ATTR(cpu_cycles, ARMV8_PMUV3_PERFCTR_CPU_CYCLES);
ARMV8_EVENT_ATTR(br_pred, ARMV8_PMUV3_PERFCTR_BR_PRED);
ARMV8_EVENT_ATTR(mem_access, ARMV8_PMUV3_PERFCTR_MEM_ACCESS);
ARMV8_EVENT_ATTR(l1i_cache, ARMV8_PMUV3_PERFCTR_L1I_CACHE);
ARMV8_EVENT_ATTR(l1d_cache_wb, ARMV8_PMUV3_PERFCTR_L1D_CACHE_WB);
ARMV8_EVENT_ATTR(l2d_cache, ARMV8_PMUV3_PERFCTR_L2D_CACHE);
ARMV8_EVENT_ATTR(l2d_cache_refill, ARMV8_PMUV3_PERFCTR_L2D_CACHE_REFILL);
ARMV8_EVENT_ATTR(l2d_cache_wb, ARMV8_PMUV3_PERFCTR_L2D_CACHE_WB);
ARMV8_EVENT_ATTR(bus_access, ARMV8_PMUV3_PERFCTR_BUS_ACCESS);
ARMV8_EVENT_ATTR(memory_error, ARMV8_PMUV3_PERFCTR_MEMORY_ERROR);
ARMV8_EVENT_ATTR(inst_spec, ARMV8_PMUV3_PERFCTR_INST_SPEC);
ARMV8_EVENT_ATTR(ttbr_write_retired, ARMV8_PMUV3_PERFCTR_TTBR_WRITE_RETIRED);
ARMV8_EVENT_ATTR(bus_cycles, ARMV8_PMUV3_PERFCTR_BUS_CYCLES);
/* Don't expose the chain event in /sys, since it's useless in isolation */
ARMV8_EVENT_ATTR(l1d_cache_allocate, ARMV8_PMUV3_PERFCTR_L1D_CACHE_ALLOCATE);
ARMV8_EVENT_ATTR(l2d_cache_allocate, ARMV8_PMUV3_PERFCTR_L2D_CACHE_ALLOCATE);
ARMV8_EVENT_ATTR(br_retired, ARMV8_PMUV3_PERFCTR_BR_RETIRED);
ARMV8_EVENT_ATTR(br_mis_pred_retired, ARMV8_PMUV3_PERFCTR_BR_MIS_PRED_RETIRED);
ARMV8_EVENT_ATTR(stall_frontend, ARMV8_PMUV3_PERFCTR_STALL_FRONTEND);
ARMV8_EVENT_ATTR(stall_backend, ARMV8_PMUV3_PERFCTR_STALL_BACKEND);
ARMV8_EVENT_ATTR(l1d_tlb, ARMV8_PMUV3_PERFCTR_L1D_TLB);
ARMV8_EVENT_ATTR(l1i_tlb, ARMV8_PMUV3_PERFCTR_L1I_TLB);
ARMV8_EVENT_ATTR(l2i_cache, ARMV8_PMUV3_PERFCTR_L2I_CACHE);
ARMV8_EVENT_ATTR(l2i_cache_refill, ARMV8_PMUV3_PERFCTR_L2I_CACHE_REFILL);
ARMV8_EVENT_ATTR(l3d_cache_allocate, ARMV8_PMUV3_PERFCTR_L3D_CACHE_ALLOCATE);
ARMV8_EVENT_ATTR(l3d_cache_refill, ARMV8_PMUV3_PERFCTR_L3D_CACHE_REFILL);
ARMV8_EVENT_ATTR(l3d_cache, ARMV8_PMUV3_PERFCTR_L3D_CACHE);
ARMV8_EVENT_ATTR(l3d_cache_wb, ARMV8_PMUV3_PERFCTR_L3D_CACHE_WB);
ARMV8_EVENT_ATTR(l2d_tlb_refill, ARMV8_PMUV3_PERFCTR_L2D_TLB_REFILL);
ARMV8_EVENT_ATTR(l2i_tlb_refill, ARMV8_PMUV3_PERFCTR_L2I_TLB_REFILL);
ARMV8_EVENT_ATTR(l2d_tlb, ARMV8_PMUV3_PERFCTR_L2D_TLB);
ARMV8_EVENT_ATTR(l2i_tlb, ARMV8_PMUV3_PERFCTR_L2I_TLB);

static struct attribute *armv8_pmuv3_event_attrs[] = {
	&armv8_event_attr_sw_incr.attr.attr,
	&armv8_event_attr_l1i_cache_refill.attr.attr,
	&armv8_event_attr_l1i_tlb_refill.attr.attr,
	&armv8_event_attr_l1d_cache_refill.attr.attr,
	&armv8_event_attr_l1d_cache.attr.attr,
	&armv8_event_attr_l1d_tlb_refill.attr.attr,
	&armv8_event_attr_ld_retired.attr.attr,
	&armv8_event_attr_st_retired.attr.attr,
	&armv8_event_attr_inst_retired.attr.attr,
	&armv8_event_attr_exc_taken.attr.attr,
	&armv8_event_attr_exc_return.attr.attr,
	&armv8_event_attr_cid_write_retired.attr.attr,
	&armv8_event_attr_pc_write_retired.attr.attr,
	&armv8_event_attr_br_immed_retired.attr.attr,
	&armv8_event_attr_br_return_retired.attr.attr,
	&armv8_event_attr_unaligned_ldst_retired.attr.attr,
	&armv8_event_attr_br_mis_pred.attr.attr,
	&armv8_event_attr_cpu_cycles.attr.attr,
	&armv8_event_attr_br_pred.attr.attr,
	&armv8_event_attr_mem_access.attr.attr,
	&armv8_event_attr_l1i_cache.attr.attr,
	&armv8_event_attr_l1d_cache_wb.attr.attr,
	&armv8_event_attr_l2d_cache.attr.attr,
	&armv8_event_attr_l2d_cache_refill.attr.attr,
	&armv8_event_attr_l2d_cache_wb.attr.attr,
	&armv8_event_attr_bus_access.attr.attr,
	&armv8_event_attr_memory_error.attr.attr,
	&armv8_event_attr_inst_spec.attr.attr,
	&armv8_event_attr_ttbr_write_retired.attr.attr,
	&armv8_event_attr_bus_cycles.attr.attr,
	&armv8_event_attr_l1d_cache_allocate.attr.attr,
	&armv8_event_attr_l2d_cache_allocate.attr.attr,
	&armv8_event_attr_br_retired.attr.attr,
	&armv8_event_attr_br_mis_pred_retired.attr.attr,
	&armv8_event_attr_stall_frontend.attr.attr,
	&armv8_event_attr_stall_backend.attr.attr,
	&armv8_event_attr_l1d_tlb.attr.attr,
	&armv8_event_attr_l1i_tlb.attr.attr,
	&armv8_event_attr_l2i_cache.attr.attr,
	&armv8_event_attr_l2i_cache_refill.attr.attr,
	&armv8_event_attr_l3d_cache_allocate.attr.attr,
	&armv8_event_attr_l3d_cache_refill.attr.attr,
	&armv8_event_attr_l3d_cache.attr.attr,
	&armv8_event_attr_l3d_cache_wb.attr.attr,
	&armv8_event_attr_l2d_tlb_refill.attr.attr,
	&armv8_event_attr_l2i_tlb_refill.attr.attr,
	&armv8_event_attr_l2d_tlb.attr.attr,
	&armv8_event_attr_l2i_tlb.attr.attr,
	NULL,
};

static umode_t
armv8pmu_event_attr_is_visible(struct kobject *kobj,
			       struct attribute *attr, int unused)
{
	struct device *dev = kobj_to_dev(kobj);
	struct pmu *pmu = dev_get_drvdata(dev);
	struct arm_pmu *cpu_pmu = container_of(pmu, struct arm_pmu, pmu);
	struct perf_pmu_events_attr *pmu_attr;

	pmu_attr = container_of(attr, struct perf_pmu_events_attr, attr.attr);

	if (test_bit(pmu_attr->id, cpu_pmu->pmceid_bitmap))
		return attr->mode;

	return 0;
}

static struct attribute_group armv8_pmuv3_events_attr_group = {
	.name = "events",
	.attrs = armv8_pmuv3_event_attrs,
	.is_visible = armv8pmu_event_attr_is_visible,
};

PMU_FORMAT_ATTR(event, "config:0-15");
PMU_FORMAT_ATTR(long, "config1:0");

static inline bool armv8pmu_event_is_64bit(struct perf_event *event)
{
	return event->attr.config1 & 0x1;
}

static struct attribute *armv8_pmuv3_format_attrs[] = {
	&format_attr_event.attr,
	&format_attr_long.attr,
	NULL,
};

static struct attribute_group armv8_pmuv3_format_attr_group = {
	.name = "format",
	.attrs = armv8_pmuv3_format_attrs,
};
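
/*
 * Usage sketch (editorial example; assumes this PMU registers under the
 * name "armv8_pmuv3" and that the perf tool parses the "event" and "long"
 * format strings above): a 64-bit, chained count of retired instructions
 * could be requested from userspace with something like
 *
 *	perf stat -e armv8_pmuv3/inst_retired,long/ -a sleep 1
 *
 * where the "long" term sets config1 bit 0, which is what
 * armv8pmu_event_is_64bit() above tests.
 */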
/*
 * Perf Events' indices
 */
#define ARMV8_IDX_CYCLE_COUNTER 0
#define ARMV8_IDX_COUNTER0 1
#define ARMV8_IDX_COUNTER_LAST(cpu_pmu) \
	(ARMV8_IDX_CYCLE_COUNTER + cpu_pmu->num_events - 1)

/*
 * We must chain two programmable counters for 64 bit events,
 * except when we have allocated the 64bit cycle counter (for CPU
 * cycles event). This must be called only when the event has
 * a counter allocated.
 */
static inline bool armv8pmu_event_is_chained(struct perf_event *event)
{
	int idx = event->hw.idx;

	return !WARN_ON(idx < 0) &&
	       armv8pmu_event_is_64bit(event) &&
	       (idx != ARMV8_IDX_CYCLE_COUNTER);
}

/*
 * ARMv8 low level PMU access
 */

/*
 * Perf Event to low level counters mapping
 */
#define ARMV8_IDX_TO_COUNTER(x) \
	(((x) - ARMV8_IDX_COUNTER0) & ARMV8_PMU_COUNTER_MASK)
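
/*
 * For illustration: perf idx 0 (ARMV8_IDX_CYCLE_COUNTER) is the dedicated
 * cycle counter PMCCNTR_EL0 and never goes through this macro, while
 * ARMV8_IDX_TO_COUNTER(1) == 0, ARMV8_IDX_TO_COUNTER(2) == 1, and so on,
 * i.e. perf idx N (for N >= ARMV8_IDX_COUNTER0) selects hardware event
 * counter N - 1 via PMSELR_EL0.
 */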
static inline u32 armv8pmu_pmcr_read(void)
{
	return read_sysreg(pmcr_el0);
}

static inline void armv8pmu_pmcr_write(u32 val)
{
	val &= ARMV8_PMU_PMCR_MASK;
	isb();
	write_sysreg(val, pmcr_el0);
}

static inline int armv8pmu_has_overflowed(u32 pmovsr)
{
	return pmovsr & ARMV8_PMU_OVERFLOWED_MASK;
}

static inline int armv8pmu_counter_valid(struct arm_pmu *cpu_pmu, int idx)
{
	return idx >= ARMV8_IDX_CYCLE_COUNTER &&
	       idx <= ARMV8_IDX_COUNTER_LAST(cpu_pmu);
}

static inline int armv8pmu_counter_has_overflowed(u32 pmnc, int idx)
{
	return pmnc & BIT(ARMV8_IDX_TO_COUNTER(idx));
}

static inline void armv8pmu_select_counter(int idx)
{
	u32 counter = ARMV8_IDX_TO_COUNTER(idx);

	write_sysreg(counter, pmselr_el0);
	isb();
}

static inline u32 armv8pmu_read_evcntr(int idx)
{
	armv8pmu_select_counter(idx);
	return read_sysreg(pmxevcntr_el0);
}

static inline u64 armv8pmu_read_hw_counter(struct perf_event *event)
{
	int idx = event->hw.idx;
	u64 val = 0;

	val = armv8pmu_read_evcntr(idx);
	if (armv8pmu_event_is_chained(event))
		val = (val << 32) | armv8pmu_read_evcntr(idx - 1);
	return val;
}
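
/*
 * Chained read layout, for illustration: the counter at idx is programmed
 * with the CHAIN event and therefore accumulates overflows of the counter
 * at idx - 1, so the composed 64-bit value read above is
 *
 *	((u64)read_evcntr(idx) << 32) | read_evcntr(idx - 1)
 *
 * i.e. high half from the odd hardware counter, low half from the
 * preceding even one.
 */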
static inline u64 armv8pmu_read_counter(struct perf_event *event)
{
	struct arm_pmu *cpu_pmu = to_arm_pmu(event->pmu);
	struct hw_perf_event *hwc = &event->hw;
	int idx = hwc->idx;
	u64 value = 0;

	if (!armv8pmu_counter_valid(cpu_pmu, idx))
		pr_err("CPU%u reading wrong counter %d\n",
			smp_processor_id(), idx);
	else if (idx == ARMV8_IDX_CYCLE_COUNTER)
		value = read_sysreg(pmccntr_el0);
	else
		value = armv8pmu_read_hw_counter(event);

	return value;
}

static inline void armv8pmu_write_evcntr(int idx, u32 value)
{
	armv8pmu_select_counter(idx);
	write_sysreg(value, pmxevcntr_el0);
}

static inline void armv8pmu_write_hw_counter(struct perf_event *event,
					     u64 value)
{
	int idx = event->hw.idx;

	if (armv8pmu_event_is_chained(event)) {
		armv8pmu_write_evcntr(idx, upper_32_bits(value));
		armv8pmu_write_evcntr(idx - 1, lower_32_bits(value));
	} else {
		armv8pmu_write_evcntr(idx, value);
	}
}

static inline void armv8pmu_write_counter(struct perf_event *event, u64 value)
{
	struct arm_pmu *cpu_pmu = to_arm_pmu(event->pmu);
	struct hw_perf_event *hwc = &event->hw;
	int idx = hwc->idx;

	if (!armv8pmu_counter_valid(cpu_pmu, idx))
		pr_err("CPU%u writing wrong counter %d\n",
			smp_processor_id(), idx);
	else if (idx == ARMV8_IDX_CYCLE_COUNTER) {
		/*
		 * The cycles counter is really a 64-bit counter.
		 * When treating it as a 32-bit counter, we only count
		 * the lower 32 bits, and set the upper 32-bits so that
		 * we get an interrupt upon 32-bit overflow.
		 */
		if (!armv8pmu_event_is_64bit(event))
			value |= 0xffffffff00000000ULL;
		write_sysreg(value, pmccntr_el0);
	} else
		armv8pmu_write_hw_counter(event, value);
}
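
/*
 * Worked example of the cycle counter trick above (editorial sketch):
 * restarting a 32-bit cycles event with value = 0xfffff000 (0x1000 counts
 * before the low word wraps) becomes 0xfffffffffffff000 in PMCCNTR_EL0;
 * since armv8pmu_reset() sets PMCR_EL0.LC, the overflow flag (and hence
 * the interrupt) is raised when the full 64-bit value wraps, i.e. after
 * exactly 0x1000 cycles, even though only the low 32 bits are in use.
 */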
static inline void armv8pmu_write_evtype(int idx, u32 val)
{
	armv8pmu_select_counter(idx);
	val &= ARMV8_PMU_EVTYPE_MASK;
	write_sysreg(val, pmxevtyper_el0);
}

static inline void armv8pmu_write_event_type(struct perf_event *event)
{
	struct hw_perf_event *hwc = &event->hw;
	int idx = hwc->idx;

	/*
	 * For chained events, the low counter is programmed to count
	 * the event of interest and the high counter is programmed
	 * with CHAIN event code with filters set to count at all ELs.
	 */
	if (armv8pmu_event_is_chained(event)) {
		u32 chain_evt = ARMV8_PMUV3_PERFCTR_CHAIN |
				ARMV8_PMU_INCLUDE_EL2;

		armv8pmu_write_evtype(idx - 1, hwc->config_base);
		armv8pmu_write_evtype(idx, chain_evt);
	} else {
		armv8pmu_write_evtype(idx, hwc->config_base);
	}
}

static inline int armv8pmu_enable_counter(int idx)
{
	u32 counter = ARMV8_IDX_TO_COUNTER(idx);

	write_sysreg(BIT(counter), pmcntenset_el0);
	return idx;
}

static inline void armv8pmu_enable_event_counter(struct perf_event *event)
{
	int idx = event->hw.idx;

	armv8pmu_enable_counter(idx);
	if (armv8pmu_event_is_chained(event))
		armv8pmu_enable_counter(idx - 1);
	isb();
}

static inline int armv8pmu_disable_counter(int idx)
{
	u32 counter = ARMV8_IDX_TO_COUNTER(idx);

	write_sysreg(BIT(counter), pmcntenclr_el0);
	return idx;
}

static inline void armv8pmu_disable_event_counter(struct perf_event *event)
{
	struct hw_perf_event *hwc = &event->hw;
	int idx = hwc->idx;

	if (armv8pmu_event_is_chained(event))
		armv8pmu_disable_counter(idx - 1);
	armv8pmu_disable_counter(idx);
}

static inline int armv8pmu_enable_intens(int idx)
{
	u32 counter = ARMV8_IDX_TO_COUNTER(idx);

	write_sysreg(BIT(counter), pmintenset_el1);
	return idx;
}

static inline int armv8pmu_enable_event_irq(struct perf_event *event)
{
	return armv8pmu_enable_intens(event->hw.idx);
}

static inline int armv8pmu_disable_intens(int idx)
{
	u32 counter = ARMV8_IDX_TO_COUNTER(idx);

	write_sysreg(BIT(counter), pmintenclr_el1);
	isb();
	/* Clear the overflow flag in case an interrupt is pending. */
	write_sysreg(BIT(counter), pmovsclr_el0);
	isb();

	return idx;
}

static inline int armv8pmu_disable_event_irq(struct perf_event *event)
{
	return armv8pmu_disable_intens(event->hw.idx);
}

static inline u32 armv8pmu_getreset_flags(void)
{
	u32 value;

	/* Read */
	value = read_sysreg(pmovsclr_el0);

	/* Write to clear flags */
	value &= ARMV8_PMU_OVSR_MASK;
	write_sysreg(value, pmovsclr_el0);

	return value;
}
static void armv8pmu_enable_event(struct perf_event *event)
{
	unsigned long flags;
	struct arm_pmu *cpu_pmu = to_arm_pmu(event->pmu);
	struct pmu_hw_events *events = this_cpu_ptr(cpu_pmu->hw_events);

	/*
	 * Enable counter and interrupt, and set the counter to count
	 * the event that we're interested in.
	 */
	raw_spin_lock_irqsave(&events->pmu_lock, flags);

	/*
	 * Disable counter
	 */
	armv8pmu_disable_event_counter(event);

	/*
	 * Set event (if destined for PMNx counters).
	 */
	armv8pmu_write_event_type(event);

	/*
	 * Enable interrupt for this counter
	 */
	armv8pmu_enable_event_irq(event);

	/*
	 * Enable counter
	 */
	armv8pmu_enable_event_counter(event);

	raw_spin_unlock_irqrestore(&events->pmu_lock, flags);
}

static void armv8pmu_disable_event(struct perf_event *event)
{
	unsigned long flags;
	struct arm_pmu *cpu_pmu = to_arm_pmu(event->pmu);
	struct pmu_hw_events *events = this_cpu_ptr(cpu_pmu->hw_events);

	/*
	 * Disable counter and interrupt
	 */
	raw_spin_lock_irqsave(&events->pmu_lock, flags);

	/*
	 * Disable counter
	 */
	armv8pmu_disable_event_counter(event);

	/*
	 * Disable interrupt for this counter
	 */
	armv8pmu_disable_event_irq(event);

	raw_spin_unlock_irqrestore(&events->pmu_lock, flags);
}

static void armv8pmu_start(struct arm_pmu *cpu_pmu)
{
	unsigned long flags;
	struct pmu_hw_events *events = this_cpu_ptr(cpu_pmu->hw_events);

	raw_spin_lock_irqsave(&events->pmu_lock, flags);
	/* Enable all counters */
	armv8pmu_pmcr_write(armv8pmu_pmcr_read() | ARMV8_PMU_PMCR_E);
	raw_spin_unlock_irqrestore(&events->pmu_lock, flags);
}

static void armv8pmu_stop(struct arm_pmu *cpu_pmu)
{
	unsigned long flags;
	struct pmu_hw_events *events = this_cpu_ptr(cpu_pmu->hw_events);

	raw_spin_lock_irqsave(&events->pmu_lock, flags);
	/* Disable all counters */
	armv8pmu_pmcr_write(armv8pmu_pmcr_read() & ~ARMV8_PMU_PMCR_E);
	raw_spin_unlock_irqrestore(&events->pmu_lock, flags);
}
static irqreturn_t armv8pmu_handle_irq(struct arm_pmu *cpu_pmu)
{
	u32 pmovsr;
	struct perf_sample_data data;
	struct pmu_hw_events *cpuc = this_cpu_ptr(cpu_pmu->hw_events);
	struct pt_regs *regs;
	int idx;

	/*
	 * Get and reset the IRQ flags
	 */
	pmovsr = armv8pmu_getreset_flags();

	/*
	 * Did an overflow occur?
	 */
	if (!armv8pmu_has_overflowed(pmovsr))
		return IRQ_NONE;

	/*
	 * Handle the counter(s) overflow(s)
	 */
	regs = get_irq_regs();

	/*
	 * Stop the PMU while processing the counter overflows
	 * to prevent skews in group events.
	 */
	armv8pmu_stop(cpu_pmu);
	for (idx = 0; idx < cpu_pmu->num_events; ++idx) {
		struct perf_event *event = cpuc->events[idx];
		struct hw_perf_event *hwc;

		/* Ignore if we don't have an event. */
		if (!event)
			continue;

		/*
		 * We have a single interrupt for all counters. Check that
		 * each counter has overflowed before we process it.
		 */
		if (!armv8pmu_counter_has_overflowed(pmovsr, idx))
			continue;

		hwc = &event->hw;
		armpmu_event_update(event);
		perf_sample_data_init(&data, 0, hwc->last_period);
		if (!armpmu_event_set_period(event))
			continue;

		if (perf_event_overflow(event, &data, regs))
			cpu_pmu->disable(event);
	}
	armv8pmu_start(cpu_pmu);

	/*
	 * Handle the pending perf events.
	 *
	 * Note: this call *must* be run with interrupts disabled. For
	 * platforms that can have the PMU interrupts raised as an NMI, this
	 * will not work.
	 */
	irq_work_run();

	return IRQ_HANDLED;
}
static int armv8pmu_get_single_idx(struct pmu_hw_events *cpuc,
				   struct arm_pmu *cpu_pmu)
{
	int idx;

	for (idx = ARMV8_IDX_COUNTER0; idx < cpu_pmu->num_events; idx++) {
		if (!test_and_set_bit(idx, cpuc->used_mask))
			return idx;
	}
	return -EAGAIN;
}

static int armv8pmu_get_chain_idx(struct pmu_hw_events *cpuc,
				  struct arm_pmu *cpu_pmu)
{
	int idx;

	/*
	 * Chaining requires two consecutive event counters, where
	 * the lower idx must be even.
	 */
	for (idx = ARMV8_IDX_COUNTER0 + 1; idx < cpu_pmu->num_events; idx += 2) {
		if (!test_and_set_bit(idx, cpuc->used_mask)) {
			/* Check if the preceding even counter is available */
			if (!test_and_set_bit(idx - 1, cpuc->used_mask))
				return idx;
			/* Release the Odd counter */
			clear_bit(idx, cpuc->used_mask);
		}
	}
	return -EAGAIN;
}
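
/*
 * Allocation example (for illustration): on a PMU with six event counters
 * plus the cycle counter, the loop above tries perf idx 2, 4 and 6 and
 * claims the pair (idx - 1, idx) when both bits are free. Perf idx 2 and 1
 * map to hardware counters 1 and 0, so the low half of a chained event
 * always lands on an even-numbered hardware counter, which is what the
 * architectural CHAIN event expects to sit on top of.
 */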
static int armv8pmu_get_event_idx(struct pmu_hw_events *cpuc,
				  struct perf_event *event)
{
	struct arm_pmu *cpu_pmu = to_arm_pmu(event->pmu);
	struct hw_perf_event *hwc = &event->hw;
	unsigned long evtype = hwc->config_base & ARMV8_PMU_EVTYPE_EVENT;

	/* Always prefer to place a cycle counter into the cycle counter. */
	if (evtype == ARMV8_PMUV3_PERFCTR_CPU_CYCLES) {
		if (!test_and_set_bit(ARMV8_IDX_CYCLE_COUNTER, cpuc->used_mask))
			return ARMV8_IDX_CYCLE_COUNTER;
	}

	/*
	 * Otherwise use events counters
	 */
	if (armv8pmu_event_is_64bit(event))
		return armv8pmu_get_chain_idx(cpuc, cpu_pmu);
	else
		return armv8pmu_get_single_idx(cpuc, cpu_pmu);
}

static void armv8pmu_clear_event_idx(struct pmu_hw_events *cpuc,
				     struct perf_event *event)
{
	int idx = event->hw.idx;

	clear_bit(idx, cpuc->used_mask);
	if (armv8pmu_event_is_chained(event))
		clear_bit(idx - 1, cpuc->used_mask);
}

/*
 * Add an event filter to a given event. This will only work for PMUv2 PMUs.
 */
static int armv8pmu_set_event_filter(struct hw_perf_event *event,
				     struct perf_event_attr *attr)
{
	unsigned long config_base = 0;

	if (attr->exclude_idle)
		return -EPERM;

	/*
	 * If we're running in hyp mode, then we *are* the hypervisor.
	 * Therefore we ignore exclude_hv in this configuration, since
	 * there's no hypervisor to sample anyway. This is consistent
	 * with other architectures (x86 and Power).
	 */
	if (is_kernel_in_hyp_mode()) {
		if (!attr->exclude_kernel)
			config_base |= ARMV8_PMU_INCLUDE_EL2;
	} else {
		if (attr->exclude_kernel)
			config_base |= ARMV8_PMU_EXCLUDE_EL1;
		if (!attr->exclude_hv)
			config_base |= ARMV8_PMU_INCLUDE_EL2;
	}

	if (attr->exclude_user)
		config_base |= ARMV8_PMU_EXCLUDE_EL0;

	/*
	 * Install the filter into config_base as this is used to
	 * construct the event type.
	 */
	event->config_base = config_base;

	return 0;
}
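
/*
 * Filtering example (editorial sketch of how the generic exclude bits map
 * to the PMEVTYPER filter bits set above): a user-only request such as
 * "perf stat -e armv8_pmuv3/ld_retired/u" typically sets
 * attr.exclude_kernel and attr.exclude_hv, so outside of hyp mode
 * config_base ends up with ARMV8_PMU_EXCLUDE_EL1 set and
 * ARMV8_PMU_INCLUDE_EL2 clear, i.e. the counter only counts at EL0.
 */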
static int armv8pmu_filter_match(struct perf_event *event)
{
	unsigned long evtype = event->hw.config_base & ARMV8_PMU_EVTYPE_EVENT;

	return evtype != ARMV8_PMUV3_PERFCTR_CHAIN;
}

static void armv8pmu_reset(void *info)
{
	struct arm_pmu *cpu_pmu = (struct arm_pmu *)info;
	u32 idx, nb_cnt = cpu_pmu->num_events;

	/* The counter and interrupt enable registers are unknown at reset. */
	for (idx = ARMV8_IDX_CYCLE_COUNTER; idx < nb_cnt; ++idx) {
		armv8pmu_disable_counter(idx);
		armv8pmu_disable_intens(idx);
	}

	/*
	 * Initialize & Reset PMNC. Request overflow interrupt for
	 * 64 bit cycle counter but cheat in armv8pmu_write_counter().
	 */
	armv8pmu_pmcr_write(ARMV8_PMU_PMCR_P | ARMV8_PMU_PMCR_C |
			    ARMV8_PMU_PMCR_LC);
}

static int __armv8_pmuv3_map_event(struct perf_event *event,
				   const unsigned (*extra_event_map)
						  [PERF_COUNT_HW_MAX],
				   const unsigned (*extra_cache_map)
						  [PERF_COUNT_HW_CACHE_MAX]
						  [PERF_COUNT_HW_CACHE_OP_MAX]
						  [PERF_COUNT_HW_CACHE_RESULT_MAX])
{
	int hw_event_id;
	struct arm_pmu *armpmu = to_arm_pmu(event->pmu);

	hw_event_id = armpmu_map_event(event, &armv8_pmuv3_perf_map,
				       &armv8_pmuv3_perf_cache_map,
				       ARMV8_PMU_EVTYPE_EVENT);

	if (armv8pmu_event_is_64bit(event))
		event->hw.flags |= ARMPMU_EVT_64BIT;

	/* Only expose micro/arch events supported by this PMU */
	if ((hw_event_id > 0) && (hw_event_id < ARMV8_PMUV3_MAX_COMMON_EVENTS)
	    && test_bit(hw_event_id, armpmu->pmceid_bitmap)) {
		return hw_event_id;
	}

	return armpmu_map_event(event, extra_event_map, extra_cache_map,
				ARMV8_PMU_EVTYPE_EVENT);
}
static int armv8_pmuv3_map_event(struct perf_event *event)
{
	return __armv8_pmuv3_map_event(event, NULL, NULL);
}

static int armv8_a53_map_event(struct perf_event *event)
{
	return __armv8_pmuv3_map_event(event, NULL, &armv8_a53_perf_cache_map);
}

static int armv8_a57_map_event(struct perf_event *event)
{
	return __armv8_pmuv3_map_event(event, NULL, &armv8_a57_perf_cache_map);
}

static int armv8_a73_map_event(struct perf_event *event)
{
	return __armv8_pmuv3_map_event(event, NULL, &armv8_a73_perf_cache_map);
}

static int armv8_thunder_map_event(struct perf_event *event)
{
	return __armv8_pmuv3_map_event(event, NULL,
				       &armv8_thunder_perf_cache_map);
}

static int armv8_vulcan_map_event(struct perf_event *event)
{
	return __armv8_pmuv3_map_event(event, NULL,
				       &armv8_vulcan_perf_cache_map);
}
struct armv8pmu_probe_info {
	struct arm_pmu *pmu;
	bool present;
};

static void __armv8pmu_probe_pmu(void *info)
{
	struct armv8pmu_probe_info *probe = info;
	struct arm_pmu *cpu_pmu = probe->pmu;
	u64 dfr0;
	u32 pmceid[2];
	int pmuver;

	dfr0 = read_sysreg(id_aa64dfr0_el1);
	pmuver = cpuid_feature_extract_unsigned_field(dfr0,
			ID_AA64DFR0_PMUVER_SHIFT);
	if (pmuver == 0xf || pmuver == 0)
		return;

	probe->present = true;

	/* Read the nb of CNTx counters supported from PMNC */
	cpu_pmu->num_events = (armv8pmu_pmcr_read() >> ARMV8_PMU_PMCR_N_SHIFT)
		& ARMV8_PMU_PMCR_N_MASK;

	/* Add the CPU cycles counter */
	cpu_pmu->num_events += 1;

	pmceid[0] = read_sysreg(pmceid0_el0);
	pmceid[1] = read_sysreg(pmceid1_el0);

	bitmap_from_arr32(cpu_pmu->pmceid_bitmap,
			  pmceid, ARMV8_PMUV3_MAX_COMMON_EVENTS);
}

static int armv8pmu_probe_pmu(struct arm_pmu *cpu_pmu)
{
	struct armv8pmu_probe_info probe = {
		.pmu = cpu_pmu,
		.present = false,
	};
	int ret;

	ret = smp_call_function_any(&cpu_pmu->supported_cpus,
				    __armv8pmu_probe_pmu,
				    &probe, 1);
	if (ret)
		return ret;

	return probe.present ? 0 : -ENODEV;
}

static int armv8_pmu_init(struct arm_pmu *cpu_pmu)
{
	int ret = armv8pmu_probe_pmu(cpu_pmu);
	if (ret)
		return ret;

	cpu_pmu->handle_irq = armv8pmu_handle_irq,
	cpu_pmu->enable = armv8pmu_enable_event,
	cpu_pmu->disable = armv8pmu_disable_event,
	cpu_pmu->read_counter = armv8pmu_read_counter,
	cpu_pmu->write_counter = armv8pmu_write_counter,
	cpu_pmu->get_event_idx = armv8pmu_get_event_idx,
	cpu_pmu->clear_event_idx = armv8pmu_clear_event_idx,
	cpu_pmu->start = armv8pmu_start,
	cpu_pmu->stop = armv8pmu_stop,
	cpu_pmu->reset = armv8pmu_reset,
	cpu_pmu->set_event_filter = armv8pmu_set_event_filter;
	cpu_pmu->filter_match = armv8pmu_filter_match;

	return 0;
}
static int armv8_pmuv3_init(struct arm_pmu *cpu_pmu)
{
	int ret = armv8_pmu_init(cpu_pmu);
	if (ret)
		return ret;

	cpu_pmu->name = "armv8_pmuv3";
	cpu_pmu->map_event = armv8_pmuv3_map_event;
	cpu_pmu->attr_groups[ARMPMU_ATTR_GROUP_EVENTS] =
		&armv8_pmuv3_events_attr_group;
	cpu_pmu->attr_groups[ARMPMU_ATTR_GROUP_FORMATS] =
		&armv8_pmuv3_format_attr_group;

	return 0;
}

static int armv8_a35_pmu_init(struct arm_pmu *cpu_pmu)
{
	int ret = armv8_pmu_init(cpu_pmu);
	if (ret)
		return ret;

	cpu_pmu->name = "armv8_cortex_a35";
	cpu_pmu->map_event = armv8_a53_map_event;
	cpu_pmu->attr_groups[ARMPMU_ATTR_GROUP_EVENTS] =
		&armv8_pmuv3_events_attr_group;
	cpu_pmu->attr_groups[ARMPMU_ATTR_GROUP_FORMATS] =
		&armv8_pmuv3_format_attr_group;

	return 0;
}

static int armv8_a53_pmu_init(struct arm_pmu *cpu_pmu)
{
	int ret = armv8_pmu_init(cpu_pmu);
	if (ret)
		return ret;

	cpu_pmu->name = "armv8_cortex_a53";
	cpu_pmu->map_event = armv8_a53_map_event;
	cpu_pmu->attr_groups[ARMPMU_ATTR_GROUP_EVENTS] =
		&armv8_pmuv3_events_attr_group;
	cpu_pmu->attr_groups[ARMPMU_ATTR_GROUP_FORMATS] =
		&armv8_pmuv3_format_attr_group;

	return 0;
}

static int armv8_a57_pmu_init(struct arm_pmu *cpu_pmu)
{
	int ret = armv8_pmu_init(cpu_pmu);
	if (ret)
		return ret;

	cpu_pmu->name = "armv8_cortex_a57";
	cpu_pmu->map_event = armv8_a57_map_event;
	cpu_pmu->attr_groups[ARMPMU_ATTR_GROUP_EVENTS] =
		&armv8_pmuv3_events_attr_group;
	cpu_pmu->attr_groups[ARMPMU_ATTR_GROUP_FORMATS] =
		&armv8_pmuv3_format_attr_group;

	return 0;
}

static int armv8_a72_pmu_init(struct arm_pmu *cpu_pmu)
{
	int ret = armv8_pmu_init(cpu_pmu);
	if (ret)
		return ret;

	cpu_pmu->name = "armv8_cortex_a72";
	cpu_pmu->map_event = armv8_a57_map_event;
	cpu_pmu->attr_groups[ARMPMU_ATTR_GROUP_EVENTS] =
		&armv8_pmuv3_events_attr_group;
	cpu_pmu->attr_groups[ARMPMU_ATTR_GROUP_FORMATS] =
		&armv8_pmuv3_format_attr_group;

	return 0;
}

static int armv8_a73_pmu_init(struct arm_pmu *cpu_pmu)
{
	int ret = armv8_pmu_init(cpu_pmu);
	if (ret)
		return ret;

	cpu_pmu->name = "armv8_cortex_a73";
	cpu_pmu->map_event = armv8_a73_map_event;
	cpu_pmu->attr_groups[ARMPMU_ATTR_GROUP_EVENTS] =
		&armv8_pmuv3_events_attr_group;
	cpu_pmu->attr_groups[ARMPMU_ATTR_GROUP_FORMATS] =
		&armv8_pmuv3_format_attr_group;

	return 0;
}

static int armv8_thunder_pmu_init(struct arm_pmu *cpu_pmu)
{
	int ret = armv8_pmu_init(cpu_pmu);
	if (ret)
		return ret;

	cpu_pmu->name = "armv8_cavium_thunder";
	cpu_pmu->map_event = armv8_thunder_map_event;
	cpu_pmu->attr_groups[ARMPMU_ATTR_GROUP_EVENTS] =
		&armv8_pmuv3_events_attr_group;
	cpu_pmu->attr_groups[ARMPMU_ATTR_GROUP_FORMATS] =
		&armv8_pmuv3_format_attr_group;

	return 0;
}

static int armv8_vulcan_pmu_init(struct arm_pmu *cpu_pmu)
{
	int ret = armv8_pmu_init(cpu_pmu);
	if (ret)
		return ret;

	cpu_pmu->name = "armv8_brcm_vulcan";
	cpu_pmu->map_event = armv8_vulcan_map_event;
	cpu_pmu->attr_groups[ARMPMU_ATTR_GROUP_EVENTS] =
		&armv8_pmuv3_events_attr_group;
	cpu_pmu->attr_groups[ARMPMU_ATTR_GROUP_FORMATS] =
		&armv8_pmuv3_format_attr_group;

	return 0;
}
static const struct of_device_id armv8_pmu_of_device_ids[] = {
	{.compatible = "arm,armv8-pmuv3", .data = armv8_pmuv3_init},
	{.compatible = "arm,cortex-a35-pmu", .data = armv8_a35_pmu_init},
	{.compatible = "arm,cortex-a53-pmu", .data = armv8_a53_pmu_init},
	{.compatible = "arm,cortex-a57-pmu", .data = armv8_a57_pmu_init},
	{.compatible = "arm,cortex-a72-pmu", .data = armv8_a72_pmu_init},
	{.compatible = "arm,cortex-a73-pmu", .data = armv8_a73_pmu_init},
	{.compatible = "cavium,thunder-pmu", .data = armv8_thunder_pmu_init},
	{.compatible = "brcm,vulcan-pmu", .data = armv8_vulcan_pmu_init},
	{},
};

static int armv8_pmu_device_probe(struct platform_device *pdev)
{
	return arm_pmu_device_probe(pdev, armv8_pmu_of_device_ids, NULL);
}

static struct platform_driver armv8_pmu_driver = {
	.driver = {
		.name = ARMV8_PMU_PDEV_NAME,
		.of_match_table = armv8_pmu_of_device_ids,
		.suppress_bind_attrs = true,
	},
	.probe = armv8_pmu_device_probe,
};

static int __init armv8_pmu_driver_init(void)
{
	if (acpi_disabled)
		return platform_driver_register(&armv8_pmu_driver);
	else
		return arm_pmu_acpi_probe(armv8_pmuv3_init);
}
device_initcall(armv8_pmu_driver_init)
void arch_perf_update_userpage(struct perf_event *event,
			       struct perf_event_mmap_page *userpg, u64 now)
{
	u32 freq;
	u32 shift;

	/*
	 * Internal timekeeping for enabled/running/stopped times
	 * is always computed with the sched_clock.
	 */
	freq = arch_timer_get_rate();
	userpg->cap_user_time = 1;

	clocks_calc_mult_shift(&userpg->time_mult, &shift, freq,
			NSEC_PER_SEC, 0);
	/*
	 * time_shift is not expected to be greater than 31 due to
	 * the original published conversion algorithm shifting a
	 * 32-bit value (now specifies a 64-bit value) - refer
	 * perf_event_mmap_page documentation in perf_event.h.
	 */
	if (shift == 32) {
		shift = 31;
		userpg->time_mult >>= 1;
	}
	userpg->time_shift = (u16)shift;
	userpg->time_offset = -now;
}
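
/*
 * Userspace consumption sketch (editorial example, following the
 * perf_event_mmap_page documentation): with cap_user_time set, a reader of
 * the mmap'd perf page can convert a raw counter delta "cyc" to
 * nanoseconds as
 *
 *	quot = cyc >> time_shift;
 *	rem  = cyc & (((u64)1 << time_shift) - 1);
 *	delta_ns = time_offset + quot * time_mult +
 *		   ((rem * time_mult) >> time_shift);
 *
 * which is the conversion the clamping of time_shift to 31 above is
 * accommodating.
 */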