// SPDX-License-Identifier: GPL-2.0
// Copyright (C) 2016-2020 Arm Limited
// CMN-600 Coherent Mesh Network PMU driver

#include <linux/acpi.h>
#include <linux/bitfield.h>
#include <linux/bitops.h>
#include <linux/debugfs.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/io-64-nonatomic-lo-hi.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/perf_event.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/sort.h>

/* Common register stuff */
#define CMN_NODE_INFO 0x0000
#define CMN_NI_NODE_TYPE GENMASK_ULL(15, 0)
#define CMN_NI_NODE_ID GENMASK_ULL(31, 16)
#define CMN_NI_LOGICAL_ID GENMASK_ULL(47, 32)

#define CMN_CHILD_INFO 0x0080
#define CMN_CI_CHILD_COUNT GENMASK_ULL(15, 0)
#define CMN_CI_CHILD_PTR_OFFSET GENMASK_ULL(31, 16)

#define CMN_CHILD_NODE_ADDR GENMASK(29, 0)
#define CMN_CHILD_NODE_EXTERNAL BIT(31)

#define CMN_MAX_DIMENSION 12
#define CMN_MAX_XPS (CMN_MAX_DIMENSION * CMN_MAX_DIMENSION)
#define CMN_MAX_DTMS (CMN_MAX_XPS + (CMN_MAX_DIMENSION - 1) * 4)

/* Currently XPs are the node type we can have most of; others top out at 128 */
#define CMN_MAX_NODES_PER_EVENT CMN_MAX_XPS

/* The CFG node has various info besides the discovery tree */
#define CMN_CFGM_PERIPH_ID_01 0x0008
#define CMN_CFGM_PID0_PART_0 GENMASK_ULL(7, 0)
#define CMN_CFGM_PID1_PART_1 GENMASK_ULL(35, 32)
#define CMN_CFGM_PERIPH_ID_23 0x0010
#define CMN_CFGM_PID2_REVISION GENMASK_ULL(7, 4)

#define CMN_CFGM_INFO_GLOBAL 0x0900
#define CMN_INFO_MULTIPLE_DTM_EN BIT_ULL(63)
#define CMN_INFO_RSP_VC_NUM GENMASK_ULL(53, 52)
#define CMN_INFO_DAT_VC_NUM GENMASK_ULL(51, 50)
#define CMN_INFO_DEVICE_ISO_ENABLE BIT_ULL(44)

#define CMN_CFGM_INFO_GLOBAL_1 0x0908
#define CMN_INFO_SNP_VC_NUM GENMASK_ULL(3, 2)
#define CMN_INFO_REQ_VC_NUM GENMASK_ULL(1, 0)

/* XPs also have some local topology info which has uses too */
#define CMN_MXP__CONNECT_INFO(p) (0x0008 + 8 * (p))
#define CMN__CONNECT_INFO_DEVICE_TYPE GENMASK_ULL(5, 0)

#define CMN_MAX_PORTS 6
#define CI700_CONNECT_INFO_P2_5_OFFSET 0x10

/* PMU registers occupy the 3rd 4KB page of each node's region */
#define CMN_PMU_OFFSET 0x2000
/* ...except when they don't :( */
#define CMN_S3_R1_DTM_OFFSET 0xa000
#define CMN_S3_PMU_OFFSET 0xd900

/* For most nodes, this is all there is */
#define CMN_PMU_EVENT_SEL 0x000
#define CMN__PMU_CBUSY_SNTHROTTLE_SEL GENMASK_ULL(44, 42)
#define CMN__PMU_SN_HOME_SEL GENMASK_ULL(40, 39)
#define CMN__PMU_HBT_LBT_SEL GENMASK_ULL(38, 37)
#define CMN__PMU_CLASS_OCCUP_ID GENMASK_ULL(36, 35)
/* Technically this is 4 bits wide on DNs, but we only use 2 there anyway */
#define CMN__PMU_OCCUP1_ID GENMASK_ULL(34, 32)

/* Some types are designed to coexist with another device in the same node */
#define CMN_CCLA_PMU_EVENT_SEL 0x008
#define CMN_HNP_PMU_EVENT_SEL 0x008

/* DTMs live in the PMU space of XP registers */
#define CMN_DTM_WPn(n) (0x1A0 + (n) * 0x18)
#define CMN_DTM_WPn_CONFIG(n) (CMN_DTM_WPn(n) + 0x00)
#define CMN_DTM_WPn_CONFIG_WP_CHN_NUM GENMASK_ULL(20, 19)
#define CMN_DTM_WPn_CONFIG_WP_DEV_SEL2 GENMASK_ULL(18, 17)
#define CMN_DTM_WPn_CONFIG_WP_COMBINE BIT(9)
#define CMN_DTM_WPn_CONFIG_WP_EXCLUSIVE BIT(8)
#define CMN600_WPn_CONFIG_WP_COMBINE BIT(6)
#define CMN600_WPn_CONFIG_WP_EXCLUSIVE BIT(5)
#define CMN_DTM_WPn_CONFIG_WP_GRP GENMASK_ULL(5, 4)
#define CMN_DTM_WPn_CONFIG_WP_CHN_SEL GENMASK_ULL(3, 1)
#define CMN_DTM_WPn_CONFIG_WP_DEV_SEL BIT(0)
#define CMN_DTM_WPn_VAL(n) (CMN_DTM_WPn(n) + 0x08)
#define CMN_DTM_WPn_MASK(n) (CMN_DTM_WPn(n) + 0x10)

#define CMN_DTM_PMU_CONFIG 0x210
#define CMN__PMEVCNT0_INPUT_SEL GENMASK_ULL(37, 32)
#define CMN__PMEVCNT0_INPUT_SEL_WP 0x00
#define CMN__PMEVCNT0_INPUT_SEL_XP 0x04
#define CMN__PMEVCNT0_INPUT_SEL_DEV 0x10
#define CMN__PMEVCNT0_GLOBAL_NUM GENMASK_ULL(18, 16)
#define CMN__PMEVCNTn_GLOBAL_NUM_SHIFT(n) ((n) * 4)
#define CMN__PMEVCNT_PAIRED(n) BIT(4 + (n))
#define CMN__PMEVCNT23_COMBINED BIT(2)
#define CMN__PMEVCNT01_COMBINED BIT(1)
#define CMN_DTM_PMU_CONFIG_PMU_EN BIT(0)

#define CMN_DTM_PMEVCNT 0x220
#define CMN_DTM_PMEVCNTSR 0x240

#define CMN650_DTM_UNIT_INFO 0x0910
#define CMN_DTM_UNIT_INFO 0x0960
#define CMN_DTM_UNIT_INFO_DTC_DOMAIN GENMASK_ULL(1, 0)

#define CMN_DTM_NUM_COUNTERS 4
/* Want more local counters? Why not replicate the whole DTM! Ugh... */
#define CMN_DTM_OFFSET(n) ((n) * 0x200)

/* The DTC node is where the magic happens */
#define CMN_DT_DTC_CTL 0x0a00
#define CMN_DT_DTC_CTL_DT_EN BIT(0)
#define CMN_DT_DTC_CTL_CG_DISABLE BIT(10)

/* DTC counters are paired in 64-bit registers on a 16-byte stride. Yuck */
#define _CMN_DT_CNT_REG(n) ((((n) / 2) * 4 + (n) % 2) * 4)
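/*
 * i.e. counters 0 and 1 live at +0x0 and +0x4 within one 64-bit register,
 * counters 2 and 3 at +0x10 and +0x14, and so on up the 16-byte stride.
 */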
#define CMN_DT_PMEVCNT(dtc, n) ((dtc)->pmu_base + _CMN_DT_CNT_REG(n))
#define CMN_DT_PMCCNTR(dtc) ((dtc)->pmu_base + 0x40)

#define CMN_DT_PMEVCNTSR(dtc, n) ((dtc)->pmu_base + 0x50 + _CMN_DT_CNT_REG(n))
#define CMN_DT_PMCCNTRSR(dtc) ((dtc)->pmu_base + 0x90)

#define CMN_DT_PMCR(dtc) ((dtc)->pmu_base + 0x100)
#define CMN_DT_PMCR_PMU_EN BIT(0)
#define CMN_DT_PMCR_CNTR_RST BIT(5)
#define CMN_DT_PMCR_OVFL_INTR_EN BIT(6)

#define CMN_DT_PMOVSR(dtc) ((dtc)->pmu_base + 0x118)
#define CMN_DT_PMOVSR_CLR(dtc) ((dtc)->pmu_base + 0x120)

#define CMN_DT_PMSSR(dtc) ((dtc)->pmu_base + 0x128)
#define CMN_DT_PMSSR_SS_STATUS(n) BIT(n)

#define CMN_DT_PMSRR(dtc) ((dtc)->pmu_base + 0x130)
#define CMN_DT_PMSRR_SS_REQ BIT(0)

#define CMN_DT_NUM_COUNTERS 8
#define CMN_MAX_DTCS 4

/*
 * Even in the worst case a DTC counter can't wrap in fewer than 2^42 cycles,
 * so throwing away one bit to make overflow handling easy is no big deal.
 */
#define CMN_COUNTER_INIT 0x80000000
/* Similarly for the 40-bit cycle counter */
#define CMN_CC_INIT 0x8000000000ULL
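/*
 * Both initial values sit halfway through the effective counter range, so an
 * overflow can't be due until at least 2^31 more events or 2^39 more cycles.
 */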
/* Event attributes */
#define CMN_CONFIG_TYPE GENMASK_ULL(15, 0)
#define CMN_CONFIG_EVENTID GENMASK_ULL(26, 16)
#define CMN_CONFIG_OCCUPID GENMASK_ULL(30, 27)
#define CMN_CONFIG_BYNODEID BIT_ULL(31)
#define CMN_CONFIG_NODEID GENMASK_ULL(47, 32)

#define CMN_EVENT_TYPE(event) FIELD_GET(CMN_CONFIG_TYPE, (event)->attr.config)
#define CMN_EVENT_EVENTID(event) FIELD_GET(CMN_CONFIG_EVENTID, (event)->attr.config)
#define CMN_EVENT_OCCUPID(event) FIELD_GET(CMN_CONFIG_OCCUPID, (event)->attr.config)
#define CMN_EVENT_BYNODEID(event) FIELD_GET(CMN_CONFIG_BYNODEID, (event)->attr.config)
#define CMN_EVENT_NODEID(event) FIELD_GET(CMN_CONFIG_NODEID, (event)->attr.config)

#define CMN_CONFIG_WP_COMBINE GENMASK_ULL(30, 27)
#define CMN_CONFIG_WP_DEV_SEL GENMASK_ULL(50, 48)
#define CMN_CONFIG_WP_CHN_SEL GENMASK_ULL(55, 51)
#define CMN_CONFIG_WP_GRP GENMASK_ULL(57, 56)
#define CMN_CONFIG_WP_EXCLUSIVE BIT_ULL(58)
#define CMN_CONFIG1_WP_VAL GENMASK_ULL(63, 0)
#define CMN_CONFIG2_WP_MASK GENMASK_ULL(63, 0)

#define CMN_EVENT_WP_COMBINE(event) FIELD_GET(CMN_CONFIG_WP_COMBINE, (event)->attr.config)
#define CMN_EVENT_WP_DEV_SEL(event) FIELD_GET(CMN_CONFIG_WP_DEV_SEL, (event)->attr.config)
#define CMN_EVENT_WP_CHN_SEL(event) FIELD_GET(CMN_CONFIG_WP_CHN_SEL, (event)->attr.config)
#define CMN_EVENT_WP_GRP(event) FIELD_GET(CMN_CONFIG_WP_GRP, (event)->attr.config)
#define CMN_EVENT_WP_EXCLUSIVE(event) FIELD_GET(CMN_CONFIG_WP_EXCLUSIVE, (event)->attr.config)
#define CMN_EVENT_WP_VAL(event) FIELD_GET(CMN_CONFIG1_WP_VAL, (event)->attr.config1)
#define CMN_EVENT_WP_MASK(event) FIELD_GET(CMN_CONFIG2_WP_MASK, (event)->attr.config2)

/* Made-up event IDs for watchpoint direction */
#define CMN_WP_UP 0
#define CMN_WP_DOWN 2
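/*
 * A watchpoint compares passing flits against the 64-bit value/mask pair
 * taken from config1/config2; the made-up event ID picks which direction
 * (upload or download) of the XP interface it watches.
 */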
/* Internal values for encoding event support */
enum cmn_model {
	CMN600 = 1,
	CMN650 = 2,
	CMN700 = 4,
	CI700 = 8,
	CMNS3 = 16,
	/* ...and then we can use bitmap tricks for commonality */
	CMN_ANY = -1,
	NOT_CMN600 = -2,
	CMN_650ON = CMN650 | CMN700 | CMNS3,
};
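/*
 * As two's-complement masks, CMN_ANY has every model bit set and NOT_CMN600
 * every bit except CMN600's, so a plain "model & arm_cmn_model(cmn)" test in
 * arm_cmn_event_attr_is_visible() covers all of these encodings at once.
 */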
/* Actual part numbers and revision IDs defined by the hardware */
enum cmn_part {
	PART_CMN600 = 0x434,
	PART_CMN650 = 0x436,
	PART_CMN700 = 0x43c,
	PART_CI700 = 0x43a,
	PART_CMN_S3 = 0x43e,
};

/* CMN-600 r0px shouldn't exist in silicon, thankfully */
enum cmn_revision {
	REV_CMN600_R1P0,
	REV_CMN600_R1P1,
	REV_CMN600_R1P2,
	REV_CMN600_R1P3,
	REV_CMN600_R2P0,
	REV_CMN600_R3P0,
	REV_CMN600_R3P1,
	REV_CMN650_R0P0 = 0,
	REV_CMN650_R1P0,
	REV_CMN650_R1P1,
	REV_CMN650_R2P0,
	REV_CMN650_R1P2,
	REV_CMN700_R0P0 = 0,
	REV_CMN700_R1P0,
	REV_CMN700_R2P0,
	REV_CMN700_R3P0,
	REV_CMNS3_R0P0 = 0,
	REV_CMNS3_R0P1,
	REV_CMNS3_R1P0,
	REV_CI700_R0P0 = 0,
	REV_CI700_R1P0,
	REV_CI700_R2P0,
};

enum cmn_node_type {
	CMN_TYPE_INVALID,
	CMN_TYPE_DVM,
	CMN_TYPE_CFG,
	CMN_TYPE_DTC,
	CMN_TYPE_HNI,
	CMN_TYPE_HNF,
	CMN_TYPE_XP,
	CMN_TYPE_SBSX,
	CMN_TYPE_MPAM_S,
	CMN_TYPE_MPAM_NS,
	CMN_TYPE_RNI,
	CMN_TYPE_RND = 0xd,
	CMN_TYPE_RNSAM = 0xf,
	CMN_TYPE_MTSX,
	CMN_TYPE_HNP,
	CMN_TYPE_CXRA = 0x100,
	CMN_TYPE_CXHA,
	CMN_TYPE_CXLA,
	CMN_TYPE_CCRA,
	CMN_TYPE_CCHA,
	CMN_TYPE_CCLA,
	CMN_TYPE_CCLA_RNI,
	CMN_TYPE_HNS = 0x200,
	CMN_TYPE_HNS_MPAM_S,
	CMN_TYPE_HNS_MPAM_NS,
	CMN_TYPE_APB = 0x1000,
	/* Not a real node type */
	CMN_TYPE_WP = 0x7770
};

enum cmn_filter_select {
	SEL_NONE = -1,
	SEL_OCCUP1ID,
	SEL_CLASS_OCCUP_ID,
	SEL_CBUSY_SNTHROTTLE_SEL,
	SEL_HBT_LBT_SEL,
	SEL_SN_HOME_SEL,
	SEL_MAX
};

struct arm_cmn_node {
	void __iomem *pmu_base;
	u16 id, logid;
	enum cmn_node_type type;

	/* XP properties really, but replicated to children for convenience */
	u8 dtm;
	s8 dtc;
	u8 portid_bits:4;
	u8 deviceid_bits:4;
	/* DN/HN-F/CXHA */
	struct {
		u8 val : 4;
		u8 count : 4;
	} occupid[SEL_MAX];
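	/*
	 * Local shadow of the node's PMU event selection: one event ID per
	 * DTM counter, overlaid so it can also be accessed as whole 32-bit
	 * (or wide 64-bit) register values.
	 */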
	union {
		u8 event[4];
		__le32 event_sel;
		u16 event_w[4];
		__le64 event_sel_w;
	};
};

struct arm_cmn_dtm {
	void __iomem *base;
	u32 pmu_config_low;
	union {
		u8 input_sel[4];
		__le32 pmu_config_high;
	};
	s8 wp_event[4];
};

struct arm_cmn_dtc {
	void __iomem *base;
	void __iomem *pmu_base;
	int irq;
	s8 irq_friend;
	bool cc_active;

	struct perf_event *counters[CMN_DT_NUM_COUNTERS];
	struct perf_event *cycles;
};

#define CMN_STATE_DISABLED BIT(0)
#define CMN_STATE_TXN BIT(1)

struct arm_cmn {
	struct device *dev;
	void __iomem *base;
	unsigned int state;

	enum cmn_revision rev;
	enum cmn_part part;
	u8 mesh_x;
	u8 mesh_y;
	u16 num_xps;
	u16 num_dns;
	bool multi_dtm;
	u8 ports_used;
	struct {
		unsigned int rsp_vc_num : 2;
		unsigned int dat_vc_num : 2;
		unsigned int snp_vc_num : 2;
		unsigned int req_vc_num : 2;
	};

	struct arm_cmn_node *xps;
	struct arm_cmn_node *dns;

	struct arm_cmn_dtm *dtms;
	struct arm_cmn_dtc *dtc;
	unsigned int num_dtcs;

	int cpu;
	struct hlist_node cpuhp_node;

	struct pmu pmu;
	struct dentry *debug;
};

#define to_cmn(p) container_of(p, struct arm_cmn, pmu)

static int arm_cmn_hp_state;

struct arm_cmn_nodeid {
	u8 port;
	u8 dev;
};
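/* Each mesh coordinate gets just enough ID bits to span its dimension */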
static int arm_cmn_xyidbits(const struct arm_cmn *cmn)
{
	return fls((cmn->mesh_x - 1) | (cmn->mesh_y - 1));
}
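/*
 * A node ID is the concatenation of fields X:Y:port:device, e.g. 3 bits
 * each of X and Y for an 8x6 mesh. arm_cmn_nid() extracts the local
 * port/device parts, while arm_cmn_node_to_xp() uses the remaining X,Y
 * coordinates to locate the node's crosspoint in the XP array.
 */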
static struct arm_cmn_nodeid arm_cmn_nid(const struct arm_cmn_node *dn)
{
	struct arm_cmn_nodeid nid;

	nid.dev = dn->id & ((1U << dn->deviceid_bits) - 1);
	nid.port = (dn->id >> dn->deviceid_bits) & ((1U << dn->portid_bits) - 1);
	return nid;
}

static struct arm_cmn_node *arm_cmn_node_to_xp(const struct arm_cmn *cmn,
					       const struct arm_cmn_node *dn)
{
	int id = dn->id >> (dn->portid_bits + dn->deviceid_bits);
	int bits = arm_cmn_xyidbits(cmn);
	int x = id >> bits;
	int y = id & ((1U << bits) - 1);

	return cmn->xps + cmn->mesh_x * y + x;
}
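/* cmn->dns is terminated by a CMN_TYPE_INVALID (zero-type) sentinel entry */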
static struct arm_cmn_node *arm_cmn_node(const struct arm_cmn *cmn,
					 enum cmn_node_type type)
{
	struct arm_cmn_node *dn;

	for (dn = cmn->dns; dn->type; dn++)
		if (dn->type == type)
			return dn;
	return NULL;
}

static enum cmn_model arm_cmn_model(const struct arm_cmn *cmn)
{
	switch (cmn->part) {
	case PART_CMN600:
		return CMN600;
	case PART_CMN650:
		return CMN650;
	case PART_CMN700:
		return CMN700;
	case PART_CI700:
		return CI700;
	case PART_CMN_S3:
		return CMNS3;
	default:
		return 0;
	};
}

static int arm_cmn_pmu_offset(const struct arm_cmn *cmn, const struct arm_cmn_node *dn)
{
	if (cmn->part == PART_CMN_S3) {
		if (cmn->rev >= REV_CMNS3_R1P0 && dn->type == CMN_TYPE_XP)
			return CMN_S3_R1_DTM_OFFSET;
		return CMN_S3_PMU_OFFSET;
	}
	return CMN_PMU_OFFSET;
}
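/*
 * The CONNECT_INFO registers sit at the very start of an XP's node region,
 * but we only keep a pointer to its PMU page, hence working backwards from
 * pmu_base by the PMU offset here.
 */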
static u32 arm_cmn_device_connect_info(const struct arm_cmn *cmn,
				       const struct arm_cmn_node *xp, int port)
{
	int offset = CMN_MXP__CONNECT_INFO(port) - arm_cmn_pmu_offset(cmn, xp);

	if (port >= 2) {
		if (cmn->part == PART_CMN600 || cmn->part == PART_CMN650)
			return 0;
		/*
		 * CI-700 may have extra ports, but still has the
		 * mesh_port_connect_info registers in the way.
		 */
		if (cmn->part == PART_CI700)
			offset += CI700_CONNECT_INFO_P2_5_OFFSET;
	}

	return readl_relaxed(xp->pmu_base + offset);
}

static struct dentry *arm_cmn_debugfs;

#ifdef CONFIG_DEBUG_FS
static const char *arm_cmn_device_type(u8 type)
{
	switch(FIELD_GET(CMN__CONNECT_INFO_DEVICE_TYPE, type)) {
	case 0x00: return "        |";
	case 0x01: return "  RN-I  |";
	case 0x02: return "  RN-D  |";
	case 0x04: return " RN-F_B |";
	case 0x05: return "RN-F_B_E|";
	case 0x06: return " RN-F_A |";
	case 0x07: return "RN-F_A_E|";
	case 0x08: return "  HN-T  |";
	case 0x09: return "  HN-I  |";
	case 0x0a: return "  HN-D  |";
	case 0x0b: return "  HN-P  |";
	case 0x0c: return "  SN-F  |";
	case 0x0d: return "  SBSX  |";
	case 0x0e: return "  HN-F  |";
	case 0x0f: return " SN-F_E |";
	case 0x10: return " SN-F_D |";
	case 0x11: return "  CXHA  |";
	case 0x12: return "  CXRA  |";
	case 0x13: return "  CXRH  |";
	case 0x14: return " RN-F_D |";
	case 0x15: return "RN-F_D_E|";
	case 0x16: return " RN-F_C |";
	case 0x17: return "RN-F_C_E|";
	case 0x18: return " RN-F_E |";
	case 0x19: return "RN-F_E_E|";
	case 0x1a: return "  HN-S  |";
	case 0x1b: return "  LCN   |";
	case 0x1c: return "  MTSX  |";
	case 0x1d: return "  HN-V  |";
	case 0x1e: return "  CCG   |";
	case 0x20: return " RN-F_F |";
	case 0x21: return "RN-F_F_E|";
	case 0x22: return " SN-F_F |";
	default:   return "  ????  |";
	}
}

static void arm_cmn_show_logid(struct seq_file *s, const struct arm_cmn_node *xp, int p, int d)
{
	struct arm_cmn *cmn = s->private;
	struct arm_cmn_node *dn;
	u16 id = xp->id | d | (p << xp->deviceid_bits);

	for (dn = cmn->dns; dn->type; dn++) {
		int pad = dn->logid < 10;

		if (dn->type == CMN_TYPE_XP)
			continue;
		/* Ignore the extra components that will overlap on some ports */
		if (dn->type < CMN_TYPE_HNI)
			continue;

		if (dn->id != id)
			continue;

		seq_printf(s, " %*c#%-*d  |", pad + 1, ' ', 3 - pad, dn->logid);
		return;
	}
	seq_puts(s, "        |");
}

static int arm_cmn_map_show(struct seq_file *s, void *data)
{
	struct arm_cmn *cmn = s->private;
	int x, y, p, pmax = fls(cmn->ports_used);

	seq_puts(s, "     X");
	for (x = 0; x < cmn->mesh_x; x++)
		seq_printf(s, "    %-2d   ", x);
	seq_puts(s, "\nY P D+");
	y = cmn->mesh_y;
	while (y--) {
		int xp_base = cmn->mesh_x * y;
		struct arm_cmn_node *xp = cmn->xps + xp_base;
		u8 port[CMN_MAX_PORTS][CMN_MAX_DIMENSION];

		for (x = 0; x < cmn->mesh_x; x++)
			seq_puts(s, "--------+");
		seq_printf(s, "\n%-2d   |", y);
		for (x = 0; x < cmn->mesh_x; x++) {
			for (p = 0; p < CMN_MAX_PORTS; p++)
				port[p][x] = arm_cmn_device_connect_info(cmn, xp + x, p);
			seq_printf(s, " XP #%-3d|", xp_base + x);
		}

		seq_puts(s, "\n     |");
		for (x = 0; x < cmn->mesh_x; x++) {
			s8 dtc = xp[x].dtc;

			if (dtc < 0)
				seq_puts(s, " DTC ?? |");
			else
				seq_printf(s, " DTC %d  |", dtc);
		}
		seq_puts(s, "\n     |");
		for (x = 0; x < cmn->mesh_x; x++)
			seq_puts(s, "........|");

		for (p = 0; p < pmax; p++) {
			seq_printf(s, "\n  %d  |", p);
			for (x = 0; x < cmn->mesh_x; x++)
				seq_puts(s, arm_cmn_device_type(port[p][x]));
			seq_puts(s, "\n    0|");
			for (x = 0; x < cmn->mesh_x; x++)
				arm_cmn_show_logid(s, xp + x, p, 0);
			seq_puts(s, "\n    1|");
			for (x = 0; x < cmn->mesh_x; x++)
				arm_cmn_show_logid(s, xp + x, p, 1);
		}
		seq_puts(s, "\n-----+");
	}
	for (x = 0; x < cmn->mesh_x; x++)
		seq_puts(s, "--------+");
	seq_puts(s, "\n");
	return 0;
}
DEFINE_SHOW_ATTRIBUTE(arm_cmn_map);

static void arm_cmn_debugfs_init(struct arm_cmn *cmn, int id)
{
	const char *name = "map";

	if (id > 0)
		name = devm_kasprintf(cmn->dev, GFP_KERNEL, "map_%d", id);
	if (!name)
		return;

	cmn->debug = debugfs_create_file(name, 0444, arm_cmn_debugfs, cmn, &arm_cmn_map_fops);
}
#else
static void arm_cmn_debugfs_init(struct arm_cmn *cmn, int id) {}
#endif
struct arm_cmn_hw_event {
	struct arm_cmn_node *dn;
	u64 dtm_idx[DIV_ROUND_UP(CMN_MAX_NODES_PER_EVENT * 2, 64)];
	s8 dtc_idx[CMN_MAX_DTCS];
	u8 num_dns;
	u8 dtm_offset;

	/*
	 * WP config registers are divided into UP and DOWN halves, so we
	 * only need to keep track of which half each XP's watchpoint uses.
	 */
	DECLARE_BITMAP(wp_idx, CMN_MAX_XPS);

	bool wide_sel;
	enum cmn_filter_select filter_sel;
};
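/* We stash the whole event state in event->hw, up to hw_perf_event::target */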
static_assert(sizeof(struct arm_cmn_hw_event) <= offsetof(struct hw_perf_event, target));

#define for_each_hw_dn(hw, dn, i) \
	for (i = 0, dn = hw->dn; i < hw->num_dns; i++, dn++)

/* @i is the DTC number, @idx is the counter index on that DTC */
#define for_each_hw_dtc_idx(hw, i, idx) \
	for (int i = 0, idx; i < CMN_MAX_DTCS; i++) if ((idx = hw->dtc_idx[i]) >= 0)

static struct arm_cmn_hw_event *to_cmn_hw(struct perf_event *event)
{
	return (struct arm_cmn_hw_event *)&event->hw;
}
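/*
 * dtm_idx[] above packs one 2-bit value per node, 32 values per u64: @pos
 * is the node's index within the event, @val (in practice the node's DTM
 * counter slot, 0-3) is assumed to be written at most once per slot.
 */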
static void arm_cmn_set_index(u64 x[], unsigned int pos, unsigned int val)
{
	x[pos / 32] |= (u64)val << ((pos % 32) * 2);
}

static unsigned int arm_cmn_get_index(u64 x[], unsigned int pos)
{
	return (x[pos / 32] >> ((pos % 32) * 2)) & 3;
}

static void arm_cmn_set_wp_idx(unsigned long *wp_idx, unsigned int pos, bool val)
{
	if (val)
		set_bit(pos, wp_idx);
}

static unsigned int arm_cmn_get_wp_idx(unsigned long *wp_idx, unsigned int pos)
{
	return test_bit(pos, wp_idx);
}

struct arm_cmn_event_attr {
	struct device_attribute attr;
	enum cmn_model model;
	enum cmn_node_type type;
	enum cmn_filter_select fsel;
	u16 eventid;
	u8 occupid;
};

struct arm_cmn_format_attr {
	struct device_attribute attr;
	u64 field;
	int config;
};

#define _CMN_EVENT_ATTR(_model, _name, _type, _eventid, _occupid, _fsel)\
	(&((struct arm_cmn_event_attr[]) {{				\
		.attr = __ATTR(_name, 0444, arm_cmn_event_show, NULL),	\
		.model = _model,					\
		.type = _type,						\
		.eventid = _eventid,					\
		.occupid = _occupid,					\
		.fsel = _fsel,						\
	}})[0].attr.attr)

#define CMN_EVENT_ATTR(_model, _name, _type, _eventid)			\
	_CMN_EVENT_ATTR(_model, _name, _type, _eventid, 0, SEL_NONE)
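/*
 * The compound-literal dance above gives each event a statically-allocated
 * arm_cmn_event_attr, while letting the macro itself expand to the bare
 * "struct attribute *" pointer that the attrs array below expects.
 */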
static ssize_t arm_cmn_event_show(struct device *dev,
				  struct device_attribute *attr, char *buf)
{
	struct arm_cmn_event_attr *eattr;

	eattr = container_of(attr, typeof(*eattr), attr);

	if (eattr->type == CMN_TYPE_DTC)
		return sysfs_emit(buf, "type=0x%x\n", eattr->type);

	if (eattr->type == CMN_TYPE_WP)
		return sysfs_emit(buf,
				  "type=0x%x,eventid=0x%x,wp_dev_sel=?,wp_chn_sel=?,wp_grp=?,wp_val=?,wp_mask=?\n",
				  eattr->type, eattr->eventid);

	if (eattr->fsel > SEL_NONE)
		return sysfs_emit(buf, "type=0x%x,eventid=0x%x,occupid=0x%x\n",
				  eattr->type, eattr->eventid, eattr->occupid);

	return sysfs_emit(buf, "type=0x%x,eventid=0x%x\n", eattr->type,
			  eattr->eventid);
}

static umode_t arm_cmn_event_attr_is_visible(struct kobject *kobj,
					     struct attribute *attr,
					     int unused)
{
	struct device *dev = kobj_to_dev(kobj);
	struct arm_cmn *cmn = to_cmn(dev_get_drvdata(dev));
	struct arm_cmn_event_attr *eattr;
	enum cmn_node_type type;
	u16 eventid;

	eattr = container_of(attr, typeof(*eattr), attr.attr);

	if (!(eattr->model & arm_cmn_model(cmn)))
		return 0;

	type = eattr->type;
	eventid = eattr->eventid;

	/* Watchpoints aren't nodes, so avoid confusion */
	if (type == CMN_TYPE_WP)
		return attr->mode;

	/* Hide XP events for unused interfaces/channels */
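	/*
	 * XP event IDs encode an interface in bits [4:2] (0-3 are the mesh
	 * directions, 4-7 the device ports) and a channel in bits [8:5],
	 * mirroring the CMN_EVENT_XP() macro encodings further down.
	 */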
	if (type == CMN_TYPE_XP) {
		unsigned int intf = (eventid >> 2) & 7;
		unsigned int chan = eventid >> 5;

		if ((intf & 4) && !(cmn->ports_used & BIT(intf & 3)))
			return 0;

		if (chan == 4 && cmn->part == PART_CMN600)
			return 0;

		if ((chan == 5 && cmn->rsp_vc_num < 2) ||
		    (chan == 6 && cmn->dat_vc_num < 2) ||
		    (chan == 7 && cmn->req_vc_num < 2) ||
		    (chan == 8 && cmn->snp_vc_num < 2))
			return 0;
	}

	/* Revision-specific differences */
	if (cmn->part == PART_CMN600) {
		if (cmn->rev < REV_CMN600_R1P3) {
			if (type == CMN_TYPE_CXRA && eventid > 0x10)
				return 0;
		}
		if (cmn->rev < REV_CMN600_R1P2) {
			if (type == CMN_TYPE_HNF && eventid == 0x1b)
				return 0;
			if (type == CMN_TYPE_CXRA || type == CMN_TYPE_CXHA)
				return 0;
		}
	} else if (cmn->part == PART_CMN650) {
		if (cmn->rev < REV_CMN650_R2P0 || cmn->rev == REV_CMN650_R1P2) {
			if (type == CMN_TYPE_HNF && eventid > 0x22)
				return 0;
			if (type == CMN_TYPE_SBSX && eventid == 0x17)
				return 0;
			if (type == CMN_TYPE_RNI && eventid > 0x10)
				return 0;
		}
	} else if (cmn->part == PART_CMN700) {
		if (cmn->rev < REV_CMN700_R2P0) {
			if (type == CMN_TYPE_HNF && eventid > 0x2c)
				return 0;
			if (type == CMN_TYPE_CCHA && eventid > 0x74)
				return 0;
			if (type == CMN_TYPE_CCLA && eventid > 0x27)
				return 0;
		}
		if (cmn->rev < REV_CMN700_R1P0) {
			if (type == CMN_TYPE_HNF && eventid > 0x2b)
				return 0;
		}
	}

	if (!arm_cmn_node(cmn, type))
		return 0;

	return attr->mode;
}

#define _CMN_EVENT_DVM(_model, _name, _event, _occup, _fsel)	\
	_CMN_EVENT_ATTR(_model, dn_##_name, CMN_TYPE_DVM, _event, _occup, _fsel)
#define CMN_EVENT_DTC(_name)					\
	CMN_EVENT_ATTR(CMN_ANY, dtc_##_name, CMN_TYPE_DTC, 0)
#define CMN_EVENT_HNF(_model, _name, _event)			\
	CMN_EVENT_ATTR(_model, hnf_##_name, CMN_TYPE_HNF, _event)
#define CMN_EVENT_HNI(_name, _event)				\
	CMN_EVENT_ATTR(CMN_ANY, hni_##_name, CMN_TYPE_HNI, _event)
#define CMN_EVENT_HNP(_name, _event)				\
	CMN_EVENT_ATTR(CMN_ANY, hnp_##_name, CMN_TYPE_HNP, _event)
#define __CMN_EVENT_XP(_name, _event)				\
	CMN_EVENT_ATTR(CMN_ANY, mxp_##_name, CMN_TYPE_XP, _event)
#define CMN_EVENT_SBSX(_model, _name, _event)			\
	CMN_EVENT_ATTR(_model, sbsx_##_name, CMN_TYPE_SBSX, _event)
#define CMN_EVENT_RNID(_model, _name, _event)			\
	CMN_EVENT_ATTR(_model, rnid_##_name, CMN_TYPE_RNI, _event)
#define CMN_EVENT_MTSX(_name, _event)				\
	CMN_EVENT_ATTR(CMN_ANY, mtsx_##_name, CMN_TYPE_MTSX, _event)
#define CMN_EVENT_CXRA(_model, _name, _event)			\
	CMN_EVENT_ATTR(_model, cxra_##_name, CMN_TYPE_CXRA, _event)
#define CMN_EVENT_CXHA(_name, _event)				\
	CMN_EVENT_ATTR(CMN_ANY, cxha_##_name, CMN_TYPE_CXHA, _event)
#define CMN_EVENT_CCRA(_name, _event)				\
	CMN_EVENT_ATTR(CMN_ANY, ccra_##_name, CMN_TYPE_CCRA, _event)
#define CMN_EVENT_CCHA(_model, _name, _event)			\
	CMN_EVENT_ATTR(_model, ccha_##_name, CMN_TYPE_CCHA, _event)
#define CMN_EVENT_CCLA(_name, _event)				\
	CMN_EVENT_ATTR(CMN_ANY, ccla_##_name, CMN_TYPE_CCLA, _event)
#define CMN_EVENT_CCLA_RNI(_name, _event)			\
	CMN_EVENT_ATTR(CMN_ANY, ccla_rni_##_name, CMN_TYPE_CCLA_RNI, _event)
#define CMN_EVENT_HNS(_name, _event)				\
	CMN_EVENT_ATTR(CMN_ANY, hns_##_name, CMN_TYPE_HNS, _event)

#define CMN_EVENT_DVM(_model, _name, _event)			\
	_CMN_EVENT_DVM(_model, _name, _event, 0, SEL_NONE)
#define CMN_EVENT_DVM_OCC(_model, _name, _event)			\
	_CMN_EVENT_DVM(_model, _name##_all, _event, 0, SEL_OCCUP1ID),	\
	_CMN_EVENT_DVM(_model, _name##_dvmop, _event, 1, SEL_OCCUP1ID),	\
	_CMN_EVENT_DVM(_model, _name##_dvmsync, _event, 2, SEL_OCCUP1ID)

#define CMN_EVENT_HN_OCC(_model, _name, _type, _event)		\
	_CMN_EVENT_ATTR(_model, _name##_all, _type, _event, 0, SEL_OCCUP1ID), \
	_CMN_EVENT_ATTR(_model, _name##_read, _type, _event, 1, SEL_OCCUP1ID), \
	_CMN_EVENT_ATTR(_model, _name##_write, _type, _event, 2, SEL_OCCUP1ID), \
	_CMN_EVENT_ATTR(_model, _name##_atomic, _type, _event, 3, SEL_OCCUP1ID), \
	_CMN_EVENT_ATTR(_model, _name##_stash, _type, _event, 4, SEL_OCCUP1ID)
#define CMN_EVENT_HN_CLS(_model, _name, _type, _event)		\
	_CMN_EVENT_ATTR(_model, _name##_class0, _type, _event, 0, SEL_CLASS_OCCUP_ID), \
	_CMN_EVENT_ATTR(_model, _name##_class1, _type, _event, 1, SEL_CLASS_OCCUP_ID), \
	_CMN_EVENT_ATTR(_model, _name##_class2, _type, _event, 2, SEL_CLASS_OCCUP_ID), \
	_CMN_EVENT_ATTR(_model, _name##_class3, _type, _event, 3, SEL_CLASS_OCCUP_ID)
#define CMN_EVENT_HN_SNT(_model, _name, _type, _event)		\
	_CMN_EVENT_ATTR(_model, _name##_all, _type, _event, 0, SEL_CBUSY_SNTHROTTLE_SEL), \
	_CMN_EVENT_ATTR(_model, _name##_group0_read, _type, _event, 1, SEL_CBUSY_SNTHROTTLE_SEL), \
	_CMN_EVENT_ATTR(_model, _name##_group0_write, _type, _event, 2, SEL_CBUSY_SNTHROTTLE_SEL), \
	_CMN_EVENT_ATTR(_model, _name##_group1_read, _type, _event, 3, SEL_CBUSY_SNTHROTTLE_SEL), \
	_CMN_EVENT_ATTR(_model, _name##_group1_write, _type, _event, 4, SEL_CBUSY_SNTHROTTLE_SEL), \
	_CMN_EVENT_ATTR(_model, _name##_read, _type, _event, 5, SEL_CBUSY_SNTHROTTLE_SEL), \
	_CMN_EVENT_ATTR(_model, _name##_write, _type, _event, 6, SEL_CBUSY_SNTHROTTLE_SEL)

#define CMN_EVENT_HNF_OCC(_model, _name, _event)			\
	CMN_EVENT_HN_OCC(_model, hnf_##_name, CMN_TYPE_HNF, _event)
#define CMN_EVENT_HNF_CLS(_model, _name, _event)			\
	CMN_EVENT_HN_CLS(_model, hnf_##_name, CMN_TYPE_HNF, _event)
#define CMN_EVENT_HNF_SNT(_model, _name, _event)			\
	CMN_EVENT_HN_SNT(_model, hnf_##_name, CMN_TYPE_HNF, _event)

#define CMN_EVENT_HNS_OCC(_name, _event)				\
	CMN_EVENT_HN_OCC(CMN_ANY, hns_##_name, CMN_TYPE_HNS, _event), \
	_CMN_EVENT_ATTR(CMN_ANY, hns_##_name##_rxsnp, CMN_TYPE_HNS, _event, 5, SEL_OCCUP1ID), \
	_CMN_EVENT_ATTR(CMN_ANY, hns_##_name##_lbt, CMN_TYPE_HNS, _event, 6, SEL_OCCUP1ID), \
	_CMN_EVENT_ATTR(CMN_ANY, hns_##_name##_hbt, CMN_TYPE_HNS, _event, 7, SEL_OCCUP1ID)
#define CMN_EVENT_HNS_CLS(_name, _event)				\
	CMN_EVENT_HN_CLS(CMN_ANY, hns_##_name, CMN_TYPE_HNS, _event)
#define CMN_EVENT_HNS_SNT(_name, _event)				\
	CMN_EVENT_HN_SNT(CMN_ANY, hns_##_name, CMN_TYPE_HNS, _event)
#define CMN_EVENT_HNS_HBT(_name, _event)				\
	_CMN_EVENT_ATTR(CMN_ANY, hns_##_name##_all, CMN_TYPE_HNS, _event, 0, SEL_HBT_LBT_SEL), \
	_CMN_EVENT_ATTR(CMN_ANY, hns_##_name##_hbt, CMN_TYPE_HNS, _event, 1, SEL_HBT_LBT_SEL), \
	_CMN_EVENT_ATTR(CMN_ANY, hns_##_name##_lbt, CMN_TYPE_HNS, _event, 2, SEL_HBT_LBT_SEL)
#define CMN_EVENT_HNS_SNH(_name, _event)				\
	_CMN_EVENT_ATTR(CMN_ANY, hns_##_name##_all, CMN_TYPE_HNS, _event, 0, SEL_SN_HOME_SEL), \
	_CMN_EVENT_ATTR(CMN_ANY, hns_##_name##_sn, CMN_TYPE_HNS, _event, 1, SEL_SN_HOME_SEL), \
	_CMN_EVENT_ATTR(CMN_ANY, hns_##_name##_home, CMN_TYPE_HNS, _event, 2, SEL_SN_HOME_SEL)

#define _CMN_EVENT_XP_MESH(_name, _event)			\
	__CMN_EVENT_XP(e_##_name, (_event) | (0 << 2)),		\
	__CMN_EVENT_XP(w_##_name, (_event) | (1 << 2)),		\
	__CMN_EVENT_XP(n_##_name, (_event) | (2 << 2)),		\
	__CMN_EVENT_XP(s_##_name, (_event) | (3 << 2))

#define _CMN_EVENT_XP_PORT(_name, _event)			\
	__CMN_EVENT_XP(p0_##_name, (_event) | (4 << 2)),	\
	__CMN_EVENT_XP(p1_##_name, (_event) | (5 << 2)),	\
	__CMN_EVENT_XP(p2_##_name, (_event) | (6 << 2)),	\
	__CMN_EVENT_XP(p3_##_name, (_event) | (7 << 2))

#define _CMN_EVENT_XP(_name, _event)				\
	_CMN_EVENT_XP_MESH(_name, _event),			\
	_CMN_EVENT_XP_PORT(_name, _event)

/* Good thing there are only 3 fundamental XP events... */
#define CMN_EVENT_XP(_name, _event)				\
	_CMN_EVENT_XP(req_##_name, (_event) | (0 << 5)),	\
	_CMN_EVENT_XP(rsp_##_name, (_event) | (1 << 5)),	\
	_CMN_EVENT_XP(snp_##_name, (_event) | (2 << 5)),	\
	_CMN_EVENT_XP(dat_##_name, (_event) | (3 << 5)),	\
	_CMN_EVENT_XP(pub_##_name, (_event) | (4 << 5)),	\
	_CMN_EVENT_XP(rsp2_##_name, (_event) | (5 << 5)),	\
	_CMN_EVENT_XP(dat2_##_name, (_event) | (6 << 5)),	\
	_CMN_EVENT_XP(req2_##_name, (_event) | (7 << 5)),	\
	_CMN_EVENT_XP(snp2_##_name, (_event) | (8 << 5))

#define CMN_EVENT_XP_DAT(_name, _event)				\
	_CMN_EVENT_XP_PORT(dat_##_name, (_event) | (3 << 5)),	\
	_CMN_EVENT_XP_PORT(dat2_##_name, (_event) | (6 << 5))
static struct attribute *arm_cmn_event_attrs[] = {
	CMN_EVENT_DTC(cycles),

	/*
	 * DVM node events conflict with HN-I events in the equivalent PMU
	 * slot, but our lazy short-cut of using the DTM counter index for
	 * the PMU index as well happens to avoid that by construction.
	 */
	CMN_EVENT_DVM(CMN600, rxreq_dvmop, 0x01),
	CMN_EVENT_DVM(CMN600, rxreq_dvmsync, 0x02),
	CMN_EVENT_DVM(CMN600, rxreq_dvmop_vmid_filtered, 0x03),
	CMN_EVENT_DVM(CMN600, rxreq_retried, 0x04),
	CMN_EVENT_DVM_OCC(CMN600, rxreq_trk_occupancy, 0x05),
	CMN_EVENT_DVM(NOT_CMN600, dvmop_tlbi, 0x01),
	CMN_EVENT_DVM(NOT_CMN600, dvmop_bpi, 0x02),
	CMN_EVENT_DVM(NOT_CMN600, dvmop_pici, 0x03),
	CMN_EVENT_DVM(NOT_CMN600, dvmop_vici, 0x04),
	CMN_EVENT_DVM(NOT_CMN600, dvmsync, 0x05),
	CMN_EVENT_DVM(NOT_CMN600, vmid_filtered, 0x06),
	CMN_EVENT_DVM(NOT_CMN600, rndop_filtered, 0x07),
	CMN_EVENT_DVM(NOT_CMN600, retry, 0x08),
	CMN_EVENT_DVM(NOT_CMN600, txsnp_flitv, 0x09),
	CMN_EVENT_DVM(NOT_CMN600, txsnp_stall, 0x0a),
	CMN_EVENT_DVM(NOT_CMN600, trkfull, 0x0b),
	CMN_EVENT_DVM_OCC(NOT_CMN600, trk_occupancy, 0x0c),
	CMN_EVENT_DVM_OCC(CMN700, trk_occupancy_cxha, 0x0d),
	CMN_EVENT_DVM_OCC(CMN700, trk_occupancy_pdn, 0x0e),
	CMN_EVENT_DVM(CMN700, trk_alloc, 0x0f),
	CMN_EVENT_DVM(CMN700, trk_cxha_alloc, 0x10),
	CMN_EVENT_DVM(CMN700, trk_pdn_alloc, 0x11),
	CMN_EVENT_DVM(CMN700, txsnp_stall_limit, 0x12),
	CMN_EVENT_DVM(CMN700, rxsnp_stall_starv, 0x13),
	CMN_EVENT_DVM(CMN700, txsnp_sync_stall_op, 0x14),

	CMN_EVENT_HNF(CMN_ANY, cache_miss, 0x01),
	CMN_EVENT_HNF(CMN_ANY, slc_sf_cache_access, 0x02),
	CMN_EVENT_HNF(CMN_ANY, cache_fill, 0x03),
	CMN_EVENT_HNF(CMN_ANY, pocq_retry, 0x04),
	CMN_EVENT_HNF(CMN_ANY, pocq_reqs_recvd, 0x05),
	CMN_EVENT_HNF(CMN_ANY, sf_hit, 0x06),
	CMN_EVENT_HNF(CMN_ANY, sf_evictions, 0x07),
	CMN_EVENT_HNF(CMN_ANY, dir_snoops_sent, 0x08),
	CMN_EVENT_HNF(CMN_ANY, brd_snoops_sent, 0x09),
	CMN_EVENT_HNF(CMN_ANY, slc_eviction, 0x0a),
	CMN_EVENT_HNF(CMN_ANY, slc_fill_invalid_way, 0x0b),
	CMN_EVENT_HNF(CMN_ANY, mc_retries, 0x0c),
	CMN_EVENT_HNF(CMN_ANY, mc_reqs, 0x0d),
	CMN_EVENT_HNF(CMN_ANY, qos_hh_retry, 0x0e),
	CMN_EVENT_HNF_OCC(CMN_ANY, qos_pocq_occupancy, 0x0f),
	CMN_EVENT_HNF(CMN_ANY, pocq_addrhaz, 0x10),
	CMN_EVENT_HNF(CMN_ANY, pocq_atomic_addrhaz, 0x11),
	CMN_EVENT_HNF(CMN_ANY, ld_st_swp_adq_full, 0x12),
	CMN_EVENT_HNF(CMN_ANY, cmp_adq_full, 0x13),
	CMN_EVENT_HNF(CMN_ANY, txdat_stall, 0x14),
	CMN_EVENT_HNF(CMN_ANY, txrsp_stall, 0x15),
	CMN_EVENT_HNF(CMN_ANY, seq_full, 0x16),
	CMN_EVENT_HNF(CMN_ANY, seq_hit, 0x17),
	CMN_EVENT_HNF(CMN_ANY, snp_sent, 0x18),
	CMN_EVENT_HNF(CMN_ANY, sfbi_dir_snp_sent, 0x19),
	CMN_EVENT_HNF(CMN_ANY, sfbi_brd_snp_sent, 0x1a),
	CMN_EVENT_HNF(CMN_ANY, snp_sent_untrk, 0x1b),
	CMN_EVENT_HNF(CMN_ANY, intv_dirty, 0x1c),
	CMN_EVENT_HNF(CMN_ANY, stash_snp_sent, 0x1d),
	CMN_EVENT_HNF(CMN_ANY, stash_data_pull, 0x1e),
	CMN_EVENT_HNF(CMN_ANY, snp_fwded, 0x1f),
	CMN_EVENT_HNF(NOT_CMN600, atomic_fwd, 0x20),
	CMN_EVENT_HNF(NOT_CMN600, mpam_hardlim, 0x21),
	CMN_EVENT_HNF(NOT_CMN600, mpam_softlim, 0x22),
	CMN_EVENT_HNF(CMN_650ON, snp_sent_cluster, 0x23),
	CMN_EVENT_HNF(CMN_650ON, sf_imprecise_evict, 0x24),
	CMN_EVENT_HNF(CMN_650ON, sf_evict_shared_line, 0x25),
	CMN_EVENT_HNF_CLS(CMN700, pocq_class_occup, 0x26),
	CMN_EVENT_HNF_CLS(CMN700, pocq_class_retry, 0x27),
	CMN_EVENT_HNF_CLS(CMN700, class_mc_reqs, 0x28),
	CMN_EVENT_HNF_CLS(CMN700, class_cgnt_cmin, 0x29),
	CMN_EVENT_HNF_SNT(CMN700, sn_throttle, 0x2a),
	CMN_EVENT_HNF_SNT(CMN700, sn_throttle_min, 0x2b),
	CMN_EVENT_HNF(CMN700, sf_precise_to_imprecise, 0x2c),
	CMN_EVENT_HNF(CMN700, snp_intv_cln, 0x2d),
	CMN_EVENT_HNF(CMN700, nc_excl, 0x2e),
	CMN_EVENT_HNF(CMN700, excl_mon_ovfl, 0x2f),

	CMN_EVENT_HNI(rrt_rd_occ_cnt_ovfl, 0x20),
	CMN_EVENT_HNI(rrt_wr_occ_cnt_ovfl, 0x21),
	CMN_EVENT_HNI(rdt_rd_occ_cnt_ovfl, 0x22),
	CMN_EVENT_HNI(rdt_wr_occ_cnt_ovfl, 0x23),
	CMN_EVENT_HNI(wdb_occ_cnt_ovfl, 0x24),
	CMN_EVENT_HNI(rrt_rd_alloc, 0x25),
	CMN_EVENT_HNI(rrt_wr_alloc, 0x26),
	CMN_EVENT_HNI(rdt_rd_alloc, 0x27),
	CMN_EVENT_HNI(rdt_wr_alloc, 0x28),
	CMN_EVENT_HNI(wdb_alloc, 0x29),
	CMN_EVENT_HNI(txrsp_retryack, 0x2a),
	CMN_EVENT_HNI(arvalid_no_arready, 0x2b),
	CMN_EVENT_HNI(arready_no_arvalid, 0x2c),
	CMN_EVENT_HNI(awvalid_no_awready, 0x2d),
	CMN_EVENT_HNI(awready_no_awvalid, 0x2e),
	CMN_EVENT_HNI(wvalid_no_wready, 0x2f),
	CMN_EVENT_HNI(txdat_stall, 0x30),
	CMN_EVENT_HNI(nonpcie_serialization, 0x31),
	CMN_EVENT_HNI(pcie_serialization, 0x32),

	/*
	 * HN-P events squat on top of the HN-I similarly to DVM events, except
	 * for being crammed into the same physical node as well. And of course
	 * where would the fun be if the same events were in the same order...
	 */
	CMN_EVENT_HNP(rrt_wr_occ_cnt_ovfl, 0x01),
	CMN_EVENT_HNP(rdt_wr_occ_cnt_ovfl, 0x02),
	CMN_EVENT_HNP(wdb_occ_cnt_ovfl, 0x03),
	CMN_EVENT_HNP(rrt_wr_alloc, 0x04),
	CMN_EVENT_HNP(rdt_wr_alloc, 0x05),
	CMN_EVENT_HNP(wdb_alloc, 0x06),
	CMN_EVENT_HNP(awvalid_no_awready, 0x07),
	CMN_EVENT_HNP(awready_no_awvalid, 0x08),
	CMN_EVENT_HNP(wvalid_no_wready, 0x09),
	CMN_EVENT_HNP(rrt_rd_occ_cnt_ovfl, 0x11),
	CMN_EVENT_HNP(rdt_rd_occ_cnt_ovfl, 0x12),
	CMN_EVENT_HNP(rrt_rd_alloc, 0x13),
	CMN_EVENT_HNP(rdt_rd_alloc, 0x14),
	CMN_EVENT_HNP(arvalid_no_arready, 0x15),
	CMN_EVENT_HNP(arready_no_arvalid, 0x16),

	CMN_EVENT_XP(txflit_valid, 0x01),
	CMN_EVENT_XP(txflit_stall, 0x02),
	CMN_EVENT_XP_DAT(partial_dat_flit, 0x03),
	/* We treat watchpoints as a special made-up class of XP events */
	CMN_EVENT_ATTR(CMN_ANY, watchpoint_up, CMN_TYPE_WP, CMN_WP_UP),
	CMN_EVENT_ATTR(CMN_ANY, watchpoint_down, CMN_TYPE_WP, CMN_WP_DOWN),

	CMN_EVENT_SBSX(CMN_ANY, rd_req, 0x01),
	CMN_EVENT_SBSX(CMN_ANY, wr_req, 0x02),
	CMN_EVENT_SBSX(CMN_ANY, cmo_req, 0x03),
	CMN_EVENT_SBSX(CMN_ANY, txrsp_retryack, 0x04),
	CMN_EVENT_SBSX(CMN_ANY, txdat_flitv, 0x05),
	CMN_EVENT_SBSX(CMN_ANY, txrsp_flitv, 0x06),
	CMN_EVENT_SBSX(CMN_ANY, rd_req_trkr_occ_cnt_ovfl, 0x11),
	CMN_EVENT_SBSX(CMN_ANY, wr_req_trkr_occ_cnt_ovfl, 0x12),
	CMN_EVENT_SBSX(CMN_ANY, cmo_req_trkr_occ_cnt_ovfl, 0x13),
	CMN_EVENT_SBSX(CMN_ANY, wdb_occ_cnt_ovfl, 0x14),
	CMN_EVENT_SBSX(CMN_ANY, rd_axi_trkr_occ_cnt_ovfl, 0x15),
	CMN_EVENT_SBSX(CMN_ANY, cmo_axi_trkr_occ_cnt_ovfl, 0x16),
	CMN_EVENT_SBSX(NOT_CMN600, rdb_occ_cnt_ovfl, 0x17),
	CMN_EVENT_SBSX(CMN_ANY, arvalid_no_arready, 0x21),
	CMN_EVENT_SBSX(CMN_ANY, awvalid_no_awready, 0x22),
	CMN_EVENT_SBSX(CMN_ANY, wvalid_no_wready, 0x23),
	CMN_EVENT_SBSX(CMN_ANY, txdat_stall, 0x24),
	CMN_EVENT_SBSX(CMN_ANY, txrsp_stall, 0x25),

	CMN_EVENT_RNID(CMN_ANY, s0_rdata_beats, 0x01),
	CMN_EVENT_RNID(CMN_ANY, s1_rdata_beats, 0x02),
	CMN_EVENT_RNID(CMN_ANY, s2_rdata_beats, 0x03),
	CMN_EVENT_RNID(CMN_ANY, rxdat_flits, 0x04),
	CMN_EVENT_RNID(CMN_ANY, txdat_flits, 0x05),
	CMN_EVENT_RNID(CMN_ANY, txreq_flits_total, 0x06),
	CMN_EVENT_RNID(CMN_ANY, txreq_flits_retried, 0x07),
	CMN_EVENT_RNID(CMN_ANY, rrt_occ_ovfl, 0x08),
	CMN_EVENT_RNID(CMN_ANY, wrt_occ_ovfl, 0x09),
	CMN_EVENT_RNID(CMN_ANY, txreq_flits_replayed, 0x0a),
	CMN_EVENT_RNID(CMN_ANY, wrcancel_sent, 0x0b),
	CMN_EVENT_RNID(CMN_ANY, s0_wdata_beats, 0x0c),
	CMN_EVENT_RNID(CMN_ANY, s1_wdata_beats, 0x0d),
	CMN_EVENT_RNID(CMN_ANY, s2_wdata_beats, 0x0e),
	CMN_EVENT_RNID(CMN_ANY, rrt_alloc, 0x0f),
	CMN_EVENT_RNID(CMN_ANY, wrt_alloc, 0x10),
	CMN_EVENT_RNID(CMN600, rdb_unord, 0x11),
	CMN_EVENT_RNID(CMN600, rdb_replay, 0x12),
	CMN_EVENT_RNID(CMN600, rdb_hybrid, 0x13),
	CMN_EVENT_RNID(CMN600, rdb_ord, 0x14),
	CMN_EVENT_RNID(NOT_CMN600, padb_occ_ovfl, 0x11),
	CMN_EVENT_RNID(NOT_CMN600, rpdb_occ_ovfl, 0x12),
	CMN_EVENT_RNID(NOT_CMN600, rrt_occup_ovfl_slice1, 0x13),
	CMN_EVENT_RNID(NOT_CMN600, rrt_occup_ovfl_slice2, 0x14),
	CMN_EVENT_RNID(NOT_CMN600, rrt_occup_ovfl_slice3, 0x15),
	CMN_EVENT_RNID(NOT_CMN600, wrt_throttled, 0x16),
	CMN_EVENT_RNID(CMN700, ldb_full, 0x17),
	CMN_EVENT_RNID(CMN700, rrt_rd_req_occup_ovfl_slice0, 0x18),
	CMN_EVENT_RNID(CMN700, rrt_rd_req_occup_ovfl_slice1, 0x19),
	CMN_EVENT_RNID(CMN700, rrt_rd_req_occup_ovfl_slice2, 0x1a),
	CMN_EVENT_RNID(CMN700, rrt_rd_req_occup_ovfl_slice3, 0x1b),
	CMN_EVENT_RNID(CMN700, rrt_burst_occup_ovfl_slice0, 0x1c),
	CMN_EVENT_RNID(CMN700, rrt_burst_occup_ovfl_slice1, 0x1d),
	CMN_EVENT_RNID(CMN700, rrt_burst_occup_ovfl_slice2, 0x1e),
	CMN_EVENT_RNID(CMN700, rrt_burst_occup_ovfl_slice3, 0x1f),
	CMN_EVENT_RNID(CMN700, rrt_burst_alloc, 0x20),
	CMN_EVENT_RNID(CMN700, awid_hash, 0x21),
	CMN_EVENT_RNID(CMN700, atomic_alloc, 0x22),
	CMN_EVENT_RNID(CMN700, atomic_occ_ovfl, 0x23),

	CMN_EVENT_MTSX(tc_lookup, 0x01),
	CMN_EVENT_MTSX(tc_fill, 0x02),
	CMN_EVENT_MTSX(tc_miss, 0x03),
	CMN_EVENT_MTSX(tdb_forward, 0x04),
	CMN_EVENT_MTSX(tcq_hazard, 0x05),
	CMN_EVENT_MTSX(tcq_rd_alloc, 0x06),
	CMN_EVENT_MTSX(tcq_wr_alloc, 0x07),
	CMN_EVENT_MTSX(tcq_cmo_alloc, 0x08),
	CMN_EVENT_MTSX(axi_rd_req, 0x09),
	CMN_EVENT_MTSX(axi_wr_req, 0x0a),
	CMN_EVENT_MTSX(tcq_occ_cnt_ovfl, 0x0b),
	CMN_EVENT_MTSX(tdb_occ_cnt_ovfl, 0x0c),

	CMN_EVENT_CXRA(CMN_ANY, rht_occ, 0x01),
	CMN_EVENT_CXRA(CMN_ANY, sht_occ, 0x02),
	CMN_EVENT_CXRA(CMN_ANY, rdb_occ, 0x03),
	CMN_EVENT_CXRA(CMN_ANY, wdb_occ, 0x04),
	CMN_EVENT_CXRA(CMN_ANY, ssb_occ, 0x05),
	CMN_EVENT_CXRA(CMN_ANY, snp_bcasts, 0x06),
	CMN_EVENT_CXRA(CMN_ANY, req_chains, 0x07),
	CMN_EVENT_CXRA(CMN_ANY, req_chain_avglen, 0x08),
	CMN_EVENT_CXRA(CMN_ANY, chirsp_stalls, 0x09),
	CMN_EVENT_CXRA(CMN_ANY, chidat_stalls, 0x0a),
	CMN_EVENT_CXRA(CMN_ANY, cxreq_pcrd_stalls_link0, 0x0b),
	CMN_EVENT_CXRA(CMN_ANY, cxreq_pcrd_stalls_link1, 0x0c),
	CMN_EVENT_CXRA(CMN_ANY, cxreq_pcrd_stalls_link2, 0x0d),
	CMN_EVENT_CXRA(CMN_ANY, cxdat_pcrd_stalls_link0, 0x0e),
	CMN_EVENT_CXRA(CMN_ANY, cxdat_pcrd_stalls_link1, 0x0f),
	CMN_EVENT_CXRA(CMN_ANY, cxdat_pcrd_stalls_link2, 0x10),
	CMN_EVENT_CXRA(CMN_ANY, external_chirsp_stalls, 0x11),
	CMN_EVENT_CXRA(CMN_ANY, external_chidat_stalls, 0x12),
	CMN_EVENT_CXRA(NOT_CMN600, cxmisc_pcrd_stalls_link0, 0x13),
	CMN_EVENT_CXRA(NOT_CMN600, cxmisc_pcrd_stalls_link1, 0x14),
	CMN_EVENT_CXRA(NOT_CMN600, cxmisc_pcrd_stalls_link2, 0x15),

	CMN_EVENT_CXHA(rddatbyp, 0x21),
	CMN_EVENT_CXHA(chirsp_up_stall, 0x22),
	CMN_EVENT_CXHA(chidat_up_stall, 0x23),
	CMN_EVENT_CXHA(snppcrd_link0_stall, 0x24),
	CMN_EVENT_CXHA(snppcrd_link1_stall, 0x25),
	CMN_EVENT_CXHA(snppcrd_link2_stall, 0x26),
	CMN_EVENT_CXHA(reqtrk_occ, 0x27),
	CMN_EVENT_CXHA(rdb_occ, 0x28),
	CMN_EVENT_CXHA(rdbyp_occ, 0x29),
	CMN_EVENT_CXHA(wdb_occ, 0x2a),
	CMN_EVENT_CXHA(snptrk_occ, 0x2b),
	CMN_EVENT_CXHA(sdb_occ, 0x2c),
	CMN_EVENT_CXHA(snphaz_occ, 0x2d),

	CMN_EVENT_CCRA(rht_occ, 0x41),
	CMN_EVENT_CCRA(sht_occ, 0x42),
	CMN_EVENT_CCRA(rdb_occ, 0x43),
	CMN_EVENT_CCRA(wdb_occ, 0x44),
	CMN_EVENT_CCRA(ssb_occ, 0x45),
	CMN_EVENT_CCRA(snp_bcasts, 0x46),
	CMN_EVENT_CCRA(req_chains, 0x47),
	CMN_EVENT_CCRA(req_chain_avglen, 0x48),
	CMN_EVENT_CCRA(chirsp_stalls, 0x49),
	CMN_EVENT_CCRA(chidat_stalls, 0x4a),
	CMN_EVENT_CCRA(cxreq_pcrd_stalls_link0, 0x4b),
	CMN_EVENT_CCRA(cxreq_pcrd_stalls_link1, 0x4c),
	CMN_EVENT_CCRA(cxreq_pcrd_stalls_link2, 0x4d),
	CMN_EVENT_CCRA(cxdat_pcrd_stalls_link0, 0x4e),
	CMN_EVENT_CCRA(cxdat_pcrd_stalls_link1, 0x4f),
	CMN_EVENT_CCRA(cxdat_pcrd_stalls_link2, 0x50),
	CMN_EVENT_CCRA(external_chirsp_stalls, 0x51),
	CMN_EVENT_CCRA(external_chidat_stalls, 0x52),
	CMN_EVENT_CCRA(cxmisc_pcrd_stalls_link0, 0x53),
	CMN_EVENT_CCRA(cxmisc_pcrd_stalls_link1, 0x54),
	CMN_EVENT_CCRA(cxmisc_pcrd_stalls_link2, 0x55),
	CMN_EVENT_CCRA(rht_alloc, 0x56),
	CMN_EVENT_CCRA(sht_alloc, 0x57),
	CMN_EVENT_CCRA(rdb_alloc, 0x58),
	CMN_EVENT_CCRA(wdb_alloc, 0x59),
	CMN_EVENT_CCRA(ssb_alloc, 0x5a),

	CMN_EVENT_CCHA(CMN_ANY, rddatbyp, 0x61),
	CMN_EVENT_CCHA(CMN_ANY, chirsp_up_stall, 0x62),
	CMN_EVENT_CCHA(CMN_ANY, chidat_up_stall, 0x63),
	CMN_EVENT_CCHA(CMN_ANY, snppcrd_link0_stall, 0x64),
	CMN_EVENT_CCHA(CMN_ANY, snppcrd_link1_stall, 0x65),
	CMN_EVENT_CCHA(CMN_ANY, snppcrd_link2_stall, 0x66),
	CMN_EVENT_CCHA(CMN_ANY, reqtrk_occ, 0x67),
	CMN_EVENT_CCHA(CMN_ANY, rdb_occ, 0x68),
	CMN_EVENT_CCHA(CMN_ANY, rdbyp_occ, 0x69),
	CMN_EVENT_CCHA(CMN_ANY, wdb_occ, 0x6a),
	CMN_EVENT_CCHA(CMN_ANY, snptrk_occ, 0x6b),
	CMN_EVENT_CCHA(CMN_ANY, sdb_occ, 0x6c),
	CMN_EVENT_CCHA(CMN_ANY, snphaz_occ, 0x6d),
	CMN_EVENT_CCHA(CMN_ANY, reqtrk_alloc, 0x6e),
	CMN_EVENT_CCHA(CMN_ANY, rdb_alloc, 0x6f),
	CMN_EVENT_CCHA(CMN_ANY, rdbyp_alloc, 0x70),
	CMN_EVENT_CCHA(CMN_ANY, wdb_alloc, 0x71),
	CMN_EVENT_CCHA(CMN_ANY, snptrk_alloc, 0x72),
	CMN_EVENT_CCHA(CMN_ANY, db_alloc, 0x73),
	CMN_EVENT_CCHA(CMN_ANY, snphaz_alloc, 0x74),
	CMN_EVENT_CCHA(CMN_ANY, pb_rhu_req_occ, 0x75),
	CMN_EVENT_CCHA(CMN_ANY, pb_rhu_req_alloc, 0x76),
	CMN_EVENT_CCHA(CMN_ANY, pb_rhu_pcie_req_occ, 0x77),
	CMN_EVENT_CCHA(CMN_ANY, pb_rhu_pcie_req_alloc, 0x78),
	CMN_EVENT_CCHA(CMN_ANY, pb_pcie_wr_req_occ, 0x79),
	CMN_EVENT_CCHA(CMN_ANY, pb_pcie_wr_req_alloc, 0x7a),
	CMN_EVENT_CCHA(CMN_ANY, pb_pcie_reg_req_occ, 0x7b),
	CMN_EVENT_CCHA(CMN_ANY, pb_pcie_reg_req_alloc, 0x7c),
	CMN_EVENT_CCHA(CMN_ANY, pb_pcie_rsvd_req_occ, 0x7d),
	CMN_EVENT_CCHA(CMN_ANY, pb_pcie_rsvd_req_alloc, 0x7e),
	CMN_EVENT_CCHA(CMN_ANY, pb_rhu_dat_occ, 0x7f),
	CMN_EVENT_CCHA(CMN_ANY, pb_rhu_dat_alloc, 0x80),
	CMN_EVENT_CCHA(CMN_ANY, pb_rhu_pcie_dat_occ, 0x81),
	CMN_EVENT_CCHA(CMN_ANY, pb_rhu_pcie_dat_alloc, 0x82),
	CMN_EVENT_CCHA(CMN_ANY, pb_pcie_wr_dat_occ, 0x83),
	CMN_EVENT_CCHA(CMN_ANY, pb_pcie_wr_dat_alloc, 0x84),
	CMN_EVENT_CCHA(CMNS3, chirsp1_up_stall, 0x85),

	CMN_EVENT_CCLA(rx_cxs, 0x21),
	CMN_EVENT_CCLA(tx_cxs, 0x22),
	CMN_EVENT_CCLA(rx_cxs_avg_size, 0x23),
	CMN_EVENT_CCLA(tx_cxs_avg_size, 0x24),
	CMN_EVENT_CCLA(tx_cxs_lcrd_backpressure, 0x25),
	CMN_EVENT_CCLA(link_crdbuf_occ, 0x26),
	CMN_EVENT_CCLA(link_crdbuf_alloc, 0x27),
	CMN_EVENT_CCLA(pfwd_rcvr_cxs, 0x28),
	CMN_EVENT_CCLA(pfwd_sndr_num_flits, 0x29),
	CMN_EVENT_CCLA(pfwd_sndr_stalls_static_crd, 0x2a),
	CMN_EVENT_CCLA(pfwd_sndr_stalls_dynmaic_crd, 0x2b),

	CMN_EVENT_HNS_HBT(cache_miss, 0x01),
	CMN_EVENT_HNS_HBT(slc_sf_cache_access, 0x02),
	CMN_EVENT_HNS_HBT(cache_fill, 0x03),
	CMN_EVENT_HNS_HBT(pocq_retry, 0x04),
	CMN_EVENT_HNS_HBT(pocq_reqs_recvd, 0x05),
	CMN_EVENT_HNS_HBT(sf_hit, 0x06),
	CMN_EVENT_HNS_HBT(sf_evictions, 0x07),
	CMN_EVENT_HNS(dir_snoops_sent, 0x08),
	CMN_EVENT_HNS(brd_snoops_sent, 0x09),
	CMN_EVENT_HNS_HBT(slc_eviction, 0x0a),
	CMN_EVENT_HNS_HBT(slc_fill_invalid_way, 0x0b),
	CMN_EVENT_HNS(mc_retries_local, 0x0c),
	CMN_EVENT_HNS_SNH(mc_reqs_local, 0x0d),
	CMN_EVENT_HNS(qos_hh_retry, 0x0e),
	CMN_EVENT_HNS_OCC(qos_pocq_occupancy, 0x0f),
	CMN_EVENT_HNS(pocq_addrhaz, 0x10),
	CMN_EVENT_HNS(pocq_atomic_addrhaz, 0x11),
	CMN_EVENT_HNS(ld_st_swp_adq_full, 0x12),
	CMN_EVENT_HNS(cmp_adq_full, 0x13),
	CMN_EVENT_HNS(txdat_stall, 0x14),
	CMN_EVENT_HNS(txrsp_stall, 0x15),
	CMN_EVENT_HNS(seq_full, 0x16),
	CMN_EVENT_HNS(seq_hit, 0x17),
	CMN_EVENT_HNS(snp_sent, 0x18),
	CMN_EVENT_HNS(sfbi_dir_snp_sent, 0x19),
	CMN_EVENT_HNS(sfbi_brd_snp_sent, 0x1a),
	CMN_EVENT_HNS(intv_dirty, 0x1c),
	CMN_EVENT_HNS(stash_snp_sent, 0x1d),
	CMN_EVENT_HNS(stash_data_pull, 0x1e),
	CMN_EVENT_HNS(snp_fwded, 0x1f),
	CMN_EVENT_HNS(atomic_fwd, 0x20),
	CMN_EVENT_HNS(mpam_hardlim, 0x21),
	CMN_EVENT_HNS(mpam_softlim, 0x22),
	CMN_EVENT_HNS(snp_sent_cluster, 0x23),
	CMN_EVENT_HNS(sf_imprecise_evict, 0x24),
	CMN_EVENT_HNS(sf_evict_shared_line, 0x25),
	CMN_EVENT_HNS_CLS(pocq_class_occup, 0x26),
	CMN_EVENT_HNS_CLS(pocq_class_retry, 0x27),
	CMN_EVENT_HNS_CLS(class_mc_reqs_local, 0x28),
	CMN_EVENT_HNS_CLS(class_cgnt_cmin, 0x29),
	CMN_EVENT_HNS_SNT(sn_throttle, 0x2a),
	CMN_EVENT_HNS_SNT(sn_throttle_min, 0x2b),
	CMN_EVENT_HNS(sf_precise_to_imprecise, 0x2c),
	CMN_EVENT_HNS(snp_intv_cln, 0x2d),
	CMN_EVENT_HNS(nc_excl, 0x2e),
	CMN_EVENT_HNS(excl_mon_ovfl, 0x2f),
	CMN_EVENT_HNS(snp_req_recvd, 0x30),
	CMN_EVENT_HNS(snp_req_byp_pocq, 0x31),
	CMN_EVENT_HNS(dir_ccgha_snp_sent, 0x32),
	CMN_EVENT_HNS(brd_ccgha_snp_sent, 0x33),
	CMN_EVENT_HNS(ccgha_snp_stall, 0x34),
	CMN_EVENT_HNS(lbt_req_hardlim, 0x35),
	CMN_EVENT_HNS(hbt_req_hardlim, 0x36),
	CMN_EVENT_HNS(sf_reupdate, 0x37),
	CMN_EVENT_HNS(excl_sf_imprecise, 0x38),
	CMN_EVENT_HNS(snp_pocq_addrhaz, 0x39),
	CMN_EVENT_HNS(mc_retries_remote, 0x3a),
	CMN_EVENT_HNS_SNH(mc_reqs_remote, 0x3b),
	CMN_EVENT_HNS_CLS(class_mc_reqs_remote, 0x3c),

	NULL
};

static const struct attribute_group arm_cmn_event_attrs_group = {
	.name = "events",
	.attrs = arm_cmn_event_attrs,
	.is_visible = arm_cmn_event_attr_is_visible,
};

static ssize_t arm_cmn_format_show(struct device *dev,
				   struct device_attribute *attr, char *buf)
{
	struct arm_cmn_format_attr *fmt = container_of(attr, typeof(*fmt), attr);

	if (!fmt->config)
		return sysfs_emit(buf, "config:%*pbl\n", 64, &fmt->field);

	return sysfs_emit(buf, "config%d:%*pbl\n", fmt->config, 64, &fmt->field);
  1145. }
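/*
 * Each format attribute is built from an anonymous compound-literal array
 * so that the __ATTR() initialiser and the field description fit in a
 * single expression; 'config' records which config word the bitfield
 * lives in, matching the "config%d:..." strings emitted above.
 */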
#define _CMN_FORMAT_ATTR(_name, _cfg, _fld)				\
	(&((struct arm_cmn_format_attr[]) {{				\
		.attr = __ATTR(_name, 0444, arm_cmn_format_show, NULL),	\
		.config = _cfg,						\
		.field = _fld,						\
	}})[0].attr.attr)
#define CMN_FORMAT_ATTR(_name, _fld)	_CMN_FORMAT_ATTR(_name, 0, _fld)

static struct attribute *arm_cmn_format_attrs[] = {
	CMN_FORMAT_ATTR(type, CMN_CONFIG_TYPE),
	CMN_FORMAT_ATTR(eventid, CMN_CONFIG_EVENTID),
	CMN_FORMAT_ATTR(occupid, CMN_CONFIG_OCCUPID),
	CMN_FORMAT_ATTR(bynodeid, CMN_CONFIG_BYNODEID),
	CMN_FORMAT_ATTR(nodeid, CMN_CONFIG_NODEID),
	CMN_FORMAT_ATTR(wp_dev_sel, CMN_CONFIG_WP_DEV_SEL),
	CMN_FORMAT_ATTR(wp_chn_sel, CMN_CONFIG_WP_CHN_SEL),
	CMN_FORMAT_ATTR(wp_grp, CMN_CONFIG_WP_GRP),
	CMN_FORMAT_ATTR(wp_exclusive, CMN_CONFIG_WP_EXCLUSIVE),
	CMN_FORMAT_ATTR(wp_combine, CMN_CONFIG_WP_COMBINE),
	_CMN_FORMAT_ATTR(wp_val, 1, CMN_CONFIG1_WP_VAL),
	_CMN_FORMAT_ATTR(wp_mask, 2, CMN_CONFIG2_WP_MASK),
	NULL
};

static const struct attribute_group arm_cmn_format_attrs_group = {
	.name = "format",
	.attrs = arm_cmn_format_attrs,
};

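/*
 * Illustrative usage (not part of this file): with a PMU instance
 * registered as e.g. "arm_cmn_0", the format fields above let perf
 * encode an event directly in the config words, along the lines of:
 *
 *	perf stat -e arm_cmn_0/type=0x5,eventid=0x1/
 *	perf stat -e arm_cmn_0/type=0x5,eventid=0x1,bynodeid=1,nodeid=0x8/
 *
 * The type/eventid/nodeid values shown are placeholders; the named
 * events in the "events" group encode the valid combinations.
 */
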
static ssize_t arm_cmn_cpumask_show(struct device *dev,
				    struct device_attribute *attr, char *buf)
{
	struct arm_cmn *cmn = to_cmn(dev_get_drvdata(dev));

	return cpumap_print_to_pagebuf(true, buf, cpumask_of(cmn->cpu));
}

static struct device_attribute arm_cmn_cpumask_attr =
		__ATTR(cpumask, 0444, arm_cmn_cpumask_show, NULL);

static ssize_t arm_cmn_identifier_show(struct device *dev,
				       struct device_attribute *attr, char *buf)
{
	struct arm_cmn *cmn = to_cmn(dev_get_drvdata(dev));

	return sysfs_emit(buf, "%03x%02x\n", cmn->part, cmn->rev);
}

static struct device_attribute arm_cmn_identifier_attr =
		__ATTR(identifier, 0444, arm_cmn_identifier_show, NULL);

static struct attribute *arm_cmn_other_attrs[] = {
	&arm_cmn_cpumask_attr.attr,
	&arm_cmn_identifier_attr.attr,
	NULL,
};

static const struct attribute_group arm_cmn_other_attrs_group = {
	.attrs = arm_cmn_other_attrs,
};

static const struct attribute_group *arm_cmn_attr_groups[] = {
	&arm_cmn_event_attrs_group,
	&arm_cmn_format_attrs_group,
	&arm_cmn_other_attrs_group,
	NULL
};

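/*
 * Watchpoint slot management: a WP event's ID picks its preferred slot
 * (nominally one pair of WPs per direction), and if that one is taken we
 * try its partner (+1) before giving up. The offset from the nominal slot
 * is stashed in hw->wp_idx by arm_cmn_claim_wp_idx() so that
 * arm_cmn_get_assigned_wp_idx() can recover the actual slot later.
 */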
static int arm_cmn_find_free_wp_idx(struct arm_cmn_dtm *dtm,
				    struct perf_event *event)
{
	int wp_idx = CMN_EVENT_EVENTID(event);

	if (dtm->wp_event[wp_idx] >= 0)
		if (dtm->wp_event[++wp_idx] >= 0)
			return -ENOSPC;

	return wp_idx;
}

static int arm_cmn_get_assigned_wp_idx(struct perf_event *event,
				       struct arm_cmn_hw_event *hw,
				       unsigned int pos)
{
	return CMN_EVENT_EVENTID(event) + arm_cmn_get_wp_idx(hw->wp_idx, pos);
}

static void arm_cmn_claim_wp_idx(struct arm_cmn_dtm *dtm,
				 struct perf_event *event,
				 unsigned int dtc, int wp_idx,
				 unsigned int pos)
{
	struct arm_cmn_hw_event *hw = to_cmn_hw(event);

	dtm->wp_event[wp_idx] = hw->dtc_idx[dtc];
	arm_cmn_set_wp_idx(hw->wp_idx, pos, wp_idx - CMN_EVENT_EVENTID(event));
}

static u32 arm_cmn_wp_config(struct perf_event *event, int wp_idx)
{
	u32 config;
	u32 dev = CMN_EVENT_WP_DEV_SEL(event);
	u32 chn = CMN_EVENT_WP_CHN_SEL(event);
	u32 grp = CMN_EVENT_WP_GRP(event);
	u32 exc = CMN_EVENT_WP_EXCLUSIVE(event);
	u32 combine = CMN_EVENT_WP_COMBINE(event);
	bool is_cmn600 = to_cmn(event->pmu)->part == PART_CMN600;

	/* CMN-600 supports only primary and secondary matching groups */
	if (is_cmn600)
		grp &= 1;

	config = FIELD_PREP(CMN_DTM_WPn_CONFIG_WP_DEV_SEL, dev) |
		 FIELD_PREP(CMN_DTM_WPn_CONFIG_WP_CHN_SEL, chn) |
		 FIELD_PREP(CMN_DTM_WPn_CONFIG_WP_GRP, grp) |
		 FIELD_PREP(CMN_DTM_WPn_CONFIG_WP_DEV_SEL2, dev >> 1);
	if (exc)
		config |= is_cmn600 ? CMN600_WPn_CONFIG_WP_EXCLUSIVE :
				      CMN_DTM_WPn_CONFIG_WP_EXCLUSIVE;

	/* wp_combine is available only on WP0 and WP2 */
	if (combine && !(wp_idx & 0x1))
		config |= is_cmn600 ? CMN600_WPn_CONFIG_WP_COMBINE :
				      CMN_DTM_WPn_CONFIG_WP_COMBINE;

	return config;
}

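/*
 * Counting is gated on the DT PMCR: any outstanding state bit (disabled
 * or mid-transaction) holds the whole PMU stopped, and counting only
 * restarts once every reason for stopping has been cleared again.
 */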
static void arm_cmn_set_state(struct arm_cmn *cmn, u32 state)
{
	if (!cmn->state)
		writel_relaxed(0, CMN_DT_PMCR(&cmn->dtc[0]));
	cmn->state |= state;
}

static void arm_cmn_clear_state(struct arm_cmn *cmn, u32 state)
{
	cmn->state &= ~state;
	if (!cmn->state)
		writel_relaxed(CMN_DT_PMCR_PMU_EN | CMN_DT_PMCR_OVFL_INTR_EN,
			       CMN_DT_PMCR(&cmn->dtc[0]));
}

static void arm_cmn_pmu_enable(struct pmu *pmu)
{
	arm_cmn_clear_state(to_cmn(pmu), CMN_STATE_DISABLED);
}

static void arm_cmn_pmu_disable(struct pmu *pmu)
{
	arm_cmn_set_state(to_cmn(pmu), CMN_STATE_DISABLED);
}

static u64 arm_cmn_read_dtm(struct arm_cmn *cmn, struct arm_cmn_hw_event *hw,
			    bool snapshot)
{
	struct arm_cmn_dtm *dtm = NULL;
	struct arm_cmn_node *dn;
	unsigned int i, offset, dtm_idx;
	u64 reg, count = 0;

	offset = snapshot ? CMN_DTM_PMEVCNTSR : CMN_DTM_PMEVCNT;
	for_each_hw_dn(hw, dn, i) {
		if (dtm != &cmn->dtms[dn->dtm]) {
			dtm = &cmn->dtms[dn->dtm] + hw->dtm_offset;
			reg = readq_relaxed(dtm->base + offset);
		}
		dtm_idx = arm_cmn_get_index(hw->dtm_idx, i);
		count += (u16)(reg >> (dtm_idx * 16));
	}
	return count;
}

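/*
 * Cycle counter reads are destructive: the counter is re-seeded with
 * CMN_CC_INIT on every read, and the returned delta is folded back into
 * range by the ((CMN_CC_INIT << 1) - 1) wraparound mask.
 */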
static u64 arm_cmn_read_cc(struct arm_cmn_dtc *dtc)
{
	void __iomem *pmccntr = CMN_DT_PMCCNTR(dtc);
	u64 val = readq_relaxed(pmccntr);

	writeq_relaxed(CMN_CC_INIT, pmccntr);
	return (val - CMN_CC_INIT) & ((CMN_CC_INIT << 1) - 1);
}

static u32 arm_cmn_read_counter(struct arm_cmn_dtc *dtc, int idx)
{
	void __iomem *pmevcnt = CMN_DT_PMEVCNT(dtc, idx);
	u32 val = readl_relaxed(pmevcnt);

	writel_relaxed(CMN_COUNTER_INIT, pmevcnt);
	return val - CMN_COUNTER_INIT;
}

static void arm_cmn_init_counter(struct perf_event *event)
{
	struct arm_cmn *cmn = to_cmn(event->pmu);
	struct arm_cmn_hw_event *hw = to_cmn_hw(event);
	u64 count;

	for_each_hw_dtc_idx(hw, i, idx) {
		writel_relaxed(CMN_COUNTER_INIT, CMN_DT_PMEVCNT(&cmn->dtc[i], idx));
		cmn->dtc[i].counters[idx] = event;
	}

	count = arm_cmn_read_dtm(cmn, hw, false);
	local64_set(&event->hw.prev_count, count);
}

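/*
 * The DTM event counters are only 16 bits wide, overflowing into the
 * paired 32-bit DTC global counters; hence arm_cmn_read_dtm() sums u16
 * lanes, and the DTC contribution below is shifted up by 16 bits.
 */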
static void arm_cmn_event_read(struct perf_event *event)
{
	struct arm_cmn *cmn = to_cmn(event->pmu);
	struct arm_cmn_hw_event *hw = to_cmn_hw(event);
	u64 delta, new, prev;
	unsigned long flags;

	if (CMN_EVENT_TYPE(event) == CMN_TYPE_DTC) {
		delta = arm_cmn_read_cc(cmn->dtc + hw->dtc_idx[0]);
		local64_add(delta, &event->count);
		return;
	}
	new = arm_cmn_read_dtm(cmn, hw, false);
	prev = local64_xchg(&event->hw.prev_count, new);

	delta = new - prev;

	local_irq_save(flags);
	for_each_hw_dtc_idx(hw, i, idx) {
		new = arm_cmn_read_counter(cmn->dtc + i, idx);
		delta += new << 16;
	}
	local_irq_restore(flags);
	local64_add(delta, &event->count);
}

static int arm_cmn_set_event_sel_hi(struct arm_cmn_node *dn,
				    enum cmn_filter_select fsel, u8 occupid)
{
	u64 reg;

	if (fsel == SEL_NONE)
		return 0;

	if (!dn->occupid[fsel].count) {
		dn->occupid[fsel].val = occupid;
		reg = FIELD_PREP(CMN__PMU_CBUSY_SNTHROTTLE_SEL,
				 dn->occupid[SEL_CBUSY_SNTHROTTLE_SEL].val) |
		      FIELD_PREP(CMN__PMU_SN_HOME_SEL,
				 dn->occupid[SEL_SN_HOME_SEL].val) |
		      FIELD_PREP(CMN__PMU_HBT_LBT_SEL,
				 dn->occupid[SEL_HBT_LBT_SEL].val) |
		      FIELD_PREP(CMN__PMU_CLASS_OCCUP_ID,
				 dn->occupid[SEL_CLASS_OCCUP_ID].val) |
		      FIELD_PREP(CMN__PMU_OCCUP1_ID,
				 dn->occupid[SEL_OCCUP1ID].val);
		writel_relaxed(reg >> 32, dn->pmu_base + CMN_PMU_EVENT_SEL + 4);
	} else if (dn->occupid[fsel].val != occupid) {
		return -EBUSY;
	}
	dn->occupid[fsel].count++;
	return 0;
}

static void arm_cmn_set_event_sel_lo(struct arm_cmn_node *dn, int dtm_idx,
				     int eventid, bool wide_sel)
{
	if (wide_sel) {
		dn->event_w[dtm_idx] = eventid;
		writeq_relaxed(le64_to_cpu(dn->event_sel_w), dn->pmu_base + CMN_PMU_EVENT_SEL);
	} else {
		dn->event[dtm_idx] = eventid;
		writel_relaxed(le32_to_cpu(dn->event_sel), dn->pmu_base + CMN_PMU_EVENT_SEL);
	}
}

static void arm_cmn_event_start(struct perf_event *event, int flags)
{
	struct arm_cmn *cmn = to_cmn(event->pmu);
	struct arm_cmn_hw_event *hw = to_cmn_hw(event);
	struct arm_cmn_node *dn;
	enum cmn_node_type type = CMN_EVENT_TYPE(event);
	int i;

	if (type == CMN_TYPE_DTC) {
		struct arm_cmn_dtc *dtc = cmn->dtc + hw->dtc_idx[0];

		writel_relaxed(CMN_DT_DTC_CTL_DT_EN | CMN_DT_DTC_CTL_CG_DISABLE,
			       dtc->base + CMN_DT_DTC_CTL);
		writeq_relaxed(CMN_CC_INIT, CMN_DT_PMCCNTR(dtc));
		dtc->cc_active = true;
	} else if (type == CMN_TYPE_WP) {
		u64 val = CMN_EVENT_WP_VAL(event);
		u64 mask = CMN_EVENT_WP_MASK(event);

		for_each_hw_dn(hw, dn, i) {
			void __iomem *base = dn->pmu_base + CMN_DTM_OFFSET(hw->dtm_offset);
			int wp_idx = arm_cmn_get_assigned_wp_idx(event, hw, i);

			writeq_relaxed(val, base + CMN_DTM_WPn_VAL(wp_idx));
			writeq_relaxed(mask, base + CMN_DTM_WPn_MASK(wp_idx));
		}
	} else for_each_hw_dn(hw, dn, i) {
		int dtm_idx = arm_cmn_get_index(hw->dtm_idx, i);

		arm_cmn_set_event_sel_lo(dn, dtm_idx, CMN_EVENT_EVENTID(event),
					 hw->wide_sel);
	}
}

static void arm_cmn_event_stop(struct perf_event *event, int flags)
{
	struct arm_cmn *cmn = to_cmn(event->pmu);
	struct arm_cmn_hw_event *hw = to_cmn_hw(event);
	struct arm_cmn_node *dn;
	enum cmn_node_type type = CMN_EVENT_TYPE(event);
	int i;

	if (type == CMN_TYPE_DTC) {
		struct arm_cmn_dtc *dtc = cmn->dtc + hw->dtc_idx[0];

		dtc->cc_active = false;
		writel_relaxed(CMN_DT_DTC_CTL_DT_EN, dtc->base + CMN_DT_DTC_CTL);
	} else if (type == CMN_TYPE_WP) {
		for_each_hw_dn(hw, dn, i) {
			void __iomem *base = dn->pmu_base + CMN_DTM_OFFSET(hw->dtm_offset);
			int wp_idx = arm_cmn_get_assigned_wp_idx(event, hw, i);

			writeq_relaxed(0, base + CMN_DTM_WPn_MASK(wp_idx));
			writeq_relaxed(~0ULL, base + CMN_DTM_WPn_VAL(wp_idx));
		}
	} else for_each_hw_dn(hw, dn, i) {
		int dtm_idx = arm_cmn_get_index(hw->dtm_idx, i);

		arm_cmn_set_event_sel_lo(dn, dtm_idx, 0, hw->wide_sel);
	}

	arm_cmn_event_read(event);
}

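/*
 * Scratch accounting for group validation: this mirrors the per-DTM
 * counter, watchpoint and occupancy-filter limits plus the per-DTC
 * global counters, so a candidate group can be dry-run without
 * touching any hardware state.
 */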
struct arm_cmn_val {
	u8 dtm_count[CMN_MAX_DTMS];
	u8 occupid[CMN_MAX_DTMS][SEL_MAX];
	u8 wp[CMN_MAX_DTMS][4];
	u8 wp_combine[CMN_MAX_DTMS][2];
	int dtc_count[CMN_MAX_DTCS];
	bool cycles;
};

static int arm_cmn_val_find_free_wp_config(struct perf_event *event,
					   struct arm_cmn_val *val, int dtm)
{
	int wp_idx = CMN_EVENT_EVENTID(event);

	if (val->wp[dtm][wp_idx])
		if (val->wp[dtm][++wp_idx])
			return -ENOSPC;

	return wp_idx;
}

static void arm_cmn_val_add_event(struct arm_cmn *cmn, struct arm_cmn_val *val,
				  struct perf_event *event)
{
	struct arm_cmn_hw_event *hw = to_cmn_hw(event);
	struct arm_cmn_node *dn;
	enum cmn_node_type type;
	int i;

	if (is_software_event(event))
		return;

	type = CMN_EVENT_TYPE(event);
	if (type == CMN_TYPE_DTC) {
		val->cycles = true;
		return;
	}

	for_each_hw_dtc_idx(hw, dtc, idx)
		val->dtc_count[dtc]++;

	for_each_hw_dn(hw, dn, i) {
		int wp_idx, dtm = dn->dtm, sel = hw->filter_sel;

		val->dtm_count[dtm]++;

		if (sel > SEL_NONE)
			val->occupid[dtm][sel] = CMN_EVENT_OCCUPID(event) + 1;

		if (type != CMN_TYPE_WP)
			continue;

		wp_idx = arm_cmn_val_find_free_wp_config(event, val, dtm);
		val->wp[dtm][wp_idx] = 1;
		val->wp_combine[dtm][wp_idx >> 1] += !!CMN_EVENT_WP_COMBINE(event);
	}
}

static int arm_cmn_validate_group(struct arm_cmn *cmn, struct perf_event *event)
{
	struct arm_cmn_hw_event *hw = to_cmn_hw(event);
	struct arm_cmn_node *dn;
	struct perf_event *sibling, *leader = event->group_leader;
	enum cmn_node_type type;
	struct arm_cmn_val *val;
	int i, ret = -EINVAL;

	if (leader == event)
		return 0;

	if (event->pmu != leader->pmu && !is_software_event(leader))
		return -EINVAL;

	val = kzalloc(sizeof(*val), GFP_KERNEL);
	if (!val)
		return -ENOMEM;

	arm_cmn_val_add_event(cmn, val, leader);

	for_each_sibling_event(sibling, leader)
		arm_cmn_val_add_event(cmn, val, sibling);

	type = CMN_EVENT_TYPE(event);
	if (type == CMN_TYPE_DTC) {
		ret = val->cycles ? -EINVAL : 0;
		goto done;
	}

	for (i = 0; i < CMN_MAX_DTCS; i++)
		if (val->dtc_count[i] == CMN_DT_NUM_COUNTERS)
			goto done;

	for_each_hw_dn(hw, dn, i) {
		int wp_idx, dtm = dn->dtm, sel = hw->filter_sel;

		if (val->dtm_count[dtm] == CMN_DTM_NUM_COUNTERS)
			goto done;

		if (sel > SEL_NONE && val->occupid[dtm][sel] &&
		    val->occupid[dtm][sel] != CMN_EVENT_OCCUPID(event) + 1)
			goto done;

		if (type != CMN_TYPE_WP)
			continue;

		wp_idx = arm_cmn_val_find_free_wp_config(event, val, dtm);
		if (wp_idx < 0)
			goto done;

		if (wp_idx & 1 &&
		    val->wp_combine[dtm][wp_idx >> 1] != !!CMN_EVENT_WP_COMBINE(event))
			goto done;
	}

	ret = 0;
done:
	kfree(val);
	return ret;
}

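/*
 * Recovering which occupancy filter a given type/eventid uses means a
 * linear scan of the event attribute table; tolerable only because it
 * runs once per event at init time and is cached in hw->filter_sel.
 */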
static enum cmn_filter_select arm_cmn_filter_sel(const struct arm_cmn *cmn,
						 enum cmn_node_type type,
						 unsigned int eventid)
{
	struct arm_cmn_event_attr *e;
	enum cmn_model model = arm_cmn_model(cmn);

	for (int i = 0; i < ARRAY_SIZE(arm_cmn_event_attrs) - 1; i++) {
		e = container_of(arm_cmn_event_attrs[i], typeof(*e), attr.attr);
		if (e->model & model && e->type == type && e->eventid == eventid)
			return e->fsel;
	}
	return SEL_NONE;
}

static int arm_cmn_event_init(struct perf_event *event)
{
	struct arm_cmn *cmn = to_cmn(event->pmu);
	struct arm_cmn_hw_event *hw = to_cmn_hw(event);
	struct arm_cmn_node *dn;
	enum cmn_node_type type;
	bool bynodeid;
	u16 nodeid, eventid;

	if (event->attr.type != event->pmu->type)
		return -ENOENT;

	if (is_sampling_event(event) || event->attach_state & PERF_ATTACH_TASK)
		return -EINVAL;

	event->cpu = cmn->cpu;
	if (event->cpu < 0)
		return -EINVAL;

	type = CMN_EVENT_TYPE(event);
	/* DTC events (i.e. cycles) already have everything they need */
	if (type == CMN_TYPE_DTC)
		return arm_cmn_validate_group(cmn, event);

	eventid = CMN_EVENT_EVENTID(event);
	/* For watchpoints we need the actual XP node here */
	if (type == CMN_TYPE_WP) {
		type = CMN_TYPE_XP;
		/* ...and we need a "real" direction */
		if (eventid != CMN_WP_UP && eventid != CMN_WP_DOWN)
			return -EINVAL;
		/* ...but the DTM may depend on which port we're watching */
		if (cmn->multi_dtm)
			hw->dtm_offset = CMN_EVENT_WP_DEV_SEL(event) / 2;
	} else if (type == CMN_TYPE_XP &&
		   (cmn->part == PART_CMN700 || cmn->part == PART_CMN_S3)) {
		hw->wide_sel = true;
	}

	/* This is sufficiently annoying to recalculate, so cache it */
	hw->filter_sel = arm_cmn_filter_sel(cmn, type, eventid);

	bynodeid = CMN_EVENT_BYNODEID(event);
	nodeid = CMN_EVENT_NODEID(event);

	hw->dn = arm_cmn_node(cmn, type);
	if (!hw->dn)
		return -EINVAL;

	memset(hw->dtc_idx, -1, sizeof(hw->dtc_idx));
	for (dn = hw->dn; dn->type == type; dn++) {
		if (bynodeid && dn->id != nodeid) {
			hw->dn++;
			continue;
		}
		hw->num_dns++;
		if (dn->dtc < 0)
			memset(hw->dtc_idx, 0, cmn->num_dtcs);
		else
			hw->dtc_idx[dn->dtc] = 0;

		if (bynodeid)
			break;
	}

	if (!hw->num_dns) {
		dev_dbg(cmn->dev, "invalid node 0x%x type 0x%x\n", nodeid, type);
		return -EINVAL;
	}

	return arm_cmn_validate_group(cmn, event);
}

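/*
 * Unwind whatever arm_cmn_event_add() managed to claim for the first
 * i nodes: release watchpoints, drop occupancy-filter refcounts and
 * un-pair the local DTM counters, then forget the DTC counters too.
 */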
static void arm_cmn_event_clear(struct arm_cmn *cmn, struct perf_event *event,
				int i)
{
	struct arm_cmn_hw_event *hw = to_cmn_hw(event);
	enum cmn_node_type type = CMN_EVENT_TYPE(event);

	while (i--) {
		struct arm_cmn_dtm *dtm = &cmn->dtms[hw->dn[i].dtm] + hw->dtm_offset;
		unsigned int dtm_idx = arm_cmn_get_index(hw->dtm_idx, i);

		if (type == CMN_TYPE_WP) {
			int wp_idx = arm_cmn_get_assigned_wp_idx(event, hw, i);

			dtm->wp_event[wp_idx] = -1;
		}

		if (hw->filter_sel > SEL_NONE)
			hw->dn[i].occupid[hw->filter_sel].count--;

		dtm->pmu_config_low &= ~CMN__PMEVCNT_PAIRED(dtm_idx);
		writel_relaxed(dtm->pmu_config_low, dtm->base + CMN_DTM_PMU_CONFIG);
	}
	memset(hw->dtm_idx, 0, sizeof(hw->dtm_idx));
	memset(hw->wp_idx, 0, sizeof(hw->wp_idx));

	for_each_hw_dtc_idx(hw, j, idx)
		cmn->dtc[j].counters[idx] = NULL;
}

static int arm_cmn_event_add(struct perf_event *event, int flags)
{
	struct arm_cmn *cmn = to_cmn(event->pmu);
	struct arm_cmn_hw_event *hw = to_cmn_hw(event);
	struct arm_cmn_node *dn;
	enum cmn_node_type type = CMN_EVENT_TYPE(event);
	unsigned int input_sel, i = 0;

	if (type == CMN_TYPE_DTC) {
		while (cmn->dtc[i].cycles)
			if (++i == cmn->num_dtcs)
				return -ENOSPC;

		cmn->dtc[i].cycles = event;
		hw->dtc_idx[0] = i;

		if (flags & PERF_EF_START)
			arm_cmn_event_start(event, 0);
		return 0;
	}

	/* Grab the global counters first... */
	for_each_hw_dtc_idx(hw, j, idx) {
		if (cmn->part == PART_CMN600 && j > 0) {
			idx = hw->dtc_idx[0];
		} else {
			idx = 0;
			while (cmn->dtc[j].counters[idx])
				if (++idx == CMN_DT_NUM_COUNTERS)
					return -ENOSPC;
		}
		hw->dtc_idx[j] = idx;
	}

	/* ...then the local counters to feed them */
	for_each_hw_dn(hw, dn, i) {
		struct arm_cmn_dtm *dtm = &cmn->dtms[dn->dtm] + hw->dtm_offset;
		unsigned int dtm_idx, shift, d = max_t(int, dn->dtc, 0);
		u64 reg;

		dtm_idx = 0;
		while (dtm->pmu_config_low & CMN__PMEVCNT_PAIRED(dtm_idx))
			if (++dtm_idx == CMN_DTM_NUM_COUNTERS)
				goto free_dtms;

		if (type == CMN_TYPE_XP) {
			input_sel = CMN__PMEVCNT0_INPUT_SEL_XP + dtm_idx;
		} else if (type == CMN_TYPE_WP) {
			int tmp, wp_idx;
			u32 cfg;

			wp_idx = arm_cmn_find_free_wp_idx(dtm, event);
			if (wp_idx < 0)
				goto free_dtms;

			cfg = arm_cmn_wp_config(event, wp_idx);

			tmp = dtm->wp_event[wp_idx ^ 1];
			if (tmp >= 0 && CMN_EVENT_WP_COMBINE(event) !=
					CMN_EVENT_WP_COMBINE(cmn->dtc[d].counters[tmp]))
				goto free_dtms;

			input_sel = CMN__PMEVCNT0_INPUT_SEL_WP + wp_idx;

			arm_cmn_claim_wp_idx(dtm, event, d, wp_idx, i);
			writel_relaxed(cfg, dtm->base + CMN_DTM_WPn_CONFIG(wp_idx));
		} else {
			struct arm_cmn_nodeid nid = arm_cmn_nid(dn);

			if (cmn->multi_dtm)
				nid.port %= 2;

			input_sel = CMN__PMEVCNT0_INPUT_SEL_DEV + dtm_idx +
				    (nid.port << 4) + (nid.dev << 2);

			if (arm_cmn_set_event_sel_hi(dn, hw->filter_sel, CMN_EVENT_OCCUPID(event)))
				goto free_dtms;
		}

		arm_cmn_set_index(hw->dtm_idx, i, dtm_idx);

		dtm->input_sel[dtm_idx] = input_sel;
		shift = CMN__PMEVCNTn_GLOBAL_NUM_SHIFT(dtm_idx);
		dtm->pmu_config_low &= ~(CMN__PMEVCNT0_GLOBAL_NUM << shift);
		dtm->pmu_config_low |= FIELD_PREP(CMN__PMEVCNT0_GLOBAL_NUM, hw->dtc_idx[d]) << shift;
		dtm->pmu_config_low |= CMN__PMEVCNT_PAIRED(dtm_idx);
		reg = (u64)le32_to_cpu(dtm->pmu_config_high) << 32 | dtm->pmu_config_low;
		writeq_relaxed(reg, dtm->base + CMN_DTM_PMU_CONFIG);
	}

	/* Go go go! */
	arm_cmn_init_counter(event);

	if (flags & PERF_EF_START)
		arm_cmn_event_start(event, 0);

	return 0;

free_dtms:
	arm_cmn_event_clear(cmn, event, i);
	return -ENOSPC;
}

static void arm_cmn_event_del(struct perf_event *event, int flags)
{
	struct arm_cmn *cmn = to_cmn(event->pmu);
	struct arm_cmn_hw_event *hw = to_cmn_hw(event);
	enum cmn_node_type type = CMN_EVENT_TYPE(event);

	arm_cmn_event_stop(event, PERF_EF_UPDATE);

	if (type == CMN_TYPE_DTC)
		cmn->dtc[hw->dtc_idx[0]].cycles = NULL;
	else
		arm_cmn_event_clear(cmn, event, hw->num_dns);
}

/*
 * We stop the PMU for both add and read, to avoid skew across DTM counters.
 * In theory we could use snapshots to read without stopping, but then it
 * becomes a lot trickier to deal with overflow and racing against interrupts,
 * plus it seems they don't work properly on some hardware anyway :(
 */
static void arm_cmn_start_txn(struct pmu *pmu, unsigned int flags)
{
	arm_cmn_set_state(to_cmn(pmu), CMN_STATE_TXN);
}

static void arm_cmn_end_txn(struct pmu *pmu)
{
	arm_cmn_clear_state(to_cmn(pmu), CMN_STATE_TXN);
}

static int arm_cmn_commit_txn(struct pmu *pmu)
{
	arm_cmn_end_txn(pmu);
	return 0;
}

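/*
 * Keep the perf context and every DTC overflow IRQ on the same CPU, so
 * counter accesses and overflow handling can never race across CPUs.
 */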
static void arm_cmn_migrate(struct arm_cmn *cmn, unsigned int cpu)
{
	unsigned int i;

	perf_pmu_migrate_context(&cmn->pmu, cmn->cpu, cpu);
	for (i = 0; i < cmn->num_dtcs; i++)
		irq_set_affinity(cmn->dtc[i].irq, cpumask_of(cpu));
	cmn->cpu = cpu;
}

static int arm_cmn_pmu_online_cpu(unsigned int cpu, struct hlist_node *cpuhp_node)
{
	struct arm_cmn *cmn;
	int node;

	cmn = hlist_entry_safe(cpuhp_node, struct arm_cmn, cpuhp_node);
	node = dev_to_node(cmn->dev);
	if (cpu_to_node(cmn->cpu) != node && cpu_to_node(cpu) == node)
		arm_cmn_migrate(cmn, cpu);
	return 0;
}

static int arm_cmn_pmu_offline_cpu(unsigned int cpu, struct hlist_node *cpuhp_node)
{
	struct arm_cmn *cmn;
	unsigned int target;
	int node;

	cmn = hlist_entry_safe(cpuhp_node, struct arm_cmn, cpuhp_node);
	if (cpu != cmn->cpu)
		return 0;

	node = dev_to_node(cmn->dev);
	target = cpumask_any_and_but(cpumask_of_node(node), cpu_online_mask, cpu);
	if (target >= nr_cpu_ids)
		target = cpumask_any_but(cpu_online_mask, cpu);

	if (target < nr_cpu_ids)
		arm_cmn_migrate(cmn, target);

	return 0;
}

static irqreturn_t arm_cmn_handle_irq(int irq, void *dev_id)
{
	struct arm_cmn_dtc *dtc = dev_id;
	irqreturn_t ret = IRQ_NONE;

	for (;;) {
		u32 status = readl_relaxed(CMN_DT_PMOVSR(dtc));
		u64 delta;
		int i;

		for (i = 0; i < CMN_DT_NUM_COUNTERS; i++) {
			if (status & (1U << i)) {
				ret = IRQ_HANDLED;
				if (WARN_ON(!dtc->counters[i]))
					continue;
				delta = (u64)arm_cmn_read_counter(dtc, i) << 16;
				local64_add(delta, &dtc->counters[i]->count);
			}
		}

		if (status & (1U << CMN_DT_NUM_COUNTERS)) {
			ret = IRQ_HANDLED;
			if (dtc->cc_active && !WARN_ON(!dtc->cycles)) {
				delta = arm_cmn_read_cc(dtc);
				local64_add(delta, &dtc->cycles->count);
			}
		}

		writel_relaxed(status, CMN_DT_PMOVSR_CLR(dtc));

		if (!dtc->irq_friend)
			return ret;
		dtc += dtc->irq_friend;
	}
}

/* We can reasonably accommodate DTCs of the same CMN sharing IRQs */
static int arm_cmn_init_irqs(struct arm_cmn *cmn)
{
	int i, j, irq, err;

	for (i = 0; i < cmn->num_dtcs; i++) {
		irq = cmn->dtc[i].irq;
		for (j = i; j--; ) {
			if (cmn->dtc[j].irq == irq) {
				cmn->dtc[j].irq_friend = i - j;
				goto next;
			}
		}
		err = devm_request_irq(cmn->dev, irq, arm_cmn_handle_irq,
				       IRQF_NOBALANCING | IRQF_NO_THREAD,
				       dev_name(cmn->dev), &cmn->dtc[i]);
		if (err)
			return err;

		err = irq_set_affinity(irq, cpumask_of(cmn->cpu));
		if (err)
			return err;
	next:
		; /* isn't C great? */
	}
	return 0;
}

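/*
 * Bring the DTM to a quiescent initial state: PMU enabled but nothing
 * paired, and all four watchpoints parked with the same never-matching
 * val/mask combination that arm_cmn_event_stop() uses to disable them.
 */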
static void arm_cmn_init_dtm(struct arm_cmn_dtm *dtm, struct arm_cmn_node *xp, int idx)
{
	int i;

	dtm->base = xp->pmu_base + CMN_DTM_OFFSET(idx);
	dtm->pmu_config_low = CMN_DTM_PMU_CONFIG_PMU_EN;
	writeq_relaxed(dtm->pmu_config_low, dtm->base + CMN_DTM_PMU_CONFIG);
	for (i = 0; i < 4; i++) {
		dtm->wp_event[i] = -1;
		writeq_relaxed(0, dtm->base + CMN_DTM_WPn_MASK(i));
		writeq_relaxed(~0ULL, dtm->base + CMN_DTM_WPn_VAL(i));
	}
}

static int arm_cmn_init_dtc(struct arm_cmn *cmn, struct arm_cmn_node *dn, int idx)
{
	struct arm_cmn_dtc *dtc = cmn->dtc + idx;

	dtc->pmu_base = dn->pmu_base;
	dtc->base = dtc->pmu_base - arm_cmn_pmu_offset(cmn, dn);
	dtc->irq = platform_get_irq(to_platform_device(cmn->dev), idx);
	if (dtc->irq < 0)
		return dtc->irq;

	writel_relaxed(CMN_DT_DTC_CTL_DT_EN, dtc->base + CMN_DT_DTC_CTL);
	writel_relaxed(CMN_DT_PMCR_PMU_EN | CMN_DT_PMCR_OVFL_INTR_EN, CMN_DT_PMCR(dtc));
	writeq_relaxed(0, CMN_DT_PMCCNTR(dtc));
	writel_relaxed(0x1ff, CMN_DT_PMOVSR_CLR(dtc));

	return 0;
}

static int arm_cmn_node_cmp(const void *a, const void *b)
{
	const struct arm_cmn_node *dna = a, *dnb = b;
	int cmp;

	cmp = dna->type - dnb->type;
	if (!cmp)
		cmp = dna->logid - dnb->logid;
	return cmp;
}

static int arm_cmn_init_dtcs(struct arm_cmn *cmn)
{
	struct arm_cmn_node *dn, *xp;
	int dtc_idx = 0;

	cmn->dtc = devm_kcalloc(cmn->dev, cmn->num_dtcs, sizeof(cmn->dtc[0]), GFP_KERNEL);
	if (!cmn->dtc)
		return -ENOMEM;

	sort(cmn->dns, cmn->num_dns, sizeof(cmn->dns[0]), arm_cmn_node_cmp, NULL);

	cmn->xps = arm_cmn_node(cmn, CMN_TYPE_XP);

	if (cmn->part == PART_CMN600 && cmn->num_dtcs > 1) {
		/* We do at least know that a DTC's XP must be in that DTC's domain */
		dn = arm_cmn_node(cmn, CMN_TYPE_DTC);
		for (int i = 0; i < cmn->num_dtcs; i++)
			arm_cmn_node_to_xp(cmn, dn + i)->dtc = i;
	}

	for (dn = cmn->dns; dn->type; dn++) {
		if (dn->type == CMN_TYPE_XP)
			continue;

		xp = arm_cmn_node_to_xp(cmn, dn);
		dn->dtc = xp->dtc;
		dn->dtm = xp->dtm;
		if (cmn->multi_dtm)
			dn->dtm += arm_cmn_nid(dn).port / 2;

		if (dn->type == CMN_TYPE_DTC) {
			int err = arm_cmn_init_dtc(cmn, dn, dtc_idx++);

			if (err)
				return err;
		}

		/* To the PMU, RN-Ds don't add anything over RN-Is, so smoosh them together */
		if (dn->type == CMN_TYPE_RND)
			dn->type = CMN_TYPE_RNI;

		/* We split the RN-I off already, so let the CCLA part match CCLA events */
		if (dn->type == CMN_TYPE_CCLA_RNI)
			dn->type = CMN_TYPE_CCLA;
	}

	arm_cmn_set_state(cmn, CMN_STATE_DISABLED);

	return 0;
}

static unsigned int arm_cmn_dtc_domain(struct arm_cmn *cmn, void __iomem *xp_region)
{
	int offset = CMN_DTM_UNIT_INFO;

	if (cmn->part == PART_CMN650 || cmn->part == PART_CI700)
		offset = CMN650_DTM_UNIT_INFO;

	return FIELD_GET(CMN_DTM_UNIT_INFO_DTC_DOMAIN, readl_relaxed(xp_region + offset));
}

static void arm_cmn_init_node_info(struct arm_cmn *cmn, u32 offset, struct arm_cmn_node *node)
{
	int level;
	u64 reg = readq_relaxed(cmn->base + offset + CMN_NODE_INFO);

	node->type = FIELD_GET(CMN_NI_NODE_TYPE, reg);
	node->id = FIELD_GET(CMN_NI_NODE_ID, reg);
	node->logid = FIELD_GET(CMN_NI_LOGICAL_ID, reg);

	node->pmu_base = cmn->base + offset + arm_cmn_pmu_offset(cmn, node);

	if (node->type == CMN_TYPE_CFG)
		level = 0;
	else if (node->type == CMN_TYPE_XP)
		level = 1;
	else
		level = 2;

	dev_dbg(cmn->dev, "node%*c%#06hx%*ctype:%-#6x id:%-4hd off:%#x\n",
		(level * 2) + 1, ' ', node->id, 5 - (level * 2), ' ',
		node->type, node->logid, offset);
}

static enum cmn_node_type arm_cmn_subtype(enum cmn_node_type type)
{
	switch (type) {
	case CMN_TYPE_HNP:
		return CMN_TYPE_HNI;
	case CMN_TYPE_CCLA_RNI:
		return CMN_TYPE_RNI;
	default:
		return CMN_TYPE_INVALID;
	}
}

static int arm_cmn_discover(struct arm_cmn *cmn, unsigned int rgn_offset)
{
	void __iomem *cfg_region;
	struct arm_cmn_node cfg, *dn;
	struct arm_cmn_dtm *dtm;
	enum cmn_part part;
	u16 child_count, child_poff;
	u32 xp_offset[CMN_MAX_XPS];
	u64 reg;
	int i, j;
	size_t sz;

	arm_cmn_init_node_info(cmn, rgn_offset, &cfg);
	if (cfg.type != CMN_TYPE_CFG)
		return -ENODEV;

	cfg_region = cmn->base + rgn_offset;

	reg = readq_relaxed(cfg_region + CMN_CFGM_PERIPH_ID_01);
	part = FIELD_GET(CMN_CFGM_PID0_PART_0, reg);
	part |= FIELD_GET(CMN_CFGM_PID1_PART_1, reg) << 8;
	if (cmn->part && cmn->part != part)
		dev_warn(cmn->dev,
			 "Firmware binding mismatch: expected part number 0x%x, found 0x%x\n",
			 cmn->part, part);
	cmn->part = part;
	if (!arm_cmn_model(cmn))
		dev_warn(cmn->dev, "Unknown part number: 0x%x\n", part);

	reg = readl_relaxed(cfg_region + CMN_CFGM_PERIPH_ID_23);
	cmn->rev = FIELD_GET(CMN_CFGM_PID2_REVISION, reg);

	/*
	 * With the device isolation feature, if firmware has neglected to enable
	 * an XP port then we risk locking up if we try to access anything behind
	 * it; however we also have no way to tell from Non-Secure whether any
	 * given port is disabled or not, so the only way to win is not to play...
	 */
	reg = readq_relaxed(cfg_region + CMN_CFGM_INFO_GLOBAL);
	if (reg & CMN_INFO_DEVICE_ISO_ENABLE) {
		dev_err(cmn->dev, "Device isolation enabled, not continuing due to risk of lockup\n");
		return -ENODEV;
	}
	cmn->multi_dtm = reg & CMN_INFO_MULTIPLE_DTM_EN;
	cmn->rsp_vc_num = FIELD_GET(CMN_INFO_RSP_VC_NUM, reg);
	cmn->dat_vc_num = FIELD_GET(CMN_INFO_DAT_VC_NUM, reg);

	reg = readq_relaxed(cfg_region + CMN_CFGM_INFO_GLOBAL_1);
	cmn->snp_vc_num = FIELD_GET(CMN_INFO_SNP_VC_NUM, reg);
	cmn->req_vc_num = FIELD_GET(CMN_INFO_REQ_VC_NUM, reg);

	reg = readq_relaxed(cfg_region + CMN_CHILD_INFO);
	child_count = FIELD_GET(CMN_CI_CHILD_COUNT, reg);
	child_poff = FIELD_GET(CMN_CI_CHILD_PTR_OFFSET, reg);
	cmn->num_xps = child_count;
	cmn->num_dns = cmn->num_xps;

	/* Pass 1: visit the XPs, enumerate their children */
	for (i = 0; i < cmn->num_xps; i++) {
		reg = readq_relaxed(cfg_region + child_poff + i * 8);
		xp_offset[i] = reg & CMN_CHILD_NODE_ADDR;

		reg = readq_relaxed(cmn->base + xp_offset[i] + CMN_CHILD_INFO);
		cmn->num_dns += FIELD_GET(CMN_CI_CHILD_COUNT, reg);
	}

	/*
	 * Some nodes effectively have two separate types, which we'll handle
	 * by creating one of each internally. For a (very) safe initial upper
	 * bound, account for double the number of non-XP nodes.
	 */
	dn = devm_kcalloc(cmn->dev, cmn->num_dns * 2 - cmn->num_xps,
			  sizeof(*dn), GFP_KERNEL);
	if (!dn)
		return -ENOMEM;

	/* Initial safe upper bound on DTMs for any possible mesh layout */
	i = cmn->num_xps;
	if (cmn->multi_dtm)
		i += cmn->num_xps + 1;
	dtm = devm_kcalloc(cmn->dev, i, sizeof(*dtm), GFP_KERNEL);
	if (!dtm)
		return -ENOMEM;

	/* Pass 2: now we can actually populate the nodes */
	cmn->dns = dn;
	cmn->dtms = dtm;
	for (i = 0; i < cmn->num_xps; i++) {
		void __iomem *xp_region = cmn->base + xp_offset[i];
		struct arm_cmn_node *xp = dn++;
		unsigned int xp_ports = 0;

		arm_cmn_init_node_info(cmn, xp_offset[i], xp);

		/*
		 * Thanks to the order in which XP logical IDs seem to be
		 * assigned, we can handily infer the mesh X dimension by
		 * looking out for the XP at (0,1) without needing to know
		 * the exact node ID format, which we can later derive.
		 */
		if (xp->id == (1 << 3))
			cmn->mesh_x = xp->logid;

		if (cmn->part == PART_CMN600)
			xp->dtc = -1;
		else
			xp->dtc = arm_cmn_dtc_domain(cmn, xp_region);

		xp->dtm = dtm - cmn->dtms;
		arm_cmn_init_dtm(dtm++, xp, 0);
		/*
		 * Keeping track of connected ports will let us filter out
		 * unnecessary XP events easily, and also infer the per-XP
		 * part of the node ID format.
		 */
		for (int p = 0; p < CMN_MAX_PORTS; p++)
			if (arm_cmn_device_connect_info(cmn, xp, p))
				xp_ports |= BIT(p);

		if (cmn->num_xps == 1) {
			xp->portid_bits = 3;
			xp->deviceid_bits = 2;
		} else if (xp_ports > 0x3) {
			xp->portid_bits = 2;
			xp->deviceid_bits = 1;
		} else {
			xp->portid_bits = 1;
			xp->deviceid_bits = 2;
		}

		if (cmn->multi_dtm && (xp_ports > 0x3))
			arm_cmn_init_dtm(dtm++, xp, 1);
		if (cmn->multi_dtm && (xp_ports > 0xf))
			arm_cmn_init_dtm(dtm++, xp, 2);

		cmn->ports_used |= xp_ports;

		reg = readq_relaxed(xp_region + CMN_CHILD_INFO);
		child_count = FIELD_GET(CMN_CI_CHILD_COUNT, reg);
		child_poff = FIELD_GET(CMN_CI_CHILD_PTR_OFFSET, reg);

		for (j = 0; j < child_count; j++) {
			reg = readq_relaxed(xp_region + child_poff + j * 8);
			/*
			 * Don't even try to touch anything external, since in general
			 * we haven't a clue how to power up arbitrary CHI requesters.
			 * As of CMN-600r1 these could only be RN-SAMs or CXLAs,
			 * neither of which have any PMU events anyway.
			 * (Actually, CXLAs do seem to have grown some events in r1p2,
			 * but they don't go to regular XP DTMs, and they depend on
			 * secure configuration which we can't easily deal with)
			 */
			if (reg & CMN_CHILD_NODE_EXTERNAL) {
				dev_dbg(cmn->dev, "ignoring external node %llx\n", reg);
				continue;
			}
			/*
			 * AmpereOneX erratum AC04_MESH_1 makes some XPs report a bogus
			 * child count larger than the number of valid child pointers.
			 * A child offset of 0 can only occur on CMN-600; otherwise it
			 * would imply the root node being its own grandchild, which
			 * we can safely dismiss in general.
			 */
			if (reg == 0 && cmn->part != PART_CMN600) {
				dev_dbg(cmn->dev, "bogus child pointer?\n");
				continue;
			}

			arm_cmn_init_node_info(cmn, reg & CMN_CHILD_NODE_ADDR, dn);
			dn->portid_bits = xp->portid_bits;
			dn->deviceid_bits = xp->deviceid_bits;

			switch (dn->type) {
			case CMN_TYPE_DTC:
				cmn->num_dtcs++;
				dn++;
				break;
			/* These guys have PMU events */
			case CMN_TYPE_DVM:
			case CMN_TYPE_HNI:
			case CMN_TYPE_HNF:
			case CMN_TYPE_SBSX:
			case CMN_TYPE_RNI:
			case CMN_TYPE_RND:
			case CMN_TYPE_MTSX:
			case CMN_TYPE_CXRA:
			case CMN_TYPE_CXHA:
			case CMN_TYPE_CCRA:
			case CMN_TYPE_CCHA:
			case CMN_TYPE_HNS:
				dn++;
				break;
			case CMN_TYPE_CCLA:
				dn->pmu_base += CMN_CCLA_PMU_EVENT_SEL;
				dn++;
				break;
			/* Nothing to see here */
			case CMN_TYPE_MPAM_S:
			case CMN_TYPE_MPAM_NS:
			case CMN_TYPE_RNSAM:
			case CMN_TYPE_CXLA:
			case CMN_TYPE_HNS_MPAM_S:
			case CMN_TYPE_HNS_MPAM_NS:
			case CMN_TYPE_APB:
				break;
			/*
			 * Split "optimised" combination nodes into separate
			 * types for the different event sets. Offsetting the
			 * base address lets us handle the second pmu_event_sel
			 * register via the normal mechanism later.
			 */
			case CMN_TYPE_HNP:
			case CMN_TYPE_CCLA_RNI:
				dn[1] = dn[0];
				dn[0].pmu_base += CMN_CCLA_PMU_EVENT_SEL;
				dn[1].type = arm_cmn_subtype(dn->type);
				dn += 2;
				break;
			/* Something has gone horribly wrong */
			default:
				dev_err(cmn->dev, "invalid device node type: 0x%x\n", dn->type);
				return -ENODEV;
			}
		}
	}

	/* Correct for any nodes we added or skipped */
	cmn->num_dns = dn - cmn->dns;

	/* Cheeky +1 to help terminate pointer-based iteration later */
	sz = (void *)(dn + 1) - (void *)cmn->dns;
	dn = devm_krealloc(cmn->dev, cmn->dns, sz, GFP_KERNEL);
	if (dn)
		cmn->dns = dn;

	sz = (void *)dtm - (void *)cmn->dtms;
	dtm = devm_krealloc(cmn->dev, cmn->dtms, sz, GFP_KERNEL);
	if (dtm)
		cmn->dtms = dtm;

	/*
	 * If mesh_x wasn't set during discovery then we never saw
	 * an XP at (0,1), thus we must have an Nx1 configuration.
	 */
	if (!cmn->mesh_x)
		cmn->mesh_x = cmn->num_xps;
	cmn->mesh_y = cmn->num_xps / cmn->mesh_x;

	/* 1x1 config plays havoc with XP event encodings */
	if (cmn->num_xps == 1)
		dev_warn(cmn->dev, "1x1 config not fully supported, translate XP events manually\n");

	dev_dbg(cmn->dev, "periph_id part 0x%03x revision %d\n", cmn->part, cmn->rev);
	reg = cmn->ports_used;
	dev_dbg(cmn->dev, "mesh %dx%d, ID width %d, ports %6pbl%s\n",
		cmn->mesh_x, cmn->mesh_y, arm_cmn_xyidbits(cmn), &reg,
		cmn->multi_dtm ? ", multi-DTM" : "");

	return 0;
}

static int arm_cmn600_acpi_probe(struct platform_device *pdev, struct arm_cmn *cmn)
{
	struct resource *cfg, *root;

	cfg = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!cfg)
		return -EINVAL;

	root = platform_get_resource(pdev, IORESOURCE_MEM, 1);
	if (!root)
		return -EINVAL;

	if (!resource_contains(cfg, root))
		swap(cfg, root);
	/*
	 * Note that devm_ioremap_resource() is dumb and won't let the platform
	 * device claim cfg when the ACPI companion device has already claimed
	 * root within it. But since they *are* already both claimed in the
	 * appropriate name, we don't really need to do it again here anyway.
	 */
	cmn->base = devm_ioremap(cmn->dev, cfg->start, resource_size(cfg));
	if (!cmn->base)
		return -ENOMEM;

	return root->start - cfg->start;
}

static int arm_cmn600_of_probe(struct device_node *np)
{
	u32 rootnode;

	return of_property_read_u32(np, "arm,root-node", &rootnode) ?: rootnode;
}

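/*
 * Either CMN-600 helper above yields the root node's offset relative to
 * the mapped configuration region (or a negative errno); that offset is
 * what arm_cmn_discover() then takes as its starting point.
 */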
static int arm_cmn_probe(struct platform_device *pdev)
{
	struct arm_cmn *cmn;
	const char *name;
	static atomic_t id;
	int err, rootnode, this_id;

	cmn = devm_kzalloc(&pdev->dev, sizeof(*cmn), GFP_KERNEL);
	if (!cmn)
		return -ENOMEM;

	cmn->dev = &pdev->dev;
	cmn->part = (unsigned long)device_get_match_data(cmn->dev);
	cmn->cpu = cpumask_local_spread(0, dev_to_node(cmn->dev));
	platform_set_drvdata(pdev, cmn);

	if (cmn->part == PART_CMN600 && has_acpi_companion(cmn->dev)) {
		rootnode = arm_cmn600_acpi_probe(pdev, cmn);
	} else {
		rootnode = 0;
		cmn->base = devm_platform_ioremap_resource(pdev, 0);
		if (IS_ERR(cmn->base))
			return PTR_ERR(cmn->base);
		if (cmn->part == PART_CMN600)
			rootnode = arm_cmn600_of_probe(pdev->dev.of_node);
	}
	if (rootnode < 0)
		return rootnode;

	err = arm_cmn_discover(cmn, rootnode);
	if (err)
		return err;

	err = arm_cmn_init_dtcs(cmn);
	if (err)
		return err;

	err = arm_cmn_init_irqs(cmn);
	if (err)
		return err;

	cmn->pmu = (struct pmu) {
		.module = THIS_MODULE,
		.parent = cmn->dev,
		.attr_groups = arm_cmn_attr_groups,
		.capabilities = PERF_PMU_CAP_NO_EXCLUDE,
		.task_ctx_nr = perf_invalid_context,
		.pmu_enable = arm_cmn_pmu_enable,
		.pmu_disable = arm_cmn_pmu_disable,
		.event_init = arm_cmn_event_init,
		.add = arm_cmn_event_add,
		.del = arm_cmn_event_del,
		.start = arm_cmn_event_start,
		.stop = arm_cmn_event_stop,
		.read = arm_cmn_event_read,
		.start_txn = arm_cmn_start_txn,
		.commit_txn = arm_cmn_commit_txn,
		.cancel_txn = arm_cmn_end_txn,
	};
	this_id = atomic_fetch_inc(&id);

	name = devm_kasprintf(cmn->dev, GFP_KERNEL, "arm_cmn_%d", this_id);
	if (!name)
		return -ENOMEM;

	err = cpuhp_state_add_instance(arm_cmn_hp_state, &cmn->cpuhp_node);
	if (err)
		return err;

	err = perf_pmu_register(&cmn->pmu, name, -1);
	if (err)
		cpuhp_state_remove_instance_nocalls(arm_cmn_hp_state, &cmn->cpuhp_node);
	else
		arm_cmn_debugfs_init(cmn, this_id);

	return err;
}

static void arm_cmn_remove(struct platform_device *pdev)
{
	struct arm_cmn *cmn = platform_get_drvdata(pdev);

	writel_relaxed(0, cmn->dtc[0].base + CMN_DT_DTC_CTL);

	perf_pmu_unregister(&cmn->pmu);
	cpuhp_state_remove_instance_nocalls(arm_cmn_hp_state, &cmn->cpuhp_node);
	debugfs_remove(cmn->debug);
}

#ifdef CONFIG_OF
static const struct of_device_id arm_cmn_of_match[] = {
	{ .compatible = "arm,cmn-600", .data = (void *)PART_CMN600 },
	{ .compatible = "arm,cmn-650" },
	{ .compatible = "arm,cmn-700" },
	{ .compatible = "arm,cmn-s3" },
	{ .compatible = "arm,ci-700" },
	{}
};
MODULE_DEVICE_TABLE(of, arm_cmn_of_match);
#endif

#ifdef CONFIG_ACPI
static const struct acpi_device_id arm_cmn_acpi_match[] = {
	{ "ARMHC600", PART_CMN600 },
	{ "ARMHC650" },
	{ "ARMHC700" },
	{ "ARMHC003" },
	{}
};
MODULE_DEVICE_TABLE(acpi, arm_cmn_acpi_match);
#endif

static struct platform_driver arm_cmn_driver = {
	.driver = {
		.name = "arm-cmn",
		.of_match_table = of_match_ptr(arm_cmn_of_match),
		.acpi_match_table = ACPI_PTR(arm_cmn_acpi_match),
		.suppress_bind_attrs = true,
	},
	.probe = arm_cmn_probe,
	.remove_new = arm_cmn_remove,
};

static int __init arm_cmn_init(void)
{
	int ret;

	ret = cpuhp_setup_state_multi(CPUHP_AP_ONLINE_DYN,
				      "perf/arm/cmn:online",
				      arm_cmn_pmu_online_cpu,
				      arm_cmn_pmu_offline_cpu);
	if (ret < 0)
		return ret;
	arm_cmn_hp_state = ret;
	arm_cmn_debugfs = debugfs_create_dir("arm-cmn", NULL);

	ret = platform_driver_register(&arm_cmn_driver);
	if (ret) {
		cpuhp_remove_multi_state(arm_cmn_hp_state);
		debugfs_remove(arm_cmn_debugfs);
	}
	return ret;
}

static void __exit arm_cmn_exit(void)
{
	platform_driver_unregister(&arm_cmn_driver);
	cpuhp_remove_multi_state(arm_cmn_hp_state);
	debugfs_remove(arm_cmn_debugfs);
}

module_init(arm_cmn_init);
module_exit(arm_cmn_exit);

MODULE_AUTHOR("Robin Murphy <robin.murphy@arm.com>");
MODULE_DESCRIPTION("Arm CMN-600 PMU driver");
MODULE_LICENSE("GPL v2");