coresight-etm4x-sysfs.c 57 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
37047057067077087097107117127137147157167177187197207217227237247257267277287297307317327337347357367377387397407417427437447457467477487497507517527537547557567577587597607617627637647657667677687697707717727737747757767777787797807817827837847857867877887897907917927937947957967977987998008018028038048058068078088098108118128138148158168178188198208218228238248258268278288298308318328338348358368378388398408418428438448458468478488498508518528538548558568578588598608618628638648658668678688698708718728738748758768778788798808818828838848858868878888898908918928938948958968978988999009019029039049059069079089099109119129139149159169179189199209219229239249259269279289299309319329339349359369379389399409419429439449459469479489499509519529539549559569579589599609619629639649659669679689699709719729739749759769779789799809819829839849859869879889899909919929939949959969979989991000100110021003100410051006100710081009101010111012101310141015101610171018101910201021102210231024102510261027102810291030103110321033103410351036103710381039104010411042104310441045104610471048104910501051105210531054105510561057105810591060106110621063106410651066106710681069107010711072107310741075107610771078107910801081108210831084108510861087108810891090109110921093109410951096109710981099110011011102110311041105110611071108110911101111111211131114111511161117111811191120112111221123112411251126112711281129113011311132113311341135113611371138113911401141114211431144114511461147114811491150115111521153115411551156115711581159116011611162116311641165116611671168116911701171117211731174117511761177117811791180118111821183118411851186118711881189119011911192119311941195119611971198119912001201120212031204120512061207120812091210121112121213121412151216121712181219122012211222122312241225122612271228122912301231123212331234123512361237123812391240124112421243124412451246124712481249125012511252125312541255125612571258125912601261126212631264126512661267126812691270127112721273127412751276127
71278127912801281128212831284128512861287128812891290129112921293129412951296129712981299130013011302130313041305130613071308130913101311131213131314131513161317131813191320132113221323132413251326132713281329133013311332133313341335133613371338133913401341134213431344134513461347134813491350135113521353135413551356135713581359136013611362136313641365136613671368136913701371137213731374137513761377137813791380138113821383138413851386138713881389139013911392139313941395139613971398139914001401140214031404140514061407140814091410141114121413141414151416141714181419142014211422142314241425142614271428142914301431143214331434143514361437143814391440144114421443144414451446144714481449145014511452145314541455145614571458145914601461146214631464146514661467146814691470147114721473147414751476147714781479148014811482148314841485148614871488148914901491149214931494149514961497149814991500150115021503150415051506150715081509151015111512151315141515151615171518151915201521152215231524152515261527152815291530153115321533153415351536153715381539154015411542154315441545154615471548154915501551155215531554155515561557155815591560156115621563156415651566156715681569157015711572157315741575157615771578157915801581158215831584158515861587158815891590159115921593159415951596159715981599160016011602160316041605160616071608160916101611161216131614161516161617161816191620162116221623162416251626162716281629163016311632163316341635163616371638163916401641164216431644164516461647164816491650165116521653165416551656165716581659166016611662166316641665166616671668166916701671167216731674167516761677167816791680168116821683168416851686168716881689169016911692169316941695169616971698169917001701170217031704170517061707170817091710171117121713171417151716171717181719172017211722172317241725172617271728172917301731173217331734173517361737173817391740174117421743174417451746174717481749175017511752175317541755175617571758175917601761176217631764176517661767176817691770177117721773177417751776177
71778177917801781178217831784178517861787178817891790179117921793179417951796179717981799180018011802180318041805180618071808180918101811181218131814181518161817181818191820182118221823182418251826182718281829183018311832183318341835183618371838183918401841184218431844184518461847184818491850185118521853185418551856185718581859186018611862186318641865186618671868186918701871187218731874187518761877187818791880188118821883188418851886188718881889189018911892189318941895189618971898189919001901190219031904190519061907190819091910191119121913191419151916191719181919192019211922192319241925192619271928192919301931193219331934193519361937193819391940194119421943194419451946194719481949195019511952195319541955195619571958195919601961196219631964196519661967196819691970197119721973197419751976197719781979198019811982198319841985198619871988198919901991199219931994199519961997199819992000200120022003200420052006200720082009201020112012201320142015201620172018201920202021202220232024202520262027202820292030203120322033203420352036203720382039204020412042204320442045204620472048204920502051205220532054205520562057205820592060206120622063206420652066206720682069207020712072207320742075207620772078207920802081208220832084208520862087208820892090209120922093209420952096209720982099210021012102210321042105210621072108210921102111211221132114211521162117211821192120212121222123212421252126212721282129213021312132213321342135213621372138213921402141214221432144214521462147214821492150215121522153215421552156215721582159216021612162216321642165216621672168216921702171217221732174217521762177217821792180
  1. // SPDX-License-Identifier: GPL-2.0
  2. /*
  3. * Copyright(C) 2015 Linaro Limited. All rights reserved.
  4. * Author: Mathieu Poirier <mathieu.poirier@linaro.org>
  5. */
  6. #include <linux/pid_namespace.h>
  7. #include <linux/pm_runtime.h>
  8. #include <linux/sysfs.h>
  9. #include "coresight-etm4x.h"
  10. #include "coresight-priv.h"
/*
 * etm4_set_mode_exclude() - switch the currently selected address comparator
 * pair between include and exclude filtering for ViewInst.
 * @drvdata: device data, whose config->addr_idx selects the comparator.
 * @exclude: true to exclude the address range from trace, false to include it.
 *
 * Only acts when the selected comparator is an instruction address
 * comparator configured as an address range pair; returns -EINVAL when the
 * selection does not name the even (first) comparator of a RANGE pair.
 * Caller is expected to hold drvdata->spinlock (config is read and written
 * without locking here) — NOTE(review): confirm against callers.
 */
static int etm4_set_mode_exclude(struct etmv4_drvdata *drvdata, bool exclude)
{
	u8 idx;
	struct etmv4_config *config = &drvdata->config;

	idx = config->addr_idx;

	/*
	 * TRCACATRn.TYPE bit[1:0]: type of comparison
	 * the trace unit performs
	 */
	if (BMVAL(config->addr_acc[idx], 0, 1) == ETM_INSTR_ADDR) {
		/* range comparators work in pairs; idx must be the even half */
		if (idx % 2 != 0)
			return -EINVAL;

		/*
		 * We are performing instruction address comparison. Set the
		 * relevant bit of ViewInst Include/Exclude Control register
		 * for corresponding address comparator pair.
		 */
		if (config->addr_type[idx] != ETM_ADDR_TYPE_RANGE ||
		    config->addr_type[idx + 1] != ETM_ADDR_TYPE_RANGE)
			return -EINVAL;

		if (exclude == true) {
			/*
			 * Set exclude bit and unset the include bit
			 * corresponding to comparator pair
			 */
			config->viiectlr |= BIT(idx / 2 + 16);
			config->viiectlr &= ~BIT(idx / 2);
		} else {
			/*
			 * Set include bit and unset exclude bit
			 * corresponding to comparator pair
			 */
			config->viiectlr |= BIT(idx / 2);
			config->viiectlr &= ~BIT(idx / 2 + 16);
		}
	}
	return 0;
}
  49. static ssize_t nr_pe_cmp_show(struct device *dev,
  50. struct device_attribute *attr,
  51. char *buf)
  52. {
  53. unsigned long val;
  54. struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
  55. val = drvdata->nr_pe_cmp;
  56. return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
  57. }
  58. static DEVICE_ATTR_RO(nr_pe_cmp);
  59. static ssize_t nr_addr_cmp_show(struct device *dev,
  60. struct device_attribute *attr,
  61. char *buf)
  62. {
  63. unsigned long val;
  64. struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
  65. val = drvdata->nr_addr_cmp;
  66. return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
  67. }
  68. static DEVICE_ATTR_RO(nr_addr_cmp);
  69. static ssize_t nr_cntr_show(struct device *dev,
  70. struct device_attribute *attr,
  71. char *buf)
  72. {
  73. unsigned long val;
  74. struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
  75. val = drvdata->nr_cntr;
  76. return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
  77. }
  78. static DEVICE_ATTR_RO(nr_cntr);
  79. static ssize_t nr_ext_inp_show(struct device *dev,
  80. struct device_attribute *attr,
  81. char *buf)
  82. {
  83. unsigned long val;
  84. struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
  85. val = drvdata->nr_ext_inp;
  86. return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
  87. }
  88. static DEVICE_ATTR_RO(nr_ext_inp);
  89. static ssize_t numcidc_show(struct device *dev,
  90. struct device_attribute *attr,
  91. char *buf)
  92. {
  93. unsigned long val;
  94. struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
  95. val = drvdata->numcidc;
  96. return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
  97. }
  98. static DEVICE_ATTR_RO(numcidc);
  99. static ssize_t numvmidc_show(struct device *dev,
  100. struct device_attribute *attr,
  101. char *buf)
  102. {
  103. unsigned long val;
  104. struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
  105. val = drvdata->numvmidc;
  106. return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
  107. }
  108. static DEVICE_ATTR_RO(numvmidc);
  109. static ssize_t nrseqstate_show(struct device *dev,
  110. struct device_attribute *attr,
  111. char *buf)
  112. {
  113. unsigned long val;
  114. struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
  115. val = drvdata->nrseqstate;
  116. return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
  117. }
  118. static DEVICE_ATTR_RO(nrseqstate);
  119. static ssize_t nr_resource_show(struct device *dev,
  120. struct device_attribute *attr,
  121. char *buf)
  122. {
  123. unsigned long val;
  124. struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
  125. val = drvdata->nr_resource;
  126. return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
  127. }
  128. static DEVICE_ATTR_RO(nr_resource);
  129. static ssize_t nr_ss_cmp_show(struct device *dev,
  130. struct device_attribute *attr,
  131. char *buf)
  132. {
  133. unsigned long val;
  134. struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
  135. val = drvdata->nr_ss_cmp;
  136. return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
  137. }
  138. static DEVICE_ATTR_RO(nr_ss_cmp);
  139. static ssize_t reset_store(struct device *dev,
  140. struct device_attribute *attr,
  141. const char *buf, size_t size)
  142. {
  143. int i;
  144. unsigned long val;
  145. struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
  146. struct etmv4_config *config = &drvdata->config;
  147. if (kstrtoul(buf, 16, &val))
  148. return -EINVAL;
  149. spin_lock(&drvdata->spinlock);
  150. if (val)
  151. config->mode = 0x0;
  152. /* Disable data tracing: do not trace load and store data transfers */
  153. config->mode &= ~(ETM_MODE_LOAD | ETM_MODE_STORE);
  154. config->cfg &= ~(BIT(1) | BIT(2));
  155. /* Disable data value and data address tracing */
  156. config->mode &= ~(ETM_MODE_DATA_TRACE_ADDR |
  157. ETM_MODE_DATA_TRACE_VAL);
  158. config->cfg &= ~(BIT(16) | BIT(17));
  159. /* Disable all events tracing */
  160. config->eventctrl0 = 0x0;
  161. config->eventctrl1 = 0x0;
  162. /* Disable timestamp event */
  163. config->ts_ctrl = 0x0;
  164. /* Disable stalling */
  165. config->stall_ctrl = 0x0;
  166. /* Reset trace synchronization period to 2^8 = 256 bytes*/
  167. if (drvdata->syncpr == false)
  168. config->syncfreq = 0x8;
  169. /*
  170. * Enable ViewInst to trace everything with start-stop logic in
  171. * started state. ARM recommends start-stop logic is set before
  172. * each trace run.
  173. */
  174. config->vinst_ctrl |= BIT(0);
  175. if (drvdata->nr_addr_cmp == true) {
  176. config->mode |= ETM_MODE_VIEWINST_STARTSTOP;
  177. /* SSSTATUS, bit[9] */
  178. config->vinst_ctrl |= BIT(9);
  179. }
  180. /* No address range filtering for ViewInst */
  181. config->viiectlr = 0x0;
  182. /* No start-stop filtering for ViewInst */
  183. config->vissctlr = 0x0;
  184. /* Disable seq events */
  185. for (i = 0; i < drvdata->nrseqstate-1; i++)
  186. config->seq_ctrl[i] = 0x0;
  187. config->seq_rst = 0x0;
  188. config->seq_state = 0x0;
  189. /* Disable external input events */
  190. config->ext_inp = 0x0;
  191. config->cntr_idx = 0x0;
  192. for (i = 0; i < drvdata->nr_cntr; i++) {
  193. config->cntrldvr[i] = 0x0;
  194. config->cntr_ctrl[i] = 0x0;
  195. config->cntr_val[i] = 0x0;
  196. }
  197. config->res_idx = 0x0;
  198. for (i = 0; i < drvdata->nr_resource; i++)
  199. config->res_ctrl[i] = 0x0;
  200. for (i = 0; i < drvdata->nr_ss_cmp; i++) {
  201. config->ss_ctrl[i] = 0x0;
  202. config->ss_pe_cmp[i] = 0x0;
  203. }
  204. config->addr_idx = 0x0;
  205. for (i = 0; i < drvdata->nr_addr_cmp * 2; i++) {
  206. config->addr_val[i] = 0x0;
  207. config->addr_acc[i] = 0x0;
  208. config->addr_type[i] = ETM_ADDR_TYPE_NONE;
  209. }
  210. config->ctxid_idx = 0x0;
  211. for (i = 0; i < drvdata->numcidc; i++)
  212. config->ctxid_pid[i] = 0x0;
  213. config->ctxid_mask0 = 0x0;
  214. config->ctxid_mask1 = 0x0;
  215. config->vmid_idx = 0x0;
  216. for (i = 0; i < drvdata->numvmidc; i++)
  217. config->vmid_val[i] = 0x0;
  218. config->vmid_mask0 = 0x0;
  219. config->vmid_mask1 = 0x0;
  220. drvdata->trcid = drvdata->cpu + 1;
  221. spin_unlock(&drvdata->spinlock);
  222. return size;
  223. }
  224. static DEVICE_ATTR_WO(reset);
  225. static ssize_t mode_show(struct device *dev,
  226. struct device_attribute *attr,
  227. char *buf)
  228. {
  229. unsigned long val;
  230. struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
  231. struct etmv4_config *config = &drvdata->config;
  232. val = config->mode;
  233. return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
  234. }
/*
 * mode_store - translate the user-supplied mode bitmask into the hardware
 * configuration registers (TRCCONFIGR, TRCEVENTCTL1R, TRCSTALLCTLR,
 * TRCVICTLR) held in drvdata->config.
 *
 * The value is parsed as hex, masked with ETMv4_MODE_ALL, and each feature
 * bit is applied only when the corresponding hardware capability flag in
 * drvdata says the unit implements it. All updates are performed under
 * drvdata->spinlock.
 */
static ssize_t mode_store(struct device *dev,
			  struct device_attribute *attr,
			  const char *buf, size_t size)
{
	unsigned long val, mode;
	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
	struct etmv4_config *config = &drvdata->config;

	if (kstrtoul(buf, 16, &val))
		return -EINVAL;

	spin_lock(&drvdata->spinlock);
	config->mode = val & ETMv4_MODE_ALL;

	/*
	 * NOTE(review): the return value of etm4_set_mode_exclude() is
	 * ignored here, so an invalid comparator selection fails silently —
	 * confirm this is intentional.
	 */
	if (config->mode & ETM_MODE_EXCLUDE)
		etm4_set_mode_exclude(drvdata, true);
	else
		etm4_set_mode_exclude(drvdata, false);

	/* P0 (load/store) tracing only when the unit supports it */
	if (drvdata->instrp0 == true) {
		/* start by clearing instruction P0 field */
		config->cfg &= ~(BIT(1) | BIT(2));
		if (config->mode & ETM_MODE_LOAD)
			/* 0b01 Trace load instructions as P0 instructions */
			config->cfg |= BIT(1);
		if (config->mode & ETM_MODE_STORE)
			/* 0b10 Trace store instructions as P0 instructions */
			config->cfg |= BIT(2);
		if (config->mode & ETM_MODE_LOAD_STORE)
			/*
			 * 0b11 Trace load and store instructions
			 * as P0 instructions
			 */
			config->cfg |= BIT(1) | BIT(2);
	}

	/* bit[3], Branch broadcast mode */
	if ((config->mode & ETM_MODE_BB) && (drvdata->trcbb == true))
		config->cfg |= BIT(3);
	else
		config->cfg &= ~BIT(3);

	/* bit[4], Cycle counting instruction trace bit */
	if ((config->mode & ETMv4_MODE_CYCACC) &&
	    (drvdata->trccci == true))
		config->cfg |= BIT(4);
	else
		config->cfg &= ~BIT(4);

	/* bit[6], Context ID tracing bit */
	if ((config->mode & ETMv4_MODE_CTXID) && (drvdata->ctxid_size))
		config->cfg |= BIT(6);
	else
		config->cfg &= ~BIT(6);

	/* bit[7], Virtual context identifier tracing bit */
	if ((config->mode & ETM_MODE_VMID) && (drvdata->vmid_size))
		config->cfg |= BIT(7);
	else
		config->cfg &= ~BIT(7);

	/* bits[10:8], Conditional instruction tracing bit */
	mode = ETM_MODE_COND(config->mode);
	if (drvdata->trccond == true) {
		config->cfg &= ~(BIT(8) | BIT(9) | BIT(10));
		config->cfg |= mode << 8;
	}

	/* bit[11], Global timestamp tracing bit */
	if ((config->mode & ETMv4_MODE_TIMESTAMP) && (drvdata->ts_size))
		config->cfg |= BIT(11);
	else
		config->cfg &= ~BIT(11);

	/* bit[12], Return stack enable bit */
	if ((config->mode & ETM_MODE_RETURNSTACK) &&
	    (drvdata->retstack == true))
		config->cfg |= BIT(12);
	else
		config->cfg &= ~BIT(12);

	/* bits[14:13], Q element enable field */
	mode = ETM_MODE_QELEM(config->mode);
	/* start by clearing QE bits */
	config->cfg &= ~(BIT(13) | BIT(14));
	/* if supported, Q elements with instruction counts are enabled */
	if ((mode & BIT(0)) && (drvdata->q_support & BIT(0)))
		config->cfg |= BIT(13);
	/*
	 * if supported, Q elements with and without instruction
	 * counts are enabled
	 */
	if ((mode & BIT(1)) && (drvdata->q_support & BIT(1)))
		config->cfg |= BIT(14);

	/* bit[11], AMBA Trace Bus (ATB) trigger enable bit */
	if ((config->mode & ETM_MODE_ATB_TRIGGER) &&
	    (drvdata->atbtrig == true))
		config->eventctrl1 |= BIT(11);
	else
		config->eventctrl1 &= ~BIT(11);

	/* bit[12], Low-power state behavior override bit */
	if ((config->mode & ETM_MODE_LPOVERRIDE) &&
	    (drvdata->lpoverride == true))
		config->eventctrl1 |= BIT(12);
	else
		config->eventctrl1 &= ~BIT(12);

	/* bit[8], Instruction stall bit */
	if (config->mode & ETM_MODE_ISTALL_EN)
		config->stall_ctrl |= BIT(8);
	else
		config->stall_ctrl &= ~BIT(8);

	/* bit[10], Prioritize instruction trace bit */
	if (config->mode & ETM_MODE_INSTPRIO)
		config->stall_ctrl |= BIT(10);
	else
		config->stall_ctrl &= ~BIT(10);

	/* bit[13], Trace overflow prevention bit */
	if ((config->mode & ETM_MODE_NOOVERFLOW) &&
	    (drvdata->nooverflow == true))
		config->stall_ctrl |= BIT(13);
	else
		config->stall_ctrl &= ~BIT(13);

	/* bit[9] Start/stop logic control bit */
	if (config->mode & ETM_MODE_VIEWINST_STARTSTOP)
		config->vinst_ctrl |= BIT(9);
	else
		config->vinst_ctrl &= ~BIT(9);

	/* bit[10], Whether a trace unit must trace a Reset exception */
	if (config->mode & ETM_MODE_TRACE_RESET)
		config->vinst_ctrl |= BIT(10);
	else
		config->vinst_ctrl &= ~BIT(10);

	/* bit[11], Whether a trace unit must trace a system error exception */
	if ((config->mode & ETM_MODE_TRACE_ERR) &&
	    (drvdata->trc_error == true))
		config->vinst_ctrl |= BIT(11);
	else
		config->vinst_ctrl &= ~BIT(11);

	/* kernel/user exclusion is applied last, on top of the above */
	if (config->mode & (ETM_MODE_EXCL_KERN | ETM_MODE_EXCL_USER))
		etm4_config_trace_mode(config);

	spin_unlock(&drvdata->spinlock);
	return size;
}
static DEVICE_ATTR_RW(mode);
  366. static ssize_t pe_show(struct device *dev,
  367. struct device_attribute *attr,
  368. char *buf)
  369. {
  370. unsigned long val;
  371. struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
  372. struct etmv4_config *config = &drvdata->config;
  373. val = config->pe_sel;
  374. return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
  375. }
  376. static ssize_t pe_store(struct device *dev,
  377. struct device_attribute *attr,
  378. const char *buf, size_t size)
  379. {
  380. unsigned long val;
  381. struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
  382. struct etmv4_config *config = &drvdata->config;
  383. if (kstrtoul(buf, 16, &val))
  384. return -EINVAL;
  385. spin_lock(&drvdata->spinlock);
  386. if (val > drvdata->nr_pe) {
  387. spin_unlock(&drvdata->spinlock);
  388. return -EINVAL;
  389. }
  390. config->pe_sel = val;
  391. spin_unlock(&drvdata->spinlock);
  392. return size;
  393. }
  394. static DEVICE_ATTR_RW(pe);
  395. static ssize_t event_show(struct device *dev,
  396. struct device_attribute *attr,
  397. char *buf)
  398. {
  399. unsigned long val;
  400. struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
  401. struct etmv4_config *config = &drvdata->config;
  402. val = config->eventctrl0;
  403. return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
  404. }
  405. static ssize_t event_store(struct device *dev,
  406. struct device_attribute *attr,
  407. const char *buf, size_t size)
  408. {
  409. unsigned long val;
  410. struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
  411. struct etmv4_config *config = &drvdata->config;
  412. if (kstrtoul(buf, 16, &val))
  413. return -EINVAL;
  414. spin_lock(&drvdata->spinlock);
  415. switch (drvdata->nr_event) {
  416. case 0x0:
  417. /* EVENT0, bits[7:0] */
  418. config->eventctrl0 = val & 0xFF;
  419. break;
  420. case 0x1:
  421. /* EVENT1, bits[15:8] */
  422. config->eventctrl0 = val & 0xFFFF;
  423. break;
  424. case 0x2:
  425. /* EVENT2, bits[23:16] */
  426. config->eventctrl0 = val & 0xFFFFFF;
  427. break;
  428. case 0x3:
  429. /* EVENT3, bits[31:24] */
  430. config->eventctrl0 = val;
  431. break;
  432. default:
  433. break;
  434. }
  435. spin_unlock(&drvdata->spinlock);
  436. return size;
  437. }
  438. static DEVICE_ATTR_RW(event);
  439. static ssize_t event_instren_show(struct device *dev,
  440. struct device_attribute *attr,
  441. char *buf)
  442. {
  443. unsigned long val;
  444. struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
  445. struct etmv4_config *config = &drvdata->config;
  446. val = BMVAL(config->eventctrl1, 0, 3);
  447. return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
  448. }
  449. static ssize_t event_instren_store(struct device *dev,
  450. struct device_attribute *attr,
  451. const char *buf, size_t size)
  452. {
  453. unsigned long val;
  454. struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
  455. struct etmv4_config *config = &drvdata->config;
  456. if (kstrtoul(buf, 16, &val))
  457. return -EINVAL;
  458. spin_lock(&drvdata->spinlock);
  459. /* start by clearing all instruction event enable bits */
  460. config->eventctrl1 &= ~(BIT(0) | BIT(1) | BIT(2) | BIT(3));
  461. switch (drvdata->nr_event) {
  462. case 0x0:
  463. /* generate Event element for event 1 */
  464. config->eventctrl1 |= val & BIT(1);
  465. break;
  466. case 0x1:
  467. /* generate Event element for event 1 and 2 */
  468. config->eventctrl1 |= val & (BIT(0) | BIT(1));
  469. break;
  470. case 0x2:
  471. /* generate Event element for event 1, 2 and 3 */
  472. config->eventctrl1 |= val & (BIT(0) | BIT(1) | BIT(2));
  473. break;
  474. case 0x3:
  475. /* generate Event element for all 4 events */
  476. config->eventctrl1 |= val & 0xF;
  477. break;
  478. default:
  479. break;
  480. }
  481. spin_unlock(&drvdata->spinlock);
  482. return size;
  483. }
  484. static DEVICE_ATTR_RW(event_instren);
  485. static ssize_t event_ts_show(struct device *dev,
  486. struct device_attribute *attr,
  487. char *buf)
  488. {
  489. unsigned long val;
  490. struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
  491. struct etmv4_config *config = &drvdata->config;
  492. val = config->ts_ctrl;
  493. return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
  494. }
  495. static ssize_t event_ts_store(struct device *dev,
  496. struct device_attribute *attr,
  497. const char *buf, size_t size)
  498. {
  499. unsigned long val;
  500. struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
  501. struct etmv4_config *config = &drvdata->config;
  502. if (kstrtoul(buf, 16, &val))
  503. return -EINVAL;
  504. if (!drvdata->ts_size)
  505. return -EINVAL;
  506. config->ts_ctrl = val & ETMv4_EVENT_MASK;
  507. return size;
  508. }
  509. static DEVICE_ATTR_RW(event_ts);
  510. static ssize_t syncfreq_show(struct device *dev,
  511. struct device_attribute *attr,
  512. char *buf)
  513. {
  514. unsigned long val;
  515. struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
  516. struct etmv4_config *config = &drvdata->config;
  517. val = config->syncfreq;
  518. return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
  519. }
  520. static ssize_t syncfreq_store(struct device *dev,
  521. struct device_attribute *attr,
  522. const char *buf, size_t size)
  523. {
  524. unsigned long val;
  525. struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
  526. struct etmv4_config *config = &drvdata->config;
  527. if (kstrtoul(buf, 16, &val))
  528. return -EINVAL;
  529. if (drvdata->syncpr == true)
  530. return -EINVAL;
  531. config->syncfreq = val & ETMv4_SYNC_MASK;
  532. return size;
  533. }
  534. static DEVICE_ATTR_RW(syncfreq);
  535. static ssize_t cyc_threshold_show(struct device *dev,
  536. struct device_attribute *attr,
  537. char *buf)
  538. {
  539. unsigned long val;
  540. struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
  541. struct etmv4_config *config = &drvdata->config;
  542. val = config->ccctlr;
  543. return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
  544. }
  545. static ssize_t cyc_threshold_store(struct device *dev,
  546. struct device_attribute *attr,
  547. const char *buf, size_t size)
  548. {
  549. unsigned long val;
  550. struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
  551. struct etmv4_config *config = &drvdata->config;
  552. if (kstrtoul(buf, 16, &val))
  553. return -EINVAL;
  554. /* mask off max threshold before checking min value */
  555. val &= ETM_CYC_THRESHOLD_MASK;
  556. if (val < drvdata->ccitmin)
  557. return -EINVAL;
  558. config->ccctlr = val;
  559. return size;
  560. }
  561. static DEVICE_ATTR_RW(cyc_threshold);
  562. static ssize_t bb_ctrl_show(struct device *dev,
  563. struct device_attribute *attr,
  564. char *buf)
  565. {
  566. unsigned long val;
  567. struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
  568. struct etmv4_config *config = &drvdata->config;
  569. val = config->bb_ctrl;
  570. return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
  571. }
  572. static ssize_t bb_ctrl_store(struct device *dev,
  573. struct device_attribute *attr,
  574. const char *buf, size_t size)
  575. {
  576. unsigned long val;
  577. struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
  578. struct etmv4_config *config = &drvdata->config;
  579. if (kstrtoul(buf, 16, &val))
  580. return -EINVAL;
  581. if (drvdata->trcbb == false)
  582. return -EINVAL;
  583. if (!drvdata->nr_addr_cmp)
  584. return -EINVAL;
  585. /*
  586. * Bit[8] controls include(1) / exclude(0), bits[0-7] select
  587. * individual range comparators. If include then at least 1
  588. * range must be selected.
  589. */
  590. if ((val & BIT(8)) && (BMVAL(val, 0, 7) == 0))
  591. return -EINVAL;
  592. config->bb_ctrl = val & GENMASK(8, 0);
  593. return size;
  594. }
  595. static DEVICE_ATTR_RW(bb_ctrl);
  596. static ssize_t event_vinst_show(struct device *dev,
  597. struct device_attribute *attr,
  598. char *buf)
  599. {
  600. unsigned long val;
  601. struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
  602. struct etmv4_config *config = &drvdata->config;
  603. val = config->vinst_ctrl & ETMv4_EVENT_MASK;
  604. return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
  605. }
  606. static ssize_t event_vinst_store(struct device *dev,
  607. struct device_attribute *attr,
  608. const char *buf, size_t size)
  609. {
  610. unsigned long val;
  611. struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
  612. struct etmv4_config *config = &drvdata->config;
  613. if (kstrtoul(buf, 16, &val))
  614. return -EINVAL;
  615. spin_lock(&drvdata->spinlock);
  616. val &= ETMv4_EVENT_MASK;
  617. config->vinst_ctrl &= ~ETMv4_EVENT_MASK;
  618. config->vinst_ctrl |= val;
  619. spin_unlock(&drvdata->spinlock);
  620. return size;
  621. }
  622. static DEVICE_ATTR_RW(event_vinst);
  623. static ssize_t s_exlevel_vinst_show(struct device *dev,
  624. struct device_attribute *attr,
  625. char *buf)
  626. {
  627. unsigned long val;
  628. struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
  629. struct etmv4_config *config = &drvdata->config;
  630. val = BMVAL(config->vinst_ctrl, 16, 19);
  631. return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
  632. }
  633. static ssize_t s_exlevel_vinst_store(struct device *dev,
  634. struct device_attribute *attr,
  635. const char *buf, size_t size)
  636. {
  637. unsigned long val;
  638. struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
  639. struct etmv4_config *config = &drvdata->config;
  640. if (kstrtoul(buf, 16, &val))
  641. return -EINVAL;
  642. spin_lock(&drvdata->spinlock);
  643. /* clear all EXLEVEL_S bits (bit[18] is never implemented) */
  644. config->vinst_ctrl &= ~(BIT(16) | BIT(17) | BIT(19));
  645. /* enable instruction tracing for corresponding exception level */
  646. val &= drvdata->s_ex_level;
  647. config->vinst_ctrl |= (val << 16);
  648. spin_unlock(&drvdata->spinlock);
  649. return size;
  650. }
  651. static DEVICE_ATTR_RW(s_exlevel_vinst);
  652. static ssize_t ns_exlevel_vinst_show(struct device *dev,
  653. struct device_attribute *attr,
  654. char *buf)
  655. {
  656. unsigned long val;
  657. struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
  658. struct etmv4_config *config = &drvdata->config;
  659. /* EXLEVEL_NS, bits[23:20] */
  660. val = BMVAL(config->vinst_ctrl, 20, 23);
  661. return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
  662. }
/*
 * Configure the non-secure exception levels for which instruction tracing
 * is enabled: TRCVICTLR.EXLEVEL_NS, bits[23:20].  Input is parsed as hex
 * and clamped to the levels this implementation supports (ns_ex_level).
 */
static ssize_t ns_exlevel_vinst_store(struct device *dev,
				      struct device_attribute *attr,
				      const char *buf, size_t size)
{
	unsigned long val;
	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
	struct etmv4_config *config = &drvdata->config;

	if (kstrtoul(buf, 16, &val))
		return -EINVAL;

	spin_lock(&drvdata->spinlock);
	/* clear EXLEVEL_NS bits (bit[23] is never implemented) */
	config->vinst_ctrl &= ~(BIT(20) | BIT(21) | BIT(22));
	/* enable instruction tracing for corresponding exception level */
	val &= drvdata->ns_ex_level;
	config->vinst_ctrl |= (val << 20);
	spin_unlock(&drvdata->spinlock);
	return size;
}
static DEVICE_ATTR_RW(ns_exlevel_vinst);
  682. static ssize_t addr_idx_show(struct device *dev,
  683. struct device_attribute *attr,
  684. char *buf)
  685. {
  686. unsigned long val;
  687. struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
  688. struct etmv4_config *config = &drvdata->config;
  689. val = config->addr_idx;
  690. return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
  691. }
/*
 * Select which single address comparator the other addr_* attributes
 * operate on.  Each comparator pair provides two single comparators,
 * hence the valid range [0, nr_addr_cmp * 2).
 */
static ssize_t addr_idx_store(struct device *dev,
			      struct device_attribute *attr,
			      const char *buf, size_t size)
{
	unsigned long val;
	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
	struct etmv4_config *config = &drvdata->config;

	if (kstrtoul(buf, 16, &val))
		return -EINVAL;
	if (val >= drvdata->nr_addr_cmp * 2)
		return -EINVAL;

	/*
	 * Use spinlock to ensure index doesn't change while it gets
	 * dereferenced multiple times within a spinlock block elsewhere.
	 */
	spin_lock(&drvdata->spinlock);
	config->addr_idx = val;
	spin_unlock(&drvdata->spinlock);
	return size;
}
static DEVICE_ATTR_RW(addr_idx);
/*
 * Report the access TYPE (bits[1:0] of TRCACATRn) of the selected address
 * comparator as a symbolic string: instr/data_load/data_store/
 * data_load_store.
 */
static ssize_t addr_instdatatype_show(struct device *dev,
				      struct device_attribute *attr,
				      char *buf)
{
	ssize_t len;
	u8 val, idx;
	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
	struct etmv4_config *config = &drvdata->config;

	spin_lock(&drvdata->spinlock);
	idx = config->addr_idx;
	/* TYPE, bits[1:0] of the access type register */
	val = BMVAL(config->addr_acc[idx], 0, 1);
	len = scnprintf(buf, PAGE_SIZE, "%s\n",
			val == ETM_INSTR_ADDR ? "instr" :
			(val == ETM_DATA_LOAD_ADDR ? "data_load" :
			(val == ETM_DATA_STORE_ADDR ? "data_store" :
			"data_load_store")));
	spin_unlock(&drvdata->spinlock);
	return len;
}
/*
 * Set the access TYPE of the selected address comparator.  Only "instr"
 * (instruction address comparison, TYPE == 0b00) is handled; any other
 * token leaves the configuration untouched but still returns success.
 */
static ssize_t addr_instdatatype_store(struct device *dev,
				       struct device_attribute *attr,
				       const char *buf, size_t size)
{
	u8 idx;
	char str[20] = "";
	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
	struct etmv4_config *config = &drvdata->config;

	/* reject input that cannot fit in str[] including the NUL */
	if (strlen(buf) >= 20)
		return -EINVAL;
	if (sscanf(buf, "%s", str) != 1)
		return -EINVAL;

	spin_lock(&drvdata->spinlock);
	idx = config->addr_idx;
	if (!strcmp(str, "instr"))
		/* TYPE, bits[1:0] */
		config->addr_acc[idx] &= ~(BIT(0) | BIT(1));
	spin_unlock(&drvdata->spinlock);
	return size;
}
static DEVICE_ATTR_RW(addr_instdatatype);
  753. static ssize_t addr_single_show(struct device *dev,
  754. struct device_attribute *attr,
  755. char *buf)
  756. {
  757. u8 idx;
  758. unsigned long val;
  759. struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
  760. struct etmv4_config *config = &drvdata->config;
  761. idx = config->addr_idx;
  762. spin_lock(&drvdata->spinlock);
  763. if (!(config->addr_type[idx] == ETM_ADDR_TYPE_NONE ||
  764. config->addr_type[idx] == ETM_ADDR_TYPE_SINGLE)) {
  765. spin_unlock(&drvdata->spinlock);
  766. return -EPERM;
  767. }
  768. val = (unsigned long)config->addr_val[idx];
  769. spin_unlock(&drvdata->spinlock);
  770. return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
  771. }
/*
 * Program the selected comparator as a single address comparator.  The
 * comparator must be unconfigured or already of type SINGLE, otherwise
 * -EPERM is returned.
 */
static ssize_t addr_single_store(struct device *dev,
				 struct device_attribute *attr,
				 const char *buf, size_t size)
{
	u8 idx;
	unsigned long val;
	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
	struct etmv4_config *config = &drvdata->config;

	if (kstrtoul(buf, 16, &val))
		return -EINVAL;

	spin_lock(&drvdata->spinlock);
	idx = config->addr_idx;
	if (!(config->addr_type[idx] == ETM_ADDR_TYPE_NONE ||
	      config->addr_type[idx] == ETM_ADDR_TYPE_SINGLE)) {
		spin_unlock(&drvdata->spinlock);
		return -EPERM;
	}
	config->addr_val[idx] = (u64)val;
	config->addr_type[idx] = ETM_ADDR_TYPE_SINGLE;
	spin_unlock(&drvdata->spinlock);
	return size;
}
static DEVICE_ATTR_RW(addr_single);
/*
 * Report the low/high addresses of the selected comparator pair when it
 * is configured as an address range.  The selected index must be the
 * even (low) half of a pair, and both halves must be either unconfigured
 * or of type RANGE.
 */
static ssize_t addr_range_show(struct device *dev,
			       struct device_attribute *attr,
			       char *buf)
{
	u8 idx;
	unsigned long val1, val2;
	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
	struct etmv4_config *config = &drvdata->config;

	spin_lock(&drvdata->spinlock);
	idx = config->addr_idx;
	/* ranges are programmed on even/odd comparator pairs */
	if (idx % 2 != 0) {
		spin_unlock(&drvdata->spinlock);
		return -EPERM;
	}
	if (!((config->addr_type[idx] == ETM_ADDR_TYPE_NONE &&
	       config->addr_type[idx + 1] == ETM_ADDR_TYPE_NONE) ||
	      (config->addr_type[idx] == ETM_ADDR_TYPE_RANGE &&
	       config->addr_type[idx + 1] == ETM_ADDR_TYPE_RANGE))) {
		spin_unlock(&drvdata->spinlock);
		return -EPERM;
	}

	val1 = (unsigned long)config->addr_val[idx];
	val2 = (unsigned long)config->addr_val[idx + 1];
	spin_unlock(&drvdata->spinlock);
	return scnprintf(buf, PAGE_SIZE, "%#lx %#lx\n", val1, val2);
}
/*
 * Program the selected comparator pair as an address range "low high"
 * (two hex values).  The selected index must be the even half of a pair
 * and both halves must be unconfigured or already of type RANGE.
 */
static ssize_t addr_range_store(struct device *dev,
				struct device_attribute *attr,
				const char *buf, size_t size)
{
	u8 idx;
	unsigned long val1, val2;
	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
	struct etmv4_config *config = &drvdata->config;

	if (sscanf(buf, "%lx %lx", &val1, &val2) != 2)
		return -EINVAL;
	/* lower address comparator cannot have a higher address value */
	if (val1 > val2)
		return -EINVAL;

	spin_lock(&drvdata->spinlock);
	idx = config->addr_idx;
	if (idx % 2 != 0) {
		spin_unlock(&drvdata->spinlock);
		return -EPERM;
	}
	if (!((config->addr_type[idx] == ETM_ADDR_TYPE_NONE &&
	       config->addr_type[idx + 1] == ETM_ADDR_TYPE_NONE) ||
	      (config->addr_type[idx] == ETM_ADDR_TYPE_RANGE &&
	       config->addr_type[idx + 1] == ETM_ADDR_TYPE_RANGE))) {
		spin_unlock(&drvdata->spinlock);
		return -EPERM;
	}

	config->addr_val[idx] = (u64)val1;
	config->addr_type[idx] = ETM_ADDR_TYPE_RANGE;
	config->addr_val[idx + 1] = (u64)val2;
	config->addr_type[idx + 1] = ETM_ADDR_TYPE_RANGE;
	/*
	 * Program include or exclude control bits for vinst or vdata
	 * whenever we change addr comparators to ETM_ADDR_TYPE_RANGE
	 */
	if (config->mode & ETM_MODE_EXCLUDE)
		etm4_set_mode_exclude(drvdata, true);
	else
		etm4_set_mode_exclude(drvdata, false);
	spin_unlock(&drvdata->spinlock);
	return size;
}
static DEVICE_ATTR_RW(addr_range);
/*
 * Report the address programmed in the selected comparator when it is
 * used as a trace start address (or still unconfigured).
 */
static ssize_t addr_start_show(struct device *dev,
			       struct device_attribute *attr,
			       char *buf)
{
	u8 idx;
	unsigned long val;
	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
	struct etmv4_config *config = &drvdata->config;

	spin_lock(&drvdata->spinlock);
	idx = config->addr_idx;
	if (!(config->addr_type[idx] == ETM_ADDR_TYPE_NONE ||
	      config->addr_type[idx] == ETM_ADDR_TYPE_START)) {
		spin_unlock(&drvdata->spinlock);
		return -EPERM;
	}

	val = (unsigned long)config->addr_val[idx];
	spin_unlock(&drvdata->spinlock);
	return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
}
/*
 * Program the selected comparator as a trace start address and enable
 * the ViewInst start/stop logic for it (TRCVISSCTLR start bit +
 * TRCVICTLR.SSSTATUS).
 */
static ssize_t addr_start_store(struct device *dev,
				struct device_attribute *attr,
				const char *buf, size_t size)
{
	u8 idx;
	unsigned long val;
	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
	struct etmv4_config *config = &drvdata->config;

	if (kstrtoul(buf, 16, &val))
		return -EINVAL;

	spin_lock(&drvdata->spinlock);
	idx = config->addr_idx;
	if (!drvdata->nr_addr_cmp) {
		spin_unlock(&drvdata->spinlock);
		return -EINVAL;
	}
	if (!(config->addr_type[idx] == ETM_ADDR_TYPE_NONE ||
	      config->addr_type[idx] == ETM_ADDR_TYPE_START)) {
		spin_unlock(&drvdata->spinlock);
		return -EPERM;
	}

	config->addr_val[idx] = (u64)val;
	config->addr_type[idx] = ETM_ADDR_TYPE_START;
	/* start bits live in the low half of TRCVISSCTLR */
	config->vissctlr |= BIT(idx);
	/* SSSTATUS, bit[9] - turn on start/stop logic */
	config->vinst_ctrl |= BIT(9);
	spin_unlock(&drvdata->spinlock);
	return size;
}
static DEVICE_ATTR_RW(addr_start);
/*
 * Report the address programmed in the selected comparator when it is
 * used as a trace stop address (or still unconfigured).
 */
static ssize_t addr_stop_show(struct device *dev,
			      struct device_attribute *attr,
			      char *buf)
{
	u8 idx;
	unsigned long val;
	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
	struct etmv4_config *config = &drvdata->config;

	spin_lock(&drvdata->spinlock);
	idx = config->addr_idx;
	if (!(config->addr_type[idx] == ETM_ADDR_TYPE_NONE ||
	      config->addr_type[idx] == ETM_ADDR_TYPE_STOP)) {
		spin_unlock(&drvdata->spinlock);
		return -EPERM;
	}

	val = (unsigned long)config->addr_val[idx];
	spin_unlock(&drvdata->spinlock);
	return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
}
/*
 * Program the selected comparator as a trace stop address and enable the
 * ViewInst start/stop logic for it.  Stop bits occupy the upper half of
 * TRCVISSCTLR, hence the "idx + 16" shift.
 */
static ssize_t addr_stop_store(struct device *dev,
			       struct device_attribute *attr,
			       const char *buf, size_t size)
{
	u8 idx;
	unsigned long val;
	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
	struct etmv4_config *config = &drvdata->config;

	if (kstrtoul(buf, 16, &val))
		return -EINVAL;

	spin_lock(&drvdata->spinlock);
	idx = config->addr_idx;
	if (!drvdata->nr_addr_cmp) {
		spin_unlock(&drvdata->spinlock);
		return -EINVAL;
	}
	if (!(config->addr_type[idx] == ETM_ADDR_TYPE_NONE ||
	      config->addr_type[idx] == ETM_ADDR_TYPE_STOP)) {
		spin_unlock(&drvdata->spinlock);
		return -EPERM;
	}

	config->addr_val[idx] = (u64)val;
	config->addr_type[idx] = ETM_ADDR_TYPE_STOP;
	config->vissctlr |= BIT(idx + 16);
	/* SSSTATUS, bit[9] - turn on start/stop logic */
	config->vinst_ctrl |= BIT(9);
	spin_unlock(&drvdata->spinlock);
	return size;
}
static DEVICE_ATTR_RW(addr_stop);
/*
 * Report the context comparison type (CONTEXTTYPE, bits[3:2] of the
 * selected comparator's access type register) as a symbolic string:
 * none/ctxid/vmid/all.
 */
static ssize_t addr_ctxtype_show(struct device *dev,
				 struct device_attribute *attr,
				 char *buf)
{
	ssize_t len;
	u8 idx, val;
	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
	struct etmv4_config *config = &drvdata->config;

	spin_lock(&drvdata->spinlock);
	idx = config->addr_idx;
	/* CONTEXTTYPE, bits[3:2] */
	val = BMVAL(config->addr_acc[idx], 2, 3);
	len = scnprintf(buf, PAGE_SIZE, "%s\n", val == ETM_CTX_NONE ? "none" :
			(val == ETM_CTX_CTXID ? "ctxid" :
			(val == ETM_CTX_VMID ? "vmid" : "all")));
	spin_unlock(&drvdata->spinlock);
	return len;
}
/*
 * Set the context comparison type of the selected address comparator from
 * one of the tokens none/ctxid/vmid/all.  ctxid/vmid/all are only applied
 * when the implementation provides the corresponding comparators
 * (numcidc/numvmidc); unknown tokens leave the configuration untouched.
 */
static ssize_t addr_ctxtype_store(struct device *dev,
				  struct device_attribute *attr,
				  const char *buf, size_t size)
{
	u8 idx;
	char str[10] = "";
	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
	struct etmv4_config *config = &drvdata->config;

	/* reject input that cannot fit in str[] including the NUL */
	if (strlen(buf) >= 10)
		return -EINVAL;
	if (sscanf(buf, "%s", str) != 1)
		return -EINVAL;

	spin_lock(&drvdata->spinlock);
	idx = config->addr_idx;
	if (!strcmp(str, "none"))
		/* start by clearing context type bits */
		config->addr_acc[idx] &= ~(BIT(2) | BIT(3));
	else if (!strcmp(str, "ctxid")) {
		/* 0b01 The trace unit performs a Context ID */
		if (drvdata->numcidc) {
			config->addr_acc[idx] |= BIT(2);
			config->addr_acc[idx] &= ~BIT(3);
		}
	} else if (!strcmp(str, "vmid")) {
		/* 0b10 The trace unit performs a VMID */
		if (drvdata->numvmidc) {
			config->addr_acc[idx] &= ~BIT(2);
			config->addr_acc[idx] |= BIT(3);
		}
	} else if (!strcmp(str, "all")) {
		/*
		 * 0b11 The trace unit performs a Context ID
		 * comparison and a VMID
		 */
		if (drvdata->numcidc)
			config->addr_acc[idx] |= BIT(2);
		if (drvdata->numvmidc)
			config->addr_acc[idx] |= BIT(3);
	}
	spin_unlock(&drvdata->spinlock);
	return size;
}
static DEVICE_ATTR_RW(addr_ctxtype);
/*
 * Report which context ID comparator (bits[6:4] of the access type
 * register) is linked to the selected address comparator.
 */
static ssize_t addr_context_show(struct device *dev,
				 struct device_attribute *attr,
				 char *buf)
{
	u8 idx;
	unsigned long val;
	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
	struct etmv4_config *config = &drvdata->config;

	spin_lock(&drvdata->spinlock);
	idx = config->addr_idx;
	/* context ID comparator bits[6:4] */
	val = BMVAL(config->addr_acc[idx], 4, 6);
	spin_unlock(&drvdata->spinlock);
	return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
}
/*
 * Link a context ID / VMID comparator to the selected address comparator.
 * Requires more than one CID or VMID comparator to be implemented and the
 * value to be within the larger of the two counts.
 */
static ssize_t addr_context_store(struct device *dev,
				  struct device_attribute *attr,
				  const char *buf, size_t size)
{
	u8 idx;
	unsigned long val;
	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
	struct etmv4_config *config = &drvdata->config;

	if (kstrtoul(buf, 16, &val))
		return -EINVAL;
	if ((drvdata->numcidc <= 1) && (drvdata->numvmidc <= 1))
		return -EINVAL;
	if (val >= (drvdata->numcidc >= drvdata->numvmidc ?
		    drvdata->numcidc : drvdata->numvmidc))
		return -EINVAL;

	spin_lock(&drvdata->spinlock);
	idx = config->addr_idx;
	/* clear context ID comparator bits[6:4] */
	config->addr_acc[idx] &= ~(BIT(4) | BIT(5) | BIT(6));
	config->addr_acc[idx] |= (val << 4);
	spin_unlock(&drvdata->spinlock);
	return size;
}
static DEVICE_ATTR_RW(addr_context);
  1061. static ssize_t seq_idx_show(struct device *dev,
  1062. struct device_attribute *attr,
  1063. char *buf)
  1064. {
  1065. unsigned long val;
  1066. struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
  1067. struct etmv4_config *config = &drvdata->config;
  1068. val = config->seq_idx;
  1069. return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
  1070. }
/*
 * Select which sequencer state the seq_event attribute operates on.
 * There are nrseqstate - 1 sequencer state transition control registers,
 * hence the upper bound.
 */
static ssize_t seq_idx_store(struct device *dev,
			     struct device_attribute *attr,
			     const char *buf, size_t size)
{
	unsigned long val;
	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
	struct etmv4_config *config = &drvdata->config;

	if (kstrtoul(buf, 16, &val))
		return -EINVAL;
	if (val >= drvdata->nrseqstate - 1)
		return -EINVAL;

	/*
	 * Use spinlock to ensure index doesn't change while it gets
	 * dereferenced multiple times within a spinlock block elsewhere.
	 */
	spin_lock(&drvdata->spinlock);
	config->seq_idx = val;
	spin_unlock(&drvdata->spinlock);
	return size;
}
static DEVICE_ATTR_RW(seq_idx);
  1092. static ssize_t seq_state_show(struct device *dev,
  1093. struct device_attribute *attr,
  1094. char *buf)
  1095. {
  1096. unsigned long val;
  1097. struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
  1098. struct etmv4_config *config = &drvdata->config;
  1099. val = config->seq_state;
  1100. return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
  1101. }
/*
 * Set the sequencer state; must be below the number of implemented
 * sequencer states.
 *
 * NOTE(review): unlike the other store handlers, seq_state is written
 * without taking drvdata->spinlock — confirm no concurrent reader needs
 * it to be stable.
 */
static ssize_t seq_state_store(struct device *dev,
			       struct device_attribute *attr,
			       const char *buf, size_t size)
{
	unsigned long val;
	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
	struct etmv4_config *config = &drvdata->config;

	if (kstrtoul(buf, 16, &val))
		return -EINVAL;
	if (val >= drvdata->nrseqstate)
		return -EINVAL;

	config->seq_state = val;
	return size;
}
static DEVICE_ATTR_RW(seq_state);
/*
 * Report the event control value programmed for the selected sequencer
 * state transition register.
 */
static ssize_t seq_event_show(struct device *dev,
			      struct device_attribute *attr,
			      char *buf)
{
	u8 idx;
	unsigned long val;
	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
	struct etmv4_config *config = &drvdata->config;

	spin_lock(&drvdata->spinlock);
	idx = config->seq_idx;
	val = config->seq_ctrl[idx];
	spin_unlock(&drvdata->spinlock);
	return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
}
/*
 * Program the backward (B) and forward (F) event selectors of the
 * selected sequencer state transition register.
 */
static ssize_t seq_event_store(struct device *dev,
			       struct device_attribute *attr,
			       const char *buf, size_t size)
{
	u8 idx;
	unsigned long val;
	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
	struct etmv4_config *config = &drvdata->config;

	if (kstrtoul(buf, 16, &val))
		return -EINVAL;

	spin_lock(&drvdata->spinlock);
	idx = config->seq_idx;
	/* Seq control has two masks B[15:8] F[7:0] */
	config->seq_ctrl[idx] = val & 0xFFFF;
	spin_unlock(&drvdata->spinlock);
	return size;
}
static DEVICE_ATTR_RW(seq_event);
  1149. static ssize_t seq_reset_event_show(struct device *dev,
  1150. struct device_attribute *attr,
  1151. char *buf)
  1152. {
  1153. unsigned long val;
  1154. struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
  1155. struct etmv4_config *config = &drvdata->config;
  1156. val = config->seq_rst;
  1157. return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
  1158. }
/*
 * Set the event that resets the sequencer to state 0.  Only meaningful
 * when a sequencer is implemented (nrseqstate != 0).
 */
static ssize_t seq_reset_event_store(struct device *dev,
				     struct device_attribute *attr,
				     const char *buf, size_t size)
{
	unsigned long val;
	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
	struct etmv4_config *config = &drvdata->config;

	if (kstrtoul(buf, 16, &val))
		return -EINVAL;
	if (!(drvdata->nrseqstate))
		return -EINVAL;

	config->seq_rst = val & ETMv4_EVENT_MASK;
	return size;
}
static DEVICE_ATTR_RW(seq_reset_event);
  1174. static ssize_t cntr_idx_show(struct device *dev,
  1175. struct device_attribute *attr,
  1176. char *buf)
  1177. {
  1178. unsigned long val;
  1179. struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
  1180. struct etmv4_config *config = &drvdata->config;
  1181. val = config->cntr_idx;
  1182. return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
  1183. }
/*
 * Select which counter the cntrldvr/cntr_val/cntr_ctrl attributes
 * operate on; must be below the number of implemented counters.
 */
static ssize_t cntr_idx_store(struct device *dev,
			      struct device_attribute *attr,
			      const char *buf, size_t size)
{
	unsigned long val;
	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
	struct etmv4_config *config = &drvdata->config;

	if (kstrtoul(buf, 16, &val))
		return -EINVAL;
	if (val >= drvdata->nr_cntr)
		return -EINVAL;

	/*
	 * Use spinlock to ensure index doesn't change while it gets
	 * dereferenced multiple times within a spinlock block elsewhere.
	 */
	spin_lock(&drvdata->spinlock);
	config->cntr_idx = val;
	spin_unlock(&drvdata->spinlock);
	return size;
}
static DEVICE_ATTR_RW(cntr_idx);
/* Report the reload value of the selected counter. */
static ssize_t cntrldvr_show(struct device *dev,
			     struct device_attribute *attr,
			     char *buf)
{
	u8 idx;
	unsigned long val;
	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
	struct etmv4_config *config = &drvdata->config;

	spin_lock(&drvdata->spinlock);
	idx = config->cntr_idx;
	val = config->cntrldvr[idx];
	spin_unlock(&drvdata->spinlock);
	return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
}
/*
 * Set the reload value of the selected counter; bounded by the maximum
 * counter value the architecture supports.
 */
static ssize_t cntrldvr_store(struct device *dev,
			      struct device_attribute *attr,
			      const char *buf, size_t size)
{
	u8 idx;
	unsigned long val;
	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
	struct etmv4_config *config = &drvdata->config;

	if (kstrtoul(buf, 16, &val))
		return -EINVAL;
	if (val > ETM_CNTR_MAX_VAL)
		return -EINVAL;

	spin_lock(&drvdata->spinlock);
	idx = config->cntr_idx;
	config->cntrldvr[idx] = val;
	spin_unlock(&drvdata->spinlock);
	return size;
}
static DEVICE_ATTR_RW(cntrldvr);
/* Report the (configured) value of the selected counter. */
static ssize_t cntr_val_show(struct device *dev,
			     struct device_attribute *attr,
			     char *buf)
{
	u8 idx;
	unsigned long val;
	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
	struct etmv4_config *config = &drvdata->config;

	spin_lock(&drvdata->spinlock);
	idx = config->cntr_idx;
	val = config->cntr_val[idx];
	spin_unlock(&drvdata->spinlock);
	return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
}
/*
 * Set the value of the selected counter; bounded by the maximum counter
 * value the architecture supports.
 */
static ssize_t cntr_val_store(struct device *dev,
			      struct device_attribute *attr,
			      const char *buf, size_t size)
{
	u8 idx;
	unsigned long val;
	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
	struct etmv4_config *config = &drvdata->config;

	if (kstrtoul(buf, 16, &val))
		return -EINVAL;
	if (val > ETM_CNTR_MAX_VAL)
		return -EINVAL;

	spin_lock(&drvdata->spinlock);
	idx = config->cntr_idx;
	config->cntr_val[idx] = val;
	spin_unlock(&drvdata->spinlock);
	return size;
}
static DEVICE_ATTR_RW(cntr_val);
/* Report the control value of the selected counter. */
static ssize_t cntr_ctrl_show(struct device *dev,
			      struct device_attribute *attr,
			      char *buf)
{
	u8 idx;
	unsigned long val;
	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
	struct etmv4_config *config = &drvdata->config;

	spin_lock(&drvdata->spinlock);
	idx = config->cntr_idx;
	val = config->cntr_ctrl[idx];
	spin_unlock(&drvdata->spinlock);
	return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
}
/* Set the control value of the selected counter. */
static ssize_t cntr_ctrl_store(struct device *dev,
			       struct device_attribute *attr,
			       const char *buf, size_t size)
{
	u8 idx;
	unsigned long val;
	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
	struct etmv4_config *config = &drvdata->config;

	if (kstrtoul(buf, 16, &val))
		return -EINVAL;

	spin_lock(&drvdata->spinlock);
	idx = config->cntr_idx;
	config->cntr_ctrl[idx] = val;
	spin_unlock(&drvdata->spinlock);
	return size;
}
static DEVICE_ATTR_RW(cntr_ctrl);
  1302. static ssize_t res_idx_show(struct device *dev,
  1303. struct device_attribute *attr,
  1304. char *buf)
  1305. {
  1306. unsigned long val;
  1307. struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
  1308. struct etmv4_config *config = &drvdata->config;
  1309. val = config->res_idx;
  1310. return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
  1311. }
/*
 * Select which resource selector the res_ctrl attribute operates on.
 * Selector 0 is reserved by the architecture and cannot be chosen.
 */
static ssize_t res_idx_store(struct device *dev,
			     struct device_attribute *attr,
			     const char *buf, size_t size)
{
	unsigned long val;
	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
	struct etmv4_config *config = &drvdata->config;

	if (kstrtoul(buf, 16, &val))
		return -EINVAL;
	/* Resource selector pair 0 is always implemented and reserved */
	if ((val == 0) || (val >= drvdata->nr_resource))
		return -EINVAL;

	/*
	 * Use spinlock to ensure index doesn't change while it gets
	 * dereferenced multiple times within a spinlock block elsewhere.
	 */
	spin_lock(&drvdata->spinlock);
	config->res_idx = val;
	spin_unlock(&drvdata->spinlock);
	return size;
}
static DEVICE_ATTR_RW(res_idx);
/* Report the control value of the selected resource selector. */
static ssize_t res_ctrl_show(struct device *dev,
			     struct device_attribute *attr,
			     char *buf)
{
	u8 idx;
	unsigned long val;
	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
	struct etmv4_config *config = &drvdata->config;

	spin_lock(&drvdata->spinlock);
	idx = config->res_idx;
	val = config->res_ctrl[idx];
	spin_unlock(&drvdata->spinlock);
	return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
}
/*
 * Set the control value of the selected resource selector, keeping only
 * the architecturally defined bits[21:0] and masking PAIRINV for odd
 * selectors where it is reserved.
 */
static ssize_t res_ctrl_store(struct device *dev,
			      struct device_attribute *attr,
			      const char *buf, size_t size)
{
	u8 idx;
	unsigned long val;
	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
	struct etmv4_config *config = &drvdata->config;

	if (kstrtoul(buf, 16, &val))
		return -EINVAL;

	spin_lock(&drvdata->spinlock);
	idx = config->res_idx;
	/* For odd idx pair inversal bit is RES0 */
	if (idx % 2 != 0)
		/* PAIRINV, bit[21] */
		val &= ~BIT(21);
	config->res_ctrl[idx] = val & GENMASK(21, 0);
	spin_unlock(&drvdata->spinlock);
	return size;
}
static DEVICE_ATTR_RW(res_ctrl);
  1369. static ssize_t ctxid_idx_show(struct device *dev,
  1370. struct device_attribute *attr,
  1371. char *buf)
  1372. {
  1373. unsigned long val;
  1374. struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
  1375. struct etmv4_config *config = &drvdata->config;
  1376. val = config->ctxid_idx;
  1377. return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
  1378. }
/*
 * Select which context ID comparator the ctxid_pid attribute operates
 * on; must be below the number of implemented CID comparators.
 */
static ssize_t ctxid_idx_store(struct device *dev,
			       struct device_attribute *attr,
			       const char *buf, size_t size)
{
	unsigned long val;
	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
	struct etmv4_config *config = &drvdata->config;

	if (kstrtoul(buf, 16, &val))
		return -EINVAL;
	if (val >= drvdata->numcidc)
		return -EINVAL;

	/*
	 * Use spinlock to ensure index doesn't change while it gets
	 * dereferenced multiple times within a spinlock block elsewhere.
	 */
	spin_lock(&drvdata->spinlock);
	config->ctxid_idx = val;
	spin_unlock(&drvdata->spinlock);
	return size;
}
static DEVICE_ATTR_RW(ctxid_idx);
/*
 * Report the PID programmed in the selected context ID comparator.
 * Refused outside the initial PID namespace (see ctxid_pid_store()).
 */
static ssize_t ctxid_pid_show(struct device *dev,
			      struct device_attribute *attr,
			      char *buf)
{
	u8 idx;
	unsigned long val;
	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
	struct etmv4_config *config = &drvdata->config;

	/*
	 * Don't use contextID tracing if coming from a PID namespace. See
	 * comment in ctxid_pid_store().
	 */
	if (task_active_pid_ns(current) != &init_pid_ns)
		return -EINVAL;

	spin_lock(&drvdata->spinlock);
	idx = config->ctxid_idx;
	val = (unsigned long)config->ctxid_pid[idx];
	spin_unlock(&drvdata->spinlock);
	return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
}
/*
 * Program the selected context ID comparator with a PID (hex).  Requires
 * the initial PID namespace and an implementation with ctxid tracing
 * support.
 */
static ssize_t ctxid_pid_store(struct device *dev,
			       struct device_attribute *attr,
			       const char *buf, size_t size)
{
	u8 idx;
	unsigned long pid;
	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
	struct etmv4_config *config = &drvdata->config;

	/*
	 * When contextID tracing is enabled the tracers will insert the
	 * value found in the contextID register in the trace stream. But if
	 * a process is in a namespace the PID of that process as seen from the
	 * namespace won't be what the kernel sees, something that makes the
	 * feature confusing and can potentially leak kernel only information.
	 * As such refuse to use the feature if @current is not in the initial
	 * PID namespace.
	 */
	if (task_active_pid_ns(current) != &init_pid_ns)
		return -EINVAL;

	/*
	 * only implemented when ctxid tracing is enabled, i.e. at least one
	 * ctxid comparator is implemented and ctxid is greater than 0 bits
	 * in length
	 */
	if (!drvdata->ctxid_size || !drvdata->numcidc)
		return -EINVAL;
	if (kstrtoul(buf, 16, &pid))
		return -EINVAL;

	spin_lock(&drvdata->spinlock);
	idx = config->ctxid_idx;
	config->ctxid_pid[idx] = (u64)pid;
	spin_unlock(&drvdata->spinlock);
	return size;
}
static DEVICE_ATTR_RW(ctxid_pid);
/*
 * Report both context ID comparator mask registers.  Refused outside the
 * initial PID namespace (see ctxid_pid_store()).
 */
static ssize_t ctxid_masks_show(struct device *dev,
				struct device_attribute *attr,
				char *buf)
{
	unsigned long val1, val2;
	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
	struct etmv4_config *config = &drvdata->config;

	/*
	 * Don't use contextID tracing if coming from a PID namespace. See
	 * comment in ctxid_pid_store().
	 */
	if (task_active_pid_ns(current) != &init_pid_ns)
		return -EINVAL;

	spin_lock(&drvdata->spinlock);
	val1 = config->ctxid_mask0;
	val2 = config->ctxid_mask1;
	spin_unlock(&drvdata->spinlock);
	return scnprintf(buf, PAGE_SIZE, "%#lx %#lx\n", val1, val2);
}
/*
 * Program the context ID comparator mask registers ("mask0 mask1", hex).
 * Only the bytes corresponding to implemented comparators are retained,
 * and any comparator byte that is fully masked has the matching byte of
 * its programmed value cleared, as the architecture requires.
 */
static ssize_t ctxid_masks_store(struct device *dev,
				 struct device_attribute *attr,
				 const char *buf, size_t size)
{
	u8 i, j, maskbyte;
	unsigned long val1, val2, mask;
	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
	struct etmv4_config *config = &drvdata->config;

	/*
	 * Don't use contextID tracing if coming from a PID namespace. See
	 * comment in ctxid_pid_store().
	 */
	if (task_active_pid_ns(current) != &init_pid_ns)
		return -EINVAL;

	/*
	 * only implemented when ctxid tracing is enabled, i.e. at least one
	 * ctxid comparator is implemented and ctxid is greater than 0 bits
	 * in length
	 */
	if (!drvdata->ctxid_size || !drvdata->numcidc)
		return -EINVAL;
	if (sscanf(buf, "%lx %lx", &val1, &val2) != 2)
		return -EINVAL;

	spin_lock(&drvdata->spinlock);
	/*
	 * each byte[0..3] controls mask value applied to ctxid
	 * comparator[0..3]
	 */
	switch (drvdata->numcidc) {
	case 0x1:
		/* COMP0, bits[7:0] */
		config->ctxid_mask0 = val1 & 0xFF;
		break;
	case 0x2:
		/* COMP1, bits[15:8] */
		config->ctxid_mask0 = val1 & 0xFFFF;
		break;
	case 0x3:
		/* COMP2, bits[23:16] */
		config->ctxid_mask0 = val1 & 0xFFFFFF;
		break;
	case 0x4:
		/* COMP3, bits[31:24] */
		config->ctxid_mask0 = val1;
		break;
	case 0x5:
		/* COMP4, bits[7:0] */
		config->ctxid_mask0 = val1;
		config->ctxid_mask1 = val2 & 0xFF;
		break;
	case 0x6:
		/* COMP5, bits[15:8] */
		config->ctxid_mask0 = val1;
		config->ctxid_mask1 = val2 & 0xFFFF;
		break;
	case 0x7:
		/* COMP6, bits[23:16] */
		config->ctxid_mask0 = val1;
		config->ctxid_mask1 = val2 & 0xFFFFFF;
		break;
	case 0x8:
		/* COMP7, bits[31:24] */
		config->ctxid_mask0 = val1;
		config->ctxid_mask1 = val2;
		break;
	default:
		break;
	}
	/*
	 * If software sets a mask bit to 1, it must program relevant byte
	 * of ctxid comparator value 0x0, otherwise behavior is unpredictable.
	 * For example, if bit[3] of ctxid_mask0 is 1, we must clear bits[31:24]
	 * of ctxid comparator0 value (corresponding to byte 0) register.
	 */
	mask = config->ctxid_mask0;
	for (i = 0; i < drvdata->numcidc; i++) {
		/* mask value of corresponding ctxid comparator */
		maskbyte = mask & ETMv4_EVENT_MASK;
		/*
		 * each bit corresponds to a byte of respective ctxid comparator
		 * value register
		 */
		for (j = 0; j < 8; j++) {
			if (maskbyte & 1)
				config->ctxid_pid[i] &= ~(0xFFUL << (j * 8));
			maskbyte >>= 1;
		}
		/* Select the next ctxid comparator mask value */
		if (i == 3)
			/* ctxid comparators[4-7] */
			mask = config->ctxid_mask1;
		else
			mask >>= 0x8;
	}
	spin_unlock(&drvdata->spinlock);
	return size;
}
static DEVICE_ATTR_RW(ctxid_masks);
  1572. static ssize_t vmid_idx_show(struct device *dev,
  1573. struct device_attribute *attr,
  1574. char *buf)
  1575. {
  1576. unsigned long val;
  1577. struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
  1578. struct etmv4_config *config = &drvdata->config;
  1579. val = config->vmid_idx;
  1580. return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
  1581. }
  1582. static ssize_t vmid_idx_store(struct device *dev,
  1583. struct device_attribute *attr,
  1584. const char *buf, size_t size)
  1585. {
  1586. unsigned long val;
  1587. struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
  1588. struct etmv4_config *config = &drvdata->config;
  1589. if (kstrtoul(buf, 16, &val))
  1590. return -EINVAL;
  1591. if (val >= drvdata->numvmidc)
  1592. return -EINVAL;
  1593. /*
  1594. * Use spinlock to ensure index doesn't change while it gets
  1595. * dereferenced multiple times within a spinlock block elsewhere.
  1596. */
  1597. spin_lock(&drvdata->spinlock);
  1598. config->vmid_idx = val;
  1599. spin_unlock(&drvdata->spinlock);
  1600. return size;
  1601. }
  1602. static DEVICE_ATTR_RW(vmid_idx);
  1603. static ssize_t vmid_val_show(struct device *dev,
  1604. struct device_attribute *attr,
  1605. char *buf)
  1606. {
  1607. unsigned long val;
  1608. struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
  1609. struct etmv4_config *config = &drvdata->config;
  1610. val = (unsigned long)config->vmid_val[config->vmid_idx];
  1611. return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
  1612. }
  1613. static ssize_t vmid_val_store(struct device *dev,
  1614. struct device_attribute *attr,
  1615. const char *buf, size_t size)
  1616. {
  1617. unsigned long val;
  1618. struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
  1619. struct etmv4_config *config = &drvdata->config;
  1620. /*
  1621. * only implemented when vmid tracing is enabled, i.e. at least one
  1622. * vmid comparator is implemented and at least 8 bit vmid size
  1623. */
  1624. if (!drvdata->vmid_size || !drvdata->numvmidc)
  1625. return -EINVAL;
  1626. if (kstrtoul(buf, 16, &val))
  1627. return -EINVAL;
  1628. spin_lock(&drvdata->spinlock);
  1629. config->vmid_val[config->vmid_idx] = (u64)val;
  1630. spin_unlock(&drvdata->spinlock);
  1631. return size;
  1632. }
  1633. static DEVICE_ATTR_RW(vmid_val);
  1634. static ssize_t vmid_masks_show(struct device *dev,
  1635. struct device_attribute *attr, char *buf)
  1636. {
  1637. unsigned long val1, val2;
  1638. struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
  1639. struct etmv4_config *config = &drvdata->config;
  1640. spin_lock(&drvdata->spinlock);
  1641. val1 = config->vmid_mask0;
  1642. val2 = config->vmid_mask1;
  1643. spin_unlock(&drvdata->spinlock);
  1644. return scnprintf(buf, PAGE_SIZE, "%#lx %#lx\n", val1, val2);
  1645. }
  1646. static ssize_t vmid_masks_store(struct device *dev,
  1647. struct device_attribute *attr,
  1648. const char *buf, size_t size)
  1649. {
  1650. u8 i, j, maskbyte;
  1651. unsigned long val1, val2, mask;
  1652. struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
  1653. struct etmv4_config *config = &drvdata->config;
  1654. /*
  1655. * only implemented when vmid tracing is enabled, i.e. at least one
  1656. * vmid comparator is implemented and at least 8 bit vmid size
  1657. */
  1658. if (!drvdata->vmid_size || !drvdata->numvmidc)
  1659. return -EINVAL;
  1660. if (sscanf(buf, "%lx %lx", &val1, &val2) != 2)
  1661. return -EINVAL;
  1662. spin_lock(&drvdata->spinlock);
  1663. /*
  1664. * each byte[0..3] controls mask value applied to vmid
  1665. * comparator[0..3]
  1666. */
  1667. switch (drvdata->numvmidc) {
  1668. case 0x1:
  1669. /* COMP0, bits[7:0] */
  1670. config->vmid_mask0 = val1 & 0xFF;
  1671. break;
  1672. case 0x2:
  1673. /* COMP1, bits[15:8] */
  1674. config->vmid_mask0 = val1 & 0xFFFF;
  1675. break;
  1676. case 0x3:
  1677. /* COMP2, bits[23:16] */
  1678. config->vmid_mask0 = val1 & 0xFFFFFF;
  1679. break;
  1680. case 0x4:
  1681. /* COMP3, bits[31:24] */
  1682. config->vmid_mask0 = val1;
  1683. break;
  1684. case 0x5:
  1685. /* COMP4, bits[7:0] */
  1686. config->vmid_mask0 = val1;
  1687. config->vmid_mask1 = val2 & 0xFF;
  1688. break;
  1689. case 0x6:
  1690. /* COMP5, bits[15:8] */
  1691. config->vmid_mask0 = val1;
  1692. config->vmid_mask1 = val2 & 0xFFFF;
  1693. break;
  1694. case 0x7:
  1695. /* COMP6, bits[23:16] */
  1696. config->vmid_mask0 = val1;
  1697. config->vmid_mask1 = val2 & 0xFFFFFF;
  1698. break;
  1699. case 0x8:
  1700. /* COMP7, bits[31:24] */
  1701. config->vmid_mask0 = val1;
  1702. config->vmid_mask1 = val2;
  1703. break;
  1704. default:
  1705. break;
  1706. }
  1707. /*
  1708. * If software sets a mask bit to 1, it must program relevant byte
  1709. * of vmid comparator value 0x0, otherwise behavior is unpredictable.
  1710. * For example, if bit[3] of vmid_mask0 is 1, we must clear bits[31:24]
  1711. * of vmid comparator0 value (corresponding to byte 0) register.
  1712. */
  1713. mask = config->vmid_mask0;
  1714. for (i = 0; i < drvdata->numvmidc; i++) {
  1715. /* mask value of corresponding vmid comparator */
  1716. maskbyte = mask & ETMv4_EVENT_MASK;
  1717. /*
  1718. * each bit corresponds to a byte of respective vmid comparator
  1719. * value register
  1720. */
  1721. for (j = 0; j < 8; j++) {
  1722. if (maskbyte & 1)
  1723. config->vmid_val[i] &= ~(0xFFUL << (j * 8));
  1724. maskbyte >>= 1;
  1725. }
  1726. /* Select the next vmid comparator mask value */
  1727. if (i == 3)
  1728. /* vmid comparators[4-7] */
  1729. mask = config->vmid_mask1;
  1730. else
  1731. mask >>= 0x8;
  1732. }
  1733. spin_unlock(&drvdata->spinlock);
  1734. return size;
  1735. }
  1736. static DEVICE_ATTR_RW(vmid_masks);
  1737. static ssize_t cpu_show(struct device *dev,
  1738. struct device_attribute *attr, char *buf)
  1739. {
  1740. int val;
  1741. struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
  1742. val = drvdata->cpu;
  1743. return scnprintf(buf, PAGE_SIZE, "%d\n", val);
  1744. }
  1745. static DEVICE_ATTR_RO(cpu);
/*
 * Main sysfs attribute table for the ETMv4: resource counts, global
 * configuration, address/sequencer/counter/resource selection, and the
 * ctxid/vmid comparator controls defined above.
 */
static struct attribute *coresight_etmv4_attrs[] = {
	/* read-only implementation/resource counts */
	&dev_attr_nr_pe_cmp.attr,
	&dev_attr_nr_addr_cmp.attr,
	&dev_attr_nr_cntr.attr,
	&dev_attr_nr_ext_inp.attr,
	&dev_attr_numcidc.attr,
	&dev_attr_numvmidc.attr,
	&dev_attr_nrseqstate.attr,
	&dev_attr_nr_resource.attr,
	&dev_attr_nr_ss_cmp.attr,
	/* global trace configuration */
	&dev_attr_reset.attr,
	&dev_attr_mode.attr,
	&dev_attr_pe.attr,
	&dev_attr_event.attr,
	&dev_attr_event_instren.attr,
	&dev_attr_event_ts.attr,
	&dev_attr_syncfreq.attr,
	&dev_attr_cyc_threshold.attr,
	&dev_attr_bb_ctrl.attr,
	&dev_attr_event_vinst.attr,
	&dev_attr_s_exlevel_vinst.attr,
	&dev_attr_ns_exlevel_vinst.attr,
	/* address comparators */
	&dev_attr_addr_idx.attr,
	&dev_attr_addr_instdatatype.attr,
	&dev_attr_addr_single.attr,
	&dev_attr_addr_range.attr,
	&dev_attr_addr_start.attr,
	&dev_attr_addr_stop.attr,
	&dev_attr_addr_ctxtype.attr,
	&dev_attr_addr_context.attr,
	/* sequencer */
	&dev_attr_seq_idx.attr,
	&dev_attr_seq_state.attr,
	&dev_attr_seq_event.attr,
	&dev_attr_seq_reset_event.attr,
	/* counters */
	&dev_attr_cntr_idx.attr,
	&dev_attr_cntrldvr.attr,
	&dev_attr_cntr_val.attr,
	&dev_attr_cntr_ctrl.attr,
	/* resource selectors */
	&dev_attr_res_idx.attr,
	&dev_attr_res_ctrl.attr,
	/* context-id and vmid comparators */
	&dev_attr_ctxid_idx.attr,
	&dev_attr_ctxid_pid.attr,
	&dev_attr_ctxid_masks.attr,
	&dev_attr_vmid_idx.attr,
	&dev_attr_vmid_val.attr,
	&dev_attr_vmid_masks.attr,
	&dev_attr_cpu.attr,
	NULL,
};
/* Parameter block for do_smp_cross_read(): address in, value out. */
struct etmv4_reg {
	void __iomem *addr;	/* mapped register to read (base + offset) */
	u32 data;		/* value read back on the remote CPU */
};
  1799. static void do_smp_cross_read(void *data)
  1800. {
  1801. struct etmv4_reg *reg = data;
  1802. reg->data = readl_relaxed(reg->addr);
  1803. }
  1804. static u32 etmv4_cross_read(const struct device *dev, u32 offset)
  1805. {
  1806. struct etmv4_drvdata *drvdata = dev_get_drvdata(dev);
  1807. struct etmv4_reg reg;
  1808. reg.addr = drvdata->base + offset;
  1809. /*
  1810. * smp cross call ensures the CPU will be powered up before
  1811. * accessing the ETMv4 trace core registers
  1812. */
  1813. smp_call_function_single(drvdata->cpu, do_smp_cross_read, &reg, 1);
  1814. return reg.data;
  1815. }
/* Declare a sysfs show function doing a plain mmio read of a register. */
#define coresight_etm4x_reg(name, offset) \
	coresight_simple_reg32(struct etmv4_drvdata, name, offset)
/*
 * Variant that reads through etmv4_cross_read() so the access happens
 * on the (powered-up) CPU owning the ETM.
 */
#define coresight_etm4x_cross_read(name, offset) \
	coresight_simple_func(struct etmv4_drvdata, etmv4_cross_read, \
			      name, offset)
/* management registers readable without a cross call */
coresight_etm4x_reg(trcpdcr, TRCPDCR);
coresight_etm4x_reg(trcpdsr, TRCPDSR);
coresight_etm4x_reg(trclsr, TRCLSR);
coresight_etm4x_reg(trcauthstatus, TRCAUTHSTATUS);
coresight_etm4x_reg(trcdevid, TRCDEVID);
coresight_etm4x_reg(trcdevtype, TRCDEVTYPE);
coresight_etm4x_reg(trcpidr0, TRCPIDR0);
coresight_etm4x_reg(trcpidr1, TRCPIDR1);
coresight_etm4x_reg(trcpidr2, TRCPIDR2);
coresight_etm4x_reg(trcpidr3, TRCPIDR3);
/* trace-unit registers that must be read on the owning CPU */
coresight_etm4x_cross_read(trcoslsr, TRCOSLSR);
coresight_etm4x_cross_read(trcconfig, TRCCONFIGR);
coresight_etm4x_cross_read(trctraceid, TRCTRACEIDR);
/* Management register dump, exposed under the "mgmt" sysfs group. */
static struct attribute *coresight_etmv4_mgmt_attrs[] = {
	&dev_attr_trcoslsr.attr,
	&dev_attr_trcpdcr.attr,
	&dev_attr_trcpdsr.attr,
	&dev_attr_trclsr.attr,
	&dev_attr_trcconfig.attr,
	&dev_attr_trctraceid.attr,
	&dev_attr_trcauthstatus.attr,
	&dev_attr_trcdevid.attr,
	&dev_attr_trcdevtype.attr,
	&dev_attr_trcpidr0.attr,
	&dev_attr_trcpidr1.attr,
	&dev_attr_trcpidr2.attr,
	&dev_attr_trcpidr3.attr,
	NULL,
};
/* TRCIDRn ID registers, read via cross call on the owning CPU. */
coresight_etm4x_cross_read(trcidr0, TRCIDR0);
coresight_etm4x_cross_read(trcidr1, TRCIDR1);
coresight_etm4x_cross_read(trcidr2, TRCIDR2);
coresight_etm4x_cross_read(trcidr3, TRCIDR3);
coresight_etm4x_cross_read(trcidr4, TRCIDR4);
coresight_etm4x_cross_read(trcidr5, TRCIDR5);
/* trcidr[6,7] are reserved */
coresight_etm4x_cross_read(trcidr8, TRCIDR8);
coresight_etm4x_cross_read(trcidr9, TRCIDR9);
coresight_etm4x_cross_read(trcidr10, TRCIDR10);
coresight_etm4x_cross_read(trcidr11, TRCIDR11);
coresight_etm4x_cross_read(trcidr12, TRCIDR12);
coresight_etm4x_cross_read(trcidr13, TRCIDR13);
/* ID register dump, exposed under the "trcidr" sysfs group. */
static struct attribute *coresight_etmv4_trcidr_attrs[] = {
	&dev_attr_trcidr0.attr,
	&dev_attr_trcidr1.attr,
	&dev_attr_trcidr2.attr,
	&dev_attr_trcidr3.attr,
	&dev_attr_trcidr4.attr,
	&dev_attr_trcidr5.attr,
	/* trcidr[6,7] are reserved */
	&dev_attr_trcidr8.attr,
	&dev_attr_trcidr9.attr,
	&dev_attr_trcidr10.attr,
	&dev_attr_trcidr11.attr,
	&dev_attr_trcidr12.attr,
	&dev_attr_trcidr13.attr,
	NULL,
};
/* Default (unnamed) group: the main configuration attributes. */
static const struct attribute_group coresight_etmv4_group = {
	.attrs = coresight_etmv4_attrs,
};
/* "mgmt" subdirectory: management register dump. */
static const struct attribute_group coresight_etmv4_mgmt_group = {
	.attrs = coresight_etmv4_mgmt_attrs,
	.name = "mgmt",
};
/* "trcidr" subdirectory: TRCIDRn ID register dump. */
static const struct attribute_group coresight_etmv4_trcidr_group = {
	.attrs = coresight_etmv4_trcidr_attrs,
	.name = "trcidr",
};
/* NULL-terminated list handed to the coresight core at registration. */
const struct attribute_group *coresight_etmv4_groups[] = {
	&coresight_etmv4_group,
	&coresight_etmv4_mgmt_group,
	&coresight_etmv4_trcidr_group,
	NULL,
};