cs-etm.c 20 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
3704705706707708709710711712713714715716717718719720721722723724725726727728729730731732733734735736737738739740741742743744745746747748749750751752753754755756757758759760761762763764765766767768769770
  1. // SPDX-License-Identifier: GPL-2.0
  2. /*
  3. * Copyright(C) 2015 Linaro Limited. All rights reserved.
  4. * Author: Mathieu Poirier <mathieu.poirier@linaro.org>
  5. */
  6. #include <api/fs/fs.h>
  7. #include <linux/bitops.h>
  8. #include <linux/compiler.h>
  9. #include <linux/coresight-pmu.h>
  10. #include <linux/kernel.h>
  11. #include <linux/log2.h>
  12. #include <linux/types.h>
  13. #include "cs-etm.h"
  14. #include "../../perf.h"
  15. #include "../../util/auxtrace.h"
  16. #include "../../util/cpumap.h"
  17. #include "../../util/evlist.h"
  18. #include "../../util/evsel.h"
  19. #include "../../util/pmu.h"
  20. #include "../../util/thread_map.h"
  21. #include "../../util/cs-etm.h"
  22. #include <stdlib.h>
  23. #include <sys/stat.h>
  24. #define ENABLE_SINK_MAX 128
  25. #define CS_BUS_DEVICE_PATH "/bus/coresight/devices/"
/*
 * Per-session state for a CoreSight ETM recording, embedding the generic
 * auxtrace_record so callbacks can recover it with container_of().
 */
struct cs_etm_recording {
	struct auxtrace_record	itr;		/* embedded generic auxtrace hooks */
	struct perf_pmu		*cs_etm_pmu;	/* the cs_etm PMU found at init time */
	struct perf_evlist	*evlist;	/* event list set in recording_options() */
	int			wrapped_cnt;	/* number of entries in @wrapped */
	bool			*wrapped;	/* per-mmap "AUX buffer wrapped" flags */
	bool			snapshot_mode;	/* true when snapshot mode requested */
	size_t			snapshot_size;	/* user snapshot size, 0 for default */
};

static bool cs_etm_is_etmv4(struct auxtrace_record *itr, int cpu);
  36. static int cs_etm_parse_snapshot_options(struct auxtrace_record *itr,
  37. struct record_opts *opts,
  38. const char *str)
  39. {
  40. struct cs_etm_recording *ptr =
  41. container_of(itr, struct cs_etm_recording, itr);
  42. unsigned long long snapshot_size = 0;
  43. char *endptr;
  44. if (str) {
  45. snapshot_size = strtoull(str, &endptr, 0);
  46. if (*endptr || snapshot_size > SIZE_MAX)
  47. return -1;
  48. }
  49. opts->auxtrace_snapshot_mode = true;
  50. opts->auxtrace_snapshot_size = snapshot_size;
  51. ptr->snapshot_size = snapshot_size;
  52. return 0;
  53. }
/*
 * Validate and finalise @opts for a session containing a cs_etm event:
 * enforce a single ETM event, size the AUX area for snapshot or full-trace
 * mode, and append a tracking (dummy) event so side-band records such as
 * mmap events are still captured.  Returns 0 on success or -EINVAL.
 */
static int cs_etm_recording_options(struct auxtrace_record *itr,
				    struct perf_evlist *evlist,
				    struct record_opts *opts)
{
	struct cs_etm_recording *ptr =
				container_of(itr, struct cs_etm_recording, itr);
	struct perf_pmu *cs_etm_pmu = ptr->cs_etm_pmu;
	struct perf_evsel *evsel, *cs_etm_evsel = NULL;
	const struct cpu_map *cpus = evlist->cpus;
	/* root or a permissive perf_event_paranoid gets the bigger buffers */
	bool privileged = (geteuid() == 0 || perf_event_paranoid() < 0);

	ptr->evlist = evlist;
	ptr->snapshot_mode = opts->auxtrace_snapshot_mode;

	/* Locate the cs_etm event; there must be at most one of them. */
	evlist__for_each_entry(evlist, evsel) {
		if (evsel->attr.type == cs_etm_pmu->type) {
			if (cs_etm_evsel) {
				pr_err("There may be only one %s event\n",
				       CORESIGHT_ETM_PMU_NAME);
				return -EINVAL;
			}
			/* trace every instance, not at a frequency */
			evsel->attr.freq = 0;
			evsel->attr.sample_period = 1;
			cs_etm_evsel = evsel;
			opts->full_auxtrace = true;
		}
	}

	/* no need to continue if no event of interest was found */
	if (!cs_etm_evsel)
		return 0;

	if (opts->use_clockid) {
		pr_err("Cannot use clockid (-k option) with %s\n",
		       CORESIGHT_ETM_PMU_NAME);
		return -EINVAL;
	}

	/* we are in snapshot mode */
	if (opts->auxtrace_snapshot_mode) {
		/*
		 * No size were given to '-S' or '-m,', so go with
		 * the default
		 */
		if (!opts->auxtrace_snapshot_size &&
		    !opts->auxtrace_mmap_pages) {
			if (privileged) {
				opts->auxtrace_mmap_pages = MiB(4) / page_size;
			} else {
				opts->auxtrace_mmap_pages =
							KiB(128) / page_size;
				if (opts->mmap_pages == UINT_MAX)
					opts->mmap_pages = KiB(256) / page_size;
			}
		} else if (!opts->auxtrace_mmap_pages && !privileged &&
			   opts->mmap_pages == UINT_MAX) {
			opts->mmap_pages = KiB(256) / page_size;
		}

		/*
		 * '-m,xyz' was specified but no snapshot size, so make the
		 * snapshot size as big as the auxtrace mmap area.
		 */
		if (!opts->auxtrace_snapshot_size) {
			opts->auxtrace_snapshot_size =
				opts->auxtrace_mmap_pages * (size_t)page_size;
		}

		/*
		 * -Sxyz was specified but no auxtrace mmap area, so make the
		 * auxtrace mmap area big enough to fit the requested snapshot
		 * size.
		 */
		if (!opts->auxtrace_mmap_pages) {
			size_t sz = opts->auxtrace_snapshot_size;

			sz = round_up(sz, page_size) / page_size;
			opts->auxtrace_mmap_pages = roundup_pow_of_two(sz);
		}

		/* Snapshot size can't be bigger than the auxtrace area */
		if (opts->auxtrace_snapshot_size >
				opts->auxtrace_mmap_pages * (size_t)page_size) {
			pr_err("Snapshot size %zu must not be greater than AUX area tracing mmap size %zu\n",
			       opts->auxtrace_snapshot_size,
			       opts->auxtrace_mmap_pages * (size_t)page_size);
			return -EINVAL;
		}

		/* Something went wrong somewhere - this shouldn't happen */
		if (!opts->auxtrace_snapshot_size ||
		    !opts->auxtrace_mmap_pages) {
			pr_err("Failed to calculate default snapshot size and/or AUX area tracing mmap pages\n");
			return -EINVAL;
		}
	}

	/* We are in full trace mode but '-m,xyz' wasn't specified */
	if (opts->full_auxtrace && !opts->auxtrace_mmap_pages) {
		if (privileged) {
			opts->auxtrace_mmap_pages = MiB(4) / page_size;
		} else {
			opts->auxtrace_mmap_pages = KiB(128) / page_size;
			if (opts->mmap_pages == UINT_MAX)
				opts->mmap_pages = KiB(256) / page_size;
		}
	}

	/* Validate auxtrace_mmap_pages provided by user */
	if (opts->auxtrace_mmap_pages) {
		unsigned int max_page = (KiB(128) / page_size);
		size_t sz = opts->auxtrace_mmap_pages * (size_t)page_size;

		if (!privileged &&
		    opts->auxtrace_mmap_pages > max_page) {
			opts->auxtrace_mmap_pages = max_page;
			pr_err("auxtrace too big, truncating to %d\n",
			       max_page);
		}

		/*
		 * NOTE(review): @sz is computed before the truncation above,
		 * so this power-of-2 check uses the pre-truncation size -
		 * confirm that is intended.
		 */
		if (!is_power_of_2(sz)) {
			pr_err("Invalid mmap size for %s: must be a power of 2\n",
			       CORESIGHT_ETM_PMU_NAME);
			return -EINVAL;
		}
	}

	if (opts->auxtrace_snapshot_mode)
		pr_debug2("%s snapshot size: %zu\n", CORESIGHT_ETM_PMU_NAME,
			  opts->auxtrace_snapshot_size);

	/*
	 * To obtain the auxtrace buffer file descriptor, the auxtrace
	 * event must come first.
	 */
	perf_evlist__to_front(evlist, cs_etm_evsel);

	/*
	 * In the case of per-cpu mmaps, we need the CPU on the
	 * AUX event.
	 */
	if (!cpu_map__empty(cpus))
		perf_evsel__set_sample_bit(cs_etm_evsel, CPU);

	/* Add dummy event to keep tracking */
	if (opts->full_auxtrace) {
		struct perf_evsel *tracking_evsel;
		int err;

		err = parse_events(evlist, "dummy:u", NULL);
		if (err)
			return err;

		tracking_evsel = perf_evlist__last(evlist);
		perf_evlist__set_tracking_event(evlist, tracking_evsel);

		tracking_evsel->attr.freq = 0;
		tracking_evsel->attr.sample_period = 1;

		/* In per-cpu case, always need the time of mmap events etc */
		if (!cpu_map__empty(cpus))
			perf_evsel__set_sample_bit(tracking_evsel, TIME);
	}

	return 0;
}
  197. static u64 cs_etm_get_config(struct auxtrace_record *itr)
  198. {
  199. u64 config = 0;
  200. struct cs_etm_recording *ptr =
  201. container_of(itr, struct cs_etm_recording, itr);
  202. struct perf_pmu *cs_etm_pmu = ptr->cs_etm_pmu;
  203. struct perf_evlist *evlist = ptr->evlist;
  204. struct perf_evsel *evsel;
  205. evlist__for_each_entry(evlist, evsel) {
  206. if (evsel->attr.type == cs_etm_pmu->type) {
  207. /*
  208. * Variable perf_event_attr::config is assigned to
  209. * ETMv3/PTM. The bit fields have been made to match
  210. * the ETMv3.5 ETRMCR register specification. See the
  211. * PMU_FORMAT_ATTR() declarations in
  212. * drivers/hwtracing/coresight/coresight-perf.c for
  213. * details.
  214. */
  215. config = evsel->attr.config;
  216. break;
  217. }
  218. }
  219. return config;
  220. }
  221. #ifndef BIT
  222. #define BIT(N) (1UL << (N))
  223. #endif
  224. static u64 cs_etmv4_get_config(struct auxtrace_record *itr)
  225. {
  226. u64 config = 0;
  227. u64 config_opts = 0;
  228. /*
  229. * The perf event variable config bits represent both
  230. * the command line options and register programming
  231. * bits in ETMv3/PTM. For ETMv4 we must remap options
  232. * to real bits
  233. */
  234. config_opts = cs_etm_get_config(itr);
  235. if (config_opts & BIT(ETM_OPT_CYCACC))
  236. config |= BIT(ETM4_CFG_BIT_CYCACC);
  237. if (config_opts & BIT(ETM_OPT_TS))
  238. config |= BIT(ETM4_CFG_BIT_TS);
  239. if (config_opts & BIT(ETM_OPT_RETSTK))
  240. config |= BIT(ETM4_CFG_BIT_RETSTK);
  241. return config;
  242. }
  243. static size_t
  244. cs_etm_info_priv_size(struct auxtrace_record *itr __maybe_unused,
  245. struct perf_evlist *evlist __maybe_unused)
  246. {
  247. int i;
  248. int etmv3 = 0, etmv4 = 0;
  249. struct cpu_map *event_cpus = evlist->cpus;
  250. struct cpu_map *online_cpus = cpu_map__new(NULL);
  251. /* cpu map is not empty, we have specific CPUs to work with */
  252. if (!cpu_map__empty(event_cpus)) {
  253. for (i = 0; i < cpu__max_cpu(); i++) {
  254. if (!cpu_map__has(event_cpus, i) ||
  255. !cpu_map__has(online_cpus, i))
  256. continue;
  257. if (cs_etm_is_etmv4(itr, i))
  258. etmv4++;
  259. else
  260. etmv3++;
  261. }
  262. } else {
  263. /* get configuration for all CPUs in the system */
  264. for (i = 0; i < cpu__max_cpu(); i++) {
  265. if (!cpu_map__has(online_cpus, i))
  266. continue;
  267. if (cs_etm_is_etmv4(itr, i))
  268. etmv4++;
  269. else
  270. etmv3++;
  271. }
  272. }
  273. cpu_map__put(online_cpus);
  274. return (CS_ETM_HEADER_SIZE +
  275. (etmv4 * CS_ETMV4_PRIV_SIZE) +
  276. (etmv3 * CS_ETMV3_PRIV_SIZE));
  277. }
/* sysfs paths (relative to the PMU's cpu%d directory) of ETMv3 RO registers */
static const char *metadata_etmv3_ro[CS_ETM_PRIV_MAX] = {
	[CS_ETM_ETMCCER]	= "mgmt/etmccer",
	[CS_ETM_ETMIDR]		= "mgmt/etmidr",
};

/* sysfs paths of the ETMv4 RO ID registers captured in the metadata */
static const char *metadata_etmv4_ro[CS_ETMV4_PRIV_MAX] = {
	[CS_ETMV4_TRCIDR0]		= "trcidr/trcidr0",
	[CS_ETMV4_TRCIDR1]		= "trcidr/trcidr1",
	[CS_ETMV4_TRCIDR2]		= "trcidr/trcidr2",
	[CS_ETMV4_TRCIDR8]		= "trcidr/trcidr8",
	[CS_ETMV4_TRCAUTHSTATUS]	= "mgmt/trcauthstatus",
};
  289. static bool cs_etm_is_etmv4(struct auxtrace_record *itr, int cpu)
  290. {
  291. bool ret = false;
  292. char path[PATH_MAX];
  293. int scan;
  294. unsigned int val;
  295. struct cs_etm_recording *ptr =
  296. container_of(itr, struct cs_etm_recording, itr);
  297. struct perf_pmu *cs_etm_pmu = ptr->cs_etm_pmu;
  298. /* Take any of the RO files for ETMv4 and see if it present */
  299. snprintf(path, PATH_MAX, "cpu%d/%s",
  300. cpu, metadata_etmv4_ro[CS_ETMV4_TRCIDR0]);
  301. scan = perf_pmu__scan_file(cs_etm_pmu, path, "%x", &val);
  302. /* The file was read successfully, we have a winner */
  303. if (scan == 1)
  304. ret = true;
  305. return ret;
  306. }
  307. static int cs_etm_get_ro(struct perf_pmu *pmu, int cpu, const char *path)
  308. {
  309. char pmu_path[PATH_MAX];
  310. int scan;
  311. unsigned int val = 0;
  312. /* Get RO metadata from sysfs */
  313. snprintf(pmu_path, PATH_MAX, "cpu%d/%s", cpu, path);
  314. scan = perf_pmu__scan_file(pmu, pmu_path, "%x", &val);
  315. if (scan != 1)
  316. pr_err("%s: error reading: %s\n", __func__, pmu_path);
  317. return val;
  318. }
/*
 * Fill one CPU's slice of @info->priv starting at index *offset: the
 * version magic, configuration and trace-ID values, and the tracer's
 * read-only ID registers from sysfs.  On return *offset has been advanced
 * to where the next CPU's slice should start.
 */
static void cs_etm_get_metadata(int cpu, u32 *offset,
				struct auxtrace_record *itr,
				struct auxtrace_info_event *info)
{
	u32 increment;
	u64 magic;
	struct cs_etm_recording *ptr =
			container_of(itr, struct cs_etm_recording, itr);
	struct perf_pmu *cs_etm_pmu = ptr->cs_etm_pmu;

	/* first see what kind of tracer this cpu is affined to */
	if (cs_etm_is_etmv4(itr, cpu)) {
		magic = __perf_cs_etmv4_magic;
		/* Get trace configuration register */
		info->priv[*offset + CS_ETMV4_TRCCONFIGR] =
						cs_etmv4_get_config(itr);
		/* Get traceID from the framework */
		info->priv[*offset + CS_ETMV4_TRCTRACEIDR] =
						coresight_get_trace_id(cpu);
		/* Get read-only information from sysFS */
		info->priv[*offset + CS_ETMV4_TRCIDR0] =
			cs_etm_get_ro(cs_etm_pmu, cpu,
				      metadata_etmv4_ro[CS_ETMV4_TRCIDR0]);
		info->priv[*offset + CS_ETMV4_TRCIDR1] =
			cs_etm_get_ro(cs_etm_pmu, cpu,
				      metadata_etmv4_ro[CS_ETMV4_TRCIDR1]);
		info->priv[*offset + CS_ETMV4_TRCIDR2] =
			cs_etm_get_ro(cs_etm_pmu, cpu,
				      metadata_etmv4_ro[CS_ETMV4_TRCIDR2]);
		info->priv[*offset + CS_ETMV4_TRCIDR8] =
			cs_etm_get_ro(cs_etm_pmu, cpu,
				      metadata_etmv4_ro[CS_ETMV4_TRCIDR8]);
		info->priv[*offset + CS_ETMV4_TRCAUTHSTATUS] =
			cs_etm_get_ro(cs_etm_pmu, cpu,
				      metadata_etmv4_ro
				      [CS_ETMV4_TRCAUTHSTATUS]);

		/* How much space was used */
		increment = CS_ETMV4_PRIV_MAX;
	} else {
		magic = __perf_cs_etmv3_magic;
		/* Get configuration register */
		info->priv[*offset + CS_ETM_ETMCR] = cs_etm_get_config(itr);
		/* Get traceID from the framework */
		info->priv[*offset + CS_ETM_ETMTRACEIDR] =
						coresight_get_trace_id(cpu);
		/* Get read-only information from sysFS */
		info->priv[*offset + CS_ETM_ETMCCER] =
			cs_etm_get_ro(cs_etm_pmu, cpu,
				      metadata_etmv3_ro[CS_ETM_ETMCCER]);
		info->priv[*offset + CS_ETM_ETMIDR] =
			cs_etm_get_ro(cs_etm_pmu, cpu,
				      metadata_etmv3_ro[CS_ETM_ETMIDR]);

		/* How much space was used */
		increment = CS_ETM_PRIV_MAX;
	}

	/* Build generic header portion */
	info->priv[*offset + CS_ETM_MAGIC] = magic;
	info->priv[*offset + CS_ETM_CPU] = cpu;
	/* Where the next CPU entry should start from */
	*offset += increment;
}
  379. static int cs_etm_info_fill(struct auxtrace_record *itr,
  380. struct perf_session *session,
  381. struct auxtrace_info_event *info,
  382. size_t priv_size)
  383. {
  384. int i;
  385. u32 offset;
  386. u64 nr_cpu, type;
  387. struct cpu_map *cpu_map;
  388. struct cpu_map *event_cpus = session->evlist->cpus;
  389. struct cpu_map *online_cpus = cpu_map__new(NULL);
  390. struct cs_etm_recording *ptr =
  391. container_of(itr, struct cs_etm_recording, itr);
  392. struct perf_pmu *cs_etm_pmu = ptr->cs_etm_pmu;
  393. if (priv_size != cs_etm_info_priv_size(itr, session->evlist))
  394. return -EINVAL;
  395. if (!session->evlist->nr_mmaps)
  396. return -EINVAL;
  397. /* If the cpu_map is empty all online CPUs are involved */
  398. if (cpu_map__empty(event_cpus)) {
  399. cpu_map = online_cpus;
  400. } else {
  401. /* Make sure all specified CPUs are online */
  402. for (i = 0; i < cpu_map__nr(event_cpus); i++) {
  403. if (cpu_map__has(event_cpus, i) &&
  404. !cpu_map__has(online_cpus, i))
  405. return -EINVAL;
  406. }
  407. cpu_map = event_cpus;
  408. }
  409. nr_cpu = cpu_map__nr(cpu_map);
  410. /* Get PMU type as dynamically assigned by the core */
  411. type = cs_etm_pmu->type;
  412. /* First fill out the session header */
  413. info->type = PERF_AUXTRACE_CS_ETM;
  414. info->priv[CS_HEADER_VERSION_0] = 0;
  415. info->priv[CS_PMU_TYPE_CPUS] = type << 32;
  416. info->priv[CS_PMU_TYPE_CPUS] |= nr_cpu;
  417. info->priv[CS_ETM_SNAPSHOT] = ptr->snapshot_mode;
  418. offset = CS_ETM_SNAPSHOT + 1;
  419. for (i = 0; i < cpu__max_cpu() && offset < priv_size; i++)
  420. if (cpu_map__has(cpu_map, i))
  421. cs_etm_get_metadata(i, &offset, itr, info);
  422. cpu_map__put(online_cpus);
  423. return 0;
  424. }
  425. static int cs_etm_alloc_wrapped_array(struct cs_etm_recording *ptr, int idx)
  426. {
  427. bool *wrapped;
  428. int cnt = ptr->wrapped_cnt;
  429. /* Make @ptr->wrapped as big as @idx */
  430. while (cnt <= idx)
  431. cnt++;
  432. /*
  433. * Free'ed in cs_etm_recording_free(). Using realloc() to avoid
  434. * cross compilation problems where the host's system supports
  435. * reallocarray() but not the target.
  436. */
  437. wrapped = realloc(ptr->wrapped, cnt * sizeof(bool));
  438. if (!wrapped)
  439. return -ENOMEM;
  440. wrapped[cnt - 1] = false;
  441. ptr->wrapped_cnt = cnt;
  442. ptr->wrapped = wrapped;
  443. return 0;
  444. }
  445. static bool cs_etm_buffer_has_wrapped(unsigned char *buffer,
  446. size_t buffer_size, u64 head)
  447. {
  448. u64 i, watermark;
  449. u64 *buf = (u64 *)buffer;
  450. size_t buf_size = buffer_size;
  451. /*
  452. * We want to look the very last 512 byte (chosen arbitrarily) in
  453. * the ring buffer.
  454. */
  455. watermark = buf_size - 512;
  456. /*
  457. * @head is continuously increasing - if its value is equal or greater
  458. * than the size of the ring buffer, it has wrapped around.
  459. */
  460. if (head >= buffer_size)
  461. return true;
  462. /*
  463. * The value of @head is somewhere within the size of the ring buffer.
  464. * This can be that there hasn't been enough data to fill the ring
  465. * buffer yet or the trace time was so long that @head has numerically
  466. * wrapped around. To find we need to check if we have data at the very
  467. * end of the ring buffer. We can reliably do this because mmap'ed
  468. * pages are zeroed out and there is a fresh mapping with every new
  469. * session.
  470. */
  471. /* @head is less than 512 byte from the end of the ring buffer */
  472. if (head > watermark)
  473. watermark = head;
  474. /*
  475. * Speed things up by using 64 bit transactions (see "u64 *buf" above)
  476. */
  477. watermark >>= 3;
  478. buf_size >>= 3;
  479. /*
  480. * If we find trace data at the end of the ring buffer, @head has
  481. * been there and has numerically wrapped around at least once.
  482. */
  483. for (i = watermark; i < buf_size; i++)
  484. if (buf[i])
  485. return true;
  486. return false;
  487. }
/*
 * Compute the [*old, *head) window to snapshot for mmap @idx.  Before the
 * AUX buffer first wraps only the fresh data is taken; once it has
 * wrapped (tracked per-mmap in ptr->wrapped[]) the whole buffer is.
 * Returns 0 on success or a negative error code.
 */
static int cs_etm_find_snapshot(struct auxtrace_record *itr,
				int idx, struct auxtrace_mmap *mm,
				unsigned char *data,
				u64 *head, u64 *old)
{
	int err;
	bool wrapped;
	struct cs_etm_recording *ptr =
			container_of(itr, struct cs_etm_recording, itr);

	/*
	 * Allocate memory to keep track of wrapping if this is the first
	 * time we deal with this *mm.
	 */
	if (idx >= ptr->wrapped_cnt) {
		err = cs_etm_alloc_wrapped_array(ptr, idx);
		if (err)
			return err;
	}

	/*
	 * Check to see if *head has wrapped around.  If it hasn't only the
	 * amount of data between *head and *old is snapshot'ed to avoid
	 * bloating the perf.data file with zeros.  But as soon as *head has
	 * wrapped around the entire size of the AUX ring buffer it taken.
	 */
	wrapped = ptr->wrapped[idx];
	if (!wrapped && cs_etm_buffer_has_wrapped(data, mm->len, *head)) {
		wrapped = true;
		ptr->wrapped[idx] = true;
	}

	pr_debug3("%s: mmap index %d old head %zu new head %zu size %zu\n",
		  __func__, idx, (size_t)*old, (size_t)*head, mm->len);

	/* No wrap has occurred, we can just use *head and *old. */
	if (!wrapped)
		return 0;

	/*
	 * *head has wrapped around - adjust *head and *old to pickup the
	 * entire content of the AUX buffer.
	 */
	if (*head >= mm->len) {
		*old = *head - mm->len;
	} else {
		*head += mm->len;
		*old = *head - mm->len;
	}

	return 0;
}
  534. static int cs_etm_snapshot_start(struct auxtrace_record *itr)
  535. {
  536. struct cs_etm_recording *ptr =
  537. container_of(itr, struct cs_etm_recording, itr);
  538. struct perf_evsel *evsel;
  539. evlist__for_each_entry(ptr->evlist, evsel) {
  540. if (evsel->attr.type == ptr->cs_etm_pmu->type)
  541. return perf_evsel__disable(evsel);
  542. }
  543. return -EINVAL;
  544. }
  545. static int cs_etm_snapshot_finish(struct auxtrace_record *itr)
  546. {
  547. struct cs_etm_recording *ptr =
  548. container_of(itr, struct cs_etm_recording, itr);
  549. struct perf_evsel *evsel;
  550. evlist__for_each_entry(ptr->evlist, evsel) {
  551. if (evsel->attr.type == ptr->cs_etm_pmu->type)
  552. return perf_evsel__enable(evsel);
  553. }
  554. return -EINVAL;
  555. }
  556. static u64 cs_etm_reference(struct auxtrace_record *itr __maybe_unused)
  557. {
  558. return (((u64) rand() << 0) & 0x00000000FFFFFFFFull) |
  559. (((u64) rand() << 32) & 0xFFFFFFFF00000000ull);
  560. }
  561. static void cs_etm_recording_free(struct auxtrace_record *itr)
  562. {
  563. struct cs_etm_recording *ptr =
  564. container_of(itr, struct cs_etm_recording, itr);
  565. zfree(&ptr->wrapped);
  566. free(ptr);
  567. }
  568. static int cs_etm_read_finish(struct auxtrace_record *itr, int idx)
  569. {
  570. struct cs_etm_recording *ptr =
  571. container_of(itr, struct cs_etm_recording, itr);
  572. struct perf_evsel *evsel;
  573. evlist__for_each_entry(ptr->evlist, evsel) {
  574. if (evsel->attr.type == ptr->cs_etm_pmu->type)
  575. return perf_evlist__enable_event_idx(ptr->evlist,
  576. evsel, idx);
  577. }
  578. return -EINVAL;
  579. }
  580. struct auxtrace_record *cs_etm_record_init(int *err)
  581. {
  582. struct perf_pmu *cs_etm_pmu;
  583. struct cs_etm_recording *ptr;
  584. cs_etm_pmu = perf_pmu__find(CORESIGHT_ETM_PMU_NAME);
  585. if (!cs_etm_pmu) {
  586. *err = -EINVAL;
  587. goto out;
  588. }
  589. ptr = zalloc(sizeof(struct cs_etm_recording));
  590. if (!ptr) {
  591. *err = -ENOMEM;
  592. goto out;
  593. }
  594. ptr->cs_etm_pmu = cs_etm_pmu;
  595. ptr->itr.parse_snapshot_options = cs_etm_parse_snapshot_options;
  596. ptr->itr.recording_options = cs_etm_recording_options;
  597. ptr->itr.info_priv_size = cs_etm_info_priv_size;
  598. ptr->itr.info_fill = cs_etm_info_fill;
  599. ptr->itr.find_snapshot = cs_etm_find_snapshot;
  600. ptr->itr.snapshot_start = cs_etm_snapshot_start;
  601. ptr->itr.snapshot_finish = cs_etm_snapshot_finish;
  602. ptr->itr.reference = cs_etm_reference;
  603. ptr->itr.free = cs_etm_recording_free;
  604. ptr->itr.read_finish = cs_etm_read_finish;
  605. *err = 0;
  606. return &ptr->itr;
  607. out:
  608. return NULL;
  609. }
  610. static FILE *cs_device__open_file(const char *name)
  611. {
  612. struct stat st;
  613. char path[PATH_MAX];
  614. const char *sysfs;
  615. sysfs = sysfs__mountpoint();
  616. if (!sysfs)
  617. return NULL;
  618. snprintf(path, PATH_MAX,
  619. "%s" CS_BUS_DEVICE_PATH "%s", sysfs, name);
  620. if (stat(path, &st) < 0)
  621. return NULL;
  622. return fopen(path, "w");
  623. }
  624. static int __printf(2, 3) cs_device__print_file(const char *name, const char *fmt, ...)
  625. {
  626. va_list args;
  627. FILE *file;
  628. int ret = -EINVAL;
  629. va_start(args, fmt);
  630. file = cs_device__open_file(name);
  631. if (file) {
  632. ret = vfprintf(file, fmt, args);
  633. fclose(file);
  634. }
  635. va_end(args);
  636. return ret;
  637. }
  638. int cs_etm_set_drv_config(struct perf_evsel_config_term *term)
  639. {
  640. int ret;
  641. char enable_sink[ENABLE_SINK_MAX];
  642. snprintf(enable_sink, ENABLE_SINK_MAX, "%s/%s",
  643. term->val.drv_cfg, "enable_sink");
  644. ret = cs_device__print_file(enable_sink, "%d", 1);
  645. if (ret < 0)
  646. return ret;
  647. return 0;
  648. }