trace.c 12 KB

123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163164165166167168169170171172173174175176177178179180181182183184185186187188189190191192193194195196197198199200201202203204205206207208209210211212213214215216217218219220221222223224225226227228229230231232233234235236237238239240241242243244245246247248249250251252253254255256257258259260261262263264265266267268269270271272273274275276277278279280281282283284285286287288289290291292293294295296297298299300301302303304305306307308309310311312313314315316317318319320321322323324325326327328329330331332333334335336337338339340341342343344345346347348349350351352353354355356357358359360361362363364365366367368369370371372373374375376377378379380381382383384385386387388389390391392393394395396397398399400401402403404405406407408409410411412413414415416417418419420421422423424425426427428429430431432433434435436437438439440441442443444445446447448449450451452453454455456457458459460461462463464465466467468469470
  1. // SPDX-License-Identifier: GPL-2.0+
  2. /*
  3. * Copyright (c) 2012 The Chromium OS Authors.
  4. */
  5. #include <common.h>
  6. #include <mapmem.h>
  7. #include <time.h>
  8. #include <trace.h>
  9. #include <asm/global_data.h>
  10. #include <asm/io.h>
  11. #include <asm/sections.h>
  12. DECLARE_GLOBAL_DATA_PTR;
  13. static char trace_enabled __section(".data");
  14. static char trace_inited __section(".data");
/* The header block at the start of the trace memory area */
struct trace_hdr {
	int func_count;		/* Total number of function call sites */
	u64 call_count;		/* Total number of tracked function calls */
	u64 untracked_count;	/* Total number of untracked function calls */
	int funcs_used;		/* Total number of functions used */

	/*
	 * Call count for each function. This is indexed by the word offset
	 * of the function from gd->relocaddr
	 */
	uintptr_t *call_accum;

	/* Function trace list */
	struct trace_call *ftrace;	/* The function call records */
	ulong ftrace_size;	/* Num. of ftrace records we have space for */
	ulong ftrace_count;	/* Num. of ftrace records written */
	ulong ftrace_too_deep_count;	/* Functions that were too deep */

	int depth;		/* Current depth of function calls */
	int depth_limit;	/* Depth limit to trace to */
	int max_depth;		/* Maximum depth seen so far */
	int min_depth;		/* Minimum depth seen so far */
	bool trace_locked;	/* Used to detect recursive tracing */
};
  37. /* Pointer to start of trace buffer */
  38. static struct trace_hdr *hdr __section(".data");
  39. static inline uintptr_t __attribute__((no_instrument_function))
  40. func_ptr_to_num(void *func_ptr)
  41. {
  42. uintptr_t offset = (uintptr_t)func_ptr;
  43. #ifdef CONFIG_SANDBOX
  44. offset -= (uintptr_t)&_init;
  45. #else
  46. if (gd->flags & GD_FLG_RELOC)
  47. offset -= gd->relocaddr;
  48. else
  49. offset -= CONFIG_TEXT_BASE;
  50. #endif
  51. return offset / FUNC_SITE_SIZE;
  52. }
#if defined(CONFIG_EFI_LOADER) && (defined(CONFIG_ARM) || defined(CONFIG_RISCV))

/**
 * trace_gd - the value of the gd register
 */
static volatile gd_t *trace_gd;

/**
 * trace_save_gd() - save the value of the gd register
 */
static void notrace trace_save_gd(void)
{
	trace_gd = gd;
}

/**
 * trace_swap_gd() - swap between U-Boot and application gd register value
 *
 * An UEFI application may change the value of the register that gd lives in.
 * But some of our functions like get_ticks() access this register. So we
 * have to set the gd register to the U-Boot value when entering a trace
 * point and set it back to the application value when exiting the trace point.
 */
static void notrace trace_swap_gd(void)
{
	volatile gd_t *temp_gd = trace_gd;

	trace_gd = gd;
	set_gd(temp_gd);
}

#else

/* Without EFI on ARM/RISC-V the gd register is never clobbered: no-ops */
static void notrace trace_save_gd(void)
{
}

static void notrace trace_swap_gd(void)
{
}

#endif
  87. static void notrace add_ftrace(void *func_ptr, void *caller, ulong flags)
  88. {
  89. if (hdr->depth > hdr->depth_limit) {
  90. hdr->ftrace_too_deep_count++;
  91. return;
  92. }
  93. if (hdr->ftrace_count < hdr->ftrace_size) {
  94. struct trace_call *rec = &hdr->ftrace[hdr->ftrace_count];
  95. rec->func = func_ptr_to_num(func_ptr);
  96. rec->caller = func_ptr_to_num(caller);
  97. rec->flags = flags | (timer_get_us() & FUNCF_TIMESTAMP_MASK);
  98. }
  99. hdr->ftrace_count++;
  100. }
/**
 * __cyg_profile_func_enter() - record function entry
 *
 * We add to our tally for this function and add to the list of called
 * functions.
 *
 * @func_ptr: pointer to function being entered
 * @caller: pointer to function which called this function
 */
void notrace __cyg_profile_func_enter(void *func_ptr, void *caller)
{
	if (trace_enabled) {
		int func;

		/*
		 * If something this handler calls is itself instrumented we
		 * re-enter here with the lock still held: disable tracing
		 * rather than recurse forever
		 */
		if (hdr->trace_locked) {
			trace_enabled = 0;
			puts("trace: recursion detected, disabling\n");
			hdr->trace_locked = false;
			return;
		}

		hdr->trace_locked = true;
		trace_swap_gd();
		add_ftrace(func_ptr, caller, FUNCF_ENTRY);
		func = func_ptr_to_num(func_ptr);
		if (func < hdr->func_count) {
			hdr->call_accum[func]++;
			hdr->call_count++;
		} else {
			/* Call site is outside the accumulator's range */
			hdr->untracked_count++;
		}
		hdr->depth++;
		if (hdr->depth > hdr->max_depth)
			hdr->max_depth = hdr->depth;
		trace_swap_gd();
		hdr->trace_locked = false;
	}
}
/**
 * __cyg_profile_func_exit() - record function exit
 *
 * @func_ptr: pointer to function being exited
 * @caller: pointer to function which called this function
 */
void notrace __cyg_profile_func_exit(void *func_ptr, void *caller)
{
	if (trace_enabled) {
		trace_swap_gd();
		/* Drop the depth before recording, mirroring func_enter */
		hdr->depth--;
		add_ftrace(func_ptr, caller, FUNCF_EXIT);
		/* Track min depth in case tracing started mid call-stack */
		if (hdr->depth < hdr->min_depth)
			hdr->min_depth = hdr->depth;
		trace_swap_gd();
	}
}
  154. /**
  155. * trace_list_functions() - produce a list of called functions
  156. *
  157. * The information is written into the supplied buffer - a header followed
  158. * by a list of function records.
  159. *
  160. * @buff: buffer to place list into
  161. * @buff_size: size of buffer
  162. * @needed: returns size of buffer needed, which may be
  163. * greater than buff_size if we ran out of space.
  164. * Return: 0 if ok, -ENOSPC if space was exhausted
  165. */
  166. int trace_list_functions(void *buff, size_t buff_size, size_t *needed)
  167. {
  168. struct trace_output_hdr *output_hdr = NULL;
  169. void *end, *ptr = buff;
  170. size_t func;
  171. size_t upto;
  172. end = buff ? buff + buff_size : NULL;
  173. /* Place some header information */
  174. if (ptr + sizeof(struct trace_output_hdr) < end)
  175. output_hdr = ptr;
  176. ptr += sizeof(struct trace_output_hdr);
  177. /* Add information about each function */
  178. for (func = upto = 0; func < hdr->func_count; func++) {
  179. size_t calls = hdr->call_accum[func];
  180. if (!calls)
  181. continue;
  182. if (ptr + sizeof(struct trace_output_func) < end) {
  183. struct trace_output_func *stats = ptr;
  184. stats->offset = func * FUNC_SITE_SIZE;
  185. stats->call_count = calls;
  186. upto++;
  187. }
  188. ptr += sizeof(struct trace_output_func);
  189. }
  190. /* Update the header */
  191. if (output_hdr) {
  192. output_hdr->rec_count = upto;
  193. output_hdr->type = TRACE_CHUNK_FUNCS;
  194. }
  195. /* Work out how must of the buffer we used */
  196. *needed = ptr - buff;
  197. if (ptr > end)
  198. return -ENOSPC;
  199. return 0;
  200. }
  201. /**
  202. * trace_list_functions() - produce a list of function calls
  203. *
  204. * The information is written into the supplied buffer - a header followed
  205. * by a list of function records.
  206. *
  207. * @buff: buffer to place list into
  208. * @buff_size: size of buffer
  209. * @needed: returns size of buffer needed, which may be
  210. * greater than buff_size if we ran out of space.
  211. * Return: 0 if ok, -ENOSPC if space was exhausted
  212. */
  213. int trace_list_calls(void *buff, size_t buff_size, size_t *needed)
  214. {
  215. struct trace_output_hdr *output_hdr = NULL;
  216. void *end, *ptr = buff;
  217. size_t rec, upto;
  218. size_t count;
  219. end = buff ? buff + buff_size : NULL;
  220. /* Place some header information */
  221. if (ptr + sizeof(struct trace_output_hdr) < end)
  222. output_hdr = ptr;
  223. ptr += sizeof(struct trace_output_hdr);
  224. /* Add information about each call */
  225. count = hdr->ftrace_count;
  226. if (count > hdr->ftrace_size)
  227. count = hdr->ftrace_size;
  228. for (rec = upto = 0; rec < count; rec++) {
  229. if (ptr + sizeof(struct trace_call) < end) {
  230. struct trace_call *call = &hdr->ftrace[rec];
  231. struct trace_call *out = ptr;
  232. out->func = call->func * FUNC_SITE_SIZE;
  233. out->caller = call->caller * FUNC_SITE_SIZE;
  234. out->flags = call->flags;
  235. upto++;
  236. }
  237. ptr += sizeof(struct trace_call);
  238. }
  239. /* Update the header */
  240. if (output_hdr) {
  241. memset(output_hdr, '\0', sizeof(*output_hdr));
  242. output_hdr->rec_count = upto;
  243. output_hdr->type = TRACE_CHUNK_CALLS;
  244. output_hdr->version = TRACE_VERSION;
  245. output_hdr->text_base = CONFIG_TEXT_BASE;
  246. }
  247. /* Work out how must of the buffer we used */
  248. *needed = ptr - buff;
  249. if (ptr > end)
  250. return -ENOSPC;
  251. return 0;
  252. }
  253. /**
  254. * trace_print_stats() - print basic information about tracing
  255. */
  256. void trace_print_stats(void)
  257. {
  258. ulong count;
  259. #ifndef FTRACE
  260. puts("Warning: make U-Boot with FTRACE to enable function instrumenting.\n");
  261. puts("You will likely get zeroed data here\n");
  262. #endif
  263. if (!trace_inited) {
  264. printf("Trace is disabled\n");
  265. return;
  266. }
  267. print_grouped_ull(hdr->func_count, 10);
  268. puts(" function sites\n");
  269. print_grouped_ull(hdr->call_count, 10);
  270. puts(" function calls\n");
  271. print_grouped_ull(hdr->untracked_count, 10);
  272. puts(" untracked function calls\n");
  273. count = min(hdr->ftrace_count, hdr->ftrace_size);
  274. print_grouped_ull(count, 10);
  275. puts(" traced function calls");
  276. if (hdr->ftrace_count > hdr->ftrace_size) {
  277. printf(" (%lu dropped due to overflow)",
  278. hdr->ftrace_count - hdr->ftrace_size);
  279. }
  280. /* Add in minimum depth since the trace did not start at top level */
  281. printf("\n%15d maximum observed call depth\n",
  282. hdr->max_depth - hdr->min_depth);
  283. printf("%15d call depth limit\n", hdr->depth_limit);
  284. print_grouped_ull(hdr->ftrace_too_deep_count, 10);
  285. puts(" calls not traced due to depth\n");
  286. print_grouped_ull(hdr->ftrace_size, 10);
  287. puts(" max function calls\n");
  288. printf("\ntrace buffer %lx call records %lx\n",
  289. (ulong)map_to_sysmem(hdr), (ulong)map_to_sysmem(hdr->ftrace));
  290. }
  291. void notrace trace_set_enabled(int enabled)
  292. {
  293. trace_enabled = enabled != 0;
  294. }
  295. static int get_func_count(void)
  296. {
  297. /* Detect no support for mon_len since this means tracing cannot work */
  298. if (IS_ENABLED(CONFIG_SANDBOX) && !gd->mon_len) {
  299. puts("Tracing is not supported on this board\n");
  300. return -ENOTSUPP;
  301. }
  302. return gd->mon_len / FUNC_SITE_SIZE;
  303. }
/**
 * trace_init() - initialize the tracing system and enable it
 *
 * @buff: Pointer to trace buffer
 * @buff_size: Size of trace buffer
 * Return: 0 if ok, -ENOTSUPP if tracing is not supported, -EALREADY if
 * already enabled without CONFIG_TRACE_EARLY, -ENOSPC if @buff is too small
 */
int notrace trace_init(void *buff, size_t buff_size)
{
	int func_count = get_func_count();
	size_t needed;
	int was_disabled = !trace_enabled;

	if (func_count < 0)
		return func_count;
	trace_save_gd();

	if (!was_disabled) {
#ifdef CONFIG_TRACE_EARLY
		ulong used, count;
		char *end;

		/*
		 * Copy over the early trace data if we have it. Disable
		 * tracing while we are doing this.
		 */
		trace_enabled = 0;
		hdr = map_sysmem(CONFIG_TRACE_EARLY_ADDR,
				 CONFIG_TRACE_EARLY_SIZE);
		/* Only the records that were actually stored are copied */
		count = min(hdr->ftrace_count, hdr->ftrace_size);
		end = (char *)&hdr->ftrace[count];
		used = end - (char *)hdr;
		printf("trace: copying %08lx bytes of early data from %x to %08lx\n",
		       used, CONFIG_TRACE_EARLY_ADDR,
		       (ulong)map_to_sysmem(buff));
		printf("%lu traced function calls", count);
		if (hdr->ftrace_count > hdr->ftrace_size) {
			printf(" (%lu dropped due to overflow)",
			       hdr->ftrace_count - hdr->ftrace_size);
			hdr->ftrace_count = hdr->ftrace_size;
		}
		puts("\n");
		/* Header + accumulators + records move to the new buffer */
		memcpy(buff, hdr, used);
#else
		puts("trace: already enabled\n");
		return -EALREADY;
#endif
	}
	hdr = (struct trace_hdr *)buff;
	needed = sizeof(*hdr) + func_count * sizeof(uintptr_t);
	if (needed > buff_size) {
		printf("trace: buffer size %zx bytes: at least %zx needed\n",
		       buff_size, needed);
		return -ENOSPC;
	}

	/* Don't wipe state carried over from the early-trace buffer */
	if (was_disabled) {
		memset(hdr, '\0', needed);
		hdr->min_depth = INT_MAX;
	}
	hdr->func_count = func_count;
	hdr->call_accum = (uintptr_t *)(hdr + 1);

	/* Use any remaining space for the timed function trace */
	hdr->ftrace = (struct trace_call *)(buff + needed);
	hdr->ftrace_size = (buff_size - needed) / sizeof(*hdr->ftrace);
	hdr->depth_limit = CONFIG_TRACE_CALL_DEPTH_LIMIT;
	puts("trace: enabled\n");
	trace_enabled = 1;
	trace_inited = 1;

	return 0;
}
#ifdef CONFIG_TRACE_EARLY
/**
 * trace_early_init() - initialize the tracing system for early tracing
 *
 * Uses the fixed buffer at CONFIG_TRACE_EARLY_ADDR; trace_init() later
 * copies the collected data into the final buffer.
 *
 * Return: 0 if ok, -ENOSPC if not enough memory is available
 */
int notrace trace_early_init(void)
{
	int func_count = get_func_count();
	size_t buff_size = CONFIG_TRACE_EARLY_SIZE;
	size_t needed;

	if (func_count < 0)
		return func_count;
	/* We can ignore additional calls to this function */
	if (trace_enabled)
		return 0;

	hdr = map_sysmem(CONFIG_TRACE_EARLY_ADDR, CONFIG_TRACE_EARLY_SIZE);
	needed = sizeof(*hdr) + func_count * sizeof(uintptr_t);
	if (needed > buff_size) {
		printf("trace: buffer size is %zx bytes, at least %zx needed\n",
		       buff_size, needed);
		return -ENOSPC;
	}

	memset(hdr, '\0', needed);
	hdr->call_accum = (uintptr_t *)(hdr + 1);
	hdr->func_count = func_count;
	hdr->min_depth = INT_MAX;

	/* Use any remaining space for the timed function trace */
	hdr->ftrace = (struct trace_call *)((char *)hdr + needed);
	hdr->ftrace_size = (buff_size - needed) / sizeof(*hdr->ftrace);
	hdr->depth_limit = CONFIG_TRACE_EARLY_CALL_DEPTH_LIMIT;
	printf("trace: early enable at %08x\n", CONFIG_TRACE_EARLY_ADDR);

	trace_enabled = 1;

	return 0;
}
#endif