hist.c 22 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
3704705706707708709710711712713714715716717718719720721722723724725726727728729730731732733734735736737738739740741742743744745746747748749750751752753754755756757758759760761762763764765766767768769770771772773774775776777778779780781782783784785786787788789790791792793794795796797798799800801802803804805806807808809810811812813814815816817818819820821822823824825826827828829830831832833834835836837838839840841842843844845846847848849850851852853854855856857858859860861862863864865866867868869870871872873874875876877878879880881882883884885886887888889890891892893894895896897898899900901902903904905906907908909910911912913914915916917918919920921922923924925926
  1. // SPDX-License-Identifier: GPL-2.0
  2. #include <stdio.h>
  3. #include <stdlib.h>
  4. #include <linux/string.h>
  5. #include "../../util/callchain.h"
  6. #include "../../util/debug.h"
  7. #include "../../util/event.h"
  8. #include "../../util/hist.h"
  9. #include "../../util/map.h"
  10. #include "../../util/maps.h"
  11. #include "../../util/symbol.h"
  12. #include "../../util/sort.h"
  13. #include "../../util/evsel.h"
  14. #include "../../util/srcline.h"
  15. #include "../../util/string2.h"
  16. #include "../../util/thread.h"
  17. #include "../../util/block-info.h"
  18. #include <linux/ctype.h>
  19. #include <linux/zalloc.h>
  20. static size_t callchain__fprintf_left_margin(FILE *fp, int left_margin)
  21. {
  22. int i;
  23. int ret = fprintf(fp, " ");
  24. for (i = 0; i < left_margin; i++)
  25. ret += fprintf(fp, " ");
  26. return ret;
  27. }
  28. static size_t ipchain__fprintf_graph_line(FILE *fp, int depth, int depth_mask,
  29. int left_margin)
  30. {
  31. int i;
  32. size_t ret = callchain__fprintf_left_margin(fp, left_margin);
  33. for (i = 0; i < depth; i++)
  34. if (depth_mask & (1 << i))
  35. ret += fprintf(fp, "| ");
  36. else
  37. ret += fprintf(fp, " ");
  38. ret += fprintf(fp, "\n");
  39. return ret;
  40. }
/*
 * Print one callchain list entry as a graph line: a '|' rail (or blank)
 * per active depth level, an optional "--<value>--" connector on the
 * first entry of a new branch (@period == 0), then the symbol name.
 * When branch-flag counting is enabled, per-branch counts are appended
 * to the symbol name.  Returns the characters printed for the rails and
 * connector; the symbol text itself is written via fputs/fputc and is
 * deliberately not counted in the return value.
 */
static size_t ipchain__fprintf_graph(FILE *fp, struct callchain_node *node,
                                     struct callchain_list *chain,
                                     int depth, int depth_mask, int period,
                                     u64 total_samples, int left_margin)
{
        int i;
        size_t ret = 0;
        char bf[1024], *alloc_str = NULL;
        char buf[64];
        const char *str;

        ret += callchain__fprintf_left_margin(fp, left_margin);
        for (i = 0; i < depth; i++) {
                if (depth_mask & (1 << i))
                        ret += fprintf(fp, "|");
                else
                        ret += fprintf(fp, " ");
                if (!period && i == depth - 1) {
                        /* First entry of a new branch: print its value. */
                        ret += fprintf(fp, "--");
                        ret += callchain_node__fprintf_value(node, fp, total_samples);
                        ret += fprintf(fp, "--");
                } else
                        ret += fprintf(fp, "%s", " ");
        }

        str = callchain_list__sym_name(chain, bf, sizeof(bf), false);

        if (symbol_conf.show_branchflag_count) {
                callchain_list_counts__printf_value(chain, NULL,
                                                    buf, sizeof(buf));
                /* On asprintf failure fall back to a static message. */
                if (asprintf(&alloc_str, "%s%s", str, buf) < 0)
                        str = "Not enough memory!";
                else
                        str = alloc_str;
        }

        fputs(str, fp);
        fputc('\n', fp);
        free(alloc_str);        /* free(NULL) is a no-op */

        return ret;
}
/*
 * Placeholder symbol and callchain entry used to print a "[...]" line
 * covering hits filtered out by the percent limit.  Allocated by
 * init_rem_hits() and freed (via zfree) at the end of hists__fprintf().
 */
static struct symbol *rem_sq_bracket;
static struct callchain_list rem_hits;

static void init_rem_hits(void)
{
        /* 6 extra bytes hold the "[...]" name plus its NUL terminator. */
        rem_sq_bracket = malloc(sizeof(*rem_sq_bracket) + 6);
        if (!rem_sq_bracket) {
                fprintf(stderr, "Not enough memory to display remaining hits\n");
                return;
        }

        strcpy(rem_sq_bracket->name, "[...]");
        rem_hits.ms.sym = rem_sq_bracket;
}
/*
 * Recursively print one level of a callchain graph rooted at @root.
 * @depth is the current nesting level, @depth_mask tracks which levels
 * still need a '|' rail, and @total_samples is the reference for
 * percentage values.  In CHAIN_GRAPH_REL mode any remaining (filtered)
 * hits are printed as a trailing "[...]" entry.  Returns the number of
 * characters printed.
 */
static size_t __callchain__fprintf_graph(FILE *fp, struct rb_root *root,
                                         u64 total_samples, int depth,
                                         int depth_mask, int left_margin)
{
        struct rb_node *node, *next;
        struct callchain_node *child = NULL;
        struct callchain_list *chain;
        int new_depth_mask = depth_mask;
        u64 remaining;
        size_t ret = 0;
        int i;
        uint entries_printed = 0;
        int cumul_count = 0;

        remaining = total_samples;

        node = rb_first(root);
        while (node) {
                u64 new_total;
                u64 cumul;

                child = rb_entry(node, struct callchain_node, rb_node);
                cumul = callchain_cumul_hits(child);
                remaining -= cumul;
                cumul_count += callchain_cumul_counts(child);

                /*
                 * The depth mask manages the output of pipes that show
                 * the depth. We don't want to keep the pipes of the current
                 * level for the last child of this depth.
                 * Except if we have remaining filtered hits. They will
                 * supersede the last child
                 */
                next = rb_next(node);
                if (!next && (callchain_param.mode != CHAIN_GRAPH_REL || !remaining))
                        new_depth_mask &= ~(1 << (depth - 1));

                /*
                 * But we keep the older depth mask for the line separator
                 * to keep the level link until we reach the last child
                 */
                ret += ipchain__fprintf_graph_line(fp, depth, depth_mask,
                                                   left_margin);
                i = 0;
                list_for_each_entry(chain, &child->val, list) {
                        ret += ipchain__fprintf_graph(fp, child, chain, depth,
                                                      new_depth_mask, i++,
                                                      total_samples,
                                                      left_margin);
                }

                /* In relative mode, child percentages are relative to this subtree. */
                if (callchain_param.mode == CHAIN_GRAPH_REL)
                        new_total = child->children_hit;
                else
                        new_total = total_samples;

                ret += __callchain__fprintf_graph(fp, &child->rb_root, new_total,
                                                  depth + 1,
                                                  new_depth_mask | (1 << depth),
                                                  left_margin);
                node = next;
                if (++entries_printed == callchain_param.print_limit)
                        break;
        }

        /* Print a "[...]" entry that accounts for the filtered-out hits. */
        if (callchain_param.mode == CHAIN_GRAPH_REL &&
            remaining && remaining != total_samples) {
                struct callchain_node rem_node = {
                        .hit = remaining,
                };

                /* init_rem_hits() may have failed to allocate the symbol. */
                if (!rem_sq_bracket)
                        return ret;

                if (callchain_param.value == CCVAL_COUNT && child && child->parent) {
                        rem_node.count = child->parent->children_count - cumul_count;
                        if (rem_node.count <= 0)
                                return ret;
                }

                new_depth_mask &= ~(1 << (depth - 1));
                ret += ipchain__fprintf_graph(fp, &rem_node, &rem_hits, depth,
                                              new_depth_mask, 0, total_samples,
                                              left_margin);
        }

        return ret;
}
  166. /*
  167. * If have one single callchain root, don't bother printing
  168. * its percentage (100 % in fractal mode and the same percentage
  169. * than the hist in graph mode). This also avoid one level of column.
  170. *
  171. * However when percent-limit applied, it's possible that single callchain
  172. * node have different (non-100% in fractal mode) percentage.
  173. */
  174. static bool need_percent_display(struct rb_node *node, u64 parent_samples)
  175. {
  176. struct callchain_node *cnode;
  177. if (rb_next(node))
  178. return true;
  179. cnode = rb_entry(node, struct callchain_node, rb_node);
  180. return callchain_cumul_hits(cnode) != parent_samples;
  181. }
/*
 * Print a callchain in graph mode.  If the tree has a single root whose
 * percentage need not be displayed (see need_percent_display()), that
 * root's entries are printed inline with a "---" connector and the
 * recursion starts at its children; otherwise the whole tree is handed
 * to __callchain__fprintf_graph().  Returns the characters printed.
 */
static size_t callchain__fprintf_graph(FILE *fp, struct rb_root *root,
                                       u64 total_samples, u64 parent_samples,
                                       int left_margin)
{
        struct callchain_node *cnode;
        struct callchain_list *chain;
        u32 entries_printed = 0;
        bool printed = false;
        struct rb_node *node;
        int i = 0;
        int ret = 0;
        char bf[1024];

        node = rb_first(root);
        if (node && !need_percent_display(node, parent_samples)) {
                cnode = rb_entry(node, struct callchain_node, rb_node);
                list_for_each_entry(chain, &cnode->val, list) {
                        /*
                         * If we sort by symbol, the first entry is the same than
                         * the symbol. No need to print it otherwise it appears as
                         * displayed twice.
                         */
                        if (!i++ && field_order == NULL &&
                            sort_order && strstarts(sort_order, "sym"))
                                continue;

                        if (!printed) {
                                /* First line gets the "---" connector. */
                                ret += callchain__fprintf_left_margin(fp, left_margin);
                                ret += fprintf(fp, "|\n");
                                ret += callchain__fprintf_left_margin(fp, left_margin);
                                ret += fprintf(fp, "---");
                                left_margin += 3;
                                printed = true;
                        } else
                                ret += callchain__fprintf_left_margin(fp, left_margin);

                        ret += fprintf(fp, "%s",
                                       callchain_list__sym_name(chain, bf,
                                                                sizeof(bf),
                                                                false));

                        if (symbol_conf.show_branchflag_count)
                                ret += callchain_list_counts__printf_value(
                                                chain, fp, NULL, 0);
                        ret += fprintf(fp, "\n");

                        if (++entries_printed == callchain_param.print_limit)
                                break;
                }
                /* Recurse from this root's children only. */
                root = &cnode->rb_root;
        }

        /* In relative mode, percentages are relative to the parent entry. */
        if (callchain_param.mode == CHAIN_GRAPH_REL)
                total_samples = parent_samples;

        ret += __callchain__fprintf_graph(fp, root, total_samples,
                                          1, 1, left_margin);
        if (ret) {
                /* do not add a blank line if it printed nothing */
                ret += fprintf(fp, "\n");
        }

        return ret;
}
/*
 * Flat mode helper: print the chain from the root down to @node, one
 * symbol per line, by recursing into the parent first.  Context marker
 * entries (chain->ip >= PERF_CONTEXT_MAX) are skipped.
 */
static size_t __callchain__fprintf_flat(FILE *fp, struct callchain_node *node,
                                        u64 total_samples)
{
        struct callchain_list *chain;
        size_t ret = 0;
        char bf[1024];

        if (!node)
                return 0;

        /* Print ancestors first so the root appears at the top. */
        ret += __callchain__fprintf_flat(fp, node->parent, total_samples);

        list_for_each_entry(chain, &node->val, list) {
                if (chain->ip >= PERF_CONTEXT_MAX)
                        continue;
                ret += fprintf(fp, " %s\n", callchain_list__sym_name(chain,
                                        bf, sizeof(bf), false));
        }

        return ret;
}
  255. static size_t callchain__fprintf_flat(FILE *fp, struct rb_root *tree,
  256. u64 total_samples)
  257. {
  258. size_t ret = 0;
  259. u32 entries_printed = 0;
  260. struct callchain_node *chain;
  261. struct rb_node *rb_node = rb_first(tree);
  262. while (rb_node) {
  263. chain = rb_entry(rb_node, struct callchain_node, rb_node);
  264. ret += fprintf(fp, " ");
  265. ret += callchain_node__fprintf_value(chain, fp, total_samples);
  266. ret += fprintf(fp, "\n");
  267. ret += __callchain__fprintf_flat(fp, chain, total_samples);
  268. ret += fprintf(fp, "\n");
  269. if (++entries_printed == callchain_param.print_limit)
  270. break;
  271. rb_node = rb_next(rb_node);
  272. }
  273. return ret;
  274. }
/*
 * Folded mode helper: print the whole chain on a single line with
 * entries joined by field_sep (';' by default), root first.  Context
 * marker entries (chain->ip >= PERF_CONTEXT_MAX) are skipped.
 */
static size_t __callchain__fprintf_folded(FILE *fp, struct callchain_node *node)
{
        const char *sep = symbol_conf.field_sep ?: ";";
        struct callchain_list *chain;
        size_t ret = 0;
        char bf[1024];
        bool first;

        if (!node)
                return 0;

        ret += __callchain__fprintf_folded(fp, node->parent);
        /* Nothing printed by the ancestors means we start the line. */
        first = (ret == 0);

        list_for_each_entry(chain, &node->val, list) {
                if (chain->ip >= PERF_CONTEXT_MAX)
                        continue;
                ret += fprintf(fp, "%s%s", first ? "" : sep,
                               callchain_list__sym_name(chain,
                                        bf, sizeof(bf), false));
                first = false;
        }

        return ret;
}
  296. static size_t callchain__fprintf_folded(FILE *fp, struct rb_root *tree,
  297. u64 total_samples)
  298. {
  299. size_t ret = 0;
  300. u32 entries_printed = 0;
  301. struct callchain_node *chain;
  302. struct rb_node *rb_node = rb_first(tree);
  303. while (rb_node) {
  304. chain = rb_entry(rb_node, struct callchain_node, rb_node);
  305. ret += callchain_node__fprintf_value(chain, fp, total_samples);
  306. ret += fprintf(fp, " ");
  307. ret += __callchain__fprintf_folded(fp, chain);
  308. ret += fprintf(fp, "\n");
  309. if (++entries_printed == callchain_param.print_limit)
  310. break;
  311. rb_node = rb_next(rb_node);
  312. }
  313. return ret;
  314. }
  315. static size_t hist_entry_callchain__fprintf(struct hist_entry *he,
  316. u64 total_samples, int left_margin,
  317. FILE *fp)
  318. {
  319. u64 parent_samples = he->stat.period;
  320. if (symbol_conf.cumulate_callchain)
  321. parent_samples = he->stat_acc->period;
  322. switch (callchain_param.mode) {
  323. case CHAIN_GRAPH_REL:
  324. return callchain__fprintf_graph(fp, &he->sorted_chain, total_samples,
  325. parent_samples, left_margin);
  326. break;
  327. case CHAIN_GRAPH_ABS:
  328. return callchain__fprintf_graph(fp, &he->sorted_chain, total_samples,
  329. parent_samples, left_margin);
  330. break;
  331. case CHAIN_FLAT:
  332. return callchain__fprintf_flat(fp, &he->sorted_chain, total_samples);
  333. break;
  334. case CHAIN_FOLDED:
  335. return callchain__fprintf_folded(fp, &he->sorted_chain, total_samples);
  336. break;
  337. case CHAIN_NONE:
  338. break;
  339. default:
  340. pr_err("Bad callchain mode\n");
  341. }
  342. return 0;
  343. }
/*
 * Format one hist entry into hpp->buf using the formats in @hpp_list,
 * separated by field_sep (or a single space).  Returns the number of
 * characters written, or 0 if the entry is excluded because it has no
 * parent and exclude_other is set.
 */
int __hist_entry__snprintf(struct hist_entry *he, struct perf_hpp *hpp,
                           struct perf_hpp_list *hpp_list)
{
        const char *sep = symbol_conf.field_sep;
        struct perf_hpp_fmt *fmt;
        char *start = hpp->buf;
        int ret;
        bool first = true;

        if (symbol_conf.exclude_other && !he->parent)
                return 0;

        perf_hpp_list__for_each_format(hpp_list, fmt) {
                if (perf_hpp__should_skip(fmt, he->hists))
                        continue;

                /*
                 * If there's no field_sep, we still need
                 * to display initial ' '.
                 */
                if (!sep || !first) {
                        ret = scnprintf(hpp->buf, hpp->size, "%s", sep ?: " ");
                        advance_hpp(hpp, ret);
                } else
                        first = false;

                /* Prefer the colored formatter when colors are enabled. */
                if (perf_hpp__use_color() && fmt->color)
                        ret = fmt->color(fmt, hpp, he);
                else
                        ret = fmt->entry(fmt, hpp, he);

                ret = hist_entry__snprintf_alignment(he, hpp, fmt, ret);
                advance_hpp(hpp, ret);
        }

        return hpp->buf - start;
}
/* Format @he into hpp->buf using the hists' default format list. */
static int hist_entry__snprintf(struct hist_entry *he, struct perf_hpp *hpp)
{
        return __hist_entry__snprintf(he, hpp, he->hists->hpp_list);
}
  379. static int hist_entry__hierarchy_fprintf(struct hist_entry *he,
  380. struct perf_hpp *hpp,
  381. struct hists *hists,
  382. FILE *fp)
  383. {
  384. const char *sep = symbol_conf.field_sep;
  385. struct perf_hpp_fmt *fmt;
  386. struct perf_hpp_list_node *fmt_node;
  387. char *buf = hpp->buf;
  388. size_t size = hpp->size;
  389. int ret, printed = 0;
  390. bool first = true;
  391. if (symbol_conf.exclude_other && !he->parent)
  392. return 0;
  393. ret = scnprintf(hpp->buf, hpp->size, "%*s", he->depth * HIERARCHY_INDENT, "");
  394. advance_hpp(hpp, ret);
  395. /* the first hpp_list_node is for overhead columns */
  396. fmt_node = list_first_entry(&hists->hpp_formats,
  397. struct perf_hpp_list_node, list);
  398. perf_hpp_list__for_each_format(&fmt_node->hpp, fmt) {
  399. /*
  400. * If there's no field_sep, we still need
  401. * to display initial ' '.
  402. */
  403. if (!sep || !first) {
  404. ret = scnprintf(hpp->buf, hpp->size, "%s", sep ?: " ");
  405. advance_hpp(hpp, ret);
  406. } else
  407. first = false;
  408. if (perf_hpp__use_color() && fmt->color)
  409. ret = fmt->color(fmt, hpp, he);
  410. else
  411. ret = fmt->entry(fmt, hpp, he);
  412. ret = hist_entry__snprintf_alignment(he, hpp, fmt, ret);
  413. advance_hpp(hpp, ret);
  414. }
  415. if (!sep)
  416. ret = scnprintf(hpp->buf, hpp->size, "%*s",
  417. (hists->nr_hpp_node - 2) * HIERARCHY_INDENT, "");
  418. advance_hpp(hpp, ret);
  419. printed += fprintf(fp, "%s", buf);
  420. perf_hpp_list__for_each_format(he->hpp_list, fmt) {
  421. hpp->buf = buf;
  422. hpp->size = size;
  423. /*
  424. * No need to call hist_entry__snprintf_alignment() since this
  425. * fmt is always the last column in the hierarchy mode.
  426. */
  427. if (perf_hpp__use_color() && fmt->color)
  428. fmt->color(fmt, hpp, he);
  429. else
  430. fmt->entry(fmt, hpp, he);
  431. /*
  432. * dynamic entries are right-aligned but we want left-aligned
  433. * in the hierarchy mode
  434. */
  435. printed += fprintf(fp, "%s%s", sep ?: " ", skip_spaces(buf));
  436. }
  437. printed += putc('\n', fp);
  438. if (he->leaf && hist_entry__has_callchains(he) && symbol_conf.use_callchain) {
  439. u64 total = hists__total_period(hists);
  440. printed += hist_entry_callchain__fprintf(he, total, 0, fp);
  441. goto out;
  442. }
  443. out:
  444. return printed;
  445. }
/*
 * Block-report mode: print every block entry aggregated under @he by
 * stepping block_idx through the nested block hists.  Entries flagged
 * hpp.skip by the formatter are not printed.
 */
static int hist_entry__block_fprintf(struct hist_entry *he,
                                     char *bf, size_t size,
                                     FILE *fp)
{
        struct block_hist *bh = container_of(he, struct block_hist, he);
        int ret = 0;

        for (unsigned int i = 0; i < bh->block_hists.nr_entries; i++) {
                struct perf_hpp hpp = {
                        .buf = bf,
                        .size = size,
                        .skip = false,
                };

                /* Select which block the formatters should render. */
                bh->block_idx = i;
                hist_entry__snprintf(he, &hpp);

                if (!hpp.skip)
                        ret += fprintf(fp, "%s\n", bf);
        }

        return ret;
}
  465. static int hist_entry__individual_block_fprintf(struct hist_entry *he,
  466. char *bf, size_t size,
  467. FILE *fp)
  468. {
  469. int ret = 0;
  470. struct perf_hpp hpp = {
  471. .buf = bf,
  472. .size = size,
  473. .skip = false,
  474. };
  475. hist_entry__snprintf(he, &hpp);
  476. if (!hpp.skip)
  477. ret += fprintf(fp, "%s\n", bf);
  478. return ret;
  479. }
/*
 * Print one hist entry to @fp, dispatching to the hierarchy, block or
 * individual-block printers as configured, otherwise formatting into
 * @bf and appending the callchain when present (unless
 * @ignore_callchains).  Returns the characters printed.
 */
static int hist_entry__fprintf(struct hist_entry *he, size_t size,
                               char *bf, size_t bfsz, FILE *fp,
                               bool ignore_callchains)
{
        int ret;
        int callchain_ret = 0;
        struct perf_hpp hpp = {
                .buf = bf,
                .size = size,
        };
        struct hists *hists = he->hists;
        u64 total_period = hists->stats.total_period;

        /* Clamp the formatting width to the actual buffer size. */
        if (size == 0 || size > bfsz)
                size = hpp.size = bfsz;

        if (symbol_conf.report_hierarchy)
                return hist_entry__hierarchy_fprintf(he, &hpp, hists, fp);

        if (symbol_conf.report_block)
                return hist_entry__block_fprintf(he, bf, size, fp);

        if (symbol_conf.report_individual_block)
                return hist_entry__individual_block_fprintf(he, bf, size, fp);

        hist_entry__snprintf(he, &hpp);

        ret = fprintf(fp, "%s\n", bf);

        if (hist_entry__has_callchains(he) && !ignore_callchains)
                callchain_ret = hist_entry_callchain__fprintf(he, total_period,
                                                              0, fp);

        ret += callchain_ret;

        return ret;
}
  508. static int print_hierarchy_indent(const char *sep, int indent,
  509. const char *line, FILE *fp)
  510. {
  511. int width;
  512. if (sep != NULL || indent < 2)
  513. return 0;
  514. width = (indent - 2) * HIERARCHY_INDENT;
  515. return fprintf(fp, "%-*.*s", width, width, line);
  516. }
/*
 * Print the two header lines for hierarchy mode: first the overhead
 * column headers followed by the per-level sort keys joined with " / "
 * (columns within a level joined with '+'), then a line of dots wide
 * enough to underline the widest level.  Returns the number of header
 * lines printed (always 2).
 */
static int hists__fprintf_hierarchy_headers(struct hists *hists,
                                            struct perf_hpp *hpp, FILE *fp)
{
        bool first_node, first_col;
        int indent;
        int depth;
        unsigned width = 0;
        unsigned header_width = 0;
        struct perf_hpp_fmt *fmt;
        struct perf_hpp_list_node *fmt_node;
        const char *sep = symbol_conf.field_sep;

        indent = hists->nr_hpp_node;

        /* preserve max indent depth for column headers */
        print_hierarchy_indent(sep, indent, " ", fp);

        /* the first hpp_list_node is for overhead columns */
        fmt_node = list_first_entry(&hists->hpp_formats,
                                    struct perf_hpp_list_node, list);
        perf_hpp_list__for_each_format(&fmt_node->hpp, fmt) {
                fmt->header(fmt, hpp, hists, 0, NULL);
                fprintf(fp, "%s%s", hpp->buf, sep ?: " ");
        }

        /* combine sort headers with ' / ' */
        first_node = true;
        list_for_each_entry_continue(fmt_node, &hists->hpp_formats, list) {
                if (!first_node)
                        header_width += fprintf(fp, " / ");
                first_node = false;

                first_col = true;
                perf_hpp_list__for_each_format(&fmt_node->hpp, fmt) {
                        if (perf_hpp__should_skip(fmt, hists))
                                continue;

                        if (!first_col)
                                header_width += fprintf(fp, "+");
                        first_col = false;

                        fmt->header(fmt, hpp, hists, 0, NULL);

                        header_width += fprintf(fp, "%s", strim(hpp->buf));
                }
        }

        fprintf(fp, "\n# ");

        /* preserve max indent depth for initial dots */
        print_hierarchy_indent(sep, indent, dots, fp);

        /* the first hpp_list_node is for overhead columns */
        fmt_node = list_first_entry(&hists->hpp_formats,
                                    struct perf_hpp_list_node, list);

        first_col = true;
        perf_hpp_list__for_each_format(&fmt_node->hpp, fmt) {
                if (!first_col)
                        fprintf(fp, "%s", sep ?: "..");
                first_col = false;

                width = fmt->width(fmt, hpp, hists);
                fprintf(fp, "%.*s", width, dots);
        }

        /* Find the widest level so the dots cover the sort-key header. */
        depth = 0;
        list_for_each_entry_continue(fmt_node, &hists->hpp_formats, list) {
                first_col = true;
                width = depth * HIERARCHY_INDENT;

                perf_hpp_list__for_each_format(&fmt_node->hpp, fmt) {
                        if (perf_hpp__should_skip(fmt, hists))
                                continue;

                        if (!first_col)
                                width++; /* for '+' sign between column header */
                        first_col = false;

                        width += fmt->width(fmt, hpp, hists);
                }

                if (width > header_width)
                        header_width = width;

                depth++;
        }

        fprintf(fp, "%s%-.*s", sep ?: " ", header_width, dots);

        fprintf(fp, "\n#\n");

        return 2;
}
/*
 * Print one header line (@line) for standard mode.  A format's header
 * callback may set @span to make the header span several columns; while
 * a span is active no separator or buffer output is emitted for the
 * covered columns.
 */
static void fprintf_line(struct hists *hists, struct perf_hpp *hpp,
                         int line, FILE *fp)
{
        struct perf_hpp_fmt *fmt;
        const char *sep = symbol_conf.field_sep;
        bool first = true;
        int span = 0;

        hists__for_each_format(hists, fmt) {
                if (perf_hpp__should_skip(fmt, hists))
                        continue;

                /* No separator before the first column or inside a span. */
                if (!first && !span)
                        fprintf(fp, "%s", sep ?: " ");
                else
                        first = false;

                fmt->header(fmt, hpp, hists, line, &span);

                if (!span)
                        fprintf(fp, "%s", hpp->buf);
        }
}
  608. static int
  609. hists__fprintf_standard_headers(struct hists *hists,
  610. struct perf_hpp *hpp,
  611. FILE *fp)
  612. {
  613. struct perf_hpp_list *hpp_list = hists->hpp_list;
  614. struct perf_hpp_fmt *fmt;
  615. unsigned int width;
  616. const char *sep = symbol_conf.field_sep;
  617. bool first = true;
  618. int line;
  619. for (line = 0; line < hpp_list->nr_header_lines; line++) {
  620. /* first # is displayed one level up */
  621. if (line)
  622. fprintf(fp, "# ");
  623. fprintf_line(hists, hpp, line, fp);
  624. fprintf(fp, "\n");
  625. }
  626. if (sep)
  627. return hpp_list->nr_header_lines;
  628. first = true;
  629. fprintf(fp, "# ");
  630. hists__for_each_format(hists, fmt) {
  631. unsigned int i;
  632. if (perf_hpp__should_skip(fmt, hists))
  633. continue;
  634. if (!first)
  635. fprintf(fp, "%s", sep ?: " ");
  636. else
  637. first = false;
  638. width = fmt->width(fmt, hpp, hists);
  639. for (i = 0; i < width; i++)
  640. fprintf(fp, ".");
  641. }
  642. fprintf(fp, "\n");
  643. fprintf(fp, "#\n");
  644. return hpp_list->nr_header_lines + 2;
  645. }
  646. int hists__fprintf_headers(struct hists *hists, FILE *fp)
  647. {
  648. char bf[1024];
  649. struct perf_hpp dummy_hpp = {
  650. .buf = bf,
  651. .size = sizeof(bf),
  652. };
  653. fprintf(fp, "# ");
  654. if (symbol_conf.report_hierarchy)
  655. return hists__fprintf_hierarchy_headers(hists, &dummy_hpp, fp);
  656. else
  657. return hists__fprintf_standard_headers(hists, &dummy_hpp, fp);
  658. }
/*
 * Print all unfiltered hist entries of @hists to @fp, optionally
 * preceded by the column headers, honoring @max_rows / @max_cols and
 * the @min_pcnt percent limit.  Returns the bytes printed; note that
 * on line-buffer allocation failure it returns (size_t)-1.
 */
size_t hists__fprintf(struct hists *hists, bool show_header, int max_rows,
                      int max_cols, float min_pcnt, FILE *fp,
                      bool ignore_callchains)
{
        struct rb_node *nd;
        size_t ret = 0;
        const char *sep = symbol_conf.field_sep;
        int nr_rows = 0;
        size_t linesz;
        char *line = NULL;
        unsigned indent;

        /* Allocate the shared "[...]" placeholder; freed at "out". */
        init_rem_hits();

        hists__reset_column_width(hists);

        if (symbol_conf.col_width_list_str)
                perf_hpp__set_user_width(symbol_conf.col_width_list_str);

        if (show_header)
                nr_rows += hists__fprintf_headers(hists, fp);

        if (max_rows && nr_rows >= max_rows)
                goto out;

        /* Line buffer: sort width plus separator/newline slack and colors. */
        linesz = hists__sort_list_width(hists) + 3 + 1;
        linesz += perf_hpp__color_overhead();
        line = malloc(linesz);
        if (line == NULL) {
                ret = -1;
                goto out;
        }

        indent = hists__overhead_width(hists) + 4;

        for (nd = rb_first_cached(&hists->entries); nd;
             nd = __rb_hierarchy_next(nd, HMD_FORCE_CHILD)) {
                struct hist_entry *h = rb_entry(nd, struct hist_entry, rb_node);
                float percent;

                if (h->filtered)
                        continue;

                if (symbol_conf.report_individual_block)
                        percent = block_info__total_cycles_percent(h);
                else
                        percent = hist_entry__get_percent_limit(h);

                if (percent < min_pcnt)
                        continue;

                ret += hist_entry__fprintf(h, max_cols, line, linesz, fp, ignore_callchains);

                if (max_rows && ++nr_rows >= max_rows)
                        break;

                /*
                 * If all children are filtered out or percent-limited,
                 * display "no entry >= x.xx%" message.
                 */
                if (!h->leaf && !hist_entry__has_hierarchy_children(h, min_pcnt)) {
                        int depth = hists->nr_hpp_node + h->depth + 1;

                        print_hierarchy_indent(sep, depth, " ", fp);
                        fprintf(fp, "%*sno entry >= %.2f%%\n", indent, "", min_pcnt);

                        if (max_rows && ++nr_rows >= max_rows)
                                break;
                }

                /* In verbose mode, dump the maps of unresolved entries. */
                if (h->ms.map == NULL && verbose > 1) {
                        maps__fprintf(thread__maps(h->thread), fp);
                        fprintf(fp, "%.10s end\n", graph_dotted_line);
                }
        }

        free(line);
out:
        zfree(&rem_sq_bracket);

        return ret;
}
  722. size_t events_stats__fprintf(struct events_stats *stats, FILE *fp)
  723. {
  724. int i;
  725. size_t ret = 0;
  726. u32 total = stats->nr_events[0];
  727. for (i = 0; i < PERF_RECORD_HEADER_MAX; ++i) {
  728. const char *name;
  729. name = perf_event__name(i);
  730. if (!strcmp(name, "UNKNOWN"))
  731. continue;
  732. if (symbol_conf.skip_empty && !stats->nr_events[i])
  733. continue;
  734. if (i && total) {
  735. ret += fprintf(fp, "%20s events: %10d (%4.1f%%)\n",
  736. name, stats->nr_events[i],
  737. 100.0 * stats->nr_events[i] / total);
  738. } else {
  739. ret += fprintf(fp, "%20s events: %10d\n",
  740. name, stats->nr_events[i]);
  741. }
  742. }
  743. return ret;
  744. }