/* tools/perf/ui/hist.c — hist period print (hpp) column formatting */
  1. // SPDX-License-Identifier: GPL-2.0
  2. #include <inttypes.h>
  3. #include <math.h>
  4. #include <stdlib.h>
  5. #include <string.h>
  6. #include <linux/compiler.h>
  7. #include "../util/callchain.h"
  8. #include "../util/debug.h"
  9. #include "../util/hist.h"
  10. #include "../util/sort.h"
  11. #include "../util/evsel.h"
  12. #include "../util/evlist.h"
  13. #include "../util/thread.h"
  14. #include "../util/util.h"
/* hist period print (hpp) functions */

/*
 * Invoke the print callback @fn, then advance hpp->buf/hpp->size past
 * the text just written.  The statement expression evaluates to the
 * number of characters @fn reported.
 */
#define hpp__call_print_fn(hpp, fn, fmt, ...)			\
({								\
	int __ret = fn(hpp, fmt, ##__VA_ARGS__);		\
	advance_hpp(hpp, __ret);				\
	__ret;							\
})
  22. static int __hpp__fmt_print(struct perf_hpp *hpp, struct hists *hists, u64 val,
  23. int nr_samples, const char *fmt, int len,
  24. hpp_snprint_fn print_fn, enum perf_hpp_fmt_type fmtype)
  25. {
  26. if (fmtype == PERF_HPP_FMT_TYPE__PERCENT) {
  27. double percent = 0.0;
  28. u64 total = hists__total_period(hists);
  29. if (total)
  30. percent = 100.0 * val / total;
  31. return hpp__call_print_fn(hpp, print_fn, fmt, len, percent);
  32. }
  33. if (fmtype == PERF_HPP_FMT_TYPE__AVERAGE) {
  34. double avg = nr_samples ? (1.0 * val / nr_samples) : 0;
  35. return hpp__call_print_fn(hpp, print_fn, fmt, len, avg);
  36. }
  37. return hpp__call_print_fn(hpp, print_fn, fmt, len, val);
  38. }
/* One value to print per (group member) event. */
struct hpp_fmt_value {
	struct hists *hists;	/* hists the value was collected from */
	u64 val;		/* field value extracted by hpp_field_fn */
	int samples;		/* nr_events backing the value */
};
/*
 * Print the field value(s) of @he into hpp->buf.
 *
 * For a plain event a single value is printed.  For an event group,
 * one value per member is printed: slot 0 holds the entry's own value
 * and the other slots are filled by matching each pair entry on
 * he->pairs against the member's hists.  Members left unmatched keep
 * the zero value from calloc().
 *
 * Returns the total number of characters written; hpp->buf/hpp->size
 * are restored before returning since hpp__call_print_fn() advances
 * them while printing.
 */
static int __hpp__fmt(struct perf_hpp *hpp, struct hist_entry *he,
		      hpp_field_fn get_field, const char *fmt, int len,
		      hpp_snprint_fn print_fn, enum perf_hpp_fmt_type fmtype)
{
	int ret = 0;
	struct hists *hists = he->hists;
	struct evsel *evsel = hists_to_evsel(hists);
	struct evsel *pos;
	char *buf = hpp->buf;
	size_t size = hpp->size;
	int i = 0, nr_members = 1;
	struct hpp_fmt_value *values;

	if (evsel__is_group_event(evsel))
		nr_members = evsel->core.nr_members;

	values = calloc(nr_members, sizeof(*values));
	if (values == NULL)
		return 0;

	/* slot 0 always belongs to the entry's own event */
	values[0].hists = evsel__hists(evsel);
	values[0].val = get_field(he);
	values[0].samples = he->stat.nr_events;

	if (evsel__is_group_event(evsel)) {
		struct hist_entry *pair;

		/* fill the hists pointers in group-member order */
		for_each_group_member(pos, evsel)
			values[++i].hists = evsel__hists(pos);

		/* match each pair entry to its member slot by hists */
		list_for_each_entry(pair, &he->pairs.head, pairs.node) {
			for (i = 0; i < nr_members; i++) {
				if (values[i].hists != pair->hists)
					continue;

				values[i].val = get_field(pair);
				values[i].samples = pair->stat.nr_events;
				break;
			}
		}
	}

	for (i = 0; i < nr_members; i++) {
		/* optionally hide columns of events without samples */
		if (symbol_conf.skip_empty &&
		    values[i].hists->stats.nr_samples == 0)
			continue;

		ret += __hpp__fmt_print(hpp, values[i].hists, values[i].val,
					values[i].samples, fmt, len,
					print_fn, fmtype);
	}

	free(values);

	/*
	 * Restore original buf and size as it's where caller expects
	 * the result will be saved.
	 */
	hpp->buf = buf;
	hpp->size = size;

	return ret;
}
  95. int hpp__fmt(struct perf_hpp_fmt *fmt, struct perf_hpp *hpp,
  96. struct hist_entry *he, hpp_field_fn get_field,
  97. const char *fmtstr, hpp_snprint_fn print_fn,
  98. enum perf_hpp_fmt_type fmtype)
  99. {
  100. int len = fmt->user_len ?: fmt->len;
  101. if (symbol_conf.field_sep) {
  102. return __hpp__fmt(hpp, he, get_field, fmtstr, 1,
  103. print_fn, fmtype);
  104. }
  105. if (fmtype == PERF_HPP_FMT_TYPE__PERCENT)
  106. len -= 2; /* 2 for a space and a % sign */
  107. else
  108. len -= 1;
  109. return __hpp__fmt(hpp, he, get_field, fmtstr, len, print_fn, fmtype);
  110. }
  111. int hpp__fmt_acc(struct perf_hpp_fmt *fmt, struct perf_hpp *hpp,
  112. struct hist_entry *he, hpp_field_fn get_field,
  113. const char *fmtstr, hpp_snprint_fn print_fn,
  114. enum perf_hpp_fmt_type fmtype)
  115. {
  116. if (!symbol_conf.cumulate_callchain) {
  117. int len = fmt->user_len ?: fmt->len;
  118. return snprintf(hpp->buf, hpp->size, " %*s", len - 1, "N/A");
  119. }
  120. return hpp__fmt(fmt, hpp, he, get_field, fmtstr, print_fn, fmtype);
  121. }
  122. static int field_cmp(u64 field_a, u64 field_b)
  123. {
  124. if (field_a > field_b)
  125. return 1;
  126. if (field_a < field_b)
  127. return -1;
  128. return 0;
  129. }
/*
 * Collect the @get_field values of all pair entries of @a and @b into
 * two freshly allocated arrays indexed by group member index.
 *
 * Returns 0 on success; the caller owns and must free *fields_a and
 * *fields_b.  Members without a matching pair entry stay zero (from
 * calloc).  On allocation failure both pointers are set to NULL and
 * -1 is returned.
 */
static int hist_entry__new_pair(struct hist_entry *a, struct hist_entry *b,
				hpp_field_fn get_field, int nr_members,
				u64 **fields_a, u64 **fields_b)
{
	u64 *fa = calloc(nr_members, sizeof(*fa)),
	    *fb = calloc(nr_members, sizeof(*fb));
	struct hist_entry *pair;

	if (!fa || !fb)
		goto out_free;

	list_for_each_entry(pair, &a->pairs.head, pairs.node) {
		struct evsel *evsel = hists_to_evsel(pair->hists);

		fa[evsel__group_idx(evsel)] = get_field(pair);
	}

	list_for_each_entry(pair, &b->pairs.head, pairs.node) {
		struct evsel *evsel = hists_to_evsel(pair->hists);

		fb[evsel__group_idx(evsel)] = get_field(pair);
	}

	*fields_a = fa;
	*fields_b = fb;

	return 0;

out_free:
	free(fa);
	free(fb);
	*fields_a = *fields_b = NULL;
	return -1;
}
/*
 * Compare two entries for --group-sort-idx: the group member at @idx
 * is the primary sort key and the remaining members break ties in
 * index order.  Falls back to the plain leader-field comparison when
 * the event is not a group, @idx is out of range (index 0 is already
 * covered by that comparison), or the pair arrays cannot be built.
 */
static int __hpp__group_sort_idx(struct hist_entry *a, struct hist_entry *b,
				 hpp_field_fn get_field, int idx)
{
	struct evsel *evsel = hists_to_evsel(a->hists);
	u64 *fields_a, *fields_b;
	int cmp, nr_members, ret, i;

	cmp = field_cmp(get_field(a), get_field(b));
	if (!evsel__is_group_event(evsel))
		return cmp;

	nr_members = evsel->core.nr_members;
	if (idx < 1 || idx >= nr_members)
		return cmp;

	ret = hist_entry__new_pair(a, b, get_field, nr_members, &fields_a, &fields_b);
	if (ret) {
		/* allocation failed: fall back to the leader comparison */
		ret = cmp;
		goto out;
	}

	/* primary key: the selected group member */
	ret = field_cmp(fields_a[idx], fields_b[idx]);
	if (ret)
		goto out;

	/* tie break with the other members, in index order */
	for (i = 1; i < nr_members; i++) {
		if (i != idx) {
			ret = field_cmp(fields_a[i], fields_b[i]);
			if (ret)
				goto out;
		}
	}

out:
	free(fields_a);
	free(fields_b);
	return ret;
}
/*
 * Default sort comparison for a hpp field: compare the entries' own
 * field first; for grouped events, equal values are tie-broken by the
 * other group members' fields (via the pair entries).
 */
static int __hpp__sort(struct hist_entry *a, struct hist_entry *b,
		       hpp_field_fn get_field)
{
	s64 ret;
	int i, nr_members;
	struct evsel *evsel;
	u64 *fields_a, *fields_b;

	if (symbol_conf.group_sort_idx && symbol_conf.event_group) {
		return __hpp__group_sort_idx(a, b, get_field,
					     symbol_conf.group_sort_idx);
	}

	ret = field_cmp(get_field(a), get_field(b));
	if (ret || !symbol_conf.event_group)
		return ret;

	evsel = hists_to_evsel(a->hists);
	if (!evsel__is_group_event(evsel))
		return ret;

	nr_members = evsel->core.nr_members;
	/* on failure fields_a/b are NULL and ret keeps the 0 from above */
	i = hist_entry__new_pair(a, b, get_field, nr_members, &fields_a, &fields_b);
	if (i)
		goto out;

	for (i = 1; i < nr_members; i++) {
		ret = field_cmp(fields_a[i], fields_b[i]);
		if (ret)
			break;
	}

out:
	free(fields_a);
	free(fields_b);
	return ret;
}
/*
 * Sort comparison for accumulated (--children) fields.  Without
 * cumulation all entries compare equal (returns 0).
 */
static int __hpp__sort_acc(struct hist_entry *a, struct hist_entry *b,
			   hpp_field_fn get_field)
{
	s64 ret = 0;

	if (symbol_conf.cumulate_callchain) {
		/*
		 * Put caller above callee when they have equal period.
		 */
		ret = field_cmp(get_field(a), get_field(b));
		if (ret)
			return ret;

		/* compare the underlying thread objects, not the RC wrappers */
		if ((a->thread == NULL ? NULL : RC_CHK_ACCESS(a->thread)) !=
		    (b->thread == NULL ? NULL : RC_CHK_ACCESS(b->thread)) ||
		    !hist_entry__has_callchains(a) || !symbol_conf.use_callchain)
			return 0;

		/* deeper callchain first; inverted for caller-ordered output */
		ret = b->callchain->max_depth - a->callchain->max_depth;
		if (callchain_param.order == ORDER_CALLER)
			ret = -ret;
	}
	return ret;
}
  240. static int hpp__width_fn(struct perf_hpp_fmt *fmt,
  241. struct perf_hpp *hpp __maybe_unused,
  242. struct hists *hists)
  243. {
  244. int len = fmt->user_len ?: fmt->len;
  245. struct evsel *evsel = hists_to_evsel(hists);
  246. if (symbol_conf.event_group) {
  247. int nr = 0;
  248. struct evsel *pos;
  249. for_each_group_evsel(pos, evsel) {
  250. if (!symbol_conf.skip_empty ||
  251. evsel__hists(pos)->stats.nr_samples)
  252. nr++;
  253. }
  254. len = max(len, nr * fmt->len);
  255. }
  256. if (len < (int)strlen(fmt->name))
  257. len = strlen(fmt->name);
  258. return len;
  259. }
  260. static int hpp__header_fn(struct perf_hpp_fmt *fmt, struct perf_hpp *hpp,
  261. struct hists *hists, int line __maybe_unused,
  262. int *span __maybe_unused)
  263. {
  264. int len = hpp__width_fn(fmt, hpp, hists);
  265. return scnprintf(hpp->buf, hpp->size, "%*s", len, fmt->name);
  266. }
/*
 * hpp print callback for colored percent output.  The variadic
 * arguments must be the field width (int) followed by the percent
 * value (double), matching the " %*.2f%%" style format strings used
 * by the generator macros below.
 */
int hpp_color_scnprintf(struct perf_hpp *hpp, const char *fmt, ...)
{
	va_list args;
	ssize_t ssize = hpp->size;
	double percent;
	int ret, len;

	va_start(args, fmt);
	len = va_arg(args, int);
	percent = va_arg(args, double);
	ret = percent_color_len_snprintf(hpp->buf, hpp->size, fmt, len, percent);
	va_end(args);

	/* like scnprintf: never report more than the buffer can hold */
	return (ret >= ssize) ? (ssize - 1) : ret;
}
  280. static int hpp_entry_scnprintf(struct perf_hpp *hpp, const char *fmt, ...)
  281. {
  282. va_list args;
  283. ssize_t ssize = hpp->size;
  284. int ret;
  285. va_start(args, fmt);
  286. ret = vsnprintf(hpp->buf, hpp->size, fmt, args);
  287. va_end(args);
  288. return (ret >= ssize) ? (ssize - 1) : ret;
  289. }
/*
 * Generator macros: each HPP_*_FNS() invocation below expands to the
 * field accessor plus the color/entry/sort callbacks for one column.
 */

/* percent column with color: he_get_<field>() + hpp__color_<type>() */
#define __HPP_COLOR_PERCENT_FN(_type, _field)					\
static u64 he_get_##_field(struct hist_entry *he)				\
{										\
	return he->stat._field;							\
}										\
										\
static int hpp__color_##_type(struct perf_hpp_fmt *fmt,			\
			      struct perf_hpp *hpp, struct hist_entry *he)	\
{										\
	return hpp__fmt(fmt, hpp, he, he_get_##_field, " %*.2f%%",		\
			hpp_color_scnprintf, PERF_HPP_FMT_TYPE__PERCENT);	\
}

/* percent column without color (reuses the accessor from above) */
#define __HPP_ENTRY_PERCENT_FN(_type, _field)					\
static int hpp__entry_##_type(struct perf_hpp_fmt *fmt,			\
			      struct perf_hpp *hpp, struct hist_entry *he)	\
{										\
	return hpp__fmt(fmt, hpp, he, he_get_##_field, " %*.2f%%",		\
			hpp_entry_scnprintf, PERF_HPP_FMT_TYPE__PERCENT);	\
}

/* sort callback comparing the plain stat field */
#define __HPP_SORT_FN(_type, _field)						\
static int64_t hpp__sort_##_type(struct perf_hpp_fmt *fmt __maybe_unused,	\
				 struct hist_entry *a, struct hist_entry *b)	\
{										\
	return __hpp__sort(a, b, he_get_##_field);				\
}

/* accumulated (--children) percent variants, reading he->stat_acc */
#define __HPP_COLOR_ACC_PERCENT_FN(_type, _field)				\
static u64 he_get_acc_##_field(struct hist_entry *he)				\
{										\
	return he->stat_acc->_field;						\
}										\
										\
static int hpp__color_##_type(struct perf_hpp_fmt *fmt,			\
			      struct perf_hpp *hpp, struct hist_entry *he)	\
{										\
	return hpp__fmt_acc(fmt, hpp, he, he_get_acc_##_field, " %*.2f%%",	\
			    hpp_color_scnprintf, PERF_HPP_FMT_TYPE__PERCENT);	\
}

#define __HPP_ENTRY_ACC_PERCENT_FN(_type, _field)				\
static int hpp__entry_##_type(struct perf_hpp_fmt *fmt,			\
			      struct perf_hpp *hpp, struct hist_entry *he)	\
{										\
	return hpp__fmt_acc(fmt, hpp, he, he_get_acc_##_field, " %*.2f%%",	\
			    hpp_entry_scnprintf, PERF_HPP_FMT_TYPE__PERCENT);	\
}

#define __HPP_SORT_ACC_FN(_type, _field)					\
static int64_t hpp__sort_##_type(struct perf_hpp_fmt *fmt __maybe_unused,	\
				 struct hist_entry *a, struct hist_entry *b)	\
{										\
	return __hpp__sort_acc(a, b, he_get_acc_##_field);			\
}

/* raw integer output (samples, period) */
#define __HPP_ENTRY_RAW_FN(_type, _field)					\
static u64 he_get_raw_##_field(struct hist_entry *he)				\
{										\
	return he->stat._field;							\
}										\
										\
static int hpp__entry_##_type(struct perf_hpp_fmt *fmt,			\
			      struct perf_hpp *hpp, struct hist_entry *he)	\
{										\
	return hpp__fmt(fmt, hpp, he, he_get_raw_##_field, " %*"PRIu64,	\
			hpp_entry_scnprintf, PERF_HPP_FMT_TYPE__RAW);		\
}

#define __HPP_SORT_RAW_FN(_type, _field)					\
static int64_t hpp__sort_##_type(struct perf_hpp_fmt *fmt __maybe_unused,	\
				 struct hist_entry *a, struct hist_entry *b)	\
{										\
	return __hpp__sort(a, b, he_get_raw_##_field);				\
}

/* per-sample average output (weights) */
#define __HPP_ENTRY_AVERAGE_FN(_type, _field)					\
static u64 he_get_##_field(struct hist_entry *he)				\
{										\
	return he->stat._field;							\
}										\
										\
static int hpp__entry_##_type(struct perf_hpp_fmt *fmt,			\
			      struct perf_hpp *hpp, struct hist_entry *he)	\
{										\
	return hpp__fmt(fmt, hpp, he, he_get_##_field, " %*.1f",		\
			hpp_entry_scnprintf, PERF_HPP_FMT_TYPE__AVERAGE);	\
}

#define __HPP_SORT_AVERAGE_FN(_type, _field)					\
static int64_t hpp__sort_##_type(struct perf_hpp_fmt *fmt __maybe_unused,	\
				 struct hist_entry *a, struct hist_entry *b)	\
{										\
	return __hpp__sort(a, b, he_get_##_field);				\
}

/* convenience bundles combining the pieces for one column kind */
#define HPP_PERCENT_FNS(_type, _field)		\
__HPP_COLOR_PERCENT_FN(_type, _field)		\
__HPP_ENTRY_PERCENT_FN(_type, _field)		\
__HPP_SORT_FN(_type, _field)

#define HPP_PERCENT_ACC_FNS(_type, _field)	\
__HPP_COLOR_ACC_PERCENT_FN(_type, _field)	\
__HPP_ENTRY_ACC_PERCENT_FN(_type, _field)	\
__HPP_SORT_ACC_FN(_type, _field)

#define HPP_RAW_FNS(_type, _field)		\
__HPP_ENTRY_RAW_FN(_type, _field)		\
__HPP_SORT_RAW_FN(_type, _field)

#define HPP_AVERAGE_FNS(_type, _field)		\
__HPP_ENTRY_AVERAGE_FN(_type, _field)		\
__HPP_SORT_AVERAGE_FN(_type, _field)
/* instantiate the column callbacks declared by the macros above */
HPP_PERCENT_FNS(overhead, period)
HPP_PERCENT_FNS(overhead_sys, period_sys)
HPP_PERCENT_FNS(overhead_us, period_us)
HPP_PERCENT_FNS(overhead_guest_sys, period_guest_sys)
HPP_PERCENT_FNS(overhead_guest_us, period_guest_us)
HPP_PERCENT_ACC_FNS(overhead_acc, period)
HPP_RAW_FNS(samples, nr_events)
HPP_RAW_FNS(period, period)
HPP_AVERAGE_FNS(weight1, weight1)
HPP_AVERAGE_FNS(weight2, weight2)
HPP_AVERAGE_FNS(weight3, weight3)
/* no-op comparison: hpp columns take no part in cmp/collapse stages */
static int64_t hpp__nop_cmp(struct perf_hpp_fmt *fmt __maybe_unused,
			    struct hist_entry *a __maybe_unused,
			    struct hist_entry *b __maybe_unused)
{
	return 0;
}
/* a format is a built-in hpp entry iff it uses the common header callback */
static bool perf_hpp__is_hpp_entry(struct perf_hpp_fmt *a)
{
	return a->header == hpp__header_fn;
}
  411. static bool hpp__equal(struct perf_hpp_fmt *a, struct perf_hpp_fmt *b)
  412. {
  413. if (!perf_hpp__is_hpp_entry(a) || !perf_hpp__is_hpp_entry(b))
  414. return false;
  415. return a->idx == b->idx;
  416. }
/* static initializer for a colored percent column (has .color) */
#define HPP__COLOR_PRINT_FNS(_name, _fn, _idx)	\
	{					\
		.name   = _name,		\
		.header	= hpp__header_fn,	\
		.width	= hpp__width_fn,	\
		.color	= hpp__color_ ## _fn,	\
		.entry	= hpp__entry_ ## _fn,	\
		.cmp	= hpp__nop_cmp,		\
		.collapse = hpp__nop_cmp,	\
		.sort	= hpp__sort_ ## _fn,	\
		.idx	= PERF_HPP__ ## _idx,	\
		.equal	= hpp__equal,		\
	}

/* same shape for the accumulated (--children) column */
#define HPP__COLOR_ACC_PRINT_FNS(_name, _fn, _idx)	\
	{						\
		.name   = _name,			\
		.header	= hpp__header_fn,		\
		.width	= hpp__width_fn,		\
		.color	= hpp__color_ ## _fn,		\
		.entry	= hpp__entry_ ## _fn,		\
		.cmp	= hpp__nop_cmp,			\
		.collapse = hpp__nop_cmp,		\
		.sort	= hpp__sort_ ## _fn,		\
		.idx	= PERF_HPP__ ## _idx,		\
		.equal	= hpp__equal,			\
	}

/* plain column without a color callback */
#define HPP__PRINT_FNS(_name, _fn, _idx)	\
	{					\
		.name   = _name,		\
		.header	= hpp__header_fn,	\
		.width	= hpp__width_fn,	\
		.entry	= hpp__entry_ ## _fn,	\
		.cmp	= hpp__nop_cmp,		\
		.collapse = hpp__nop_cmp,	\
		.sort	= hpp__sort_ ## _fn,	\
		.idx	= PERF_HPP__ ## _idx,	\
		.equal	= hpp__equal,		\
	}
/* built-in column formats; array position matches the PERF_HPP__* .idx */
struct perf_hpp_fmt perf_hpp__format[] = {
	HPP__COLOR_PRINT_FNS("Overhead", overhead, OVERHEAD),
	HPP__COLOR_PRINT_FNS("sys", overhead_sys, OVERHEAD_SYS),
	HPP__COLOR_PRINT_FNS("usr", overhead_us, OVERHEAD_US),
	HPP__COLOR_PRINT_FNS("guest sys", overhead_guest_sys, OVERHEAD_GUEST_SYS),
	HPP__COLOR_PRINT_FNS("guest usr", overhead_guest_us, OVERHEAD_GUEST_US),
	HPP__COLOR_ACC_PRINT_FNS("Children", overhead_acc, OVERHEAD_ACC),
	HPP__PRINT_FNS("Samples", samples, SAMPLES),
	HPP__PRINT_FNS("Period", period, PERIOD),
	HPP__PRINT_FNS("Weight1", weight1, WEIGHT1),
	HPP__PRINT_FNS("Weight2", weight2, WEIGHT2),
	HPP__PRINT_FNS("Weight3", weight3, WEIGHT3),
};
/* the default global list of output fields and sort keys */
struct perf_hpp_list perf_hpp_list = {
	.fields	= LIST_HEAD_INIT(perf_hpp_list.fields),
	.sorts	= LIST_HEAD_INIT(perf_hpp_list.sorts),
	.nr_header_lines = 1,
};
/* the generator macros are not needed past this point */
#undef HPP__COLOR_PRINT_FNS
#undef HPP__COLOR_ACC_PRINT_FNS
#undef HPP__PRINT_FNS

#undef HPP_PERCENT_FNS
#undef HPP_PERCENT_ACC_FNS
#undef HPP_RAW_FNS
#undef HPP_AVERAGE_FNS

#undef __HPP_HEADER_FN
#undef __HPP_WIDTH_FN
#undef __HPP_COLOR_PERCENT_FN
#undef __HPP_ENTRY_PERCENT_FN
#undef __HPP_COLOR_ACC_PERCENT_FN
#undef __HPP_ENTRY_ACC_PERCENT_FN
#undef __HPP_ENTRY_RAW_FN
#undef __HPP_ENTRY_AVERAGE_FN
#undef __HPP_SORT_FN
#undef __HPP_SORT_ACC_FN
#undef __HPP_SORT_RAW_FN
#undef __HPP_SORT_AVERAGE_FN
/* Release a format via its optional ->free callback. */
static void fmt_free(struct perf_hpp_fmt *fmt)
{
	/*
	 * At this point fmt should be completely
	 * unhooked, if not it's a bug.
	 */
	BUG_ON(!list_empty(&fmt->list));
	BUG_ON(!list_empty(&fmt->sort_list));

	if (fmt->free)
		fmt->free(fmt);
}
/*
 * Initialize the built-in format table and register the default output
 * columns.  With --children the accumulated column is registered first
 * (so it appears before the overhead column, which is renamed "Self").
 */
void perf_hpp__init(void)
{
	int i;

	for (i = 0; i < PERF_HPP__MAX_INDEX; i++) {
		struct perf_hpp_fmt *fmt = &perf_hpp__format[i];

		INIT_LIST_HEAD(&fmt->list);

		/* sort_list may be linked by setup_sorting() */
		if (fmt->sort_list.next == NULL)
			INIT_LIST_HEAD(&fmt->sort_list);
	}

	/*
	 * If user specified field order, no need to setup default fields.
	 */
	if (is_strict_order(field_order))
		return;

	if (symbol_conf.cumulate_callchain) {
		hpp_dimension__add_output(PERF_HPP__OVERHEAD_ACC);
		perf_hpp__format[PERF_HPP__OVERHEAD].name = "Self";
	}

	hpp_dimension__add_output(PERF_HPP__OVERHEAD);

	if (symbol_conf.show_cpu_utilization) {
		hpp_dimension__add_output(PERF_HPP__OVERHEAD_SYS);
		hpp_dimension__add_output(PERF_HPP__OVERHEAD_US);

		if (perf_guest) {
			hpp_dimension__add_output(PERF_HPP__OVERHEAD_GUEST_SYS);
			hpp_dimension__add_output(PERF_HPP__OVERHEAD_GUEST_US);
		}
	}

	if (symbol_conf.show_nr_samples)
		hpp_dimension__add_output(PERF_HPP__SAMPLES);

	if (symbol_conf.show_total_period)
		hpp_dimension__add_output(PERF_HPP__PERIOD);
}
/* Append @format to the output field list of @list. */
void perf_hpp_list__column_register(struct perf_hpp_list *list,
				    struct perf_hpp_fmt *format)
{
	list_add_tail(&format->list, &list->fields);
}
/* Append @format to the sort key list of @list. */
void perf_hpp_list__register_sort_field(struct perf_hpp_list *list,
					struct perf_hpp_fmt *format)
{
	list_add_tail(&format->sort_list, &list->sorts);
}
/* Prepend @format to the sort key list of @list (highest priority). */
void perf_hpp_list__prepend_sort_field(struct perf_hpp_list *list,
				       struct perf_hpp_fmt *format)
{
	list_add(&format->sort_list, &list->sorts);
}
/* Unlink @format from its field list and release it. */
static void perf_hpp__column_unregister(struct perf_hpp_fmt *format)
{
	list_del_init(&format->list);
	fmt_free(format);
}
/*
 * Undo the cumulate (--children) setup: drop the "Children" column
 * and rename the overhead column back to "Overhead".
 */
void perf_hpp__cancel_cumulate(void)
{
	struct perf_hpp_fmt *fmt, *acc, *ovh, *tmp;

	if (is_strict_order(field_order))
		return;

	ovh = &perf_hpp__format[PERF_HPP__OVERHEAD];
	acc = &perf_hpp__format[PERF_HPP__OVERHEAD_ACC];

	perf_hpp_list__for_each_format_safe(&perf_hpp_list, fmt, tmp) {
		if (acc->equal(acc, fmt)) {
			perf_hpp__column_unregister(fmt);
			continue;
		}

		if (ovh->equal(ovh, fmt))
			fmt->name = "Overhead";
	}
}
  572. static bool fmt_equal(struct perf_hpp_fmt *a, struct perf_hpp_fmt *b)
  573. {
  574. return a->equal && a->equal(a, b);
  575. }
/*
 * Make sure every sort key also appears in the output field list,
 * except sort-only pseudo fields that cannot be printed.
 */
void perf_hpp__setup_output_field(struct perf_hpp_list *list)
{
	struct perf_hpp_fmt *fmt;

	/* append sort keys to output field */
	perf_hpp_list__for_each_sort_list(list, fmt) {
		struct perf_hpp_fmt *pos;

		/* skip sort-only fields ("sort_compute" in perf diff) */
		if (!fmt->entry && !fmt->color)
			continue;

		/* already present in the output list? */
		perf_hpp_list__for_each_format(list, pos) {
			if (fmt_equal(fmt, pos))
				goto next;
		}

		perf_hpp__column_register(fmt);
next:
		continue;
	}
}
/*
 * Make sure every output field also appears in the sort key list,
 * so output-only columns still get stable ordering.
 */
void perf_hpp__append_sort_keys(struct perf_hpp_list *list)
{
	struct perf_hpp_fmt *fmt;

	/* append output fields to sort keys */
	perf_hpp_list__for_each_format(list, fmt) {
		struct perf_hpp_fmt *pos;

		/* already present in the sort list? */
		perf_hpp_list__for_each_sort_list(list, pos) {
			if (fmt_equal(fmt, pos))
				goto next;
		}

		perf_hpp__register_sort_field(fmt);
next:
		continue;
	}
}
/*
 * Empty both the output field and sort key lists of @list, freeing
 * each format once it is unlinked from both lists (fmt_free() insists
 * on that).
 */
void perf_hpp__reset_output_field(struct perf_hpp_list *list)
{
	struct perf_hpp_fmt *fmt, *tmp;

	/* reset output fields */
	perf_hpp_list__for_each_format_safe(list, fmt, tmp) {
		list_del_init(&fmt->list);
		list_del_init(&fmt->sort_list);
		fmt_free(fmt);
	}

	/* reset sort keys */
	perf_hpp_list__for_each_sort_list_safe(list, fmt, tmp) {
		list_del_init(&fmt->list);
		list_del_init(&fmt->sort_list);
		fmt_free(fmt);
	}
}
  625. /*
  626. * See hists__fprintf to match the column widths
  627. */
  628. unsigned int hists__sort_list_width(struct hists *hists)
  629. {
  630. struct perf_hpp_fmt *fmt;
  631. int ret = 0;
  632. bool first = true;
  633. struct perf_hpp dummy_hpp;
  634. hists__for_each_format(hists, fmt) {
  635. if (perf_hpp__should_skip(fmt, hists))
  636. continue;
  637. if (first)
  638. first = false;
  639. else
  640. ret += 2;
  641. ret += fmt->width(fmt, &dummy_hpp, hists);
  642. }
  643. if (verbose > 0 && hists__has(hists, sym)) /* Addr + origin */
  644. ret += 3 + BITS_PER_LONG / 4;
  645. return ret;
  646. }
  647. unsigned int hists__overhead_width(struct hists *hists)
  648. {
  649. struct perf_hpp_fmt *fmt;
  650. int ret = 0;
  651. bool first = true;
  652. struct perf_hpp dummy_hpp;
  653. hists__for_each_format(hists, fmt) {
  654. if (perf_hpp__is_sort_entry(fmt) || perf_hpp__is_dynamic_entry(fmt))
  655. break;
  656. if (first)
  657. first = false;
  658. else
  659. ret += 2;
  660. ret += fmt->width(fmt, &dummy_hpp, hists);
  661. }
  662. return ret;
  663. }
  664. void perf_hpp__reset_width(struct perf_hpp_fmt *fmt, struct hists *hists)
  665. {
  666. if (perf_hpp__is_sort_entry(fmt))
  667. return perf_hpp__reset_sort_width(fmt, hists);
  668. if (perf_hpp__is_dynamic_entry(fmt))
  669. return;
  670. BUG_ON(fmt->idx >= PERF_HPP__MAX_INDEX);
  671. switch (fmt->idx) {
  672. case PERF_HPP__OVERHEAD:
  673. case PERF_HPP__OVERHEAD_SYS:
  674. case PERF_HPP__OVERHEAD_US:
  675. case PERF_HPP__OVERHEAD_ACC:
  676. fmt->len = 8;
  677. break;
  678. case PERF_HPP__OVERHEAD_GUEST_SYS:
  679. case PERF_HPP__OVERHEAD_GUEST_US:
  680. fmt->len = 9;
  681. break;
  682. case PERF_HPP__SAMPLES:
  683. case PERF_HPP__PERIOD:
  684. fmt->len = 12;
  685. break;
  686. case PERF_HPP__WEIGHT1:
  687. case PERF_HPP__WEIGHT2:
  688. case PERF_HPP__WEIGHT3:
  689. fmt->len = 8;
  690. break;
  691. default:
  692. break;
  693. }
  694. }
/* Reset all column widths of @hists to their defaults. */
void hists__reset_column_width(struct hists *hists)
{
	struct perf_hpp_fmt *fmt;
	struct perf_hpp_list_node *node;

	hists__for_each_format(hists, fmt)
		perf_hpp__reset_width(fmt, hists);

	/* hierarchy entries have their own hpp list */
	list_for_each_entry(node, &hists->hpp_formats, list) {
		perf_hpp_list__for_each_format(&node->hpp, fmt)
			perf_hpp__reset_width(fmt, hists);
	}
}
  707. void perf_hpp__set_user_width(const char *width_list_str)
  708. {
  709. struct perf_hpp_fmt *fmt;
  710. const char *ptr = width_list_str;
  711. perf_hpp_list__for_each_format(&perf_hpp_list, fmt) {
  712. char *p;
  713. int len = strtol(ptr, &p, 10);
  714. fmt->user_len = len;
  715. if (*p == ',')
  716. ptr = p + 1;
  717. else
  718. break;
  719. }
  720. }
/*
 * Add a copy of @fmt to the per-hists hierarchy format list of its
 * level, creating the level node on first use.  A level node stays
 * marked skipped only while every format added to it should be
 * skipped.  Returns 0 on success, -1 on allocation failure.
 */
static int add_hierarchy_fmt(struct hists *hists, struct perf_hpp_fmt *fmt)
{
	struct perf_hpp_list_node *node = NULL;
	struct perf_hpp_fmt *fmt_copy;
	bool found = false;
	bool skip = perf_hpp__should_skip(fmt, hists);

	/* look for an existing node for this level */
	list_for_each_entry(node, &hists->hpp_formats, list) {
		if (node->level == fmt->level) {
			found = true;
			break;
		}
	}

	if (!found) {
		node = malloc(sizeof(*node));
		if (node == NULL)
			return -1;

		node->skip = skip;
		node->level = fmt->level;
		perf_hpp_list__init(&node->hpp);

		hists->nr_hpp_node++;
		list_add_tail(&node->list, &hists->hpp_formats);
	}

	fmt_copy = perf_hpp_fmt__dup(fmt);
	if (fmt_copy == NULL)
		return -1;

	/* a single non-skipped format makes the whole level visible */
	if (!skip)
		node->skip = false;

	list_add_tail(&fmt_copy->list, &node->hpp.fields);
	list_add_tail(&fmt_copy->sort_list, &node->hpp.sorts);

	return 0;
}
  752. int perf_hpp__setup_hists_formats(struct perf_hpp_list *list,
  753. struct evlist *evlist)
  754. {
  755. struct evsel *evsel;
  756. struct perf_hpp_fmt *fmt;
  757. struct hists *hists;
  758. int ret;
  759. if (!symbol_conf.report_hierarchy)
  760. return 0;
  761. evlist__for_each_entry(evlist, evsel) {
  762. hists = evsel__hists(evsel);
  763. perf_hpp_list__for_each_sort_list(list, fmt) {
  764. if (perf_hpp__is_dynamic_entry(fmt) &&
  765. !perf_hpp__defined_dynamic_entry(fmt, hists))
  766. continue;
  767. ret = add_hierarchy_fmt(hists, fmt);
  768. if (ret < 0)
  769. return ret;
  770. }
  771. }
  772. return 0;
  773. }