sort.c

  1. // SPDX-License-Identifier: GPL-2.0
  2. #include <errno.h>
  3. #include <inttypes.h>
  4. #include <regex.h>
  5. #include <linux/mman.h>
  6. #include "sort.h"
  7. #include "hist.h"
  8. #include "comm.h"
  9. #include "symbol.h"
  10. #include "thread.h"
  11. #include "evsel.h"
  12. #include "evlist.h"
  13. #include "strlist.h"
  14. #include <traceevent/event-parse.h>
  15. #include "mem-events.h"
  16. #include <linux/kernel.h>
  17. regex_t parent_regex;
  18. const char default_parent_pattern[] = "^sys_|^do_page_fault";
  19. const char *parent_pattern = default_parent_pattern;
  20. const char *default_sort_order = "comm,dso,symbol";
  21. const char default_branch_sort_order[] = "comm,dso_from,symbol_from,symbol_to,cycles";
  22. const char default_mem_sort_order[] = "local_weight,mem,sym,dso,symbol_daddr,dso_daddr,snoop,tlb,locked";
  23. const char default_top_sort_order[] = "dso,symbol";
  24. const char default_diff_sort_order[] = "dso,symbol";
  25. const char default_tracepoint_sort_order[] = "trace";
  26. const char *sort_order;
  27. const char *field_order;
  28. regex_t ignore_callees_regex;
  29. int have_ignore_callees = 0;
  30. enum sort_mode sort__mode = SORT_MODE__NORMAL;
  31. /*
  32. * Replaces all occurrences of the character used with the:
  33. *
  34. * -t, --field-separator
  35. *
  36. * option, which uses a special separator character and doesn't pad with spaces,
  37. * replacing all occurrences of this separator in symbol names (and other
  38. * output) with a '.' character, so that it is the only invalid separator.
  39. */
  40. static int repsep_snprintf(char *bf, size_t size, const char *fmt, ...)
  41. {
  42. int n;
  43. va_list ap;
  44. va_start(ap, fmt);
  45. n = vsnprintf(bf, size, fmt, ap);
  46. if (symbol_conf.field_sep && n > 0) {
  47. char *sep = bf;
  48. while (1) {
  49. sep = strchr(sep, *symbol_conf.field_sep);
  50. if (sep == NULL)
  51. break;
  52. *sep = '.';
  53. }
  54. }
  55. va_end(ap);
  56. if (n >= (int)size)
  57. return size - 1;
  58. return n;
  59. }
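/*
 * Example: with "-t ,", formatting "a,b" through repsep_snprintf() stores
 * "a.b" in bf.  Note that on truncation it returns size - 1, not the full
 * length that vsnprintf() would have reported.
 */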
  60. static int64_t cmp_null(const void *l, const void *r)
  61. {
  62. if (!l && !r)
  63. return 0;
  64. else if (!l)
  65. return -1;
  66. else
  67. return 1;
  68. }
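/*
 * Note that the se_cmp routines below compare right against left
 * (e.g. right->tid - left->tid), so a positive result means the
 * right-hand entry is the larger one.
 */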
  69. /* --sort pid */
  70. static int64_t
  71. sort__thread_cmp(struct hist_entry *left, struct hist_entry *right)
  72. {
  73. return right->thread->tid - left->thread->tid;
  74. }
  75. static int hist_entry__thread_snprintf(struct hist_entry *he, char *bf,
  76. size_t size, unsigned int width)
  77. {
  78. const char *comm = thread__comm_str(he->thread);
  79. width = max(7U, width) - 8;
  80. return repsep_snprintf(bf, size, "%7d:%-*.*s", he->thread->tid,
  81. width, width, comm ?: "");
  82. }
  83. static int hist_entry__thread_filter(struct hist_entry *he, int type, const void *arg)
  84. {
  85. const struct thread *th = arg;
  86. if (type != HIST_FILTER__THREAD)
  87. return -1;
  88. return th && he->thread != th;
  89. }
  90. struct sort_entry sort_thread = {
  91. .se_header = " Pid:Command",
  92. .se_cmp = sort__thread_cmp,
  93. .se_snprintf = hist_entry__thread_snprintf,
  94. .se_filter = hist_entry__thread_filter,
  95. .se_width_idx = HISTC_THREAD,
  96. };
  97. /* --sort comm */
  98. /*
  99. * We can't use pointer comparison in functions below,
  100. * because it gives different results based on pointer
  101. * values, which could break some sorting assumptions.
  102. */
  103. static int64_t
  104. sort__comm_cmp(struct hist_entry *left, struct hist_entry *right)
  105. {
  106. return strcmp(comm__str(right->comm), comm__str(left->comm));
  107. }
  108. static int64_t
  109. sort__comm_collapse(struct hist_entry *left, struct hist_entry *right)
  110. {
  111. return strcmp(comm__str(right->comm), comm__str(left->comm));
  112. }
  113. static int64_t
  114. sort__comm_sort(struct hist_entry *left, struct hist_entry *right)
  115. {
  116. return strcmp(comm__str(right->comm), comm__str(left->comm));
  117. }
  118. static int hist_entry__comm_snprintf(struct hist_entry *he, char *bf,
  119. size_t size, unsigned int width)
  120. {
  121. return repsep_snprintf(bf, size, "%-*.*s", width, width, comm__str(he->comm));
  122. }
  123. struct sort_entry sort_comm = {
  124. .se_header = "Command",
  125. .se_cmp = sort__comm_cmp,
  126. .se_collapse = sort__comm_collapse,
  127. .se_sort = sort__comm_sort,
  128. .se_snprintf = hist_entry__comm_snprintf,
  129. .se_filter = hist_entry__thread_filter,
  130. .se_width_idx = HISTC_COMM,
  131. };
  132. /* --sort dso */
  133. static int64_t _sort__dso_cmp(struct map *map_l, struct map *map_r)
  134. {
  135. struct dso *dso_l = map_l ? map_l->dso : NULL;
  136. struct dso *dso_r = map_r ? map_r->dso : NULL;
  137. const char *dso_name_l, *dso_name_r;
  138. if (!dso_l || !dso_r)
  139. return cmp_null(dso_r, dso_l);
  140. if (verbose > 0) {
  141. dso_name_l = dso_l->long_name;
  142. dso_name_r = dso_r->long_name;
  143. } else {
  144. dso_name_l = dso_l->short_name;
  145. dso_name_r = dso_r->short_name;
  146. }
  147. return strcmp(dso_name_l, dso_name_r);
  148. }
  149. static int64_t
  150. sort__dso_cmp(struct hist_entry *left, struct hist_entry *right)
  151. {
  152. return _sort__dso_cmp(right->ms.map, left->ms.map);
  153. }
  154. static int _hist_entry__dso_snprintf(struct map *map, char *bf,
  155. size_t size, unsigned int width)
  156. {
  157. if (map && map->dso) {
  158. const char *dso_name = verbose > 0 ? map->dso->long_name :
  159. map->dso->short_name;
  160. return repsep_snprintf(bf, size, "%-*.*s", width, width, dso_name);
  161. }
  162. return repsep_snprintf(bf, size, "%-*.*s", width, width, "[unknown]");
  163. }
  164. static int hist_entry__dso_snprintf(struct hist_entry *he, char *bf,
  165. size_t size, unsigned int width)
  166. {
  167. return _hist_entry__dso_snprintf(he->ms.map, bf, size, width);
  168. }
  169. static int hist_entry__dso_filter(struct hist_entry *he, int type, const void *arg)
  170. {
  171. const struct dso *dso = arg;
  172. if (type != HIST_FILTER__DSO)
  173. return -1;
  174. return dso && (!he->ms.map || he->ms.map->dso != dso);
  175. }
  176. struct sort_entry sort_dso = {
  177. .se_header = "Shared Object",
  178. .se_cmp = sort__dso_cmp,
  179. .se_snprintf = hist_entry__dso_snprintf,
  180. .se_filter = hist_entry__dso_filter,
  181. .se_width_idx = HISTC_DSO,
  182. };
  183. /* --sort symbol */
  184. static int64_t _sort__addr_cmp(u64 left_ip, u64 right_ip)
  185. {
  186. return (int64_t)(right_ip - left_ip);
  187. }
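/*
 * If either symbol is inlined, names are compared first; same-named
 * symbols with overlapping address ranges are treated as equal.
 * Otherwise ordering falls back to start and then end address.
 */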
  188. static int64_t _sort__sym_cmp(struct symbol *sym_l, struct symbol *sym_r)
  189. {
  190. if (!sym_l || !sym_r)
  191. return cmp_null(sym_l, sym_r);
  192. if (sym_l == sym_r)
  193. return 0;
  194. if (sym_l->inlined || sym_r->inlined) {
  195. int ret = strcmp(sym_l->name, sym_r->name);
  196. if (ret)
  197. return ret;
  198. if ((sym_l->start <= sym_r->end) && (sym_l->end >= sym_r->start))
  199. return 0;
  200. }
  201. if (sym_l->start != sym_r->start)
  202. return (int64_t)(sym_r->start - sym_l->start);
  203. return (int64_t)(sym_r->end - sym_l->end);
  204. }
  205. static int64_t
  206. sort__sym_cmp(struct hist_entry *left, struct hist_entry *right)
  207. {
  208. int64_t ret;
  209. if (!left->ms.sym && !right->ms.sym)
  210. return _sort__addr_cmp(left->ip, right->ip);
  211. /*
  212. * comparing symbol address alone is not enough since it's a
  213. * relative address within a dso.
  214. */
  215. if (!hists__has(left->hists, dso) || hists__has(right->hists, dso)) {
  216. ret = sort__dso_cmp(left, right);
  217. if (ret != 0)
  218. return ret;
  219. }
  220. return _sort__sym_cmp(left->ms.sym, right->ms.sym);
  221. }
  222. static int64_t
  223. sort__sym_sort(struct hist_entry *left, struct hist_entry *right)
  224. {
  225. if (!left->ms.sym || !right->ms.sym)
  226. return cmp_null(left->ms.sym, right->ms.sym);
  227. return strcmp(right->ms.sym->name, left->ms.sym->name);
  228. }
  229. static int _hist_entry__sym_snprintf(struct map *map, struct symbol *sym,
  230. u64 ip, char level, char *bf, size_t size,
  231. unsigned int width)
  232. {
  233. size_t ret = 0;
  234. if (verbose > 0) {
  235. char o = map ? dso__symtab_origin(map->dso) : '!';
  236. ret += repsep_snprintf(bf, size, "%-#*llx %c ",
  237. BITS_PER_LONG / 4 + 2, ip, o);
  238. }
  239. ret += repsep_snprintf(bf + ret, size - ret, "[%c] ", level);
  240. if (sym && map) {
  241. if (sym->type == STT_OBJECT) {
  242. ret += repsep_snprintf(bf + ret, size - ret, "%s", sym->name);
  243. ret += repsep_snprintf(bf + ret, size - ret, "+0x%llx",
  244. ip - map->unmap_ip(map, sym->start));
  245. } else {
  246. ret += repsep_snprintf(bf + ret, size - ret, "%.*s",
  247. width - ret,
  248. sym->name);
  249. if (sym->inlined)
  250. ret += repsep_snprintf(bf + ret, size - ret,
  251. " (inlined)");
  252. }
  253. } else {
  254. size_t len = BITS_PER_LONG / 4;
  255. ret += repsep_snprintf(bf + ret, size - ret, "%-#.*llx",
  256. len, ip);
  257. }
  258. return ret;
  259. }
  260. static int hist_entry__sym_snprintf(struct hist_entry *he, char *bf,
  261. size_t size, unsigned int width)
  262. {
  263. return _hist_entry__sym_snprintf(he->ms.map, he->ms.sym, he->ip,
  264. he->level, bf, size, width);
  265. }
  266. static int hist_entry__sym_filter(struct hist_entry *he, int type, const void *arg)
  267. {
  268. const char *sym = arg;
  269. if (type != HIST_FILTER__SYMBOL)
  270. return -1;
  271. return sym && (!he->ms.sym || !strstr(he->ms.sym->name, sym));
  272. }
  273. struct sort_entry sort_sym = {
  274. .se_header = "Symbol",
  275. .se_cmp = sort__sym_cmp,
  276. .se_sort = sort__sym_sort,
  277. .se_snprintf = hist_entry__sym_snprintf,
  278. .se_filter = hist_entry__sym_filter,
  279. .se_width_idx = HISTC_SYMBOL,
  280. };
  281. /* --sort srcline */
  282. char *hist_entry__srcline(struct hist_entry *he)
  283. {
  284. return map__srcline(he->ms.map, he->ip, he->ms.sym);
  285. }
  286. static int64_t
  287. sort__srcline_cmp(struct hist_entry *left, struct hist_entry *right)
  288. {
  289. if (!left->srcline)
  290. left->srcline = hist_entry__srcline(left);
  291. if (!right->srcline)
  292. right->srcline = hist_entry__srcline(right);
  293. return strcmp(right->srcline, left->srcline);
  294. }
  295. static int hist_entry__srcline_snprintf(struct hist_entry *he, char *bf,
  296. size_t size, unsigned int width)
  297. {
  298. if (!he->srcline)
  299. he->srcline = hist_entry__srcline(he);
  300. return repsep_snprintf(bf, size, "%-.*s", width, he->srcline);
  301. }
  302. struct sort_entry sort_srcline = {
  303. .se_header = "Source:Line",
  304. .se_cmp = sort__srcline_cmp,
  305. .se_snprintf = hist_entry__srcline_snprintf,
  306. .se_width_idx = HISTC_SRCLINE,
  307. };
  308. /* --sort srcline_from */
  309. static char *addr_map_symbol__srcline(struct addr_map_symbol *ams)
  310. {
  311. return map__srcline(ams->map, ams->al_addr, ams->sym);
  312. }
  313. static int64_t
  314. sort__srcline_from_cmp(struct hist_entry *left, struct hist_entry *right)
  315. {
  316. if (!left->branch_info->srcline_from)
  317. left->branch_info->srcline_from = addr_map_symbol__srcline(&left->branch_info->from);
  318. if (!right->branch_info->srcline_from)
  319. right->branch_info->srcline_from = addr_map_symbol__srcline(&right->branch_info->from);
  320. return strcmp(right->branch_info->srcline_from, left->branch_info->srcline_from);
  321. }
  322. static int hist_entry__srcline_from_snprintf(struct hist_entry *he, char *bf,
  323. size_t size, unsigned int width)
  324. {
  325. return repsep_snprintf(bf, size, "%-*.*s", width, width, he->branch_info->srcline_from);
  326. }
  327. struct sort_entry sort_srcline_from = {
  328. .se_header = "From Source:Line",
  329. .se_cmp = sort__srcline_from_cmp,
  330. .se_snprintf = hist_entry__srcline_from_snprintf,
  331. .se_width_idx = HISTC_SRCLINE_FROM,
  332. };
  333. /* --sort srcline_to */
  334. static int64_t
  335. sort__srcline_to_cmp(struct hist_entry *left, struct hist_entry *right)
  336. {
  337. if (!left->branch_info->srcline_to)
  338. left->branch_info->srcline_to = addr_map_symbol__srcline(&left->branch_info->to);
  339. if (!right->branch_info->srcline_to)
  340. right->branch_info->srcline_to = addr_map_symbol__srcline(&right->branch_info->to);
  341. return strcmp(right->branch_info->srcline_to, left->branch_info->srcline_to);
  342. }
  343. static int hist_entry__srcline_to_snprintf(struct hist_entry *he, char *bf,
  344. size_t size, unsigned int width)
  345. {
  346. return repsep_snprintf(bf, size, "%-*.*s", width, width, he->branch_info->srcline_to);
  347. }
  348. struct sort_entry sort_srcline_to = {
  349. .se_header = "To Source:Line",
  350. .se_cmp = sort__srcline_to_cmp,
  351. .se_snprintf = hist_entry__srcline_to_snprintf,
  352. .se_width_idx = HISTC_SRCLINE_TO,
  353. };
  354. /* --sort srcfile */
  355. static char no_srcfile[1];
  356. static char *hist_entry__get_srcfile(struct hist_entry *e)
  357. {
  358. char *sf, *p;
  359. struct map *map = e->ms.map;
  360. if (!map)
  361. return no_srcfile;
  362. sf = __get_srcline(map->dso, map__rip_2objdump(map, e->ip),
  363. e->ms.sym, false, true, true, e->ip);
  364. if (!strcmp(sf, SRCLINE_UNKNOWN))
  365. return no_srcfile;
  366. p = strchr(sf, ':');
  367. if (p && *sf) {
  368. *p = 0;
  369. return sf;
  370. }
  371. free(sf);
  372. return no_srcfile;
  373. }
  374. static int64_t
  375. sort__srcfile_cmp(struct hist_entry *left, struct hist_entry *right)
  376. {
  377. if (!left->srcfile)
  378. left->srcfile = hist_entry__get_srcfile(left);
  379. if (!right->srcfile)
  380. right->srcfile = hist_entry__get_srcfile(right);
  381. return strcmp(right->srcfile, left->srcfile);
  382. }
  383. static int hist_entry__srcfile_snprintf(struct hist_entry *he, char *bf,
  384. size_t size, unsigned int width)
  385. {
  386. if (!he->srcfile)
  387. he->srcfile = hist_entry__get_srcfile(he);
  388. return repsep_snprintf(bf, size, "%-.*s", width, he->srcfile);
  389. }
  390. struct sort_entry sort_srcfile = {
  391. .se_header = "Source File",
  392. .se_cmp = sort__srcfile_cmp,
  393. .se_snprintf = hist_entry__srcfile_snprintf,
  394. .se_width_idx = HISTC_SRCFILE,
  395. };
  396. /* --sort parent */
  397. static int64_t
  398. sort__parent_cmp(struct hist_entry *left, struct hist_entry *right)
  399. {
  400. struct symbol *sym_l = left->parent;
  401. struct symbol *sym_r = right->parent;
  402. if (!sym_l || !sym_r)
  403. return cmp_null(sym_l, sym_r);
  404. return strcmp(sym_r->name, sym_l->name);
  405. }
  406. static int hist_entry__parent_snprintf(struct hist_entry *he, char *bf,
  407. size_t size, unsigned int width)
  408. {
  409. return repsep_snprintf(bf, size, "%-*.*s", width, width,
  410. he->parent ? he->parent->name : "[other]");
  411. }
  412. struct sort_entry sort_parent = {
  413. .se_header = "Parent symbol",
  414. .se_cmp = sort__parent_cmp,
  415. .se_snprintf = hist_entry__parent_snprintf,
  416. .se_width_idx = HISTC_PARENT,
  417. };
  418. /* --sort cpu */
  419. static int64_t
  420. sort__cpu_cmp(struct hist_entry *left, struct hist_entry *right)
  421. {
  422. return right->cpu - left->cpu;
  423. }
  424. static int hist_entry__cpu_snprintf(struct hist_entry *he, char *bf,
  425. size_t size, unsigned int width)
  426. {
  427. return repsep_snprintf(bf, size, "%*.*d", width, width, he->cpu);
  428. }
  429. struct sort_entry sort_cpu = {
  430. .se_header = "CPU",
  431. .se_cmp = sort__cpu_cmp,
  432. .se_snprintf = hist_entry__cpu_snprintf,
  433. .se_width_idx = HISTC_CPU,
  434. };
  435. /* --sort cgroup_id */
  436. static int64_t _sort__cgroup_dev_cmp(u64 left_dev, u64 right_dev)
  437. {
  438. return (int64_t)(right_dev - left_dev);
  439. }
  440. static int64_t _sort__cgroup_inode_cmp(u64 left_ino, u64 right_ino)
  441. {
  442. return (int64_t)(right_ino - left_ino);
  443. }
  444. static int64_t
  445. sort__cgroup_id_cmp(struct hist_entry *left, struct hist_entry *right)
  446. {
  447. int64_t ret;
  448. ret = _sort__cgroup_dev_cmp(right->cgroup_id.dev, left->cgroup_id.dev);
  449. if (ret != 0)
  450. return ret;
  451. return _sort__cgroup_inode_cmp(right->cgroup_id.ino,
  452. left->cgroup_id.ino);
  453. }
  454. static int hist_entry__cgroup_id_snprintf(struct hist_entry *he,
  455. char *bf, size_t size,
  456. unsigned int width __maybe_unused)
  457. {
  458. return repsep_snprintf(bf, size, "%lu/0x%lx", he->cgroup_id.dev,
  459. he->cgroup_id.ino);
  460. }
  461. struct sort_entry sort_cgroup_id = {
  462. .se_header = "cgroup id (dev/inode)",
  463. .se_cmp = sort__cgroup_id_cmp,
  464. .se_snprintf = hist_entry__cgroup_id_snprintf,
  465. .se_width_idx = HISTC_CGROUP_ID,
  466. };
  467. /* --sort socket */
  468. static int64_t
  469. sort__socket_cmp(struct hist_entry *left, struct hist_entry *right)
  470. {
  471. return right->socket - left->socket;
  472. }
  473. static int hist_entry__socket_snprintf(struct hist_entry *he, char *bf,
  474. size_t size, unsigned int width)
  475. {
  476. return repsep_snprintf(bf, size, "%*.*d", width, width-3, he->socket);
  477. }
  478. static int hist_entry__socket_filter(struct hist_entry *he, int type, const void *arg)
  479. {
  480. int sk = *(const int *)arg;
  481. if (type != HIST_FILTER__SOCKET)
  482. return -1;
  483. return sk >= 0 && he->socket != sk;
  484. }
  485. struct sort_entry sort_socket = {
  486. .se_header = "Socket",
  487. .se_cmp = sort__socket_cmp,
  488. .se_snprintf = hist_entry__socket_snprintf,
  489. .se_filter = hist_entry__socket_filter,
  490. .se_width_idx = HISTC_SOCKET,
  491. };
  492. /* --sort trace */
  493. static char *get_trace_output(struct hist_entry *he)
  494. {
  495. struct trace_seq seq;
  496. struct perf_evsel *evsel;
  497. struct tep_record rec = {
  498. .data = he->raw_data,
  499. .size = he->raw_size,
  500. };
  501. evsel = hists_to_evsel(he->hists);
  502. trace_seq_init(&seq);
  503. if (symbol_conf.raw_trace) {
  504. tep_print_fields(&seq, he->raw_data, he->raw_size,
  505. evsel->tp_format);
  506. } else {
  507. tep_event_info(&seq, evsel->tp_format, &rec);
  508. }
  509. /*
  510. * Trim the buffer, it starts at 4KB and we're not going to
  511. * add anything more to this buffer.
  512. */
  513. return realloc(seq.buffer, seq.len + 1);
  514. }
  515. static int64_t
  516. sort__trace_cmp(struct hist_entry *left, struct hist_entry *right)
  517. {
  518. struct perf_evsel *evsel;
  519. evsel = hists_to_evsel(left->hists);
  520. if (evsel->attr.type != PERF_TYPE_TRACEPOINT)
  521. return 0;
  522. if (left->trace_output == NULL)
  523. left->trace_output = get_trace_output(left);
  524. if (right->trace_output == NULL)
  525. right->trace_output = get_trace_output(right);
  526. return strcmp(right->trace_output, left->trace_output);
  527. }
  528. static int hist_entry__trace_snprintf(struct hist_entry *he, char *bf,
  529. size_t size, unsigned int width)
  530. {
  531. struct perf_evsel *evsel;
  532. evsel = hists_to_evsel(he->hists);
  533. if (evsel->attr.type != PERF_TYPE_TRACEPOINT)
  534. return scnprintf(bf, size, "%-.*s", width, "N/A");
  535. if (he->trace_output == NULL)
  536. he->trace_output = get_trace_output(he);
  537. return repsep_snprintf(bf, size, "%-.*s", width, he->trace_output);
  538. }
  539. struct sort_entry sort_trace = {
  540. .se_header = "Trace output",
  541. .se_cmp = sort__trace_cmp,
  542. .se_snprintf = hist_entry__trace_snprintf,
  543. .se_width_idx = HISTC_TRACE,
  544. };
  545. /* sort keys for branch stacks */
  546. static int64_t
  547. sort__dso_from_cmp(struct hist_entry *left, struct hist_entry *right)
  548. {
  549. if (!left->branch_info || !right->branch_info)
  550. return cmp_null(left->branch_info, right->branch_info);
  551. return _sort__dso_cmp(left->branch_info->from.map,
  552. right->branch_info->from.map);
  553. }
  554. static int hist_entry__dso_from_snprintf(struct hist_entry *he, char *bf,
  555. size_t size, unsigned int width)
  556. {
  557. if (he->branch_info)
  558. return _hist_entry__dso_snprintf(he->branch_info->from.map,
  559. bf, size, width);
  560. else
  561. return repsep_snprintf(bf, size, "%-*.*s", width, width, "N/A");
  562. }
  563. static int hist_entry__dso_from_filter(struct hist_entry *he, int type,
  564. const void *arg)
  565. {
  566. const struct dso *dso = arg;
  567. if (type != HIST_FILTER__DSO)
  568. return -1;
  569. return dso && (!he->branch_info || !he->branch_info->from.map ||
  570. he->branch_info->from.map->dso != dso);
  571. }
  572. static int64_t
  573. sort__dso_to_cmp(struct hist_entry *left, struct hist_entry *right)
  574. {
  575. if (!left->branch_info || !right->branch_info)
  576. return cmp_null(left->branch_info, right->branch_info);
  577. return _sort__dso_cmp(left->branch_info->to.map,
  578. right->branch_info->to.map);
  579. }
  580. static int hist_entry__dso_to_snprintf(struct hist_entry *he, char *bf,
  581. size_t size, unsigned int width)
  582. {
  583. if (he->branch_info)
  584. return _hist_entry__dso_snprintf(he->branch_info->to.map,
  585. bf, size, width);
  586. else
  587. return repsep_snprintf(bf, size, "%-*.*s", width, width, "N/A");
  588. }
  589. static int hist_entry__dso_to_filter(struct hist_entry *he, int type,
  590. const void *arg)
  591. {
  592. const struct dso *dso = arg;
  593. if (type != HIST_FILTER__DSO)
  594. return -1;
  595. return dso && (!he->branch_info || !he->branch_info->to.map ||
  596. he->branch_info->to.map->dso != dso);
  597. }
  598. static int64_t
  599. sort__sym_from_cmp(struct hist_entry *left, struct hist_entry *right)
  600. {
  601. struct addr_map_symbol *from_l;
  602. struct addr_map_symbol *from_r;
  603. if (!left->branch_info || !right->branch_info)
  604. return cmp_null(left->branch_info, right->branch_info);
  605. from_l = &left->branch_info->from;
  606. from_r = &right->branch_info->from;
  607. if (!from_l->sym && !from_r->sym)
  608. return _sort__addr_cmp(from_l->addr, from_r->addr);
  609. return _sort__sym_cmp(from_l->sym, from_r->sym);
  610. }
  611. static int64_t
  612. sort__sym_to_cmp(struct hist_entry *left, struct hist_entry *right)
  613. {
  614. struct addr_map_symbol *to_l, *to_r;
  615. if (!left->branch_info || !right->branch_info)
  616. return cmp_null(left->branch_info, right->branch_info);
  617. to_l = &left->branch_info->to;
  618. to_r = &right->branch_info->to;
  619. if (!to_l->sym && !to_r->sym)
  620. return _sort__addr_cmp(to_l->addr, to_r->addr);
  621. return _sort__sym_cmp(to_l->sym, to_r->sym);
  622. }
  623. static int hist_entry__sym_from_snprintf(struct hist_entry *he, char *bf,
  624. size_t size, unsigned int width)
  625. {
  626. if (he->branch_info) {
  627. struct addr_map_symbol *from = &he->branch_info->from;
  628. return _hist_entry__sym_snprintf(from->map, from->sym, from->addr,
  629. he->level, bf, size, width);
  630. }
  631. return repsep_snprintf(bf, size, "%-*.*s", width, width, "N/A");
  632. }
  633. static int hist_entry__sym_to_snprintf(struct hist_entry *he, char *bf,
  634. size_t size, unsigned int width)
  635. {
  636. if (he->branch_info) {
  637. struct addr_map_symbol *to = &he->branch_info->to;
  638. return _hist_entry__sym_snprintf(to->map, to->sym, to->addr,
  639. he->level, bf, size, width);
  640. }
  641. return repsep_snprintf(bf, size, "%-*.*s", width, width, "N/A");
  642. }
  643. static int hist_entry__sym_from_filter(struct hist_entry *he, int type,
  644. const void *arg)
  645. {
  646. const char *sym = arg;
  647. if (type != HIST_FILTER__SYMBOL)
  648. return -1;
  649. return sym && !(he->branch_info && he->branch_info->from.sym &&
  650. strstr(he->branch_info->from.sym->name, sym));
  651. }
  652. static int hist_entry__sym_to_filter(struct hist_entry *he, int type,
  653. const void *arg)
  654. {
  655. const char *sym = arg;
  656. if (type != HIST_FILTER__SYMBOL)
  657. return -1;
  658. return sym && !(he->branch_info && he->branch_info->to.sym &&
  659. strstr(he->branch_info->to.sym->name, sym));
  660. }
  661. struct sort_entry sort_dso_from = {
  662. .se_header = "Source Shared Object",
  663. .se_cmp = sort__dso_from_cmp,
  664. .se_snprintf = hist_entry__dso_from_snprintf,
  665. .se_filter = hist_entry__dso_from_filter,
  666. .se_width_idx = HISTC_DSO_FROM,
  667. };
  668. struct sort_entry sort_dso_to = {
  669. .se_header = "Target Shared Object",
  670. .se_cmp = sort__dso_to_cmp,
  671. .se_snprintf = hist_entry__dso_to_snprintf,
  672. .se_filter = hist_entry__dso_to_filter,
  673. .se_width_idx = HISTC_DSO_TO,
  674. };
  675. struct sort_entry sort_sym_from = {
  676. .se_header = "Source Symbol",
  677. .se_cmp = sort__sym_from_cmp,
  678. .se_snprintf = hist_entry__sym_from_snprintf,
  679. .se_filter = hist_entry__sym_from_filter,
  680. .se_width_idx = HISTC_SYMBOL_FROM,
  681. };
  682. struct sort_entry sort_sym_to = {
  683. .se_header = "Target Symbol",
  684. .se_cmp = sort__sym_to_cmp,
  685. .se_snprintf = hist_entry__sym_to_snprintf,
  686. .se_filter = hist_entry__sym_to_filter,
  687. .se_width_idx = HISTC_SYMBOL_TO,
  688. };
  689. static int64_t
  690. sort__mispredict_cmp(struct hist_entry *left, struct hist_entry *right)
  691. {
  692. unsigned char mp, p;
  693. if (!left->branch_info || !right->branch_info)
  694. return cmp_null(left->branch_info, right->branch_info);
  695. mp = left->branch_info->flags.mispred != right->branch_info->flags.mispred;
  696. p = left->branch_info->flags.predicted != right->branch_info->flags.predicted;
  697. return mp || p;
  698. }
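/* Output legend: "Y" = mispredicted, "N" = predicted, "N/A" = no branch info. */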
  699. static int hist_entry__mispredict_snprintf(struct hist_entry *he, char *bf,
  700. size_t size, unsigned int width){
  701. const char *out = "N/A";
  702. if (he->branch_info) {
  703. if (he->branch_info->flags.predicted)
  704. out = "N";
  705. else if (he->branch_info->flags.mispred)
  706. out = "Y";
  707. }
  708. return repsep_snprintf(bf, size, "%-*.*s", width, width, out);
  709. }
  710. static int64_t
  711. sort__cycles_cmp(struct hist_entry *left, struct hist_entry *right)
  712. {
  713. if (!left->branch_info || !right->branch_info)
  714. return cmp_null(left->branch_info, right->branch_info);
  715. return left->branch_info->flags.cycles -
  716. right->branch_info->flags.cycles;
  717. }
  718. static int hist_entry__cycles_snprintf(struct hist_entry *he, char *bf,
  719. size_t size, unsigned int width)
  720. {
  721. if (!he->branch_info)
  722. return scnprintf(bf, size, "%-.*s", width, "N/A");
  723. if (he->branch_info->flags.cycles == 0)
  724. return repsep_snprintf(bf, size, "%-*s", width, "-");
  725. return repsep_snprintf(bf, size, "%-*hd", width,
  726. he->branch_info->flags.cycles);
  727. }
  728. struct sort_entry sort_cycles = {
  729. .se_header = "Basic Block Cycles",
  730. .se_cmp = sort__cycles_cmp,
  731. .se_snprintf = hist_entry__cycles_snprintf,
  732. .se_width_idx = HISTC_CYCLES,
  733. };
  734. /* --sort daddr_sym */
  735. int64_t
  736. sort__daddr_cmp(struct hist_entry *left, struct hist_entry *right)
  737. {
  738. uint64_t l = 0, r = 0;
  739. if (left->mem_info)
  740. l = left->mem_info->daddr.addr;
  741. if (right->mem_info)
  742. r = right->mem_info->daddr.addr;
  743. return (int64_t)(r - l);
  744. }
  745. static int hist_entry__daddr_snprintf(struct hist_entry *he, char *bf,
  746. size_t size, unsigned int width)
  747. {
  748. uint64_t addr = 0;
  749. struct map *map = NULL;
  750. struct symbol *sym = NULL;
  751. if (he->mem_info) {
  752. addr = he->mem_info->daddr.addr;
  753. map = he->mem_info->daddr.map;
  754. sym = he->mem_info->daddr.sym;
  755. }
  756. return _hist_entry__sym_snprintf(map, sym, addr, he->level, bf, size,
  757. width);
  758. }
  759. int64_t
  760. sort__iaddr_cmp(struct hist_entry *left, struct hist_entry *right)
  761. {
  762. uint64_t l = 0, r = 0;
  763. if (left->mem_info)
  764. l = left->mem_info->iaddr.addr;
  765. if (right->mem_info)
  766. r = right->mem_info->iaddr.addr;
  767. return (int64_t)(r - l);
  768. }
  769. static int hist_entry__iaddr_snprintf(struct hist_entry *he, char *bf,
  770. size_t size, unsigned int width)
  771. {
  772. uint64_t addr = 0;
  773. struct map *map = NULL;
  774. struct symbol *sym = NULL;
  775. if (he->mem_info) {
  776. addr = he->mem_info->iaddr.addr;
  777. map = he->mem_info->iaddr.map;
  778. sym = he->mem_info->iaddr.sym;
  779. }
  780. return _hist_entry__sym_snprintf(map, sym, addr, he->level, bf, size,
  781. width);
  782. }
  783. static int64_t
  784. sort__dso_daddr_cmp(struct hist_entry *left, struct hist_entry *right)
  785. {
  786. struct map *map_l = NULL;
  787. struct map *map_r = NULL;
  788. if (left->mem_info)
  789. map_l = left->mem_info->daddr.map;
  790. if (right->mem_info)
  791. map_r = right->mem_info->daddr.map;
  792. return _sort__dso_cmp(map_l, map_r);
  793. }
  794. static int hist_entry__dso_daddr_snprintf(struct hist_entry *he, char *bf,
  795. size_t size, unsigned int width)
  796. {
  797. struct map *map = NULL;
  798. if (he->mem_info)
  799. map = he->mem_info->daddr.map;
  800. return _hist_entry__dso_snprintf(map, bf, size, width);
  801. }
  802. static int64_t
  803. sort__locked_cmp(struct hist_entry *left, struct hist_entry *right)
  804. {
  805. union perf_mem_data_src data_src_l;
  806. union perf_mem_data_src data_src_r;
  807. if (left->mem_info)
  808. data_src_l = left->mem_info->data_src;
  809. else
  810. data_src_l.mem_lock = PERF_MEM_LOCK_NA;
  811. if (right->mem_info)
  812. data_src_r = right->mem_info->data_src;
  813. else
  814. data_src_r.mem_lock = PERF_MEM_LOCK_NA;
  815. return (int64_t)(data_src_r.mem_lock - data_src_l.mem_lock);
  816. }
  817. static int hist_entry__locked_snprintf(struct hist_entry *he, char *bf,
  818. size_t size, unsigned int width)
  819. {
  820. char out[10];
  821. perf_mem__lck_scnprintf(out, sizeof(out), he->mem_info);
  822. return repsep_snprintf(bf, size, "%.*s", width, out);
  823. }
  824. static int64_t
  825. sort__tlb_cmp(struct hist_entry *left, struct hist_entry *right)
  826. {
  827. union perf_mem_data_src data_src_l;
  828. union perf_mem_data_src data_src_r;
  829. if (left->mem_info)
  830. data_src_l = left->mem_info->data_src;
  831. else
  832. data_src_l.mem_dtlb = PERF_MEM_TLB_NA;
  833. if (right->mem_info)
  834. data_src_r = right->mem_info->data_src;
  835. else
  836. data_src_r.mem_dtlb = PERF_MEM_TLB_NA;
  837. return (int64_t)(data_src_r.mem_dtlb - data_src_l.mem_dtlb);
  838. }
  839. static int hist_entry__tlb_snprintf(struct hist_entry *he, char *bf,
  840. size_t size, unsigned int width)
  841. {
  842. char out[64];
  843. perf_mem__tlb_scnprintf(out, sizeof(out), he->mem_info);
  844. return repsep_snprintf(bf, size, "%-*s", width, out);
  845. }
  846. static int64_t
  847. sort__lvl_cmp(struct hist_entry *left, struct hist_entry *right)
  848. {
  849. union perf_mem_data_src data_src_l;
  850. union perf_mem_data_src data_src_r;
  851. if (left->mem_info)
  852. data_src_l = left->mem_info->data_src;
  853. else
  854. data_src_l.mem_lvl = PERF_MEM_LVL_NA;
  855. if (right->mem_info)
  856. data_src_r = right->mem_info->data_src;
  857. else
  858. data_src_r.mem_lvl = PERF_MEM_LVL_NA;
  859. return (int64_t)(data_src_r.mem_lvl - data_src_l.mem_lvl);
  860. }
  861. static int hist_entry__lvl_snprintf(struct hist_entry *he, char *bf,
  862. size_t size, unsigned int width)
  863. {
  864. char out[64];
  865. perf_mem__lvl_scnprintf(out, sizeof(out), he->mem_info);
  866. return repsep_snprintf(bf, size, "%-*s", width, out);
  867. }
  868. static int64_t
  869. sort__snoop_cmp(struct hist_entry *left, struct hist_entry *right)
  870. {
  871. union perf_mem_data_src data_src_l;
  872. union perf_mem_data_src data_src_r;
  873. if (left->mem_info)
  874. data_src_l = left->mem_info->data_src;
  875. else
  876. data_src_l.mem_snoop = PERF_MEM_SNOOP_NA;
  877. if (right->mem_info)
  878. data_src_r = right->mem_info->data_src;
  879. else
  880. data_src_r.mem_snoop = PERF_MEM_SNOOP_NA;
  881. return (int64_t)(data_src_r.mem_snoop - data_src_l.mem_snoop);
  882. }
  883. static int hist_entry__snoop_snprintf(struct hist_entry *he, char *bf,
  884. size_t size, unsigned int width)
  885. {
  886. char out[64];
  887. perf_mem__snp_scnprintf(out, sizeof(out), he->mem_info);
  888. return repsep_snprintf(bf, size, "%-*s", width, out);
  889. }
  890. int64_t
  891. sort__dcacheline_cmp(struct hist_entry *left, struct hist_entry *right)
  892. {
  893. u64 l, r;
  894. struct map *l_map, *r_map;
  895. if (!left->mem_info) return -1;
  896. if (!right->mem_info) return 1;
  897. /* group event types together */
  898. if (left->cpumode > right->cpumode) return -1;
  899. if (left->cpumode < right->cpumode) return 1;
  900. l_map = left->mem_info->daddr.map;
  901. r_map = right->mem_info->daddr.map;
  902. /* if both are NULL, jump to sort on al_addr instead */
  903. if (!l_map && !r_map)
  904. goto addr;
  905. if (!l_map) return -1;
  906. if (!r_map) return 1;
  907. if (l_map->maj > r_map->maj) return -1;
  908. if (l_map->maj < r_map->maj) return 1;
  909. if (l_map->min > r_map->min) return -1;
  910. if (l_map->min < r_map->min) return 1;
  911. if (l_map->ino > r_map->ino) return -1;
  912. if (l_map->ino < r_map->ino) return 1;
  913. if (l_map->ino_generation > r_map->ino_generation) return -1;
  914. if (l_map->ino_generation < r_map->ino_generation) return 1;
  915. /*
  916. * Addresses with no major/minor numbers are assumed to be
  917. * anonymous in userspace. Sort those on pid then address.
  918. *
  919. * The kernel and non-zero major/minor mapped areas are
  920. * assumed to be unity mapped. Sort those on address.
  921. */
  922. if ((left->cpumode != PERF_RECORD_MISC_KERNEL) &&
  923. (!(l_map->flags & MAP_SHARED)) &&
  924. !l_map->maj && !l_map->min && !l_map->ino &&
  925. !l_map->ino_generation) {
  926. /* userspace anonymous */
  927. if (left->thread->pid_ > right->thread->pid_) return -1;
  928. if (left->thread->pid_ < right->thread->pid_) return 1;
  929. }
  930. addr:
  931. /* al_addr does all the right addr - start + offset calculations */
  932. l = cl_address(left->mem_info->daddr.al_addr);
  933. r = cl_address(right->mem_info->daddr.al_addr);
  934. if (l > r) return -1;
  935. if (l < r) return 1;
  936. return 0;
  937. }
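/*
 * cl_address() masks the data address down to its cache-line boundary,
 * so samples touching the same cache line collapse into one entry here.
 */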
  938. static int hist_entry__dcacheline_snprintf(struct hist_entry *he, char *bf,
  939. size_t size, unsigned int width)
  940. {
  941. uint64_t addr = 0;
  942. struct map *map = NULL;
  943. struct symbol *sym = NULL;
  944. char level = he->level;
  945. if (he->mem_info) {
  946. addr = cl_address(he->mem_info->daddr.al_addr);
  947. map = he->mem_info->daddr.map;
  948. sym = he->mem_info->daddr.sym;
  949. /* print [s] for shared data mmaps */
  950. if ((he->cpumode != PERF_RECORD_MISC_KERNEL) &&
  951. map && !(map->prot & PROT_EXEC) &&
  952. (map->flags & MAP_SHARED) &&
  953. (map->maj || map->min || map->ino ||
  954. map->ino_generation))
  955. level = 's';
  956. else if (!map)
  957. level = 'X';
  958. }
  959. return _hist_entry__sym_snprintf(map, sym, addr, level, bf, size,
  960. width);
  961. }
  962. struct sort_entry sort_mispredict = {
  963. .se_header = "Branch Mispredicted",
  964. .se_cmp = sort__mispredict_cmp,
  965. .se_snprintf = hist_entry__mispredict_snprintf,
  966. .se_width_idx = HISTC_MISPREDICT,
  967. };
  968. static u64 he_weight(struct hist_entry *he)
  969. {
  970. return he->stat.nr_events ? he->stat.weight / he->stat.nr_events : 0;
  971. }
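/* he_weight(): average sample weight for the entry (total weight / nr_events). */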
  972. static int64_t
  973. sort__local_weight_cmp(struct hist_entry *left, struct hist_entry *right)
  974. {
  975. return he_weight(left) - he_weight(right);
  976. }
  977. static int hist_entry__local_weight_snprintf(struct hist_entry *he, char *bf,
  978. size_t size, unsigned int width)
  979. {
  980. return repsep_snprintf(bf, size, "%-*llu", width, he_weight(he));
  981. }
  982. struct sort_entry sort_local_weight = {
  983. .se_header = "Local Weight",
  984. .se_cmp = sort__local_weight_cmp,
  985. .se_snprintf = hist_entry__local_weight_snprintf,
  986. .se_width_idx = HISTC_LOCAL_WEIGHT,
  987. };
  988. static int64_t
  989. sort__global_weight_cmp(struct hist_entry *left, struct hist_entry *right)
  990. {
  991. return left->stat.weight - right->stat.weight;
  992. }
  993. static int hist_entry__global_weight_snprintf(struct hist_entry *he, char *bf,
  994. size_t size, unsigned int width)
  995. {
  996. return repsep_snprintf(bf, size, "%-*llu", width, he->stat.weight);
  997. }
  998. struct sort_entry sort_global_weight = {
  999. .se_header = "Weight",
  1000. .se_cmp = sort__global_weight_cmp,
  1001. .se_snprintf = hist_entry__global_weight_snprintf,
  1002. .se_width_idx = HISTC_GLOBAL_WEIGHT,
  1003. };
  1004. struct sort_entry sort_mem_daddr_sym = {
  1005. .se_header = "Data Symbol",
  1006. .se_cmp = sort__daddr_cmp,
  1007. .se_snprintf = hist_entry__daddr_snprintf,
  1008. .se_width_idx = HISTC_MEM_DADDR_SYMBOL,
  1009. };
  1010. struct sort_entry sort_mem_iaddr_sym = {
  1011. .se_header = "Code Symbol",
  1012. .se_cmp = sort__iaddr_cmp,
  1013. .se_snprintf = hist_entry__iaddr_snprintf,
  1014. .se_width_idx = HISTC_MEM_IADDR_SYMBOL,
  1015. };
  1016. struct sort_entry sort_mem_daddr_dso = {
  1017. .se_header = "Data Object",
  1018. .se_cmp = sort__dso_daddr_cmp,
  1019. .se_snprintf = hist_entry__dso_daddr_snprintf,
  1020. .se_width_idx = HISTC_MEM_DADDR_DSO,
  1021. };
  1022. struct sort_entry sort_mem_locked = {
  1023. .se_header = "Locked",
  1024. .se_cmp = sort__locked_cmp,
  1025. .se_snprintf = hist_entry__locked_snprintf,
  1026. .se_width_idx = HISTC_MEM_LOCKED,
  1027. };
  1028. struct sort_entry sort_mem_tlb = {
  1029. .se_header = "TLB access",
  1030. .se_cmp = sort__tlb_cmp,
  1031. .se_snprintf = hist_entry__tlb_snprintf,
  1032. .se_width_idx = HISTC_MEM_TLB,
  1033. };
  1034. struct sort_entry sort_mem_lvl = {
  1035. .se_header = "Memory access",
  1036. .se_cmp = sort__lvl_cmp,
  1037. .se_snprintf = hist_entry__lvl_snprintf,
  1038. .se_width_idx = HISTC_MEM_LVL,
  1039. };
  1040. struct sort_entry sort_mem_snoop = {
  1041. .se_header = "Snoop",
  1042. .se_cmp = sort__snoop_cmp,
  1043. .se_snprintf = hist_entry__snoop_snprintf,
  1044. .se_width_idx = HISTC_MEM_SNOOP,
  1045. };
  1046. struct sort_entry sort_mem_dcacheline = {
  1047. .se_header = "Data Cacheline",
  1048. .se_cmp = sort__dcacheline_cmp,
  1049. .se_snprintf = hist_entry__dcacheline_snprintf,
  1050. .se_width_idx = HISTC_MEM_DCACHELINE,
  1051. };
  1052. static int64_t
  1053. sort__phys_daddr_cmp(struct hist_entry *left, struct hist_entry *right)
  1054. {
  1055. uint64_t l = 0, r = 0;
  1056. if (left->mem_info)
  1057. l = left->mem_info->daddr.phys_addr;
  1058. if (right->mem_info)
  1059. r = right->mem_info->daddr.phys_addr;
  1060. return (int64_t)(r - l);
  1061. }
  1062. static int hist_entry__phys_daddr_snprintf(struct hist_entry *he, char *bf,
  1063. size_t size, unsigned int width)
  1064. {
  1065. uint64_t addr = 0;
  1066. size_t ret = 0;
  1067. size_t len = BITS_PER_LONG / 4;
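/* Unlike the other mem_info printers above, he->mem_info is not NULL-checked here. */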
  1068. addr = he->mem_info->daddr.phys_addr;
  1069. ret += repsep_snprintf(bf + ret, size - ret, "[%c] ", he->level);
  1070. ret += repsep_snprintf(bf + ret, size - ret, "%-#.*llx", len, addr);
  1071. ret += repsep_snprintf(bf + ret, size - ret, "%-*s", width - ret, "");
  1072. if (ret > width)
  1073. bf[width] = '\0';
  1074. return width;
  1075. }
  1076. struct sort_entry sort_mem_phys_daddr = {
  1077. .se_header = "Data Physical Address",
  1078. .se_cmp = sort__phys_daddr_cmp,
  1079. .se_snprintf = hist_entry__phys_daddr_snprintf,
  1080. .se_width_idx = HISTC_MEM_PHYS_DADDR,
  1081. };
  1082. static int64_t
  1083. sort__abort_cmp(struct hist_entry *left, struct hist_entry *right)
  1084. {
  1085. if (!left->branch_info || !right->branch_info)
  1086. return cmp_null(left->branch_info, right->branch_info);
  1087. return left->branch_info->flags.abort !=
  1088. right->branch_info->flags.abort;
  1089. }
  1090. static int hist_entry__abort_snprintf(struct hist_entry *he, char *bf,
  1091. size_t size, unsigned int width)
  1092. {
  1093. const char *out = "N/A";
  1094. if (he->branch_info) {
  1095. if (he->branch_info->flags.abort)
  1096. out = "A";
  1097. else
  1098. out = ".";
  1099. }
  1100. return repsep_snprintf(bf, size, "%-*s", width, out);
  1101. }
  1102. struct sort_entry sort_abort = {
  1103. .se_header = "Transaction abort",
  1104. .se_cmp = sort__abort_cmp,
  1105. .se_snprintf = hist_entry__abort_snprintf,
  1106. .se_width_idx = HISTC_ABORT,
  1107. };
  1108. static int64_t
  1109. sort__in_tx_cmp(struct hist_entry *left, struct hist_entry *right)
  1110. {
  1111. if (!left->branch_info || !right->branch_info)
  1112. return cmp_null(left->branch_info, right->branch_info);
  1113. return left->branch_info->flags.in_tx !=
  1114. right->branch_info->flags.in_tx;
  1115. }
  1116. static int hist_entry__in_tx_snprintf(struct hist_entry *he, char *bf,
  1117. size_t size, unsigned int width)
  1118. {
  1119. const char *out = "N/A";
  1120. if (he->branch_info) {
  1121. if (he->branch_info->flags.in_tx)
  1122. out = "T";
  1123. else
  1124. out = ".";
  1125. }
  1126. return repsep_snprintf(bf, size, "%-*s", width, out);
  1127. }
  1128. struct sort_entry sort_in_tx = {
  1129. .se_header = "Branch in transaction",
  1130. .se_cmp = sort__in_tx_cmp,
  1131. .se_snprintf = hist_entry__in_tx_snprintf,
  1132. .se_width_idx = HISTC_IN_TX,
  1133. };
  1134. static int64_t
  1135. sort__transaction_cmp(struct hist_entry *left, struct hist_entry *right)
  1136. {
  1137. return left->transaction - right->transaction;
  1138. }
  1139. static inline char *add_str(char *p, const char *str)
  1140. {
  1141. strcpy(p, str);
  1142. return p + strlen(str);
  1143. }
  1144. static struct txbit {
  1145. unsigned flag;
  1146. const char *name;
  1147. int skip_for_len;
  1148. } txbits[] = {
  1149. { PERF_TXN_ELISION, "EL ", 0 },
  1150. { PERF_TXN_TRANSACTION, "TX ", 1 },
  1151. { PERF_TXN_SYNC, "SYNC ", 1 },
  1152. { PERF_TXN_ASYNC, "ASYNC ", 0 },
  1153. { PERF_TXN_RETRY, "RETRY ", 0 },
  1154. { PERF_TXN_CONFLICT, "CON ", 0 },
  1155. { PERF_TXN_CAPACITY_WRITE, "CAP-WRITE ", 1 },
  1156. { PERF_TXN_CAPACITY_READ, "CAP-READ ", 0 },
  1157. { 0, NULL, 0 }
  1158. };
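/*
 * The names of all set transaction bits are concatenated by
 * hist_entry__transaction_snprintf(); entries marked skip_for_len are
 * left out of the column-width estimate in hist_entry__transaction_len().
 */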
  1159. int hist_entry__transaction_len(void)
  1160. {
  1161. int i;
  1162. int len = 0;
  1163. for (i = 0; txbits[i].name; i++) {
  1164. if (!txbits[i].skip_for_len)
  1165. len += strlen(txbits[i].name);
  1166. }
  1167. len += 4; /* :XX<space> */
  1168. return len;
  1169. }
  1170. static int hist_entry__transaction_snprintf(struct hist_entry *he, char *bf,
  1171. size_t size, unsigned int width)
  1172. {
  1173. u64 t = he->transaction;
  1174. char buf[128];
  1175. char *p = buf;
  1176. int i;
  1177. buf[0] = 0;
  1178. for (i = 0; txbits[i].name; i++)
  1179. if (txbits[i].flag & t)
  1180. p = add_str(p, txbits[i].name);
  1181. if (t && !(t & (PERF_TXN_SYNC|PERF_TXN_ASYNC)))
  1182. p = add_str(p, "NEITHER ");
  1183. if (t & PERF_TXN_ABORT_MASK) {
  1184. sprintf(p, ":%" PRIx64,
  1185. (t & PERF_TXN_ABORT_MASK) >>
  1186. PERF_TXN_ABORT_SHIFT);
  1187. p += strlen(p);
  1188. }
  1189. return repsep_snprintf(bf, size, "%-*s", width, buf);
  1190. }
  1191. struct sort_entry sort_transaction = {
  1192. .se_header = "Transaction ",
  1193. .se_cmp = sort__transaction_cmp,
  1194. .se_snprintf = hist_entry__transaction_snprintf,
  1195. .se_width_idx = HISTC_TRANSACTION,
  1196. };
  1197. /* --sort symbol_size */
  1198. static int64_t _sort__sym_size_cmp(struct symbol *sym_l, struct symbol *sym_r)
  1199. {
  1200. int64_t size_l = sym_l != NULL ? symbol__size(sym_l) : 0;
  1201. int64_t size_r = sym_r != NULL ? symbol__size(sym_r) : 0;
  1202. return size_l < size_r ? -1 :
  1203. size_l == size_r ? 0 : 1;
  1204. }
  1205. static int64_t
  1206. sort__sym_size_cmp(struct hist_entry *left, struct hist_entry *right)
  1207. {
  1208. return _sort__sym_size_cmp(right->ms.sym, left->ms.sym);
  1209. }
  1210. static int _hist_entry__sym_size_snprintf(struct symbol *sym, char *bf,
  1211. size_t bf_size, unsigned int width)
  1212. {
  1213. if (sym)
  1214. return repsep_snprintf(bf, bf_size, "%*d", width, symbol__size(sym));
  1215. return repsep_snprintf(bf, bf_size, "%*s", width, "unknown");
  1216. }
  1217. static int hist_entry__sym_size_snprintf(struct hist_entry *he, char *bf,
  1218. size_t size, unsigned int width)
  1219. {
  1220. return _hist_entry__sym_size_snprintf(he->ms.sym, bf, size, width);
  1221. }
  1222. struct sort_entry sort_sym_size = {
  1223. .se_header = "Symbol size",
  1224. .se_cmp = sort__sym_size_cmp,
  1225. .se_snprintf = hist_entry__sym_size_snprintf,
  1226. .se_width_idx = HISTC_SYM_SIZE,
  1227. };
/* --sort dso_size */

static int64_t _sort__dso_size_cmp(struct map *map_l, struct map *map_r)
{
	int64_t size_l = map_l != NULL ? map__size(map_l) : 0;
	int64_t size_r = map_r != NULL ? map__size(map_r) : 0;

	return size_l < size_r ? -1 :
		size_l == size_r ? 0 : 1;
}

static int64_t
sort__dso_size_cmp(struct hist_entry *left, struct hist_entry *right)
{
	return _sort__dso_size_cmp(right->ms.map, left->ms.map);
}

static int _hist_entry__dso_size_snprintf(struct map *map, char *bf,
					  size_t bf_size, unsigned int width)
{
	if (map && map->dso)
		return repsep_snprintf(bf, bf_size, "%*d", width,
				       map__size(map));

	return repsep_snprintf(bf, bf_size, "%*s", width, "unknown");
}

static int hist_entry__dso_size_snprintf(struct hist_entry *he, char *bf,
					 size_t size, unsigned int width)
{
	return _hist_entry__dso_size_snprintf(he->ms.map, bf, size, width);
}

struct sort_entry sort_dso_size = {
	.se_header	= "DSO size",
	.se_cmp		= sort__dso_size_cmp,
	.se_snprintf	= hist_entry__dso_size_snprintf,
	.se_width_idx	= HISTC_DSO_SIZE,
};

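/*
 * The tables below map --sort/--fields token names onto the sort_entry
 * implementations defined earlier.  The common table is indexed directly
 * by the SORT_* enum; the branch-stack and memory tables are indexed
 * relative to __SORT_BRANCH_STACK and __SORT_MEMORY_MODE respectively,
 * which is why their DIM() macros subtract the base value.
 */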
struct sort_dimension {
	const char		*name;
	struct sort_entry	*entry;
	int			taken;
};

#define DIM(d, n, func) [d] = { .name = n, .entry = &(func) }

static struct sort_dimension common_sort_dimensions[] = {
	DIM(SORT_PID, "pid", sort_thread),
	DIM(SORT_COMM, "comm", sort_comm),
	DIM(SORT_DSO, "dso", sort_dso),
	DIM(SORT_SYM, "symbol", sort_sym),
	DIM(SORT_PARENT, "parent", sort_parent),
	DIM(SORT_CPU, "cpu", sort_cpu),
	DIM(SORT_SOCKET, "socket", sort_socket),
	DIM(SORT_SRCLINE, "srcline", sort_srcline),
	DIM(SORT_SRCFILE, "srcfile", sort_srcfile),
	DIM(SORT_LOCAL_WEIGHT, "local_weight", sort_local_weight),
	DIM(SORT_GLOBAL_WEIGHT, "weight", sort_global_weight),
	DIM(SORT_TRANSACTION, "transaction", sort_transaction),
	DIM(SORT_TRACE, "trace", sort_trace),
	DIM(SORT_SYM_SIZE, "symbol_size", sort_sym_size),
	DIM(SORT_DSO_SIZE, "dso_size", sort_dso_size),
	DIM(SORT_CGROUP_ID, "cgroup_id", sort_cgroup_id),
};

#undef DIM

#define DIM(d, n, func) [d - __SORT_BRANCH_STACK] = { .name = n, .entry = &(func) }

static struct sort_dimension bstack_sort_dimensions[] = {
	DIM(SORT_DSO_FROM, "dso_from", sort_dso_from),
	DIM(SORT_DSO_TO, "dso_to", sort_dso_to),
	DIM(SORT_SYM_FROM, "symbol_from", sort_sym_from),
	DIM(SORT_SYM_TO, "symbol_to", sort_sym_to),
	DIM(SORT_MISPREDICT, "mispredict", sort_mispredict),
	DIM(SORT_IN_TX, "in_tx", sort_in_tx),
	DIM(SORT_ABORT, "abort", sort_abort),
	DIM(SORT_CYCLES, "cycles", sort_cycles),
	DIM(SORT_SRCLINE_FROM, "srcline_from", sort_srcline_from),
	DIM(SORT_SRCLINE_TO, "srcline_to", sort_srcline_to),
};

#undef DIM

#define DIM(d, n, func) [d - __SORT_MEMORY_MODE] = { .name = n, .entry = &(func) }

static struct sort_dimension memory_sort_dimensions[] = {
	DIM(SORT_MEM_DADDR_SYMBOL, "symbol_daddr", sort_mem_daddr_sym),
	DIM(SORT_MEM_IADDR_SYMBOL, "symbol_iaddr", sort_mem_iaddr_sym),
	DIM(SORT_MEM_DADDR_DSO, "dso_daddr", sort_mem_daddr_dso),
	DIM(SORT_MEM_LOCKED, "locked", sort_mem_locked),
	DIM(SORT_MEM_TLB, "tlb", sort_mem_tlb),
	DIM(SORT_MEM_LVL, "mem", sort_mem_lvl),
	DIM(SORT_MEM_SNOOP, "snoop", sort_mem_snoop),
	DIM(SORT_MEM_DCACHELINE, "dcacheline", sort_mem_dcacheline),
	DIM(SORT_MEM_PHYS_DADDR, "phys_daddr", sort_mem_phys_daddr),
};

#undef DIM

struct hpp_dimension {
	const char		*name;
	struct perf_hpp_fmt	*fmt;
	int			taken;
};

#define DIM(d, n) { .name = n, .fmt = &perf_hpp__format[d], }

static struct hpp_dimension hpp_sort_dimensions[] = {
	DIM(PERF_HPP__OVERHEAD, "overhead"),
	DIM(PERF_HPP__OVERHEAD_SYS, "overhead_sys"),
	DIM(PERF_HPP__OVERHEAD_US, "overhead_us"),
	DIM(PERF_HPP__OVERHEAD_GUEST_SYS, "overhead_guest_sys"),
	DIM(PERF_HPP__OVERHEAD_GUEST_US, "overhead_guest_us"),
	DIM(PERF_HPP__OVERHEAD_ACC, "overhead_children"),
	DIM(PERF_HPP__SAMPLES, "sample"),
	DIM(PERF_HPP__PERIOD, "period"),
};

#undef DIM

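/*
 * hpp_sort_entry wraps a classic struct sort_entry in a perf_hpp_fmt so
 * the older sort keys can be driven through the hist print/compare (hpp)
 * callbacks used by the output code: header/width/entry delegate to the
 * column width bookkeeping and se_snprintf, while cmp/collapse/sort
 * delegate to the corresponding se_* comparators.
 */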
struct hpp_sort_entry {
	struct perf_hpp_fmt hpp;
	struct sort_entry *se;
};

void perf_hpp__reset_sort_width(struct perf_hpp_fmt *fmt, struct hists *hists)
{
	struct hpp_sort_entry *hse;

	if (!perf_hpp__is_sort_entry(fmt))
		return;

	hse = container_of(fmt, struct hpp_sort_entry, hpp);
	hists__new_col_len(hists, hse->se->se_width_idx, strlen(fmt->name));
}

static int __sort__hpp_header(struct perf_hpp_fmt *fmt, struct perf_hpp *hpp,
			      struct hists *hists, int line __maybe_unused,
			      int *span __maybe_unused)
{
	struct hpp_sort_entry *hse;
	size_t len = fmt->user_len;

	hse = container_of(fmt, struct hpp_sort_entry, hpp);

	if (!len)
		len = hists__col_len(hists, hse->se->se_width_idx);

	return scnprintf(hpp->buf, hpp->size, "%-*.*s", len, len, fmt->name);
}

static int __sort__hpp_width(struct perf_hpp_fmt *fmt,
			     struct perf_hpp *hpp __maybe_unused,
			     struct hists *hists)
{
	struct hpp_sort_entry *hse;
	size_t len = fmt->user_len;

	hse = container_of(fmt, struct hpp_sort_entry, hpp);

	if (!len)
		len = hists__col_len(hists, hse->se->se_width_idx);

	return len;
}

static int __sort__hpp_entry(struct perf_hpp_fmt *fmt, struct perf_hpp *hpp,
			     struct hist_entry *he)
{
	struct hpp_sort_entry *hse;
	size_t len = fmt->user_len;

	hse = container_of(fmt, struct hpp_sort_entry, hpp);

	if (!len)
		len = hists__col_len(he->hists, hse->se->se_width_idx);

	return hse->se->se_snprintf(he, hpp->buf, hpp->size, len);
}

static int64_t __sort__hpp_cmp(struct perf_hpp_fmt *fmt,
			       struct hist_entry *a, struct hist_entry *b)
{
	struct hpp_sort_entry *hse;

	hse = container_of(fmt, struct hpp_sort_entry, hpp);
	return hse->se->se_cmp(a, b);
}

static int64_t __sort__hpp_collapse(struct perf_hpp_fmt *fmt,
				    struct hist_entry *a, struct hist_entry *b)
{
	struct hpp_sort_entry *hse;
	int64_t (*collapse_fn)(struct hist_entry *, struct hist_entry *);

	hse = container_of(fmt, struct hpp_sort_entry, hpp);
	collapse_fn = hse->se->se_collapse ?: hse->se->se_cmp;
	return collapse_fn(a, b);
}

static int64_t __sort__hpp_sort(struct perf_hpp_fmt *fmt,
				struct hist_entry *a, struct hist_entry *b)
{
	struct hpp_sort_entry *hse;
	int64_t (*sort_fn)(struct hist_entry *, struct hist_entry *);

	hse = container_of(fmt, struct hpp_sort_entry, hpp);
	sort_fn = hse->se->se_sort ?: hse->se->se_cmp;
	return sort_fn(a, b);
}

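/*
 * A format is recognized as a wrapped sort entry by its header callback,
 * and MK_SORT_ENTRY_CHK() generates one predicate per key (for example
 * perf_hpp__is_sym_entry()) by comparing against the global sort_* entry.
 */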
bool perf_hpp__is_sort_entry(struct perf_hpp_fmt *format)
{
	return format->header == __sort__hpp_header;
}

#define MK_SORT_ENTRY_CHK(key)					\
bool perf_hpp__is_ ## key ## _entry(struct perf_hpp_fmt *fmt)	\
{								\
	struct hpp_sort_entry *hse;				\
								\
	if (!perf_hpp__is_sort_entry(fmt))			\
		return false;					\
								\
	hse = container_of(fmt, struct hpp_sort_entry, hpp);	\
	return hse->se == &sort_ ## key ;			\
}

MK_SORT_ENTRY_CHK(trace)
MK_SORT_ENTRY_CHK(srcline)
MK_SORT_ENTRY_CHK(srcfile)
MK_SORT_ENTRY_CHK(thread)
MK_SORT_ENTRY_CHK(comm)
MK_SORT_ENTRY_CHK(dso)
MK_SORT_ENTRY_CHK(sym)

static bool __sort__hpp_equal(struct perf_hpp_fmt *a, struct perf_hpp_fmt *b)
{
	struct hpp_sort_entry *hse_a;
	struct hpp_sort_entry *hse_b;

	if (!perf_hpp__is_sort_entry(a) || !perf_hpp__is_sort_entry(b))
		return false;

	hse_a = container_of(a, struct hpp_sort_entry, hpp);
	hse_b = container_of(b, struct hpp_sort_entry, hpp);

	return hse_a->se == hse_b->se;
}

static void hse_free(struct perf_hpp_fmt *fmt)
{
	struct hpp_sort_entry *hse;

	hse = container_of(fmt, struct hpp_sort_entry, hpp);
	free(hse);
}

static struct hpp_sort_entry *
__sort_dimension__alloc_hpp(struct sort_dimension *sd, int level)
{
	struct hpp_sort_entry *hse;

	hse = malloc(sizeof(*hse));
	if (hse == NULL) {
		pr_err("Memory allocation failed\n");
		return NULL;
	}

	hse->se = sd->entry;
	hse->hpp.name = sd->entry->se_header;
	hse->hpp.header = __sort__hpp_header;
	hse->hpp.width = __sort__hpp_width;
	hse->hpp.entry = __sort__hpp_entry;
	hse->hpp.color = NULL;

	hse->hpp.cmp = __sort__hpp_cmp;
	hse->hpp.collapse = __sort__hpp_collapse;
	hse->hpp.sort = __sort__hpp_sort;
	hse->hpp.equal = __sort__hpp_equal;
	hse->hpp.free = hse_free;

	INIT_LIST_HEAD(&hse->hpp.list);
	INIT_LIST_HEAD(&hse->hpp.sort_list);
	hse->hpp.elide = false;
	hse->hpp.len = 0;
	hse->hpp.user_len = 0;
	hse->hpp.level = level;

	return hse;
}

static void hpp_free(struct perf_hpp_fmt *fmt)
{
	free(fmt);
}

static struct perf_hpp_fmt *__hpp_dimension__alloc_hpp(struct hpp_dimension *hd,
						       int level)
{
	struct perf_hpp_fmt *fmt;

	fmt = memdup(hd->fmt, sizeof(*fmt));
	if (fmt) {
		INIT_LIST_HEAD(&fmt->list);
		INIT_LIST_HEAD(&fmt->sort_list);
		fmt->free = hpp_free;
		fmt->level = level;
	}

	return fmt;
}

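/*
 * hist_entry__filter() returns -1 when no sort key in the entry's hpp
 * list provides a filter for the given @type; otherwise it ORs together
 * the results of every applicable se_filter() callback.
 */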
int hist_entry__filter(struct hist_entry *he, int type, const void *arg)
{
	struct perf_hpp_fmt *fmt;
	struct hpp_sort_entry *hse;
	int ret = -1;
	int r;

	perf_hpp_list__for_each_format(he->hpp_list, fmt) {
		if (!perf_hpp__is_sort_entry(fmt))
			continue;

		hse = container_of(fmt, struct hpp_sort_entry, hpp);
		if (hse->se->se_filter == NULL)
			continue;

		/*
		 * A hist entry is filtered if any sort key in the hpp list
		 * applies to it; filters of a non-matching type are skipped.
		 */
		r = hse->se->se_filter(he, type, arg);
		if (r >= 0) {
			if (ret < 0)
				ret = 0;
			ret |= r;
		}
	}

	return ret;
}

static int __sort_dimension__add_hpp_sort(struct sort_dimension *sd,
					  struct perf_hpp_list *list,
					  int level)
{
	struct hpp_sort_entry *hse = __sort_dimension__alloc_hpp(sd, level);

	if (hse == NULL)
		return -1;

	perf_hpp_list__register_sort_field(list, &hse->hpp);
	return 0;
}

static int __sort_dimension__add_hpp_output(struct sort_dimension *sd,
					    struct perf_hpp_list *list)
{
	struct hpp_sort_entry *hse = __sort_dimension__alloc_hpp(sd, 0);

	if (hse == NULL)
		return -1;

	perf_hpp_list__column_register(list, &hse->hpp);
	return 0;
}

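/*
 * Dynamic entries turn raw tracepoint fields into sort keys/columns.
 * With raw_trace set the field is printed straight from the raw event
 * data; otherwise the libtraceevent pretty-printed line is scanned for a
 * "name=value" token and only the value part is shown, with the column
 * width tracked as entries are compared and printed.
 */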
struct hpp_dynamic_entry {
	struct perf_hpp_fmt hpp;
	struct perf_evsel *evsel;
	struct format_field *field;
	unsigned dynamic_len;
	bool raw_trace;
};

static int hde_width(struct hpp_dynamic_entry *hde)
{
	if (!hde->hpp.len) {
		int len = hde->dynamic_len;
		int namelen = strlen(hde->field->name);
		int fieldlen = hde->field->size;

		if (namelen > len)
			len = namelen;

		if (!(hde->field->flags & FIELD_IS_STRING)) {
			/* room to print the value as hex */
			fieldlen = hde->field->size * 2 + 2;
		}
		if (fieldlen > len)
			len = fieldlen;

		hde->hpp.len = len;
	}
	return hde->hpp.len;
}

static void update_dynamic_len(struct hpp_dynamic_entry *hde,
			       struct hist_entry *he)
{
	char *str, *pos;
	struct format_field *field = hde->field;
	size_t namelen;
	bool last = false;

	if (hde->raw_trace)
		return;

	/* parse the pretty-print result and update the max length */
	if (!he->trace_output)
		he->trace_output = get_trace_output(he);

	namelen = strlen(field->name);
	str = he->trace_output;

	while (str) {
		pos = strchr(str, ' ');
		if (pos == NULL) {
			last = true;
			pos = str + strlen(str);
		}

		if (!strncmp(str, field->name, namelen)) {
			size_t len;

			str += namelen + 1;
			len = pos - str;

			if (len > hde->dynamic_len)
				hde->dynamic_len = len;
			break;
		}

		if (last)
			str = NULL;
		else
			str = pos + 1;
	}
}

static int __sort__hde_header(struct perf_hpp_fmt *fmt, struct perf_hpp *hpp,
			      struct hists *hists __maybe_unused,
			      int line __maybe_unused,
			      int *span __maybe_unused)
{
	struct hpp_dynamic_entry *hde;
	size_t len = fmt->user_len;

	hde = container_of(fmt, struct hpp_dynamic_entry, hpp);

	if (!len)
		len = hde_width(hde);

	return scnprintf(hpp->buf, hpp->size, "%*.*s", len, len, hde->field->name);
}

static int __sort__hde_width(struct perf_hpp_fmt *fmt,
			     struct perf_hpp *hpp __maybe_unused,
			     struct hists *hists __maybe_unused)
{
	struct hpp_dynamic_entry *hde;
	size_t len = fmt->user_len;

	hde = container_of(fmt, struct hpp_dynamic_entry, hpp);

	if (!len)
		len = hde_width(hde);

	return len;
}

bool perf_hpp__defined_dynamic_entry(struct perf_hpp_fmt *fmt, struct hists *hists)
{
	struct hpp_dynamic_entry *hde;

	hde = container_of(fmt, struct hpp_dynamic_entry, hpp);

	return hists_to_evsel(hists) == hde->evsel;
}

static int __sort__hde_entry(struct perf_hpp_fmt *fmt, struct perf_hpp *hpp,
			     struct hist_entry *he)
{
	struct hpp_dynamic_entry *hde;
	size_t len = fmt->user_len;
	char *str, *pos;
	struct format_field *field;
	size_t namelen;
	bool last = false;
	int ret;

	hde = container_of(fmt, struct hpp_dynamic_entry, hpp);

	if (!len)
		len = hde_width(hde);

	if (hde->raw_trace)
		goto raw_field;

	if (!he->trace_output)
		he->trace_output = get_trace_output(he);

	field = hde->field;
	namelen = strlen(field->name);
	str = he->trace_output;

	while (str) {
		pos = strchr(str, ' ');
		if (pos == NULL) {
			last = true;
			pos = str + strlen(str);
		}

		if (!strncmp(str, field->name, namelen)) {
			str += namelen + 1;
			str = strndup(str, pos - str);

			if (str == NULL)
				return scnprintf(hpp->buf, hpp->size,
						 "%*.*s", len, len, "ERROR");
			break;
		}

		if (last)
			str = NULL;
		else
			str = pos + 1;
	}

	if (str == NULL) {
		struct trace_seq seq;
raw_field:
		trace_seq_init(&seq);
		tep_print_field(&seq, he->raw_data, hde->field);
		str = seq.buffer;
	}

	ret = scnprintf(hpp->buf, hpp->size, "%*.*s", len, len, str);
	free(str);
	return ret;
}

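/*
 * Comparison is done on the raw field bytes with memcmp().  For
 * FIELD_IS_DYNAMIC fields the stored value encodes the real payload:
 * offset in the low 16 bits and size in the next 16 bits.  A NULL 'b'
 * is used by callers that only want the column width updated.
 */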
static int64_t __sort__hde_cmp(struct perf_hpp_fmt *fmt,
			       struct hist_entry *a, struct hist_entry *b)
{
	struct hpp_dynamic_entry *hde;
	struct format_field *field;
	unsigned offset, size;

	hde = container_of(fmt, struct hpp_dynamic_entry, hpp);

	if (b == NULL) {
		update_dynamic_len(hde, a);
		return 0;
	}

	field = hde->field;
	if (field->flags & FIELD_IS_DYNAMIC) {
		unsigned long long dyn;

		tep_read_number_field(field, a->raw_data, &dyn);
		offset = dyn & 0xffff;
		size = (dyn >> 16) & 0xffff;

		/* record max width for output */
		if (size > hde->dynamic_len)
			hde->dynamic_len = size;
	} else {
		offset = field->offset;
		size = field->size;
	}

	return memcmp(a->raw_data + offset, b->raw_data + offset, size);
}

bool perf_hpp__is_dynamic_entry(struct perf_hpp_fmt *fmt)
{
	return fmt->cmp == __sort__hde_cmp;
}

static bool __sort__hde_equal(struct perf_hpp_fmt *a, struct perf_hpp_fmt *b)
{
	struct hpp_dynamic_entry *hde_a;
	struct hpp_dynamic_entry *hde_b;

	if (!perf_hpp__is_dynamic_entry(a) || !perf_hpp__is_dynamic_entry(b))
		return false;

	hde_a = container_of(a, struct hpp_dynamic_entry, hpp);
	hde_b = container_of(b, struct hpp_dynamic_entry, hpp);

	return hde_a->field == hde_b->field;
}

static void hde_free(struct perf_hpp_fmt *fmt)
{
	struct hpp_dynamic_entry *hde;

	hde = container_of(fmt, struct hpp_dynamic_entry, hpp);
	free(hde);
}

static struct hpp_dynamic_entry *
__alloc_dynamic_entry(struct perf_evsel *evsel, struct format_field *field,
		      int level)
{
	struct hpp_dynamic_entry *hde;

	hde = malloc(sizeof(*hde));
	if (hde == NULL) {
		pr_debug("Memory allocation failed\n");
		return NULL;
	}

	hde->evsel = evsel;
	hde->field = field;
	hde->dynamic_len = 0;

	hde->hpp.name = field->name;
	hde->hpp.header = __sort__hde_header;
	hde->hpp.width = __sort__hde_width;
	hde->hpp.entry = __sort__hde_entry;
	hde->hpp.color = NULL;

	hde->hpp.cmp = __sort__hde_cmp;
	hde->hpp.collapse = __sort__hde_cmp;
	hde->hpp.sort = __sort__hde_cmp;
	hde->hpp.equal = __sort__hde_equal;
	hde->hpp.free = hde_free;

	INIT_LIST_HEAD(&hde->hpp.list);
	INIT_LIST_HEAD(&hde->hpp.sort_list);
	hde->hpp.elide = false;
	hde->hpp.len = 0;
	hde->hpp.user_len = 0;
	hde->hpp.level = level;

	return hde;
}

struct perf_hpp_fmt *perf_hpp_fmt__dup(struct perf_hpp_fmt *fmt)
{
	struct perf_hpp_fmt *new_fmt = NULL;

	if (perf_hpp__is_sort_entry(fmt)) {
		struct hpp_sort_entry *hse, *new_hse;

		hse = container_of(fmt, struct hpp_sort_entry, hpp);
		new_hse = memdup(hse, sizeof(*hse));
		if (new_hse)
			new_fmt = &new_hse->hpp;
	} else if (perf_hpp__is_dynamic_entry(fmt)) {
		struct hpp_dynamic_entry *hde, *new_hde;

		hde = container_of(fmt, struct hpp_dynamic_entry, hpp);
		new_hde = memdup(hde, sizeof(*hde));
		if (new_hde)
			new_fmt = &new_hde->hpp;
	} else {
		new_fmt = memdup(fmt, sizeof(*fmt));
	}

	INIT_LIST_HEAD(&new_fmt->list);
	INIT_LIST_HEAD(&new_fmt->sort_list);

	return new_fmt;
}

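/*
 * The helpers below parse a dynamic sort token of the form
 * [<event>.]<field>[/raw], e.g. "sched:sched_switch.prev_comm" or
 * "ptr/raw"; the special field names "trace_fields" and "*" expand to
 * every field of every tracepoint event, or of the named event.
 */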
static int parse_field_name(char *str, char **event, char **field, char **opt)
{
	char *event_name, *field_name, *opt_name;

	event_name = str;
	field_name = strchr(str, '.');

	if (field_name) {
		*field_name++ = '\0';
	} else {
		event_name = NULL;
		field_name = str;
	}

	opt_name = strchr(field_name, '/');
	if (opt_name)
		*opt_name++ = '\0';

	*event = event_name;
	*field = field_name;
	*opt = opt_name;

	return 0;
}

/* find the matching evsel for a given event name.  The event name can be:
 *   1. '%' + event index (e.g. '%1' for the first event)
 *   2. full event name (e.g. sched:sched_switch)
 *   3. partial event name (should not contain ':')
 */
static struct perf_evsel *find_evsel(struct perf_evlist *evlist, char *event_name)
{
	struct perf_evsel *evsel = NULL;
	struct perf_evsel *pos;
	bool full_name;

	/* case 1 */
	if (event_name[0] == '%') {
		int nr = strtol(event_name+1, NULL, 0);

		if (nr > evlist->nr_entries)
			return NULL;

		evsel = perf_evlist__first(evlist);
		while (--nr > 0)
			evsel = perf_evsel__next(evsel);

		return evsel;
	}

	full_name = !!strchr(event_name, ':');
	evlist__for_each_entry(evlist, pos) {
		/* case 2 */
		if (full_name && !strcmp(pos->name, event_name))
			return pos;
		/* case 3 */
		if (!full_name && strstr(pos->name, event_name)) {
			if (evsel) {
				pr_debug("'%s' event is ambiguous: it can be %s or %s\n",
					 event_name, evsel->name, pos->name);
				return NULL;
			}
			evsel = pos;
		}
	}

	return evsel;
}

static int __dynamic_dimension__add(struct perf_evsel *evsel,
				    struct format_field *field,
				    bool raw_trace, int level)
{
	struct hpp_dynamic_entry *hde;

	hde = __alloc_dynamic_entry(evsel, field, level);
	if (hde == NULL)
		return -ENOMEM;

	hde->raw_trace = raw_trace;

	perf_hpp__register_sort_field(&hde->hpp);
	return 0;
}

static int add_evsel_fields(struct perf_evsel *evsel, bool raw_trace, int level)
{
	int ret;
	struct format_field *field;

	field = evsel->tp_format->format.fields;
	while (field) {
		ret = __dynamic_dimension__add(evsel, field, raw_trace, level);
		if (ret < 0)
			return ret;

		field = field->next;
	}
	return 0;
}

static int add_all_dynamic_fields(struct perf_evlist *evlist, bool raw_trace,
				  int level)
{
	int ret;
	struct perf_evsel *evsel;

	evlist__for_each_entry(evlist, evsel) {
		if (evsel->attr.type != PERF_TYPE_TRACEPOINT)
			continue;

		ret = add_evsel_fields(evsel, raw_trace, level);
		if (ret < 0)
			return ret;
	}
	return 0;
}

static int add_all_matching_fields(struct perf_evlist *evlist,
				   char *field_name, bool raw_trace, int level)
{
	int ret = -ESRCH;
	struct perf_evsel *evsel;
	struct format_field *field;

	evlist__for_each_entry(evlist, evsel) {
		if (evsel->attr.type != PERF_TYPE_TRACEPOINT)
			continue;

		field = tep_find_any_field(evsel->tp_format, field_name);
		if (field == NULL)
			continue;

		ret = __dynamic_dimension__add(evsel, field, raw_trace, level);
		if (ret < 0)
			break;
	}
	return ret;
}

static int add_dynamic_entry(struct perf_evlist *evlist, const char *tok,
			     int level)
{
	char *str, *event_name, *field_name, *opt_name;
	struct perf_evsel *evsel;
	struct format_field *field;
	bool raw_trace = symbol_conf.raw_trace;
	int ret = 0;

	if (evlist == NULL)
		return -ENOENT;

	str = strdup(tok);
	if (str == NULL)
		return -ENOMEM;

	if (parse_field_name(str, &event_name, &field_name, &opt_name) < 0) {
		ret = -EINVAL;
		goto out;
	}

	if (opt_name) {
		if (strcmp(opt_name, "raw")) {
			pr_debug("unsupported field option %s\n", opt_name);
			ret = -EINVAL;
			goto out;
		}
		raw_trace = true;
	}

	if (!strcmp(field_name, "trace_fields")) {
		ret = add_all_dynamic_fields(evlist, raw_trace, level);
		goto out;
	}

	if (event_name == NULL) {
		ret = add_all_matching_fields(evlist, field_name, raw_trace, level);
		goto out;
	}

	evsel = find_evsel(evlist, event_name);
	if (evsel == NULL) {
		pr_debug("Cannot find event: %s\n", event_name);
		ret = -ENOENT;
		goto out;
	}

	if (evsel->attr.type != PERF_TYPE_TRACEPOINT) {
		pr_debug("%s is not a tracepoint event\n", event_name);
		ret = -EINVAL;
		goto out;
	}

	if (!strcmp(field_name, "*")) {
		ret = add_evsel_fields(evsel, raw_trace, level);
	} else {
		field = tep_find_any_field(evsel->tp_format, field_name);
		if (field == NULL) {
			pr_debug("Cannot find event field for %s.%s\n",
				 event_name, field_name);
			/* go through 'out' so that 'str' is not leaked */
			ret = -ENOENT;
			goto out;
		}

		ret = __dynamic_dimension__add(evsel, field, raw_trace, level);
	}

out:
	free(str);
	return ret;
}

static int __sort_dimension__add(struct sort_dimension *sd,
				 struct perf_hpp_list *list,
				 int level)
{
	if (sd->taken)
		return 0;

	if (__sort_dimension__add_hpp_sort(sd, list, level) < 0)
		return -1;

	if (sd->entry->se_collapse)
		list->need_collapse = 1;

	sd->taken = 1;

	return 0;
}

static int __hpp_dimension__add(struct hpp_dimension *hd,
				struct perf_hpp_list *list,
				int level)
{
	struct perf_hpp_fmt *fmt;

	if (hd->taken)
		return 0;

	fmt = __hpp_dimension__alloc_hpp(hd, level);
	if (!fmt)
		return -1;

	hd->taken = 1;
	perf_hpp_list__register_sort_field(list, fmt);
	return 0;
}

static int __sort_dimension__add_output(struct perf_hpp_list *list,
					struct sort_dimension *sd)
{
	if (sd->taken)
		return 0;

	if (__sort_dimension__add_hpp_output(sd, list) < 0)
		return -1;

	sd->taken = 1;
	return 0;
}

static int __hpp_dimension__add_output(struct perf_hpp_list *list,
				       struct hpp_dimension *hd)
{
	struct perf_hpp_fmt *fmt;

	if (hd->taken)
		return 0;

	fmt = __hpp_dimension__alloc_hpp(hd, 0);
	if (!fmt)
		return -1;

	hd->taken = 1;
	perf_hpp_list__column_register(list, fmt);
	return 0;
}

int hpp_dimension__add_output(unsigned col)
{
	BUG_ON(col >= PERF_HPP__MAX_INDEX);
	return __hpp_dimension__add_output(&perf_hpp_list, &hpp_sort_dimensions[col]);
}

int sort_dimension__add(struct perf_hpp_list *list, const char *tok,
			struct perf_evlist *evlist,
			int level)
{
	unsigned int i;

	for (i = 0; i < ARRAY_SIZE(common_sort_dimensions); i++) {
		struct sort_dimension *sd = &common_sort_dimensions[i];

		if (strncasecmp(tok, sd->name, strlen(tok)))
			continue;

		if (sd->entry == &sort_parent) {
			int ret = regcomp(&parent_regex, parent_pattern, REG_EXTENDED);
			if (ret) {
				char err[BUFSIZ];

				regerror(ret, &parent_regex, err, sizeof(err));
				pr_err("Invalid regex: %s\n%s", parent_pattern, err);
				return -EINVAL;
			}
			list->parent = 1;
		} else if (sd->entry == &sort_sym) {
			list->sym = 1;
			/*
			 * perf diff displays the performance difference among
			 * two or more perf.data files.  Those files could come
			 * from different binaries, so we should not compare
			 * their IPs, but the symbol names.
			 */
			if (sort__mode == SORT_MODE__DIFF)
				sd->entry->se_collapse = sort__sym_sort;
		} else if (sd->entry == &sort_dso) {
			list->dso = 1;
		} else if (sd->entry == &sort_socket) {
			list->socket = 1;
		} else if (sd->entry == &sort_thread) {
			list->thread = 1;
		} else if (sd->entry == &sort_comm) {
			list->comm = 1;
		}

		return __sort_dimension__add(sd, list, level);
	}

	for (i = 0; i < ARRAY_SIZE(hpp_sort_dimensions); i++) {
		struct hpp_dimension *hd = &hpp_sort_dimensions[i];

		if (strncasecmp(tok, hd->name, strlen(tok)))
			continue;

		return __hpp_dimension__add(hd, list, level);
	}

	for (i = 0; i < ARRAY_SIZE(bstack_sort_dimensions); i++) {
		struct sort_dimension *sd = &bstack_sort_dimensions[i];

		if (strncasecmp(tok, sd->name, strlen(tok)))
			continue;

		if (sort__mode != SORT_MODE__BRANCH)
			return -EINVAL;

		if (sd->entry == &sort_sym_from || sd->entry == &sort_sym_to)
			list->sym = 1;

		__sort_dimension__add(sd, list, level);
		return 0;
	}

	for (i = 0; i < ARRAY_SIZE(memory_sort_dimensions); i++) {
		struct sort_dimension *sd = &memory_sort_dimensions[i];

		if (strncasecmp(tok, sd->name, strlen(tok)))
			continue;

		if (sort__mode != SORT_MODE__MEMORY)
			return -EINVAL;

		if (sd->entry == &sort_mem_dcacheline && cacheline_size() == 0)
			return -EINVAL;

		if (sd->entry == &sort_mem_daddr_sym)
			list->sym = 1;

		__sort_dimension__add(sd, list, level);
		return 0;
	}

	if (!add_dynamic_entry(evlist, tok, level))
		return 0;

	return -ESRCH;
}

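/*
 * setup_sort_list() splits the --sort string on ',', ' ', '{' and '}'.
 * Braces group keys onto the same hierarchy level, so (for example)
 * "comm,{dso,symbol}" keeps dso and symbol on one shared level below
 * comm, which matters for --hierarchy output; outside of braces each
 * key gets its own, deeper level.
 */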
static int setup_sort_list(struct perf_hpp_list *list, char *str,
			   struct perf_evlist *evlist)
{
	char *tmp, *tok;
	int ret = 0;
	int level = 0;
	int next_level = 1;
	bool in_group = false;

	do {
		tok = str;
		tmp = strpbrk(str, "{}, ");
		if (tmp) {
			if (in_group)
				next_level = level;
			else
				next_level = level + 1;

			if (*tmp == '{')
				in_group = true;
			else if (*tmp == '}')
				in_group = false;

			*tmp = '\0';
			str = tmp + 1;
		}

		if (*tok) {
			ret = sort_dimension__add(list, tok, evlist, level);
			if (ret == -EINVAL) {
				if (!cacheline_size() && !strncasecmp(tok, "dcacheline", strlen(tok)))
					pr_err("The \"dcacheline\" --sort key needs to know the cacheline size and it couldn't be determined on this system");
				else
					pr_err("Invalid --sort key: `%s'", tok);
				break;
			} else if (ret == -ESRCH) {
				pr_err("Unknown --sort key: `%s'", tok);
				break;
			}
		}

		level = next_level;
	} while (tmp);

	return ret;
}

static const char *get_default_sort_order(struct perf_evlist *evlist)
{
	const char *default_sort_orders[] = {
		default_sort_order,
		default_branch_sort_order,
		default_mem_sort_order,
		default_top_sort_order,
		default_diff_sort_order,
		default_tracepoint_sort_order,
	};
	bool use_trace = true;
	struct perf_evsel *evsel;

	BUG_ON(sort__mode >= ARRAY_SIZE(default_sort_orders));

	if (evlist == NULL || perf_evlist__empty(evlist))
		goto out_no_evlist;

	evlist__for_each_entry(evlist, evsel) {
		if (evsel->attr.type != PERF_TYPE_TRACEPOINT) {
			use_trace = false;
			break;
		}
	}

	if (use_trace) {
		sort__mode = SORT_MODE__TRACEPOINT;
		if (symbol_conf.raw_trace)
			return "trace_fields";
	}
out_no_evlist:
	return default_sort_orders[sort__mode];
}

static int setup_sort_order(struct perf_evlist *evlist)
{
	char *new_sort_order;

	/*
	 * Append a '+'-prefixed sort order to the default sort
	 * order string.
	 */
	if (!sort_order || is_strict_order(sort_order))
		return 0;

	if (sort_order[1] == '\0') {
		pr_err("Invalid --sort key: `+'");
		return -EINVAL;
	}

	/*
	 * We allocate a new sort_order string, but we never free it,
	 * because it's checked over the rest of the code.
	 */
	if (asprintf(&new_sort_order, "%s,%s",
		     get_default_sort_order(evlist), sort_order + 1) < 0) {
		pr_err("Not enough memory to set up --sort");
		return -ENOMEM;
	}

	sort_order = new_sort_order;
	return 0;
}

/*
 * Prepend the 'pre,' prefix to 'str' if 'pre' is
 * not already part of 'str'.
 */
static char *prefix_if_not_in(const char *pre, char *str)
{
	char *n;

	if (!str || strstr(str, pre))
		return str;

	if (asprintf(&n, "%s,%s", pre, str) < 0)
		n = NULL;

	free(str);
	return n;
}

static char *setup_overhead(char *keys)
{
	if (sort__mode == SORT_MODE__DIFF)
		return keys;

	keys = prefix_if_not_in("overhead", keys);

	if (symbol_conf.cumulate_callchain)
		keys = prefix_if_not_in("overhead_children", keys);

	return keys;
}

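/*
 * __setup_sorting() resolves the final sort key string: an explicit
 * --sort value (possibly '+'-extended above), otherwise the per-mode
 * default, with the overhead column(s) prepended unless the user pinned
 * the field order with a strict (non-'+') --fields value.
 */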
static int __setup_sorting(struct perf_evlist *evlist)
{
	char *str;
	const char *sort_keys;
	int ret = 0;

	ret = setup_sort_order(evlist);
	if (ret)
		return ret;

	sort_keys = sort_order;
	if (sort_keys == NULL) {
		if (is_strict_order(field_order)) {
			/*
			 * If the user specified a field order but no sort
			 * order, we'll honor it and not add default sort
			 * orders.
			 */
			return 0;
		}

		sort_keys = get_default_sort_order(evlist);
	}

	str = strdup(sort_keys);
	if (str == NULL) {
		pr_err("Not enough memory to setup sort keys");
		return -ENOMEM;
	}

	/*
	 * Prepend overhead fields for backward compatibility.
	 */
	if (!is_strict_order(field_order)) {
		str = setup_overhead(str);
		if (str == NULL) {
			pr_err("Not enough memory to setup overhead keys");
			return -ENOMEM;
		}
	}

	ret = setup_sort_list(&perf_hpp_list, str, evlist);

	free(str);
	return ret;
}

void perf_hpp__set_elide(int idx, bool elide)
{
	struct perf_hpp_fmt *fmt;
	struct hpp_sort_entry *hse;

	perf_hpp_list__for_each_format(&perf_hpp_list, fmt) {
		if (!perf_hpp__is_sort_entry(fmt))
			continue;

		hse = container_of(fmt, struct hpp_sort_entry, hpp);
		if (hse->se->se_width_idx == idx) {
			fmt->elide = elide;
			break;
		}
	}
}

static bool __get_elide(struct strlist *list, const char *list_name, FILE *fp)
{
	if (list && strlist__nr_entries(list) == 1) {
		if (fp != NULL)
			fprintf(fp, "# %s: %s\n", list_name,
				strlist__entry(list, 0)->s);
		return true;
	}
	return false;
}

static bool get_elide(int idx, FILE *output)
{
	switch (idx) {
	case HISTC_SYMBOL:
		return __get_elide(symbol_conf.sym_list, "symbol", output);
	case HISTC_DSO:
		return __get_elide(symbol_conf.dso_list, "dso", output);
	case HISTC_COMM:
		return __get_elide(symbol_conf.comm_list, "comm", output);
	default:
		break;
	}

	if (sort__mode != SORT_MODE__BRANCH)
		return false;

	switch (idx) {
	case HISTC_SYMBOL_FROM:
		return __get_elide(symbol_conf.sym_from_list, "sym_from", output);
	case HISTC_SYMBOL_TO:
		return __get_elide(symbol_conf.sym_to_list, "sym_to", output);
	case HISTC_DSO_FROM:
		return __get_elide(symbol_conf.dso_from_list, "dso_from", output);
	case HISTC_DSO_TO:
		return __get_elide(symbol_conf.dso_to_list, "dso_to", output);
	default:
		break;
	}

	return false;
}

void sort__setup_elide(FILE *output)
{
	struct perf_hpp_fmt *fmt;
	struct hpp_sort_entry *hse;

	perf_hpp_list__for_each_format(&perf_hpp_list, fmt) {
		if (!perf_hpp__is_sort_entry(fmt))
			continue;

		hse = container_of(fmt, struct hpp_sort_entry, hpp);
		fmt->elide = get_elide(hse->se->se_width_idx, output);
	}

	/*
	 * It makes no sense to elide all of the sort entries.
	 * Just revert them so they show up again.
	 */
	perf_hpp_list__for_each_format(&perf_hpp_list, fmt) {
		if (!perf_hpp__is_sort_entry(fmt))
			continue;

		if (!fmt->elide)
			return;
	}

	perf_hpp_list__for_each_format(&perf_hpp_list, fmt) {
		if (!perf_hpp__is_sort_entry(fmt))
			continue;

		fmt->elide = false;
	}
}

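/*
 * output_field_add()/setup_output_list() handle the --fields option:
 * tokens are matched case-insensitively (prefixes are accepted because
 * of the strncasecmp(tok, ..., strlen(tok)) test) against the same
 * dimension tables as --sort, but they only register output columns,
 * not sort keys.
 */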
int output_field_add(struct perf_hpp_list *list, char *tok)
{
	unsigned int i;

	for (i = 0; i < ARRAY_SIZE(common_sort_dimensions); i++) {
		struct sort_dimension *sd = &common_sort_dimensions[i];

		if (strncasecmp(tok, sd->name, strlen(tok)))
			continue;

		return __sort_dimension__add_output(list, sd);
	}

	for (i = 0; i < ARRAY_SIZE(hpp_sort_dimensions); i++) {
		struct hpp_dimension *hd = &hpp_sort_dimensions[i];

		if (strncasecmp(tok, hd->name, strlen(tok)))
			continue;

		return __hpp_dimension__add_output(list, hd);
	}

	for (i = 0; i < ARRAY_SIZE(bstack_sort_dimensions); i++) {
		struct sort_dimension *sd = &bstack_sort_dimensions[i];

		if (strncasecmp(tok, sd->name, strlen(tok)))
			continue;

		return __sort_dimension__add_output(list, sd);
	}

	for (i = 0; i < ARRAY_SIZE(memory_sort_dimensions); i++) {
		struct sort_dimension *sd = &memory_sort_dimensions[i];

		if (strncasecmp(tok, sd->name, strlen(tok)))
			continue;

		return __sort_dimension__add_output(list, sd);
	}

	return -ESRCH;
}

static int setup_output_list(struct perf_hpp_list *list, char *str)
{
	char *tmp, *tok;
	int ret = 0;

	for (tok = strtok_r(str, ", ", &tmp);
			tok; tok = strtok_r(NULL, ", ", &tmp)) {
		ret = output_field_add(list, tok);
		if (ret == -EINVAL) {
			ui__error("Invalid --fields key: `%s'", tok);
			break;
		} else if (ret == -ESRCH) {
			ui__error("Unknown --fields key: `%s'", tok);
			break;
		}
	}

	return ret;
}

void reset_dimensions(void)
{
	unsigned int i;

	for (i = 0; i < ARRAY_SIZE(common_sort_dimensions); i++)
		common_sort_dimensions[i].taken = 0;

	for (i = 0; i < ARRAY_SIZE(hpp_sort_dimensions); i++)
		hpp_sort_dimensions[i].taken = 0;

	for (i = 0; i < ARRAY_SIZE(bstack_sort_dimensions); i++)
		bstack_sort_dimensions[i].taken = 0;

	for (i = 0; i < ARRAY_SIZE(memory_sort_dimensions); i++)
		memory_sort_dimensions[i].taken = 0;
}

bool is_strict_order(const char *order)
{
	return order && (*order != '+');
}

static int __setup_output_field(void)
{
	char *str, *strp;
	int ret = -EINVAL;

	if (field_order == NULL)
		return 0;

	strp = str = strdup(field_order);
	if (str == NULL) {
		pr_err("Not enough memory to setup output fields");
		return -ENOMEM;
	}

	if (!is_strict_order(field_order))
		strp++;

	if (!strlen(strp)) {
		pr_err("Invalid --fields key: `+'");
		goto out;
	}

	ret = setup_output_list(&perf_hpp_list, strp);

out:
	free(str);
	return ret;
}

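/*
 * setup_sorting() ties everything together for a session: sort keys are
 * parsed first, "parent" is force-added when a custom parent pattern was
 * given, then the output fields are parsed and the two lists are
 * cross-copied so every output column can sort and every sort key can be
 * displayed.
 */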
int setup_sorting(struct perf_evlist *evlist)
{
	int err;

	err = __setup_sorting(evlist);
	if (err < 0)
		return err;

	if (parent_pattern != default_parent_pattern) {
		err = sort_dimension__add(&perf_hpp_list, "parent", evlist, -1);
		if (err < 0)
			return err;
	}

	reset_dimensions();

	/*
	 * perf diff doesn't use default hpp output fields.
	 */
	if (sort__mode != SORT_MODE__DIFF)
		perf_hpp__init();

	err = __setup_output_field();
	if (err < 0)
		return err;

	/* copy sort keys to output fields */
	perf_hpp__setup_output_field(&perf_hpp_list);
	/* and then copy output fields to sort keys */
	perf_hpp__append_sort_keys(&perf_hpp_list);

	/* setup hists-specific output fields */
	if (perf_hpp__setup_hists_formats(&perf_hpp_list, evlist) < 0)
		return -1;

	return 0;
}

void reset_output_field(void)
{
	perf_hpp_list.need_collapse = 0;
	perf_hpp_list.parent = 0;
	perf_hpp_list.sym = 0;
	perf_hpp_list.dso = 0;
	field_order = NULL;
	sort_order = NULL;

	reset_dimensions();
	perf_hpp__reset_output_field(&perf_hpp_list);
}