trace_output.c 39 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
37047057067077087097107117127137147157167177187197207217227237247257267277287297307317327337347357367377387397407417427437447457467477487497507517527537547557567577587597607617627637647657667677687697707717727737747757767777787797807817827837847857867877887897907917927937947957967977987998008018028038048058068078088098108118128138148158168178188198208218228238248258268278288298308318328338348358368378388398408418428438448458468478488498508518528538548558568578588598608618628638648658668678688698708718728738748758768778788798808818828838848858868878888898908918928938948958968978988999009019029039049059069079089099109119129139149159169179189199209219229239249259269279289299309319329339349359369379389399409419429439449459469479489499509519529539549559569579589599609619629639649659669679689699709719729739749759769779789799809819829839849859869879889899909919929939949959969979989991000100110021003100410051006100710081009101010111012101310141015101610171018101910201021102210231024102510261027102810291030103110321033103410351036103710381039104010411042104310441045104610471048104910501051105210531054105510561057105810591060106110621063106410651066106710681069107010711072107310741075107610771078107910801081108210831084108510861087108810891090109110921093109410951096109710981099110011011102110311041105110611071108110911101111111211131114111511161117111811191120112111221123112411251126112711281129113011311132113311341135113611371138113911401141114211431144114511461147114811491150115111521153115411551156115711581159116011611162116311641165116611671168116911701171117211731174117511761177117811791180118111821183118411851186118711881189119011911192119311941195119611971198119912001201120212031204120512061207120812091210121112121213121412151216121712181219122012211222122312241225122612271228122912301231123212331234123512361237123812391240124112421243124412451246124712481249125012511252125312541255125612571258125912601261126212631264126512661267126812691270127112721273127412751276127
71278127912801281128212831284128512861287128812891290129112921293129412951296129712981299130013011302130313041305130613071308130913101311131213131314131513161317131813191320132113221323132413251326132713281329133013311332133313341335133613371338133913401341134213431344134513461347134813491350135113521353135413551356135713581359136013611362136313641365136613671368136913701371137213731374137513761377137813791380138113821383138413851386138713881389139013911392139313941395139613971398139914001401140214031404140514061407140814091410141114121413141414151416141714181419142014211422142314241425142614271428142914301431143214331434143514361437143814391440144114421443144414451446144714481449145014511452145314541455145614571458145914601461146214631464146514661467146814691470147114721473147414751476147714781479148014811482148314841485148614871488148914901491149214931494149514961497149814991500150115021503150415051506150715081509151015111512151315141515151615171518151915201521152215231524152515261527152815291530153115321533153415351536153715381539154015411542154315441545154615471548154915501551155215531554155515561557155815591560156115621563156415651566156715681569157015711572157315741575157615771578157915801581158215831584158515861587158815891590159115921593159415951596159715981599160016011602160316041605160616071608160916101611161216131614161516161617161816191620162116221623162416251626162716281629163016311632163316341635163616371638163916401641164216431644164516461647164816491650165116521653165416551656165716581659166016611662166316641665166616671668166916701671167216731674167516761677167816791680168116821683168416851686168716881689169016911692169316941695169616971698169917001701170217031704170517061707170817091710171117121713171417151716171717181719172017211722172317241725172617271728172917301731173217331734173517361737173817391740
  1. // SPDX-License-Identifier: GPL-2.0
  2. /*
  3. * trace_output.c
  4. *
  5. * Copyright (C) 2008 Red Hat Inc, Steven Rostedt <srostedt@redhat.com>
  6. *
  7. */
  8. #include <linux/module.h>
  9. #include <linux/mutex.h>
  10. #include <linux/ftrace.h>
  11. #include <linux/kprobes.h>
  12. #include <linux/sched/clock.h>
  13. #include <linux/sched/mm.h>
  14. #include <linux/idr.h>
  15. #include "trace_output.h"
/* must be a power of 2 */
#define EVENT_HASHSIZE	128

/* Serializes registration/lookup of trace event output handlers. */
DECLARE_RWSEM(trace_event_sem);

/*
 * Hash table of registered trace_events, bucketed by
 * type & (EVENT_HASHSIZE - 1); see ftrace_find_event().
 */
static struct hlist_head event_hash[EVENT_HASHSIZE] __read_mostly;
  20. enum print_line_t trace_print_bputs_msg_only(struct trace_iterator *iter)
  21. {
  22. struct trace_seq *s = &iter->seq;
  23. struct trace_entry *entry = iter->ent;
  24. struct bputs_entry *field;
  25. trace_assign_type(field, entry);
  26. trace_seq_puts(s, field->str);
  27. return trace_handle_return(s);
  28. }
  29. enum print_line_t trace_print_bprintk_msg_only(struct trace_iterator *iter)
  30. {
  31. struct trace_seq *s = &iter->seq;
  32. struct trace_entry *entry = iter->ent;
  33. struct bprint_entry *field;
  34. trace_assign_type(field, entry);
  35. trace_seq_bprintf(s, field->fmt, field->buf);
  36. return trace_handle_return(s);
  37. }
  38. enum print_line_t trace_print_printk_msg_only(struct trace_iterator *iter)
  39. {
  40. struct trace_seq *s = &iter->seq;
  41. struct trace_entry *entry = iter->ent;
  42. struct print_entry *field;
  43. trace_assign_type(field, entry);
  44. trace_seq_puts(s, field->buf);
  45. return trace_handle_return(s);
  46. }
  47. const char *
  48. trace_print_flags_seq(struct trace_seq *p, const char *delim,
  49. unsigned long flags,
  50. const struct trace_print_flags *flag_array)
  51. {
  52. unsigned long mask;
  53. const char *str;
  54. const char *ret = trace_seq_buffer_ptr(p);
  55. int i, first = 1;
  56. for (i = 0; flag_array[i].name && flags; i++) {
  57. mask = flag_array[i].mask;
  58. if ((flags & mask) != mask)
  59. continue;
  60. str = flag_array[i].name;
  61. flags &= ~mask;
  62. if (!first && delim)
  63. trace_seq_puts(p, delim);
  64. else
  65. first = 0;
  66. trace_seq_puts(p, str);
  67. }
  68. /* check for left over flags */
  69. if (flags) {
  70. if (!first && delim)
  71. trace_seq_puts(p, delim);
  72. trace_seq_printf(p, "0x%lx", flags);
  73. }
  74. trace_seq_putc(p, 0);
  75. return ret;
  76. }
  77. EXPORT_SYMBOL(trace_print_flags_seq);
  78. const char *
  79. trace_print_symbols_seq(struct trace_seq *p, unsigned long val,
  80. const struct trace_print_flags *symbol_array)
  81. {
  82. int i;
  83. const char *ret = trace_seq_buffer_ptr(p);
  84. for (i = 0; symbol_array[i].name; i++) {
  85. if (val != symbol_array[i].mask)
  86. continue;
  87. trace_seq_puts(p, symbol_array[i].name);
  88. break;
  89. }
  90. if (ret == (const char *)(trace_seq_buffer_ptr(p)))
  91. trace_seq_printf(p, "0x%lx", val);
  92. trace_seq_putc(p, 0);
  93. return ret;
  94. }
  95. EXPORT_SYMBOL(trace_print_symbols_seq);
  96. #if BITS_PER_LONG == 32
/*
 * 32-bit-kernel variant of trace_print_flags_seq(): same logic, but the
 * flags word is 64 bits wide while 'unsigned long' is only 32 bits
 * (compiled only when BITS_PER_LONG == 32).
 */
const char *
trace_print_flags_seq_u64(struct trace_seq *p, const char *delim,
			  unsigned long long flags,
			  const struct trace_print_flags_u64 *flag_array)
{
	unsigned long long mask;
	const char *str;
	/* Start of the output; returned to the caller as a C string. */
	const char *ret = trace_seq_buffer_ptr(p);
	int i, first = 1;

	for (i = 0; flag_array[i].name && flags; i++) {
		mask = flag_array[i].mask;
		/* Only report masks that are fully set in @flags. */
		if ((flags & mask) != mask)
			continue;

		str = flag_array[i].name;
		flags &= ~mask;
		/* Separate consecutive names with @delim (when non-NULL). */
		if (!first && delim)
			trace_seq_puts(p, delim);
		else
			first = 0;
		trace_seq_puts(p, str);
	}

	/* check for left over flags */
	if (flags) {
		if (!first && delim)
			trace_seq_puts(p, delim);
		trace_seq_printf(p, "0x%llx", flags);
	}

	/* NUL-terminate so the result can be used as a plain C string. */
	trace_seq_putc(p, 0);

	return ret;
}
EXPORT_SYMBOL(trace_print_flags_seq_u64);
/*
 * 32-bit-kernel variant of trace_print_symbols_seq() for 64-bit values
 * (compiled only when BITS_PER_LONG == 32).
 */
const char *
trace_print_symbols_seq_u64(struct trace_seq *p, unsigned long long val,
			    const struct trace_print_flags_u64 *symbol_array)
{
	int i;
	/* Start of the output; returned to the caller as a C string. */
	const char *ret = trace_seq_buffer_ptr(p);

	/* Print the name of the first entry whose value matches exactly. */
	for (i = 0; symbol_array[i].name; i++) {
		if (val != symbol_array[i].mask)
			continue;

		trace_seq_puts(p, symbol_array[i].name);
		break;
	}

	/* Nothing was written, so there was no match: print raw hex. */
	if (ret == (const char *)(trace_seq_buffer_ptr(p)))
		trace_seq_printf(p, "0x%llx", val);

	/* NUL-terminate the string. */
	trace_seq_putc(p, 0);

	return ret;
}
EXPORT_SYMBOL(trace_print_symbols_seq_u64);
  146. #endif
  147. const char *
  148. trace_print_bitmask_seq(struct trace_seq *p, void *bitmask_ptr,
  149. unsigned int bitmask_size)
  150. {
  151. const char *ret = trace_seq_buffer_ptr(p);
  152. trace_seq_bitmask(p, bitmask_ptr, bitmask_size * 8);
  153. trace_seq_putc(p, 0);
  154. return ret;
  155. }
  156. EXPORT_SYMBOL_GPL(trace_print_bitmask_seq);
  157. /**
  158. * trace_print_hex_seq - print buffer as hex sequence
  159. * @p: trace seq struct to write to
  160. * @buf: The buffer to print
  161. * @buf_len: Length of @buf in bytes
  162. * @concatenate: Print @buf as single hex string or with spacing
  163. *
  164. * Prints the passed buffer as a hex sequence either as a whole,
  165. * single hex string if @concatenate is true or with spacing after
  166. * each byte in case @concatenate is false.
  167. */
  168. const char *
  169. trace_print_hex_seq(struct trace_seq *p, const unsigned char *buf, int buf_len,
  170. bool concatenate)
  171. {
  172. int i;
  173. const char *ret = trace_seq_buffer_ptr(p);
  174. const char *fmt = concatenate ? "%*phN" : "%*ph";
  175. for (i = 0; i < buf_len; i += 16) {
  176. if (!concatenate && i != 0)
  177. trace_seq_putc(p, ' ');
  178. trace_seq_printf(p, fmt, min(buf_len - i, 16), &buf[i]);
  179. }
  180. trace_seq_putc(p, 0);
  181. return ret;
  182. }
  183. EXPORT_SYMBOL(trace_print_hex_seq);
/*
 * Print @count elements of @el_size bytes each from @buf as a
 * brace-enclosed, comma-separated list of hex values, e.g. "{0x1,0x2}".
 * NUL-terminated; returns a pointer to the start of the output.
 */
const char *
trace_print_array_seq(struct trace_seq *p, const void *buf, int count,
		      size_t el_size)
{
	const char *ret = trace_seq_buffer_ptr(p);
	const char *prefix = "";
	void *ptr = (void *)buf;
	size_t buf_len = count * el_size;

	trace_seq_putc(p, '{');

	while (ptr < buf + buf_len) {
		switch (el_size) {
		case 1:
			trace_seq_printf(p, "%s0x%x", prefix,
					 *(u8 *)ptr);
			break;
		case 2:
			trace_seq_printf(p, "%s0x%x", prefix,
					 *(u16 *)ptr);
			break;
		case 4:
			trace_seq_printf(p, "%s0x%x", prefix,
					 *(u32 *)ptr);
			break;
		case 8:
			trace_seq_printf(p, "%s0x%llx", prefix,
					 *(u64 *)ptr);
			break;
		default:
			trace_seq_printf(p, "BAD SIZE:%zu 0x%x", el_size,
					 *(u8 *)ptr);
			/* Fall back to byte steps so the loop terminates. */
			el_size = 1;
		}
		/* Every element after the first is comma-separated. */
		prefix = ",";
		ptr += el_size;
	}

	trace_seq_putc(p, '}');
	trace_seq_putc(p, 0);

	return ret;
}
EXPORT_SYMBOL(trace_print_array_seq);
  224. const char *
  225. trace_print_hex_dump_seq(struct trace_seq *p, const char *prefix_str,
  226. int prefix_type, int rowsize, int groupsize,
  227. const void *buf, size_t len, bool ascii)
  228. {
  229. const char *ret = trace_seq_buffer_ptr(p);
  230. trace_seq_putc(p, '\n');
  231. trace_seq_hex_dump(p, prefix_str, prefix_type,
  232. rowsize, groupsize, buf, len, ascii);
  233. trace_seq_putc(p, 0);
  234. return ret;
  235. }
  236. EXPORT_SYMBOL(trace_print_hex_dump_seq);
/*
 * Common prep for "raw" event output: verify the ring-buffer entry
 * matches the event being printed, reset the scratch seq, and emit the
 * "name: " prefix. Returns a trace_handle_return() status.
 */
int trace_raw_output_prep(struct trace_iterator *iter,
			  struct trace_event *trace_event)
{
	struct trace_event_call *event;
	struct trace_seq *s = &iter->seq;
	struct trace_seq *p = &iter->tmp_seq;
	struct trace_entry *entry;

	event = container_of(trace_event, struct trace_event_call, event);
	entry = iter->ent;

	/* The entry in the buffer must belong to this event type. */
	if (entry->type != event->event.type) {
		WARN_ON_ONCE(1);
		return TRACE_TYPE_UNHANDLED;
	}

	/* Reset the temporary seq used for formatting helpers. */
	trace_seq_init(p);
	trace_seq_printf(s, "%s: ", trace_event_name(event));

	return trace_handle_return(s);
}
EXPORT_SYMBOL(trace_raw_output_prep);
/*
 * printf-style output into the iterator's seq. Honors the event filter
 * (ignore_event()) and passes the format through trace_event_format()
 * before printing.
 */
void trace_event_printf(struct trace_iterator *iter, const char *fmt, ...)
{
	struct trace_seq *s = &iter->seq;
	va_list ap;

	/* Nothing to do if this event is being ignored. */
	if (ignore_event(iter))
		return;

	va_start(ap, fmt);
	trace_seq_vprintf(s, trace_event_format(iter, fmt), ap);
	va_end(ap);
}
EXPORT_SYMBOL(trace_event_printf);
/* Emit "name: " followed by the vprintf-formatted event payload. */
static __printf(3, 0)
int trace_output_raw(struct trace_iterator *iter, char *name,
		     char *fmt, va_list ap)
{
	struct trace_seq *s = &iter->seq;

	trace_seq_printf(s, "%s: ", name);
	trace_seq_vprintf(s, trace_event_format(iter, fmt), ap);

	return trace_handle_return(s);
}
  275. int trace_output_call(struct trace_iterator *iter, char *name, char *fmt, ...)
  276. {
  277. va_list ap;
  278. int ret;
  279. va_start(ap, fmt);
  280. ret = trace_output_raw(iter, name, fmt, ap);
  281. va_end(ap);
  282. return ret;
  283. }
  284. EXPORT_SYMBOL_GPL(trace_output_call);
  285. static inline const char *kretprobed(const char *name, unsigned long addr)
  286. {
  287. if (is_kretprobe_trampoline(addr))
  288. return "[unknown/kretprobe'd]";
  289. return name;
  290. }
/*
 * Print the symbol for @address into @s (with offset information when
 * @offset is true). Falls back to printing the raw address when
 * kallsyms is not configured or the lookup produced an empty name.
 */
void
trace_seq_print_sym(struct trace_seq *s, unsigned long address, bool offset)
{
#ifdef CONFIG_KALLSYMS
	char str[KSYM_SYMBOL_LEN];
	const char *name;

	if (offset)
		sprint_symbol(str, address);
	else
		kallsyms_lookup(address, NULL, NULL, NULL, str);

	/* Mask out bogus lookups inside the kretprobe trampoline. */
	name = kretprobed(str, address);

	if (name && strlen(name)) {
		trace_seq_puts(s, name);
		return;
	}
#endif
	trace_seq_printf(s, "0x%08lx", address);
}
  309. #ifndef CONFIG_64BIT
  310. # define IP_FMT "%08lx"
  311. #else
  312. # define IP_FMT "%016lx"
  313. #endif
/*
 * Print a user-space address. When @mm is available, resolve @ip
 * against its vma list and print "path[+0x<offset>]"; optionally (or
 * when no backing file was found) append the raw address. Returns 0
 * once the seq has overflowed, nonzero otherwise.
 */
static int seq_print_user_ip(struct trace_seq *s, struct mm_struct *mm,
			     unsigned long ip, unsigned long sym_flags)
{
	struct file *file = NULL;
	unsigned long vmstart = 0;
	int ret = 1;

	if (s->full)
		return 0;

	if (mm) {
		const struct vm_area_struct *vma;

		mmap_read_lock(mm);
		vma = find_vma(mm, ip);
		if (vma) {
			file = vma->vm_file;
			vmstart = vma->vm_start;
		}
		if (file) {
			ret = trace_seq_path(s, file_user_path(file));
			if (ret)
				/* Offset of @ip within the mapping. */
				trace_seq_printf(s, "[+0x%lx]",
						 ip - vmstart);
		}
		mmap_read_unlock(mm);
	}
	/* Raw address: requested explicitly, or there was no file to name. */
	if (ret && ((sym_flags & TRACE_ITER_SYM_ADDR) || !file))
		trace_seq_printf(s, " <" IP_FMT ">", ip);

	return !trace_seq_has_overflowed(s);
}
  342. int
  343. seq_print_ip_sym(struct trace_seq *s, unsigned long ip, unsigned long sym_flags)
  344. {
  345. if (!ip) {
  346. trace_seq_putc(s, '0');
  347. goto out;
  348. }
  349. trace_seq_print_sym(s, ip, sym_flags & TRACE_ITER_SYM_OFFSET);
  350. if (sym_flags & TRACE_ITER_SYM_ADDR)
  351. trace_seq_printf(s, " <" IP_FMT ">", ip);
  352. out:
  353. return !trace_seq_has_overflowed(s);
  354. }
/**
 * trace_print_lat_fmt - print the irq, preempt and lockdep fields
 * @s: trace seq struct to write to
 * @entry: The trace entry field from the ring buffer
 *
 * Prints the generic fields of irqs off, in hard or softirq, preempt
 * count.
 */
int trace_print_lat_fmt(struct trace_seq *s, struct trace_entry *entry)
{
	char hardsoft_irq;
	char need_resched;
	char irqs_off;
	int hardirq;
	int softirq;
	int bh_off;
	int nmi;

	nmi = entry->flags & TRACE_FLAG_NMI;
	hardirq = entry->flags & TRACE_FLAG_HARDIRQ;
	softirq = entry->flags & TRACE_FLAG_SOFTIRQ;
	bh_off = entry->flags & TRACE_FLAG_BH_OFF;

	/*
	 * Column 1: 'D' = irqs and BH off, 'd' = irqs off, 'b' = BH off,
	 * 'X' = irq state not reported, '.' = nothing disabled.
	 */
	irqs_off =
		(entry->flags & TRACE_FLAG_IRQS_OFF && bh_off) ? 'D' :
		(entry->flags & TRACE_FLAG_IRQS_OFF) ? 'd' :
		bh_off ? 'b' :
		(entry->flags & TRACE_FLAG_IRQS_NOSUPPORT) ? 'X' :
		'.';

	/* Column 2: need-resched state ('N', 'n', 'p' or '.'). */
	switch (entry->flags & (TRACE_FLAG_NEED_RESCHED |
				TRACE_FLAG_PREEMPT_RESCHED)) {
	case TRACE_FLAG_NEED_RESCHED | TRACE_FLAG_PREEMPT_RESCHED:
		need_resched = 'N';
		break;
	case TRACE_FLAG_NEED_RESCHED:
		need_resched = 'n';
		break;
	case TRACE_FLAG_PREEMPT_RESCHED:
		need_resched = 'p';
		break;
	default:
		need_resched = '.';
		break;
	}

	/* Column 3: interrupt context (NMI/hardirq/softirq combinations). */
	hardsoft_irq =
		(nmi && hardirq) ? 'Z' :
		nmi ? 'z' :
		(hardirq && softirq) ? 'H' :
		hardirq ? 'h' :
		softirq ? 's' :
		'.' ;

	trace_seq_printf(s, "%c%c%c",
			 irqs_off, need_resched, hardsoft_irq);

	/* Column 4: low nibble of the recorded preempt_count, '.' if zero. */
	if (entry->preempt_count & 0xf)
		trace_seq_printf(s, "%x", entry->preempt_count & 0xf);
	else
		trace_seq_putc(s, '.');

	/* Column 5: high nibble of the recorded preempt_count, '.' if zero. */
	if (entry->preempt_count & 0xf0)
		trace_seq_printf(s, "%x", entry->preempt_count >> 4);
	else
		trace_seq_putc(s, '.');

	return !trace_seq_has_overflowed(s);
}
  416. static int
  417. lat_print_generic(struct trace_seq *s, struct trace_entry *entry, int cpu)
  418. {
  419. char comm[TASK_COMM_LEN];
  420. trace_find_cmdline(entry->pid, comm);
  421. trace_seq_printf(s, "%8.8s-%-7d %3d",
  422. comm, entry->pid, cpu);
  423. return trace_print_lat_fmt(s, entry);
  424. }
#undef MARK
#define MARK(v, s) {.val = v, .sym = s}
/* trace overhead mark */
static const struct trace_mark {
	unsigned long long	val; /* unit: nsec */
	char			sym;
} mark[] = {
	/* Must stay sorted by descending val: trace_find_mark() scans in order. */
	MARK(1000000000ULL	, '$'), /* 1 sec */
	MARK(100000000ULL	, '@'), /* 100 msec */
	MARK(10000000ULL	, '*'), /* 10 msec */
	MARK(1000000ULL		, '#'), /* 1000 usecs */
	MARK(100000ULL		, '!'), /* 100 usecs */
	MARK(10000ULL		, '+'), /* 10 usecs */
};
#undef MARK
  439. #undef MARK
  440. char trace_find_mark(unsigned long long d)
  441. {
  442. int i;
  443. int size = ARRAY_SIZE(mark);
  444. for (i = 0; i < size; i++) {
  445. if (d > mark[i].val)
  446. break;
  447. }
  448. return (i == size) ? ' ' : mark[i].sym;
  449. }
/*
 * Print the latency-format timestamp: time since the buffer started
 * plus the delta to the next entry, in one of four layouts chosen by
 * the verbose flag and whether the buffer counts in nanoseconds.
 * Returns 0 once the seq has overflowed.
 */
static int
lat_print_timestamp(struct trace_iterator *iter, u64 next_ts)
{
	struct trace_array *tr = iter->tr;
	unsigned long verbose = tr->trace_flags & TRACE_ITER_VERBOSE;
	unsigned long in_ns = iter->iter_flags & TRACE_FILE_TIME_IN_NS;
	unsigned long long abs_ts = iter->ts - iter->array_buffer->time_start;
	unsigned long long rel_ts = next_ts - iter->ts;
	struct trace_seq *s = &iter->seq;

	if (in_ns) {
		abs_ts = ns2usecs(abs_ts);
		rel_ts = ns2usecs(rel_ts);
	}

	if (verbose && in_ns) {
		/* do_div() divides in place and returns the remainder. */
		unsigned long abs_usec = do_div(abs_ts, USEC_PER_MSEC);
		unsigned long abs_msec = (unsigned long)abs_ts;
		unsigned long rel_usec = do_div(rel_ts, USEC_PER_MSEC);
		unsigned long rel_msec = (unsigned long)rel_ts;

		trace_seq_printf(
			s, "[%08llx] %ld.%03ldms (+%ld.%03ldms): ",
			ns2usecs(iter->ts),
			abs_msec, abs_usec,
			rel_msec, rel_usec);

	} else if (verbose && !in_ns) {
		trace_seq_printf(
			s, "[%016llx] %lld (+%lld): ",
			iter->ts, abs_ts, rel_ts);

	} else if (!verbose && in_ns) {
		/* Compact form; large deltas get an overhead mark char. */
		trace_seq_printf(
			s, " %4lldus%c: ",
			abs_ts,
			trace_find_mark(rel_ts * NSEC_PER_USEC));

	} else { /* !verbose && !in_ns */
		trace_seq_printf(s, " %4lld: ", abs_ts);
	}

	return !trace_seq_has_overflowed(s);
}
/*
 * Print @ts as "seconds.microseconds" when the buffer counts in
 * nanoseconds, otherwise as a raw counter value.
 */
static void trace_print_time(struct trace_seq *s, struct trace_iterator *iter,
			     unsigned long long ts)
{
	unsigned long secs, usec_rem;
	unsigned long long t;

	if (iter->iter_flags & TRACE_FILE_TIME_IN_NS) {
		t = ns2usecs(ts);
		/* do_div() divides @t in place and returns the remainder. */
		usec_rem = do_div(t, USEC_PER_SEC);
		secs = (unsigned long)t;
		trace_seq_printf(s, " %5lu.%06lu", secs, usec_rem);
	} else
		trace_seq_printf(s, " %12llu", ts);
}
/*
 * Print the standard per-entry context prefix:
 * "comm-pid [(tgid)] [cpu] [lat flags] timestamp: ".
 * Returns 0 once the seq has overflowed.
 */
int trace_print_context(struct trace_iterator *iter)
{
	struct trace_array *tr = iter->tr;
	struct trace_seq *s = &iter->seq;
	struct trace_entry *entry = iter->ent;
	char comm[TASK_COMM_LEN];

	trace_find_cmdline(entry->pid, comm);

	trace_seq_printf(s, "%16s-%-7d ", comm, entry->pid);

	if (tr->trace_flags & TRACE_ITER_RECORD_TGID) {
		unsigned int tgid = trace_find_tgid(entry->pid);

		/* Unknown tgid shows as dashes to keep columns aligned. */
		if (!tgid)
			trace_seq_printf(s, "(-------) ");
		else
			trace_seq_printf(s, "(%7d) ", tgid);
	}

	trace_seq_printf(s, "[%03d] ", iter->cpu);

	if (tr->trace_flags & TRACE_ITER_IRQ_INFO)
		trace_print_lat_fmt(s, entry);

	trace_print_time(s, iter, iter->ts);
	trace_seq_puts(s, ": ");

	return !trace_seq_has_overflowed(s);
}
/*
 * Print the latency-format context prefix for the current entry,
 * including the timestamp delta to the next entry. Returns 0 once the
 * seq has overflowed.
 */
int trace_print_lat_context(struct trace_iterator *iter)
{
	struct trace_entry *entry, *next_entry;
	struct trace_array *tr = iter->tr;
	struct trace_seq *s = &iter->seq;
	unsigned long verbose = (tr->trace_flags & TRACE_ITER_VERBOSE);
	u64 next_ts;

	next_entry = trace_find_next_entry(iter, NULL, &next_ts);
	/* No next entry: report a zero delta. */
	if (!next_entry)
		next_ts = iter->ts;

	/* trace_find_next_entry() may change iter->ent */
	entry = iter->ent;

	if (verbose) {
		char comm[TASK_COMM_LEN];

		trace_find_cmdline(entry->pid, comm);

		trace_seq_printf(
			s, "%16s %7d %3d %d %08x %08lx ",
			comm, entry->pid, iter->cpu, entry->flags,
			entry->preempt_count & 0xf, iter->idx);
	} else {
		lat_print_generic(s, entry, iter->cpu);
	}

	lat_print_timestamp(iter, next_ts);

	return !trace_seq_has_overflowed(s);
}
/**
 * ftrace_find_event - find a registered event
 * @type: the type of event to look for
 *
 * Returns an event of type @type otherwise NULL
 * Called with trace_event_read_lock() held.
 */
struct trace_event *ftrace_find_event(int type)
{
	struct trace_event *event;
	unsigned key;

	/* EVENT_HASHSIZE is a power of 2, so masking hashes the type. */
	key = type & (EVENT_HASHSIZE - 1);

	hlist_for_each_entry(event, &event_hash[key], node) {
		if (event->type == type)
			return event;
	}

	return NULL;
}
  565. static DEFINE_IDA(trace_event_ida);
  566. static void free_trace_event_type(int type)
  567. {
  568. if (type >= __TRACE_LAST_TYPE)
  569. ida_free(&trace_event_ida, type);
  570. }
  571. static int alloc_trace_event_type(void)
  572. {
  573. int next;
  574. /* Skip static defined type numbers */
  575. next = ida_alloc_range(&trace_event_ida, __TRACE_LAST_TYPE,
  576. TRACE_EVENT_TYPE_MAX, GFP_KERNEL);
  577. if (next < 0)
  578. return 0;
  579. return next;
  580. }
/* Protect readers of the event hash against concurrent (un)registration. */
void trace_event_read_lock(void)
{
	down_read(&trace_event_sem);
}

void trace_event_read_unlock(void)
{
	up_read(&trace_event_sem);
}
/**
 * register_trace_event - register output for an event type
 * @event: the event type to register
 *
 * Event types are stored in a hash and this hash is used to
 * find a way to print an event. If the @event->type is set
 * then it will use that type, otherwise it will assign a
 * type to use.
 *
 * If you assign your own type, please make sure it is added
 * to the trace_type enum in trace.h, to avoid collisions
 * with the dynamic types.
 *
 * Returns the event type number or zero on error.
 */
int register_trace_event(struct trace_event *event)
{
	unsigned key;
	int ret = 0;

	down_write(&trace_event_sem);

	if (WARN_ON(!event))
		goto out;

	if (WARN_ON(!event->funcs))
		goto out;

	if (!event->type) {
		/* Dynamically assign a type number above the static ones. */
		event->type = alloc_trace_event_type();
		if (!event->type)
			goto out;
	} else if (WARN(event->type > __TRACE_LAST_TYPE,
			"Need to add type to trace.h")) {
		goto out;
	} else {
		/* Is this event already used */
		if (ftrace_find_event(event->type))
			goto out;
	}

	/* Unset output callbacks fall back to the nop printer. */
	if (event->funcs->trace == NULL)
		event->funcs->trace = trace_nop_print;
	if (event->funcs->raw == NULL)
		event->funcs->raw = trace_nop_print;
	if (event->funcs->hex == NULL)
		event->funcs->hex = trace_nop_print;
	if (event->funcs->binary == NULL)
		event->funcs->binary = trace_nop_print;

	key = event->type & (EVENT_HASHSIZE - 1);

	hlist_add_head(&event->node, &event_hash[key]);

	ret = event->type;
 out:
	up_write(&trace_event_sem);

	return ret;
}
EXPORT_SYMBOL_GPL(register_trace_event);
/*
 * Used by module code with the trace_event_sem held for write.
 */
int __unregister_trace_event(struct trace_event *event)
{
	hlist_del(&event->node);
	/* Return a dynamically assigned type number to the IDA. */
	free_trace_event_type(event->type);
	return 0;
}
/**
 * unregister_trace_event - remove a no longer used event
 * @event: the event to remove
 *
 * Takes and releases trace_event_sem for writing. Returns 0.
 */
int unregister_trace_event(struct trace_event *event)
{
	down_write(&trace_event_sem);
	__unregister_trace_event(event);
	up_write(&trace_event_sem);

	return 0;
}
EXPORT_SYMBOL_GPL(unregister_trace_event);
/*
 * Standard events
 */

/*
 * Print a dynamic-array field as comma-separated hex bytes. @pos points
 * at the 32-bit descriptor word: low 16 bits hold the data offset
 * (relative to the field itself when @field is set, i.e. __rel_loc),
 * high 16 bits hold the length.
 */
static void print_array(struct trace_iterator *iter, void *pos,
			struct ftrace_event_field *field)
{
	int offset;
	int len;
	int i;

	offset = *(int *)pos & 0xffff;
	len = *(int *)pos >> 16;

	/* Relative offsets are based past the descriptor of this field. */
	if (field)
		offset += field->offset + sizeof(int);

	/* Never read past the recorded entry. */
	if (offset + len > iter->ent_size) {
		trace_seq_puts(&iter->seq, "<OVERFLOW>");
		return;
	}

	pos = (void *)iter->ent + offset;

	for (i = 0; i < len; i++, pos++) {
		if (i)
			trace_seq_putc(&iter->seq, ',');
		trace_seq_printf(&iter->seq, "%02x", *(unsigned char *)pos);
	}
}
/*
 * Print every field of the entry as " name=value", driven by the field
 * metadata (offset/size/filter_type) rather than the event's print
 * format string.
 */
static void print_fields(struct trace_iterator *iter, struct trace_event_call *call,
			 struct list_head *head)
{
	struct ftrace_event_field *field;
	int offset;
	int len;
	int ret;
	void *pos;

	/* NOTE(review): reverse walk — the list appears to hold fields in
	 * reverse definition order; confirm against trace_define_field(). */
	list_for_each_entry_reverse(field, head, link) {
		trace_seq_printf(&iter->seq, " %s=", field->name);
		/* Never read past the recorded entry. */
		if (field->offset + field->size > iter->ent_size) {
			trace_seq_puts(&iter->seq, "<OVERFLOW>");
			continue;
		}
		pos = (void *)iter->ent + field->offset;

		switch (field->filter_type) {
		case FILTER_COMM:
		case FILTER_STATIC_STRING:
			trace_seq_printf(&iter->seq, "%.*s", field->size, (char *)pos);
			break;
		case FILTER_RDYN_STRING:
		case FILTER_DYN_STRING:
			/* Descriptor word: low 16 bits offset, high 16 length. */
			offset = *(int *)pos & 0xffff;
			len = *(int *)pos >> 16;

			/* __rel_loc offsets are relative to this field. */
			if (field->filter_type == FILTER_RDYN_STRING)
				offset += field->offset + sizeof(int);

			if (offset + len > iter->ent_size) {
				trace_seq_puts(&iter->seq, "<OVERFLOW>");
				break;
			}
			pos = (void *)iter->ent + offset;
			trace_seq_printf(&iter->seq, "%.*s", len, (char *)pos);
			break;
		case FILTER_PTR_STRING:
			if (!iter->fmt_size)
				trace_iter_expand_format(iter);
			pos = *(void **)pos;
			/* The string lives elsewhere; copy it out safely. */
			ret = strncpy_from_kernel_nofault(iter->fmt, pos,
							  iter->fmt_size);
			if (ret < 0)
				trace_seq_printf(&iter->seq, "(0x%px)", pos);
			else
				trace_seq_printf(&iter->seq, "(0x%px:%s)",
						 pos, iter->fmt);
			break;
		case FILTER_TRACE_FN:
			pos = *(void **)pos;
			trace_seq_printf(&iter->seq, "%pS", pos);
			break;
		case FILTER_CPU:
		case FILTER_OTHER:
			switch (field->size) {
			case 1:
				/* Printable bytes get char and decimal forms. */
				if (isprint(*(char *)pos)) {
					trace_seq_printf(&iter->seq, "'%c'",
							 *(unsigned char *)pos);
				}
				trace_seq_printf(&iter->seq, "(%d)",
						 *(unsigned char *)pos);
				break;
			case 2:
				trace_seq_printf(&iter->seq, "0x%x (%d)",
						 *(unsigned short *)pos,
						 *(unsigned short *)pos);
				break;
			case 4:
				/* dynamic array info is 4 bytes */
				if (strstr(field->type, "__data_loc")) {
					print_array(iter, pos, NULL);
					break;
				}
				if (strstr(field->type, "__rel_loc")) {
					print_array(iter, pos, field);
					break;
				}
				trace_seq_printf(&iter->seq, "0x%x (%d)",
						 *(unsigned int *)pos,
						 *(unsigned int *)pos);
				break;
			case 8:
				trace_seq_printf(&iter->seq, "0x%llx (%lld)",
						 *(unsigned long long *)pos,
						 *(unsigned long long *)pos);
				break;
			default:
				trace_seq_puts(&iter->seq, "<INVALID-SIZE>");
				break;
			}
			break;
		default:
			trace_seq_puts(&iter->seq, "<INVALID-TYPE>");
		}
	}
	trace_seq_putc(&iter->seq, '\n');
}
/*
 * Print an event using its field descriptions instead of the print
 * format. Resolves the trace_event_call for @event — ftrace's internal
 * events keep their call structures on the ftrace_events list — and
 * dumps each field.
 */
enum print_line_t print_event_fields(struct trace_iterator *iter,
				     struct trace_event *event)
{
	struct trace_event_call *call;
	struct list_head *head;

	lockdep_assert_held_read(&trace_event_sem);

	/* ftrace defined events have separate call structures */
	if (event->type <= __TRACE_LAST_TYPE) {
		bool found = false;

		list_for_each_entry(call, &ftrace_events, list) {
			if (call->event.type == event->type) {
				found = true;
				break;
			}
			/* No need to search all events */
			if (call->event.type > __TRACE_LAST_TYPE)
				break;
		}
		if (!found) {
			trace_seq_printf(&iter->seq, "UNKNOWN TYPE %d\n", event->type);
			goto out;
		}
	} else {
		call = container_of(event, struct trace_event_call, event);
	}

	head = trace_get_fields(call);

	trace_seq_printf(&iter->seq, "%s:", trace_event_name(call));

	if (head && !list_empty(head))
		print_fields(iter, call, head);
	else
		trace_seq_puts(&iter->seq, "No fields found\n");

 out:
	return trace_handle_return(&iter->seq);
}
  815. enum print_line_t trace_nop_print(struct trace_iterator *iter, int flags,
  816. struct trace_event *event)
  817. {
  818. trace_seq_printf(&iter->seq, "type: %d\n", iter->ent->type);
  819. return trace_handle_return(&iter->seq);
  820. }
  821. static void print_fn_trace(struct trace_seq *s, unsigned long ip,
  822. unsigned long parent_ip, long delta, int flags)
  823. {
  824. ip += delta;
  825. parent_ip += delta;
  826. seq_print_ip_sym(s, ip, flags);
  827. if ((flags & TRACE_ITER_PRINT_PARENT) && parent_ip) {
  828. trace_seq_puts(s, " <-");
  829. seq_print_ip_sym(s, parent_ip, flags);
  830. }
  831. }
  832. /* TRACE_FN */
  833. static enum print_line_t trace_fn_trace(struct trace_iterator *iter, int flags,
  834. struct trace_event *event)
  835. {
  836. struct ftrace_entry *field;
  837. struct trace_seq *s = &iter->seq;
  838. trace_assign_type(field, iter->ent);
  839. print_fn_trace(s, field->ip, field->parent_ip, iter->tr->text_delta, flags);
  840. trace_seq_putc(s, '\n');
  841. return trace_handle_return(s);
  842. }
  843. static enum print_line_t trace_fn_raw(struct trace_iterator *iter, int flags,
  844. struct trace_event *event)
  845. {
  846. struct ftrace_entry *field;
  847. trace_assign_type(field, iter->ent);
  848. trace_seq_printf(&iter->seq, "%lx %lx\n",
  849. field->ip,
  850. field->parent_ip);
  851. return trace_handle_return(&iter->seq);
  852. }
  853. static enum print_line_t trace_fn_hex(struct trace_iterator *iter, int flags,
  854. struct trace_event *event)
  855. {
  856. struct ftrace_entry *field;
  857. struct trace_seq *s = &iter->seq;
  858. trace_assign_type(field, iter->ent);
  859. SEQ_PUT_HEX_FIELD(s, field->ip);
  860. SEQ_PUT_HEX_FIELD(s, field->parent_ip);
  861. return trace_handle_return(s);
  862. }
  863. static enum print_line_t trace_fn_bin(struct trace_iterator *iter, int flags,
  864. struct trace_event *event)
  865. {
  866. struct ftrace_entry *field;
  867. struct trace_seq *s = &iter->seq;
  868. trace_assign_type(field, iter->ent);
  869. SEQ_PUT_FIELD(s, field->ip);
  870. SEQ_PUT_FIELD(s, field->parent_ip);
  871. return trace_handle_return(s);
  872. }
/* Per-output-mode formatting callbacks for TRACE_FN entries. */
static struct trace_event_functions trace_fn_funcs = {
	.trace		= trace_fn_trace,
	.raw		= trace_fn_raw,
	.hex		= trace_fn_hex,
	.binary		= trace_fn_bin,
};

/* Registered at boot by init_events() below. */
static struct trace_event trace_fn_event = {
	.type		= TRACE_FN,
	.funcs		= &trace_fn_funcs,
};
/* TRACE_CTX and TRACE_WAKE */
/*
 * Shared human-readable printer for context-switch and wakeup entries.
 * @delim distinguishes the two: "==>" for a switch, " +" for a wakeup.
 * prev/next task state is rendered as a single state character; the comm
 * is resolved only for the next (incoming/woken) pid.
 */
static enum print_line_t trace_ctxwake_print(struct trace_iterator *iter,
					     char *delim)
{
	struct ctx_switch_entry *field;
	char comm[TASK_COMM_LEN];
	int S, T;

	trace_assign_type(field, iter->ent);

	/* Map recorded state indices to their display characters. */
	T = task_index_to_char(field->next_state);
	S = task_index_to_char(field->prev_state);
	trace_find_cmdline(field->next_pid, comm);
	trace_seq_printf(&iter->seq,
			 " %7d:%3d:%c %s [%03d] %7d:%3d:%c %s\n",
			 field->prev_pid,
			 field->prev_prio,
			 S, delim,
			 field->next_cpu,
			 field->next_pid,
			 field->next_prio,
			 T, comm);

	return trace_handle_return(&iter->seq);
}
/* Context switch: "prev ==> next". */
static enum print_line_t trace_ctx_print(struct trace_iterator *iter, int flags,
					 struct trace_event *event)
{
	return trace_ctxwake_print(iter, "==>");
}

/* Wakeup: "waker  + wakee". */
static enum print_line_t trace_wake_print(struct trace_iterator *iter,
					  int flags, struct trace_event *event)
{
	return trace_ctxwake_print(iter, "  +");
}
/*
 * Shared raw printer for ctx-switch/wakeup entries.  If @S is zero the
 * prev state character is derived from the record; wakeup callers pass
 * '+' to override it.
 */
static int trace_ctxwake_raw(struct trace_iterator *iter, char S)
{
	struct ctx_switch_entry *field;
	int T;

	trace_assign_type(field, iter->ent);

	if (!S)
		S = task_index_to_char(field->prev_state);
	T = task_index_to_char(field->next_state);
	trace_seq_printf(&iter->seq, "%d %d %c %d %d %d %c\n",
			 field->prev_pid,
			 field->prev_prio,
			 S,
			 field->next_cpu,
			 field->next_pid,
			 field->next_prio,
			 T);

	return trace_handle_return(&iter->seq);
}
/* Raw ctx-switch: let the helper derive the prev-state character. */
static enum print_line_t trace_ctx_raw(struct trace_iterator *iter, int flags,
				       struct trace_event *event)
{
	return trace_ctxwake_raw(iter, 0);
}

/* Raw wakeup: force '+' as the state marker. */
static enum print_line_t trace_wake_raw(struct trace_iterator *iter, int flags,
					struct trace_event *event)
{
	return trace_ctxwake_raw(iter, '+');
}
/*
 * Shared hex printer for ctx-switch/wakeup entries.  Same @S convention
 * as trace_ctxwake_raw(): zero means derive from the recorded prev state.
 */
static int trace_ctxwake_hex(struct trace_iterator *iter, char S)
{
	struct ctx_switch_entry *field;
	struct trace_seq *s = &iter->seq;
	int T;

	trace_assign_type(field, iter->ent);

	if (!S)
		S = task_index_to_char(field->prev_state);
	T = task_index_to_char(field->next_state);
	SEQ_PUT_HEX_FIELD(s, field->prev_pid);
	SEQ_PUT_HEX_FIELD(s, field->prev_prio);
	SEQ_PUT_HEX_FIELD(s, S);
	SEQ_PUT_HEX_FIELD(s, field->next_cpu);
	SEQ_PUT_HEX_FIELD(s, field->next_pid);
	SEQ_PUT_HEX_FIELD(s, field->next_prio);
	SEQ_PUT_HEX_FIELD(s, T);

	return trace_handle_return(s);
}
/* Hex ctx-switch: derive the prev-state character from the record. */
static enum print_line_t trace_ctx_hex(struct trace_iterator *iter, int flags,
				       struct trace_event *event)
{
	return trace_ctxwake_hex(iter, 0);
}

/* Hex wakeup: force '+' as the state marker. */
static enum print_line_t trace_wake_hex(struct trace_iterator *iter, int flags,
					struct trace_event *event)
{
	return trace_ctxwake_hex(iter, '+');
}
/*
 * Binary output for ctx-switch/wakeup entries.  Unlike the text modes,
 * states are emitted as the raw recorded indices (no char mapping).
 * The field order here is the binary format consumers rely on.
 */
static enum print_line_t trace_ctxwake_bin(struct trace_iterator *iter,
					   int flags, struct trace_event *event)
{
	struct ctx_switch_entry *field;
	struct trace_seq *s = &iter->seq;

	trace_assign_type(field, iter->ent);

	SEQ_PUT_FIELD(s, field->prev_pid);
	SEQ_PUT_FIELD(s, field->prev_prio);
	SEQ_PUT_FIELD(s, field->prev_state);
	SEQ_PUT_FIELD(s, field->next_cpu);
	SEQ_PUT_FIELD(s, field->next_pid);
	SEQ_PUT_FIELD(s, field->next_prio);
	SEQ_PUT_FIELD(s, field->next_state);

	return trace_handle_return(s);
}
/* Formatting callbacks for TRACE_CTX; binary mode is shared with wakeups. */
static struct trace_event_functions trace_ctx_funcs = {
	.trace		= trace_ctx_print,
	.raw		= trace_ctx_raw,
	.hex		= trace_ctx_hex,
	.binary		= trace_ctxwake_bin,
};

static struct trace_event trace_ctx_event = {
	.type		= TRACE_CTX,
	.funcs		= &trace_ctx_funcs,
};

/* Formatting callbacks for TRACE_WAKE; binary mode is shared with switches. */
static struct trace_event_functions trace_wake_funcs = {
	.trace		= trace_wake_print,
	.raw		= trace_wake_raw,
	.hex		= trace_wake_hex,
	.binary		= trace_ctxwake_bin,
};

static struct trace_event trace_wake_event = {
	.type		= TRACE_WAKE,
	.funcs		= &trace_wake_funcs,
};
/* TRACE_STACK */
/*
 * Print a kernel stack trace entry, one symbolized frame per line.
 * Frames are walked until the end of the ring-buffer record, a ULONG_MAX
 * sentinel, or a trace_seq overflow.  Addresses are shifted by the trace
 * array's text relocation delta before symbolization.
 */
static enum print_line_t trace_stack_print(struct trace_iterator *iter,
					   int flags, struct trace_event *event)
{
	struct stack_entry *field;
	struct trace_seq *s = &iter->seq;
	unsigned long *p;
	unsigned long *end;
	long delta = iter->tr->text_delta;

	trace_assign_type(field, iter->ent);
	/* Bound the walk by the actual size of this record. */
	end = (unsigned long *)((long)iter->ent + iter->ent_size);

	trace_seq_puts(s, "<stack trace>\n");

	for (p = field->caller; p && p < end && *p != ULONG_MAX; p++) {

		if (trace_seq_has_overflowed(s))
			break;

		trace_seq_puts(s, " => ");

		/* Trampoline frames have no stable symbol; print a marker. */
		if ((*p) == FTRACE_TRAMPOLINE_MARKER) {
			trace_seq_puts(s, "[FTRACE TRAMPOLINE]\n");
			continue;
		}
		seq_print_ip_sym(s, (*p) + delta, flags);
		trace_seq_putc(s, '\n');
	}

	return trace_handle_return(s);
}
/* TRACE_STACK only has a human-readable form; other modes fall back. */
static struct trace_event_functions trace_stack_funcs = {
	.trace		= trace_stack_print,
};

static struct trace_event trace_stack_event = {
	.type		= TRACE_STACK,
	.funcs		= &trace_stack_funcs,
};
/* TRACE_USER_STACK */
/*
 * Print a user-space stack trace entry.  With SYM_USEROBJ set, the task's
 * mm is looked up (via the recorded tgid) so frames can be resolved to
 * object files; the mm reference is dropped before returning.
 */
static enum print_line_t trace_user_stack_print(struct trace_iterator *iter,
						int flags, struct trace_event *event)
{
	struct trace_array *tr = iter->tr;
	struct userstack_entry *field;
	struct trace_seq *s = &iter->seq;
	struct mm_struct *mm = NULL;
	unsigned int i;

	trace_assign_type(field, iter->ent);

	trace_seq_puts(s, "<user stack trace>\n");

	if (tr->trace_flags & TRACE_ITER_SYM_USEROBJ) {
		struct task_struct *task;
		/*
		 * we do the lookup on the thread group leader,
		 * since individual threads might have already quit!
		 */
		rcu_read_lock();
		task = find_task_by_vpid(field->tgid);
		if (task)
			mm = get_task_mm(task);	/* takes a reference; mmput below */
		rcu_read_unlock();
	}

	for (i = 0; i < FTRACE_STACK_ENTRIES; i++) {
		unsigned long ip = field->caller[i];

		/* A zero entry terminates the recorded frames. */
		if (!ip || trace_seq_has_overflowed(s))
			break;

		trace_seq_puts(s, " => ");
		seq_print_user_ip(s, mm, ip, flags);
		trace_seq_putc(s, '\n');
	}

	if (mm)
		mmput(mm);

	return trace_handle_return(s);
}
/* TRACE_USER_STACK only has a human-readable form. */
static struct trace_event_functions trace_user_stack_funcs = {
	.trace		= trace_user_stack_print,
};

static struct trace_event trace_user_stack_event = {
	.type		= TRACE_USER_STACK,
	.funcs		= &trace_user_stack_funcs,
};
/* TRACE_HWLAT */
/*
 * Human-readable output for a hardware-latency sample: sequence number,
 * inner/outer durations (us), timestamp, sample count, and — when NMIs
 * hit during the sample — the NMI totals.
 */
static enum print_line_t
trace_hwlat_print(struct trace_iterator *iter, int flags,
		  struct trace_event *event)
{
	struct trace_entry *entry = iter->ent;
	struct trace_seq *s = &iter->seq;
	struct hwlat_entry *field;

	trace_assign_type(field, entry);

	trace_seq_printf(s, "#%-5u inner/outer(us): %4llu/%-5llu ts:%lld.%09ld count:%d",
			 field->seqnum,
			 field->duration,
			 field->outer_duration,
			 (long long)field->timestamp.tv_sec,
			 field->timestamp.tv_nsec, field->count);

	if (field->nmi_count) {
		/*
		 * The generic sched_clock() is not NMI safe, thus
		 * we only record the count and not the time.
		 */
		if (!IS_ENABLED(CONFIG_GENERIC_SCHED_CLOCK))
			trace_seq_printf(s, " nmi-total:%llu",
					 field->nmi_total_ts);
		trace_seq_printf(s, " nmi-count:%u",
				 field->nmi_count);
	}

	trace_seq_putc(s, '\n');

	return trace_handle_return(s);
}
  1109. static enum print_line_t
  1110. trace_hwlat_raw(struct trace_iterator *iter, int flags,
  1111. struct trace_event *event)
  1112. {
  1113. struct hwlat_entry *field;
  1114. struct trace_seq *s = &iter->seq;
  1115. trace_assign_type(field, iter->ent);
  1116. trace_seq_printf(s, "%llu %lld %lld %09ld %u\n",
  1117. field->duration,
  1118. field->outer_duration,
  1119. (long long)field->timestamp.tv_sec,
  1120. field->timestamp.tv_nsec,
  1121. field->seqnum);
  1122. return trace_handle_return(s);
  1123. }
/* TRACE_HWLAT formatting callbacks (no hex/binary forms). */
static struct trace_event_functions trace_hwlat_funcs = {
	.trace		= trace_hwlat_print,
	.raw		= trace_hwlat_raw,
};

static struct trace_event trace_hwlat_event = {
	.type		= TRACE_HWLAT,
	.funcs		= &trace_hwlat_funcs,
};
/* TRACE_OSNOISE */
/*
 * Human-readable output for an osnoise sample: runtime, noise, the
 * percentage of CPU time available to the workload (with 5 decimal
 * places), the max single-noise sample, and per-source noise counters.
 */
static enum print_line_t
trace_osnoise_print(struct trace_iterator *iter, int flags,
		    struct trace_event *event)
{
	struct trace_entry *entry = iter->ent;
	struct trace_seq *s = &iter->seq;
	struct osnoise_entry *field;
	u64 ratio, ratio_dec;
	u64 net_runtime;

	trace_assign_type(field, entry);

	/*
	 * compute the available % of cpu time.
	 */
	net_runtime = field->runtime - field->noise;
	/*
	 * Scale by 10^7 so that after dividing by runtime the value is in
	 * units of 0.00001%; the second do_div() splits it into the integer
	 * percentage (quotient) and a 5-digit fraction (remainder).
	 * NOTE(review): assumes field->runtime is nonzero — confirm the
	 * recorder never emits a zero-runtime sample.
	 */
	ratio = net_runtime * 10000000;
	do_div(ratio, field->runtime);
	ratio_dec = do_div(ratio, 100000);

	trace_seq_printf(s, "%llu %10llu %3llu.%05llu %7llu",
			 field->runtime,
			 field->noise,
			 ratio, ratio_dec,
			 field->max_sample);

	trace_seq_printf(s, " %6u", field->hw_count);
	trace_seq_printf(s, " %6u", field->nmi_count);
	trace_seq_printf(s, " %6u", field->irq_count);
	trace_seq_printf(s, " %6u", field->softirq_count);
	trace_seq_printf(s, " %6u", field->thread_count);

	trace_seq_putc(s, '\n');

	return trace_handle_return(s);
}
  1163. static enum print_line_t
  1164. trace_osnoise_raw(struct trace_iterator *iter, int flags,
  1165. struct trace_event *event)
  1166. {
  1167. struct osnoise_entry *field;
  1168. struct trace_seq *s = &iter->seq;
  1169. trace_assign_type(field, iter->ent);
  1170. trace_seq_printf(s, "%lld %llu %llu %u %u %u %u %u\n",
  1171. field->runtime,
  1172. field->noise,
  1173. field->max_sample,
  1174. field->hw_count,
  1175. field->nmi_count,
  1176. field->irq_count,
  1177. field->softirq_count,
  1178. field->thread_count);
  1179. return trace_handle_return(s);
  1180. }
/* TRACE_OSNOISE formatting callbacks (no hex/binary forms). */
static struct trace_event_functions trace_osnoise_funcs = {
	.trace		= trace_osnoise_print,
	.raw		= trace_osnoise_raw,
};

static struct trace_event trace_osnoise_event = {
	.type		= TRACE_OSNOISE,
	.funcs		= &trace_osnoise_funcs,
};
/* TRACE_TIMERLAT */
/* Display names indexed by timerlat_entry::context. */
static char *timerlat_lat_context[] = {"irq", "thread", "user-ret"};
  1191. static enum print_line_t
  1192. trace_timerlat_print(struct trace_iterator *iter, int flags,
  1193. struct trace_event *event)
  1194. {
  1195. struct trace_entry *entry = iter->ent;
  1196. struct trace_seq *s = &iter->seq;
  1197. struct timerlat_entry *field;
  1198. trace_assign_type(field, entry);
  1199. trace_seq_printf(s, "#%-5u context %6s timer_latency %9llu ns\n",
  1200. field->seqnum,
  1201. timerlat_lat_context[field->context],
  1202. field->timer_latency);
  1203. return trace_handle_return(s);
  1204. }
  1205. static enum print_line_t
  1206. trace_timerlat_raw(struct trace_iterator *iter, int flags,
  1207. struct trace_event *event)
  1208. {
  1209. struct timerlat_entry *field;
  1210. struct trace_seq *s = &iter->seq;
  1211. trace_assign_type(field, iter->ent);
  1212. trace_seq_printf(s, "%u %d %llu\n",
  1213. field->seqnum,
  1214. field->context,
  1215. field->timer_latency);
  1216. return trace_handle_return(s);
  1217. }
/* TRACE_TIMERLAT formatting callbacks (no hex/binary forms). */
static struct trace_event_functions trace_timerlat_funcs = {
	.trace		= trace_timerlat_print,
	.raw		= trace_timerlat_raw,
};

static struct trace_event trace_timerlat_event = {
	.type		= TRACE_TIMERLAT,
	.funcs		= &trace_timerlat_funcs,
};
  1226. /* TRACE_BPUTS */
  1227. static enum print_line_t
  1228. trace_bputs_print(struct trace_iterator *iter, int flags,
  1229. struct trace_event *event)
  1230. {
  1231. struct trace_entry *entry = iter->ent;
  1232. struct trace_seq *s = &iter->seq;
  1233. struct bputs_entry *field;
  1234. trace_assign_type(field, entry);
  1235. seq_print_ip_sym(s, field->ip, flags);
  1236. trace_seq_puts(s, ": ");
  1237. trace_seq_puts(s, field->str);
  1238. return trace_handle_return(s);
  1239. }
  1240. static enum print_line_t
  1241. trace_bputs_raw(struct trace_iterator *iter, int flags,
  1242. struct trace_event *event)
  1243. {
  1244. struct bputs_entry *field;
  1245. struct trace_seq *s = &iter->seq;
  1246. trace_assign_type(field, iter->ent);
  1247. trace_seq_printf(s, ": %lx : ", field->ip);
  1248. trace_seq_puts(s, field->str);
  1249. return trace_handle_return(s);
  1250. }
/* TRACE_BPUTS formatting callbacks (no hex/binary forms). */
static struct trace_event_functions trace_bputs_funcs = {
	.trace		= trace_bputs_print,
	.raw		= trace_bputs_raw,
};

static struct trace_event trace_bputs_event = {
	.type		= TRACE_BPUTS,
	.funcs		= &trace_bputs_funcs,
};
  1259. /* TRACE_BPRINT */
  1260. static enum print_line_t
  1261. trace_bprint_print(struct trace_iterator *iter, int flags,
  1262. struct trace_event *event)
  1263. {
  1264. struct trace_entry *entry = iter->ent;
  1265. struct trace_seq *s = &iter->seq;
  1266. struct bprint_entry *field;
  1267. trace_assign_type(field, entry);
  1268. seq_print_ip_sym(s, field->ip, flags);
  1269. trace_seq_puts(s, ": ");
  1270. trace_seq_bprintf(s, field->fmt, field->buf);
  1271. return trace_handle_return(s);
  1272. }
  1273. static enum print_line_t
  1274. trace_bprint_raw(struct trace_iterator *iter, int flags,
  1275. struct trace_event *event)
  1276. {
  1277. struct bprint_entry *field;
  1278. struct trace_seq *s = &iter->seq;
  1279. trace_assign_type(field, iter->ent);
  1280. trace_seq_printf(s, ": %lx : ", field->ip);
  1281. trace_seq_bprintf(s, field->fmt, field->buf);
  1282. return trace_handle_return(s);
  1283. }
/* TRACE_BPRINT formatting callbacks (no hex/binary forms). */
static struct trace_event_functions trace_bprint_funcs = {
	.trace		= trace_bprint_print,
	.raw		= trace_bprint_raw,
};

static struct trace_event trace_bprint_event = {
	.type		= TRACE_BPRINT,
	.funcs		= &trace_bprint_funcs,
};
  1292. /* TRACE_PRINT */
  1293. static enum print_line_t trace_print_print(struct trace_iterator *iter,
  1294. int flags, struct trace_event *event)
  1295. {
  1296. struct print_entry *field;
  1297. struct trace_seq *s = &iter->seq;
  1298. unsigned long ip;
  1299. trace_assign_type(field, iter->ent);
  1300. ip = field->ip + iter->tr->text_delta;
  1301. seq_print_ip_sym(s, ip, flags);
  1302. trace_seq_printf(s, ": %s", field->buf);
  1303. return trace_handle_return(s);
  1304. }
  1305. static enum print_line_t trace_print_raw(struct trace_iterator *iter, int flags,
  1306. struct trace_event *event)
  1307. {
  1308. struct print_entry *field;
  1309. trace_assign_type(field, iter->ent);
  1310. trace_seq_printf(&iter->seq, "# %lx %s", field->ip, field->buf);
  1311. return trace_handle_return(&iter->seq);
  1312. }
/* TRACE_PRINT formatting callbacks (no hex/binary forms). */
static struct trace_event_functions trace_print_funcs = {
	.trace		= trace_print_print,
	.raw		= trace_print_raw,
};

static struct trace_event trace_print_event = {
	.type		= TRACE_PRINT,
	.funcs		= &trace_print_funcs,
};
/*
 * Dump a TRACE_RAW_DATA entry: its id followed by every payload byte in
 * hex.  The payload length is whatever remains of the ring-buffer record
 * after the fixed raw_data_entry header.
 * NOTE(review): 'i' is signed while the bound derives from ent_size —
 * assumes ent_size is at least the header size; confirm against the
 * recorder.
 */
static enum print_line_t trace_raw_data(struct trace_iterator *iter, int flags,
					struct trace_event *event)
{
	struct raw_data_entry *field;
	int i;

	trace_assign_type(field, iter->ent);

	trace_seq_printf(&iter->seq, "# %x buf:", field->id);

	for (i = 0; i < iter->ent_size - offsetof(struct raw_data_entry, buf); i++)
		trace_seq_printf(&iter->seq, " %02x",
				 (unsigned char)field->buf[i]);

	trace_seq_putc(&iter->seq, '\n');

	return trace_handle_return(&iter->seq);
}
/* TRACE_RAW_DATA uses the same hex dump for trace and raw modes. */
static struct trace_event_functions trace_raw_data_funcs = {
	.trace		= trace_raw_data,
	.raw		= trace_raw_data,
};

static struct trace_event trace_raw_data_event = {
	.type		= TRACE_RAW_DATA,
	.funcs		= &trace_raw_data_funcs,
};
  1342. static enum print_line_t
  1343. trace_func_repeats_raw(struct trace_iterator *iter, int flags,
  1344. struct trace_event *event)
  1345. {
  1346. struct func_repeats_entry *field;
  1347. struct trace_seq *s = &iter->seq;
  1348. trace_assign_type(field, iter->ent);
  1349. trace_seq_printf(s, "%lu %lu %u %llu\n",
  1350. field->ip,
  1351. field->parent_ip,
  1352. field->count,
  1353. FUNC_REPEATS_GET_DELTA_TS(field));
  1354. return trace_handle_return(s);
  1355. }
  1356. static enum print_line_t
  1357. trace_func_repeats_print(struct trace_iterator *iter, int flags,
  1358. struct trace_event *event)
  1359. {
  1360. struct func_repeats_entry *field;
  1361. struct trace_seq *s = &iter->seq;
  1362. trace_assign_type(field, iter->ent);
  1363. print_fn_trace(s, field->ip, field->parent_ip, iter->tr->text_delta, flags);
  1364. trace_seq_printf(s, " (repeats: %u, last_ts:", field->count);
  1365. trace_print_time(s, iter,
  1366. iter->ts - FUNC_REPEATS_GET_DELTA_TS(field));
  1367. trace_seq_puts(s, ")\n");
  1368. return trace_handle_return(s);
  1369. }
/* TRACE_FUNC_REPEATS formatting callbacks (no hex/binary forms). */
static struct trace_event_functions trace_func_repeats_funcs = {
	.trace		= trace_func_repeats_print,
	.raw		= trace_func_repeats_raw,
};

static struct trace_event trace_func_repeats_event = {
	.type		= TRACE_FUNC_REPEATS,
	.funcs		= &trace_func_repeats_funcs,
};
/*
 * All built-in trace event types registered at boot.  NULL-terminated;
 * __initdata: the table is discarded once init completes.
 */
static struct trace_event *events[] __initdata = {
	&trace_fn_event,
	&trace_ctx_event,
	&trace_wake_event,
	&trace_stack_event,
	&trace_user_stack_event,
	&trace_bputs_event,
	&trace_bprint_event,
	&trace_print_event,
	&trace_hwlat_event,
	&trace_osnoise_event,
	&trace_timerlat_event,
	&trace_raw_data_event,
	&trace_func_repeats_event,
	NULL
};
  1394. __init int init_events(void)
  1395. {
  1396. struct trace_event *event;
  1397. int i, ret;
  1398. for (i = 0; events[i]; i++) {
  1399. event = events[i];
  1400. ret = register_trace_event(event);
  1401. WARN_ONCE(!ret, "event %d failed to register", event->type);
  1402. }
  1403. return 0;
  1404. }