// SPDX-License-Identifier: GPL-2.0
/*
 * trace irqs off critical timings
 *
 * Copyright (C) 2007-2008 Steven Rostedt <srostedt@redhat.com>
 * Copyright (C) 2008 Ingo Molnar <mingo@redhat.com>
 *
 * From code in the latency_tracer, that is:
 *
 * Copyright (C) 2004-2006 Ingo Molnar
 * Copyright (C) 2004 Nadia Yvette Chambers
 */
#include <linux/kallsyms.h>
#include <linux/uaccess.h>
#include <linux/module.h>
#include <linux/ftrace.h>
#include <linux/kprobes.h>
#include "trace.h"

#include <trace/events/preemptirq.h>
  20. #if defined(CONFIG_IRQSOFF_TRACER) || defined(CONFIG_PREEMPT_TRACER)
  21. static struct trace_array *irqsoff_trace __read_mostly;
  22. static int tracer_enabled __read_mostly;
  23. static DEFINE_PER_CPU(int, tracing_cpu);
  24. static DEFINE_RAW_SPINLOCK(max_trace_lock);
  25. enum {
  26. TRACER_IRQS_OFF = (1 << 1),
  27. TRACER_PREEMPT_OFF = (1 << 2),
  28. };
  29. static int trace_type __read_mostly;
  30. static int save_flags;
  31. static void stop_irqsoff_tracer(struct trace_array *tr, int graph);
  32. static int start_irqsoff_tracer(struct trace_array *tr, int graph);
  33. #ifdef CONFIG_PREEMPT_TRACER
  34. static inline int
  35. preempt_trace(int pc)
  36. {
  37. return ((trace_type & TRACER_PREEMPT_OFF) && pc);
  38. }
  39. #else
  40. # define preempt_trace(pc) (0)
  41. #endif
  42. #ifdef CONFIG_IRQSOFF_TRACER
  43. static inline int
  44. irq_trace(void)
  45. {
  46. return ((trace_type & TRACER_IRQS_OFF) &&
  47. irqs_disabled());
  48. }
  49. #else
  50. # define irq_trace() (0)
  51. #endif
  52. #ifdef CONFIG_FUNCTION_GRAPH_TRACER
  53. static int irqsoff_display_graph(struct trace_array *tr, int set);
  54. # define is_graph(tr) ((tr)->trace_flags & TRACE_ITER_DISPLAY_GRAPH)
  55. #else
  56. static inline int irqsoff_display_graph(struct trace_array *tr, int set)
  57. {
  58. return -EINVAL;
  59. }
  60. # define is_graph(tr) false
  61. #endif
  62. /*
  63. * Sequence count - we record it when starting a measurement and
  64. * skip the latency if the sequence has changed - some other section
  65. * did a maximum and could disturb our measurement with serial console
  66. * printouts, etc. Truly coinciding maximum latencies should be rare
  67. * and what happens together happens separately as well, so this doesn't
  68. * decrease the validity of the maximum found:
  69. */
  70. static __cacheline_aligned_in_smp unsigned long max_sequence;
  71. #ifdef CONFIG_FUNCTION_TRACER
  72. /*
  73. * Prologue for the preempt and irqs off function tracers.
  74. *
  75. * Returns 1 if it is OK to continue, and data->disabled is
  76. * incremented.
  77. * 0 if the trace is to be ignored, and data->disabled
  78. * is kept the same.
  79. *
  80. * Note, this function is also used outside this ifdef but
  81. * inside the #ifdef of the function graph tracer below.
  82. * This is OK, since the function graph tracer is
  83. * dependent on the function tracer.
  84. */
  85. static int func_prolog_dec(struct trace_array *tr,
  86. struct trace_array_cpu **data,
  87. unsigned long *flags)
  88. {
  89. long disabled;
  90. int cpu;
  91. /*
  92. * Does not matter if we preempt. We test the flags
  93. * afterward, to see if irqs are disabled or not.
  94. * If we preempt and get a false positive, the flags
  95. * test will fail.
  96. */
  97. cpu = raw_smp_processor_id();
  98. if (likely(!per_cpu(tracing_cpu, cpu)))
  99. return 0;
  100. local_save_flags(*flags);
  101. /*
  102. * Slight chance to get a false positive on tracing_cpu,
  103. * although I'm starting to think there isn't a chance.
  104. * Leave this for now just to be paranoid.
  105. */
  106. if (!irqs_disabled_flags(*flags) && !preempt_count())
  107. return 0;
  108. *data = per_cpu_ptr(tr->array_buffer.data, cpu);
  109. disabled = atomic_inc_return(&(*data)->disabled);
  110. if (likely(disabled == 1))
  111. return 1;
  112. atomic_dec(&(*data)->disabled);
  113. return 0;
  114. }
  115. /*
  116. * irqsoff uses its own tracer function to keep the overhead down:
  117. */
  118. static void
  119. irqsoff_tracer_call(unsigned long ip, unsigned long parent_ip,
  120. struct ftrace_ops *op, struct ftrace_regs *fregs)
  121. {
  122. struct trace_array *tr = irqsoff_trace;
  123. struct trace_array_cpu *data;
  124. unsigned long flags;
  125. unsigned int trace_ctx;
  126. if (!func_prolog_dec(tr, &data, &flags))
  127. return;
  128. trace_ctx = tracing_gen_ctx_flags(flags);
  129. trace_function(tr, ip, parent_ip, trace_ctx);
  130. atomic_dec(&data->disabled);
  131. }
  132. #endif /* CONFIG_FUNCTION_TRACER */
  133. #ifdef CONFIG_FUNCTION_GRAPH_TRACER
  134. static int irqsoff_display_graph(struct trace_array *tr, int set)
  135. {
  136. int cpu;
  137. if (!(is_graph(tr) ^ set))
  138. return 0;
  139. stop_irqsoff_tracer(irqsoff_trace, !set);
  140. for_each_possible_cpu(cpu)
  141. per_cpu(tracing_cpu, cpu) = 0;
  142. tr->max_latency = 0;
  143. tracing_reset_online_cpus(&irqsoff_trace->array_buffer);
  144. return start_irqsoff_tracer(irqsoff_trace, set);
  145. }
  146. static int irqsoff_graph_entry(struct ftrace_graph_ent *trace,
  147. struct fgraph_ops *gops)
  148. {
  149. struct trace_array *tr = irqsoff_trace;
  150. struct trace_array_cpu *data;
  151. unsigned long flags;
  152. unsigned int trace_ctx;
  153. int ret;
  154. if (ftrace_graph_ignore_func(gops, trace))
  155. return 0;
  156. /*
  157. * Do not trace a function if it's filtered by set_graph_notrace.
  158. * Make the index of ret stack negative to indicate that it should
  159. * ignore further functions. But it needs its own ret stack entry
  160. * to recover the original index in order to continue tracing after
  161. * returning from the function.
  162. */
  163. if (ftrace_graph_notrace_addr(trace->func))
  164. return 1;
  165. if (!func_prolog_dec(tr, &data, &flags))
  166. return 0;
  167. trace_ctx = tracing_gen_ctx_flags(flags);
  168. ret = __trace_graph_entry(tr, trace, trace_ctx);
  169. atomic_dec(&data->disabled);
  170. return ret;
  171. }
  172. static void irqsoff_graph_return(struct ftrace_graph_ret *trace,
  173. struct fgraph_ops *gops)
  174. {
  175. struct trace_array *tr = irqsoff_trace;
  176. struct trace_array_cpu *data;
  177. unsigned long flags;
  178. unsigned int trace_ctx;
  179. ftrace_graph_addr_finish(gops, trace);
  180. if (!func_prolog_dec(tr, &data, &flags))
  181. return;
  182. trace_ctx = tracing_gen_ctx_flags(flags);
  183. __trace_graph_return(tr, trace, trace_ctx);
  184. atomic_dec(&data->disabled);
  185. }
  186. static struct fgraph_ops fgraph_ops = {
  187. .entryfunc = &irqsoff_graph_entry,
  188. .retfunc = &irqsoff_graph_return,
  189. };
  190. static void irqsoff_trace_open(struct trace_iterator *iter)
  191. {
  192. if (is_graph(iter->tr))
  193. graph_trace_open(iter);
  194. else
  195. iter->private = NULL;
  196. }
  197. static void irqsoff_trace_close(struct trace_iterator *iter)
  198. {
  199. if (iter->private)
  200. graph_trace_close(iter);
  201. }
  202. #define GRAPH_TRACER_FLAGS (TRACE_GRAPH_PRINT_CPU | \
  203. TRACE_GRAPH_PRINT_PROC | \
  204. TRACE_GRAPH_PRINT_REL_TIME | \
  205. TRACE_GRAPH_PRINT_DURATION)
  206. static enum print_line_t irqsoff_print_line(struct trace_iterator *iter)
  207. {
  208. /*
  209. * In graph mode call the graph tracer output function,
  210. * otherwise go with the TRACE_FN event handler
  211. */
  212. if (is_graph(iter->tr))
  213. return print_graph_function_flags(iter, GRAPH_TRACER_FLAGS);
  214. return TRACE_TYPE_UNHANDLED;
  215. }
  216. static void irqsoff_print_header(struct seq_file *s)
  217. {
  218. struct trace_array *tr = irqsoff_trace;
  219. if (is_graph(tr))
  220. print_graph_headers_flags(s, GRAPH_TRACER_FLAGS);
  221. else
  222. trace_default_header(s);
  223. }
  224. static void
  225. __trace_function(struct trace_array *tr,
  226. unsigned long ip, unsigned long parent_ip,
  227. unsigned int trace_ctx)
  228. {
  229. if (is_graph(tr))
  230. trace_graph_function(tr, ip, parent_ip, trace_ctx);
  231. else
  232. trace_function(tr, ip, parent_ip, trace_ctx);
  233. }
  234. #else
  235. #define __trace_function trace_function
  236. static enum print_line_t irqsoff_print_line(struct trace_iterator *iter)
  237. {
  238. return TRACE_TYPE_UNHANDLED;
  239. }
  240. static void irqsoff_trace_open(struct trace_iterator *iter) { }
  241. static void irqsoff_trace_close(struct trace_iterator *iter) { }
  242. #ifdef CONFIG_FUNCTION_TRACER
  243. static void irqsoff_print_header(struct seq_file *s)
  244. {
  245. trace_default_header(s);
  246. }
  247. #else
  248. static void irqsoff_print_header(struct seq_file *s)
  249. {
  250. trace_latency_header(s);
  251. }
  252. #endif /* CONFIG_FUNCTION_TRACER */
  253. #endif /* CONFIG_FUNCTION_GRAPH_TRACER */
  254. /*
  255. * Should this new latency be reported/recorded?
  256. */
  257. static bool report_latency(struct trace_array *tr, u64 delta)
  258. {
  259. if (tracing_thresh) {
  260. if (delta < tracing_thresh)
  261. return false;
  262. } else {
  263. if (delta <= tr->max_latency)
  264. return false;
  265. }
  266. return true;
  267. }
  268. static void
  269. check_critical_timing(struct trace_array *tr,
  270. struct trace_array_cpu *data,
  271. unsigned long parent_ip,
  272. int cpu)
  273. {
  274. u64 T0, T1, delta;
  275. unsigned long flags;
  276. unsigned int trace_ctx;
  277. T0 = data->preempt_timestamp;
  278. T1 = ftrace_now(cpu);
  279. delta = T1-T0;
  280. trace_ctx = tracing_gen_ctx();
  281. if (!report_latency(tr, delta))
  282. goto out;
  283. raw_spin_lock_irqsave(&max_trace_lock, flags);
  284. /* check if we are still the max latency */
  285. if (!report_latency(tr, delta))
  286. goto out_unlock;
  287. __trace_function(tr, CALLER_ADDR0, parent_ip, trace_ctx);
  288. /* Skip 5 functions to get to the irq/preempt enable function */
  289. __trace_stack(tr, trace_ctx, 5);
  290. if (data->critical_sequence != max_sequence)
  291. goto out_unlock;
  292. data->critical_end = parent_ip;
  293. if (likely(!is_tracing_stopped())) {
  294. tr->max_latency = delta;
  295. update_max_tr_single(tr, current, cpu);
  296. }
  297. max_sequence++;
  298. out_unlock:
  299. raw_spin_unlock_irqrestore(&max_trace_lock, flags);
  300. out:
  301. data->critical_sequence = max_sequence;
  302. data->preempt_timestamp = ftrace_now(cpu);
  303. __trace_function(tr, CALLER_ADDR0, parent_ip, trace_ctx);
  304. }
  305. static nokprobe_inline void
  306. start_critical_timing(unsigned long ip, unsigned long parent_ip)
  307. {
  308. int cpu;
  309. struct trace_array *tr = irqsoff_trace;
  310. struct trace_array_cpu *data;
  311. if (!tracer_enabled || !tracing_is_enabled())
  312. return;
  313. cpu = raw_smp_processor_id();
  314. if (per_cpu(tracing_cpu, cpu))
  315. return;
  316. data = per_cpu_ptr(tr->array_buffer.data, cpu);
  317. if (unlikely(!data) || atomic_read(&data->disabled))
  318. return;
  319. atomic_inc(&data->disabled);
  320. data->critical_sequence = max_sequence;
  321. data->preempt_timestamp = ftrace_now(cpu);
  322. data->critical_start = parent_ip ? : ip;
  323. __trace_function(tr, ip, parent_ip, tracing_gen_ctx());
  324. per_cpu(tracing_cpu, cpu) = 1;
  325. atomic_dec(&data->disabled);
  326. }
  327. static nokprobe_inline void
  328. stop_critical_timing(unsigned long ip, unsigned long parent_ip)
  329. {
  330. int cpu;
  331. struct trace_array *tr = irqsoff_trace;
  332. struct trace_array_cpu *data;
  333. unsigned int trace_ctx;
  334. cpu = raw_smp_processor_id();
  335. /* Always clear the tracing cpu on stopping the trace */
  336. if (unlikely(per_cpu(tracing_cpu, cpu)))
  337. per_cpu(tracing_cpu, cpu) = 0;
  338. else
  339. return;
  340. if (!tracer_enabled || !tracing_is_enabled())
  341. return;
  342. data = per_cpu_ptr(tr->array_buffer.data, cpu);
  343. if (unlikely(!data) ||
  344. !data->critical_start || atomic_read(&data->disabled))
  345. return;
  346. atomic_inc(&data->disabled);
  347. trace_ctx = tracing_gen_ctx();
  348. __trace_function(tr, ip, parent_ip, trace_ctx);
  349. check_critical_timing(tr, data, parent_ip ? : ip, cpu);
  350. data->critical_start = 0;
  351. atomic_dec(&data->disabled);
  352. }
  353. /* start and stop critical timings used to for stoppage (in idle) */
  354. void start_critical_timings(void)
  355. {
  356. if (preempt_trace(preempt_count()) || irq_trace())
  357. start_critical_timing(CALLER_ADDR0, CALLER_ADDR1);
  358. }
  359. EXPORT_SYMBOL_GPL(start_critical_timings);
  360. NOKPROBE_SYMBOL(start_critical_timings);
  361. void stop_critical_timings(void)
  362. {
  363. if (preempt_trace(preempt_count()) || irq_trace())
  364. stop_critical_timing(CALLER_ADDR0, CALLER_ADDR1);
  365. }
  366. EXPORT_SYMBOL_GPL(stop_critical_timings);
  367. NOKPROBE_SYMBOL(stop_critical_timings);
  368. #ifdef CONFIG_FUNCTION_TRACER
  369. static bool function_enabled;
  370. static int register_irqsoff_function(struct trace_array *tr, int graph, int set)
  371. {
  372. int ret;
  373. /* 'set' is set if TRACE_ITER_FUNCTION is about to be set */
  374. if (function_enabled || (!set && !(tr->trace_flags & TRACE_ITER_FUNCTION)))
  375. return 0;
  376. if (graph)
  377. ret = register_ftrace_graph(&fgraph_ops);
  378. else
  379. ret = register_ftrace_function(tr->ops);
  380. if (!ret)
  381. function_enabled = true;
  382. return ret;
  383. }
  384. static void unregister_irqsoff_function(struct trace_array *tr, int graph)
  385. {
  386. if (!function_enabled)
  387. return;
  388. if (graph)
  389. unregister_ftrace_graph(&fgraph_ops);
  390. else
  391. unregister_ftrace_function(tr->ops);
  392. function_enabled = false;
  393. }
  394. static int irqsoff_function_set(struct trace_array *tr, u32 mask, int set)
  395. {
  396. if (!(mask & TRACE_ITER_FUNCTION))
  397. return 0;
  398. if (set)
  399. register_irqsoff_function(tr, is_graph(tr), 1);
  400. else
  401. unregister_irqsoff_function(tr, is_graph(tr));
  402. return 1;
  403. }
  404. #else
  405. static int register_irqsoff_function(struct trace_array *tr, int graph, int set)
  406. {
  407. return 0;
  408. }
  409. static void unregister_irqsoff_function(struct trace_array *tr, int graph) { }
  410. static inline int irqsoff_function_set(struct trace_array *tr, u32 mask, int set)
  411. {
  412. return 0;
  413. }
  414. #endif /* CONFIG_FUNCTION_TRACER */
  415. static int irqsoff_flag_changed(struct trace_array *tr, u32 mask, int set)
  416. {
  417. struct tracer *tracer = tr->current_trace;
  418. if (irqsoff_function_set(tr, mask, set))
  419. return 0;
  420. #ifdef CONFIG_FUNCTION_GRAPH_TRACER
  421. if (mask & TRACE_ITER_DISPLAY_GRAPH)
  422. return irqsoff_display_graph(tr, set);
  423. #endif
  424. return trace_keep_overwrite(tracer, mask, set);
  425. }
  426. static int start_irqsoff_tracer(struct trace_array *tr, int graph)
  427. {
  428. int ret;
  429. ret = register_irqsoff_function(tr, graph, 0);
  430. if (!ret && tracing_is_enabled())
  431. tracer_enabled = 1;
  432. else
  433. tracer_enabled = 0;
  434. return ret;
  435. }
  436. static void stop_irqsoff_tracer(struct trace_array *tr, int graph)
  437. {
  438. tracer_enabled = 0;
  439. unregister_irqsoff_function(tr, graph);
  440. }
  441. static bool irqsoff_busy;
  442. static int __irqsoff_tracer_init(struct trace_array *tr)
  443. {
  444. if (irqsoff_busy)
  445. return -EBUSY;
  446. save_flags = tr->trace_flags;
  447. /* non overwrite screws up the latency tracers */
  448. set_tracer_flag(tr, TRACE_ITER_OVERWRITE, 1);
  449. set_tracer_flag(tr, TRACE_ITER_LATENCY_FMT, 1);
  450. /* without pause, we will produce garbage if another latency occurs */
  451. set_tracer_flag(tr, TRACE_ITER_PAUSE_ON_TRACE, 1);
  452. tr->max_latency = 0;
  453. irqsoff_trace = tr;
  454. /* make sure that the tracer is visible */
  455. smp_wmb();
  456. ftrace_init_array_ops(tr, irqsoff_tracer_call);
  457. /* Only toplevel instance supports graph tracing */
  458. if (start_irqsoff_tracer(tr, (tr->flags & TRACE_ARRAY_FL_GLOBAL &&
  459. is_graph(tr))))
  460. printk(KERN_ERR "failed to start irqsoff tracer\n");
  461. irqsoff_busy = true;
  462. return 0;
  463. }
  464. static void __irqsoff_tracer_reset(struct trace_array *tr)
  465. {
  466. int lat_flag = save_flags & TRACE_ITER_LATENCY_FMT;
  467. int overwrite_flag = save_flags & TRACE_ITER_OVERWRITE;
  468. int pause_flag = save_flags & TRACE_ITER_PAUSE_ON_TRACE;
  469. stop_irqsoff_tracer(tr, is_graph(tr));
  470. set_tracer_flag(tr, TRACE_ITER_LATENCY_FMT, lat_flag);
  471. set_tracer_flag(tr, TRACE_ITER_OVERWRITE, overwrite_flag);
  472. set_tracer_flag(tr, TRACE_ITER_PAUSE_ON_TRACE, pause_flag);
  473. ftrace_reset_array_ops(tr);
  474. irqsoff_busy = false;
  475. }
  476. static void irqsoff_tracer_start(struct trace_array *tr)
  477. {
  478. tracer_enabled = 1;
  479. }
  480. static void irqsoff_tracer_stop(struct trace_array *tr)
  481. {
  482. tracer_enabled = 0;
  483. }
  484. #ifdef CONFIG_IRQSOFF_TRACER
  485. /*
  486. * We are only interested in hardirq on/off events:
  487. */
  488. void tracer_hardirqs_on(unsigned long a0, unsigned long a1)
  489. {
  490. if (!preempt_trace(preempt_count()) && irq_trace())
  491. stop_critical_timing(a0, a1);
  492. }
  493. NOKPROBE_SYMBOL(tracer_hardirqs_on);
  494. void tracer_hardirqs_off(unsigned long a0, unsigned long a1)
  495. {
  496. if (!preempt_trace(preempt_count()) && irq_trace())
  497. start_critical_timing(a0, a1);
  498. }
  499. NOKPROBE_SYMBOL(tracer_hardirqs_off);
  500. static int irqsoff_tracer_init(struct trace_array *tr)
  501. {
  502. trace_type = TRACER_IRQS_OFF;
  503. return __irqsoff_tracer_init(tr);
  504. }
  505. static void irqsoff_tracer_reset(struct trace_array *tr)
  506. {
  507. __irqsoff_tracer_reset(tr);
  508. }
  509. static struct tracer irqsoff_tracer __read_mostly =
  510. {
  511. .name = "irqsoff",
  512. .init = irqsoff_tracer_init,
  513. .reset = irqsoff_tracer_reset,
  514. .start = irqsoff_tracer_start,
  515. .stop = irqsoff_tracer_stop,
  516. .print_max = true,
  517. .print_header = irqsoff_print_header,
  518. .print_line = irqsoff_print_line,
  519. .flag_changed = irqsoff_flag_changed,
  520. #ifdef CONFIG_FTRACE_SELFTEST
  521. .selftest = trace_selftest_startup_irqsoff,
  522. #endif
  523. .open = irqsoff_trace_open,
  524. .close = irqsoff_trace_close,
  525. .allow_instances = true,
  526. .use_max_tr = true,
  527. };
  528. #endif /* CONFIG_IRQSOFF_TRACER */
  529. #ifdef CONFIG_PREEMPT_TRACER
  530. void tracer_preempt_on(unsigned long a0, unsigned long a1)
  531. {
  532. if (preempt_trace(preempt_count()) && !irq_trace())
  533. stop_critical_timing(a0, a1);
  534. }
  535. void tracer_preempt_off(unsigned long a0, unsigned long a1)
  536. {
  537. if (preempt_trace(preempt_count()) && !irq_trace())
  538. start_critical_timing(a0, a1);
  539. }
  540. static int preemptoff_tracer_init(struct trace_array *tr)
  541. {
  542. trace_type = TRACER_PREEMPT_OFF;
  543. return __irqsoff_tracer_init(tr);
  544. }
  545. static void preemptoff_tracer_reset(struct trace_array *tr)
  546. {
  547. __irqsoff_tracer_reset(tr);
  548. }
  549. static struct tracer preemptoff_tracer __read_mostly =
  550. {
  551. .name = "preemptoff",
  552. .init = preemptoff_tracer_init,
  553. .reset = preemptoff_tracer_reset,
  554. .start = irqsoff_tracer_start,
  555. .stop = irqsoff_tracer_stop,
  556. .print_max = true,
  557. .print_header = irqsoff_print_header,
  558. .print_line = irqsoff_print_line,
  559. .flag_changed = irqsoff_flag_changed,
  560. #ifdef CONFIG_FTRACE_SELFTEST
  561. .selftest = trace_selftest_startup_preemptoff,
  562. #endif
  563. .open = irqsoff_trace_open,
  564. .close = irqsoff_trace_close,
  565. .allow_instances = true,
  566. .use_max_tr = true,
  567. };
  568. #endif /* CONFIG_PREEMPT_TRACER */
  569. #if defined(CONFIG_IRQSOFF_TRACER) && defined(CONFIG_PREEMPT_TRACER)
  570. static int preemptirqsoff_tracer_init(struct trace_array *tr)
  571. {
  572. trace_type = TRACER_IRQS_OFF | TRACER_PREEMPT_OFF;
  573. return __irqsoff_tracer_init(tr);
  574. }
  575. static void preemptirqsoff_tracer_reset(struct trace_array *tr)
  576. {
  577. __irqsoff_tracer_reset(tr);
  578. }
  579. static struct tracer preemptirqsoff_tracer __read_mostly =
  580. {
  581. .name = "preemptirqsoff",
  582. .init = preemptirqsoff_tracer_init,
  583. .reset = preemptirqsoff_tracer_reset,
  584. .start = irqsoff_tracer_start,
  585. .stop = irqsoff_tracer_stop,
  586. .print_max = true,
  587. .print_header = irqsoff_print_header,
  588. .print_line = irqsoff_print_line,
  589. .flag_changed = irqsoff_flag_changed,
  590. #ifdef CONFIG_FTRACE_SELFTEST
  591. .selftest = trace_selftest_startup_preemptirqsoff,
  592. #endif
  593. .open = irqsoff_trace_open,
  594. .close = irqsoff_trace_close,
  595. .allow_instances = true,
  596. .use_max_tr = true,
  597. };
  598. #endif
  599. __init static int init_irqsoff_tracer(void)
  600. {
  601. #ifdef CONFIG_IRQSOFF_TRACER
  602. register_tracer(&irqsoff_tracer);
  603. #endif
  604. #ifdef CONFIG_PREEMPT_TRACER
  605. register_tracer(&preemptoff_tracer);
  606. #endif
  607. #if defined(CONFIG_IRQSOFF_TRACER) && defined(CONFIG_PREEMPT_TRACER)
  608. register_tracer(&preemptirqsoff_tracer);
  609. #endif
  610. return 0;
  611. }
  612. core_initcall(init_irqsoff_tracer);
  613. #endif /* IRQSOFF_TRACER || PREEMPTOFF_TRACER */