// SPDX-License-Identifier: GPL-2.0
/*
 * ring buffer based function tracer
 *
 * Copyright (C) 2007-2008 Steven Rostedt <srostedt@redhat.com>
 * Copyright (C) 2008 Ingo Molnar <mingo@redhat.com>
 *
 * Based on code from the latency_tracer, that is:
 *
 * Copyright (C) 2004-2006 Ingo Molnar
 * Copyright (C) 2004 Nadia Yvette Chambers
 */
#include <linux/ring_buffer.h>
#include <linux/debugfs.h>
#include <linux/uaccess.h>
#include <linux/ftrace.h>
#include <linux/slab.h>
#include <linux/fs.h>

#include "trace.h"

static void tracing_start_function_trace(struct trace_array *tr);
static void tracing_stop_function_trace(struct trace_array *tr);
static void
function_trace_call(unsigned long ip, unsigned long parent_ip,
		    struct ftrace_ops *op, struct pt_regs *pt_regs);
static void
function_stack_trace_call(unsigned long ip, unsigned long parent_ip,
			  struct ftrace_ops *op, struct pt_regs *pt_regs);
static struct tracer_flags func_flags;

/* Our option */
enum {
	TRACE_FUNC_OPT_STACK = 0x1,
};

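/*
 * Allocate the ftrace_ops used by a trace instance and link it to its
 * trace_array, so the callback can find the instance it belongs to.
 */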
static int allocate_ftrace_ops(struct trace_array *tr)
{
	struct ftrace_ops *ops;

	ops = kzalloc(sizeof(*ops), GFP_KERNEL);
	if (!ops)
		return -ENOMEM;

	/* Currently only the non stack version is supported */
	ops->func = function_trace_call;
	ops->flags = FTRACE_OPS_FL_RECURSION_SAFE | FTRACE_OPS_FL_PID;

	tr->ops = ops;
	ops->private = tr;
	return 0;
}

int ftrace_create_function_files(struct trace_array *tr,
				 struct dentry *parent)
{
	int ret;

	/*
	 * The top level array uses the "global_ops", and the files are
	 * created on boot up.
	 */
	if (tr->flags & TRACE_ARRAY_FL_GLOBAL)
		return 0;

	ret = allocate_ftrace_ops(tr);
	if (ret)
		return ret;

	ftrace_create_filter_files(tr->ops, parent);

	return 0;
}

void ftrace_destroy_function_files(struct trace_array *tr)
{
	ftrace_destroy_filter_files(tr->ops);
	kfree(tr->ops);
	tr->ops = NULL;
}

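/*
 * Set up the "function" tracer on this instance: pick the plain or the
 * stack-tracing callback, install it as the array ops and start recording.
 */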
static int function_trace_init(struct trace_array *tr)
{
	ftrace_func_t func;

	/*
	 * Instance trace_arrays get their ops allocated at instance
	 * creation, unless that allocation failed.
	 */
	if (!tr->ops)
		return -ENOMEM;

	/* Currently only the global instance can do stack tracing */
	if (tr->flags & TRACE_ARRAY_FL_GLOBAL &&
	    func_flags.val & TRACE_FUNC_OPT_STACK)
		func = function_stack_trace_call;
	else
		func = function_trace_call;

	ftrace_init_array_ops(tr, func);

	tr->trace_buffer.cpu = get_cpu();
	put_cpu();

	tracing_start_cmdline_record();
	tracing_start_function_trace(tr);
	return 0;
}

static void function_trace_reset(struct trace_array *tr)
{
	tracing_stop_function_trace(tr);
	tracing_stop_cmdline_record();
	ftrace_reset_array_ops(tr);
}

static void function_trace_start(struct trace_array *tr)
{
	tracing_reset_online_cpus(&tr->trace_buffer);
}

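/*
 * Per-function callback of the plain function tracer: record the traced
 * function and its parent in the ring buffer. Recursion protection and
 * the per-cpu "disabled" counter guard against re-entry.
 */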
static void
function_trace_call(unsigned long ip, unsigned long parent_ip,
		    struct ftrace_ops *op, struct pt_regs *pt_regs)
{
	struct trace_array *tr = op->private;
	struct trace_array_cpu *data;
	unsigned long flags;
	int bit;
	int cpu;
	int pc;

	if (unlikely(!tr->function_enabled))
		return;

	pc = preempt_count();
	preempt_disable_notrace();

	bit = trace_test_and_set_recursion(TRACE_FTRACE_START, TRACE_FTRACE_MAX);
	if (bit < 0)
		goto out;

	cpu = smp_processor_id();
	data = per_cpu_ptr(tr->trace_buffer.data, cpu);
	if (!atomic_read(&data->disabled)) {
		local_save_flags(flags);
		trace_function(tr, ip, parent_ip, flags, pc);
	}
	trace_clear_recursion(bit);

 out:
	preempt_enable_notrace();
}

#ifdef CONFIG_UNWINDER_ORC
/*
 * Skip 2:
 *
 *   function_stack_trace_call()
 *   ftrace_call()
 */
#define STACK_SKIP 2
#else
/*
 * Skip 3:
 *
 *   __trace_stack()
 *   function_stack_trace_call()
 *   ftrace_call()
 */
#define STACK_SKIP 3
#endif

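/*
 * Like function_trace_call(), but also record a stack trace for every
 * function entry. Used when the func_stack_trace option is set.
 */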
static void
function_stack_trace_call(unsigned long ip, unsigned long parent_ip,
			  struct ftrace_ops *op, struct pt_regs *pt_regs)
{
	struct trace_array *tr = op->private;
	struct trace_array_cpu *data;
	unsigned long flags;
	long disabled;
	int cpu;
	int pc;

	if (unlikely(!tr->function_enabled))
		return;

	/*
	 * Need to use raw, since this must be called before the
	 * recursive protection is performed.
	 */
	local_irq_save(flags);
	cpu = raw_smp_processor_id();
	data = per_cpu_ptr(tr->trace_buffer.data, cpu);
	disabled = atomic_inc_return(&data->disabled);

	if (likely(disabled == 1)) {
		pc = preempt_count();
		trace_function(tr, ip, parent_ip, flags, pc);
		__trace_stack(tr, flags, STACK_SKIP, pc);
	}

	atomic_dec(&data->disabled);
	local_irq_restore(flags);
}

static struct tracer_opt func_opts[] = {
#ifdef CONFIG_STACKTRACE
	{ TRACER_OPT(func_stack_trace, TRACE_FUNC_OPT_STACK) },
#endif
	{ } /* Always set a last empty entry */
};

static struct tracer_flags func_flags = {
	.val = 0, /* By default: all flags disabled */
	.opts = func_opts
};

static void tracing_start_function_trace(struct trace_array *tr)
{
	tr->function_enabled = 0;
	register_ftrace_function(tr->ops);
	tr->function_enabled = 1;
}

static void tracing_stop_function_trace(struct trace_array *tr)
{
	tr->function_enabled = 0;
	unregister_ftrace_function(tr->ops);
}

static struct tracer function_trace;

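/*
 * Handle toggling of the func_stack_trace option: swap the registered
 * callback between the plain and the stack-tracing variant. The swap is
 * only performed while the function tracer is the current tracer;
 * otherwise the flag change is simply recorded.
 */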
static int
func_set_flag(struct trace_array *tr, u32 old_flags, u32 bit, int set)
{
	switch (bit) {
	case TRACE_FUNC_OPT_STACK:
		/* do nothing if already set */
		if (!!set == !!(func_flags.val & TRACE_FUNC_OPT_STACK))
			break;

		/* We can change this flag when not running. */
		if (tr->current_trace != &function_trace)
			break;

		unregister_ftrace_function(tr->ops);

		if (set) {
			tr->ops->func = function_stack_trace_call;
			register_ftrace_function(tr->ops);
		} else {
			tr->ops->func = function_trace_call;
			register_ftrace_function(tr->ops);
		}

		break;
	default:
		return -EINVAL;
	}

	return 0;
}

static struct tracer function_trace __tracer_data =
{
	.name = "function",
	.init = function_trace_init,
	.reset = function_trace_reset,
	.start = function_trace_start,
	.flags = &func_flags,
	.set_flag = func_set_flag,
	.allow_instances = true,
#ifdef CONFIG_FTRACE_SELFTEST
	.selftest = trace_selftest_startup_function,
#endif
};

#ifdef CONFIG_DYNAMIC_FTRACE

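/*
 * The probes below implement the function triggers that can be attached
 * to individual functions through set_ftrace_filter, e.g.:
 *   echo 'schedule:traceon:2' > set_ftrace_filter
 * They come in counted and unlimited variants.
 */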
static void update_traceon_count(struct ftrace_probe_ops *ops,
				 unsigned long ip,
				 struct trace_array *tr, bool on,
				 void *data)
{
	struct ftrace_func_mapper *mapper = data;
	long *count;
	long old_count;

	/*
	 * Tracing gets disabled (or enabled) once per count.
	 * This function can be called at the same time on multiple CPUs.
	 * It is fine if both disable (or enable) tracing, as disabling
	 * (or enabling) the second time doesn't do anything as the
	 * state of the tracer is already disabled (or enabled).
	 * What needs to be synchronized in this case is that the count
	 * only gets decremented once, even if the tracer is disabled
	 * (or enabled) twice, as the second one is really a nop.
	 *
	 * The memory barriers guarantee that we only decrement the
	 * counter once. First the count is read to a local variable
	 * and a read barrier is used to make sure that it is loaded
	 * before checking if the tracer is in the state we want.
	 * If the tracer is not in the state we want, then the count
	 * is guaranteed to be the old count.
	 *
	 * Next the tracer is set to the state we want (disabled or enabled)
	 * then a write memory barrier is used to make sure that
	 * the new state is visible before the counter is updated to
	 * one less than its old value. This guarantees that another CPU
	 * executing this code will see the new state before seeing
	 * the new counter value, and would not do anything if the new
	 * counter is seen.
	 *
	 * Note, there is no synchronization between this and a user
	 * setting the tracing_on file. But we currently don't care
	 * about that.
	 */
	count = (long *)ftrace_func_mapper_find_ip(mapper, ip);
	old_count = *count;

	if (old_count <= 0)
		return;

	/* Make sure we see count before checking tracing state */
	smp_rmb();

	if (on == !!tracer_tracing_is_on(tr))
		return;

	if (on)
		tracer_tracing_on(tr);
	else
		tracer_tracing_off(tr);

	/* Make sure tracing state is visible before updating count */
	smp_wmb();

	*count = old_count - 1;
}

static void
ftrace_traceon_count(unsigned long ip, unsigned long parent_ip,
		     struct trace_array *tr, struct ftrace_probe_ops *ops,
		     void *data)
{
	update_traceon_count(ops, ip, tr, 1, data);
}

static void
ftrace_traceoff_count(unsigned long ip, unsigned long parent_ip,
		      struct trace_array *tr, struct ftrace_probe_ops *ops,
		      void *data)
{
	update_traceon_count(ops, ip, tr, 0, data);
}

static void
ftrace_traceon(unsigned long ip, unsigned long parent_ip,
	       struct trace_array *tr, struct ftrace_probe_ops *ops,
	       void *data)
{
	if (tracer_tracing_is_on(tr))
		return;

	tracer_tracing_on(tr);
}

static void
ftrace_traceoff(unsigned long ip, unsigned long parent_ip,
		struct trace_array *tr, struct ftrace_probe_ops *ops,
		void *data)
{
	if (!tracer_tracing_is_on(tr))
		return;

	tracer_tracing_off(tr);
}

#ifdef CONFIG_UNWINDER_ORC
/*
 * Skip 3:
 *
 *   function_trace_probe_call()
 *   ftrace_ops_assist_func()
 *   ftrace_call()
 */
#define FTRACE_STACK_SKIP 3
#else
/*
 * Skip 5:
 *
 *   __trace_stack()
 *   ftrace_stacktrace()
 *   function_trace_probe_call()
 *   ftrace_ops_assist_func()
 *   ftrace_call()
 */
#define FTRACE_STACK_SKIP 5
#endif

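/*
 * Record a stack trace into the trace buffer, skipping the internal
 * ftrace frames counted by FTRACE_STACK_SKIP above.
 */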
static __always_inline void trace_stack(struct trace_array *tr)
{
	unsigned long flags;
	int pc;

	local_save_flags(flags);
	pc = preempt_count();

	__trace_stack(tr, flags, FTRACE_STACK_SKIP, pc);
}

static void
ftrace_stacktrace(unsigned long ip, unsigned long parent_ip,
		  struct trace_array *tr, struct ftrace_probe_ops *ops,
		  void *data)
{
	trace_stack(tr);
}

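/*
 * Counted variant of the stacktrace probe: record at most "count" stack
 * traces for this ip. The cmpxchg() loop makes sure the counter is
 * decremented exactly once per recorded trace, even when several CPUs
 * hit the probe at the same time.
 */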
static void
ftrace_stacktrace_count(unsigned long ip, unsigned long parent_ip,
			struct trace_array *tr, struct ftrace_probe_ops *ops,
			void *data)
{
	struct ftrace_func_mapper *mapper = data;
	long *count;
	long old_count;
	long new_count;

	if (!tracing_is_on())
		return;

	/* unlimited? */
	if (!mapper) {
		trace_stack(tr);
		return;
	}

	count = (long *)ftrace_func_mapper_find_ip(mapper, ip);

	/*
	 * Stack traces should only execute the number of times the
	 * user specified in the counter.
	 */
	do {
		old_count = *count;

		if (!old_count)
			return;

		new_count = old_count - 1;
		new_count = cmpxchg(count, old_count, new_count);
		if (new_count == old_count)
			trace_stack(tr);

		if (!tracing_is_on())
			return;
	} while (new_count != old_count);
}

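/*
 * Return 1 if the probe should still fire, decrementing the remaining
 * count if there is one. A missing counter means "unlimited".
 */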
static int update_count(struct ftrace_probe_ops *ops, unsigned long ip,
			void *data)
{
	struct ftrace_func_mapper *mapper = data;
	long *count = NULL;

	if (mapper)
		count = (long *)ftrace_func_mapper_find_ip(mapper, ip);

	if (count) {
		if (*count <= 0)
			return 0;
		(*count)--;
	}

	return 1;
}

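/* Dump the entire ftrace ring buffer (all CPUs). */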
static void
ftrace_dump_probe(unsigned long ip, unsigned long parent_ip,
		  struct trace_array *tr, struct ftrace_probe_ops *ops,
		  void *data)
{
	if (update_count(ops, ip, data))
		ftrace_dump(DUMP_ALL);
}

/* Only dump the current CPU buffer. */
static void
ftrace_cpudump_probe(unsigned long ip, unsigned long parent_ip,
		     struct trace_array *tr, struct ftrace_probe_ops *ops,
		     void *data)
{
	if (update_count(ops, ip, data))
		ftrace_dump(DUMP_ORIG);
}

static int
ftrace_probe_print(const char *name, struct seq_file *m,
		   unsigned long ip, struct ftrace_probe_ops *ops,
		   void *data)
{
	struct ftrace_func_mapper *mapper = data;
	long *count = NULL;

	seq_printf(m, "%ps:%s", (void *)ip, name);

	if (mapper)
		count = (long *)ftrace_func_mapper_find_ip(mapper, ip);

	if (count)
		seq_printf(m, ":count=%ld\n", *count);
	else
		seq_puts(m, ":unlimited\n");

	return 0;
}

static int
ftrace_traceon_print(struct seq_file *m, unsigned long ip,
		     struct ftrace_probe_ops *ops,
		     void *data)
{
	return ftrace_probe_print("traceon", m, ip, ops, data);
}

static int
ftrace_traceoff_print(struct seq_file *m, unsigned long ip,
		      struct ftrace_probe_ops *ops, void *data)
{
	return ftrace_probe_print("traceoff", m, ip, ops, data);
}

static int
ftrace_stacktrace_print(struct seq_file *m, unsigned long ip,
			struct ftrace_probe_ops *ops, void *data)
{
	return ftrace_probe_print("stacktrace", m, ip, ops, data);
}

static int
ftrace_dump_print(struct seq_file *m, unsigned long ip,
		  struct ftrace_probe_ops *ops, void *data)
{
	return ftrace_probe_print("dump", m, ip, ops, data);
}

static int
ftrace_cpudump_print(struct seq_file *m, unsigned long ip,
		     struct ftrace_probe_ops *ops, void *data)
{
	return ftrace_probe_print("cpudump", m, ip, ops, data);
}

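/*
 * Lazily allocate the ip -> count mapper the first time a counted probe
 * is registered, and record the initial count for this ip.
 */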
static int
ftrace_count_init(struct ftrace_probe_ops *ops, struct trace_array *tr,
		  unsigned long ip, void *init_data, void **data)
{
	struct ftrace_func_mapper *mapper = *data;

	if (!mapper) {
		mapper = allocate_ftrace_func_mapper();
		if (!mapper)
			return -ENOMEM;
		*data = mapper;
	}

	return ftrace_func_mapper_add_ip(mapper, ip, init_data);
}

static void
ftrace_count_free(struct ftrace_probe_ops *ops, struct trace_array *tr,
		  unsigned long ip, void *data)
{
	struct ftrace_func_mapper *mapper = data;

	if (!ip) {
		free_ftrace_func_mapper(mapper, NULL);
		return;
	}

	ftrace_func_mapper_remove_ip(mapper, ip);
}

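/*
 * The probe ops for each command. Variants with .init/.free use the
 * func mapper to keep a per-ip counter.
 */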
static struct ftrace_probe_ops traceon_count_probe_ops = {
	.func = ftrace_traceon_count,
	.print = ftrace_traceon_print,
	.init = ftrace_count_init,
	.free = ftrace_count_free,
};

static struct ftrace_probe_ops traceoff_count_probe_ops = {
	.func = ftrace_traceoff_count,
	.print = ftrace_traceoff_print,
	.init = ftrace_count_init,
	.free = ftrace_count_free,
};

static struct ftrace_probe_ops stacktrace_count_probe_ops = {
	.func = ftrace_stacktrace_count,
	.print = ftrace_stacktrace_print,
	.init = ftrace_count_init,
	.free = ftrace_count_free,
};

static struct ftrace_probe_ops dump_probe_ops = {
	.func = ftrace_dump_probe,
	.print = ftrace_dump_print,
	.init = ftrace_count_init,
	.free = ftrace_count_free,
};

static struct ftrace_probe_ops cpudump_probe_ops = {
	.func = ftrace_cpudump_probe,
	.print = ftrace_cpudump_print,
};

static struct ftrace_probe_ops traceon_probe_ops = {
	.func = ftrace_traceon,
	.print = ftrace_traceon_print,
};

static struct ftrace_probe_ops traceoff_probe_ops = {
	.func = ftrace_traceoff,
	.print = ftrace_traceoff_print,
};

static struct ftrace_probe_ops stacktrace_probe_ops = {
	.func = ftrace_stacktrace,
	.print = ftrace_stacktrace_print,
};

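/*
 * Common handling for the probe commands: a leading '!' removes the
 * probe, an optional ":count" parameter limits how many times it fires,
 * and the probe is registered on all functions matching the glob.
 */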
static int
ftrace_trace_probe_callback(struct trace_array *tr,
			    struct ftrace_probe_ops *ops,
			    struct ftrace_hash *hash, char *glob,
			    char *cmd, char *param, int enable)
{
	void *count = (void *)-1;
	char *number;
	int ret;

	/* hash funcs only work with set_ftrace_filter */
	if (!enable)
		return -EINVAL;

	if (glob[0] == '!')
		return unregister_ftrace_function_probe_func(glob+1, tr, ops);

	if (!param)
		goto out_reg;

	number = strsep(&param, ":");

	if (!strlen(number))
		goto out_reg;

	/*
	 * We use the callback data field (which is a pointer)
	 * as our counter.
	 */
	ret = kstrtoul(number, 0, (unsigned long *)&count);
	if (ret)
		return ret;

 out_reg:
	ret = register_ftrace_function_probe(glob, tr, ops, count);

	return ret < 0 ? ret : 0;
}

static int
ftrace_trace_onoff_callback(struct trace_array *tr, struct ftrace_hash *hash,
			    char *glob, char *cmd, char *param, int enable)
{
	struct ftrace_probe_ops *ops;

	if (!tr)
		return -ENODEV;

	/* we register both traceon and traceoff to this callback */
	if (strcmp(cmd, "traceon") == 0)
		ops = param ? &traceon_count_probe_ops : &traceon_probe_ops;
	else
		ops = param ? &traceoff_count_probe_ops : &traceoff_probe_ops;

	return ftrace_trace_probe_callback(tr, ops, hash, glob, cmd,
					   param, enable);
}

static int
ftrace_stacktrace_callback(struct trace_array *tr, struct ftrace_hash *hash,
			   char *glob, char *cmd, char *param, int enable)
{
	struct ftrace_probe_ops *ops;

	if (!tr)
		return -ENODEV;

	ops = param ? &stacktrace_count_probe_ops : &stacktrace_probe_ops;

	return ftrace_trace_probe_callback(tr, ops, hash, glob, cmd,
					   param, enable);
}

static int
ftrace_dump_callback(struct trace_array *tr, struct ftrace_hash *hash,
		     char *glob, char *cmd, char *param, int enable)
{
	struct ftrace_probe_ops *ops;

	if (!tr)
		return -ENODEV;

	ops = &dump_probe_ops;

	/* Only dump once. */
	return ftrace_trace_probe_callback(tr, ops, hash, glob, cmd,
					   "1", enable);
}

static int
ftrace_cpudump_callback(struct trace_array *tr, struct ftrace_hash *hash,
			char *glob, char *cmd, char *param, int enable)
{
	struct ftrace_probe_ops *ops;

	if (!tr)
		return -ENODEV;

	ops = &cpudump_probe_ops;

	/* Only dump once. */
	return ftrace_trace_probe_callback(tr, ops, hash, glob, cmd,
					   "1", enable);
}

static struct ftrace_func_command ftrace_traceon_cmd = {
	.name = "traceon",
	.func = ftrace_trace_onoff_callback,
};

static struct ftrace_func_command ftrace_traceoff_cmd = {
	.name = "traceoff",
	.func = ftrace_trace_onoff_callback,
};

static struct ftrace_func_command ftrace_stacktrace_cmd = {
	.name = "stacktrace",
	.func = ftrace_stacktrace_callback,
};

static struct ftrace_func_command ftrace_dump_cmd = {
	.name = "dump",
	.func = ftrace_dump_callback,
};

static struct ftrace_func_command ftrace_cpudump_cmd = {
	.name = "cpudump",
	.func = ftrace_cpudump_callback,
};

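/*
 * Register all of the probe commands above, unwinding the ones already
 * registered if a later registration fails.
 */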
static int __init init_func_cmd_traceon(void)
{
	int ret;

	ret = register_ftrace_command(&ftrace_traceoff_cmd);
	if (ret)
		return ret;

	ret = register_ftrace_command(&ftrace_traceon_cmd);
	if (ret)
		goto out_free_traceoff;

	ret = register_ftrace_command(&ftrace_stacktrace_cmd);
	if (ret)
		goto out_free_traceon;

	ret = register_ftrace_command(&ftrace_dump_cmd);
	if (ret)
		goto out_free_stacktrace;

	ret = register_ftrace_command(&ftrace_cpudump_cmd);
	if (ret)
		goto out_free_dump;

	return 0;

 out_free_dump:
	unregister_ftrace_command(&ftrace_dump_cmd);
 out_free_stacktrace:
	unregister_ftrace_command(&ftrace_stacktrace_cmd);
 out_free_traceon:
	unregister_ftrace_command(&ftrace_traceon_cmd);
 out_free_traceoff:
	unregister_ftrace_command(&ftrace_traceoff_cmd);

	return ret;
}
#else
static inline int init_func_cmd_traceon(void)
{
	return 0;
}
#endif /* CONFIG_DYNAMIC_FTRACE */

__init int init_function_trace(void)
{
	init_func_cmd_traceon();
	return register_tracer(&function_trace);
}