trace_functions.c

// SPDX-License-Identifier: GPL-2.0
/*
 * ring buffer based function tracer
 *
 * Copyright (C) 2007-2008 Steven Rostedt <srostedt@redhat.com>
 * Copyright (C) 2008 Ingo Molnar <mingo@redhat.com>
 *
 * Based on code from the latency_tracer, that is:
 *
 * Copyright (C) 2004-2006 Ingo Molnar
 * Copyright (C) 2004 Nadia Yvette Chambers
 */
#include <linux/ring_buffer.h>
#include <linux/debugfs.h>
#include <linux/uaccess.h>
#include <linux/ftrace.h>
#include <linux/slab.h>
#include <linux/fs.h>

#include "trace.h"

static void tracing_start_function_trace(struct trace_array *tr);
static void tracing_stop_function_trace(struct trace_array *tr);
static void
function_trace_call(unsigned long ip, unsigned long parent_ip,
		    struct ftrace_ops *op, struct ftrace_regs *fregs);
static void
function_stack_trace_call(unsigned long ip, unsigned long parent_ip,
			  struct ftrace_ops *op, struct ftrace_regs *fregs);
static void
function_no_repeats_trace_call(unsigned long ip, unsigned long parent_ip,
			       struct ftrace_ops *op, struct ftrace_regs *fregs);
static void
function_stack_no_repeats_trace_call(unsigned long ip, unsigned long parent_ip,
				     struct ftrace_ops *op,
				     struct ftrace_regs *fregs);
static struct tracer_flags func_flags;

/* Our option */
enum {
	TRACE_FUNC_NO_OPTS = 0x0, /* No flags set. */
	TRACE_FUNC_OPT_STACK = 0x1,
	TRACE_FUNC_OPT_NO_REPEATS = 0x2,

	/* Update this to next highest bit. */
	TRACE_FUNC_OPT_HIGHEST_BIT = 0x4
};

#define TRACE_FUNC_OPT_MASK	(TRACE_FUNC_OPT_HIGHEST_BIT - 1)

int ftrace_allocate_ftrace_ops(struct trace_array *tr)
{
	struct ftrace_ops *ops;

	/* The top level array uses the "global_ops" */
	if (tr->flags & TRACE_ARRAY_FL_GLOBAL)
		return 0;

	ops = kzalloc(sizeof(*ops), GFP_KERNEL);
	if (!ops)
		return -ENOMEM;

	/* Currently only the non stack version is supported */
	ops->func = function_trace_call;
	ops->flags = FTRACE_OPS_FL_PID;

	tr->ops = ops;
	ops->private = tr;

	return 0;
}

void ftrace_free_ftrace_ops(struct trace_array *tr)
{
	kfree(tr->ops);
	tr->ops = NULL;
}

int ftrace_create_function_files(struct trace_array *tr,
				 struct dentry *parent)
{
	int ret;

	/*
	 * The top level array uses the "global_ops", and the files are
	 * created on boot up.
	 */
	if (tr->flags & TRACE_ARRAY_FL_GLOBAL)
		return 0;

	if (!tr->ops)
		return -EINVAL;

	ret = allocate_fgraph_ops(tr, tr->ops);
	if (ret) {
		kfree(tr->ops);
		return ret;
	}

	ftrace_create_filter_files(tr->ops, parent);

	return 0;
}

void ftrace_destroy_function_files(struct trace_array *tr)
{
	ftrace_destroy_filter_files(tr->ops);
	ftrace_free_ftrace_ops(tr);
	free_fgraph_ops(tr);
}
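
/* Return the tracer callback that matches the currently selected tracer options. */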
static ftrace_func_t select_trace_function(u32 flags_val)
{
	switch (flags_val & TRACE_FUNC_OPT_MASK) {
	case TRACE_FUNC_NO_OPTS:
		return function_trace_call;
	case TRACE_FUNC_OPT_STACK:
		return function_stack_trace_call;
	case TRACE_FUNC_OPT_NO_REPEATS:
		return function_no_repeats_trace_call;
	case TRACE_FUNC_OPT_STACK | TRACE_FUNC_OPT_NO_REPEATS:
		return function_stack_no_repeats_trace_call;
	default:
		return NULL;
	}
}
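
/* Allocate the per-CPU state for the "func-no-repeats" option on first use. */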
static bool handle_func_repeats(struct trace_array *tr, u32 flags_val)
{
	if (!tr->last_func_repeats &&
	    (flags_val & TRACE_FUNC_OPT_NO_REPEATS)) {
		tr->last_func_repeats = alloc_percpu(struct trace_func_repeats);
		if (!tr->last_func_repeats)
			return false;
	}

	return true;
}

static int function_trace_init(struct trace_array *tr)
{
	ftrace_func_t func;

	/*
	 * Instance trace_arrays get their ops allocated
	 * at instance creation. Unless it failed
	 * the allocation.
	 */
	if (!tr->ops)
		return -ENOMEM;

	func = select_trace_function(func_flags.val);
	if (!func)
		return -EINVAL;

	if (!handle_func_repeats(tr, func_flags.val))
		return -ENOMEM;

	ftrace_init_array_ops(tr, func);

	tr->array_buffer.cpu = raw_smp_processor_id();

	tracing_start_cmdline_record();
	tracing_start_function_trace(tr);
	return 0;
}

static void function_trace_reset(struct trace_array *tr)
{
	tracing_stop_function_trace(tr);
	tracing_stop_cmdline_record();
	ftrace_reset_array_ops(tr);
}

static void function_trace_start(struct trace_array *tr)
{
	tracing_reset_online_cpus(&tr->array_buffer);
}

static void
function_trace_call(unsigned long ip, unsigned long parent_ip,
		    struct ftrace_ops *op, struct ftrace_regs *fregs)
{
	struct trace_array *tr = op->private;
	struct trace_array_cpu *data;
	unsigned int trace_ctx;
	int bit;
	int cpu;

	if (unlikely(!tr->function_enabled))
		return;

	bit = ftrace_test_recursion_trylock(ip, parent_ip);
	if (bit < 0)
		return;

	trace_ctx = tracing_gen_ctx_dec();

	cpu = smp_processor_id();
	data = per_cpu_ptr(tr->array_buffer.data, cpu);
	if (!atomic_read(&data->disabled))
		trace_function(tr, ip, parent_ip, trace_ctx);

	ftrace_test_recursion_unlock(bit);
}

#ifdef CONFIG_UNWINDER_ORC
/*
 * Skip 2:
 *
 *   function_stack_trace_call()
 *   ftrace_call()
 */
#define STACK_SKIP 2
#else
/*
 * Skip 3:
 *   __trace_stack()
 *   function_stack_trace_call()
 *   ftrace_call()
 */
#define STACK_SKIP 3
#endif

static void
function_stack_trace_call(unsigned long ip, unsigned long parent_ip,
			  struct ftrace_ops *op, struct ftrace_regs *fregs)
{
	struct trace_array *tr = op->private;
	struct trace_array_cpu *data;
	unsigned long flags;
	long disabled;
	int cpu;
	unsigned int trace_ctx;
	int skip = STACK_SKIP;

	if (unlikely(!tr->function_enabled))
		return;

	/*
	 * Need to use raw, since this must be called before the
	 * recursive protection is performed.
	 */
	local_irq_save(flags);
	cpu = raw_smp_processor_id();
	data = per_cpu_ptr(tr->array_buffer.data, cpu);
	disabled = atomic_inc_return(&data->disabled);

	if (likely(disabled == 1)) {
		trace_ctx = tracing_gen_ctx_flags(flags);
		trace_function(tr, ip, parent_ip, trace_ctx);
#ifdef CONFIG_UNWINDER_FRAME_POINTER
		if (ftrace_pids_enabled(op))
			skip++;
#endif
		__trace_stack(tr, trace_ctx, skip);
	}

	atomic_dec(&data->disabled);
	local_irq_restore(flags);
}
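
/*
 * Helpers for the "func-no-repeats" option: detect consecutive calls with the
 * same ip/parent_ip and flush the accumulated repeat count as a single event.
 */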
static inline bool is_repeat_check(struct trace_array *tr,
				   struct trace_func_repeats *last_info,
				   unsigned long ip, unsigned long parent_ip)
{
	if (last_info->ip == ip &&
	    last_info->parent_ip == parent_ip &&
	    last_info->count < U16_MAX) {
		last_info->ts_last_call =
			ring_buffer_time_stamp(tr->array_buffer.buffer);
		last_info->count++;
		return true;
	}

	return false;
}

static inline void process_repeats(struct trace_array *tr,
				   unsigned long ip, unsigned long parent_ip,
				   struct trace_func_repeats *last_info,
				   unsigned int trace_ctx)
{
	if (last_info->count) {
		trace_last_func_repeats(tr, last_info, trace_ctx);
		last_info->count = 0;
	}

	last_info->ip = ip;
	last_info->parent_ip = parent_ip;
}

static void
function_no_repeats_trace_call(unsigned long ip, unsigned long parent_ip,
			       struct ftrace_ops *op,
			       struct ftrace_regs *fregs)
{
	struct trace_func_repeats *last_info;
	struct trace_array *tr = op->private;
	struct trace_array_cpu *data;
	unsigned int trace_ctx;
	int bit;
	int cpu;

	if (unlikely(!tr->function_enabled))
		return;

	bit = ftrace_test_recursion_trylock(ip, parent_ip);
	if (bit < 0)
		return;

	cpu = smp_processor_id();
	data = per_cpu_ptr(tr->array_buffer.data, cpu);
	if (atomic_read(&data->disabled))
		goto out;

	/*
	 * An interrupt may happen at any place here. But as far as I can see,
	 * the only damage that this can cause is to mess up the repetition
	 * counter without valuable data being lost.
	 * TODO: think about a solution that is better than just hoping to be
	 * lucky.
	 */
	last_info = per_cpu_ptr(tr->last_func_repeats, cpu);
	if (is_repeat_check(tr, last_info, ip, parent_ip))
		goto out;

	trace_ctx = tracing_gen_ctx_dec();
	process_repeats(tr, ip, parent_ip, last_info, trace_ctx);

	trace_function(tr, ip, parent_ip, trace_ctx);

out:
	ftrace_test_recursion_unlock(bit);
}

static void
function_stack_no_repeats_trace_call(unsigned long ip, unsigned long parent_ip,
				     struct ftrace_ops *op,
				     struct ftrace_regs *fregs)
{
	struct trace_func_repeats *last_info;
	struct trace_array *tr = op->private;
	struct trace_array_cpu *data;
	unsigned long flags;
	long disabled;
	int cpu;
	unsigned int trace_ctx;

	if (unlikely(!tr->function_enabled))
		return;

	/*
	 * Need to use raw, since this must be called before the
	 * recursive protection is performed.
	 */
	local_irq_save(flags);
	cpu = raw_smp_processor_id();
	data = per_cpu_ptr(tr->array_buffer.data, cpu);
	disabled = atomic_inc_return(&data->disabled);

	if (likely(disabled == 1)) {
		last_info = per_cpu_ptr(tr->last_func_repeats, cpu);
		if (is_repeat_check(tr, last_info, ip, parent_ip))
			goto out;

		trace_ctx = tracing_gen_ctx_flags(flags);
		process_repeats(tr, ip, parent_ip, last_info, trace_ctx);

		trace_function(tr, ip, parent_ip, trace_ctx);
		__trace_stack(tr, trace_ctx, STACK_SKIP);
	}

out:
	atomic_dec(&data->disabled);
	local_irq_restore(flags);
}

static struct tracer_opt func_opts[] = {
#ifdef CONFIG_STACKTRACE
	{ TRACER_OPT(func_stack_trace, TRACE_FUNC_OPT_STACK) },
#endif
	{ TRACER_OPT(func-no-repeats, TRACE_FUNC_OPT_NO_REPEATS) },
	{ } /* Always set a last empty entry */
};

static struct tracer_flags func_flags = {
	.val = TRACE_FUNC_NO_OPTS, /* By default: all flags disabled */
	.opts = func_opts
};

static void tracing_start_function_trace(struct trace_array *tr)
{
	tr->function_enabled = 0;
	register_ftrace_function(tr->ops);
	tr->function_enabled = 1;
}

static void tracing_stop_function_trace(struct trace_array *tr)
{
	tr->function_enabled = 0;
	unregister_ftrace_function(tr->ops);
}

static struct tracer function_trace;

static int
func_set_flag(struct trace_array *tr, u32 old_flags, u32 bit, int set)
{
	ftrace_func_t func;
	u32 new_flags;

	/* Do nothing if already set. */
	if (!!set == !!(func_flags.val & bit))
		return 0;

	/* We can change this flag only when not running. */
	if (tr->current_trace != &function_trace)
		return 0;

	new_flags = (func_flags.val & ~bit) | (set ? bit : 0);
	func = select_trace_function(new_flags);
	if (!func)
		return -EINVAL;

	/* Check if there's anything to change. */
	if (tr->ops->func == func)
		return 0;

	if (!handle_func_repeats(tr, new_flags))
		return -ENOMEM;

	unregister_ftrace_function(tr->ops);
	tr->ops->func = func;
	register_ftrace_function(tr->ops);

	return 0;
}

static struct tracer function_trace __tracer_data =
{
	.name = "function",
	.init = function_trace_init,
	.reset = function_trace_reset,
	.start = function_trace_start,
	.flags = &func_flags,
	.set_flag = func_set_flag,
	.allow_instances = true,
#ifdef CONFIG_FTRACE_SELFTEST
	.selftest = trace_selftest_startup_function,
#endif
};

#ifdef CONFIG_DYNAMIC_FTRACE
static void update_traceon_count(struct ftrace_probe_ops *ops,
				 unsigned long ip,
				 struct trace_array *tr, bool on,
				 void *data)
{
	struct ftrace_func_mapper *mapper = data;
	long *count;
	long old_count;

	/*
	 * Tracing gets disabled (or enabled) once per count.
	 * This function can be called at the same time on multiple CPUs.
	 * It is fine if both disable (or enable) tracing, as disabling
	 * (or enabling) the second time doesn't do anything as the
	 * state of the tracer is already disabled (or enabled).
	 * What needs to be synchronized in this case is that the count
	 * only gets decremented once, even if the tracer is disabled
	 * (or enabled) twice, as the second one is really a nop.
	 *
	 * The memory barriers guarantee that we only decrement the
	 * counter once. First the count is read to a local variable
	 * and a read barrier is used to make sure that it is loaded
	 * before checking if the tracer is in the state we want.
	 * If the tracer is not in the state we want, then the count
	 * is guaranteed to be the old count.
	 *
	 * Next the tracer is set to the state we want (disabled or enabled)
	 * then a write memory barrier is used to make sure that
	 * the new state is visible before changing the counter by
	 * one minus the old counter. This guarantees that another CPU
	 * executing this code will see the new state before seeing
	 * the new counter value, and would not do anything if the new
	 * counter is seen.
	 *
	 * Note, there is no synchronization between this and a user
	 * setting the tracing_on file. But we currently don't care
	 * about that.
	 */
	count = (long *)ftrace_func_mapper_find_ip(mapper, ip);
	old_count = *count;

	if (old_count <= 0)
		return;

	/* Make sure we see count before checking tracing state */
	smp_rmb();

	if (on == !!tracer_tracing_is_on(tr))
		return;

	if (on)
		tracer_tracing_on(tr);
	else
		tracer_tracing_off(tr);

	/* Make sure tracing state is visible before updating count */
	smp_wmb();

	*count = old_count - 1;
}

static void
ftrace_traceon_count(unsigned long ip, unsigned long parent_ip,
		     struct trace_array *tr, struct ftrace_probe_ops *ops,
		     void *data)
{
	update_traceon_count(ops, ip, tr, 1, data);
}

static void
ftrace_traceoff_count(unsigned long ip, unsigned long parent_ip,
		      struct trace_array *tr, struct ftrace_probe_ops *ops,
		      void *data)
{
	update_traceon_count(ops, ip, tr, 0, data);
}

static void
ftrace_traceon(unsigned long ip, unsigned long parent_ip,
	       struct trace_array *tr, struct ftrace_probe_ops *ops,
	       void *data)
{
	if (tracer_tracing_is_on(tr))
		return;

	tracer_tracing_on(tr);
}

static void
ftrace_traceoff(unsigned long ip, unsigned long parent_ip,
		struct trace_array *tr, struct ftrace_probe_ops *ops,
		void *data)
{
	if (!tracer_tracing_is_on(tr))
		return;

	tracer_tracing_off(tr);
}

#ifdef CONFIG_UNWINDER_ORC
/*
 * Skip 3:
 *
 *   function_trace_probe_call()
 *   ftrace_ops_assist_func()
 *   ftrace_call()
 */
#define FTRACE_STACK_SKIP 3
#else
/*
 * Skip 5:
 *
 *   __trace_stack()
 *   ftrace_stacktrace()
 *   function_trace_probe_call()
 *   ftrace_ops_assist_func()
 *   ftrace_call()
 */
#define FTRACE_STACK_SKIP 5
#endif

static __always_inline void trace_stack(struct trace_array *tr)
{
	__trace_stack(tr, tracing_gen_ctx_dec(), FTRACE_STACK_SKIP);
}

static void
ftrace_stacktrace(unsigned long ip, unsigned long parent_ip,
		  struct trace_array *tr, struct ftrace_probe_ops *ops,
		  void *data)
{
	trace_stack(tr);
}

static void
ftrace_stacktrace_count(unsigned long ip, unsigned long parent_ip,
			struct trace_array *tr, struct ftrace_probe_ops *ops,
			void *data)
{
	struct ftrace_func_mapper *mapper = data;
	long *count;
	long old_count;
	long new_count;

	if (!tracing_is_on())
		return;

	/* unlimited? */
	if (!mapper) {
		trace_stack(tr);
		return;
	}

	count = (long *)ftrace_func_mapper_find_ip(mapper, ip);

	/*
	 * Stack traces should only execute the number of times the
	 * user specified in the counter.
	 */
	do {
		old_count = *count;

		if (!old_count)
			return;

		new_count = old_count - 1;
		new_count = cmpxchg(count, old_count, new_count);
		if (new_count == old_count)
			trace_stack(tr);

		if (!tracing_is_on())
			return;
	} while (new_count != old_count);
}

static int update_count(struct ftrace_probe_ops *ops, unsigned long ip,
			void *data)
{
	struct ftrace_func_mapper *mapper = data;
	long *count = NULL;

	if (mapper)
		count = (long *)ftrace_func_mapper_find_ip(mapper, ip);

	if (count) {
		if (*count <= 0)
			return 0;
		(*count)--;
	}

	return 1;
}

static void
ftrace_dump_probe(unsigned long ip, unsigned long parent_ip,
		  struct trace_array *tr, struct ftrace_probe_ops *ops,
		  void *data)
{
	if (update_count(ops, ip, data))
		ftrace_dump(DUMP_ALL);
}

/* Only dump the current CPU buffer. */
static void
ftrace_cpudump_probe(unsigned long ip, unsigned long parent_ip,
		     struct trace_array *tr, struct ftrace_probe_ops *ops,
		     void *data)
{
	if (update_count(ops, ip, data))
		ftrace_dump(DUMP_ORIG);
}

static int
ftrace_probe_print(const char *name, struct seq_file *m,
		   unsigned long ip, struct ftrace_probe_ops *ops,
		   void *data)
{
	struct ftrace_func_mapper *mapper = data;
	long *count = NULL;

	seq_printf(m, "%ps:%s", (void *)ip, name);

	if (mapper)
		count = (long *)ftrace_func_mapper_find_ip(mapper, ip);

	if (count)
		seq_printf(m, ":count=%ld\n", *count);
	else
		seq_puts(m, ":unlimited\n");

	return 0;
}

static int
ftrace_traceon_print(struct seq_file *m, unsigned long ip,
		     struct ftrace_probe_ops *ops,
		     void *data)
{
	return ftrace_probe_print("traceon", m, ip, ops, data);
}

static int
ftrace_traceoff_print(struct seq_file *m, unsigned long ip,
		      struct ftrace_probe_ops *ops, void *data)
{
	return ftrace_probe_print("traceoff", m, ip, ops, data);
}

static int
ftrace_stacktrace_print(struct seq_file *m, unsigned long ip,
			struct ftrace_probe_ops *ops, void *data)
{
	return ftrace_probe_print("stacktrace", m, ip, ops, data);
}

static int
ftrace_dump_print(struct seq_file *m, unsigned long ip,
		  struct ftrace_probe_ops *ops, void *data)
{
	return ftrace_probe_print("dump", m, ip, ops, data);
}

static int
ftrace_cpudump_print(struct seq_file *m, unsigned long ip,
		     struct ftrace_probe_ops *ops, void *data)
{
	return ftrace_probe_print("cpudump", m, ip, ops, data);
}
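
/* Allocate the ip -> count mapper on first use and record this ip's counter. */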
static int
ftrace_count_init(struct ftrace_probe_ops *ops, struct trace_array *tr,
		  unsigned long ip, void *init_data, void **data)
{
	struct ftrace_func_mapper *mapper = *data;

	if (!mapper) {
		mapper = allocate_ftrace_func_mapper();
		if (!mapper)
			return -ENOMEM;
		*data = mapper;
	}

	return ftrace_func_mapper_add_ip(mapper, ip, init_data);
}

static void
ftrace_count_free(struct ftrace_probe_ops *ops, struct trace_array *tr,
		  unsigned long ip, void *data)
{
	struct ftrace_func_mapper *mapper = data;

	if (!ip) {
		free_ftrace_func_mapper(mapper, NULL);
		return;
	}

	ftrace_func_mapper_remove_ip(mapper, ip);
}

static struct ftrace_probe_ops traceon_count_probe_ops = {
	.func = ftrace_traceon_count,
	.print = ftrace_traceon_print,
	.init = ftrace_count_init,
	.free = ftrace_count_free,
};

static struct ftrace_probe_ops traceoff_count_probe_ops = {
	.func = ftrace_traceoff_count,
	.print = ftrace_traceoff_print,
	.init = ftrace_count_init,
	.free = ftrace_count_free,
};

static struct ftrace_probe_ops stacktrace_count_probe_ops = {
	.func = ftrace_stacktrace_count,
	.print = ftrace_stacktrace_print,
	.init = ftrace_count_init,
	.free = ftrace_count_free,
};

static struct ftrace_probe_ops dump_probe_ops = {
	.func = ftrace_dump_probe,
	.print = ftrace_dump_print,
	.init = ftrace_count_init,
	.free = ftrace_count_free,
};

static struct ftrace_probe_ops cpudump_probe_ops = {
	.func = ftrace_cpudump_probe,
	.print = ftrace_cpudump_print,
};

static struct ftrace_probe_ops traceon_probe_ops = {
	.func = ftrace_traceon,
	.print = ftrace_traceon_print,
};

static struct ftrace_probe_ops traceoff_probe_ops = {
	.func = ftrace_traceoff,
	.print = ftrace_traceoff_print,
};

static struct ftrace_probe_ops stacktrace_probe_ops = {
	.func = ftrace_stacktrace,
	.print = ftrace_stacktrace_print,
};
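
/*
 * Common handling for the probe commands: a leading '!' removes an existing
 * probe, an optional ":count" parameter limits how many times it fires.
 */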
static int
ftrace_trace_probe_callback(struct trace_array *tr,
			    struct ftrace_probe_ops *ops,
			    struct ftrace_hash *hash, char *glob,
			    char *cmd, char *param, int enable)
{
	void *count = (void *)-1;
	char *number;
	int ret;

	/* hash funcs only work with set_ftrace_filter */
	if (!enable)
		return -EINVAL;

	if (glob[0] == '!')
		return unregister_ftrace_function_probe_func(glob+1, tr, ops);

	if (!param)
		goto out_reg;

	number = strsep(&param, ":");

	if (!strlen(number))
		goto out_reg;

	/*
	 * We use the callback data field (which is a pointer)
	 * as our counter.
	 */
	ret = kstrtoul(number, 0, (unsigned long *)&count);
	if (ret)
		return ret;

out_reg:
	ret = register_ftrace_function_probe(glob, tr, ops, count);

	return ret < 0 ? ret : 0;
}

static int
ftrace_trace_onoff_callback(struct trace_array *tr, struct ftrace_hash *hash,
			    char *glob, char *cmd, char *param, int enable)
{
	struct ftrace_probe_ops *ops;

	if (!tr)
		return -ENODEV;

	/* we register both traceon and traceoff to this callback */
	if (strcmp(cmd, "traceon") == 0)
		ops = param ? &traceon_count_probe_ops : &traceon_probe_ops;
	else
		ops = param ? &traceoff_count_probe_ops : &traceoff_probe_ops;

	return ftrace_trace_probe_callback(tr, ops, hash, glob, cmd,
					   param, enable);
}

static int
ftrace_stacktrace_callback(struct trace_array *tr, struct ftrace_hash *hash,
			   char *glob, char *cmd, char *param, int enable)
{
	struct ftrace_probe_ops *ops;

	if (!tr)
		return -ENODEV;

	ops = param ? &stacktrace_count_probe_ops : &stacktrace_probe_ops;

	return ftrace_trace_probe_callback(tr, ops, hash, glob, cmd,
					   param, enable);
}

static int
ftrace_dump_callback(struct trace_array *tr, struct ftrace_hash *hash,
		     char *glob, char *cmd, char *param, int enable)
{
	struct ftrace_probe_ops *ops;

	if (!tr)
		return -ENODEV;

	ops = &dump_probe_ops;

	/* Only dump once. */
	return ftrace_trace_probe_callback(tr, ops, hash, glob, cmd,
					   "1", enable);
}

static int
ftrace_cpudump_callback(struct trace_array *tr, struct ftrace_hash *hash,
			char *glob, char *cmd, char *param, int enable)
{
	struct ftrace_probe_ops *ops;

	if (!tr)
		return -ENODEV;

	ops = &cpudump_probe_ops;

	/* Only dump once. */
	return ftrace_trace_probe_callback(tr, ops, hash, glob, cmd,
					   "1", enable);
}

static struct ftrace_func_command ftrace_traceon_cmd = {
	.name = "traceon",
	.func = ftrace_trace_onoff_callback,
};

static struct ftrace_func_command ftrace_traceoff_cmd = {
	.name = "traceoff",
	.func = ftrace_trace_onoff_callback,
};

static struct ftrace_func_command ftrace_stacktrace_cmd = {
	.name = "stacktrace",
	.func = ftrace_stacktrace_callback,
};

static struct ftrace_func_command ftrace_dump_cmd = {
	.name = "dump",
	.func = ftrace_dump_callback,
};

static struct ftrace_func_command ftrace_cpudump_cmd = {
	.name = "cpudump",
	.func = ftrace_cpudump_callback,
};
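
/*
 * Register the traceon/traceoff/stacktrace/dump/cpudump commands for
 * set_ftrace_filter, unwinding the already registered ones on failure.
 */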
static int __init init_func_cmd_traceon(void)
{
	int ret;

	ret = register_ftrace_command(&ftrace_traceoff_cmd);
	if (ret)
		return ret;

	ret = register_ftrace_command(&ftrace_traceon_cmd);
	if (ret)
		goto out_free_traceoff;

	ret = register_ftrace_command(&ftrace_stacktrace_cmd);
	if (ret)
		goto out_free_traceon;

	ret = register_ftrace_command(&ftrace_dump_cmd);
	if (ret)
		goto out_free_stacktrace;

	ret = register_ftrace_command(&ftrace_cpudump_cmd);
	if (ret)
		goto out_free_dump;

	return 0;

out_free_dump:
	unregister_ftrace_command(&ftrace_dump_cmd);
out_free_stacktrace:
	unregister_ftrace_command(&ftrace_stacktrace_cmd);
out_free_traceon:
	unregister_ftrace_command(&ftrace_traceon_cmd);
out_free_traceoff:
	unregister_ftrace_command(&ftrace_traceoff_cmd);

	return ret;
}
#else
static inline int init_func_cmd_traceon(void)
{
	return 0;
}
#endif /* CONFIG_DYNAMIC_FTRACE */

__init int init_function_trace(void)
{
	init_func_cmd_traceon();
	return register_tracer(&function_trace);
}