ftrace.c

/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Copyright (C) 2013 Linaro Limited
 * Author: AKASHI Takahiro <takahiro.akashi@linaro.org>
 * Copyright (C) 2017 Andes Technology Corporation
 */

#include <linux/ftrace.h>
#include <linux/uaccess.h>
#include <asm/cacheflush.h>

#ifdef CONFIG_DYNAMIC_FTRACE
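/*
 * Check that the instructions at hook_pos are what we expect: the sequence
 * passed in expected, or two NOPs when expected is NULL.
 */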
static int ftrace_check_current_call(unsigned long hook_pos,
				     unsigned int *expected)
{
	unsigned int replaced[2];
	unsigned int nops[2] = {NOP4, NOP4};

	/* we expect nops at the hook position */
	if (!expected)
		expected = nops;

	/*
	 * Read the text we want to modify;
	 * return must be -EFAULT on read error
	 */
	if (probe_kernel_read(replaced, (void *)hook_pos, MCOUNT_INSN_SIZE))
		return -EFAULT;

	/*
	 * Make sure it is what we expect it to be;
	 * return must be -EINVAL on failed comparison
	 */
	if (memcmp(expected, replaced, sizeof(replaced))) {
		pr_err("%p: expected (%08x %08x) but got (%08x %08x)\n",
		       (void *)hook_pos, expected[0], expected[1], replaced[0],
		       replaced[1]);
		return -EINVAL;
	}

	return 0;
}
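
/*
 * Patch the auipc/jalr pair at hook_pos: install a call to target when
 * enable is true, or restore the two NOPs when it is false, then flush
 * the instruction cache.
 */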
static int __ftrace_modify_call(unsigned long hook_pos, unsigned long target,
				bool enable)
{
	unsigned int call[2];
	unsigned int nops[2] = {NOP4, NOP4};
	int ret = 0;

	make_call(hook_pos, target, call);

	/* replace the auipc-jalr pair at once */
	ret = probe_kernel_write((void *)hook_pos, enable ? call : nops,
				 MCOUNT_INSN_SIZE);
	/* return must be -EPERM on write error */
	if (ret)
		return -EPERM;

	smp_mb();
	flush_icache_range(hook_pos, hook_pos + MCOUNT_INSN_SIZE);

	return 0;
}
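
/*
 * ftrace callback: turn the two NOPs at rec->ip into a call to the tracer
 * at addr.
 */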
int ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr)
{
	int ret = ftrace_check_current_call(rec->ip, NULL);

	if (ret)
		return ret;

	return __ftrace_modify_call(rec->ip, addr, true);
}
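
/*
 * ftrace callback: verify that rec->ip currently calls addr, then restore
 * the two NOPs.
 */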
int ftrace_make_nop(struct module *mod, struct dyn_ftrace *rec,
		    unsigned long addr)
{
	unsigned int call[2];
	int ret;

	make_call(rec->ip, addr, call);
	ret = ftrace_check_current_call(rec->ip, call);

	if (ret)
		return ret;

	return __ftrace_modify_call(rec->ip, addr, false);
}

/*
 * This is called early on, and isn't wrapped by
 * ftrace_arch_code_modify_{prepare,post_process}() and therefore doesn't hold
 * text_mutex, which triggers a lockdep failure.  SMP isn't running so we could
 * just directly poke the text, but it's simpler to just take the lock
 * ourselves.
 */
int ftrace_init_nop(struct module *mod, struct dyn_ftrace *rec)
{
	int out;

	ftrace_arch_code_modify_prepare();
	out = ftrace_make_nop(mod, rec, MCOUNT_ADDR);
	ftrace_arch_code_modify_post_process();

	return out;
}
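
/*
 * Point both the ftrace_call and ftrace_regs_call sites in the ftrace
 * trampolines at the new tracer function.
 */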
int ftrace_update_ftrace_func(ftrace_func_t func)
{
	int ret = __ftrace_modify_call((unsigned long)&ftrace_call,
				       (unsigned long)func, true);
	if (!ret) {
		ret = __ftrace_modify_call((unsigned long)&ftrace_regs_call,
					   (unsigned long)func, true);
	}

	return ret;
}

int __init ftrace_dyn_arch_init(void)
{
	return 0;
}
#endif

#ifdef CONFIG_DYNAMIC_FTRACE_WITH_REGS
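/*
 * Retarget an existing call site: check that rec->ip currently calls
 * old_addr, then patch it to call addr instead.
 */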
int ftrace_modify_call(struct dyn_ftrace *rec, unsigned long old_addr,
		       unsigned long addr)
{
	unsigned int call[2];
	int ret;

	make_call(rec->ip, old_addr, call);
	ret = ftrace_check_current_call(rec->ip, call);

	if (ret)
		return ret;

	return __ftrace_modify_call(rec->ip, addr, true);
}
#endif

#ifdef CONFIG_FUNCTION_GRAPH_TRACER
/*
 * Most of this function is copied from arm64.
 */
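/*
 * Called on function entry: replace the saved return address (*parent) with
 * return_to_handler so that the graph tracer also sees the function return,
 * unless tracing is paused or function_graph_enter() rejects the entry.
 */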
void prepare_ftrace_return(unsigned long *parent, unsigned long self_addr,
			   unsigned long frame_pointer)
{
	unsigned long return_hooker = (unsigned long)&return_to_handler;
	unsigned long old;

	if (unlikely(atomic_read(&current->tracing_graph_pause)))
		return;

	/*
	 * We don't suffer access faults, so no extra fault-recovery assembly
	 * is needed here.
	 */
	old = *parent;

	if (!function_graph_enter(old, self_addr, frame_pointer, parent))
		*parent = return_hooker;
}

#ifdef CONFIG_DYNAMIC_FTRACE
extern void ftrace_graph_call(void);
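
/*
 * Enable the call into the graph tracer: patch the ftrace_graph_call site so
 * that it calls prepare_ftrace_return() instead of the stub call or NOPs it
 * currently holds.
 */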
int ftrace_enable_ftrace_graph_caller(void)
{
	unsigned int call[2];
	static int init_graph = 1;
	int ret;

	make_call(&ftrace_graph_call, &ftrace_stub, call);

	/*
	 * When enabling the graph tracer for the first time, ftrace_graph_call
	 * should contain a call to ftrace_stub.  Once it has been disabled,
	 * the 8 bytes at that position become NOPs.
	 */
	if (init_graph) {
		ret = ftrace_check_current_call((unsigned long)&ftrace_graph_call,
						call);
		init_graph = 0;
	} else {
		ret = ftrace_check_current_call((unsigned long)&ftrace_graph_call,
						NULL);
	}

	if (ret)
		return ret;

	return __ftrace_modify_call((unsigned long)&ftrace_graph_call,
				    (unsigned long)&prepare_ftrace_return, true);
}
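
/*
 * Disable the graph tracer call: verify that ftrace_graph_call still calls
 * prepare_ftrace_return() and then replace that call with NOPs.
 */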
int ftrace_disable_ftrace_graph_caller(void)
{
	unsigned int call[2];
	int ret;

	make_call(&ftrace_graph_call, &prepare_ftrace_return, call);

	/*
	 * This is to make sure that ftrace_enable_ftrace_graph_caller
	 * did the right thing.
	 */
	ret = ftrace_check_current_call((unsigned long)&ftrace_graph_call,
					call);

	if (ret)
		return ret;

	return __ftrace_modify_call((unsigned long)&ftrace_graph_call,
				    (unsigned long)&prepare_ftrace_return, false);
}
#endif /* CONFIG_DYNAMIC_FTRACE */
#endif /* CONFIG_FUNCTION_GRAPH_TRACER */