ftrace.c

// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2013 Linaro Limited
 * Author: AKASHI Takahiro <takahiro.akashi@linaro.org>
 * Copyright (C) 2017 Andes Technology Corporation
 */

#include <linux/ftrace.h>
#include <linux/uaccess.h>
#include <linux/memory.h>
#include <linux/stop_machine.h>
#include <asm/cacheflush.h>
#include <asm/patch.h>

#ifdef CONFIG_DYNAMIC_FTRACE
void ftrace_arch_code_modify_prepare(void) __acquires(&text_mutex)
{
	mutex_lock(&text_mutex);

	/*
	 * The code sequences we use for ftrace can't be patched while the
	 * kernel is running, so we need to use stop_machine() to modify them
	 * for now.  This doesn't play nice with text_mutex, so we use this
	 * flag to elide the check.
	 */
	riscv_patch_in_stop_machine = true;
}

void ftrace_arch_code_modify_post_process(void) __releases(&text_mutex)
{
	riscv_patch_in_stop_machine = false;
	mutex_unlock(&text_mutex);
}
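
/*
 * Read back the instruction pair at hook_pos and verify that it matches
 * expected (or a pair of NOPs when expected is NULL).  Used to sanity-check
 * a call site before it is re-patched.
 */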
static int ftrace_check_current_call(unsigned long hook_pos,
				     unsigned int *expected)
{
	unsigned int replaced[2];
	unsigned int nops[2] = {NOP4, NOP4};

	/* we expect nops at the hook position */
	if (!expected)
		expected = nops;

	/*
	 * Read the text we want to modify;
	 * return must be -EFAULT on read error
	 */
	if (copy_from_kernel_nofault(replaced, (void *)hook_pos,
			MCOUNT_INSN_SIZE))
		return -EFAULT;

	/*
	 * Make sure it is what we expect it to be;
	 * return must be -EINVAL on failed comparison
	 */
	if (memcmp(expected, replaced, sizeof(replaced))) {
		pr_err("%p: expected (%08x %08x) but got (%08x %08x)\n",
		       (void *)hook_pos, expected[0], expected[1], replaced[0],
		       replaced[1]);
		return -EINVAL;
	}

	return 0;
}
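
/*
 * Build an AUIPC/JALR pair that calls target from hook_pos, linking through
 * either ra or t0, and patch in either that pair (enable) or two NOPs at the
 * hook position.
 */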
static int __ftrace_modify_call(unsigned long hook_pos, unsigned long target,
				bool enable, bool ra)
{
	unsigned int call[2];
	unsigned int nops[2] = {NOP4, NOP4};

	if (ra)
		make_call_ra(hook_pos, target, call);
	else
		make_call_t0(hook_pos, target, call);

	/* Replace the auipc-jalr pair at once. Return -EPERM on write error. */
	if (patch_insn_write((void *)hook_pos, enable ? call : nops, MCOUNT_INSN_SIZE))
		return -EPERM;

	return 0;
}
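
/* Patch the call site at rec->ip so that it calls addr, linking through t0. */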
int ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr)
{
	unsigned int call[2];

	make_call_t0(rec->ip, addr, call);

	if (patch_insn_write((void *)rec->ip, call, MCOUNT_INSN_SIZE))
		return -EPERM;

	return 0;
}
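
/* Turn the call site at rec->ip back into two NOPs, disabling tracing there. */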
int ftrace_make_nop(struct module *mod, struct dyn_ftrace *rec,
		    unsigned long addr)
{
	unsigned int nops[2] = {NOP4, NOP4};

	if (patch_insn_write((void *)rec->ip, nops, MCOUNT_INSN_SIZE))
		return -EPERM;

	return 0;
}

/*
 * This is called early on, and isn't wrapped by
 * ftrace_arch_code_modify_{prepare,post_process}() and therefore doesn't hold
 * text_mutex, which triggers a lockdep failure.  SMP isn't running so we could
 * just directly poke the text, but it's simpler to just take the lock
 * ourselves.
 */
int ftrace_init_nop(struct module *mod, struct dyn_ftrace *rec)
{
	int out;

	mutex_lock(&text_mutex);
	out = ftrace_make_nop(mod, rec, MCOUNT_ADDR);
	mutex_unlock(&text_mutex);

	return out;
}
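
/*
 * Repoint the ftrace_call slot in the ftrace trampoline at the new tracer
 * function func.
 */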
int ftrace_update_ftrace_func(ftrace_func_t func)
{
	int ret = __ftrace_modify_call((unsigned long)&ftrace_call,
				       (unsigned long)func, true, true);

	return ret;
}
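
/*
 * Parameter block handed to __ftrace_modify_code() under stop_machine(): the
 * last CPU to arrive performs the patching while the others spin on cpu_count
 * and then flush their local icache.
 */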
struct ftrace_modify_param {
	int command;
	atomic_t cpu_count;
};

static int __ftrace_modify_code(void *data)
{
	struct ftrace_modify_param *param = data;

	if (atomic_inc_return(&param->cpu_count) == num_online_cpus()) {
		ftrace_modify_all_code(param->command);
		/*
		 * Make sure the patching store is effective *before* we
		 * increment the counter which releases all waiting CPUs
		 * by using the release variant of atomic increment. The
		 * release pairs with the call to local_flush_icache_all()
		 * on the waiting CPU.
		 */
		atomic_inc_return_release(&param->cpu_count);
	} else {
		while (atomic_read(&param->cpu_count) <= num_online_cpus())
			cpu_relax();

		local_flush_icache_all();
	}

	return 0;
}
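
/* Run the requested ftrace update on every online CPU via stop_machine(). */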
void arch_ftrace_update_code(int command)
{
	struct ftrace_modify_param param = { command, ATOMIC_INIT(0) };

	stop_machine(__ftrace_modify_code, &param, cpu_online_mask);
}
#endif

#ifdef CONFIG_DYNAMIC_FTRACE_WITH_DIRECT_CALLS
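/*
 * Re-target a call site: check that rec->ip currently calls old_addr, then
 * patch it to call addr instead.
 */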
int ftrace_modify_call(struct dyn_ftrace *rec, unsigned long old_addr,
		       unsigned long addr)
{
	unsigned int call[2];
	unsigned long caller = rec->ip;
	int ret;

	make_call_t0(caller, old_addr, call);
	ret = ftrace_check_current_call(caller, call);

	if (ret)
		return ret;

	return __ftrace_modify_call(caller, addr, true, false);
}
#endif

#ifdef CONFIG_FUNCTION_GRAPH_TRACER
/*
 * Most of this function is copied from arm64.
 */
void prepare_ftrace_return(unsigned long *parent, unsigned long self_addr,
			   unsigned long frame_pointer)
{
	unsigned long return_hooker = (unsigned long)&return_to_handler;
	unsigned long old;

	if (unlikely(atomic_read(&current->tracing_graph_pause)))
		return;

	/*
	 * We don't suffer access faults, so no extra fault-recovery assembly
	 * is needed here.
	 */
	old = *parent;

	if (!function_graph_enter(old, self_addr, frame_pointer, parent))
		*parent = return_hooker;
}

#ifdef CONFIG_DYNAMIC_FTRACE
#ifdef CONFIG_DYNAMIC_FTRACE_WITH_ARGS
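/*
 * Graph-tracer entry used when DYNAMIC_FTRACE_WITH_ARGS is available: hook
 * the return address saved in fregs->ra, using the saved frame pointer in
 * fregs->s0.
 */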
void ftrace_graph_func(unsigned long ip, unsigned long parent_ip,
		       struct ftrace_ops *op, struct ftrace_regs *fregs)
{
	prepare_ftrace_return(&fregs->ra, ip, fregs->s0);
}
#else /* CONFIG_DYNAMIC_FTRACE_WITH_ARGS */
extern void ftrace_graph_call(void);
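
/*
 * Patch the ftrace_graph_call slot in the ftrace trampoline so that it does
 * (enable) or does not (disable) call prepare_ftrace_return().
 */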
int ftrace_enable_ftrace_graph_caller(void)
{
	return __ftrace_modify_call((unsigned long)&ftrace_graph_call,
				    (unsigned long)&prepare_ftrace_return,
				    true, true);
}

int ftrace_disable_ftrace_graph_caller(void)
{
	return __ftrace_modify_call((unsigned long)&ftrace_graph_call,
				    (unsigned long)&prepare_ftrace_return,
				    false, true);
}
#endif /* CONFIG_DYNAMIC_FTRACE_WITH_ARGS */
#endif /* CONFIG_DYNAMIC_FTRACE */
#endif /* CONFIG_FUNCTION_GRAPH_TRACER */