ftrace_dyn.c 8.5 KB

123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163164165166167168169170171172173174175176177178179180181182183184185186187188189190191192193194195196197198199200201202203204205206207208209210211212213214215216217218219220221222223224225226227228229230231232233234235236237238239240241242243244245246247248249250251252253254255256257258259260261262263264265266267268269270271272273274275276277278279280281282283284285286287288289290291292293294295296297298299300301302303304305306307308309310311312313314315316317318319320321322323324325326327328329330331332333334335336337338339340341342343344
  1. // SPDX-License-Identifier: GPL-2.0
  2. /*
  3. * Based on arch/arm64/kernel/ftrace.c
  4. *
  5. * Copyright (C) 2022 Loongson Technology Corporation Limited
  6. */
  7. #include <linux/ftrace.h>
  8. #include <linux/kprobes.h>
  9. #include <linux/uaccess.h>
  10. #include <asm/inst.h>
  11. #include <asm/module.h>
  12. static int ftrace_modify_code(unsigned long pc, u32 old, u32 new, bool validate)
  13. {
  14. u32 replaced;
  15. if (validate) {
  16. if (larch_insn_read((void *)pc, &replaced))
  17. return -EFAULT;
  18. if (replaced != old)
  19. return -EINVAL;
  20. }
  21. if (larch_insn_patch_text((void *)pc, new))
  22. return -EPERM;
  23. return 0;
  24. }
  25. #ifdef CONFIG_MODULES
  26. static bool reachable_by_bl(unsigned long addr, unsigned long pc)
  27. {
  28. long offset = (long)addr - (long)pc;
  29. return offset >= -SZ_128M && offset < SZ_128M;
  30. }
  31. static struct plt_entry *get_ftrace_plt(struct module *mod, unsigned long addr)
  32. {
  33. struct plt_entry *plt = mod->arch.ftrace_trampolines;
  34. if (addr == FTRACE_ADDR)
  35. return &plt[FTRACE_PLT_IDX];
  36. if (addr == FTRACE_REGS_ADDR &&
  37. IS_ENABLED(CONFIG_DYNAMIC_FTRACE_WITH_REGS))
  38. return &plt[FTRACE_REGS_PLT_IDX];
  39. return NULL;
  40. }
/*
 * Find the address the callsite must branch to in order to reach '*addr'.
 *
 * Due to the limited range of 'bl' instruction, modules may be placed too far
 * away to branch directly and we must use a PLT.
 *
 * Returns true when '*addr' contains a reachable target address, or has been
 * modified to contain a PLT address. Returns false otherwise.
 */
static bool ftrace_find_callable_addr(struct dyn_ftrace *rec, struct module *mod, unsigned long *addr)
{
	unsigned long pc = rec->ip + LOONGARCH_INSN_SIZE;
	struct plt_entry *plt;

	/*
	 * If a custom trampoline is unreachable, rely on the ftrace_regs_caller
	 * trampoline which knows how to indirectly reach that trampoline through
	 * ops->direct_call.
	 */
	if (*addr != FTRACE_ADDR && *addr != FTRACE_REGS_ADDR && !reachable_by_bl(*addr, pc))
		*addr = FTRACE_REGS_ADDR;

	/*
	 * When the target is within range of the 'bl' instruction, use 'addr'
	 * as-is and branch to that directly.
	 */
	if (reachable_by_bl(*addr, pc))
		return true;

	/*
	 * 'mod' is only set at module load time, but if we end up
	 * dealing with an out-of-range condition, we can assume it
	 * is due to a module being loaded far away from the kernel.
	 *
	 * NOTE: __module_text_address() must be called with preemption
	 * disabled, but we can rely on ftrace_lock to ensure that 'mod'
	 * retains its validity throughout the remainder of this code.
	 */
	if (!mod) {
		preempt_disable();
		mod = __module_text_address(pc);
		preempt_enable();
	}

	/* Out of 'bl' range with no module to host a PLT: cannot patch. */
	if (WARN_ON(!mod))
		return false;

	plt = get_ftrace_plt(mod, *addr);
	if (!plt) {
		pr_err("ftrace: no module PLT for %ps\n", (void *)*addr);
		return false;
	}

	/* Branch to the PLT entry; it in turn jumps to the real target. */
	*addr = (unsigned long)plt;
	return true;
}
  91. #else /* !CONFIG_MODULES */
/* Without modules, every target is within 'bl' range; nothing to fix up. */
static bool ftrace_find_callable_addr(struct dyn_ftrace *rec, struct module *mod, unsigned long *addr)
{
	return true;
}
  96. #endif
  97. #ifdef CONFIG_DYNAMIC_FTRACE_WITH_REGS
  98. int ftrace_modify_call(struct dyn_ftrace *rec, unsigned long old_addr, unsigned long addr)
  99. {
  100. u32 old, new;
  101. unsigned long pc;
  102. pc = rec->ip + LOONGARCH_INSN_SIZE;
  103. if (!ftrace_find_callable_addr(rec, NULL, &addr))
  104. return -EINVAL;
  105. if (!ftrace_find_callable_addr(rec, NULL, &old_addr))
  106. return -EINVAL;
  107. new = larch_insn_gen_bl(pc, addr);
  108. old = larch_insn_gen_bl(pc, old_addr);
  109. return ftrace_modify_code(pc, old, new, true);
  110. }
  111. #endif /* CONFIG_DYNAMIC_FTRACE_WITH_REGS */
  112. int ftrace_update_ftrace_func(ftrace_func_t func)
  113. {
  114. u32 new;
  115. unsigned long pc;
  116. pc = (unsigned long)&ftrace_call;
  117. new = larch_insn_gen_bl(pc, (unsigned long)func);
  118. return ftrace_modify_code(pc, 0, new, false);
  119. }
  120. /*
  121. * The compiler has inserted 2 NOPs before the regular function prologue.
  122. * T series registers are available and safe because of LoongArch's psABI.
  123. *
  124. * At runtime, we can replace nop with bl to enable ftrace call and replace bl
  125. * with nop to disable ftrace call. The bl requires us to save the original RA
  126. * value, so it saves RA at t0 here.
  127. *
  128. * Details are:
  129. *
  130. * | Compiled | Disabled | Enabled |
  131. * +------------+------------------------+------------------------+
  132. * | nop | move t0, ra | move t0, ra |
  133. * | nop | nop | bl ftrace_caller |
  134. * | func_body | func_body | func_body |
  135. *
  136. * The RA value will be recovered by ftrace_regs_entry, and restored into RA
  137. * before returning to the regular function prologue. When a function is not
  138. * being traced, the "move t0, ra" is not harmful.
  139. */
  140. int ftrace_init_nop(struct module *mod, struct dyn_ftrace *rec)
  141. {
  142. u32 old, new;
  143. unsigned long pc;
  144. pc = rec->ip;
  145. old = larch_insn_gen_nop();
  146. new = larch_insn_gen_move(LOONGARCH_GPR_T0, LOONGARCH_GPR_RA);
  147. return ftrace_modify_code(pc, old, new, true);
  148. }
  149. int ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr)
  150. {
  151. u32 old, new;
  152. unsigned long pc;
  153. pc = rec->ip + LOONGARCH_INSN_SIZE;
  154. if (!ftrace_find_callable_addr(rec, NULL, &addr))
  155. return -EINVAL;
  156. old = larch_insn_gen_nop();
  157. new = larch_insn_gen_bl(pc, addr);
  158. return ftrace_modify_code(pc, old, new, true);
  159. }
  160. int ftrace_make_nop(struct module *mod, struct dyn_ftrace *rec, unsigned long addr)
  161. {
  162. u32 old, new;
  163. unsigned long pc;
  164. pc = rec->ip + LOONGARCH_INSN_SIZE;
  165. if (!ftrace_find_callable_addr(rec, NULL, &addr))
  166. return -EINVAL;
  167. new = larch_insn_gen_nop();
  168. old = larch_insn_gen_bl(pc, addr);
  169. return ftrace_modify_code(pc, old, new, true);
  170. }
  171. void arch_ftrace_update_code(int command)
  172. {
  173. command |= FTRACE_MAY_SLEEP;
  174. ftrace_modify_all_code(command);
  175. }
/* No arch-specific initialization is needed for dynamic ftrace. */
int __init ftrace_dyn_arch_init(void)
{
	return 0;
}
  180. #ifdef CONFIG_FUNCTION_GRAPH_TRACER
  181. void prepare_ftrace_return(unsigned long self_addr, unsigned long *parent)
  182. {
  183. unsigned long old;
  184. unsigned long return_hooker = (unsigned long)&return_to_handler;
  185. if (unlikely(atomic_read(&current->tracing_graph_pause)))
  186. return;
  187. old = *parent;
  188. if (!function_graph_enter(old, self_addr, 0, parent))
  189. *parent = return_hooker;
  190. }
  191. #ifdef CONFIG_HAVE_DYNAMIC_FTRACE_WITH_ARGS
  192. void ftrace_graph_func(unsigned long ip, unsigned long parent_ip,
  193. struct ftrace_ops *op, struct ftrace_regs *fregs)
  194. {
  195. struct pt_regs *regs = &fregs->regs;
  196. unsigned long *parent = (unsigned long *)&regs->regs[1];
  197. prepare_ftrace_return(ip, (unsigned long *)parent);
  198. }
  199. #else
  200. static int ftrace_modify_graph_caller(bool enable)
  201. {
  202. u32 branch, nop;
  203. unsigned long pc, func;
  204. extern void ftrace_graph_call(void);
  205. pc = (unsigned long)&ftrace_graph_call;
  206. func = (unsigned long)&ftrace_graph_caller;
  207. nop = larch_insn_gen_nop();
  208. branch = larch_insn_gen_b(pc, func);
  209. if (enable)
  210. return ftrace_modify_code(pc, nop, branch, true);
  211. else
  212. return ftrace_modify_code(pc, branch, nop, true);
  213. }
/* Patch in the branch to ftrace_graph_caller. */
int ftrace_enable_ftrace_graph_caller(void)
{
	return ftrace_modify_graph_caller(true);
}
/* Patch the ftrace_graph_caller branch back to a nop. */
int ftrace_disable_ftrace_graph_caller(void)
{
	return ftrace_modify_graph_caller(false);
}
  222. #endif /* CONFIG_HAVE_DYNAMIC_FTRACE_WITH_ARGS */
  223. #endif /* CONFIG_FUNCTION_GRAPH_TRACER */
  224. #ifdef CONFIG_KPROBES_ON_FTRACE
/* Ftrace callback handler for kprobes -- called under preempt disabled */
void kprobe_ftrace_handler(unsigned long ip, unsigned long parent_ip,
			   struct ftrace_ops *ops, struct ftrace_regs *fregs)
{
	int bit;
	struct pt_regs *regs;
	struct kprobe *p;
	struct kprobe_ctlblk *kcb;

	if (unlikely(kprobe_ftrace_disabled))
		return;

	/* Guard against recursing through traced code in the handlers. */
	bit = ftrace_test_recursion_trylock(ip, parent_ip);
	if (bit < 0)
		return;

	p = get_kprobe((kprobe_opcode_t *)ip);
	if (unlikely(!p) || kprobe_disabled(p))
		goto out;

	/* No saved registers available (ops without REGS/ARGS): bail out. */
	regs = ftrace_get_regs(fregs);
	if (!regs)
		goto out;

	kcb = get_kprobe_ctlblk();
	if (kprobe_running()) {
		/* Another kprobe is already active on this CPU: count a miss. */
		kprobes_inc_nmissed_count(p);
	} else {
		unsigned long orig_ip = instruction_pointer(regs);

		/* Present the probe address as the PC to the handlers. */
		instruction_pointer_set(regs, ip);

		__this_cpu_write(current_kprobe, p);
		kcb->kprobe_status = KPROBE_HIT_ACTIVE;
		if (!p->pre_handler || !p->pre_handler(p, regs)) {
			/*
			 * Emulate singlestep (and also recover regs->csr_era)
			 * as if there is a nop
			 */
			instruction_pointer_set(regs, (unsigned long)p->addr + MCOUNT_INSN_SIZE);
			if (unlikely(p->post_handler)) {
				kcb->kprobe_status = KPROBE_HIT_SSDONE;
				p->post_handler(p, regs, 0);
			}
			/* Restore the PC the trampoline expects to resume at. */
			instruction_pointer_set(regs, orig_ip);
		}
		/*
		 * If pre_handler returns !0, it changes regs->csr_era. We have to
		 * skip emulating post_handler.
		 */
		__this_cpu_write(current_kprobe, NULL);
	}
out:
	ftrace_test_recursion_unlock(bit);
}
  273. NOKPROBE_SYMBOL(kprobe_ftrace_handler);
/* ftrace-based kprobes need no out-of-line single-step slot. */
int arch_prepare_kprobe_ftrace(struct kprobe *p)
{
	p->ainsn.insn = NULL;
	return 0;
}
  279. #endif /* CONFIG_KPROBES_ON_FTRACE */