ftrace.c 17 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
3704705706707708709710711712713714715716717718719720721722723724725726727728729730731732
  1. // SPDX-License-Identifier: GPL-2.0
  2. /*
  3. * Code for replacing ftrace calls with jumps.
  4. *
  5. * Copyright (C) 2007-2008 Steven Rostedt <srostedt@redhat.com>
  6. *
  7. * Thanks goes out to P.A. Semi, Inc for supplying me with a PPC64 box.
  8. *
  9. * Added function graph tracer code, taken from x86 that was written
  10. * by Frederic Weisbecker, and ported to PPC by Steven Rostedt.
  11. *
  12. */
  13. #define pr_fmt(fmt) "ftrace-powerpc: " fmt
  14. #include <linux/spinlock.h>
  15. #include <linux/hardirq.h>
  16. #include <linux/uaccess.h>
  17. #include <linux/module.h>
  18. #include <linux/ftrace.h>
  19. #include <linux/percpu.h>
  20. #include <linux/init.h>
  21. #include <linux/list.h>
  22. #include <asm/asm-prototypes.h>
  23. #include <asm/cacheflush.h>
  24. #include <asm/code-patching.h>
  25. #include <asm/ftrace.h>
  26. #include <asm/syscall.h>
  27. #ifdef CONFIG_DYNAMIC_FTRACE
  28. static unsigned int
  29. ftrace_call_replace(unsigned long ip, unsigned long addr, int link)
  30. {
  31. unsigned int op;
  32. addr = ppc_function_entry((void *)addr);
  33. /* if (link) set op to 'bl' else 'b' */
  34. op = create_branch((unsigned int *)ip, addr, link ? 1 : 0);
  35. return op;
  36. }
  37. static int
  38. ftrace_modify_code(unsigned long ip, unsigned int old, unsigned int new)
  39. {
  40. unsigned int replaced;
  41. /*
  42. * Note:
  43. * We are paranoid about modifying text, as if a bug was to happen, it
  44. * could cause us to read or write to someplace that could cause harm.
  45. * Carefully read and modify the code with probe_kernel_*(), and make
  46. * sure what we read is what we expected it to be before modifying it.
  47. */
  48. /* read the text we want to modify */
  49. if (probe_kernel_read(&replaced, (void *)ip, MCOUNT_INSN_SIZE))
  50. return -EFAULT;
  51. /* Make sure it is what we expect it to be */
  52. if (replaced != old) {
  53. pr_err("%p: replaced (%#x) != old (%#x)",
  54. (void *)ip, replaced, old);
  55. return -EINVAL;
  56. }
  57. /* replace the text with the new text */
  58. if (patch_instruction((unsigned int *)ip, new))
  59. return -EPERM;
  60. return 0;
  61. }
  62. /*
  63. * Helper functions that are the same for both PPC64 and PPC32.
  64. */
  65. static int test_24bit_addr(unsigned long ip, unsigned long addr)
  66. {
  67. addr = ppc_function_entry((void *)addr);
  68. /* use the create_branch to verify that this offset can be branched */
  69. return create_branch((unsigned int *)ip, addr, 0);
  70. }
  71. #ifdef CONFIG_MODULES
  72. static int is_bl_op(unsigned int op)
  73. {
  74. return (op & 0xfc000003) == 0x48000001;
  75. }
  76. static unsigned long find_bl_target(unsigned long ip, unsigned int op)
  77. {
  78. static int offset;
  79. offset = (op & 0x03fffffc);
  80. /* make it signed */
  81. if (offset & 0x02000000)
  82. offset |= 0xfe000000;
  83. return ip + (long)offset;
  84. }
  85. #ifdef CONFIG_PPC64
/*
 * PPC64: turn the module call site at rec->ip (currently a 'bl' into a
 * trampoline) back into a "nop", after verifying that the trampoline
 * really resolves to @addr.  With -mprofile-kernel the replacement is a
 * real nop; otherwise a 'b +8' is used so the TOC-restore load that
 * follows the call is skipped, not executed (see comment below).
 *
 * Returns 0 on success, -EFAULT on read/translation failures, -EINVAL
 * when the site does not look as expected, -EPERM if patching fails.
 */
static int
__ftrace_make_nop(struct module *mod,
		  struct dyn_ftrace *rec, unsigned long addr)
{
	unsigned long entry, ptr, tramp;
	unsigned long ip = rec->ip;
	unsigned int op, pop;

	/* read where this goes */
	if (probe_kernel_read(&op, (void *)ip, sizeof(int))) {
		pr_err("Fetching opcode failed.\n");
		return -EFAULT;
	}

	/* Make sure that that this is still a 24bit jump */
	if (!is_bl_op(op)) {
		pr_err("Not expected bl: opcode is %x\n", op);
		return -EINVAL;
	}

	/* lets find where the pointer goes */
	tramp = find_bl_target(ip, op);

	pr_devel("ip:%lx jumps to %lx", ip, tramp);

	/* Resolve the trampoline to the real callee address */
	if (module_trampoline_target(mod, tramp, &ptr)) {
		pr_err("Failed to get trampoline target\n");
		return -EFAULT;
	}

	pr_devel("trampoline target %lx", ptr);

	entry = ppc_global_function_entry((void *)addr);
	/* This should match what was called */
	if (ptr != entry) {
		pr_err("addr %lx does not match expected %lx\n", ptr, entry);
		return -EINVAL;
	}

#ifdef CONFIG_MPROFILE_KERNEL
	/* When using -mkernel_profile there is no load to jump over */
	pop = PPC_INST_NOP;

	/*
	 * Sanity check: the instruction preceding the 'bl' should be one
	 * of the LR-save forms emitted around the _mcount call.
	 */
	if (probe_kernel_read(&op, (void *)(ip - 4), 4)) {
		pr_err("Fetching instruction at %lx failed.\n", ip - 4);
		return -EFAULT;
	}

	/* We expect either a mflr r0, or a std r0, LRSAVE(r1) */
	if (op != PPC_INST_MFLR && op != PPC_INST_STD_LR) {
		pr_err("Unexpected instruction %08x around bl _mcount\n", op);
		return -EINVAL;
	}
#else
	/*
	 * Our original call site looks like:
	 *
	 * bl <tramp>
	 * ld r2,XX(r1)
	 *
	 * Milton Miller pointed out that we can not simply nop the branch.
	 * If a task was preempted when calling a trace function, the nops
	 * will remove the way to restore the TOC in r2 and the r2 TOC will
	 * get corrupted.
	 *
	 * Use a b +8 to jump over the load.
	 */
	pop = PPC_INST_BRANCH | 8;	/* b +8 */

	/*
	 * Check what is in the next instruction. We can see ld r2,40(r1), but
	 * on first pass after boot we will see mflr r0.
	 */
	if (probe_kernel_read(&op, (void *)(ip+4), MCOUNT_INSN_SIZE)) {
		pr_err("Fetching op failed.\n");
		return -EFAULT;
	}

	if (op != PPC_INST_LD_TOC) {
		pr_err("Expected %08x found %08x\n", PPC_INST_LD_TOC, op);
		return -EINVAL;
	}
#endif /* CONFIG_MPROFILE_KERNEL */

	/* Everything checks out - install the "nop" */
	if (patch_instruction((unsigned int *)ip, pop)) {
		pr_err("Patching NOP failed.\n");
		return -EPERM;
	}

	return 0;
}
  163. #else /* !PPC64 */
/*
 * PPC32: verify that rec->ip still holds a 'bl' into the expected
 * module trampoline (a lis/addi/mtctr/bctr sequence loading @addr),
 * then replace the branch with a plain nop.
 *
 * Returns 0 on success, -EFAULT on read failures, -EINVAL when the
 * site or trampoline does not look as expected, -EPERM if patching
 * fails.
 */
static int
__ftrace_make_nop(struct module *mod,
		  struct dyn_ftrace *rec, unsigned long addr)
{
	unsigned int op;
	unsigned int jmp[4];
	unsigned long ip = rec->ip;
	unsigned long tramp;

	if (probe_kernel_read(&op, (void *)ip, MCOUNT_INSN_SIZE))
		return -EFAULT;

	/* Make sure that that this is still a 24bit jump */
	if (!is_bl_op(op)) {
		pr_err("Not expected bl: opcode is %x\n", op);
		return -EINVAL;
	}

	/* lets find where the pointer goes */
	tramp = find_bl_target(ip, op);

	/*
	 * On PPC32 the trampoline looks like:
	 * 0x3d, 0x80, 0x00, 0x00  lis r12,sym@ha
	 * 0x39, 0x8c, 0x00, 0x00  addi r12,r12,sym@l
	 * 0x7d, 0x89, 0x03, 0xa6  mtctr r12
	 * 0x4e, 0x80, 0x04, 0x20  bctr
	 */
	pr_devel("ip:%lx jumps to %lx", ip, tramp);

	/* Find where the trampoline jumps to */
	if (probe_kernel_read(jmp, (void *)tramp, sizeof(jmp))) {
		pr_err("Failed to read %lx\n", tramp);
		return -EFAULT;
	}

	pr_devel(" %08x %08x ", jmp[0], jmp[1]);

	/* verify that this is what we expect it to be */
	if (((jmp[0] & 0xffff0000) != 0x3d800000) ||
	    ((jmp[1] & 0xffff0000) != 0x398c0000) ||
	    (jmp[2] != 0x7d8903a6) ||
	    (jmp[3] != 0x4e800420)) {
		pr_err("Not a trampoline\n");
		return -EINVAL;
	}

	/* Reassemble the trampoline's target from the lis/addi immediates */
	tramp = (jmp[1] & 0xffff) |
		((jmp[0] & 0xffff) << 16);

	/* addi sign-extends its immediate: compensate when bit 15 is set */
	if (tramp & 0x8000)
		tramp -= 0x10000;

	pr_devel(" %lx ", tramp);

	if (tramp != addr) {
		pr_err("Trampoline location %08lx does not match addr\n",
		       tramp);
		return -EINVAL;
	}

	op = PPC_INST_NOP;

	if (patch_instruction((unsigned int *)ip, op))
		return -EPERM;

	return 0;
}
  218. #endif /* PPC64 */
  219. #endif /* CONFIG_MODULES */
  220. int ftrace_make_nop(struct module *mod,
  221. struct dyn_ftrace *rec, unsigned long addr)
  222. {
  223. unsigned long ip = rec->ip;
  224. unsigned int old, new;
  225. /*
  226. * If the calling address is more that 24 bits away,
  227. * then we had to use a trampoline to make the call.
  228. * Otherwise just update the call site.
  229. */
  230. if (test_24bit_addr(ip, addr)) {
  231. /* within range */
  232. old = ftrace_call_replace(ip, addr, 1);
  233. new = PPC_INST_NOP;
  234. return ftrace_modify_code(ip, old, new);
  235. }
  236. #ifdef CONFIG_MODULES
  237. /*
  238. * Out of range jumps are called from modules.
  239. * We should either already have a pointer to the module
  240. * or it has been passed in.
  241. */
  242. if (!rec->arch.mod) {
  243. if (!mod) {
  244. pr_err("No module loaded addr=%lx\n", addr);
  245. return -EFAULT;
  246. }
  247. rec->arch.mod = mod;
  248. } else if (mod) {
  249. if (mod != rec->arch.mod) {
  250. pr_err("Record mod %p not equal to passed in mod %p\n",
  251. rec->arch.mod, mod);
  252. return -EINVAL;
  253. }
  254. /* nothing to do if mod == rec->arch.mod */
  255. } else
  256. mod = rec->arch.mod;
  257. return __ftrace_make_nop(mod, rec, addr);
  258. #else
  259. /* We should not get here without modules */
  260. return -EINVAL;
  261. #endif /* CONFIG_MODULES */
  262. }
  263. #ifdef CONFIG_MODULES
  264. #ifdef CONFIG_PPC64
  265. /*
  266. * Examine the existing instructions for __ftrace_make_call.
  267. * They should effectively be a NOP, and follow formal constraints,
  268. * depending on the ABI. Return false if they don't.
  269. */
  270. #ifndef CONFIG_MPROFILE_KERNEL
  271. static int
  272. expected_nop_sequence(void *ip, unsigned int op0, unsigned int op1)
  273. {
  274. /*
  275. * We expect to see:
  276. *
  277. * b +8
  278. * ld r2,XX(r1)
  279. *
  280. * The load offset is different depending on the ABI. For simplicity
  281. * just mask it out when doing the compare.
  282. */
  283. if ((op0 != 0x48000008) || ((op1 & 0xffff0000) != 0xe8410000))
  284. return 0;
  285. return 1;
  286. }
  287. #else
  288. static int
  289. expected_nop_sequence(void *ip, unsigned int op0, unsigned int op1)
  290. {
  291. /* look for patched "NOP" on ppc64 with -mprofile-kernel */
  292. if (op0 != PPC_INST_NOP)
  293. return 0;
  294. return 1;
  295. }
  296. #endif
/*
 * PPC64: patch the two-instruction "nop" sequence at rec->ip into a
 * 'bl' to the module's ftrace trampoline, after checking that the
 * trampoline really resolves to @addr.
 *
 * Returns 0 on success, -EFAULT on read/translation failures, -EINVAL
 * when the site does not look as expected or the branch is out of
 * range.
 */
static int
__ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr)
{
	unsigned int op[2];
	void *ip = (void *)rec->ip;
	unsigned long entry, ptr, tramp;
	struct module *mod = rec->arch.mod;

	/* read where this goes */
	if (probe_kernel_read(op, ip, sizeof(op)))
		return -EFAULT;

	/* The site must still carry the "nop" we installed earlier */
	if (!expected_nop_sequence(ip, op[0], op[1])) {
		pr_err("Unexpected call sequence at %p: %x %x\n",
		       ip, op[0], op[1]);
		return -EINVAL;
	}

	/* If we never set up ftrace trampoline(s), then bail */
#ifdef CONFIG_DYNAMIC_FTRACE_WITH_REGS
	if (!mod->arch.tramp || !mod->arch.tramp_regs) {
#else
	if (!mod->arch.tramp) {
#endif
		pr_err("No ftrace trampoline\n");
		return -EINVAL;
	}

#ifdef CONFIG_DYNAMIC_FTRACE_WITH_REGS
	/* Records wanting pt_regs use the regs-saving trampoline */
	if (rec->flags & FTRACE_FL_REGS)
		tramp = mod->arch.tramp_regs;
	else
#endif
		tramp = mod->arch.tramp;

	if (module_trampoline_target(mod, tramp, &ptr)) {
		pr_err("Failed to get trampoline target\n");
		return -EFAULT;
	}

	pr_devel("trampoline target %lx", ptr);

	entry = ppc_global_function_entry((void *)addr);
	/* This should match what was called */
	if (ptr != entry) {
		pr_err("addr %lx does not match expected %lx\n", ptr, entry);
		return -EINVAL;
	}

	/* Ensure branch is within 24 bits */
	if (!create_branch(ip, tramp, BRANCH_SET_LINK)) {
		pr_err("Branch out of range\n");
		return -EINVAL;
	}

	if (patch_branch(ip, tramp, BRANCH_SET_LINK)) {
		pr_err("REL24 out of range!\n");
		return -EINVAL;
	}

	return 0;
}
  349. #else /* !CONFIG_PPC64: */
  350. static int
  351. __ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr)
  352. {
  353. unsigned int op;
  354. unsigned long ip = rec->ip;
  355. /* read where this goes */
  356. if (probe_kernel_read(&op, (void *)ip, MCOUNT_INSN_SIZE))
  357. return -EFAULT;
  358. /* It should be pointing to a nop */
  359. if (op != PPC_INST_NOP) {
  360. pr_err("Expected NOP but have %x\n", op);
  361. return -EINVAL;
  362. }
  363. /* If we never set up a trampoline to ftrace_caller, then bail */
  364. if (!rec->arch.mod->arch.tramp) {
  365. pr_err("No ftrace trampoline\n");
  366. return -EINVAL;
  367. }
  368. /* create the branch to the trampoline */
  369. op = create_branch((unsigned int *)ip,
  370. rec->arch.mod->arch.tramp, BRANCH_SET_LINK);
  371. if (!op) {
  372. pr_err("REL24 out of range!\n");
  373. return -EINVAL;
  374. }
  375. pr_devel("write to %lx\n", rec->ip);
  376. if (patch_instruction((unsigned int *)ip, op))
  377. return -EPERM;
  378. return 0;
  379. }
  380. #endif /* CONFIG_PPC64 */
  381. #endif /* CONFIG_MODULES */
  382. int ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr)
  383. {
  384. unsigned long ip = rec->ip;
  385. unsigned int old, new;
  386. /*
  387. * If the calling address is more that 24 bits away,
  388. * then we had to use a trampoline to make the call.
  389. * Otherwise just update the call site.
  390. */
  391. if (test_24bit_addr(ip, addr)) {
  392. /* within range */
  393. old = PPC_INST_NOP;
  394. new = ftrace_call_replace(ip, addr, 1);
  395. return ftrace_modify_code(ip, old, new);
  396. }
  397. #ifdef CONFIG_MODULES
  398. /*
  399. * Out of range jumps are called from modules.
  400. * Being that we are converting from nop, it had better
  401. * already have a module defined.
  402. */
  403. if (!rec->arch.mod) {
  404. pr_err("No module loaded\n");
  405. return -EINVAL;
  406. }
  407. return __ftrace_make_call(rec, addr);
  408. #else
  409. /* We should not get here without modules */
  410. return -EINVAL;
  411. #endif /* CONFIG_MODULES */
  412. }
  413. #ifdef CONFIG_DYNAMIC_FTRACE_WITH_REGS
  414. #ifdef CONFIG_MODULES
/*
 * Redirect the existing 'bl' at rec->ip from @old_addr to @addr,
 * e.g. to switch a module call site between the plain and the
 * regs-saving ftrace trampolines.
 *
 * Returns 0 on success, -EFAULT on read/translation failures, -EINVAL
 * when the site does not match expectations or the branch is out of
 * range.
 */
static int
__ftrace_modify_call(struct dyn_ftrace *rec, unsigned long old_addr,
		     unsigned long addr)
{
	unsigned int op;
	unsigned long ip = rec->ip;
	unsigned long entry, ptr, tramp;
	struct module *mod = rec->arch.mod;

	/* If we never set up ftrace trampolines, then bail */
	if (!mod->arch.tramp || !mod->arch.tramp_regs) {
		pr_err("No ftrace trampoline\n");
		return -EINVAL;
	}

	/* read where this goes */
	if (probe_kernel_read(&op, (void *)ip, sizeof(int))) {
		pr_err("Fetching opcode failed.\n");
		return -EFAULT;
	}

	/* Make sure that that this is still a 24bit jump */
	if (!is_bl_op(op)) {
		pr_err("Not expected bl: opcode is %x\n", op);
		return -EINVAL;
	}

	/* lets find where the pointer goes */
	tramp = find_bl_target(ip, op);
	entry = ppc_global_function_entry((void *)old_addr);

	pr_devel("ip:%lx jumps to %lx", ip, tramp);

	/*
	 * The branch either targets old_addr directly, or it targets a
	 * trampoline that must resolve to old_addr.
	 */
	if (tramp != entry) {
		/* old_addr is not within range, so we must have used a trampoline */
		if (module_trampoline_target(mod, tramp, &ptr)) {
			pr_err("Failed to get trampoline target\n");
			return -EFAULT;
		}

		pr_devel("trampoline target %lx", ptr);

		/* This should match what was called */
		if (ptr != entry) {
			pr_err("addr %lx does not match expected %lx\n", ptr, entry);
			return -EINVAL;
		}
	}

	/* The new target may be within range */
	if (test_24bit_addr(ip, addr)) {
		/* within range */
		if (patch_branch((unsigned int *)ip, addr, BRANCH_SET_LINK)) {
			pr_err("REL24 out of range!\n");
			return -EINVAL;
		}

		return 0;
	}

	/* Out of range: pick the trampoline matching the record's flags */
	if (rec->flags & FTRACE_FL_REGS)
		tramp = mod->arch.tramp_regs;
	else
		tramp = mod->arch.tramp;

	if (module_trampoline_target(mod, tramp, &ptr)) {
		pr_err("Failed to get trampoline target\n");
		return -EFAULT;
	}

	pr_devel("trampoline target %lx", ptr);

	entry = ppc_global_function_entry((void *)addr);

	/* This should match what was called */
	if (ptr != entry) {
		pr_err("addr %lx does not match expected %lx\n", ptr, entry);
		return -EINVAL;
	}

	/* Ensure branch is within 24 bits */
	if (!create_branch((unsigned int *)ip, tramp, BRANCH_SET_LINK)) {
		pr_err("Branch out of range\n");
		return -EINVAL;
	}

	if (patch_branch((unsigned int *)ip, tramp, BRANCH_SET_LINK)) {
		pr_err("REL24 out of range!\n");
		return -EINVAL;
	}

	return 0;
}
  490. #endif
  491. int ftrace_modify_call(struct dyn_ftrace *rec, unsigned long old_addr,
  492. unsigned long addr)
  493. {
  494. unsigned long ip = rec->ip;
  495. unsigned int old, new;
  496. /*
  497. * If the calling address is more that 24 bits away,
  498. * then we had to use a trampoline to make the call.
  499. * Otherwise just update the call site.
  500. */
  501. if (test_24bit_addr(ip, addr) && test_24bit_addr(ip, old_addr)) {
  502. /* within range */
  503. old = ftrace_call_replace(ip, old_addr, 1);
  504. new = ftrace_call_replace(ip, addr, 1);
  505. return ftrace_modify_code(ip, old, new);
  506. }
  507. #ifdef CONFIG_MODULES
  508. /*
  509. * Out of range jumps are called from modules.
  510. */
  511. if (!rec->arch.mod) {
  512. pr_err("No module loaded\n");
  513. return -EINVAL;
  514. }
  515. return __ftrace_modify_call(rec, old_addr, addr);
  516. #else
  517. /* We should not get here without modules */
  518. return -EINVAL;
  519. #endif /* CONFIG_MODULES */
  520. }
  521. #endif
  522. int ftrace_update_ftrace_func(ftrace_func_t func)
  523. {
  524. unsigned long ip = (unsigned long)(&ftrace_call);
  525. unsigned int old, new;
  526. int ret;
  527. old = *(unsigned int *)&ftrace_call;
  528. new = ftrace_call_replace(ip, (unsigned long)func, 1);
  529. ret = ftrace_modify_code(ip, old, new);
  530. #ifdef CONFIG_DYNAMIC_FTRACE_WITH_REGS
  531. /* Also update the regs callback function */
  532. if (!ret) {
  533. ip = (unsigned long)(&ftrace_regs_call);
  534. old = *(unsigned int *)&ftrace_regs_call;
  535. new = ftrace_call_replace(ip, (unsigned long)func, 1);
  536. ret = ftrace_modify_code(ip, old, new);
  537. }
  538. #endif
  539. return ret;
  540. }
/*
 * Use the default ftrace_modify_all_code, but without stop_machine().
 * NOTE(review): presumably safe because patch_instruction() updates
 * each site atomically on powerpc -- confirm against its semantics.
 */
void arch_ftrace_update_code(int command)
{
	ftrace_modify_all_code(command);
}
/* No architecture-specific dynamic-ftrace initialisation is needed. */
int __init ftrace_dyn_arch_init(void)
{
	return 0;
}
  553. #endif /* CONFIG_DYNAMIC_FTRACE */
  554. #ifdef CONFIG_FUNCTION_GRAPH_TRACER
  555. extern void ftrace_graph_call(void);
  556. extern void ftrace_graph_stub(void);
  557. int ftrace_enable_ftrace_graph_caller(void)
  558. {
  559. unsigned long ip = (unsigned long)(&ftrace_graph_call);
  560. unsigned long addr = (unsigned long)(&ftrace_graph_caller);
  561. unsigned long stub = (unsigned long)(&ftrace_graph_stub);
  562. unsigned int old, new;
  563. old = ftrace_call_replace(ip, stub, 0);
  564. new = ftrace_call_replace(ip, addr, 0);
  565. return ftrace_modify_code(ip, old, new);
  566. }
  567. int ftrace_disable_ftrace_graph_caller(void)
  568. {
  569. unsigned long ip = (unsigned long)(&ftrace_graph_call);
  570. unsigned long addr = (unsigned long)(&ftrace_graph_caller);
  571. unsigned long stub = (unsigned long)(&ftrace_graph_stub);
  572. unsigned int old, new;
  573. old = ftrace_call_replace(ip, addr, 0);
  574. new = ftrace_call_replace(ip, stub, 0);
  575. return ftrace_modify_code(ip, old, new);
  576. }
  577. /*
  578. * Hook the return address and push it in the stack of return addrs
  579. * in current thread info. Return the address we want to divert to.
  580. */
  581. unsigned long prepare_ftrace_return(unsigned long parent, unsigned long ip)
  582. {
  583. unsigned long return_hooker;
  584. if (unlikely(ftrace_graph_is_dead()))
  585. goto out;
  586. if (unlikely(atomic_read(&current->tracing_graph_pause)))
  587. goto out;
  588. return_hooker = ppc_function_entry(return_to_handler);
  589. if (!function_graph_enter(parent, ip, 0, NULL))
  590. parent = return_hooker;
  591. out:
  592. return parent;
  593. }
  594. #endif /* CONFIG_FUNCTION_GRAPH_TRACER */
  595. #if defined(CONFIG_FTRACE_SYSCALLS) && defined(CONFIG_PPC64)
/*
 * Return the kernel entry address for syscall number @nr.
 * NOTE(review): each syscall appears to occupy two sys_call_table
 * slots here (hence nr*2) -- presumably the compat entry sits in the
 * adjacent slot; confirm against the ppc64 syscall table layout.
 */
unsigned long __init arch_syscall_addr(int nr)
{
	return sys_call_table[nr*2];
}
  600. #endif /* CONFIG_FTRACE_SYSCALLS && CONFIG_PPC64 */
  601. #ifdef PPC64_ELF_ABI_v1
  602. char *arch_ftrace_match_adjust(char *str, const char *search)
  603. {
  604. if (str[0] == '.' && search[0] != '.')
  605. return str + 1;
  606. else
  607. return str;
  608. }
  609. #endif /* PPC64_ELF_ABI_v1 */