  1. // SPDX-License-Identifier: GPL-2.0
  2. /*
  3. * Stack trace management functions
  4. *
  5. * Copyright (C) 2022 Loongson Technology Corporation Limited
  6. */
  7. #include <linux/sched.h>
  8. #include <linux/stacktrace.h>
  9. #include <linux/uaccess.h>
  10. #include <asm/stacktrace.h>
  11. #include <asm/unwind.h>
  12. void arch_stack_walk(stack_trace_consume_fn consume_entry, void *cookie,
  13. struct task_struct *task, struct pt_regs *regs)
  14. {
  15. unsigned long addr;
  16. struct pt_regs dummyregs;
  17. struct unwind_state state;
  18. if (!regs) {
  19. regs = &dummyregs;
  20. if (task == current) {
  21. regs->regs[3] = (unsigned long)__builtin_frame_address(0);
  22. regs->csr_era = (unsigned long)__builtin_return_address(0);
  23. } else {
  24. regs->regs[3] = thread_saved_fp(task);
  25. regs->csr_era = thread_saved_ra(task);
  26. }
  27. regs->regs[1] = 0;
  28. regs->regs[22] = 0;
  29. }
  30. for (unwind_start(&state, task, regs);
  31. !unwind_done(&state); unwind_next_frame(&state)) {
  32. addr = unwind_get_return_address(&state);
  33. if (!addr || !consume_entry(cookie, addr))
  34. break;
  35. }
  36. }
  37. int arch_stack_walk_reliable(stack_trace_consume_fn consume_entry,
  38. void *cookie, struct task_struct *task)
  39. {
  40. unsigned long addr;
  41. struct pt_regs dummyregs;
  42. struct pt_regs *regs = &dummyregs;
  43. struct unwind_state state;
  44. if (task == current) {
  45. regs->regs[3] = (unsigned long)__builtin_frame_address(0);
  46. regs->csr_era = (unsigned long)__builtin_return_address(0);
  47. } else {
  48. regs->regs[3] = thread_saved_fp(task);
  49. regs->csr_era = thread_saved_ra(task);
  50. }
  51. regs->regs[1] = 0;
  52. regs->regs[22] = 0;
  53. for (unwind_start(&state, task, regs);
  54. !unwind_done(&state) && !unwind_error(&state); unwind_next_frame(&state)) {
  55. addr = unwind_get_return_address(&state);
  56. /*
  57. * A NULL or invalid return address probably means there's some
  58. * generated code which __kernel_text_address() doesn't know about.
  59. */
  60. if (!addr)
  61. return -EINVAL;
  62. if (!consume_entry(cookie, addr))
  63. return -EINVAL;
  64. }
  65. /* Check for stack corruption */
  66. if (unwind_error(&state))
  67. return -EINVAL;
  68. return 0;
  69. }
  70. static int
  71. copy_stack_frame(unsigned long fp, struct stack_frame *frame)
  72. {
  73. int ret = 1;
  74. unsigned long err;
  75. unsigned long __user *user_frame_tail;
  76. user_frame_tail = (unsigned long *)(fp - sizeof(struct stack_frame));
  77. if (!access_ok(user_frame_tail, sizeof(*frame)))
  78. return 0;
  79. pagefault_disable();
  80. err = (__copy_from_user_inatomic(frame, user_frame_tail, sizeof(*frame)));
  81. if (err || (unsigned long)user_frame_tail >= frame->fp)
  82. ret = 0;
  83. pagefault_enable();
  84. return ret;
  85. }
  86. void arch_stack_walk_user(stack_trace_consume_fn consume_entry, void *cookie,
  87. const struct pt_regs *regs)
  88. {
  89. unsigned long fp = regs->regs[22];
  90. while (fp && !((unsigned long)fp & 0xf)) {
  91. struct stack_frame frame;
  92. frame.fp = 0;
  93. frame.ra = 0;
  94. if (!copy_stack_frame(fp, &frame))
  95. break;
  96. if (!frame.ra)
  97. break;
  98. if (!consume_entry(cookie, frame.ra))
  99. break;
  100. fp = frame.fp;
  101. }
  102. }