stacktrace.c

// SPDX-License-Identifier: GPL-2.0
/*
 * Stack trace utility functions etc.
 *
 * Copyright 2008 Christoph Hellwig, IBM Corp.
 * Copyright 2018 SUSE Linux GmbH
 * Copyright 2018 Nick Piggin, Michael Ellerman, IBM Corp.
 */
#include <linux/delay.h>
#include <linux/export.h>
#include <linux/kallsyms.h>
#include <linux/module.h>
#include <linux/nmi.h>
#include <linux/sched.h>
#include <linux/sched/debug.h>
#include <linux/sched/task_stack.h>
#include <linux/stacktrace.h>
#include <asm/ptrace.h>
#include <asm/processor.h>
#include <linux/ftrace.h>
#include <asm/kprobes.h>
#include <linux/rethook.h>

#include <asm/paca.h>
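
/*
 * A sketch of the standard powerpc stack frame layout that both walkers
 * below rely on (standard ABI, summarized here for illustration): each
 * frame starts with a back chain word at 0(r1) pointing at the next frame
 * up, and the saved return address sits STACK_FRAME_LR_SAVE longs into
 * the frame (offset 16 on 64-bit, 4 on 32-bit), so following a frame
 * takes just two loads:
 *
 *	sp ->	+------------------+
 *		| back chain       |  stack[0] (caller's SP)
 *		| CR save (64-bit) |
 *		| LR save          |  stack[STACK_FRAME_LR_SAVE]
 *		| ...              |
 *		+------------------+
 */
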
void __no_sanitize_address arch_stack_walk(stack_trace_consume_fn consume_entry, void *cookie,
					   struct task_struct *task, struct pt_regs *regs)
{
	unsigned long sp;

	if (regs && !consume_entry(cookie, regs->nip))
		return;

	if (regs)
		sp = regs->gpr[1];
	else if (task == current)
		sp = current_stack_frame();
	else
		sp = task->thread.ksp;

	for (;;) {
		unsigned long *stack = (unsigned long *) sp;
		unsigned long newsp, ip;

		if (!validate_sp(sp, task))
			return;

		newsp = stack[0];
		ip = stack[STACK_FRAME_LR_SAVE];

		if (!consume_entry(cookie, ip))
			return;

		sp = newsp;
	}
}
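
/*
 * A minimal sketch of a caller (hedged: arch_stack_walk() is not called
 * directly, it is the backend for the generic CONFIG_ARCH_STACKWALK API
 * in <linux/stacktrace.h>):
 *
 *	unsigned long entries[32];
 *	unsigned int nr;
 *
 *	// Capture the current task's trace, skipping zero frames.
 *	nr = stack_trace_save(entries, ARRAY_SIZE(entries), 0);
 *	// Print the captured entries through printk.
 *	stack_trace_print(entries, nr, 0);
 */
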
/*
 * This function returns an error if it detects any unreliable features of the
 * stack. Otherwise it guarantees that the stack trace is reliable.
 *
 * If the task is not 'current', the caller *must* ensure the task is inactive.
 */
int __no_sanitize_address arch_stack_walk_reliable(stack_trace_consume_fn consume_entry,
						   void *cookie, struct task_struct *task)
{
	unsigned long sp;
	unsigned long newsp;
	unsigned long stack_page = (unsigned long)task_stack_page(task);
	unsigned long stack_end;
	int graph_idx = 0;
	bool firstframe;

	stack_end = stack_page + THREAD_SIZE;

	// See copy_thread() for details.
	if (task->flags & PF_KTHREAD)
		stack_end -= STACK_FRAME_MIN_SIZE;
	else
		stack_end -= STACK_USER_INT_FRAME_SIZE;

	if (task == current)
		sp = current_stack_frame();
	else
		sp = task->thread.ksp;

	if (sp < stack_page + sizeof(struct thread_struct) ||
	    sp > stack_end - STACK_FRAME_MIN_SIZE) {
		return -EINVAL;
	}

	for (firstframe = true; sp != stack_end;
	     firstframe = false, sp = newsp) {
		unsigned long *stack = (unsigned long *) sp;
		unsigned long ip;

		/* Sanity check: the ABI requires SP to be aligned to 16 bytes. */
		if (sp & 0xF)
			return -EINVAL;

		newsp = stack[0];
		/* The stack grows downwards; the unwinder may only go up. */
		if (newsp <= sp)
			return -EINVAL;

		if (newsp != stack_end &&
		    newsp > stack_end - STACK_FRAME_MIN_SIZE) {
			return -EINVAL; /* invalid backlink, too far up. */
		}

		/*
		 * We can only trust the bottom frame's backlink; the
		 * rest of the frame may be uninitialized. Continue to
		 * the next frame.
		 */
		if (firstframe)
			continue;

		/* Mark stacktraces with exception frames as unreliable. */
		if (sp <= stack_end - STACK_INT_FRAME_SIZE &&
		    stack[STACK_INT_FRAME_MARKER_LONGS] == STACK_FRAME_REGS_MARKER) {
			return -EINVAL;
		}

		/* Examine the saved LR: it must point into kernel code. */
		ip = stack[STACK_FRAME_LR_SAVE];
		if (!__kernel_text_address(ip))
			return -EINVAL;

		/*
		 * FIXME: IMHO these tests do not belong in
		 * arch-dependent code, they are generic.
		 */
		ip = ftrace_graph_ret_addr(task, &graph_idx, ip, stack);

		/*
		 * Mark stacktraces with kretprobed functions on them
		 * as unreliable.
		 */
#ifdef CONFIG_RETHOOK
		if (ip == (unsigned long)arch_rethook_trampoline)
			return -EINVAL;
#endif

		if (!consume_entry(cookie, ip))
			return -EINVAL;
	}
	return 0;
}
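
/*
 * A minimal sketch of a caller (hedged: the usual entry point is
 * stack_trace_save_tsk_reliable() from <linux/stacktrace.h>, as used by
 * livepatch; "task" here stands for some inactive task, per the comment
 * above):
 *
 *	unsigned long entries[64];
 *	int nr;
 *
 *	// A negative return means the trace could not be trusted.
 *	nr = stack_trace_save_tsk_reliable(task, entries, ARRAY_SIZE(entries));
 *	if (nr < 0)
 *		return nr;
 */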

#if defined(CONFIG_PPC_BOOK3S_64) && defined(CONFIG_NMI_IPI)
static void handle_backtrace_ipi(struct pt_regs *regs)
{
	/*
	 * nmi_cpu_backtrace() dumps this CPU's stack and clears this CPU
	 * from the pending mask that raise_backtrace_ipi() polls below.
	 */
	nmi_cpu_backtrace(regs);
}

static void raise_backtrace_ipi(cpumask_t *mask)
{
	struct paca_struct *p;
	unsigned int cpu;
	u64 delay_us;

	for_each_cpu(cpu, mask) {
		if (cpu == smp_processor_id()) {
			handle_backtrace_ipi(NULL);
			continue;
		}

		delay_us = 5 * USEC_PER_SEC;

		if (smp_send_safe_nmi_ipi(cpu, handle_backtrace_ipi, delay_us)) {
			// Now wait up to 5s for the other CPU to do its backtrace
			while (cpumask_test_cpu(cpu, mask) && delay_us) {
				udelay(1);
				delay_us--;
			}

			// Other CPU cleared itself from the mask
			if (delay_us)
				continue;
		}

		p = paca_ptrs[cpu];

		cpumask_clear_cpu(cpu, mask);

		pr_warn("CPU %d didn't respond to backtrace IPI, inspecting paca.\n", cpu);
		if (!virt_addr_valid(p)) {
			pr_warn("paca pointer appears corrupt? (%px)\n", p);
			continue;
		}

		pr_warn("irq_soft_mask: 0x%02x in_mce: %d in_nmi: %d",
			p->irq_soft_mask, p->in_mce, p->in_nmi);

		if (virt_addr_valid(p->__current))
			pr_cont(" current: %d (%s)\n", p->__current->pid,
				p->__current->comm);
		else
			pr_cont(" current pointer corrupt? (%px)\n", p->__current);

		pr_warn("Back trace of paca->saved_r1 (0x%016llx) (possibly stale):\n", p->saved_r1);
		show_stack(p->__current, (unsigned long *)p->saved_r1, KERN_WARNING);
	}
}

void arch_trigger_cpumask_backtrace(const cpumask_t *mask, int exclude_cpu)
{
	nmi_trigger_cpumask_backtrace(mask, exclude_cpu, raise_backtrace_ipi);
}
#endif /* defined(CONFIG_PPC_BOOK3S_64) && defined(CONFIG_NMI_IPI) */
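
/*
 * A minimal sketch of how these backtraces get triggered (hedged:
 * trigger_all_cpu_backtrace() is the generic <linux/nmi.h> wrapper that
 * ends up in arch_trigger_cpumask_backtrace() above):
 *
 *	#include <linux/nmi.h>
 *
 *	// Ask every online CPU, including this one, to dump its stack.
 *	if (!trigger_all_cpu_backtrace())
 *		// Fall back when the arch provides no NMI backtrace hook.
 *		dump_stack();
 */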