trace_stack.c

// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2008 Steven Rostedt <srostedt@redhat.com>
 *
 */
#include <linux/sched/task_stack.h>
#include <linux/stacktrace.h>
#include <linux/kallsyms.h>
#include <linux/seq_file.h>
#include <linux/spinlock.h>
#include <linux/uaccess.h>
#include <linux/ftrace.h>
#include <linux/module.h>
#include <linux/sysctl.h>
#include <linux/init.h>

#include <asm/setup.h>

#include "trace.h"

static unsigned long stack_dump_trace[STACK_TRACE_ENTRIES+1] =
         { [0 ... (STACK_TRACE_ENTRIES)] = ULONG_MAX };
unsigned stack_trace_index[STACK_TRACE_ENTRIES];

/*
 * Reserve one entry for the passed in ip. This will allow
 * us to remove most or all of the stack size overhead
 * added by the stack tracer itself.
 */
struct stack_trace stack_trace_max = {
        .max_entries            = STACK_TRACE_ENTRIES - 1,
        .entries                = &stack_dump_trace[0],
};

unsigned long stack_trace_max_size;
arch_spinlock_t stack_trace_max_lock =
        (arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;

DEFINE_PER_CPU(int, disable_stack_tracer);
static DEFINE_MUTEX(stack_sysctl_mutex);

int stack_tracer_enabled;
static int last_stack_tracer_enabled;
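
/*
 * stack_dump_trace[] holds the return addresses of the deepest call
 * chain seen so far, and stack_trace_index[i] records how many bytes
 * of stack were still in use at the point entry i was found. The size
 * of frame i is therefore stack_trace_index[i] - stack_trace_index[i+1],
 * which is how both stack_trace_print() and t_show() below compute
 * their "Size" column.
 */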

void stack_trace_print(void)
{
        long i;
        int size;

        pr_emerg("        Depth    Size   Location    (%d entries)\n"
                 "        -----    ----   --------\n",
                 stack_trace_max.nr_entries);

        for (i = 0; i < stack_trace_max.nr_entries; i++) {
                if (stack_dump_trace[i] == ULONG_MAX)
                        break;
                if (i+1 == stack_trace_max.nr_entries ||
                    stack_dump_trace[i+1] == ULONG_MAX)
                        size = stack_trace_index[i];
                else
                        size = stack_trace_index[i] - stack_trace_index[i+1];

                pr_emerg("%3ld) %8d   %5d   %pS\n", i, stack_trace_index[i],
                         size, (void *)stack_dump_trace[i]);
        }
}
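
/*
 * Example of the resulting layout (symbols, depths and sizes are
 * hypothetical, for illustration only):
 *
 *         Depth    Size   Location    (3 entries)
 *         -----    ----   --------
 *   0)     4360      40   _raw_spin_lock+0x12/0x30
 *   1)     4320     112   vfs_read+0x5e/0x170
 *   2)     4208    4208   entry_SYSCALL_64+0x6c/0xd1
 */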

/*
 * When arch-specific code overrides this function, the following
 * data should be filled up, assuming stack_trace_max_lock is held to
 * prevent concurrent updates.
 *     stack_trace_index[]
 *     stack_trace_max
 *     stack_trace_max_size
 */
void __weak
check_stack(unsigned long ip, unsigned long *stack)
{
        unsigned long this_size, flags;
        unsigned long *p, *top, *start;
        static int tracer_frame;
        int frame_size = READ_ONCE(tracer_frame);
        int i, x;
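
        /*
         * The thread stack is THREAD_SIZE aligned, so the low bits of
         * the address of the on-stack variable "stack" give its offset
         * from the base of the stack area. The stack grows down, so
         * THREAD_SIZE minus that offset is the number of bytes
         * currently in use.
         */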
        this_size = ((unsigned long)stack) & (THREAD_SIZE-1);
        this_size = THREAD_SIZE - this_size;

        /* Remove the frame of the tracer */
        this_size -= frame_size;

        if (this_size <= stack_trace_max_size)
                return;

        /* we do not handle interrupt stacks yet */
        if (!object_is_on_stack(stack))
                return;

        /* Can't do this from NMI context (can cause deadlocks) */
        if (in_nmi())
                return;

        local_irq_save(flags);
        arch_spin_lock(&stack_trace_max_lock);

        /* In case another CPU set the tracer_frame on us */
        if (unlikely(!frame_size))
                this_size -= tracer_frame;

        /* a race could have already updated it */
        if (this_size <= stack_trace_max_size)
                goto out;

        stack_trace_max_size = this_size;

        stack_trace_max.nr_entries = 0;
        stack_trace_max.skip = 3;
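
        /*
         * The skip of 3 drops the most recent captured frames, which
         * belong to the tracer machinery itself (save_stack_trace()
         * and its callers) rather than the traced call chain. Exactly
         * which frames land in the dump is arch dependent, hence the
         * ip-matching loop below as a fallback.
         */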
        save_stack_trace(&stack_trace_max);

        /* Skip over the overhead of the stack tracer itself */
        for (i = 0; i < stack_trace_max.nr_entries; i++) {
                if (stack_dump_trace[i] == ip)
                        break;
        }

        /*
         * Some archs may not have the passed in ip in the dump.
         * If that happens, we need to show everything.
         */
        if (i == stack_trace_max.nr_entries)
                i = 0;

        /*
         * Now find where in the stack these are.
         */
        x = 0;
        start = stack;
        top = (unsigned long *)
                (((unsigned long)start & ~(THREAD_SIZE-1)) + THREAD_SIZE);

        /*
         * Loop through all the entries. One of the entries may
         * for some reason be missed on the stack, so we may
         * have to account for them. If they are all there, this
         * loop will only happen once. This code only takes place
         * on a new max, so it is far from a fast path.
         */
        while (i < stack_trace_max.nr_entries) {
                int found = 0;

                stack_trace_index[x] = this_size;
                p = start;

                for (; p < top && i < stack_trace_max.nr_entries; p++) {
                        if (stack_dump_trace[i] == ULONG_MAX)
                                break;
                        /*
                         * The READ_ONCE_NOCHECK is used to let KASAN know that
                         * this is not a stack-out-of-bounds error.
                         */
                        if ((READ_ONCE_NOCHECK(*p)) == stack_dump_trace[i]) {
                                stack_dump_trace[x] = stack_dump_trace[i++];
                                this_size = stack_trace_index[x++] =
                                        (top - p) * sizeof(unsigned long);
                                found = 1;
                                /* Start the search from here */
                                start = p + 1;
                                /*
                                 * We do not want to show the overhead
                                 * of the stack tracer stack in the
                                 * max stack. If we haven't figured
                                 * out what that is, then figure it out
                                 * now.
                                 */
                                if (unlikely(!tracer_frame)) {
                                        tracer_frame = (p - stack) *
                                                sizeof(unsigned long);
                                        stack_trace_max_size -= tracer_frame;
                                }
                        }
                }

                if (!found)
                        i++;
        }

        stack_trace_max.nr_entries = x;
        for (; x < i; x++)
                stack_dump_trace[x] = ULONG_MAX;

        if (task_stack_end_corrupted(current)) {
                stack_trace_print();
                BUG();
        }

 out:
        arch_spin_unlock(&stack_trace_max_lock);
        local_irq_restore(flags);
}

/* Some archs may not define MCOUNT_INSN_SIZE */
#ifndef MCOUNT_INSN_SIZE
# define MCOUNT_INSN_SIZE 0
#endif

static void
stack_trace_call(unsigned long ip, unsigned long parent_ip,
                 struct ftrace_ops *op, struct pt_regs *pt_regs)
{
        unsigned long stack;

        preempt_disable_notrace();

        /* no atomic needed, we only modify this variable by this cpu */
        __this_cpu_inc(disable_stack_tracer);
        if (__this_cpu_read(disable_stack_tracer) != 1)
                goto out;

        /* If rcu is not watching, then save stack trace can fail */
        if (!rcu_is_watching())
                goto out;
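
        /*
         * The ip ftrace hands us points at the mcount/fentry call
         * site inside the traced function; stepping past that
         * instruction makes the recorded address resolve inside the
         * function body proper.
         */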
        ip += MCOUNT_INSN_SIZE;

        check_stack(ip, &stack);

 out:
        __this_cpu_dec(disable_stack_tracer);
        /* prevent recursion in schedule */
        preempt_enable_notrace();
}

static struct ftrace_ops trace_ops __read_mostly =
{
        .func = stack_trace_call,
        .flags = FTRACE_OPS_FL_RECURSION_SAFE,
};

static ssize_t
stack_max_size_read(struct file *filp, char __user *ubuf,
                    size_t count, loff_t *ppos)
{
        unsigned long *ptr = filp->private_data;
        char buf[64];
        int r;

        r = snprintf(buf, sizeof(buf), "%ld\n", *ptr);
        if (r > sizeof(buf))
                r = sizeof(buf);
        return simple_read_from_buffer(ubuf, count, ppos, buf, r);
}

static ssize_t
stack_max_size_write(struct file *filp, const char __user *ubuf,
                     size_t count, loff_t *ppos)
{
        long *ptr = filp->private_data;
        unsigned long val, flags;
        int ret;

        ret = kstrtoul_from_user(ubuf, count, 10, &val);
        if (ret)
                return ret;

        local_irq_save(flags);

        /*
         * In case we trace inside arch_spin_lock() or after (NMI),
         * we will cause circular lock, so we also need to increase
         * the percpu disable_stack_tracer here.
         */
        __this_cpu_inc(disable_stack_tracer);

        arch_spin_lock(&stack_trace_max_lock);
        *ptr = val;
        arch_spin_unlock(&stack_trace_max_lock);

        __this_cpu_dec(disable_stack_tracer);
        local_irq_restore(flags);

        return count;
}
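
/*
 * Writing to stack_max_size replaces the recorded maximum, so writing
 * 0 effectively re-arms the tracer to capture the next deep stack.
 * Illustrative use, assuming the tracing directory is mounted at
 * /sys/kernel/debug/tracing:
 *
 *      echo 0 > /sys/kernel/debug/tracing/stack_max_size
 */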

static const struct file_operations stack_max_size_fops = {
        .open           = tracing_open_generic,
        .read           = stack_max_size_read,
        .write          = stack_max_size_write,
        .llseek         = default_llseek,
};

static void *
__next(struct seq_file *m, loff_t *pos)
{
        long n = *pos - 1;

        if (n > stack_trace_max.nr_entries || stack_dump_trace[n] == ULONG_MAX)
                return NULL;

        m->private = (void *)n;
        return &m->private;
}

static void *
t_next(struct seq_file *m, void *v, loff_t *pos)
{
        (*pos)++;
        return __next(m, pos);
}

static void *t_start(struct seq_file *m, loff_t *pos)
{
        local_irq_disable();

        __this_cpu_inc(disable_stack_tracer);

        arch_spin_lock(&stack_trace_max_lock);

        if (*pos == 0)
                return SEQ_START_TOKEN;

        return __next(m, pos);
}

static void t_stop(struct seq_file *m, void *p)
{
        arch_spin_unlock(&stack_trace_max_lock);

        __this_cpu_dec(disable_stack_tracer);

        local_irq_enable();
}
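
/*
 * t_start()/t_stop() bracket every seq_file traversal: the max-stack
 * lock is held and the per-cpu stack tracer is disabled for the whole
 * read, so the snapshot cannot change (and the tracer cannot recurse
 * on this CPU) while userspace walks stack_trace.
 */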

static void trace_lookup_stack(struct seq_file *m, long i)
{
        unsigned long addr = stack_dump_trace[i];

        seq_printf(m, "%pS\n", (void *)addr);
}

static void print_disabled(struct seq_file *m)
{
        seq_puts(m, "#\n"
                 "#  Stack tracer disabled\n"
                 "#\n"
                 "# To enable the stack tracer, either add 'stacktrace' to the\n"
                 "# kernel command line\n"
                 "# or 'echo 1 > /proc/sys/kernel/stack_tracer_enabled'\n"
                 "#\n");
}

static int t_show(struct seq_file *m, void *v)
{
        long i;
        int size;

        if (v == SEQ_START_TOKEN) {
                seq_printf(m, "        Depth    Size   Location"
                           "    (%d entries)\n"
                           "        -----    ----   --------\n",
                           stack_trace_max.nr_entries);

                if (!stack_tracer_enabled && !stack_trace_max_size)
                        print_disabled(m);

                return 0;
        }

        i = *(long *)v;

        if (i >= stack_trace_max.nr_entries ||
            stack_dump_trace[i] == ULONG_MAX)
                return 0;

        if (i+1 == stack_trace_max.nr_entries ||
            stack_dump_trace[i+1] == ULONG_MAX)
                size = stack_trace_index[i];
        else
                size = stack_trace_index[i] - stack_trace_index[i+1];

        seq_printf(m, "%3ld) %8d   %5d   ", i, stack_trace_index[i], size);

        trace_lookup_stack(m, i);

        return 0;
}

static const struct seq_operations stack_trace_seq_ops = {
        .start          = t_start,
        .next           = t_next,
        .stop           = t_stop,
        .show           = t_show,
};

static int stack_trace_open(struct inode *inode, struct file *file)
{
        return seq_open(file, &stack_trace_seq_ops);
}

static const struct file_operations stack_trace_fops = {
        .open           = stack_trace_open,
        .read           = seq_read,
        .llseek         = seq_lseek,
        .release        = seq_release,
};

#ifdef CONFIG_DYNAMIC_FTRACE

static int
stack_trace_filter_open(struct inode *inode, struct file *file)
{
        struct ftrace_ops *ops = inode->i_private;

        return ftrace_regex_open(ops, FTRACE_ITER_FILTER,
                                 inode, file);
}

static const struct file_operations stack_trace_filter_fops = {
        .open = stack_trace_filter_open,
        .read = seq_read,
        .write = ftrace_filter_write,
        .llseek = tracing_lseek,
        .release = ftrace_regex_release,
};

#endif /* CONFIG_DYNAMIC_FTRACE */

int
stack_trace_sysctl(struct ctl_table *table, int write,
                   void __user *buffer, size_t *lenp,
                   loff_t *ppos)
{
        int ret;

        mutex_lock(&stack_sysctl_mutex);

        ret = proc_dointvec(table, write, buffer, lenp, ppos);

        if (ret || !write ||
            (last_stack_tracer_enabled == !!stack_tracer_enabled))
                goto out;

        last_stack_tracer_enabled = !!stack_tracer_enabled;

        if (stack_tracer_enabled)
                register_ftrace_function(&trace_ops);
        else
                unregister_ftrace_function(&trace_ops);

 out:
        mutex_unlock(&stack_sysctl_mutex);
        return ret;
}
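
/*
 * Runtime toggle via the sysctl that stack_trace_sysctl() backs, as
 * print_disabled() above also suggests:
 *
 *      echo 1 > /proc/sys/kernel/stack_tracer_enabled
 *      echo 0 > /proc/sys/kernel/stack_tracer_enabled
 */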

static char stack_trace_filter_buf[COMMAND_LINE_SIZE+1] __initdata;

static __init int enable_stacktrace(char *str)
{
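        /*
         * __setup("stacktrace", ...) matches any boot parameter that
         * begins with "stacktrace" and passes the remainder here, so
         * "stacktrace_filter=<funcs>" arrives as "_filter=<funcs>".
         */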
        if (strncmp(str, "_filter=", 8) == 0)
                strncpy(stack_trace_filter_buf, str+8, COMMAND_LINE_SIZE);

        stack_tracer_enabled = 1;
        last_stack_tracer_enabled = 1;
        return 1;
}
__setup("stacktrace", enable_stacktrace);
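
/*
 * Boot-time enablement (illustrative command lines; the filter form
 * takes the same comma-separated patterns as stack_trace_filter):
 *
 *      stacktrace
 *      stacktrace_filter=vfs_read,vfs_write
 */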

static __init int stack_trace_init(void)
{
        struct dentry *d_tracer;

        d_tracer = tracing_init_dentry();
        if (IS_ERR(d_tracer))
                return 0;

        trace_create_file("stack_max_size", 0644, d_tracer,
                        &stack_trace_max_size, &stack_max_size_fops);

        trace_create_file("stack_trace", 0444, d_tracer,
                        NULL, &stack_trace_fops);

#ifdef CONFIG_DYNAMIC_FTRACE
        trace_create_file("stack_trace_filter", 0644, d_tracer,
                          &trace_ops, &stack_trace_filter_fops);
#endif

        if (stack_trace_filter_buf[0])
                ftrace_set_early_filter(&trace_ops, stack_trace_filter_buf, 1);

        if (stack_tracer_enabled)
                register_ftrace_function(&trace_ops);

        return 0;
}

device_initcall(stack_trace_init);
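
/*
 * Putting it together: a typical (illustrative) session, assuming the
 * tracing directory is mounted at /sys/kernel/debug/tracing.
 *
 *      echo 1 > /proc/sys/kernel/stack_tracer_enabled
 *      echo 'vfs_*' > /sys/kernel/debug/tracing/stack_trace_filter
 *      cat /sys/kernel/debug/tracing/stack_trace
 *      echo 0 > /sys/kernel/debug/tracing/stack_max_size
 */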