unwind.c 15 KB

123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163164165166167168169170171172173174175176177178179180181182183184185186187188189190191192193194195196197198199200201202203204205206207208209210211212213214215216217218219220221222223224225226227228229230231232233234235236237238239240241242243244245246247248249250251252253254255256257258259260261262263264265266267268269270271272273274275276277278279280281282283284285286287288289290291292293294295296297298299300301302303304305306307308309310311312313314315316317318319320321322323324325326327328329330331332333334335336337338339340341342343344345346347348349350351352353354355356357358359360361362363364365366367368369370371372373374375376377378379380381382383384385386387388389390391392393394395396397398399400401402403404405406407408409410411412413414415416417418419420421422423424425426427428429430431432433434435436437438439440441442443444445446447448449450451452453454455456457458459460461462463464465466467468469470471472473474475476477478479480481482483484485486487488489490491492493494495496497498499500501502503504505506507508509510511512513514515516517518519520521522523524525526527528529530531532533534535536537538539540541542543544545546547548549550551552553554555556557558559560561562563564565566567568569570571572573574575576577578579580581582583584585586587588589590591592593594595596597598599600601602603604605606607608609610
  1. // SPDX-License-Identifier: GPL-2.0-only
  2. /*
  3. * arch/arm/kernel/unwind.c
  4. *
  5. * Copyright (C) 2008 ARM Limited
  6. *
  7. * Stack unwinding support for ARM
  8. *
  9. * An ARM EABI version of gcc is required to generate the unwind
  10. * tables. For information about the structure of the unwind tables,
  11. * see "Exception Handling ABI for the ARM Architecture" at:
  12. *
  13. * http://infocenter.arm.com/help/topic/com.arm.doc.subset.swdev.abi/index.html
  14. */
  15. #ifndef __CHECKER__
  16. #if !defined (__ARM_EABI__)
  17. #warning Your compiler does not have EABI support.
  18. #warning ARM unwind is known to compile only with EABI compilers.
  19. #warning Change compiler or disable ARM_UNWIND option.
  20. #endif
  21. #endif /* __CHECKER__ */
  22. #include <linux/kernel.h>
  23. #include <linux/init.h>
  24. #include <linux/export.h>
  25. #include <linux/sched.h>
  26. #include <linux/slab.h>
  27. #include <linux/spinlock.h>
  28. #include <linux/list.h>
  29. #include <linux/module.h>
  30. #include <asm/stacktrace.h>
  31. #include <asm/traps.h>
  32. #include <asm/unwind.h>
  33. #include "reboot.h"
  34. /* Dummy functions to avoid linker complaints */
  35. void __aeabi_unwind_cpp_pr0(void)
  36. {
  37. };
  38. EXPORT_SYMBOL(__aeabi_unwind_cpp_pr0);
  39. void __aeabi_unwind_cpp_pr1(void)
  40. {
  41. };
  42. EXPORT_SYMBOL(__aeabi_unwind_cpp_pr1);
  43. void __aeabi_unwind_cpp_pr2(void)
  44. {
  45. };
  46. EXPORT_SYMBOL(__aeabi_unwind_cpp_pr2);
/*
 * Interpreter state for one unwind table entry: the virtual register
 * set being reconstructed, plus the read position within the unwind
 * instruction words.
 */
struct unwind_ctrl_block {
	unsigned long vrs[16];		/* virtual register set */
	const unsigned long *insn;	/* pointer to the current instructions word */
	unsigned long sp_high;		/* highest value of sp allowed */
	unsigned long *lr_addr;		/* address of LR value on the stack */
	/*
	 * 1 : check for stack overflow for each register pop.
	 * 0 : save overhead if there is plenty of stack remaining.
	 */
	int check_each_pop;
	int entries;			/* number of entries left to interpret */
	int byte;			/* current byte number in the instructions word */
};
/*
 * Indices into unwind_ctrl_block.vrs[]. The frame pointer register
 * differs between Thumb-2 (r7) and ARM (r11) kernels.
 */
enum regs {
#ifdef CONFIG_THUMB2_KERNEL
	FP = 7,
#else
	FP = 11,
#endif
	SP = 13,
	LR = 14,
	PC = 15
};
/* Linker-provided bounds of the main kernel unwind index section */
extern const struct unwind_idx __start_unwind_idx[];
/* First positive-offset entry of the main table, computed lazily */
static const struct unwind_idx *__origin_unwind_idx;
extern const struct unwind_idx __stop_unwind_idx[];

/* Protects the list of module unwind tables below */
static DEFINE_RAW_SPINLOCK(unwind_lock);
static LIST_HEAD(unwind_tables);

/* Convert a prel31 symbol to an absolute address */
#define prel31_to_addr(ptr)				\
({							\
	/* sign-extend to 32 bits */			\
	long offset = (((long)*(ptr)) << 1) >> 1;	\
	(unsigned long)(ptr) + offset;			\
})
/*
 * Binary search in the unwind index. The entries are
 * guaranteed to be sorted in ascending order by the linker.
 *
 * start = first entry
 * origin = first entry with positive offset (or stop if there is no such entry)
 * stop - 1 = last entry
 *
 * Returns the index entry covering @addr, or NULL if no entry matches.
 */
static const struct unwind_idx *search_index(unsigned long addr,
					     const struct unwind_idx *start,
					     const struct unwind_idx *origin,
					     const struct unwind_idx *stop)
{
	unsigned long addr_prel31;

	pr_debug("%s(%08lx, %p, %p, %p)\n",
		 __func__, addr, start, origin, stop);

	/*
	 * only search in the section with the matching sign. This way the
	 * prel31 numbers can be compared as unsigned longs.
	 */
	if (addr < (unsigned long)start)
		/* negative offsets: [start; origin) */
		stop = origin;
	else
		/* positive offsets: [origin; stop) */
		start = origin;

	/* prel31 for address relative to start */
	addr_prel31 = (addr - (unsigned long)start) & 0x7fffffff;

	while (start < stop - 1) {
		const struct unwind_idx *mid = start + ((stop - start) >> 1);

		/*
		 * As addr_prel31 is relative to start an offset is needed to
		 * make it relative to mid.
		 */
		if (addr_prel31 - ((unsigned long)mid - (unsigned long)start) <
		    mid->addr_offset)
			stop = mid;
		else {
			/* keep addr_prel31 relative to start */
			addr_prel31 -= ((unsigned long)mid -
					(unsigned long)start);
			start = mid;
		}
	}

	if (likely(start->addr_offset <= addr_prel31))
		return start;
	else {
		pr_warn("unwind: Unknown symbol address %08lx\n", addr);
		return NULL;
	}
}
  133. static const struct unwind_idx *unwind_find_origin(
  134. const struct unwind_idx *start, const struct unwind_idx *stop)
  135. {
  136. pr_debug("%s(%p, %p)\n", __func__, start, stop);
  137. while (start < stop) {
  138. const struct unwind_idx *mid = start + ((stop - start) >> 1);
  139. if (mid->addr_offset >= 0x40000000)
  140. /* negative offset */
  141. start = mid + 1;
  142. else
  143. /* positive offset */
  144. stop = mid;
  145. }
  146. pr_debug("%s -> %p\n", __func__, stop);
  147. return stop;
  148. }
/*
 * Find the unwind index entry for @addr, looking in the main kernel
 * table for core kernel text and in the registered module tables
 * otherwise. Returns NULL if no entry covers @addr.
 */
static const struct unwind_idx *unwind_find_idx(unsigned long addr)
{
	const struct unwind_idx *idx = NULL;
	unsigned long flags;

	pr_debug("%s(%08lx)\n", __func__, addr);

	if (core_kernel_text(addr)) {
		/* origin of the main table is computed on first use */
		if (unlikely(!__origin_unwind_idx))
			__origin_unwind_idx =
				unwind_find_origin(__start_unwind_idx,
						   __stop_unwind_idx);

		/* main unwind table */
		idx = search_index(addr, __start_unwind_idx,
				   __origin_unwind_idx,
				   __stop_unwind_idx);
	} else {
		/* module unwind tables */
		struct unwind_table *table;

		raw_spin_lock_irqsave(&unwind_lock, flags);
		list_for_each_entry(table, &unwind_tables, list) {
			if (addr >= table->begin_addr &&
			    addr < table->end_addr) {
				idx = search_index(addr, table->start,
						   table->origin,
						   table->stop);
				/* Move-to-front to exploit common traces */
				list_move(&table->list, &unwind_tables);
				break;
			}
		}
		raw_spin_unlock_irqrestore(&unwind_lock, flags);
	}

	pr_debug("%s: idx = %p\n", __func__, idx);
	return idx;
}
  183. static unsigned long unwind_get_byte(struct unwind_ctrl_block *ctrl)
  184. {
  185. unsigned long ret;
  186. if (ctrl->entries <= 0) {
  187. pr_warn("unwind: Corrupt unwind table\n");
  188. return 0;
  189. }
  190. ret = (*ctrl->insn >> (ctrl->byte * 8)) & 0xff;
  191. if (ctrl->byte == 0) {
  192. ctrl->insn++;
  193. ctrl->entries--;
  194. ctrl->byte = 3;
  195. } else
  196. ctrl->byte--;
  197. return ret;
  198. }
  199. /* Before poping a register check whether it is feasible or not */
  200. static int unwind_pop_register(struct unwind_ctrl_block *ctrl,
  201. unsigned long **vsp, unsigned int reg)
  202. {
  203. if (unlikely(ctrl->check_each_pop))
  204. if (*vsp >= (unsigned long *)ctrl->sp_high)
  205. return -URC_FAILURE;
  206. /* Use READ_ONCE_NOCHECK here to avoid this memory access
  207. * from being tracked by KASAN.
  208. */
  209. ctrl->vrs[reg] = READ_ONCE_NOCHECK(*(*vsp));
  210. if (reg == 14)
  211. ctrl->lr_addr = *vsp;
  212. (*vsp)++;
  213. return URC_OK;
  214. }
  215. /* Helper functions to execute the instructions */
  216. static int unwind_exec_pop_subset_r4_to_r13(struct unwind_ctrl_block *ctrl,
  217. unsigned long mask)
  218. {
  219. unsigned long *vsp = (unsigned long *)ctrl->vrs[SP];
  220. int load_sp, reg = 4;
  221. load_sp = mask & (1 << (13 - 4));
  222. while (mask) {
  223. if (mask & 1)
  224. if (unwind_pop_register(ctrl, &vsp, reg))
  225. return -URC_FAILURE;
  226. mask >>= 1;
  227. reg++;
  228. }
  229. if (!load_sp) {
  230. ctrl->vrs[SP] = (unsigned long)vsp;
  231. }
  232. return URC_OK;
  233. }
  234. static int unwind_exec_pop_r4_to_rN(struct unwind_ctrl_block *ctrl,
  235. unsigned long insn)
  236. {
  237. unsigned long *vsp = (unsigned long *)ctrl->vrs[SP];
  238. int reg;
  239. /* pop R4-R[4+bbb] */
  240. for (reg = 4; reg <= 4 + (insn & 7); reg++)
  241. if (unwind_pop_register(ctrl, &vsp, reg))
  242. return -URC_FAILURE;
  243. if (insn & 0x8)
  244. if (unwind_pop_register(ctrl, &vsp, 14))
  245. return -URC_FAILURE;
  246. ctrl->vrs[SP] = (unsigned long)vsp;
  247. return URC_OK;
  248. }
  249. static int unwind_exec_pop_subset_r0_to_r3(struct unwind_ctrl_block *ctrl,
  250. unsigned long mask)
  251. {
  252. unsigned long *vsp = (unsigned long *)ctrl->vrs[SP];
  253. int reg = 0;
  254. /* pop R0-R3 according to mask */
  255. while (mask) {
  256. if (mask & 1)
  257. if (unwind_pop_register(ctrl, &vsp, reg))
  258. return -URC_FAILURE;
  259. mask >>= 1;
  260. reg++;
  261. }
  262. ctrl->vrs[SP] = (unsigned long)vsp;
  263. return URC_OK;
  264. }
  265. static unsigned long unwind_decode_uleb128(struct unwind_ctrl_block *ctrl)
  266. {
  267. unsigned long bytes = 0;
  268. unsigned long insn;
  269. unsigned long result = 0;
  270. /*
  271. * unwind_get_byte() will advance `ctrl` one instruction at a time, so
  272. * loop until we get an instruction byte where bit 7 is not set.
  273. *
  274. * Note: This decodes a maximum of 4 bytes to output 28 bits data where
  275. * max is 0xfffffff: that will cover a vsp increment of 1073742336, hence
  276. * it is sufficient for unwinding the stack.
  277. */
  278. do {
  279. insn = unwind_get_byte(ctrl);
  280. result |= (insn & 0x7f) << (bytes * 7);
  281. bytes++;
  282. } while (!!(insn & 0x80) && (bytes != sizeof(result)));
  283. return result;
  284. }
/*
 * Execute the current unwind instruction.
 *
 * Decodes one EHABI unwind opcode byte (plus any operand bytes) and
 * applies it to ctrl->vrs[]. Returns URC_OK, or -URC_FAILURE on a
 * corrupt/unsupported encoding or a failed register pop.
 */
static int unwind_exec_insn(struct unwind_ctrl_block *ctrl)
{
	unsigned long insn = unwind_get_byte(ctrl);
	int ret = URC_OK;

	pr_debug("%s: insn = %08lx\n", __func__, insn);

	if ((insn & 0xc0) == 0x00)
		/* 00xxxxxx: vsp += (xxxxxx << 2) + 4 */
		ctrl->vrs[SP] += ((insn & 0x3f) << 2) + 4;
	else if ((insn & 0xc0) == 0x40) {
		/* 01xxxxxx: vsp -= (xxxxxx << 2) + 4 */
		ctrl->vrs[SP] -= ((insn & 0x3f) << 2) + 4;
	} else if ((insn & 0xf0) == 0x80) {
		/* 1000iiii iiiiiiii: pop registers under 12-bit mask */
		unsigned long mask;

		insn = (insn << 8) | unwind_get_byte(ctrl);
		mask = insn & 0x0fff;
		if (mask == 0) {
			/* all-zero mask means "refuse to unwind" */
			pr_warn("unwind: 'Refuse to unwind' instruction %04lx\n",
				insn);
			return -URC_FAILURE;
		}

		ret = unwind_exec_pop_subset_r4_to_r13(ctrl, mask);
		if (ret)
			goto error;
	} else if ((insn & 0xf0) == 0x90 &&
		   (insn & 0x0d) != 0x0d) {
		/* 1001nnnn: vsp = r[nnnn] (nnnn != 13, 15) */
		ctrl->vrs[SP] = ctrl->vrs[insn & 0x0f];
	} else if ((insn & 0xf0) == 0xa0) {
		/* 1010xnnn: pop r4-r[4+nnn], plus r14 when x is set */
		ret = unwind_exec_pop_r4_to_rN(ctrl, insn);
		if (ret)
			goto error;
	} else if (insn == 0xb0) {
		/* 10110000: finish */
		if (ctrl->vrs[PC] == 0)
			ctrl->vrs[PC] = ctrl->vrs[LR];
		/* no further processing */
		ctrl->entries = 0;
	} else if (insn == 0xb1) {
		/* 10110001 0000iiii: pop r0-r3 under mask */
		unsigned long mask = unwind_get_byte(ctrl);

		if (mask == 0 || mask & 0xf0) {
			pr_warn("unwind: Spare encoding %04lx\n",
				(insn << 8) | mask);
			return -URC_FAILURE;
		}

		ret = unwind_exec_pop_subset_r0_to_r3(ctrl, mask);
		if (ret)
			goto error;
	} else if (insn == 0xb2) {
		/* 10110010 uleb128: vsp += 0x204 + (uleb128 << 2) */
		unsigned long uleb128 = unwind_decode_uleb128(ctrl);

		ctrl->vrs[SP] += 0x204 + (uleb128 << 2);
	} else {
		pr_warn("unwind: Unhandled instruction %02lx\n", insn);
		return -URC_FAILURE;
	}

	pr_debug("%s: fp = %08lx sp = %08lx lr = %08lx pc = %08lx\n", __func__,
		 ctrl->vrs[FP], ctrl->vrs[SP], ctrl->vrs[LR], ctrl->vrs[PC]);

error:
	return ret;
}
/*
 * Unwind a single frame starting with *sp for the symbol at *pc. It
 * updates the *pc and *sp with the new values.
 *
 * Returns URC_OK on success or a negative URC code when the frame
 * cannot be unwound (no index entry, corrupt table, SP escaping its
 * bounds, or no progress being made).
 */
int unwind_frame(struct stackframe *frame)
{
	const struct unwind_idx *idx;
	struct unwind_ctrl_block ctrl;
	unsigned long sp_low;

	/* store the highest address on the stack to avoid crossing it */
	sp_low = frame->sp;
	ctrl.sp_high = ALIGN(sp_low - THREAD_SIZE, THREAD_ALIGN)
		       + THREAD_SIZE;

	pr_debug("%s(pc = %08lx lr = %08lx sp = %08lx)\n", __func__,
		 frame->pc, frame->lr, frame->sp);

	idx = unwind_find_idx(frame->pc);
	if (!idx) {
		if (frame->pc && kernel_text_address(frame->pc)) {
			if (in_module_plt(frame->pc) && frame->pc != frame->lr) {
				/*
				 * Quoting Ard: Veneers only set PC using a
				 * PC+immediate LDR, and so they don't affect
				 * the state of the stack or the register file
				 */
				frame->pc = frame->lr;
				return URC_OK;
			}
			pr_warn("unwind: Index not found %08lx\n", frame->pc);
		}
		return -URC_FAILURE;
	}

	/* seed the virtual register set from the current frame */
	ctrl.vrs[FP] = frame->fp;
	ctrl.vrs[SP] = frame->sp;
	ctrl.vrs[LR] = frame->lr;
	ctrl.vrs[PC] = 0;
	/*
	 * NOTE(review): ctrl.lr_addr is only written by
	 * unwind_pop_register() when r14 is popped; confirm that
	 * frame->lr_addr consumers tolerate the value assigned below
	 * when no LR pop occurs — TODO.
	 */

	if (idx->insn == 1)
		/* can't unwind */
		return -URC_FAILURE;
	else if (frame->pc == prel31_to_addr(&idx->addr_offset)) {
		/*
		 * Unwinding is tricky when we're halfway through the prologue,
		 * since the stack frame that the unwinder expects may not be
		 * fully set up yet. However, one thing we do know for sure is
		 * that if we are unwinding from the very first instruction of
		 * a function, we are still effectively in the stack frame of
		 * the caller, and the unwind info has no relevance yet.
		 */
		if (frame->pc == frame->lr)
			return -URC_FAILURE;
		frame->pc = frame->lr;
		return URC_OK;
	} else if ((idx->insn & 0x80000000) == 0)
		/* prel31 to the unwind table */
		ctrl.insn = (unsigned long *)prel31_to_addr(&idx->insn);
	else if ((idx->insn & 0xff000000) == 0x80000000)
		/* only personality routine 0 supported in the index */
		ctrl.insn = &idx->insn;
	else {
		pr_warn("unwind: Unsupported personality routine %08lx in the index at %p\n",
			idx->insn, idx);
		return -URC_FAILURE;
	}

	/* check the personality routine */
	if ((*ctrl.insn & 0xff000000) == 0x80000000) {
		/* pr0: short format, up to 3 opcode bytes in this word */
		ctrl.byte = 2;
		ctrl.entries = 1;
	} else if ((*ctrl.insn & 0xff000000) == 0x81000000) {
		/* pr1: long format, extra word count in bits 16-23 */
		ctrl.byte = 1;
		ctrl.entries = 1 + ((*ctrl.insn & 0x00ff0000) >> 16);
	} else {
		pr_warn("unwind: Unsupported personality routine %08lx at %p\n",
			*ctrl.insn, ctrl.insn);
		return -URC_FAILURE;
	}

	ctrl.check_each_pop = 0;

	if (prel31_to_addr(&idx->addr_offset) == (u32)&call_with_stack) {
		/*
		 * call_with_stack() is the only place where we permit SP to
		 * jump from one stack to another, and since we know it is
		 * guaranteed to happen, set up the SP bounds accordingly.
		 */
		sp_low = frame->fp;
		ctrl.sp_high = ALIGN(frame->fp, THREAD_SIZE);
	}

	while (ctrl.entries > 0) {
		int urc;

		/* near the stack bound: validate every individual pop */
		if ((ctrl.sp_high - ctrl.vrs[SP]) < sizeof(ctrl.vrs))
			ctrl.check_each_pop = 1;
		urc = unwind_exec_insn(&ctrl);
		if (urc < 0)
			return urc;
		/* reject any SP that left the [sp_low, sp_high] window */
		if (ctrl.vrs[SP] < sp_low || ctrl.vrs[SP] > ctrl.sp_high)
			return -URC_FAILURE;
	}

	if (ctrl.vrs[PC] == 0)
		ctrl.vrs[PC] = ctrl.vrs[LR];

	/* check for infinite loop */
	if (frame->pc == ctrl.vrs[PC] && frame->sp == ctrl.vrs[SP])
		return -URC_FAILURE;

	frame->fp = ctrl.vrs[FP];
	frame->sp = ctrl.vrs[SP];
	frame->lr = ctrl.vrs[LR];
	frame->pc = ctrl.vrs[PC];
	frame->lr_addr = ctrl.lr_addr;

	return URC_OK;
}
/*
 * Print a backtrace for @tsk (or the current task when NULL) with the
 * given printk @loglvl. When @regs is supplied the trace starts from
 * that register state instead of the live or saved task context.
 */
void unwind_backtrace(struct pt_regs *regs, struct task_struct *tsk,
		      const char *loglvl)
{
	struct stackframe frame;

	printk("%sCall trace: ", loglvl);

	pr_debug("%s(regs = %p tsk = %p)\n", __func__, regs, tsk);

	if (!tsk)
		tsk = current;

	if (regs) {
		arm_get_current_stackframe(regs, &frame);
		/* PC might be corrupted, use LR in that case. */
		if (!kernel_text_address(regs->ARM_pc))
			frame.pc = regs->ARM_lr;
	} else if (tsk == current) {
		/* trace ourselves from this exact spot */
		frame.fp = (unsigned long)__builtin_frame_address(0);
		frame.sp = current_stack_pointer;
		frame.lr = (unsigned long)__builtin_return_address(0);
		/* We are saving the stack and execution state at this
		 * point, so we should ensure that frame.pc is within
		 * this block of code.
		 */
here:
		frame.pc = (unsigned long)&&here;
	} else {
		/* task blocked in __switch_to */
		frame.fp = thread_saved_fp(tsk);
		frame.sp = thread_saved_sp(tsk);
		/*
		 * The function calling __switch_to cannot be a leaf function
		 * so LR is recovered from the stack.
		 */
		frame.lr = 0;
		frame.pc = thread_saved_pc(tsk);
	}

	/* walk frames until unwind_frame() reports failure */
	while (1) {
		int urc;
		unsigned long where = frame.pc;

		urc = unwind_frame(&frame);
		if (urc < 0)
			break;
		dump_backtrace_entry(where, frame.pc, frame.sp - 4, loglvl);
	}
}
  492. struct unwind_table *unwind_table_add(unsigned long start, unsigned long size,
  493. unsigned long text_addr,
  494. unsigned long text_size)
  495. {
  496. unsigned long flags;
  497. struct unwind_table *tab = kmalloc(sizeof(*tab), GFP_KERNEL);
  498. pr_debug("%s(%08lx, %08lx, %08lx, %08lx)\n", __func__, start, size,
  499. text_addr, text_size);
  500. if (!tab)
  501. return tab;
  502. tab->start = (const struct unwind_idx *)start;
  503. tab->stop = (const struct unwind_idx *)(start + size);
  504. tab->origin = unwind_find_origin(tab->start, tab->stop);
  505. tab->begin_addr = text_addr;
  506. tab->end_addr = text_addr + text_size;
  507. raw_spin_lock_irqsave(&unwind_lock, flags);
  508. list_add_tail(&tab->list, &unwind_tables);
  509. raw_spin_unlock_irqrestore(&unwind_lock, flags);
  510. return tab;
  511. }
  512. void unwind_table_del(struct unwind_table *tab)
  513. {
  514. unsigned long flags;
  515. if (!tab)
  516. return;
  517. raw_spin_lock_irqsave(&unwind_lock, flags);
  518. list_del(&tab->list);
  519. raw_spin_unlock_irqrestore(&unwind_lock, flags);
  520. kfree(tab);
  521. }