unwind_orc.c

// SPDX-License-Identifier: GPL-2.0-only
#include <linux/objtool.h>
#include <linux/module.h>
#include <linux/sort.h>
#include <asm/ptrace.h>
#include <asm/stacktrace.h>
#include <asm/unwind.h>
#include <asm/orc_types.h>
#include <asm/orc_lookup.h>
#include <asm/orc_header.h>

ORC_HEADER;

#define orc_warn(fmt, ...) \
	printk_deferred_once(KERN_WARNING "WARNING: " fmt, ##__VA_ARGS__)

#define orc_warn_current(args...) \
({ \
	static bool dumped_before; \
	if (state->task == current && !state->error) { \
		orc_warn(args); \
		if (unwind_debug && !dumped_before) { \
			dumped_before = true; \
			unwind_dump(state); \
		} \
	} \
})
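
/*
 * Note: orc_warn_current() implicitly references a 'state' variable from the
 * caller's scope, so it can only be used inside functions that have a local
 * 'struct unwind_state *state'.
 */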

extern int __start_orc_unwind_ip[];
extern int __stop_orc_unwind_ip[];
extern struct orc_entry __start_orc_unwind[];
extern struct orc_entry __stop_orc_unwind[];

static bool orc_init __ro_after_init;
static bool unwind_debug __ro_after_init;
static unsigned int lookup_num_blocks __ro_after_init;

static int __init unwind_debug_cmdline(char *str)
{
	unwind_debug = true;

	return 0;
}
early_param("unwind_debug", unwind_debug_cmdline);

static void unwind_dump(struct unwind_state *state)
{
	static bool dumped_before;
	unsigned long word, *sp;
	struct stack_info stack_info = {0};
	unsigned long visit_mask = 0;

	if (dumped_before)
		return;

	dumped_before = true;

	printk_deferred("unwind stack type:%d next_sp:%p mask:0x%lx graph_idx:%d\n",
			state->stack_info.type, state->stack_info.next_sp,
			state->stack_mask, state->graph_idx);

	for (sp = __builtin_frame_address(0); sp;
	     sp = PTR_ALIGN(stack_info.next_sp, sizeof(long))) {
		if (get_stack_info(sp, state->task, &stack_info, &visit_mask))
			break;

		for (; sp < stack_info.end; sp++) {
			word = READ_ONCE_NOCHECK(*sp);

			printk_deferred("%0*lx: %0*lx (%pB)\n", BITS_PER_LONG/4,
					(unsigned long)sp, BITS_PER_LONG/4,
					word, (void *)word);
		}
	}
}
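
/*
 * Each .orc_unwind_ip entry is a 32-bit offset relative to the entry's own
 * address, which keeps the table at half the size of storing absolute 64-bit
 * text addresses.  orc_ip() recovers the absolute address, and the table is
 * sorted by that recovered address so __orc_find() can binary-search it.
 *
 * Illustrative example (the addresses are made up): if the table slot at
 * 0xffffffff85000000 holds the value -0x04000000, it describes the text
 * address 0xffffffff81000000.
 */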
static inline unsigned long orc_ip(const int *ip)
{
	return (unsigned long)ip + *ip;
}

static struct orc_entry *__orc_find(int *ip_table, struct orc_entry *u_table,
				    unsigned int num_entries, unsigned long ip)
{
	int *first = ip_table;
	int *last = ip_table + num_entries - 1;
	int *mid, *found = first;

	if (!num_entries)
		return NULL;

	/*
	 * Do a binary range search to find the rightmost duplicate of a given
	 * starting address.  Some entries are section terminators which are
	 * "weak" entries for ensuring there are no gaps.  They should be
	 * ignored when they conflict with a real entry.
	 */
	while (first <= last) {
		mid = first + ((last - first) / 2);

		if (orc_ip(mid) <= ip) {
			found = mid;
			first = mid + 1;
		} else
			last = mid - 1;
	}

	return u_table + (found - ip_table);
}

#ifdef CONFIG_MODULES
static struct orc_entry *orc_module_find(unsigned long ip)
{
	struct module *mod;

	mod = __module_address(ip);
	if (!mod || !mod->arch.orc_unwind || !mod->arch.orc_unwind_ip)
		return NULL;
	return __orc_find(mod->arch.orc_unwind_ip, mod->arch.orc_unwind,
			  mod->arch.num_orcs, ip);
}
#else
static struct orc_entry *orc_module_find(unsigned long ip)
{
	return NULL;
}
#endif

#ifdef CONFIG_DYNAMIC_FTRACE
static struct orc_entry *orc_find(unsigned long ip);

/*
 * Ftrace dynamic trampolines do not have orc entries of their own.
 * But they are copies of the static ftrace entries defined in
 * ftrace_*.S, which do have orc entries.
 *
 * If the unwinder comes across a ftrace trampoline, then find the
 * ftrace function that was used to create it, and use that ftrace
 * function's orc entry, as the placement of the return code on
 * the stack will be identical.
 */
static struct orc_entry *orc_ftrace_find(unsigned long ip)
{
	struct ftrace_ops *ops;
	unsigned long tramp_addr, offset;

	ops = ftrace_ops_trampoline(ip);
	if (!ops)
		return NULL;

	/* Set tramp_addr to the start of the code copied by the trampoline */
	if (ops->flags & FTRACE_OPS_FL_SAVE_REGS)
		tramp_addr = (unsigned long)ftrace_regs_caller;
	else
		tramp_addr = (unsigned long)ftrace_caller;

	/* Now advance tramp_addr to ip's offset within the trampoline */
	offset = ip - ops->trampoline;
	tramp_addr += offset;

	/* Prevent unlikely recursion */
	if (ip == tramp_addr)
		return NULL;

	return orc_find(tramp_addr);
}
#else
static struct orc_entry *orc_ftrace_find(unsigned long ip)
{
	return NULL;
}
#endif

/*
 * If we crash with IP==0, the last successfully executed instruction
 * was probably an indirect function call with a NULL function pointer,
 * and we don't have unwind information for NULL.
 * This hardcoded ORC entry for IP==0 allows us to unwind from a NULL function
 * pointer into its parent and then continue normally from there.
 */
static struct orc_entry null_orc_entry = {
	.sp_offset = sizeof(long),
	.sp_reg = ORC_REG_SP,
	.bp_reg = ORC_REG_UNDEFINED,
	.type = ORC_TYPE_CALL
};

/* Fake frame pointer entry -- used as a fallback for generated code */
static struct orc_entry orc_fp_entry = {
	.type		= ORC_TYPE_CALL,
	.sp_reg		= ORC_REG_BP,
	.sp_offset	= 16,
	.bp_reg		= ORC_REG_PREV_SP,
	.bp_offset	= -16,
};
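
/*
 * The orc_fp_entry offsets match a standard frame-pointer prologue
 * (push %rbp; mov %rsp, %rbp): the previous SP is BP + 16 (skipping the
 * saved RBP and the return address), and the saved RBP sits at -16 from
 * that previous SP, i.e. at the current BP itself.
 */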

static struct orc_entry *orc_find(unsigned long ip)
{
	static struct orc_entry *orc;

	if (ip == 0)
		return &null_orc_entry;

	/* For non-init vmlinux addresses, use the fast lookup table: */
	if (ip >= LOOKUP_START_IP && ip < LOOKUP_STOP_IP) {
		unsigned int idx, start, stop;

		idx = (ip - LOOKUP_START_IP) / LOOKUP_BLOCK_SIZE;

		if (unlikely((idx >= lookup_num_blocks-1))) {
			orc_warn("WARNING: bad lookup idx: idx=%u num=%u ip=%pB\n",
				 idx, lookup_num_blocks, (void *)ip);
			return NULL;
		}

		start = orc_lookup[idx];
		stop = orc_lookup[idx + 1] + 1;

		if (unlikely((__start_orc_unwind + start >= __stop_orc_unwind) ||
			     (__start_orc_unwind + stop > __stop_orc_unwind))) {
			orc_warn("WARNING: bad lookup value: idx=%u num=%u start=%u stop=%u ip=%pB\n",
				 idx, lookup_num_blocks, start, stop, (void *)ip);
			return NULL;
		}

		return __orc_find(__start_orc_unwind_ip + start,
				  __start_orc_unwind + start, stop - start, ip);
	}

	/* vmlinux .init slow lookup: */
	if (is_kernel_inittext(ip))
		return __orc_find(__start_orc_unwind_ip, __start_orc_unwind,
				  __stop_orc_unwind_ip - __start_orc_unwind_ip, ip);

	/* Module lookup: */
	orc = orc_module_find(ip);
	if (orc)
		return orc;

	return orc_ftrace_find(ip);
}

#ifdef CONFIG_MODULES

static DEFINE_MUTEX(sort_mutex);
static int *cur_orc_ip_table = __start_orc_unwind_ip;
static struct orc_entry *cur_orc_table = __start_orc_unwind;
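
/*
 * Swapping two .orc_unwind_ip entries is not a plain value swap: the values
 * are self-relative offsets, so when a value moves to a slot that is 'delta'
 * bytes away, it must be adjusted by 'delta' to keep describing the same
 * absolute text address.
 */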
static void orc_sort_swap(void *_a, void *_b, int size)
{
	struct orc_entry *orc_a, *orc_b;
	int *a = _a, *b = _b, tmp;
	int delta = _b - _a;

	/* Swap the .orc_unwind_ip entries: */
	tmp = *a;
	*a = *b + delta;
	*b = tmp - delta;

	/* Swap the corresponding .orc_unwind entries: */
	orc_a = cur_orc_table + (a - cur_orc_ip_table);
	orc_b = cur_orc_table + (b - cur_orc_ip_table);
	swap(*orc_a, *orc_b);
}

static int orc_sort_cmp(const void *_a, const void *_b)
{
	struct orc_entry *orc_a;
	const int *a = _a, *b = _b;
	unsigned long a_val = orc_ip(a);
	unsigned long b_val = orc_ip(b);

	if (a_val > b_val)
		return 1;
	if (a_val < b_val)
		return -1;

	/*
	 * The "weak" section terminator entries need to always be first
	 * to ensure the lookup code skips them in favor of real entries.
	 * These terminator entries exist to handle any gaps created by
	 * whitelisted .o files which didn't get objtool generation.
	 */
	orc_a = cur_orc_table + (a - cur_orc_ip_table);
	return orc_a->type == ORC_TYPE_UNDEFINED ? -1 : 1;
}

void unwind_module_init(struct module *mod, void *_orc_ip, size_t orc_ip_size,
			void *_orc, size_t orc_size)
{
	int *orc_ip = _orc_ip;
	struct orc_entry *orc = _orc;
	unsigned int num_entries = orc_ip_size / sizeof(int);

	WARN_ON_ONCE(orc_ip_size % sizeof(int) != 0 ||
		     orc_size % sizeof(*orc) != 0 ||
		     num_entries != orc_size / sizeof(*orc));

	/*
	 * The 'cur_orc_*' globals allow the orc_sort_swap() callback to
	 * associate an .orc_unwind_ip table entry with its corresponding
	 * .orc_unwind entry so they can both be swapped.
	 */
	mutex_lock(&sort_mutex);
	cur_orc_ip_table = orc_ip;
	cur_orc_table = orc;
	sort(orc_ip, num_entries, sizeof(int), orc_sort_cmp, orc_sort_swap);
	mutex_unlock(&sort_mutex);

	mod->arch.orc_unwind_ip = orc_ip;
	mod->arch.orc_unwind = orc;
	mod->arch.num_orcs = num_entries;
}
#endif

void __init unwind_init(void)
{
	size_t orc_ip_size = (void *)__stop_orc_unwind_ip - (void *)__start_orc_unwind_ip;
	size_t orc_size = (void *)__stop_orc_unwind - (void *)__start_orc_unwind;
	size_t num_entries = orc_ip_size / sizeof(int);
	struct orc_entry *orc;
	int i;

	if (!num_entries || orc_ip_size % sizeof(int) != 0 ||
	    orc_size % sizeof(struct orc_entry) != 0 ||
	    num_entries != orc_size / sizeof(struct orc_entry)) {
		orc_warn("WARNING: Bad or missing .orc_unwind table.  Disabling unwinder.\n");
		return;
	}

	/*
	 * Note, the orc_unwind and orc_unwind_ip tables were already
	 * sorted at build time via the 'sorttable' tool.
	 * It's ready for binary search straight away, no need to sort it.
	 */

	/* Initialize the fast lookup table: */
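	/*
	 * orc_lookup[] has one entry per LOOKUP_BLOCK_SIZE bytes of kernel
	 * text.  Entry i holds the index into the sorted ORC tables of the
	 * entry covering the start of block i, so orc_find() only has to
	 * binary-search the small slice between orc_lookup[idx] and
	 * orc_lookup[idx + 1] instead of the whole table.
	 */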
	lookup_num_blocks = orc_lookup_end - orc_lookup;

	for (i = 0; i < lookup_num_blocks-1; i++) {
		orc = __orc_find(__start_orc_unwind_ip, __start_orc_unwind,
				 num_entries,
				 LOOKUP_START_IP + (LOOKUP_BLOCK_SIZE * i));
		if (!orc) {
			orc_warn("WARNING: Corrupt .orc_unwind table.  Disabling unwinder.\n");
			return;
		}

		orc_lookup[i] = orc - __start_orc_unwind;
	}

	/* Initialize the ending block: */
	orc = __orc_find(__start_orc_unwind_ip, __start_orc_unwind, num_entries,
			 LOOKUP_STOP_IP);
	if (!orc) {
		orc_warn("WARNING: Corrupt .orc_unwind table.  Disabling unwinder.\n");
		return;
	}
	orc_lookup[lookup_num_blocks-1] = orc - __start_orc_unwind;

	orc_init = true;
}

unsigned long unwind_get_return_address(struct unwind_state *state)
{
	if (unwind_done(state))
		return 0;

	return __kernel_text_address(state->ip) ? state->ip : 0;
}
EXPORT_SYMBOL_GPL(unwind_get_return_address);
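
/*
 * For a call frame, unwind_next_frame() leaves state->sp pointing just above
 * the return address, which is why the slot holding the address is one word
 * below state->sp here.
 */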
unsigned long *unwind_get_return_address_ptr(struct unwind_state *state)
{
	if (unwind_done(state))
		return NULL;

	if (state->regs)
		return &state->regs->ip;

	if (state->sp)
		return (unsigned long *)state->sp - 1;

	return NULL;
}
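
/*
 * Check whether [addr, addr + len) lies on a stack the unwinder may read.
 * If it isn't on the currently-tracked stack, get_stack_info() may switch
 * state->stack_info to the next stack (e.g. from an IRQ stack back to the
 * task stack), with state->stack_mask preventing the same stack type from
 * being visited twice and the unwind looping forever.
 */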
static bool stack_access_ok(struct unwind_state *state, unsigned long _addr,
			    size_t len)
{
	struct stack_info *info = &state->stack_info;
	void *addr = (void *)_addr;

	if (on_stack(info, addr, len))
		return true;

	return !get_stack_info(addr, state->task, info, &state->stack_mask) &&
		on_stack(info, addr, len);
}

static bool deref_stack_reg(struct unwind_state *state, unsigned long addr,
			    unsigned long *val)
{
	if (!stack_access_ok(state, addr, sizeof(long)))
		return false;

	*val = READ_ONCE_NOCHECK(*(unsigned long *)addr);
	return true;
}

static bool deref_stack_regs(struct unwind_state *state, unsigned long addr,
			     unsigned long *ip, unsigned long *sp)
{
	struct pt_regs *regs = (struct pt_regs *)addr;

	/* x86-32 support will be more complicated due to the &regs->sp hack */
	BUILD_BUG_ON(IS_ENABLED(CONFIG_X86_32));

	if (!stack_access_ok(state, addr, sizeof(struct pt_regs)))
		return false;

	*ip = READ_ONCE_NOCHECK(regs->ip);
	*sp = READ_ONCE_NOCHECK(regs->sp);
	return true;
}
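
/*
 * A partial-regs frame only stores the hardware iret frame (IP, CS, FLAGS,
 * SP, SS), not a full pt_regs.  IRET_FRAME_OFFSET backs the pt_regs pointer
 * up so that the ip/sp members line up with the iret frame on the stack;
 * only those members may be dereferenced through it.
 */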
static bool deref_stack_iret_regs(struct unwind_state *state, unsigned long addr,
				  unsigned long *ip, unsigned long *sp)
{
	struct pt_regs *regs = (void *)addr - IRET_FRAME_OFFSET;

	if (!stack_access_ok(state, addr, IRET_FRAME_SIZE))
		return false;

	*ip = READ_ONCE_NOCHECK(regs->ip);
	*sp = READ_ONCE_NOCHECK(regs->sp);
	return true;
}

/*
 * If state->regs is non-NULL, and points to a full pt_regs, just get the reg
 * value from state->regs.
 *
 * Otherwise, if state->regs just points to IRET regs, and the previous frame
 * had full regs, it's safe to get the value from the previous regs.  This can
 * happen when early/late IRQ entry code gets interrupted by an NMI.
 */
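/*
 * @reg_off is a byte offset into struct pt_regs (e.g. offsetof(struct
 * pt_regs, r10)); dividing by 8 (sizeof(long) on x86-64) turns it into an
 * index into the regs treated as an array of longs.
 */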
static bool get_reg(struct unwind_state *state, unsigned int reg_off,
		    unsigned long *val)
{
	unsigned int reg = reg_off/8;

	if (!state->regs)
		return false;

	if (state->full_regs) {
		*val = READ_ONCE_NOCHECK(((unsigned long *)state->regs)[reg]);
		return true;
	}

	if (state->prev_regs) {
		*val = READ_ONCE_NOCHECK(((unsigned long *)state->prev_regs)[reg]);
		return true;
	}

	return false;
}

bool unwind_next_frame(struct unwind_state *state)
{
	unsigned long ip_p, sp, tmp, orig_ip = state->ip, prev_sp = state->sp;
	enum stack_type prev_type = state->stack_info.type;
	struct orc_entry *orc;
	bool indirect = false;

	if (unwind_done(state))
		return false;

	/* Don't let modules unload while we're reading their ORC data. */
	preempt_disable();

	/* End-of-stack check for user tasks: */
	if (state->regs && user_mode(state->regs))
		goto the_end;

	/*
	 * Find the orc_entry associated with the text address.
	 *
	 * For a call frame (as opposed to a signal frame), state->ip points to
	 * the instruction after the call.  That instruction's stack layout
	 * could be different from the call instruction's layout, for example
	 * if the call was to a noreturn function.  So get the ORC data for the
	 * call instruction itself.
	 */
	orc = orc_find(state->signal ? state->ip : state->ip - 1);
	if (!orc) {
		/*
		 * As a fallback, try to assume this code uses a frame pointer.
		 * This is useful for generated code, like BPF, which ORC
		 * doesn't know about.  This is just a guess, so the rest of
		 * the unwind is no longer considered reliable.
		 */
		orc = &orc_fp_entry;
		state->error = true;
	} else {
		if (orc->type == ORC_TYPE_UNDEFINED)
			goto err;

		if (orc->type == ORC_TYPE_END_OF_STACK)
			goto the_end;
	}

	state->signal = orc->signal;

	/* Find the previous frame's stack: */
	switch (orc->sp_reg) {
	case ORC_REG_SP:
		sp = state->sp + orc->sp_offset;
		break;

	case ORC_REG_BP:
		sp = state->bp + orc->sp_offset;
		break;

	case ORC_REG_SP_INDIRECT:
		sp = state->sp;
		indirect = true;
		break;

	case ORC_REG_BP_INDIRECT:
		sp = state->bp + orc->sp_offset;
		indirect = true;
		break;

	case ORC_REG_R10:
		if (!get_reg(state, offsetof(struct pt_regs, r10), &sp)) {
			orc_warn_current("missing R10 value at %pB\n",
					 (void *)state->ip);
			goto err;
		}
		break;

	case ORC_REG_R13:
		if (!get_reg(state, offsetof(struct pt_regs, r13), &sp)) {
			orc_warn_current("missing R13 value at %pB\n",
					 (void *)state->ip);
			goto err;
		}
		break;

	case ORC_REG_DI:
		if (!get_reg(state, offsetof(struct pt_regs, di), &sp)) {
			orc_warn_current("missing RDI value at %pB\n",
					 (void *)state->ip);
			goto err;
		}
		break;

	case ORC_REG_DX:
		if (!get_reg(state, offsetof(struct pt_regs, dx), &sp)) {
			orc_warn_current("missing DX value at %pB\n",
					 (void *)state->ip);
			goto err;
		}
		break;

	default:
		orc_warn("unknown SP base reg %d at %pB\n",
			 orc->sp_reg, (void *)state->ip);
		goto err;
	}

	if (indirect) {
		if (!deref_stack_reg(state, sp, &sp))
			goto err;

		if (orc->sp_reg == ORC_REG_SP_INDIRECT)
			sp += orc->sp_offset;
	}
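
	/*
	 * Note the asymmetry above: for ORC_REG_SP_INDIRECT the offset is
	 * applied after the dereference, while for ORC_REG_BP_INDIRECT it is
	 * applied before it (the previous SP is loaded from BP + offset).
	 */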

	/* Find IP, SP and possibly regs: */
	switch (orc->type) {
	case ORC_TYPE_CALL:
		ip_p = sp - sizeof(long);

		if (!deref_stack_reg(state, ip_p, &state->ip))
			goto err;

		state->ip = unwind_recover_ret_addr(state, state->ip,
						    (unsigned long *)ip_p);
		state->sp = sp;
		state->regs = NULL;
		state->prev_regs = NULL;
		break;

	case ORC_TYPE_REGS:
		if (!deref_stack_regs(state, sp, &state->ip, &state->sp)) {
			orc_warn_current("can't access registers at %pB\n",
					 (void *)orig_ip);
			goto err;
		}
		/*
		 * There is a small window where an interrupt can arrive at
		 * the entry of arch_rethook_trampoline(), where no ORC info
		 * exists: right after the RET to the modified return address.
		 * At that point, the @addr_p argument of
		 * unwind_recover_rethook() (which must point to the stack
		 * entry storing the modified return address) is
		 * "SP - sizeof(long)", because the RET incremented SP.
		 */
		state->ip = unwind_recover_rethook(state, state->ip,
				(unsigned long *)(state->sp - sizeof(long)));
		state->regs = (struct pt_regs *)sp;
		state->prev_regs = NULL;
		state->full_regs = true;
		break;

	case ORC_TYPE_REGS_PARTIAL:
		if (!deref_stack_iret_regs(state, sp, &state->ip, &state->sp)) {
			orc_warn_current("can't access iret registers at %pB\n",
					 (void *)orig_ip);
			goto err;
		}
		/* See ORC_TYPE_REGS case comment. */
		state->ip = unwind_recover_rethook(state, state->ip,
				(unsigned long *)(state->sp - sizeof(long)));

		if (state->full_regs)
			state->prev_regs = state->regs;
		state->regs = (void *)sp - IRET_FRAME_OFFSET;
		state->full_regs = false;
		break;

	default:
		orc_warn("unknown .orc_unwind entry type %d at %pB\n",
			 orc->type, (void *)orig_ip);
		goto err;
	}

	/* Find BP: */
	switch (orc->bp_reg) {
	case ORC_REG_UNDEFINED:
		if (get_reg(state, offsetof(struct pt_regs, bp), &tmp))
			state->bp = tmp;
		break;

	case ORC_REG_PREV_SP:
		if (!deref_stack_reg(state, sp + orc->bp_offset, &state->bp))
			goto err;
		break;

	case ORC_REG_BP:
		if (!deref_stack_reg(state, state->bp + orc->bp_offset, &state->bp))
			goto err;
		break;

	default:
		orc_warn("unknown BP base reg %d for ip %pB\n",
			 orc->bp_reg, (void *)orig_ip);
		goto err;
	}

	/* Prevent a recursive loop due to bad ORC data: */
	if (state->stack_info.type == prev_type &&
	    on_stack(&state->stack_info, (void *)state->sp, sizeof(long)) &&
	    state->sp <= prev_sp) {
		orc_warn_current("stack going in the wrong direction? at %pB\n",
				 (void *)orig_ip);
		goto err;
	}

	preempt_enable();
	return true;

err:
	state->error = true;

the_end:
	preempt_enable();
	state->stack_info.type = STACK_TYPE_UNKNOWN;
	return false;
}
EXPORT_SYMBOL_GPL(unwind_next_frame);
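
/*
 * Typical use of this API, as a minimal sketch (the main in-tree consumer
 * is arch_stack_walk() in arch/x86/kernel/stacktrace.c):
 *
 *	struct unwind_state state;
 *	unsigned long addr;
 *
 *	for (unwind_start(&state, task, regs, NULL); !unwind_done(&state);
 *	     unwind_next_frame(&state)) {
 *		addr = unwind_get_return_address(&state);
 *		if (!addr)
 *			break;
 *		...record addr...
 *	}
 */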
void __unwind_start(struct unwind_state *state, struct task_struct *task,
		    struct pt_regs *regs, unsigned long *first_frame)
{
	memset(state, 0, sizeof(*state));
	state->task = task;

	if (!orc_init)
		goto err;

	/*
	 * Refuse to unwind the stack of a task while it's executing on another
	 * CPU.  This check is racy, but that's ok: the unwinder has other
	 * checks to prevent it from going off the rails.
	 */
	if (task_on_another_cpu(task))
		goto err;

	if (regs) {
		if (user_mode(regs))
			goto the_end;

		state->ip = regs->ip;
		state->sp = regs->sp;
		state->bp = regs->bp;
		state->regs = regs;
		state->full_regs = true;
		state->signal = true;

	} else if (task == current) {
		asm volatile("lea (%%rip), %0\n\t"
			     "mov %%rsp, %1\n\t"
			     "mov %%rbp, %2\n\t"
			     : "=r" (state->ip), "=r" (state->sp),
			       "=r" (state->bp));

	} else {
		struct inactive_task_frame *frame = (void *)task->thread.sp;

		state->sp = task->thread.sp + sizeof(*frame);
		state->bp = READ_ONCE_NOCHECK(frame->bp);
		state->ip = READ_ONCE_NOCHECK(frame->ret_addr);
		state->signal = (void *)state->ip == ret_from_fork_asm;
	}

	if (get_stack_info((unsigned long *)state->sp, state->task,
			   &state->stack_info, &state->stack_mask)) {
		/*
		 * We weren't on a valid stack.  It's possible that
		 * we overflowed a valid stack into a guard page.
		 * See if the next page up is valid so that we can
		 * generate some kind of backtrace if this happens.
		 */
		void *next_page = (void *)PAGE_ALIGN((unsigned long)state->sp);
		state->error = true;
		if (get_stack_info(next_page, state->task, &state->stack_info,
				   &state->stack_mask))
			return;
	}

	/*
	 * The caller can provide the address of the first frame directly
	 * (first_frame) or indirectly (regs->sp) to indicate which stack frame
	 * to start unwinding at.  Skip ahead until we reach it.
	 */

	/* When starting from regs, skip the regs frame: */
	if (regs) {
		unwind_next_frame(state);
		return;
	}

	/* Otherwise, skip ahead to the user-specified starting frame: */
	while (!unwind_done(state) &&
	       (!on_stack(&state->stack_info, first_frame, sizeof(long)) ||
		state->sp <= (unsigned long)first_frame))
		unwind_next_frame(state);

	return;

err:
	state->error = true;
the_end:
	state->stack_info.type = STACK_TYPE_UNKNOWN;
}
EXPORT_SYMBOL_GPL(__unwind_start);