unwind_orc.c

#include <linux/module.h>
#include <linux/sort.h>
#include <asm/ptrace.h>
#include <asm/stacktrace.h>
#include <asm/unwind.h>
#include <asm/orc_types.h>
#include <asm/orc_lookup.h>

#define orc_warn(fmt, ...) \
	printk_deferred_once(KERN_WARNING pr_fmt("WARNING: " fmt), ##__VA_ARGS__)

extern int __start_orc_unwind_ip[];
extern int __stop_orc_unwind_ip[];
extern struct orc_entry __start_orc_unwind[];
extern struct orc_entry __stop_orc_unwind[];

static DEFINE_MUTEX(sort_mutex);
int *cur_orc_ip_table = __start_orc_unwind_ip;
struct orc_entry *cur_orc_table = __start_orc_unwind;

unsigned int lookup_num_blocks;
bool orc_init;
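
/*
 * The unwinder works off two parallel, address-sorted tables emitted by
 * objtool: .orc_unwind_ip holds 32-bit self-relative code addresses (see
 * orc_ip() below) and .orc_unwind holds the matching struct orc_entry for
 * each of them.  The cur_orc_* pointers select which pair of tables the
 * sort callbacks below operate on (vmlinux at boot, or a module's tables
 * in unwind_module_init()).
 */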

static inline unsigned long orc_ip(const int *ip)
{
	return (unsigned long)ip + *ip;
}

static struct orc_entry *__orc_find(int *ip_table, struct orc_entry *u_table,
				    unsigned int num_entries, unsigned long ip)
{
	int *first = ip_table;
	int *last = ip_table + num_entries - 1;
	int *mid = first, *found = first;

	if (!num_entries)
		return NULL;

	/*
	 * Do a binary range search to find the rightmost duplicate of a given
	 * starting address.  Some entries are section terminators which are
	 * "weak" entries for ensuring there are no gaps.  They should be
	 * ignored when they conflict with a real entry.
	 */
	while (first <= last) {
		mid = first + ((last - first) / 2);

		if (orc_ip(mid) <= ip) {
			found = mid;
			first = mid + 1;
		} else
			last = mid - 1;
	}

	return u_table + (found - ip_table);
}
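
/*
 * Example of the rightmost-match semantics above: if three consecutive
 * ip_table entries decode (via orc_ip()) to 0x100, 0x100 and 0x200, a lookup
 * for any ip in [0x100, 0x1ff] lands on the *second* 0x100 entry, so a real
 * entry sorted to the right of a duplicate "weak" terminator wins.  An ip
 * below the first entry falls back to entry 0.
 */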

#ifdef CONFIG_MODULES
static struct orc_entry *orc_module_find(unsigned long ip)
{
	struct module *mod;

	mod = __module_address(ip);
	if (!mod || !mod->arch.orc_unwind || !mod->arch.orc_unwind_ip)
		return NULL;
	return __orc_find(mod->arch.orc_unwind_ip, mod->arch.orc_unwind,
			  mod->arch.num_orcs, ip);
}
#else
static struct orc_entry *orc_module_find(unsigned long ip)
{
	return NULL;
}
#endif

#ifdef CONFIG_DYNAMIC_FTRACE
static struct orc_entry *orc_find(unsigned long ip);

/*
 * Ftrace dynamic trampolines do not have orc entries of their own.
 * But they are copies of the ftrace entries that are static and
 * defined in ftrace_*.S, which do have orc entries.
 *
 * If the unwinder comes across an ftrace trampoline, then find the
 * ftrace function that was used to create it, and use that ftrace
 * function's orc entry, as the placement of the return code in
 * the stack will be identical.
 */
static struct orc_entry *orc_ftrace_find(unsigned long ip)
{
	struct ftrace_ops *ops;
	unsigned long caller;

	ops = ftrace_ops_trampoline(ip);
	if (!ops)
		return NULL;

	if (ops->flags & FTRACE_OPS_FL_SAVE_REGS)
		caller = (unsigned long)ftrace_regs_call;
	else
		caller = (unsigned long)ftrace_call;

	/* Prevent unlikely recursion */
	if (ip == caller)
		return NULL;

	return orc_find(caller);
}
#else
static struct orc_entry *orc_ftrace_find(unsigned long ip)
{
	return NULL;
}
#endif

/*
 * If we crash with IP==0, the last successfully executed instruction
 * was probably an indirect function call with a NULL function pointer,
 * and we don't have unwind information for NULL.
 * This hardcoded ORC entry for IP==0 allows us to unwind from a NULL function
 * pointer into its parent and then continue normally from there.
 */
static struct orc_entry null_orc_entry = {
	.sp_offset = sizeof(long),
	.sp_reg = ORC_REG_SP,
	.bp_reg = ORC_REG_UNDEFINED,
	.type = ORC_TYPE_CALL
};
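
/*
 * null_orc_entry describes the stack layout immediately after a 'call'
 * instruction has pushed its return address: the previous frame's SP is the
 * current SP plus sizeof(long), and the return address sits right at the
 * current SP.  That is exactly the state a call through a NULL function
 * pointer leaves behind, which lets unwind_next_frame() step over the bogus
 * IP==0 frame and into the caller.
 */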

static struct orc_entry *orc_find(unsigned long ip)
{
	static struct orc_entry *orc;

	if (ip == 0)
		return &null_orc_entry;

	/* For non-init vmlinux addresses, use the fast lookup table: */
	if (ip >= LOOKUP_START_IP && ip < LOOKUP_STOP_IP) {
		unsigned int idx, start, stop;

		idx = (ip - LOOKUP_START_IP) / LOOKUP_BLOCK_SIZE;

		if (unlikely((idx >= lookup_num_blocks-1))) {
			orc_warn("bad lookup idx: idx=%u num=%u ip=%pB\n",
				 idx, lookup_num_blocks, (void *)ip);
			return NULL;
		}

		start = orc_lookup[idx];
		stop = orc_lookup[idx + 1] + 1;

		if (unlikely((__start_orc_unwind + start >= __stop_orc_unwind) ||
			     (__start_orc_unwind + stop > __stop_orc_unwind))) {
			orc_warn("bad lookup value: idx=%u num=%u start=%u stop=%u ip=%pB\n",
				 idx, lookup_num_blocks, start, stop, (void *)ip);
			return NULL;
		}

		return __orc_find(__start_orc_unwind_ip + start,
				  __start_orc_unwind + start, stop - start, ip);
	}

	/* vmlinux .init slow lookup: */
	if (init_kernel_text(ip))
		return __orc_find(__start_orc_unwind_ip, __start_orc_unwind,
				  __stop_orc_unwind_ip - __start_orc_unwind_ip, ip);

	/* Module lookup: */
	orc = orc_module_find(ip);
	if (orc)
		return orc;

	return orc_ftrace_find(ip);
}
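
/*
 * Rough worked example of the fast path above, assuming a 256-byte
 * LOOKUP_BLOCK_SIZE: for ip = LOOKUP_START_IP + 0x1234, idx is
 * 0x1234 / 0x100 = 0x12, and orc_lookup[0x12]..orc_lookup[0x13] bound the
 * slice of the (sorted) .orc_unwind tables that can contain entries for
 * that 256-byte block of text, so __orc_find() only binary-searches that
 * small slice instead of the whole table.
 */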

static void orc_sort_swap(void *_a, void *_b, int size)
{
	struct orc_entry *orc_a, *orc_b;
	struct orc_entry orc_tmp;
	int *a = _a, *b = _b, tmp;
	int delta = _b - _a;
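
	/*
	 * The ip table entries are self-relative offsets (see orc_ip()), so
	 * their raw values can't simply be exchanged: a value copied 'delta'
	 * bytes away would then resolve to a different absolute address.
	 * Adjust each value by 'delta' so it keeps pointing at the same
	 * instruction from its new slot.
	 */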
	/* Swap the .orc_unwind_ip entries: */
	tmp = *a;
	*a = *b + delta;
	*b = tmp - delta;

	/* Swap the corresponding .orc_unwind entries: */
	orc_a = cur_orc_table + (a - cur_orc_ip_table);
	orc_b = cur_orc_table + (b - cur_orc_ip_table);
	orc_tmp = *orc_a;
	*orc_a = *orc_b;
	*orc_b = orc_tmp;
}

static int orc_sort_cmp(const void *_a, const void *_b)
{
	struct orc_entry *orc_a;
	const int *a = _a, *b = _b;
	unsigned long a_val = orc_ip(a);
	unsigned long b_val = orc_ip(b);

	if (a_val > b_val)
		return 1;
	if (a_val < b_val)
		return -1;

	/*
	 * The "weak" section terminator entries need to always be on the left
	 * to ensure the lookup code skips them in favor of real entries.
	 * These terminator entries exist to handle any gaps created by
	 * whitelisted .o files which didn't get objtool generation.
	 */
	orc_a = cur_orc_table + (a - cur_orc_ip_table);
	return orc_a->sp_reg == ORC_REG_UNDEFINED && !orc_a->end ? -1 : 1;
}
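
/*
 * unwind_module_init() is expected to be called from the arch module-load
 * path once a module's .orc_unwind_ip/.orc_unwind sections have been copied
 * into place: it sorts both tables (via the shared cur_orc_* globals, hence
 * the mutex) and then publishes them in mod->arch for orc_module_find().
 */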
#ifdef CONFIG_MODULES
void unwind_module_init(struct module *mod, void *_orc_ip, size_t orc_ip_size,
			void *_orc, size_t orc_size)
{
	int *orc_ip = _orc_ip;
	struct orc_entry *orc = _orc;
	unsigned int num_entries = orc_ip_size / sizeof(int);

	WARN_ON_ONCE(orc_ip_size % sizeof(int) != 0 ||
		     orc_size % sizeof(*orc) != 0 ||
		     num_entries != orc_size / sizeof(*orc));

	/*
	 * The 'cur_orc_*' globals allow the orc_sort_swap() callback to
	 * associate an .orc_unwind_ip table entry with its corresponding
	 * .orc_unwind entry so they can both be swapped.
	 */
	mutex_lock(&sort_mutex);
	cur_orc_ip_table = orc_ip;
	cur_orc_table = orc;
	sort(orc_ip, num_entries, sizeof(int), orc_sort_cmp, orc_sort_swap);
	mutex_unlock(&sort_mutex);

	mod->arch.orc_unwind_ip = orc_ip;
	mod->arch.orc_unwind = orc;
	mod->arch.num_orcs = num_entries;
}
#endif

void __init unwind_init(void)
{
	size_t orc_ip_size = (void *)__stop_orc_unwind_ip - (void *)__start_orc_unwind_ip;
	size_t orc_size = (void *)__stop_orc_unwind - (void *)__start_orc_unwind;
	size_t num_entries = orc_ip_size / sizeof(int);
	struct orc_entry *orc;
	int i;

	if (!num_entries || orc_ip_size % sizeof(int) != 0 ||
	    orc_size % sizeof(struct orc_entry) != 0 ||
	    num_entries != orc_size / sizeof(struct orc_entry)) {
		orc_warn("Bad or missing .orc_unwind table. Disabling unwinder.\n");
		return;
	}

	/* Sort the .orc_unwind and .orc_unwind_ip tables: */
	sort(__start_orc_unwind_ip, num_entries, sizeof(int), orc_sort_cmp,
	     orc_sort_swap);

	/* Initialize the fast lookup table: */
	lookup_num_blocks = orc_lookup_end - orc_lookup;
	for (i = 0; i < lookup_num_blocks-1; i++) {
		orc = __orc_find(__start_orc_unwind_ip, __start_orc_unwind,
				 num_entries,
				 LOOKUP_START_IP + (LOOKUP_BLOCK_SIZE * i));
		if (!orc) {
			orc_warn("Corrupt .orc_unwind table. Disabling unwinder.\n");
			return;
		}

		orc_lookup[i] = orc - __start_orc_unwind;
	}

	/* Initialize the ending block: */
	orc = __orc_find(__start_orc_unwind_ip, __start_orc_unwind, num_entries,
			 LOOKUP_STOP_IP);
	if (!orc) {
		orc_warn("Corrupt .orc_unwind table. Disabling unwinder.\n");
		return;
	}
	orc_lookup[lookup_num_blocks-1] = orc - __start_orc_unwind;

	orc_init = true;
}
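
/*
 * Nothing below will actually walk a stack until unwind_init() has set
 * orc_init: __unwind_start() bails out early (marking the state as errored
 * and of unknown stack type) when the ORC tables haven't been validated and
 * sorted yet.
 */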

unsigned long unwind_get_return_address(struct unwind_state *state)
{
	if (unwind_done(state))
		return 0;

	return __kernel_text_address(state->ip) ? state->ip : 0;
}
EXPORT_SYMBOL_GPL(unwind_get_return_address);

unsigned long *unwind_get_return_address_ptr(struct unwind_state *state)
{
	if (unwind_done(state))
		return NULL;

	if (state->regs)
		return &state->regs->ip;

	if (state->sp)
		return (unsigned long *)state->sp - 1;

	return NULL;
}

static bool stack_access_ok(struct unwind_state *state, unsigned long _addr,
			    size_t len)
{
	struct stack_info *info = &state->stack_info;
	void *addr = (void *)_addr;

	if (!on_stack(info, addr, len) &&
	    (get_stack_info(addr, state->task, info, &state->stack_mask)))
		return false;

	return true;
}

static bool deref_stack_reg(struct unwind_state *state, unsigned long addr,
			    unsigned long *val)
{
	if (!stack_access_ok(state, addr, sizeof(long)))
		return false;

	*val = READ_ONCE_NOCHECK(*(unsigned long *)addr);
	return true;
}

static bool deref_stack_regs(struct unwind_state *state, unsigned long addr,
			     unsigned long *ip, unsigned long *sp)
{
	struct pt_regs *regs = (struct pt_regs *)addr;

	/* x86-32 support will be more complicated due to the &regs->sp hack */
	BUILD_BUG_ON(IS_ENABLED(CONFIG_X86_32));

	if (!stack_access_ok(state, addr, sizeof(struct pt_regs)))
		return false;

	*ip = READ_ONCE_NOCHECK(regs->ip);
	*sp = READ_ONCE_NOCHECK(regs->sp);
	return true;
}
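
/*
 * Read IP and SP from a hardware exception/iret frame.  'addr' points at the
 * iret frame itself (ip, cs, flags, sp, ss); the local 'regs' pointer is
 * rewound by IRET_FRAME_OFFSET so that regs->ip and regs->sp line up with
 * it, even though the earlier part of pt_regs isn't actually present on the
 * stack.
 */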
static bool deref_stack_iret_regs(struct unwind_state *state, unsigned long addr,
				  unsigned long *ip, unsigned long *sp)
{
	struct pt_regs *regs = (void *)addr - IRET_FRAME_OFFSET;

	if (!stack_access_ok(state, addr, IRET_FRAME_SIZE))
		return false;

	*ip = READ_ONCE_NOCHECK(regs->ip);
	*sp = READ_ONCE_NOCHECK(regs->sp);
	return true;
}

/*
 * If state->regs is non-NULL, and points to a full pt_regs, just get the reg
 * value from state->regs.
 *
 * Otherwise, if state->regs just points to IRET regs, and the previous frame
 * had full regs, it's safe to get the value from the previous regs.  This can
 * happen when early/late IRQ entry code gets interrupted by an NMI.
 */
static bool get_reg(struct unwind_state *state, unsigned int reg_off,
		    unsigned long *val)
{
	unsigned int reg = reg_off/8;

	if (!state->regs)
		return false;

	if (state->full_regs) {
		*val = READ_ONCE_NOCHECK(((unsigned long *)state->regs)[reg]);
		return true;
	}

	if (state->prev_regs) {
		*val = READ_ONCE_NOCHECK(((unsigned long *)state->prev_regs)[reg]);
		return true;
	}

	return false;
}
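
/*
 * Advance the unwind state by one frame.  In broad strokes: look up the ORC
 * entry for the current IP, recover the previous frame's stack pointer from
 * the base register the entry names (SP, BP, or a pt_regs register), recover
 * the previous IP according to the frame type (plain call frame, full
 * pt_regs frame, or iret-only frame), and finally recover BP.  Bad ORC data
 * or an unreadable stack marks the state as errored and ends the walk.
 */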
bool unwind_next_frame(struct unwind_state *state)
{
	unsigned long ip_p, sp, tmp, orig_ip = state->ip, prev_sp = state->sp;
	enum stack_type prev_type = state->stack_info.type;
	struct orc_entry *orc;
	bool indirect = false;

	if (unwind_done(state))
		return false;

	/* Don't let modules unload while we're reading their ORC data. */
	preempt_disable();

	/* End-of-stack check for user tasks: */
	if (state->regs && user_mode(state->regs))
		goto the_end;

	/*
	 * Find the orc_entry associated with the text address.
	 *
	 * For a call frame (as opposed to a signal frame), state->ip points to
	 * the instruction after the call.  That instruction's stack layout
	 * could be different from the call instruction's layout, for example
	 * if the call was to a noreturn function.  So get the ORC data for the
	 * call instruction itself.
	 */
	orc = orc_find(state->signal ? state->ip : state->ip - 1);
	if (!orc)
		goto err;

	/* End-of-stack check for kernel threads: */
	if (orc->sp_reg == ORC_REG_UNDEFINED) {
		if (!orc->end)
			goto err;

		goto the_end;
	}

	/* Find the previous frame's stack: */
	switch (orc->sp_reg) {
	case ORC_REG_SP:
		sp = state->sp + orc->sp_offset;
		break;

	case ORC_REG_BP:
		sp = state->bp + orc->sp_offset;
		break;

	case ORC_REG_SP_INDIRECT:
		sp = state->sp + orc->sp_offset;
		indirect = true;
		break;

	case ORC_REG_BP_INDIRECT:
		sp = state->bp + orc->sp_offset;
		indirect = true;
		break;

	case ORC_REG_R10:
		if (!get_reg(state, offsetof(struct pt_regs, r10), &sp)) {
			orc_warn("missing regs for base reg R10 at ip %pB\n",
				 (void *)state->ip);
			goto err;
		}
		break;

	case ORC_REG_R13:
		if (!get_reg(state, offsetof(struct pt_regs, r13), &sp)) {
			orc_warn("missing regs for base reg R13 at ip %pB\n",
				 (void *)state->ip);
			goto err;
		}
		break;

	case ORC_REG_DI:
		if (!get_reg(state, offsetof(struct pt_regs, di), &sp)) {
			orc_warn("missing regs for base reg DI at ip %pB\n",
				 (void *)state->ip);
			goto err;
		}
		break;

	case ORC_REG_DX:
		if (!get_reg(state, offsetof(struct pt_regs, dx), &sp)) {
			orc_warn("missing regs for base reg DX at ip %pB\n",
				 (void *)state->ip);
			goto err;
		}
		break;

	default:
		orc_warn("unknown SP base reg %d for ip %pB\n",
			 orc->sp_reg, (void *)state->ip);
		goto err;
	}

	if (indirect) {
		if (!deref_stack_reg(state, sp, &sp))
			goto err;
	}

	/* Find IP, SP and possibly regs: */
	switch (orc->type) {
	case ORC_TYPE_CALL:
		ip_p = sp - sizeof(long);

		if (!deref_stack_reg(state, ip_p, &state->ip))
			goto err;

		state->ip = ftrace_graph_ret_addr(state->task, &state->graph_idx,
						  state->ip, (void *)ip_p);

		state->sp = sp;
		state->regs = NULL;
		state->prev_regs = NULL;
		state->signal = false;
		break;

	case ORC_TYPE_REGS:
		if (!deref_stack_regs(state, sp, &state->ip, &state->sp)) {
			orc_warn("can't dereference registers at %p for ip %pB\n",
				 (void *)sp, (void *)orig_ip);
			goto err;
		}

		state->regs = (struct pt_regs *)sp;
		state->prev_regs = NULL;
		state->full_regs = true;
		state->signal = true;
		break;

	case ORC_TYPE_REGS_IRET:
		if (!deref_stack_iret_regs(state, sp, &state->ip, &state->sp)) {
			orc_warn("can't dereference iret registers at %p for ip %pB\n",
				 (void *)sp, (void *)orig_ip);
			goto err;
		}

		if (state->full_regs)
			state->prev_regs = state->regs;
		state->regs = (void *)sp - IRET_FRAME_OFFSET;
		state->full_regs = false;
		state->signal = true;
		break;

	default:
		orc_warn("unknown .orc_unwind entry type %d for ip %pB\n",
			 orc->type, (void *)orig_ip);
		goto err;
	}

	/* Find BP: */
	switch (orc->bp_reg) {
	case ORC_REG_UNDEFINED:
		if (get_reg(state, offsetof(struct pt_regs, bp), &tmp))
			state->bp = tmp;
		break;

	case ORC_REG_PREV_SP:
		if (!deref_stack_reg(state, sp + orc->bp_offset, &state->bp))
			goto err;
		break;

	case ORC_REG_BP:
		if (!deref_stack_reg(state, state->bp + orc->bp_offset, &state->bp))
			goto err;
		break;

	default:
		orc_warn("unknown BP base reg %d for ip %pB\n",
			 orc->bp_reg, (void *)orig_ip);
		goto err;
	}

	/* Prevent a recursive loop due to bad ORC data: */
	if (state->stack_info.type == prev_type &&
	    on_stack(&state->stack_info, (void *)state->sp, sizeof(long)) &&
	    state->sp <= prev_sp) {
		orc_warn("stack going in the wrong direction? ip=%pB\n",
			 (void *)orig_ip);
		goto err;
	}

	preempt_enable();
	return true;

err:
	state->error = true;

the_end:
	preempt_enable();
	state->stack_info.type = STACK_TYPE_UNKNOWN;
	return false;
}
EXPORT_SYMBOL_GPL(unwind_next_frame);
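
/*
 * Prime an unwind_state for a new stack walk.  There are three starting
 * points: an explicit pt_regs (e.g. from an exception), the currently
 * running task (IP/SP/BP are sampled inline), or a sleeping task (the saved
 * frame at task->thread.sp is used).  When starting from regs, the regs
 * frame itself is then skipped; otherwise the walk is fast-forwarded until
 * it reaches 'first_frame', so callers can hide their own entry frames.
 */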
void __unwind_start(struct unwind_state *state, struct task_struct *task,
		    struct pt_regs *regs, unsigned long *first_frame)
{
	memset(state, 0, sizeof(*state));
	state->task = task;

	if (!orc_init)
		goto err;

	/*
	 * Refuse to unwind the stack of a task while it's executing on another
	 * CPU.  This check is racy, but that's ok: the unwinder has other
	 * checks to prevent it from going off the rails.
	 */
	if (task_on_another_cpu(task))
		goto err;

	if (regs) {
		if (user_mode(regs))
			goto the_end;

		state->ip = regs->ip;
		state->sp = kernel_stack_pointer(regs);
		state->bp = regs->bp;
		state->regs = regs;
		state->full_regs = true;
		state->signal = true;

	} else if (task == current) {
		asm volatile("lea (%%rip), %0\n\t"
			     "mov %%rsp, %1\n\t"
			     "mov %%rbp, %2\n\t"
			     : "=r" (state->ip), "=r" (state->sp),
			       "=r" (state->bp));

	} else {
		struct inactive_task_frame *frame = (void *)task->thread.sp;

		state->sp = task->thread.sp + sizeof(*frame);
		state->bp = READ_ONCE_NOCHECK(frame->bp);
		state->ip = READ_ONCE_NOCHECK(frame->ret_addr);
		state->signal = (void *)state->ip == ret_from_fork;
	}

	if (get_stack_info((unsigned long *)state->sp, state->task,
			   &state->stack_info, &state->stack_mask)) {
		/*
		 * We weren't on a valid stack.  It's possible that
		 * we overflowed a valid stack into a guard page.
		 * See if the next page up is valid so that we can
		 * generate some kind of backtrace if this happens.
		 */
		void *next_page = (void *)PAGE_ALIGN((unsigned long)state->sp);
		state->error = true;
		if (get_stack_info(next_page, state->task, &state->stack_info,
				   &state->stack_mask))
			return;
	}

	/*
	 * The caller can provide the address of the first frame directly
	 * (first_frame) or indirectly (regs->sp) to indicate which stack frame
	 * to start unwinding at.  Skip ahead until we reach it.
	 */

	/* When starting from regs, skip the regs frame: */
	if (regs) {
		unwind_next_frame(state);
		return;
	}

	/* Otherwise, skip ahead to the user-specified starting frame: */
	while (!unwind_done(state) &&
	       (!on_stack(&state->stack_info, first_frame, sizeof(long)) ||
		state->sp < (unsigned long)first_frame))
		unwind_next_frame(state);

	return;

err:
	state->error = true;
the_end:
	state->stack_info.type = STACK_TYPE_UNKNOWN;
}
EXPORT_SYMBOL_GPL(__unwind_start);