/* arch/um/os-Linux/skas/process.c */
/*
 * Copyright (C) 2015 Thomas Meyer (thomas@m3y3r.de)
 * Copyright (C) 2002- 2007 Jeff Dike (jdike@{addtoit,linux.intel}.com)
 * Licensed under the GPL
 */

#include <stdlib.h>
#include <unistd.h>
#include <sched.h>
#include <errno.h>
#include <string.h>
#include <sys/mman.h>
#include <sys/wait.h>
#include <asm/unistd.h>
#include <as-layout.h>
#include <init.h>
#include <kern_util.h>
#include <mem.h>
#include <os.h>
#include <ptrace_user.h>
#include <registers.h>
#include <skas.h>
#include <sysdep/stub.h>
#include <linux/threads.h>
  24. int is_skas_winch(int pid, int fd, void *data)
  25. {
  26. return pid == getpgrp();
  27. }
/*
 * ptrace_dump_regs() - dump the general-purpose registers of a traced child.
 * @pid: pid of the ptrace()d child.
 *
 * Diagnostic helper used when the stub misbehaves.  Returns 0 on success,
 * -errno if PTRACE_GETREGS fails (e.g. the child is gone).
 */
static int ptrace_dump_regs(int pid)
{
	unsigned long regs[MAX_REG_NR];
	int i;

	if (ptrace(PTRACE_GETREGS, pid, 0, regs) < 0)
		return -errno;

	printk(UM_KERN_ERR "Stub registers -\n");
	for (i = 0; i < ARRAY_SIZE(regs); i++)
		printk(UM_KERN_ERR "\t%d - %lx\n", i, regs[i]);

	return 0;
}
/*
 * Signals that are OK to receive in the stub - we'll just continue it.
 * SIGWINCH will happen when UML is inside a detached screen.
 */
#define STUB_SIG_MASK ((1 << SIGALRM) | (1 << SIGWINCH))

/* Signals that the stub will finish with - anything else is an error */
#define STUB_DONE_MASK (1 << SIGTRAP)

/*
 * wait_stub_done() - wait until the stub running in the child completes.
 * @pid: pid of the traced child executing the stub.
 *
 * Loops on waitpid(): benign stops (STUB_SIG_MASK) are simply continued;
 * the loop exits on any other stop signal.  A SIGTRAP stop means the stub
 * finished normally.  Anything else is fatal - the stub registers are
 * dumped and the UML kernel dies via fatal_sigsegv().
 */
void wait_stub_done(int pid)
{
	int n, status, err;

	while (1) {
		CATCH_EINTR(n = waitpid(pid, &status, WUNTRACED | __WALL));
		if ((n < 0) || !WIFSTOPPED(status))
			goto bad_wait;

		/* Not a benign signal - leave the loop and check why. */
		if (((1 << WSTOPSIG(status)) & STUB_SIG_MASK) == 0)
			break;

		err = ptrace(PTRACE_CONT, pid, 0, 0);
		if (err) {
			printk(UM_KERN_ERR "wait_stub_done : continue failed, "
			       "errno = %d\n", errno);
			fatal_sigsegv();
		}
	}

	if (((1 << WSTOPSIG(status)) & STUB_DONE_MASK) != 0)
		return;

bad_wait:
	err = ptrace_dump_regs(pid);
	if (err)
		printk(UM_KERN_ERR "Failed to get registers from stub, "
		       "errno = %d\n", -err);
	printk(UM_KERN_ERR "wait_stub_done : failed to wait for SIGTRAP, "
	       "pid = %d, n = %d, errno = %d, status = 0x%x\n", pid, n, errno,
	       status);
	fatal_sigsegv();
}
extern unsigned long current_stub_stack(void);

/*
 * get_skas_faultinfo() - fetch page-fault details after a child SIGSEGV.
 * @pid: pid of the traced child.
 * @fi: where to copy the faultinfo prepared by the stub.
 * @aux_fp_regs: scratch buffer used to preserve the child's FP state
 *               across running the stub's SIGSEGV handler.
 *
 * Re-delivers the SIGSEGV to the child so the stub's segv handler runs,
 * waits for the stub to finish, then copies the faultinfo the handler
 * left at the start of the stub stack page.  Any failure is fatal.
 */
static void get_skas_faultinfo(int pid, struct faultinfo *fi, unsigned long *aux_fp_regs)
{
	int err;

	/* Save FP state - the stub handler will clobber it. */
	err = get_fp_registers(pid, aux_fp_regs);
	if (err < 0) {
		printk(UM_KERN_ERR "save_fp_registers returned %d\n",
		       err);
		fatal_sigsegv();
	}

	/* Continue the child with SIGSEGV so the stub handler runs. */
	err = ptrace(PTRACE_CONT, pid, 0, SIGSEGV);
	if (err) {
		printk(UM_KERN_ERR "Failed to continue stub, pid = %d, "
		       "errno = %d\n", pid, errno);
		fatal_sigsegv();
	}
	wait_stub_done(pid);

	/*
	 * faultinfo is prepared by the stub_segv_handler at start of
	 * the stub stack page. We just have to copy it.
	 */
	memcpy(fi, (void *)current_stub_stack(), sizeof(*fi));

	/* Restore the FP state saved above. */
	err = put_fp_registers(pid, aux_fp_regs);
	if (err < 0) {
		printk(UM_KERN_ERR "put_fp_registers returned %d\n",
		       err);
		fatal_sigsegv();
	}
}
  103. static void handle_segv(int pid, struct uml_pt_regs *regs, unsigned long *aux_fp_regs)
  104. {
  105. get_skas_faultinfo(pid, &regs->faultinfo, aux_fp_regs);
  106. segv(regs->faultinfo, 0, 1, NULL);
  107. }
/*
 * To use the same value of using_sysemu as the caller, ask it that value
 * (in local_using_sysemu).
 *
 * handle_trap() - handle a syscall trap from the traced child.
 * @pid: pid of the traced child.
 * @regs: the child's register state, already read back from ptrace.
 * @local_using_sysemu: snapshot of the sysemu mode used for this loop
 *                      iteration (avoids races if it changes under us).
 */
static void handle_trap(int pid, struct uml_pt_regs *regs,
			int local_using_sysemu)
{
	int err, status;

	/* A syscall trap from inside the stub itself is fatal. */
	if ((UPT_IP(regs) >= STUB_START) && (UPT_IP(regs) < STUB_END))
		fatal_sigsegv();

	if (!local_using_sysemu)
	{
		/*
		 * Without sysemu the syscall would run in the host: nullify
		 * it by rewriting the syscall number to getpid, then step to
		 * the end of the (now harmless) syscall.
		 */
		err = ptrace(PTRACE_POKEUSER, pid, PT_SYSCALL_NR_OFFSET,
			     __NR_getpid);
		if (err < 0) {
			printk(UM_KERN_ERR "handle_trap - nullifying syscall "
			       "failed, errno = %d\n", errno);
			fatal_sigsegv();
		}

		err = ptrace(PTRACE_SYSCALL, pid, 0, 0);
		if (err < 0) {
			printk(UM_KERN_ERR "handle_trap - continuing to end of "
			       "syscall failed, errno = %d\n", errno);
			fatal_sigsegv();
		}

		/* SIGTRAP | 0x80 marks a syscall stop (PTRACE_O_TRACESYSGOOD). */
		CATCH_EINTR(err = waitpid(pid, &status, WUNTRACED | __WALL));
		if ((err < 0) || !WIFSTOPPED(status) ||
		    (WSTOPSIG(status) != SIGTRAP + 0x80)) {
			err = ptrace_dump_regs(pid);
			if (err)
				printk(UM_KERN_ERR "Failed to get registers "
				       "from process, errno = %d\n", -err);
			printk(UM_KERN_ERR "handle_trap - failed to wait at "
			       "end of syscall, errno = %d, status = %d\n",
			       errno, status);
			fatal_sigsegv();
		}
	}

	handle_syscall(regs);
}
extern char __syscall_stub_start[];

/**
 * userspace_tramp() - userspace trampoline
 * @stack: pointer to the new userspace stack page, can be NULL, if? FIXME:
 *
 * The userspace trampoline is used to setup a new userspace process in
 * start_userspace() after it was clone()'ed.
 * This function will run on a temporary stack page.
 * It ptrace()'es itself, then
 * Two pages are mapped into the userspace address space:
 * - STUB_CODE (with EXEC), which contains the skas stub code
 * - STUB_DATA (with R/W), which contains a data page that is used to
 *   transfer certain data between the UML userspace process and the UML
 *   kernel.
 * Also for the userspace process a SIGSEGV handler is installed to catch
 * pagefaults in the userspace process.
 * And last the process stops itself to give control to the UML kernel for
 * this userspace process.
 *
 * Return: Always zero, otherwise the current userspace process is ended
 * with non null exit() call
 */
static int userspace_tramp(void *stack)
{
	void *addr;
	int fd;
	unsigned long long offset;

	ptrace(PTRACE_TRACEME, 0, 0, 0);

	signal(SIGTERM, SIG_DFL);
	signal(SIGWINCH, SIG_IGN);

	/*
	 * This has a pte, but it can't be mapped in with the usual
	 * tlb_flush mechanism because this is part of that mechanism
	 */
	fd = phys_mapping(to_phys(__syscall_stub_start), &offset);
	addr = mmap64((void *) STUB_CODE, UM_KERN_PAGE_SIZE,
		      PROT_EXEC, MAP_FIXED | MAP_PRIVATE, fd, offset);
	if (addr == MAP_FAILED) {
		printk(UM_KERN_ERR "mapping mmap stub at 0x%lx failed, "
		       "errno = %d\n", STUB_CODE, errno);
		exit(1);
	}

	if (stack != NULL) {
		/* Map the stub data page (shared with the UML kernel). */
		fd = phys_mapping(to_phys(stack), &offset);
		addr = mmap((void *) STUB_DATA,
			    UM_KERN_PAGE_SIZE, PROT_READ | PROT_WRITE,
			    MAP_FIXED | MAP_SHARED, fd, offset);
		if (addr == MAP_FAILED) {
			printk(UM_KERN_ERR "mapping segfault stack "
			       "at 0x%lx failed, errno = %d\n",
			       STUB_DATA, errno);
			exit(1);
		}
	}

	if (stack != NULL) {
		struct sigaction sa;
		/*
		 * The SIGSEGV handler is the stub's segv handler, addressed
		 * via its offset inside the mapped STUB_CODE page.
		 */
		unsigned long v = STUB_CODE +
				  (unsigned long) stub_segv_handler -
				  (unsigned long) __syscall_stub_start;

		/* Faults are handled on the stub data page as altstack. */
		set_sigstack((void *) STUB_DATA, UM_KERN_PAGE_SIZE);
		sigemptyset(&sa.sa_mask);
		sa.sa_flags = SA_ONSTACK | SA_NODEFER | SA_SIGINFO;
		sa.sa_sigaction = (void *) v;
		sa.sa_restorer = NULL;
		if (sigaction(SIGSEGV, &sa, NULL) < 0) {
			printk(UM_KERN_ERR "userspace_tramp - setting SIGSEGV "
			       "handler failed - errno = %d\n", errno);
			exit(1);
		}
	}

	/* Hand control back to the UML kernel. */
	kill(os_getpid(), SIGSTOP);
	return 0;
}
/* pid of the traced userspace child, per (virtual) CPU. */
int userspace_pid[NR_CPUS];

/**
 * start_userspace() - prepare a new userspace process
 * @stub_stack: pointer to the stub stack. Can be NULL, if? FIXME:
 *
 * Setups a new temporary stack page that is used while userspace_tramp() runs
 * Clones the kernel process into a new userspace process, with FDs only.
 *
 * Return: When positive: the process id of the new userspace process,
 * when negative: an error number.
 * FIXME: can PIDs become negative?!
 */
int start_userspace(unsigned long stub_stack)
{
	void *stack;
	unsigned long sp;
	int pid, status, n, flags, err;

	/* setup a temporary stack page */
	stack = mmap(NULL, UM_KERN_PAGE_SIZE,
		     PROT_READ | PROT_WRITE | PROT_EXEC,
		     MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
	if (stack == MAP_FAILED) {
		err = -errno;
		printk(UM_KERN_ERR "start_userspace : mmap failed, "
		       "errno = %d\n", errno);
		return err;
	}

	/* set stack pointer to the end of the stack page, so it can grow downwards */
	sp = (unsigned long) stack + UM_KERN_PAGE_SIZE - sizeof(void *);

	flags = CLONE_FILES | SIGCHLD;

	/* clone into new userspace process */
	pid = clone(userspace_tramp, (void *) sp, flags, (void *) stub_stack);
	if (pid < 0) {
		err = -errno;
		printk(UM_KERN_ERR "start_userspace : clone failed, "
		       "errno = %d\n", errno);
		return err;
	}

	/* Skip over SIGALRM stops until the child SIGSTOPs itself. */
	do {
		CATCH_EINTR(n = waitpid(pid, &status, WUNTRACED | __WALL));
		if (n < 0) {
			err = -errno;
			printk(UM_KERN_ERR "start_userspace : wait failed, "
			       "errno = %d\n", errno);
			goto out_kill;
		}
	} while (WIFSTOPPED(status) && (WSTOPSIG(status) == SIGALRM));

	if (!WIFSTOPPED(status) || (WSTOPSIG(status) != SIGSTOP)) {
		err = -EINVAL;
		printk(UM_KERN_ERR "start_userspace : expected SIGSTOP, got "
		       "status = %d\n", status);
		goto out_kill;
	}

	/* Mark syscall stops with SIGTRAP|0x80 (see handle_trap()). */
	if (ptrace(PTRACE_OLDSETOPTIONS, pid, NULL,
		   (void *) PTRACE_O_TRACESYSGOOD) < 0) {
		err = -errno;
		printk(UM_KERN_ERR "start_userspace : PTRACE_OLDSETOPTIONS "
		       "failed, errno = %d\n", errno);
		goto out_kill;
	}

	/* The child no longer runs on the temporary stack - free it. */
	if (munmap(stack, UM_KERN_PAGE_SIZE) < 0) {
		err = -errno;
		printk(UM_KERN_ERR "start_userspace : munmap failed, "
		       "errno = %d\n", errno);
		goto out_kill;
	}

	return pid;

out_kill:
	os_kill_ptraced_process(pid, 1);
	return err;
}
/*
 * userspace() - run the current UML task in its traced userspace child.
 * @regs: register state to load into the child and refill on each stop.
 * @aux_fp_regs: scratch FP-register buffer used by SIGSEGV handling.
 *
 * The main ptrace event loop: push the saved registers into the child,
 * continue it (with sysemu/singlestep as configured), wait for it to stop,
 * read the registers back and dispatch on the stop signal.  Does not
 * return; unrecoverable errors end in fatal_sigsegv().
 */
void userspace(struct uml_pt_regs *regs, unsigned long *aux_fp_regs)
{
	int err, status, op, pid = userspace_pid[0];
	/* To prevent races if using_sysemu changes under us.*/
	int local_using_sysemu;
	siginfo_t si;

	/* Handle any immediate reschedules or signals */
	interrupt_end();

	while (1) {
		/*
		 * This can legitimately fail if the process loads a
		 * bogus value into a segment register. It will
		 * segfault and PTRACE_GETREGS will read that value
		 * out of the process. However, PTRACE_SETREGS will
		 * fail. In this case, there is nothing to do but
		 * just kill the process.
		 */
		if (ptrace(PTRACE_SETREGS, pid, 0, regs->gp)) {
			printk(UM_KERN_ERR "userspace - ptrace set regs "
			       "failed, errno = %d\n", errno);
			fatal_sigsegv();
		}

		if (put_fp_registers(pid, regs->fp)) {
			printk(UM_KERN_ERR "userspace - ptrace set fp regs "
			       "failed, errno = %d\n", errno);
			fatal_sigsegv();
		}

		/* Now we set local_using_sysemu to be used for one loop */
		local_using_sysemu = get_using_sysemu();

		op = SELECT_PTRACE_OPERATION(local_using_sysemu,
					     singlestepping(NULL));

		if (ptrace(op, pid, 0, 0)) {
			printk(UM_KERN_ERR "userspace - ptrace continue "
			       "failed, op = %d, errno = %d\n", op, errno);
			fatal_sigsegv();
		}

		CATCH_EINTR(err = waitpid(pid, &status, WUNTRACED | __WALL));
		if (err < 0) {
			printk(UM_KERN_ERR "userspace - wait failed, "
			       "errno = %d\n", errno);
			fatal_sigsegv();
		}

		regs->is_user = 1;
		if (ptrace(PTRACE_GETREGS, pid, 0, regs->gp)) {
			printk(UM_KERN_ERR "userspace - PTRACE_GETREGS failed, "
			       "errno = %d\n", errno);
			fatal_sigsegv();
		}

		if (get_fp_registers(pid, regs->fp)) {
			printk(UM_KERN_ERR "userspace - get_fp_registers failed, "
			       "errno = %d\n", errno);
			fatal_sigsegv();
		}

		UPT_SYSCALL_NR(regs) = -1; /* Assume: It's not a syscall */

		if (WIFSTOPPED(status)) {
			int sig = WSTOPSIG(status);

			/* These signal handlers need the si argument.
			 * The SIGIO and SIGALARM handlers which constitute
			 * the majority of invocations, do not use it.
			 */
			ptrace(PTRACE_GETSIGINFO, pid, 0, (struct siginfo *)&si);

			switch (sig) {
			case SIGSEGV:
				if (PTRACE_FULL_FAULTINFO) {
					get_skas_faultinfo(pid,
							   &regs->faultinfo, aux_fp_regs);
					(*sig_info[SIGSEGV])(SIGSEGV, (struct siginfo *)&si,
							     regs);
				}
				else handle_segv(pid, regs, aux_fp_regs);
				break;
			case SIGTRAP + 0x80:
				/* Syscall stop (PTRACE_O_TRACESYSGOOD). */
				handle_trap(pid, regs, local_using_sysemu);
				break;
			case SIGTRAP:
				relay_signal(SIGTRAP, (struct siginfo *)&si, regs);
				break;
			case SIGALRM:
				break;
			case SIGIO:
			case SIGILL:
			case SIGBUS:
			case SIGFPE:
			case SIGWINCH:
				/* Run the UML handler with signals blocked. */
				block_signals();
				(*sig_info[sig])(sig, (struct siginfo *)&si, regs);
				unblock_signals();
				break;
			default:
				printk(UM_KERN_ERR "userspace - child stopped "
				       "with signal %d\n", sig);
				fatal_sigsegv();
			}
			/* The handlers may have switched mm - refresh pid. */
			pid = userspace_pid[0];
			interrupt_end();

			/* Avoid -ERESTARTSYS handling in host */
			if (PT_SYSCALL_NR_OFFSET != PT_SYSCALL_RET_OFFSET)
				PT_SYSCALL_NR(regs->gp) = -1;
		}
	}
}
/* Template register state for newly cloned stub threads. */
static unsigned long thread_regs[MAX_REG_NR];
static unsigned long thread_fp_regs[FP_SIZE];

/*
 * init_thread_regs() - build the register template used by
 * copy_context_skas0(): IP at the clone stub, SP at the top of the
 * stub data page.  Runs once at init time via __initcall.
 */
static int __init init_thread_regs(void)
{
	get_safe_registers(thread_regs, thread_fp_regs);
	/* Set parent's instruction pointer to start of clone-stub */
	thread_regs[REGS_IP_INDEX] = STUB_CODE +
				(unsigned long) stub_clone_handler -
				(unsigned long) __syscall_stub_start;
	thread_regs[REGS_SP_INDEX] = STUB_DATA + UM_KERN_PAGE_SIZE -
		sizeof(void *);
#ifdef __SIGNAL_FRAMESIZE
	/* Leave room for the host's signal frame below the stack top. */
	thread_regs[REGS_SP_INDEX] -= __SIGNAL_FRAMESIZE;
#endif
	return 0;
}

__initcall(init_thread_regs);
/*
 * copy_context_skas0() - fork a new userspace child via the clone stub.
 * @new_stack: physical stub-stack page for the child.
 * @pid: pid of the existing (parent) stub process.
 *
 * Drives the in-stub clone protocol: loads the clone-stub register
 * template into the parent, passes the child's stack mapping (fd/offset)
 * through the parent's stub data page, then waits for both parent and
 * child stubs to report success.
 *
 * Return: pid of the new child on success, negative error code otherwise.
 */
int copy_context_skas0(unsigned long new_stack, int pid)
{
	int err;
	unsigned long current_stack = current_stub_stack();
	struct stub_data *data = (struct stub_data *) current_stack;
	struct stub_data *child_data = (struct stub_data *) new_stack;
	unsigned long long new_offset;
	int new_fd = phys_mapping(to_phys((void *)new_stack), &new_offset);

	/*
	 * prepare offset and fd of child's stack as argument for parent's
	 * and child's mmap2 calls
	 */
	*data = ((struct stub_data) {
		.offset	= MMAP_OFFSET(new_offset),
		.fd     = new_fd
	});

	err = ptrace_setregs(pid, thread_regs);
	if (err < 0) {
		err = -errno;
		printk(UM_KERN_ERR "copy_context_skas0 : PTRACE_SETREGS "
		       "failed, pid = %d, errno = %d\n", pid, -err);
		return err;
	}

	err = put_fp_registers(pid, thread_fp_regs);
	if (err < 0) {
		printk(UM_KERN_ERR "copy_context_skas0 : put_fp_registers "
		       "failed, pid = %d, err = %d\n", pid, err);
		return err;
	}

	/* set a well known return code for detection of child write failure */
	child_data->err = 12345678;

	/*
	 * Wait, until parent has finished its work: read child's pid from
	 * parent's stack, and check, if bad result.
	 */
	err = ptrace(PTRACE_CONT, pid, 0, 0);
	if (err) {
		err = -errno;
		printk(UM_KERN_ERR "Failed to continue new process, pid = %d, "
		       "errno = %d\n", pid, errno);
		return err;
	}

	wait_stub_done(pid);

	/* The parent stub wrote the child's pid (or -errno) here. */
	pid = data->err;
	if (pid < 0) {
		printk(UM_KERN_ERR "copy_context_skas0 - stub-parent reports "
		       "error %d\n", -pid);
		return pid;
	}

	/*
	 * Wait, until child has finished too: read child's result from
	 * child's stack and check it.
	 */
	wait_stub_done(pid);
	if (child_data->err != STUB_DATA) {
		printk(UM_KERN_ERR "copy_context_skas0 - stub-child reports "
		       "error %ld\n", child_data->err);
		err = child_data->err;
		goto out_kill;
	}

	if (ptrace(PTRACE_OLDSETOPTIONS, pid, NULL,
		   (void *)PTRACE_O_TRACESYSGOOD) < 0) {
		err = -errno;
		printk(UM_KERN_ERR "copy_context_skas0 : PTRACE_OLDSETOPTIONS "
		       "failed, errno = %d\n", errno);
		goto out_kill;
	}

	return pid;

out_kill:
	os_kill_ptraced_process(pid, 1);
	return err;
}
  472. void new_thread(void *stack, jmp_buf *buf, void (*handler)(void))
  473. {
  474. (*buf)[0].JB_IP = (unsigned long) handler;
  475. (*buf)[0].JB_SP = (unsigned long) stack + UM_THREAD_SIZE -
  476. sizeof(void *);
  477. }
/* Reasons for jumping back to initial_jmpbuf (see start_idle_thread()). */
#define INIT_JMP_NEW_THREAD 0
#define INIT_JMP_CALLBACK 1
#define INIT_JMP_HALT 2
#define INIT_JMP_REBOOT 3

/*
 * switch_threads() - context-switch between two kernel threads.
 * @me: jump buffer to save the current context into.
 * @you: jump buffer of the thread to switch to.
 *
 * Returns (with UML_SETJMP != 0) when some other thread later switches
 * back to @me.
 */
void switch_threads(jmp_buf *me, jmp_buf *you)
{
	if (UML_SETJMP(me) == 0)
		UML_LONGJMP(you, 1);
}
/* Context of the initial (host) stack, jumped back to for global events. */
static jmp_buf initial_jmpbuf;

/* XXX Make these percpu */
/* Callback trampoline state used by initial_thread_cb_skas(). */
static void (*cb_proc)(void *arg);
static void *cb_arg;
static jmp_buf *cb_back;

/*
 * start_idle_thread() - anchor point on the initial stack.
 * @stack: stack area for the first kernel thread.
 * @switch_buf: jump buffer of the thread to start.
 *
 * Saves the initial context, then dispatches on the reason for each jump
 * back here: start a new thread, run a callback on the initial stack, or
 * halt/reboot (returning 0/1 to the caller).
 */
int start_idle_thread(void *stack, jmp_buf *switch_buf)
{
	int n;

	set_handler(SIGWINCH);

	/*
	 * Can't use UML_SETJMP or UML_LONGJMP here because they save
	 * and restore signals, with the possible side-effect of
	 * trying to handle any signals which came when they were
	 * blocked, which can't be done on this stack.
	 * Signals must be blocked when jumping back here and restored
	 * after returning to the jumper.
	 */
	n = setjmp(initial_jmpbuf);
	switch (n) {
	case INIT_JMP_NEW_THREAD:
		(*switch_buf)[0].JB_IP = (unsigned long) uml_finishsetup;
		(*switch_buf)[0].JB_SP = (unsigned long) stack +
			UM_THREAD_SIZE - sizeof(void *);
		break;
	case INIT_JMP_CALLBACK:
		(*cb_proc)(cb_arg);
		longjmp(*cb_back, 1);
		break;
	case INIT_JMP_HALT:
		kmalloc_ok = 0;
		return 0;
	case INIT_JMP_REBOOT:
		kmalloc_ok = 0;
		return 1;
	default:
		printk(UM_KERN_ERR "Bad sigsetjmp return in "
		       "start_idle_thread - %d\n", n);
		fatal_sigsegv();
	}
	longjmp(*switch_buf, 1);

	/* unreachable */
	printk(UM_KERN_ERR "impossible long jump!");
	fatal_sigsegv();
	return 0;
}
/*
 * initial_thread_cb_skas() - run @proc(@arg) on the initial stack.
 *
 * Stashes the callback in cb_proc/cb_arg, jumps to start_idle_thread()'s
 * context (which invokes it), and returns here afterwards via cb_back.
 * Signals are blocked around the jump - see the comment in
 * start_idle_thread().
 */
void initial_thread_cb_skas(void (*proc)(void *), void *arg)
{
	jmp_buf here;

	cb_proc = proc;
	cb_arg = arg;
	cb_back = &here;

	block_signals();
	if (UML_SETJMP(&here) == 0)
		UML_LONGJMP(&initial_jmpbuf, INIT_JMP_CALLBACK);
	unblock_signals();

	cb_proc = NULL;
	cb_arg = NULL;
	cb_back = NULL;
}
/*
 * halt_skas() - jump back to the initial stack to halt UML.
 * Does not return; start_idle_thread() returns 0 to its caller.
 */
void halt_skas(void)
{
	block_signals();
	UML_LONGJMP(&initial_jmpbuf, INIT_JMP_HALT);
}
/*
 * reboot_skas() - jump back to the initial stack to reboot UML.
 * Does not return; start_idle_thread() returns 1 to its caller.
 */
void reboot_skas(void)
{
	block_signals();
	UML_LONGJMP(&initial_jmpbuf, INIT_JMP_REBOOT);
}
/*
 * __switch_mm() - switch the traced userspace child to the one backing
 * @mm_idp's address space; picked up by the userspace() loop on its
 * next iteration.
 */
void __switch_mm(struct mm_id *mm_idp)
{
	userspace_pid[0] = mm_idp->u.pid;
}