/*
 * common.c - C code for kernel entry and exit
 * Copyright (c) 2015 Andrew Lutomirski
 * GPL v2
 *
 * Based on asm and ptrace code by many authors.  The code here originated
 * in ptrace.c and signal.c.
 */

#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/sched/task_stack.h>
#include <linux/mm.h>
#include <linux/smp.h>
#include <linux/errno.h>
#include <linux/ptrace.h>
#include <linux/tracehook.h>
#include <linux/audit.h>
#include <linux/seccomp.h>
#include <linux/signal.h>
#include <linux/export.h>
#include <linux/context_tracking.h>
#include <linux/user-return-notifier.h>
#include <linux/nospec.h>
#include <linux/uprobes.h>
#include <linux/livepatch.h>
#include <linux/syscalls.h>

#include <asm/desc.h>
#include <asm/traps.h>
#include <asm/vdso.h>
#include <linux/uaccess.h>
#include <asm/cpufeature.h>
#include <asm/nospec-branch.h>

#define CREATE_TRACE_POINTS
#include <trace/events/syscalls.h>

#ifdef CONFIG_CONTEXT_TRACKING
/* Called on entry from user mode with IRQs off. */
__visible inline void enter_from_user_mode(void)
{
	CT_WARN_ON(ct_state() != CONTEXT_USER);
	user_exit_irqoff();
}
#else
static inline void enter_from_user_mode(void) {}
#endif
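
/*
 * Note on the register choices below: the audit hook records only the
 * first four syscall arguments.  In the x86-64 SYSCALL ABI those live in
 * di/si/dx/r10 (r10 stands in for rcx, which the SYSCALL instruction
 * clobbers); in the i386 int $0x80 ABI they live in bx/cx/dx/si.
 */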
static void do_audit_syscall_entry(struct pt_regs *regs, u32 arch)
{
#ifdef CONFIG_X86_64
	if (arch == AUDIT_ARCH_X86_64) {
		audit_syscall_entry(regs->orig_ax, regs->di,
				    regs->si, regs->dx, regs->r10);
	} else
#endif
	{
		audit_syscall_entry(regs->orig_ax, regs->bx,
				    regs->cx, regs->dx, regs->si);
	}
}

/*
 * Returns the syscall nr to run (which should match regs->orig_ax) or -1
 * to skip the syscall.
 */
static long syscall_trace_enter(struct pt_regs *regs)
{
	u32 arch = in_ia32_syscall() ? AUDIT_ARCH_I386 : AUDIT_ARCH_X86_64;

	struct thread_info *ti = current_thread_info();
	unsigned long ret = 0;
	bool emulated = false;
	u32 work;

	if (IS_ENABLED(CONFIG_DEBUG_ENTRY))
		BUG_ON(regs != task_pt_regs(current));

	work = READ_ONCE(ti->flags) & _TIF_WORK_SYSCALL_ENTRY;

	if (unlikely(work & _TIF_SYSCALL_EMU))
		emulated = true;

	if ((emulated || (work & _TIF_SYSCALL_TRACE)) &&
	    tracehook_report_syscall_entry(regs))
		return -1L;

	if (emulated)
		return -1L;

#ifdef CONFIG_SECCOMP
	/*
	 * Do seccomp after ptrace, to catch any tracer changes.
	 */
	if (work & _TIF_SECCOMP) {
		struct seccomp_data sd;

		sd.arch = arch;
		sd.nr = regs->orig_ax;
		sd.instruction_pointer = regs->ip;
#ifdef CONFIG_X86_64
		if (arch == AUDIT_ARCH_X86_64) {
			sd.args[0] = regs->di;
			sd.args[1] = regs->si;
			sd.args[2] = regs->dx;
			sd.args[3] = regs->r10;
			sd.args[4] = regs->r8;
			sd.args[5] = regs->r9;
		} else
#endif
		{
			sd.args[0] = regs->bx;
			sd.args[1] = regs->cx;
			sd.args[2] = regs->dx;
			sd.args[3] = regs->si;
			sd.args[4] = regs->di;
			sd.args[5] = regs->bp;
		}

		ret = __secure_computing(&sd);
		if (ret == -1)
			return ret;
	}
#endif

	if (unlikely(test_thread_flag(TIF_SYSCALL_TRACEPOINT)))
		trace_sys_enter(regs, regs->orig_ax);

	do_audit_syscall_entry(regs, arch);
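
	/*
	 * "ret ?: regs->orig_ax" uses the GNU C conditional with an
	 * omitted middle operand: it evaluates to ret when ret is
	 * nonzero and to regs->orig_ax otherwise.
	 */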
	return ret ?: regs->orig_ax;
}

#define EXIT_TO_USERMODE_LOOP_FLAGS				\
	(_TIF_SIGPENDING | _TIF_NOTIFY_RESUME | _TIF_UPROBE |	\
	 _TIF_NEED_RESCHED | _TIF_USER_RETURN_NOTIFY | _TIF_PATCH_PENDING)

static void exit_to_usermode_loop(struct pt_regs *regs, u32 cached_flags)
{
	/*
	 * In order to return to user mode, we need to have IRQs off with
	 * none of EXIT_TO_USERMODE_LOOP_FLAGS set.  Several of these flags
	 * can be set at any time on preemptable kernels if we have IRQs on,
	 * so we need to loop.  Disabling preemption wouldn't help: doing the
	 * work to clear some of the flags can sleep.
	 */
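	/*
	 * For example, an interrupt can deliver a signal the instant
	 * local_irq_enable() below runs, setting TIF_SIGPENDING after
	 * cached_flags was sampled; the re-read at the bottom of the
	 * loop is what catches that.
	 */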
	while (true) {
		/* We have work to do. */
		local_irq_enable();

		if (cached_flags & _TIF_NEED_RESCHED)
			schedule();

		if (cached_flags & _TIF_UPROBE)
			uprobe_notify_resume(regs);

		if (cached_flags & _TIF_PATCH_PENDING)
			klp_update_patch_state(current);

		/* deal with pending signal delivery */
		if (cached_flags & _TIF_SIGPENDING)
			do_signal(regs);

		if (cached_flags & _TIF_NOTIFY_RESUME) {
			clear_thread_flag(TIF_NOTIFY_RESUME);
			tracehook_notify_resume(regs);
			rseq_handle_notify_resume(NULL, regs);
		}

		if (cached_flags & _TIF_USER_RETURN_NOTIFY)
			fire_user_return_notifiers();

		/* Disable IRQs and retry */
		local_irq_disable();

		cached_flags = READ_ONCE(current_thread_info()->flags);

		if (!(cached_flags & EXIT_TO_USERMODE_LOOP_FLAGS))
			break;
	}
}

/* Called with IRQs disabled. */
__visible inline void prepare_exit_to_usermode(struct pt_regs *regs)
{
	struct thread_info *ti = current_thread_info();
	u32 cached_flags;

	addr_limit_user_check();

	lockdep_assert_irqs_disabled();
	lockdep_sys_exit();

	cached_flags = READ_ONCE(ti->flags);

	if (unlikely(cached_flags & EXIT_TO_USERMODE_LOOP_FLAGS))
		exit_to_usermode_loop(regs, cached_flags);

#ifdef CONFIG_COMPAT
	/*
	 * Compat syscalls set TS_COMPAT.  Make sure we clear it before
	 * returning to user mode.  We need to clear it *after* signal
	 * handling, because syscall restart has a fixup for compat
	 * syscalls.  The fixup is exercised by the ptrace_syscall_32
	 * selftest.
	 *
	 * We also need to clear TS_I386_REGS_POKED: the 32-bit tracer
	 * special case only applies after poking regs and before the
	 * very next return to user mode.
	 */
	ti->status &= ~(TS_COMPAT|TS_I386_REGS_POKED);
#endif

	user_enter_irqoff();
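
	/*
	 * Flush CPU buffers on the way back to user mode: this is the
	 * VERW-based mitigation for the MDS class of CPU vulnerabilities
	 * and is a no-op when the mitigation is not enabled.
	 */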
	mds_user_clear_cpu_buffers();
}

#define SYSCALL_EXIT_WORK_FLAGS				\
	(_TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT |	\
	 _TIF_SINGLESTEP | _TIF_SYSCALL_TRACEPOINT)

static void syscall_slow_exit_work(struct pt_regs *regs, u32 cached_flags)
{
	bool step;

	audit_syscall_exit(regs);

	if (cached_flags & _TIF_SYSCALL_TRACEPOINT)
		trace_sys_exit(regs, regs->ax);

	/*
	 * If TIF_SYSCALL_EMU is set, we only get here because of
	 * TIF_SINGLESTEP (i.e. this is PTRACE_SYSEMU_SINGLESTEP).
	 * We already reported this syscall instruction in
	 * syscall_trace_enter().
	 */
	step = unlikely(
		(cached_flags & (_TIF_SINGLESTEP | _TIF_SYSCALL_EMU))
		== _TIF_SINGLESTEP);
	if (step || cached_flags & _TIF_SYSCALL_TRACE)
		tracehook_report_syscall_exit(regs, step);
}

/*
 * Called with IRQs on and fully valid regs.  Returns with IRQs off in a
 * state such that we can immediately switch to user mode.
 */
__visible inline void syscall_return_slowpath(struct pt_regs *regs)
{
	struct thread_info *ti = current_thread_info();
	u32 cached_flags = READ_ONCE(ti->flags);

	CT_WARN_ON(ct_state() != CONTEXT_KERNEL);

	if (IS_ENABLED(CONFIG_PROVE_LOCKING) &&
	    WARN(irqs_disabled(), "syscall %ld left IRQs disabled", regs->orig_ax))
		local_irq_enable();

	rseq_syscall(regs);

	/*
	 * First do one-time work.  If these work items are enabled, we
	 * want to run them exactly once per syscall exit with IRQs on.
	 */
	if (unlikely(cached_flags & SYSCALL_EXIT_WORK_FLAGS))
		syscall_slow_exit_work(regs, cached_flags);

	local_irq_disable();
	prepare_exit_to_usermode(regs);
}

#ifdef CONFIG_X86_64
__visible void do_syscall_64(unsigned long nr, struct pt_regs *regs)
{
	struct thread_info *ti;

	enter_from_user_mode();
	local_irq_enable();
	ti = current_thread_info();
	if (READ_ONCE(ti->flags) & _TIF_WORK_SYSCALL_ENTRY)
		nr = syscall_trace_enter(regs);

	/*
	 * NB: Native and x32 syscalls are dispatched from the same
	 * table.  The only functional difference is the x32 bit in
	 * regs->orig_ax, which changes the behavior of some syscalls.
	 */
	nr &= __SYSCALL_MASK;
	if (likely(nr < NR_syscalls)) {
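		/*
		 * array_index_nospec() clamps nr to [0, NR_syscalls)
		 * even under misspeculation, so a Spectre-v1 attack
		 * cannot steer the speculative table load below with
		 * an out-of-bounds nr.
		 */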
		nr = array_index_nospec(nr, NR_syscalls);
		regs->ax = sys_call_table[nr](regs);
	}

	syscall_return_slowpath(regs);
}
#endif

#if defined(CONFIG_X86_32) || defined(CONFIG_IA32_EMULATION)
/*
 * Does a 32-bit syscall.  Called with IRQs on in CONTEXT_KERNEL.  Does
 * all entry and exit work and returns with IRQs off.  This function is
 * extremely hot in workloads that use it, and it's usually called from
 * do_fast_syscall_32, so forcibly inline it to improve performance.
 */
static __always_inline void do_syscall_32_irqs_on(struct pt_regs *regs)
{
	struct thread_info *ti = current_thread_info();
	unsigned int nr = (unsigned int)regs->orig_ax;

#ifdef CONFIG_IA32_EMULATION
	ti->status |= TS_COMPAT;
#endif

	if (READ_ONCE(ti->flags) & _TIF_WORK_SYSCALL_ENTRY) {
		/*
		 * Subtlety here: if ptrace pokes something larger than
		 * 2^32-1 into orig_ax, this truncates it.  This may or
		 * may not be necessary, but it matches the old asm
		 * behavior.
		 */
		nr = syscall_trace_enter(regs);
	}

	if (likely(nr < IA32_NR_syscalls)) {
		nr = array_index_nospec(nr, IA32_NR_syscalls);
#ifdef CONFIG_IA32_EMULATION
		regs->ax = ia32_sys_call_table[nr](regs);
#else
		/*
		 * It's possible that a 32-bit syscall implementation
		 * takes a 64-bit parameter but nonetheless assumes that
		 * the high bits are zero.  Make sure we zero-extend all
		 * of the args.
		 */
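		/*
		 * The explicit unsigned int casts guarantee that each
		 * argument is passed as an unsigned 32-bit value, so
		 * any widening to a larger parameter type in the
		 * handler zero-extends rather than sign-extends.
		 */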
		regs->ax = ia32_sys_call_table[nr](
			(unsigned int)regs->bx, (unsigned int)regs->cx,
			(unsigned int)regs->dx, (unsigned int)regs->si,
			(unsigned int)regs->di, (unsigned int)regs->bp);
#endif /* CONFIG_IA32_EMULATION */
	}

	syscall_return_slowpath(regs);
}

/* Handles int $0x80 */
__visible void do_int80_syscall_32(struct pt_regs *regs)
{
	enter_from_user_mode();
	local_irq_enable();
	do_syscall_32_irqs_on(regs);
}

/* Returns 0 to return using IRET or 1 to return using SYSEXIT/SYSRETL. */
__visible long do_fast_syscall_32(struct pt_regs *regs)
{
	/*
	 * Called using the internal vDSO SYSENTER/SYSCALL32 calling
	 * convention.  Adjust regs so it looks like we entered using int80.
	 */
	unsigned long landing_pad = (unsigned long)current->mm->context.vdso +
		vdso_image_32.sym_int80_landing_pad;

	/*
	 * SYSENTER loses EIP, and even SYSCALL32 needs us to skip forward
	 * so that 'regs->ip -= 2' lands back on an int $0x80 instruction.
	 * Fix it up.
	 */
	regs->ip = landing_pad;
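
	/*
	 * The landing pad symbol in the 32-bit vDSO sits immediately
	 * after a real two-byte int $0x80 instruction, so the syscall
	 * restart fixup (regs->ip -= 2) points exactly at that
	 * instruction and the restart re-enters through int80.
	 */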

	enter_from_user_mode();

	local_irq_enable();

	/* Fetch EBP from where the vDSO stashed it. */
	if (
#ifdef CONFIG_X86_64
	    /*
	     * Micro-optimization: the pointer we're following is explicitly
	     * 32 bits, so it can't be out of range.
	     */
	    __get_user(*(u32 *)&regs->bp,
		       (u32 __user __force *)(unsigned long)(u32)regs->sp)
#else
	    get_user(*(u32 *)&regs->bp,
		     (u32 __user __force *)(unsigned long)(u32)regs->sp)
#endif
	    ) {

		/* User code screwed up. */
		local_irq_disable();
		regs->ax = -EFAULT;
		prepare_exit_to_usermode(regs);
		return 0;	/* Keep it simple: use IRET. */
	}

	/* Now this is just like a normal syscall. */
	do_syscall_32_irqs_on(regs);

#ifdef CONFIG_X86_64
	/*
	 * Opportunistic SYSRETL: if possible, try to return using SYSRETL.
	 * SYSRETL is available on all 64-bit CPUs, so we don't need to
	 * bother with SYSEXIT.
	 *
	 * Unlike 64-bit opportunistic SYSRET, we can't check that CX == IP,
	 * because the ECX fixup above will ensure that this is essentially
	 * never the case.
	 */
	return regs->cs == __USER32_CS && regs->ss == __USER_DS &&
		regs->ip == landing_pad &&
		(regs->flags & (X86_EFLAGS_RF | X86_EFLAGS_TF)) == 0;
#else
	/*
	 * Opportunistic SYSEXIT: if possible, try to return using SYSEXIT.
	 *
	 * Unlike 64-bit opportunistic SYSRET, we can't check that CX == IP,
	 * because the ECX fixup above will ensure that this is essentially
	 * never the case.
	 *
	 * We don't allow syscalls at all from VM86 mode, but we still
	 * need to check VM, because we might be returning from sys_vm86.
	 */
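	/*
	 * static_cpu_has(X86_FEATURE_SEP) checks that the CPU
	 * implements SYSENTER/SYSEXIT at all; without it, IRET is
	 * the only way back.
	 */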
	return static_cpu_has(X86_FEATURE_SEP) &&
		regs->cs == __USER_CS && regs->ss == __USER_DS &&
		regs->ip == landing_pad &&
		(regs->flags & (X86_EFLAGS_RF | X86_EFLAGS_TF | X86_EFLAGS_VM)) == 0;
#endif
}
#endif