// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2012 Regents of the University of California
 */

#include <linux/cpu.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/randomize_kstack.h>
#include <linux/sched.h>
#include <linux/sched/debug.h>
#include <linux/sched/signal.h>
#include <linux/signal.h>
#include <linux/kdebug.h>
#include <linux/uaccess.h>
#include <linux/kprobes.h>
#include <linux/uprobes.h>
#include <asm/uprobes.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/irq.h>
#include <linux/kexec.h>
#include <linux/entry-common.h>

#include <asm/asm-prototypes.h>
#include <asm/bug.h>
#include <asm/cfi.h>
#include <asm/csr.h>
#include <asm/processor.h>
#include <asm/ptrace.h>
#include <asm/syscall.h>
#include <asm/thread_info.h>
#include <asm/vector.h>
#include <asm/irq_stack.h>

int show_unhandled_signals = 1;

static DEFINE_RAW_SPINLOCK(die_lock);
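
/*
 * Fetch one 16-bit instruction parcel for dump_instr() without faulting:
 * kernel addresses via get_kernel_nofault(), user addresses only for the
 * current task via copy_from_user_nofault().
 */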
static int copy_code(struct pt_regs *regs, u16 *val, const u16 *insns)
{
	const void __user *uaddr = (__force const void __user *)insns;

	if (!user_mode(regs))
		return get_kernel_nofault(*val, insns);

	/* The user space code from other tasks cannot be accessed. */
	if (regs != task_pt_regs(current))
		return -EPERM;

	return copy_from_user_nofault(val, uaddr, sizeof(*val));
}
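
/*
 * Dump the instruction stream around the faulting PC: ten 16-bit parcels
 * before it and one after, with the parcel at the PC in parentheses.
 */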
static void dump_instr(const char *loglvl, struct pt_regs *regs)
{
	char str[sizeof("0000 ") * 12 + 2 + 1], *p = str;
	const u16 *insns = (u16 *)instruction_pointer(regs);
	long bad;
	u16 val;
	int i;

	for (i = -10; i < 2; i++) {
		bad = copy_code(regs, &val, &insns[i]);
		if (!bad) {
			p += sprintf(p, i == 0 ? "(%04hx) " : "%04hx ", val);
		} else {
			printk("%sCode: Unable to access instruction at 0x%px.\n",
			       loglvl, &insns[i]);
			return;
		}
	}
	printk("%sCode: %s\n", loglvl, str);
}
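
/*
 * Kernel oops path: serialize on die_lock, dump registers, modules and code,
 * notify the die chain, optionally trigger a crash kexec, then panic or kill
 * the offending task depending on context and panic_on_oops.
 */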
void die(struct pt_regs *regs, const char *str)
{
	static int die_counter;
	int ret;
	long cause;
	unsigned long flags;

	oops_enter();

	raw_spin_lock_irqsave(&die_lock, flags);
	console_verbose();
	bust_spinlocks(1);

	pr_emerg("%s [#%d]\n", str, ++die_counter);
	print_modules();
	if (regs) {
		show_regs(regs);
		dump_instr(KERN_EMERG, regs);
	}

	cause = regs ? regs->cause : -1;
	ret = notify_die(DIE_OOPS, str, regs, 0, cause, SIGSEGV);

	if (kexec_should_crash(current))
		crash_kexec(regs);

	bust_spinlocks(0);
	add_taint(TAINT_DIE, LOCKDEP_NOW_UNRELIABLE);
	raw_spin_unlock_irqrestore(&die_lock, flags);
	oops_exit();

	if (in_interrupt())
		panic("Fatal exception in interrupt");
	if (panic_on_oops)
		panic("Fatal exception");
	if (ret != NOTIFY_STOP)
		make_task_dead(SIGSEGV);
}
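
/*
 * Deliver a signal for a trap taken from user space, logging unhandled
 * signals (rate-limited) when show_unhandled_signals is set.
 */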
void do_trap(struct pt_regs *regs, int signo, int code, unsigned long addr)
{
	struct task_struct *tsk = current;

	if (show_unhandled_signals && unhandled_signal(tsk, signo)
	    && printk_ratelimit()) {
		pr_info("%s[%d]: unhandled signal %d code 0x%x at 0x" REG_FMT,
			tsk->comm, task_pid_nr(tsk), signo, code, addr);
		print_vma_addr(KERN_CONT " in ", instruction_pointer(regs));
		pr_cont("\n");
		__show_regs(regs);
		dump_instr(KERN_INFO, regs);
	}

	force_sig_fault(signo, code, (void __user *)addr);
}
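
/*
 * Common handling for error traps: user mode gets a signal via do_trap(),
 * kernel mode tries an exception-table fixup and otherwise dies.
 */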
static void do_trap_error(struct pt_regs *regs, int signo, int code,
			  unsigned long addr, const char *str)
{
	current->thread.bad_cause = regs->cause;

	if (user_mode(regs)) {
		do_trap(regs, signo, code, addr);
	} else {
		if (!fixup_exception(regs))
			die(regs, str);
	}
}
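
/*
 * Trap handlers go into a dedicated section for XIP kernels that use
 * alternatives; DO_ERROR_INFO() generates a handler that wraps do_trap_error()
 * in the generic entry/exit accounting for user- and kernel-mode traps.
 */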
#if defined(CONFIG_XIP_KERNEL) && defined(CONFIG_RISCV_ALTERNATIVE)
#define __trap_section __noinstr_section(".xip.traps")
#else
#define __trap_section noinstr
#endif
#define DO_ERROR_INFO(name, signo, code, str)					\
asmlinkage __visible __trap_section void name(struct pt_regs *regs)		\
{										\
	if (user_mode(regs)) {							\
		irqentry_enter_from_user_mode(regs);				\
		do_trap_error(regs, signo, code, regs->epc, "Oops - " str);	\
		irqentry_exit_to_user_mode(regs);				\
	} else {								\
		irqentry_state_t state = irqentry_nmi_enter(regs);		\
		do_trap_error(regs, signo, code, regs->epc, "Oops - " str);	\
		irqentry_nmi_exit(regs, state);					\
	}									\
}

DO_ERROR_INFO(do_trap_unknown,
	SIGILL, ILL_ILLTRP, "unknown exception");
DO_ERROR_INFO(do_trap_insn_misaligned,
	SIGBUS, BUS_ADRALN, "instruction address misaligned");
DO_ERROR_INFO(do_trap_insn_fault,
	SIGSEGV, SEGV_ACCERR, "instruction access fault");
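
/*
 * An illegal-instruction trap from user space may simply be the first use of
 * the vector unit; let riscv_v_first_use_handler() claim it (with interrupts
 * enabled) before raising SIGILL.
 */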
asmlinkage __visible __trap_section void do_trap_insn_illegal(struct pt_regs *regs)
{
	bool handled;

	if (user_mode(regs)) {
		irqentry_enter_from_user_mode(regs);

		local_irq_enable();

		handled = riscv_v_first_use_handler(regs);

		local_irq_disable();

		if (!handled)
			do_trap_error(regs, SIGILL, ILL_ILLOPC, regs->epc,
				      "Oops - illegal instruction");

		irqentry_exit_to_user_mode(regs);
	} else {
		irqentry_state_t state = irqentry_nmi_enter(regs);

		do_trap_error(regs, SIGILL, ILL_ILLOPC, regs->epc,
			      "Oops - illegal instruction");

		irqentry_nmi_exit(regs, state);
	}
}

DO_ERROR_INFO(do_trap_load_fault,
	SIGSEGV, SEGV_ACCERR, "load access fault");
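
/*
 * Misaligned loads and stores may be handled transparently; only when
 * handle_misaligned_load()/handle_misaligned_store() returns nonzero is the
 * trap reported as an error.
 */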
asmlinkage __visible __trap_section void do_trap_load_misaligned(struct pt_regs *regs)
{
	if (user_mode(regs)) {
		irqentry_enter_from_user_mode(regs);

		if (handle_misaligned_load(regs))
			do_trap_error(regs, SIGBUS, BUS_ADRALN, regs->epc,
				      "Oops - load address misaligned");

		irqentry_exit_to_user_mode(regs);
	} else {
		irqentry_state_t state = irqentry_nmi_enter(regs);

		if (handle_misaligned_load(regs))
			do_trap_error(regs, SIGBUS, BUS_ADRALN, regs->epc,
				      "Oops - load address misaligned");

		irqentry_nmi_exit(regs, state);
	}
}

asmlinkage __visible __trap_section void do_trap_store_misaligned(struct pt_regs *regs)
{
	if (user_mode(regs)) {
		irqentry_enter_from_user_mode(regs);

		if (handle_misaligned_store(regs))
			do_trap_error(regs, SIGBUS, BUS_ADRALN, regs->epc,
				      "Oops - store (or AMO) address misaligned");

		irqentry_exit_to_user_mode(regs);
	} else {
		irqentry_state_t state = irqentry_nmi_enter(regs);

		if (handle_misaligned_store(regs))
			do_trap_error(regs, SIGBUS, BUS_ADRALN, regs->epc,
				      "Oops - store (or AMO) address misaligned");

		irqentry_nmi_exit(regs, state);
	}
}

DO_ERROR_INFO(do_trap_store_fault,
	SIGSEGV, SEGV_ACCERR, "store (or AMO) access fault");
DO_ERROR_INFO(do_trap_ecall_s,
	SIGILL, ILL_ILLTRP, "environment call from S-mode");
DO_ERROR_INFO(do_trap_ecall_m,
	SIGILL, ILL_ILLTRP, "environment call from M-mode");
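
/* Length in bytes of the breakpoint instruction at @pc, or 0 if unreadable. */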
static inline unsigned long get_break_insn_length(unsigned long pc)
{
	bug_insn_t insn;

	if (get_kernel_nofault(insn, (bug_insn_t *)pc))
		return 0;

	return GET_INSN_LENGTH(insn);
}

static bool probe_single_step_handler(struct pt_regs *regs)
{
	bool user = user_mode(regs);

	return user ? uprobe_single_step_handler(regs) : kprobe_single_step_handler(regs);
}

static bool probe_breakpoint_handler(struct pt_regs *regs)
{
	bool user = user_mode(regs);

	return user ? uprobe_breakpoint_handler(regs) : kprobe_breakpoint_handler(regs);
}
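
/*
 * An ebreak may belong to a uprobe/kprobe single-step or breakpoint, a user
 * breakpoint (SIGTRAP), KGDB, a BUG()/WARN() or a CFI check failure; anything
 * unrecognised in kernel mode is a fatal bug.
 */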
void handle_break(struct pt_regs *regs)
{
	if (probe_single_step_handler(regs))
		return;

	if (probe_breakpoint_handler(regs))
		return;

	current->thread.bad_cause = regs->cause;

	if (user_mode(regs))
		force_sig_fault(SIGTRAP, TRAP_BRKPT, (void __user *)regs->epc);
#ifdef CONFIG_KGDB
	else if (notify_die(DIE_TRAP, "EBREAK", regs, 0, regs->cause, SIGTRAP)
			== NOTIFY_STOP)
		return;
#endif
	else if (report_bug(regs->epc, regs) == BUG_TRAP_TYPE_WARN ||
		 handle_cfi_failure(regs) == BUG_TRAP_TYPE_WARN)
		regs->epc += get_break_insn_length(regs->epc);
	else
		die(regs, "Kernel BUG");
}

asmlinkage __visible __trap_section void do_trap_break(struct pt_regs *regs)
{
	if (user_mode(regs)) {
		irqentry_enter_from_user_mode(regs);
		handle_break(regs);
		irqentry_exit_to_user_mode(regs);
	} else {
		irqentry_state_t state = irqentry_nmi_enter(regs);
		handle_break(regs);
		irqentry_nmi_exit(regs, state);
	}
}
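
/*
 * An environment call from U-mode is a system call: step over the 4-byte
 * ecall, save the original a0 for restart handling, default the return value
 * to -ENOSYS and dispatch through the generic syscall entry/exit code with a
 * randomized kernel stack offset.
 */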
asmlinkage __visible __trap_section __no_stack_protector
void do_trap_ecall_u(struct pt_regs *regs)
{
	if (user_mode(regs)) {
		long syscall = regs->a7;

		regs->epc += 4;
		regs->orig_a0 = regs->a0;
		regs->a0 = -ENOSYS;

		riscv_v_vstate_discard(regs);

		syscall = syscall_enter_from_user_mode(regs, syscall);

		add_random_kstack_offset();

		if (syscall >= 0 && syscall < NR_syscalls)
			syscall_handler(regs, syscall);

		/*
		 * Ultimately, this value will get limited by KSTACK_OFFSET_MAX(),
		 * so the maximum stack offset is 1k bytes (10 bits).
		 *
		 * The actual entropy will be further reduced by the compiler when
		 * applying stack alignment constraints: 16-byte (i.e. 4-bit) aligned
		 * for RV32I or RV64I.
		 *
		 * The resulting 6 bits of entropy is seen in SP[9:4].
		 */
		choose_random_kstack_offset(get_random_u16());

		syscall_exit_to_user_mode(regs);
	} else {
		irqentry_state_t state = irqentry_nmi_enter(regs);

		do_trap_error(regs, SIGILL, ILL_ILLTRP, regs->epc,
			      "Oops - environment call from U-mode");

		irqentry_nmi_exit(regs, state);
	}
}

#ifdef CONFIG_MMU
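/*
 * Page-fault entry point: wrap handle_page_fault() in the generic irqentry
 * bookkeeping, with interrupts disabled again before irqentry_exit().
 */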
asmlinkage __visible noinstr void do_page_fault(struct pt_regs *regs)
{
	irqentry_state_t state = irqentry_enter(regs);

	handle_page_fault(regs);

	local_irq_disable();

	irqentry_exit(regs, state);
}
#endif
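
/*
 * Interrupt entry: handle_riscv_irq() runs the root interrupt handler with
 * the IRQ pt_regs swapped in; do_irq() moves it onto the per-CPU IRQ stack
 * when CONFIG_IRQ_STACKS is enabled and we are still on the task stack.
 */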
static void noinstr handle_riscv_irq(struct pt_regs *regs)
{
	struct pt_regs *old_regs;

	irq_enter_rcu();
	old_regs = set_irq_regs(regs);
	handle_arch_irq(regs);
	set_irq_regs(old_regs);
	irq_exit_rcu();
}

asmlinkage void noinstr do_irq(struct pt_regs *regs)
{
	irqentry_state_t state = irqentry_enter(regs);

	if (IS_ENABLED(CONFIG_IRQ_STACKS) && on_thread_stack())
		call_on_irq_stack(regs, handle_riscv_irq);
	else
		handle_riscv_irq(regs);

	irqentry_exit(regs, state);
}

#ifdef CONFIG_GENERIC_BUG
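/*
 * A valid BUG() address must be a kernel address holding either the 32-bit
 * ebreak encoding (__BUG_INSN_32) or its 16-bit compressed form
 * (__BUG_INSN_16).
 */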
int is_valid_bugaddr(unsigned long pc)
{
	bug_insn_t insn;

	if (pc < VMALLOC_START)
		return 0;
	if (get_kernel_nofault(insn, (bug_insn_t *)pc))
		return 0;
	if ((insn & __INSN_LENGTH_MASK) == __INSN_LENGTH_32)
		return (insn == __BUG_INSN_32);
	else
		return ((insn & __COMPRESSED_INSN_MASK) == __BUG_INSN_16);
}
#endif /* CONFIG_GENERIC_BUG */

#ifdef CONFIG_VMAP_STACK
DEFINE_PER_CPU(unsigned long [OVERFLOW_STACK_SIZE/sizeof(long)],
		overflow_stack)__aligned(16);
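
/*
 * Called from the entry code, on the per-CPU overflow stack defined above,
 * when an exception cannot be handled because the kernel stack has
 * overflowed: report the stack ranges and registers, then panic.
 */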
asmlinkage void handle_bad_stack(struct pt_regs *regs)
{
	unsigned long tsk_stk = (unsigned long)current->stack;
	unsigned long ovf_stk = (unsigned long)this_cpu_ptr(overflow_stack);

	console_verbose();

	pr_emerg("Insufficient stack space to handle exception!\n");
	pr_emerg("Task stack: [0x%016lx..0x%016lx]\n",
		 tsk_stk, tsk_stk + THREAD_SIZE);
	pr_emerg("Overflow stack: [0x%016lx..0x%016lx]\n",
		 ovf_stk, ovf_stk + OVERFLOW_STACK_SIZE);

	__show_regs(regs);
	panic("Kernel stack overflow");

	for (;;)
		wait_for_interrupt();
}
#endif