/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (C) 2012 Regents of the University of California
 * Copyright (C) 2017 SiFive
 */

#include <linux/init.h>
#include <linux/linkage.h>

#include <asm/asm.h>
#include <asm/csr.h>
#include <asm/scs.h>
#include <asm/unistd.h>
#include <asm/page.h>
#include <asm/thread_info.h>
#include <asm/asm-offsets.h>
#include <asm/errata_list.h>
#include <linux/sizes.h>

	.section .irqentry.text, "ax"
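
/*
 * new_vmalloc_check
 *
 * Determine whether the current trap can be explained by a vmalloc mapping
 * created after this hart last issued a sfence.vma: for kernel-address
 * page/protection faults, test and atomically clear this cpu's bit in
 * new_vmalloc[], flush if required and retry the faulting access without
 * entering the C trap handler.
 *
 * Roughly equivalent to the following sketch (cpu = smp_processor_id()):
 *
 *	if (is_page_fault(cause) && is_kernel_address(tval) &&
 *	    test_and_clear_bit(cpu, new_vmalloc)) {
 *		local_flush_tlb_all();	// only if the uarch caches invalid entries
 *		return;			// retry the faulting access
 *	}
 *
 * Clobbers a0-a2, which are temporarily spilled to thread_info.
 */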
.macro new_vmalloc_check
	REG_S	a0, TASK_TI_A0(tp)
	csrr	a0, CSR_CAUSE
	/* Exclude IRQs */
	blt	a0, zero, .Lnew_vmalloc_restore_context_a0

	REG_S	a1, TASK_TI_A1(tp)
	/* Only check new_vmalloc if we are in page/protection fault */
	li	a1, EXC_LOAD_PAGE_FAULT
	beq	a0, a1, .Lnew_vmalloc_kernel_address
	li	a1, EXC_STORE_PAGE_FAULT
	beq	a0, a1, .Lnew_vmalloc_kernel_address
	li	a1, EXC_INST_PAGE_FAULT
	bne	a0, a1, .Lnew_vmalloc_restore_context_a1

.Lnew_vmalloc_kernel_address:
	/* Is it a kernel address? */
	csrr	a0, CSR_TVAL
	bge	a0, zero, .Lnew_vmalloc_restore_context_a1

	/* Check if a new vmalloc mapping appeared that could explain the trap */
	REG_S	a2, TASK_TI_A2(tp)
	/*
	 * Computes:
	 * a0 = &new_vmalloc[BIT_WORD(cpu)]
	 * a1 = BIT_MASK(cpu)
	 */
	lw	a2, TASK_TI_CPU(tp)
	/*
	 * Compute the new_vmalloc element position:
	 * (cpu / 64) * 8 = (cpu >> 6) << 3
	 */
	srli	a1, a2, 6
	slli	a1, a1, 3
	la	a0, new_vmalloc
	add	a0, a0, a1
	/*
	 * Compute the bit position in the new_vmalloc element:
	 * bit_pos = cpu % 64 = cpu - (cpu / 64) * 64 = cpu - (cpu >> 6) << 6
	 *	   = cpu - ((cpu >> 6) << 3) << 3
	 */
	slli	a1, a1, 3
	sub	a1, a2, a1
	/* Compute the "get mask": 1 << bit_pos */
	li	a2, 1
	sll	a1, a2, a1
	/* Check the value of new_vmalloc for this cpu */
	REG_L	a2, 0(a0)
	and	a2, a2, a1
	beq	a2, zero, .Lnew_vmalloc_restore_context

	/* Atomically reset the current cpu bit in new_vmalloc */
	amoxor.d	a0, a1, (a0)

	/* Only emit a sfence.vma if the uarch caches invalid entries */
	ALTERNATIVE("sfence.vma", "nop", 0, RISCV_ISA_EXT_SVVPTC, 1)

	REG_L	a0, TASK_TI_A0(tp)
	REG_L	a1, TASK_TI_A1(tp)
	REG_L	a2, TASK_TI_A2(tp)
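	/*
	 * Restore the kernel convention of sscratch == 0 (its pre-trap value,
	 * since we trapped from the kernel) before retrying the access.
	 */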
	csrw	CSR_SCRATCH, x0
	sret

.Lnew_vmalloc_restore_context:
	REG_L	a2, TASK_TI_A2(tp)
.Lnew_vmalloc_restore_context_a1:
	REG_L	a1, TASK_TI_A1(tp)
.Lnew_vmalloc_restore_context_a0:
	REG_L	a0, TASK_TI_A0(tp)
.endm

SYM_CODE_START(handle_exception)
	/*
	 * If coming from userspace, preserve the user thread pointer and load
	 * the kernel thread pointer. If we came from the kernel, the scratch
	 * register will contain 0, and we should continue on the current TP.
	 */
	csrrw	tp, CSR_SCRATCH, tp
	bnez	tp, .Lsave_context

.Lrestore_kernel_tpsp:
	csrr	tp, CSR_SCRATCH

#ifdef CONFIG_64BIT
	/*
	 * The RISC-V kernel does not eagerly emit a sfence.vma after each
	 * new vmalloc mapping, which may result in exceptions:
	 * - if the uarch caches invalid entries, the new mapping would not be
	 *   observed by the page table walker and an invalidation is needed.
	 * - if the uarch does not cache invalid entries, a reordered access
	 *   could "miss" the new mapping and trap: in that case, we only need
	 *   to retry the access, no sfence.vma is required.
	 */
	new_vmalloc_check
#endif

	REG_S	sp, TASK_TI_KERNEL_SP(tp)

#ifdef CONFIG_VMAP_STACK
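	/*
	 * Cheap overflow check: VMAP kernel stacks are THREAD_SIZE bytes and
	 * aligned to 2 * THREAD_SIZE, so bit THREAD_SHIFT of (sp minus the
	 * pt_regs frame) is clear while sp is still on the stack and becomes
	 * set once sp has run off its base.
	 */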
	addi	sp, sp, -(PT_SIZE_ON_STACK)
	srli	sp, sp, THREAD_SHIFT
	andi	sp, sp, 0x1
	bnez	sp, handle_kernel_stack_overflow
	REG_L	sp, TASK_TI_KERNEL_SP(tp)
#endif

.Lsave_context:
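	/* Stash the pre-trap sp, switch to the kernel stack and carve out a pt_regs frame */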
	REG_S	sp, TASK_TI_USER_SP(tp)
	REG_L	sp, TASK_TI_KERNEL_SP(tp)
	addi	sp, sp, -(PT_SIZE_ON_STACK)
	REG_S	x1, PT_RA(sp)
	REG_S	x3, PT_GP(sp)
	REG_S	x5, PT_T0(sp)
	save_from_x6_to_x31

	/*
	 * Disable user-mode memory access as it should only be set in the
	 * actual user copy routines.
	 *
	 * Disable the FPU/Vector to detect illegal usage of floating point
	 * or vector in kernel space.
	 */
	li	t0, SR_SUM | SR_FS_VS

	REG_L	s0, TASK_TI_USER_SP(tp)
	csrrc	s1, CSR_STATUS, t0
	csrr	s2, CSR_EPC
	csrr	s3, CSR_TVAL
	csrr	s4, CSR_CAUSE
	csrr	s5, CSR_SCRATCH
	REG_S	s0, PT_SP(sp)
	REG_S	s1, PT_STATUS(sp)
	REG_S	s2, PT_EPC(sp)
	REG_S	s3, PT_BADADDR(sp)
	REG_S	s4, PT_CAUSE(sp)
	REG_S	s5, PT_TP(sp)

	/*
	 * Set the scratch register to 0, so that if a recursive exception
	 * occurs, the exception vector knows it came from the kernel
	 */
	csrw	CSR_SCRATCH, x0

	/* Load the global pointer */
	load_global_pointer

	/* Load the kernel shadow call stack pointer if coming from userspace */
	scs_load_current_if_task_changed s5

#ifdef CONFIG_RISCV_ISA_V_PREEMPTIVE
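	/* Preemptible kernel-mode Vector: track context nesting across this trap */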
	move	a0, sp
	call	riscv_v_context_nesting_start
#endif

	move	a0, sp /* pt_regs */

	/*
	 * MSB of cause differentiates between
	 * interrupts and exceptions
	 */
	bge	s4, zero, 1f

	/* Handle interrupts */
	call	do_irq
	j	ret_from_exception
1:
	/* Handle other exceptions */
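	/* Each excp_vect_table entry is one pointer: scale scause by RISCV_LGPTR to index it */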
	slli	t0, s4, RISCV_LGPTR
	la	t1, excp_vect_table
	la	t2, excp_vect_table_end
	add	t0, t1, t0
	/* Check if exception code lies within bounds */
	bgeu	t0, t2, 3f
	REG_L	t1, 0(t0)
2:	jalr	t1
	j	ret_from_exception
3:
	la	t1, do_trap_unknown
	j	2b
SYM_CODE_END(handle_exception)
ASM_NOKPROBE(handle_exception)

/*
 * ret_from_exception must be called with interrupts disabled. Here is the
 * caller list:
 *  - handle_exception
 *  - ret_from_fork
 */
SYM_CODE_START_NOALIGN(ret_from_exception)
	REG_L	s0, PT_STATUS(sp)
#ifdef CONFIG_RISCV_M_MODE
	/* the MPP value is too large to be used as an immediate arg for addi */
	li	t0, SR_MPP
	and	s0, s0, t0
#else
	andi	s0, s0, SR_SPP
#endif
	bnez	s0, 1f

#ifdef CONFIG_GCC_PLUGIN_STACKLEAK
	call	stackleak_erase_on_task_stack
#endif

	/* Save unwound kernel stack pointer in thread_info */
	addi	s0, sp, PT_SIZE_ON_STACK
	REG_S	s0, TASK_TI_KERNEL_SP(tp)

	/* Save the kernel shadow call stack pointer */
	scs_save_current

	/*
	 * Save TP into the scratch register, so we can find the kernel data
	 * structures again.
	 */
	csrw	CSR_SCRATCH, tp
1:
#ifdef CONFIG_RISCV_ISA_V_PREEMPTIVE
	move	a0, sp
	call	riscv_v_context_nesting_end
#endif

	REG_L	a0, PT_STATUS(sp)
	/*
	 * The current load reservation is effectively part of the processor's
	 * state, in the sense that load reservations cannot be shared between
	 * different hart contexts. We can't actually save and restore a load
	 * reservation, so instead here we clear any existing reservation --
	 * it's always legal for implementations to clear load reservations at
	 * any point (as long as the forward progress guarantee is kept, but
	 * we'll ignore that here).
	 *
	 * Dangling load reservations can be the result of taking a trap in the
	 * middle of an LR/SC sequence, but can also be the result of a taken
	 * forward branch around an SC -- which is how we implement CAS. As a
	 * result we need to clear reservations between the last CAS and the
	 * jump back to the new context. While it is unlikely the store
	 * completes, implementations are allowed to expand reservations to be
	 * arbitrarily large.
	 */
	REG_L	a2, PT_EPC(sp)
	REG_SC	x0, a2, PT_EPC(sp)

	csrw	CSR_STATUS, a0
	csrw	CSR_EPC, a2

	REG_L	x1, PT_RA(sp)
	REG_L	x3, PT_GP(sp)
	REG_L	x4, PT_TP(sp)
	REG_L	x5, PT_T0(sp)
	restore_from_x6_to_x31

	REG_L	x2, PT_SP(sp)

#ifdef CONFIG_RISCV_M_MODE
	mret
#else
	sret
#endif
SYM_INNER_LABEL(ret_from_exception_end, SYM_L_GLOBAL)
SYM_CODE_END(ret_from_exception)
ASM_NOKPROBE(ret_from_exception)

#ifdef CONFIG_VMAP_STACK
SYM_CODE_START_LOCAL(handle_kernel_stack_overflow)
	/* we reach here from kernel context, sscratch must be 0 */
	csrrw	x31, CSR_SCRATCH, x31
	asm_per_cpu sp, overflow_stack, x31
	li	x31, OVERFLOW_STACK_SIZE
	add	sp, sp, x31
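	/* sp now points at the top of this CPU's overflow stack */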
	/* Write 0 back to sscratch and restore the original x31 */
	xor	x31, x31, x31
	csrrw	x31, CSR_SCRATCH, x31

	addi	sp, sp, -(PT_SIZE_ON_STACK)

	/* Save the context to the overflow stack */
	REG_S	x1, PT_RA(sp)
	REG_S	x3, PT_GP(sp)
	REG_S	x5, PT_T0(sp)
	save_from_x6_to_x31

	REG_L	s0, TASK_TI_KERNEL_SP(tp)
	csrr	s1, CSR_STATUS
	csrr	s2, CSR_EPC
	csrr	s3, CSR_TVAL
	csrr	s4, CSR_CAUSE
	csrr	s5, CSR_SCRATCH
	REG_S	s0, PT_SP(sp)
	REG_S	s1, PT_STATUS(sp)
	REG_S	s2, PT_EPC(sp)
	REG_S	s3, PT_BADADDR(sp)
	REG_S	s4, PT_CAUSE(sp)
	REG_S	s5, PT_TP(sp)
	move	a0, sp
	tail	handle_bad_stack
SYM_CODE_END(handle_kernel_stack_overflow)
ASM_NOKPROBE(handle_kernel_stack_overflow)
#endif

SYM_CODE_START(ret_from_fork)
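	/*
	 * s0/s1 were set up by copy_thread(): for a kernel thread, s0 is the
	 * thread function and s1 its argument; for a user task, s0 is zero.
	 */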
	call	schedule_tail
	beqz	s0, 1f	/* not from kernel thread */
	/* Call fn(arg) */
	move	a0, s1
	jalr	s0
1:
	move	a0, sp /* pt_regs */
	call	syscall_exit_to_user_mode
	j	ret_from_exception
SYM_CODE_END(ret_from_fork)

#ifdef CONFIG_IRQ_STACKS
/*
 * void call_on_irq_stack(struct pt_regs *regs,
 *			  void (*func)(struct pt_regs *));
 *
 * Calls func(regs) using the per-CPU IRQ stack.
 */
SYM_FUNC_START(call_on_irq_stack)
	/* Create a frame record to save ra and s0 (fp) */
	addi	sp, sp, -STACKFRAME_SIZE_ON_STACK
	REG_S	ra, STACKFRAME_RA(sp)
	REG_S	s0, STACKFRAME_FP(sp)
	addi	s0, sp, STACKFRAME_SIZE_ON_STACK

	/* Switch to the per-CPU shadow call stack */
	scs_save_current
	scs_load_irq_stack t0

	/* Switch to the per-CPU IRQ stack and call the handler */
	load_per_cpu t0, irq_stack_ptr, t1
	li	t1, IRQ_STACK_SIZE
	add	sp, t0, t1
	jalr	a1

	/* Switch back to the thread shadow call stack */
	scs_load_current

	/* Switch back to the thread stack and restore ra and s0 */
	addi	sp, s0, -STACKFRAME_SIZE_ON_STACK
	REG_L	ra, STACKFRAME_RA(sp)
	REG_L	s0, STACKFRAME_FP(sp)
	addi	sp, sp, STACKFRAME_SIZE_ON_STACK

	ret
SYM_FUNC_END(call_on_irq_stack)
#endif /* CONFIG_IRQ_STACKS */

/*
 * Integer register context switch
 * The callee-saved registers must be saved and restored.
 *
 *   a0: previous task_struct (must be preserved across the switch)
 *   a1: next task_struct
 *
 * The value of a0 and a1 must be preserved by this function, as that's how
 * arguments are passed to schedule_tail.
 */
SYM_FUNC_START(__switch_to)
	/* Save context into prev->thread */
	li	a4, TASK_THREAD_RA
	add	a3, a0, a4
	add	a4, a1, a4
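	/* a3 = &prev->thread.ra, a4 = &next->thread.ra (the *_RA offsets are relative to thread.ra) */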
	REG_S	ra, TASK_THREAD_RA_RA(a3)
	REG_S	sp, TASK_THREAD_SP_RA(a3)
	REG_S	s0, TASK_THREAD_S0_RA(a3)
	REG_S	s1, TASK_THREAD_S1_RA(a3)
	REG_S	s2, TASK_THREAD_S2_RA(a3)
	REG_S	s3, TASK_THREAD_S3_RA(a3)
	REG_S	s4, TASK_THREAD_S4_RA(a3)
	REG_S	s5, TASK_THREAD_S5_RA(a3)
	REG_S	s6, TASK_THREAD_S6_RA(a3)
	REG_S	s7, TASK_THREAD_S7_RA(a3)
	REG_S	s8, TASK_THREAD_S8_RA(a3)
	REG_S	s9, TASK_THREAD_S9_RA(a3)
	REG_S	s10, TASK_THREAD_S10_RA(a3)
	REG_S	s11, TASK_THREAD_S11_RA(a3)

	/* Save the kernel shadow call stack pointer */
	scs_save_current

	/* Restore context from next->thread */
	REG_L	ra, TASK_THREAD_RA_RA(a4)
	REG_L	sp, TASK_THREAD_SP_RA(a4)
	REG_L	s0, TASK_THREAD_S0_RA(a4)
	REG_L	s1, TASK_THREAD_S1_RA(a4)
	REG_L	s2, TASK_THREAD_S2_RA(a4)
	REG_L	s3, TASK_THREAD_S3_RA(a4)
	REG_L	s4, TASK_THREAD_S4_RA(a4)
	REG_L	s5, TASK_THREAD_S5_RA(a4)
	REG_L	s6, TASK_THREAD_S6_RA(a4)
	REG_L	s7, TASK_THREAD_S7_RA(a4)
	REG_L	s8, TASK_THREAD_S8_RA(a4)
	REG_L	s9, TASK_THREAD_S9_RA(a4)
	REG_L	s10, TASK_THREAD_S10_RA(a4)
	REG_L	s11, TASK_THREAD_S11_RA(a4)

	/* The offset of thread_info in task_struct is zero. */
	move	tp, a1

	/* Switch to the next shadow call stack */
	scs_load_current

	ret
SYM_FUNC_END(__switch_to)

#ifndef CONFIG_MMU
#define do_page_fault do_trap_unknown
#endif

	.section ".rodata"
	.align LGREG
/* Exception vector table */
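/* Indexed by the scause exception code; dispatched from handle_exception */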
SYM_DATA_START_LOCAL(excp_vect_table)
	RISCV_PTR do_trap_insn_misaligned
	ALT_INSN_FAULT(RISCV_PTR do_trap_insn_fault)
	RISCV_PTR do_trap_insn_illegal
	RISCV_PTR do_trap_break
	RISCV_PTR do_trap_load_misaligned
	RISCV_PTR do_trap_load_fault
	RISCV_PTR do_trap_store_misaligned
	RISCV_PTR do_trap_store_fault
	RISCV_PTR do_trap_ecall_u /* system call */
	RISCV_PTR do_trap_ecall_s
	RISCV_PTR do_trap_unknown
	RISCV_PTR do_trap_ecall_m
	/* instruction page fault */
	ALT_PAGE_FAULT(RISCV_PTR do_page_fault)
	RISCV_PTR do_page_fault   /* load page fault */
	RISCV_PTR do_trap_unknown
	RISCV_PTR do_page_fault   /* store page fault */
SYM_DATA_END_LABEL(excp_vect_table, SYM_L_LOCAL, excp_vect_table_end)

#ifndef CONFIG_MMU
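/*
 * User-mode signal return trampoline for NOMMU kernels: issues the
 * rt_sigreturn syscall.
 */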
SYM_DATA_START(__user_rt_sigreturn)
	li	a7, __NR_rt_sigreturn
	ecall
SYM_DATA_END(__user_rt_sigreturn)
#endif