/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
 *  PowerPC version
 *    Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
 *  Rewritten by Cort Dougan (cort@fsmlabs.com) for PReP
 *    Copyright (C) 1996 Cort Dougan <cort@fsmlabs.com>
 *  Adapted for Power Macintosh by Paul Mackerras.
 *  Low-level exception handlers and MMU support
 *  rewritten by Paul Mackerras.
 *    Copyright (C) 1996 Paul Mackerras.
 *  MPC8xx modifications Copyright (C) 1997 Dan Malek (dmalek@jlc.net).
 *
 *  This file contains the system call entry code, context switch
 *  code, and exception/interrupt return code for PowerPC.
 */

#include <linux/errno.h>
#include <linux/err.h>
#include <linux/sys.h>
#include <linux/threads.h>
#include <linux/linkage.h>

#include <asm/reg.h>
#include <asm/page.h>
#include <asm/mmu.h>
#include <asm/cputable.h>
#include <asm/thread_info.h>
#include <asm/ppc_asm.h>
#include <asm/asm-offsets.h>
#include <asm/unistd.h>
#include <asm/ptrace.h>
#include <asm/feature-fixups.h>
#include <asm/barrier.h>
#include <asm/kup.h>
#include <asm/bug.h>
#include <asm/interrupt.h>

#include "head_32.h"

/*
 * powerpc relies on return from interrupt/syscall being context synchronising
 * (which rfi is) to support ARCH_HAS_MEMBARRIER_SYNC_CORE without additional
 * synchronisation instructions.
 */
/*
 * Align to 4k to ensure that all functions modifying srr0/srr1 fit into
 * one page, so that no TLB miss can occur between the modification of
 * srr0/srr1 and the associated rfi.
 */
	.align	12

#if defined(CONFIG_PPC_BOOK3S_32) || defined(CONFIG_PPC_E500)
	.globl	prepare_transfer_to_handler
prepare_transfer_to_handler:
	/* if from kernel, check interrupted DOZE/NAP mode */
	lwz	r12,TI_LOCAL_FLAGS(r2)
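	/* the low local-flag bits land in CR7 so bt can test TLF_NAPPING/TLF_SLEEPING */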
	mtcrf	0x01,r12
	bt-	31-TLF_NAPPING,4f
	bt-	31-TLF_SLEEPING,7f
	blr

4:	rlwinm	r12,r12,0,~_TLF_NAPPING
	stw	r12,TI_LOCAL_FLAGS(r2)
	b	power_save_ppc32_restore

7:	rlwinm	r12,r12,0,~_TLF_SLEEPING
	stw	r12,TI_LOCAL_FLAGS(r2)
	lwz	r9,_MSR(r11)		/* if sleeping, clear MSR.EE */
	rlwinm	r9,r9,0,~MSR_EE
	lwz	r12,_LINK(r11)		/* and return to address in LR */
	REST_GPR(2, r11)
	b	fast_exception_return
_ASM_NOKPROBE_SYMBOL(prepare_transfer_to_handler)
#endif /* CONFIG_PPC_BOOK3S_32 || CONFIG_PPC_E500 */

#if defined(CONFIG_PPC_KUEP) && defined(CONFIG_PPC_BOOK3S_32)
SYM_FUNC_START(__kuep_lock)
	lwz	r9, THREAD+THSR0(r2)
	update_user_segments_by_4 r9, r10, r11, r12
	blr
SYM_FUNC_END(__kuep_lock)

SYM_FUNC_START_LOCAL(__kuep_unlock)
	lwz	r9, THREAD+THSR0(r2)
	rlwinm	r9,r9,0,~SR_NX
	update_user_segments_by_4 r9, r10, r11, r12
	blr
SYM_FUNC_END(__kuep_unlock)

.macro	kuep_lock
	bl	__kuep_lock
.endm
.macro	kuep_unlock
	bl	__kuep_unlock
.endm
#else
.macro	kuep_lock
.endm
.macro	kuep_unlock
.endm
#endif

	.globl	transfer_to_syscall
transfer_to_syscall:
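	/*
	 * On entry (as left by the syscall entry prologue in head_32.h):
	 *   r1  = kernel stack, pointing at the pt_regs frame
	 *   r9  = MSR at the time of the syscall
	 *   r10 = &current->thread (SPRN_SPRG_THREAD)
	 *   r11 = caller's stack pointer
	 *   LR  = caller's LR (sc does not modify it)
	 */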
	stw	r3, ORIG_GPR3(r1)
	stw	r11, GPR1(r1)
	stw	r11, 0(r1)
	mflr	r12
	stw	r12, _LINK(r1)
#ifdef CONFIG_BOOKE
	rlwinm	r9,r9,0,14,12		/* clear MSR_WE (necessary?) */
#endif
	lis	r12,STACK_FRAME_REGS_MARKER@ha /* exception frame marker */
	SAVE_GPR(2, r1)
	addi	r12,r12,STACK_FRAME_REGS_MARKER@l
	stw	r9,_MSR(r1)
	li	r2, INTERRUPT_SYSCALL
	stw	r12,STACK_INT_FRAME_MARKER(r1)
	stw	r2,_TRAP(r1)
	SAVE_GPR(0, r1)
	SAVE_GPRS(3, 8, r1)
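	/* r10 = &current->thread; step back to task_struct, r2 holds 'current' on 32-bit */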
	addi	r2,r10,-THREAD
	SAVE_NVGPRS(r1)
	kuep_lock

	/* Calling convention has r3 = regs, r4 = orig r0 */
	addi	r3,r1,STACK_INT_FRAME_REGS
	mr	r4,r0
	bl	system_call_exception

ret_from_syscall:
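	/* syscall_exit_prepare(r3 = syscall return value, r4 = regs, r5 = 0: not an scv syscall) */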
	addi	r4,r1,STACK_INT_FRAME_REGS
	li	r5,0
	bl	syscall_exit_prepare
#ifdef CONFIG_PPC_47x
	lis	r4,icache_44x_need_flush@ha
	lwz	r5,icache_44x_need_flush@l(r4)
	cmplwi	cr0,r5,0
	bne-	.L44x_icache_flush
#endif /* CONFIG_PPC_47x */
.L44x_icache_flush_return:
	kuep_unlock
	lwz	r4,_LINK(r1)
	lwz	r5,_CCR(r1)
	mtlr	r4
	lwz	r7,_NIP(r1)
	lwz	r8,_MSR(r1)
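	/* a non-zero result from syscall_exit_prepare means the full register set must be restored (3: below) */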
	cmpwi	r3,0
	REST_GPR(3, r1)
syscall_exit_finish:
	mtspr	SPRN_SRR0,r7
	mtspr	SPRN_SRR1,r8

	bne	3f
	mtcr	r5

1:	REST_GPR(2, r1)
	REST_GPR(1, r1)
	rfi

3:	mtcr	r5
	lwz	r4,_CTR(r1)
	lwz	r5,_XER(r1)
	REST_NVGPRS(r1)
	mtctr	r4
	mtxer	r5
	REST_GPR(0, r1)
	REST_GPRS(3, 12, r1)
	b	1b

#ifdef CONFIG_44x
.L44x_icache_flush:
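	/* iccci invalidates the entire instruction cache on 44x, then drop the flag */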
	li	r7,0
	iccci	r0,r0
	stw	r7,icache_44x_need_flush@l(r4)
	b	.L44x_icache_flush_return
#endif /* CONFIG_44x */

	.globl	ret_from_fork
ret_from_fork:
	REST_NVGPRS(r1)
	bl	schedule_tail
	li	r3,0	/* fork() return value */
	b	ret_from_syscall

	.globl	ret_from_kernel_user_thread
ret_from_kernel_user_thread:
	bl	schedule_tail
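	/* r14 = function to call, r15 = its argument, both set up by copy_thread() */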
	mtctr	r14
	mr	r3,r15
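	/* PPC440EP_ERR42: erratum workaround before the indirect branch (no-op on unaffected configs) */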
	PPC440EP_ERR42
	bctrl
	li	r3,0
	b	ret_from_syscall

	.globl	start_kernel_thread
start_kernel_thread:
	bl	schedule_tail
	mtctr	r14
	mr	r3,r15
	PPC440EP_ERR42
	bctrl
	/*
	 * This must not return. We actually want to BUG here, not WARN,
	 * because BUG will exit the process which is what the kernel thread
	 * should have done, which may give some hope of continuing.
	 */
100:	trap
	EMIT_BUG_ENTRY 100b,__FILE__,__LINE__,0

	.globl	fast_exception_return
fast_exception_return:
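	/*
	 * Expects: r9 = MSR to return with, r11 = pointer to the saved
	 * register frame, r12 = return NIP.
	 */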
#ifndef CONFIG_BOOKE
	andi.	r10,r9,MSR_RI		/* check for recoverable interrupt */
	beq	3f			/* if not, we've got problems */
#endif

2:	lwz	r10,_CCR(r11)
	REST_GPRS(1, 6, r11)
	mtcr	r10
	lwz	r10,_LINK(r11)
	mtlr	r10
	/* Clear the exception marker on the stack to avoid confusing stacktrace */
	li	r10, 0
	stw	r10, 8(r11)
	REST_GPR(10, r11)
#if defined(CONFIG_PPC_8xx) && defined(CONFIG_PERF_EVENTS)
	mtspr	SPRN_NRI, r0
#endif
	mtspr	SPRN_SRR1,r9
	mtspr	SPRN_SRR0,r12
	REST_GPR(9, r11)
	REST_GPR(12, r11)
	REST_GPR(11, r11)
	rfi
_ASM_NOKPROBE_SYMBOL(fast_exception_return)

/* aargh, a nonrecoverable interrupt, panic */
/* aargh, we don't know which trap this is */
3:
	li	r10,-1
	stw	r10,_TRAP(r11)
	prepare_transfer_to_handler
	bl	unrecoverable_exception
	trap	/* should not get here */

	.globl	interrupt_return
interrupt_return:
	lwz	r4,_MSR(r1)
	addi	r3,r1,STACK_INT_FRAME_REGS
	andi.	r0,r4,MSR_PR
	beq	.Lkernel_interrupt_return
	bl	interrupt_exit_user_prepare
	cmpwi	r3,0
	kuep_unlock
	bne-	.Lrestore_nvgprs

.Lfast_user_interrupt_return:
	lwz	r11,_NIP(r1)
	lwz	r12,_MSR(r1)
	mtspr	SPRN_SRR0,r11
	mtspr	SPRN_SRR1,r12
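	/*
	 * Clear any reservation so one taken in the kernel cannot make the
	 * interrupted context's stwcx. succeed spuriously: a plain stwcx.
	 * when the CPU ignores the reservation address, otherwise a dummy
	 * lwarx re-points the reservation at the kernel stack.
	 */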
BEGIN_FTR_SECTION
	stwcx.	r0,0,r1		/* to clear the reservation */
FTR_SECTION_ELSE
	lwarx	r0,0,r1
ALT_FTR_SECTION_END_IFCLR(CPU_FTR_STCX_CHECKS_ADDRESS)

	lwz	r3,_CCR(r1)
	lwz	r4,_LINK(r1)
	lwz	r5,_CTR(r1)
	lwz	r6,_XER(r1)
	li	r0,0

	/*
	 * Leaving a stale exception marker on the stack can confuse
	 * the reliable stack unwinder later on. Clear it.
	 */
	stw	r0,8(r1)
	REST_GPRS(7, 12, r1)

	mtcr	r3
	mtlr	r4
	mtctr	r5
	mtspr	SPRN_XER,r6

	REST_GPRS(2, 6, r1)
	REST_GPR(0, r1)
	REST_GPR(1, r1)
	rfi

.Lrestore_nvgprs:
	REST_NVGPRS(r1)
	b	.Lfast_user_interrupt_return

.Lkernel_interrupt_return:
	bl	interrupt_exit_kernel_prepare

.Lfast_kernel_interrupt_return:
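	/* a non-zero return from interrupt_exit_kernel_prepare asks us to finish an emulated stack store (1: below) */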
	cmpwi	cr1,r3,0
	lwz	r11,_NIP(r1)
	lwz	r12,_MSR(r1)
	mtspr	SPRN_SRR0,r11
	mtspr	SPRN_SRR1,r12
BEGIN_FTR_SECTION
	stwcx.	r0,0,r1		/* to clear the reservation */
FTR_SECTION_ELSE
	lwarx	r0,0,r1
ALT_FTR_SECTION_END_IFCLR(CPU_FTR_STCX_CHECKS_ADDRESS)

	lwz	r3,_LINK(r1)
	lwz	r4,_CTR(r1)
	lwz	r5,_XER(r1)
	lwz	r6,_CCR(r1)
	li	r0,0
	REST_GPRS(7, 12, r1)

	mtlr	r3
	mtctr	r4
	mtspr	SPRN_XER,r5

	/*
	 * Leaving a stale exception marker on the stack can confuse
	 * the reliable stack unwinder later on. Clear it.
	 */
	stw	r0,8(r1)

	REST_GPRS(2, 5, r1)

	bne-	cr1,1f /* emulate stack store */
	mtcr	r6
	REST_GPR(6, r1)
	REST_GPR(0, r1)
	REST_GPR(1, r1)
	rfi

1:	/*
	 * Emulate stack store with update. New r1 value was already calculated
	 * and updated in our interrupt regs by emulate_loadstore, but we can't
	 * store the previous value of r1 to the stack before re-loading our
	 * registers from it, otherwise they could be clobbered. Use
	 * SPRG Scratch0 as temporary storage to hold the store
	 * data, as interrupts are disabled here so it won't be clobbered.
	 */
	mtcr	r6
#ifdef CONFIG_BOOKE
	mtspr	SPRN_SPRG_WSCRATCH0, r9
#else
	mtspr	SPRN_SPRG_SCRATCH0, r9
#endif
	addi	r9,r1,INT_FRAME_SIZE	/* get original r1 */
	REST_GPR(6, r1)
	REST_GPR(0, r1)
	REST_GPR(1, r1)
	stw	r9,0(r1)	/* perform store component of stwu */
#ifdef CONFIG_BOOKE
	mfspr	r9, SPRN_SPRG_RSCRATCH0
#else
	mfspr	r9, SPRN_SPRG_SCRATCH0
#endif
	rfi
_ASM_NOKPROBE_SYMBOL(interrupt_return)

#ifdef CONFIG_BOOKE

/*
 * Returning from a critical interrupt in user mode doesn't need
 * to be any different from a normal exception.  For a critical
 * interrupt in the kernel, we just return (without checking for
 * preemption) since the interrupt may have happened at some crucial
 * place (e.g. inside the TLB miss handler), and because we will be
 * running with r1 pointing into critical_stack, not the current
 * process's kernel stack (and therefore current_thread_info() will
 * give the wrong answer).
 * We have to restore various SPRs that may have been in use at the
 * time of the critical interrupt.
 *
 */

#define RET_FROM_EXC_LEVEL(exc_lvl_srr0, exc_lvl_srr1, exc_lvl_rfi)	\
	REST_NVGPRS(r1);						\
	lwz	r3,_MSR(r1);						\
	andi.	r3,r3,MSR_PR;						\
	bne	interrupt_return;					\
	REST_GPR(0, r1);						\
	REST_GPRS(2, 8, r1);						\
	lwz	r10,_XER(r1);						\
	lwz	r11,_CTR(r1);						\
	mtspr	SPRN_XER,r10;						\
	mtctr	r11;							\
	stwcx.	r0,0,r1;		/* to clear the reservation */	\
	lwz	r11,_LINK(r1);						\
	mtlr	r11;							\
	lwz	r10,_CCR(r1);						\
	mtcrf	0xff,r10;						\
	lwz	r9,_DEAR(r1);						\
	lwz	r10,_ESR(r1);						\
	mtspr	SPRN_DEAR,r9;						\
	mtspr	SPRN_ESR,r10;						\
	lwz	r11,_NIP(r1);						\
	lwz	r12,_MSR(r1);						\
	mtspr	exc_lvl_srr0,r11;					\
	mtspr	exc_lvl_srr1,r12;					\
	REST_GPRS(9, 12, r1);						\
	REST_GPR(1, r1);						\
	exc_lvl_rfi;							\
	b	.;		/* prevent prefetch past exc_lvl_rfi */

#define	RESTORE_xSRR(exc_lvl_srr0, exc_lvl_srr1)			\
	lwz	r9,_##exc_lvl_srr0(r1);					\
	lwz	r10,_##exc_lvl_srr1(r1);				\
	mtspr	SPRN_##exc_lvl_srr0,r9;					\
	mtspr	SPRN_##exc_lvl_srr1,r10;

#if defined(CONFIG_PPC_E500)
#ifdef CONFIG_PHYS_64BIT
#define	RESTORE_MAS7							\
	lwz	r11,MAS7(r1);						\
	mtspr	SPRN_MAS7,r11;
#else
#define	RESTORE_MAS7
#endif /* CONFIG_PHYS_64BIT */
#define RESTORE_MMU_REGS						\
	lwz	r9,MAS0(r1);						\
	lwz	r10,MAS1(r1);						\
	lwz	r11,MAS2(r1);						\
	mtspr	SPRN_MAS0,r9;						\
	lwz	r9,MAS3(r1);						\
	mtspr	SPRN_MAS1,r10;						\
	lwz	r10,MAS6(r1);						\
	mtspr	SPRN_MAS2,r11;						\
	mtspr	SPRN_MAS3,r9;						\
	mtspr	SPRN_MAS6,r10;						\
	RESTORE_MAS7;
#elif defined(CONFIG_44x)
#define RESTORE_MMU_REGS						\
	lwz	r9,MMUCR(r1);						\
	mtspr	SPRN_MMUCR,r9;
#else
#define RESTORE_MMU_REGS
#endif

	.globl	ret_from_crit_exc
ret_from_crit_exc:
	RESTORE_xSRR(SRR0,SRR1);
	RESTORE_MMU_REGS;
	RET_FROM_EXC_LEVEL(SPRN_CSRR0, SPRN_CSRR1, PPC_RFCI)
_ASM_NOKPROBE_SYMBOL(ret_from_crit_exc)

	.globl	ret_from_debug_exc
ret_from_debug_exc:
	RESTORE_xSRR(SRR0,SRR1);
	RESTORE_xSRR(CSRR0,CSRR1);
	RESTORE_MMU_REGS;
	RET_FROM_EXC_LEVEL(SPRN_DSRR0, SPRN_DSRR1, PPC_RFDI)
_ASM_NOKPROBE_SYMBOL(ret_from_debug_exc)

	.globl	ret_from_mcheck_exc
ret_from_mcheck_exc:
	RESTORE_xSRR(SRR0,SRR1);
	RESTORE_xSRR(CSRR0,CSRR1);
	RESTORE_xSRR(DSRR0,DSRR1);
	RESTORE_MMU_REGS;
	RET_FROM_EXC_LEVEL(SPRN_MCSRR0, SPRN_MCSRR1, PPC_RFMCI)
_ASM_NOKPROBE_SYMBOL(ret_from_mcheck_exc)
#endif /* CONFIG_BOOKE */