/* SPDX-License-Identifier: GPL-2.0-or-later */
#include <asm/asm-offsets.h>
#include <asm/bug.h>
#ifdef CONFIG_PPC_BOOK3S
#include <asm/exception-64s.h>
#else
#include <asm/exception-64e.h>
#endif
#include <asm/feature-fixups.h>
#include <asm/head-64.h>
#include <asm/hw_irq.h>
#include <asm/kup.h>
#include <asm/mmu.h>
#include <asm/ppc_asm.h>
#include <asm/ptrace.h>
  15. .align 7
  16. .macro DEBUG_SRR_VALID srr
  17. #ifdef CONFIG_PPC_RFI_SRR_DEBUG
  18. .ifc \srr,srr
  19. mfspr r11,SPRN_SRR0
  20. ld r12,_NIP(r1)
  21. clrrdi r11,r11,2
  22. clrrdi r12,r12,2
  23. 100: tdne r11,r12
  24. EMIT_WARN_ENTRY 100b,__FILE__,__LINE__,(BUGFLAG_WARNING | BUGFLAG_ONCE)
  25. mfspr r11,SPRN_SRR1
  26. ld r12,_MSR(r1)
  27. 100: tdne r11,r12
  28. EMIT_WARN_ENTRY 100b,__FILE__,__LINE__,(BUGFLAG_WARNING | BUGFLAG_ONCE)
  29. .else
  30. mfspr r11,SPRN_HSRR0
  31. ld r12,_NIP(r1)
  32. clrrdi r11,r11,2
  33. clrrdi r12,r12,2
  34. 100: tdne r11,r12
  35. EMIT_WARN_ENTRY 100b,__FILE__,__LINE__,(BUGFLAG_WARNING | BUGFLAG_ONCE)
  36. mfspr r11,SPRN_HSRR1
  37. ld r12,_MSR(r1)
  38. 100: tdne r11,r12
  39. EMIT_WARN_ENTRY 100b,__FILE__,__LINE__,(BUGFLAG_WARNING | BUGFLAG_ONCE)
  40. .endif
  41. #endif
  42. .endm
  43. #ifdef CONFIG_PPC_BOOK3S
  44. .macro system_call_vectored name trapnr
  45. .globl system_call_vectored_\name
  46. system_call_vectored_\name:
  47. _ASM_NOKPROBE_SYMBOL(system_call_vectored_\name)
  48. SCV_INTERRUPT_TO_KERNEL
  49. mr r10,r1
  50. ld r1,PACAKSAVE(r13)
  51. std r10,0(r1)
  52. std r11,_LINK(r1)
  53. std r11,_NIP(r1) /* Saved LR is also the next instruction */
  54. std r12,_MSR(r1)
  55. std r0,GPR0(r1)
  56. std r10,GPR1(r1)
  57. std r2,GPR2(r1)
  58. LOAD_PACA_TOC()
  59. mfcr r12
  60. li r11,0
  61. /* Save syscall parameters in r3-r8 */
  62. SAVE_GPRS(3, 8, r1)
  63. /* Zero r9-r12, this should only be required when restoring all GPRs */
  64. std r11,GPR9(r1)
  65. std r11,GPR10(r1)
  66. std r11,GPR11(r1)
  67. std r11,GPR12(r1)
  68. std r9,GPR13(r1)
  69. SAVE_NVGPRS(r1)
  70. std r11,_XER(r1)
  71. std r11,_CTR(r1)
  72. li r11,\trapnr
  73. std r11,_TRAP(r1)
  74. std r12,_CCR(r1)
  75. std r3,ORIG_GPR3(r1)
  76. LOAD_REG_IMMEDIATE(r11, STACK_FRAME_REGS_MARKER)
  77. std r11,STACK_INT_FRAME_MARKER(r1) /* "regs" marker */
  78. /* Calling convention has r3 = regs, r4 = orig r0 */
  79. addi r3,r1,STACK_INT_FRAME_REGS
  80. mr r4,r0
  81. BEGIN_FTR_SECTION
  82. HMT_MEDIUM
  83. END_FTR_SECTION_IFSET(CPU_FTR_HAS_PPR)
  84. /*
  85. * scv enters with MSR[EE]=1 and is immediately considered soft-masked.
  86. * The entry vector already sets PACAIRQSOFTMASK to IRQS_ALL_DISABLED,
  87. * and interrupts may be masked and pending already.
  88. * system_call_exception() will call trace_hardirqs_off() which means
  89. * interrupts could already have been blocked before trace_hardirqs_off,
  90. * but this is the best we can do.
  91. */
  92. /*
  93. * Zero user registers to prevent influencing speculative execution
  94. * state of kernel code.
  95. */
  96. SANITIZE_SYSCALL_GPRS()
  97. bl CFUNC(system_call_exception)
  98. .Lsyscall_vectored_\name\()_exit:
  99. addi r4,r1,STACK_INT_FRAME_REGS
  100. li r5,1 /* scv */
  101. bl CFUNC(syscall_exit_prepare)
  102. std r1,PACA_EXIT_SAVE_R1(r13) /* save r1 for restart */
  103. .Lsyscall_vectored_\name\()_rst_start:
  104. lbz r11,PACAIRQHAPPENED(r13)
  105. andi. r11,r11,(~PACA_IRQ_HARD_DIS)@l
  106. bne- syscall_vectored_\name\()_restart
  107. li r11,IRQS_ENABLED
  108. stb r11,PACAIRQSOFTMASK(r13)
  109. li r11,0
  110. stb r11,PACAIRQHAPPENED(r13) # clear out possible HARD_DIS
  111. ld r2,_CCR(r1)
  112. ld r4,_NIP(r1)
  113. ld r5,_MSR(r1)
  114. BEGIN_FTR_SECTION
  115. stdcx. r0,0,r1 /* to clear the reservation */
  116. END_FTR_SECTION_IFCLR(CPU_FTR_STCX_CHECKS_ADDRESS)
  117. BEGIN_FTR_SECTION
  118. HMT_MEDIUM_LOW
  119. END_FTR_SECTION_IFSET(CPU_FTR_HAS_PPR)
  120. SANITIZE_RESTORE_NVGPRS()
  121. cmpdi r3,0
  122. bne .Lsyscall_vectored_\name\()_restore_regs
  123. /* rfscv returns with LR->NIA and CTR->MSR */
  124. mtlr r4
  125. mtctr r5
  126. /* Could zero these as per ABI, but we may consider a stricter ABI
  127. * which preserves these if libc implementations can benefit, so
  128. * restore them for now until further measurement is done. */
  129. REST_GPR(0, r1)
  130. REST_GPRS(4, 8, r1)
  131. /* Zero volatile regs that may contain sensitive kernel data */
  132. ZEROIZE_GPRS(9, 12)
  133. mtspr SPRN_XER,r0
  134. /*
  135. * We don't need to restore AMR on the way back to userspace for KUAP.
  136. * The value of AMR only matters while we're in the kernel.
  137. */
  138. mtcr r2
  139. REST_GPRS(2, 3, r1)
  140. REST_GPR(13, r1)
  141. REST_GPR(1, r1)
  142. RFSCV_TO_USER
  143. b . /* prevent speculative execution */
  144. .Lsyscall_vectored_\name\()_restore_regs:
  145. mtspr SPRN_SRR0,r4
  146. mtspr SPRN_SRR1,r5
  147. ld r3,_CTR(r1)
  148. ld r4,_LINK(r1)
  149. ld r5,_XER(r1)
  150. HANDLER_RESTORE_NVGPRS()
  151. REST_GPR(0, r1)
  152. mtcr r2
  153. mtctr r3
  154. mtlr r4
  155. mtspr SPRN_XER,r5
  156. REST_GPRS(2, 13, r1)
  157. REST_GPR(1, r1)
  158. RFI_TO_USER
  159. .Lsyscall_vectored_\name\()_rst_end:
  160. syscall_vectored_\name\()_restart:
  161. _ASM_NOKPROBE_SYMBOL(syscall_vectored_\name\()_restart)
  162. GET_PACA(r13)
  163. ld r1,PACA_EXIT_SAVE_R1(r13)
  164. LOAD_PACA_TOC()
  165. ld r3,RESULT(r1)
  166. addi r4,r1,STACK_INT_FRAME_REGS
  167. li r11,IRQS_ALL_DISABLED
  168. stb r11,PACAIRQSOFTMASK(r13)
  169. bl CFUNC(syscall_exit_restart)
  170. std r1,PACA_EXIT_SAVE_R1(r13) /* save r1 for restart */
  171. b .Lsyscall_vectored_\name\()_rst_start
  172. 1:
  173. SOFT_MASK_TABLE(.Lsyscall_vectored_\name\()_rst_start, 1b)
  174. RESTART_TABLE(.Lsyscall_vectored_\name\()_rst_start, .Lsyscall_vectored_\name\()_rst_end, syscall_vectored_\name\()_restart)
  175. .endm
  176. system_call_vectored common 0x3000
  177. /*
  178. * We instantiate another entry copy for the SIGILL variant, with TRAP=0x7ff0
  179. * which is tested by system_call_exception when r0 is -1 (as set by vector
  180. * entry code).
  181. */
  182. system_call_vectored sigill 0x7ff0
  183. #endif /* CONFIG_PPC_BOOK3S */
  184. .balign IFETCH_ALIGN_BYTES
  185. .globl system_call_common_real
  186. system_call_common_real:
  187. _ASM_NOKPROBE_SYMBOL(system_call_common_real)
  188. ld r10,PACAKMSR(r13) /* get MSR value for kernel */
  189. mtmsrd r10
  190. .balign IFETCH_ALIGN_BYTES
  191. .globl system_call_common
  192. system_call_common:
  193. _ASM_NOKPROBE_SYMBOL(system_call_common)
  194. mr r10,r1
  195. ld r1,PACAKSAVE(r13)
  196. std r10,0(r1)
  197. std r11,_NIP(r1)
  198. std r12,_MSR(r1)
  199. std r0,GPR0(r1)
  200. std r10,GPR1(r1)
  201. std r2,GPR2(r1)
  202. #ifdef CONFIG_PPC_E500
  203. START_BTB_FLUSH_SECTION
  204. BTB_FLUSH(r10)
  205. END_BTB_FLUSH_SECTION
  206. #endif
  207. LOAD_PACA_TOC()
  208. mfcr r12
  209. li r11,0
  210. /* Save syscall parameters in r3-r8 */
  211. SAVE_GPRS(3, 8, r1)
  212. /* Zero r9-r12, this should only be required when restoring all GPRs */
  213. std r11,GPR9(r1)
  214. std r11,GPR10(r1)
  215. std r11,GPR11(r1)
  216. std r11,GPR12(r1)
  217. std r9,GPR13(r1)
  218. SAVE_NVGPRS(r1)
  219. std r11,_XER(r1)
  220. std r11,_CTR(r1)
  221. mflr r10
  222. /*
  223. * This clears CR0.SO (bit 28), which is the error indication on
  224. * return from this system call.
  225. */
  226. rldimi r12,r11,28,(63-28)
  227. li r11,0xc00
  228. std r10,_LINK(r1)
  229. std r11,_TRAP(r1)
  230. std r12,_CCR(r1)
  231. std r3,ORIG_GPR3(r1)
  232. LOAD_REG_IMMEDIATE(r11, STACK_FRAME_REGS_MARKER)
  233. std r11,STACK_INT_FRAME_MARKER(r1) /* "regs" marker */
  234. /* Calling convention has r3 = regs, r4 = orig r0 */
  235. addi r3,r1,STACK_INT_FRAME_REGS
  236. mr r4,r0
  237. #ifdef CONFIG_PPC_BOOK3S
  238. li r11,1
  239. stb r11,PACASRR_VALID(r13)
  240. #endif
  241. /*
  242. * We always enter kernel from userspace with irq soft-mask enabled and
  243. * nothing pending. system_call_exception() will call
  244. * trace_hardirqs_off().
  245. */
  246. li r11,IRQS_ALL_DISABLED
  247. stb r11,PACAIRQSOFTMASK(r13)
  248. #ifdef CONFIG_PPC_BOOK3S
  249. li r12,-1 /* Set MSR_EE and MSR_RI */
  250. mtmsrd r12,1
  251. #else
  252. wrteei 1
  253. #endif
  254. /*
  255. * Zero user registers to prevent influencing speculative execution
  256. * state of kernel code.
  257. */
  258. SANITIZE_SYSCALL_GPRS()
  259. bl CFUNC(system_call_exception)
  260. .Lsyscall_exit:
  261. addi r4,r1,STACK_INT_FRAME_REGS
  262. li r5,0 /* !scv */
  263. bl CFUNC(syscall_exit_prepare)
  264. std r1,PACA_EXIT_SAVE_R1(r13) /* save r1 for restart */
  265. #ifdef CONFIG_PPC_BOOK3S
  266. .Lsyscall_rst_start:
  267. lbz r11,PACAIRQHAPPENED(r13)
  268. andi. r11,r11,(~PACA_IRQ_HARD_DIS)@l
  269. bne- syscall_restart
  270. #endif
  271. li r11,IRQS_ENABLED
  272. stb r11,PACAIRQSOFTMASK(r13)
  273. li r11,0
  274. stb r11,PACAIRQHAPPENED(r13) # clear out possible HARD_DIS
  275. ld r2,_CCR(r1)
  276. ld r6,_LINK(r1)
  277. mtlr r6
  278. #ifdef CONFIG_PPC_BOOK3S
  279. lbz r4,PACASRR_VALID(r13)
  280. cmpdi r4,0
  281. bne 1f
  282. li r4,0
  283. stb r4,PACASRR_VALID(r13)
  284. #endif
  285. ld r4,_NIP(r1)
  286. ld r5,_MSR(r1)
  287. mtspr SPRN_SRR0,r4
  288. mtspr SPRN_SRR1,r5
  289. 1:
  290. DEBUG_SRR_VALID srr
  291. BEGIN_FTR_SECTION
  292. stdcx. r0,0,r1 /* to clear the reservation */
  293. END_FTR_SECTION_IFCLR(CPU_FTR_STCX_CHECKS_ADDRESS)
  294. SANITIZE_RESTORE_NVGPRS()
  295. cmpdi r3,0
  296. bne .Lsyscall_restore_regs
  297. /* Zero volatile regs that may contain sensitive kernel data */
  298. ZEROIZE_GPR(0)
  299. ZEROIZE_GPRS(4, 12)
  300. mtctr r0
  301. mtspr SPRN_XER,r0
  302. .Lsyscall_restore_regs_cont:
  303. BEGIN_FTR_SECTION
  304. HMT_MEDIUM_LOW
  305. END_FTR_SECTION_IFSET(CPU_FTR_HAS_PPR)
  306. /*
  307. * We don't need to restore AMR on the way back to userspace for KUAP.
  308. * The value of AMR only matters while we're in the kernel.
  309. */
  310. mtcr r2
  311. REST_GPRS(2, 3, r1)
  312. REST_GPR(13, r1)
  313. REST_GPR(1, r1)
  314. RFI_TO_USER
  315. b . /* prevent speculative execution */
  316. .Lsyscall_restore_regs:
  317. ld r3,_CTR(r1)
  318. ld r4,_XER(r1)
  319. HANDLER_RESTORE_NVGPRS()
  320. mtctr r3
  321. mtspr SPRN_XER,r4
  322. REST_GPR(0, r1)
  323. REST_GPRS(4, 12, r1)
  324. b .Lsyscall_restore_regs_cont
  325. .Lsyscall_rst_end:
  326. #ifdef CONFIG_PPC_BOOK3S
  327. syscall_restart:
  328. _ASM_NOKPROBE_SYMBOL(syscall_restart)
  329. GET_PACA(r13)
  330. ld r1,PACA_EXIT_SAVE_R1(r13)
  331. LOAD_PACA_TOC()
  332. ld r3,RESULT(r1)
  333. addi r4,r1,STACK_INT_FRAME_REGS
  334. li r11,IRQS_ALL_DISABLED
  335. stb r11,PACAIRQSOFTMASK(r13)
  336. bl CFUNC(syscall_exit_restart)
  337. std r1,PACA_EXIT_SAVE_R1(r13) /* save r1 for restart */
  338. b .Lsyscall_rst_start
  339. 1:
  340. SOFT_MASK_TABLE(.Lsyscall_rst_start, 1b)
  341. RESTART_TABLE(.Lsyscall_rst_start, .Lsyscall_rst_end, syscall_restart)
  342. #endif
  343. /*
  344. * If MSR EE/RI was never enabled, IRQs not reconciled, NVGPRs not
  345. * touched, no exit work created, then this can be used.
  346. */
  347. .balign IFETCH_ALIGN_BYTES
  348. .globl fast_interrupt_return_srr
  349. fast_interrupt_return_srr:
  350. _ASM_NOKPROBE_SYMBOL(fast_interrupt_return_srr)
  351. kuap_check_amr r3, r4
  352. ld r5,_MSR(r1)
  353. andi. r0,r5,MSR_PR
  354. #ifdef CONFIG_PPC_BOOK3S
  355. beq 1f
  356. kuap_user_restore r3, r4
  357. b .Lfast_user_interrupt_return_srr
  358. 1: kuap_kernel_restore r3, r4
  359. andi. r0,r5,MSR_RI
  360. li r3,0 /* 0 return value, no EMULATE_STACK_STORE */
  361. bne+ .Lfast_kernel_interrupt_return_srr
  362. addi r3,r1,STACK_INT_FRAME_REGS
  363. bl CFUNC(unrecoverable_exception)
  364. b . /* should not get here */
  365. #else
  366. bne .Lfast_user_interrupt_return_srr
  367. b .Lfast_kernel_interrupt_return_srr
  368. #endif
  369. .macro interrupt_return_macro srr
  370. .balign IFETCH_ALIGN_BYTES
  371. .globl interrupt_return_\srr
  372. interrupt_return_\srr\():
  373. _ASM_NOKPROBE_SYMBOL(interrupt_return_\srr\())
  374. ld r4,_MSR(r1)
  375. andi. r0,r4,MSR_PR
  376. beq interrupt_return_\srr\()_kernel
  377. interrupt_return_\srr\()_user: /* make backtraces match the _kernel variant */
  378. _ASM_NOKPROBE_SYMBOL(interrupt_return_\srr\()_user)
  379. addi r3,r1,STACK_INT_FRAME_REGS
  380. bl CFUNC(interrupt_exit_user_prepare)
  381. #ifndef CONFIG_INTERRUPT_SANITIZE_REGISTERS
  382. cmpdi r3,0
  383. bne- .Lrestore_nvgprs_\srr
  384. .Lrestore_nvgprs_\srr\()_cont:
  385. #endif
  386. std r1,PACA_EXIT_SAVE_R1(r13) /* save r1 for restart */
  387. #ifdef CONFIG_PPC_BOOK3S
  388. .Linterrupt_return_\srr\()_user_rst_start:
  389. lbz r11,PACAIRQHAPPENED(r13)
  390. andi. r11,r11,(~PACA_IRQ_HARD_DIS)@l
  391. bne- interrupt_return_\srr\()_user_restart
  392. #endif
  393. li r11,IRQS_ENABLED
  394. stb r11,PACAIRQSOFTMASK(r13)
  395. li r11,0
  396. stb r11,PACAIRQHAPPENED(r13) # clear out possible HARD_DIS
  397. .Lfast_user_interrupt_return_\srr\():
  398. SANITIZE_RESTORE_NVGPRS()
  399. #ifdef CONFIG_PPC_BOOK3S
  400. .ifc \srr,srr
  401. lbz r4,PACASRR_VALID(r13)
  402. .else
  403. lbz r4,PACAHSRR_VALID(r13)
  404. .endif
  405. cmpdi r4,0
  406. li r4,0
  407. bne 1f
  408. #endif
  409. ld r11,_NIP(r1)
  410. ld r12,_MSR(r1)
  411. .ifc \srr,srr
  412. mtspr SPRN_SRR0,r11
  413. mtspr SPRN_SRR1,r12
  414. 1:
  415. #ifdef CONFIG_PPC_BOOK3S
  416. stb r4,PACASRR_VALID(r13)
  417. #endif
  418. .else
  419. mtspr SPRN_HSRR0,r11
  420. mtspr SPRN_HSRR1,r12
  421. 1:
  422. #ifdef CONFIG_PPC_BOOK3S
  423. stb r4,PACAHSRR_VALID(r13)
  424. #endif
  425. .endif
  426. DEBUG_SRR_VALID \srr
  427. #ifdef CONFIG_PPC_IRQ_SOFT_MASK_DEBUG
  428. lbz r4,PACAIRQSOFTMASK(r13)
  429. tdnei r4,IRQS_ENABLED
  430. #endif
  431. BEGIN_FTR_SECTION
  432. ld r10,_PPR(r1)
  433. mtspr SPRN_PPR,r10
  434. END_FTR_SECTION_IFSET(CPU_FTR_HAS_PPR)
  435. BEGIN_FTR_SECTION
  436. stdcx. r0,0,r1 /* to clear the reservation */
  437. FTR_SECTION_ELSE
  438. ldarx r0,0,r1
  439. ALT_FTR_SECTION_END_IFCLR(CPU_FTR_STCX_CHECKS_ADDRESS)
  440. ld r3,_CCR(r1)
  441. ld r4,_LINK(r1)
  442. ld r5,_CTR(r1)
  443. ld r6,_XER(r1)
  444. li r0,0
  445. REST_GPRS(7, 13, r1)
  446. mtcr r3
  447. mtlr r4
  448. mtctr r5
  449. mtspr SPRN_XER,r6
  450. REST_GPRS(2, 6, r1)
  451. REST_GPR(0, r1)
  452. REST_GPR(1, r1)
  453. .ifc \srr,srr
  454. RFI_TO_USER
  455. .else
  456. HRFI_TO_USER
  457. .endif
  458. b . /* prevent speculative execution */
  459. .Linterrupt_return_\srr\()_user_rst_end:
  460. #ifndef CONFIG_INTERRUPT_SANITIZE_REGISTERS
  461. .Lrestore_nvgprs_\srr\():
  462. REST_NVGPRS(r1)
  463. b .Lrestore_nvgprs_\srr\()_cont
  464. #endif
  465. #ifdef CONFIG_PPC_BOOK3S
  466. interrupt_return_\srr\()_user_restart:
  467. _ASM_NOKPROBE_SYMBOL(interrupt_return_\srr\()_user_restart)
  468. GET_PACA(r13)
  469. ld r1,PACA_EXIT_SAVE_R1(r13)
  470. LOAD_PACA_TOC()
  471. addi r3,r1,STACK_INT_FRAME_REGS
  472. li r11,IRQS_ALL_DISABLED
  473. stb r11,PACAIRQSOFTMASK(r13)
  474. bl CFUNC(interrupt_exit_user_restart)
  475. std r1,PACA_EXIT_SAVE_R1(r13) /* save r1 for restart */
  476. b .Linterrupt_return_\srr\()_user_rst_start
  477. 1:
  478. SOFT_MASK_TABLE(.Linterrupt_return_\srr\()_user_rst_start, 1b)
  479. RESTART_TABLE(.Linterrupt_return_\srr\()_user_rst_start, .Linterrupt_return_\srr\()_user_rst_end, interrupt_return_\srr\()_user_restart)
  480. #endif
  481. .balign IFETCH_ALIGN_BYTES
  482. interrupt_return_\srr\()_kernel:
  483. _ASM_NOKPROBE_SYMBOL(interrupt_return_\srr\()_kernel)
  484. addi r3,r1,STACK_INT_FRAME_REGS
  485. bl CFUNC(interrupt_exit_kernel_prepare)
  486. std r1,PACA_EXIT_SAVE_R1(r13) /* save r1 for restart */
  487. .Linterrupt_return_\srr\()_kernel_rst_start:
  488. ld r11,SOFTE(r1)
  489. cmpwi r11,IRQS_ENABLED
  490. stb r11,PACAIRQSOFTMASK(r13)
  491. beq .Linterrupt_return_\srr\()_soft_enabled
  492. /*
  493. * Returning to soft-disabled context.
  494. * Check if a MUST_HARD_MASK interrupt has become pending, in which
  495. * case we need to disable MSR[EE] in the return context.
  496. *
  497. * The MSR[EE] check catches among other things the short incoherency
  498. * in hard_irq_disable() between clearing MSR[EE] and setting
  499. * PACA_IRQ_HARD_DIS.
  500. */
  501. ld r12,_MSR(r1)
  502. andi. r10,r12,MSR_EE
  503. beq .Lfast_kernel_interrupt_return_\srr\() // EE already disabled
  504. lbz r11,PACAIRQHAPPENED(r13)
  505. andi. r10,r11,PACA_IRQ_MUST_HARD_MASK
  506. bne 1f // HARD_MASK is pending
  507. // No HARD_MASK pending, clear possible HARD_DIS set by interrupt
  508. andi. r11,r11,(~PACA_IRQ_HARD_DIS)@l
  509. stb r11,PACAIRQHAPPENED(r13)
  510. b .Lfast_kernel_interrupt_return_\srr\()
  511. 1: /* Must clear MSR_EE from _MSR */
  512. #ifdef CONFIG_PPC_BOOK3S
  513. li r10,0
  514. /* Clear valid before changing _MSR */
  515. .ifc \srr,srr
  516. stb r10,PACASRR_VALID(r13)
  517. .else
  518. stb r10,PACAHSRR_VALID(r13)
  519. .endif
  520. #endif
  521. xori r12,r12,MSR_EE
  522. std r12,_MSR(r1)
  523. b .Lfast_kernel_interrupt_return_\srr\()
  524. .Linterrupt_return_\srr\()_soft_enabled:
  525. /*
  526. * In the soft-enabled case, need to double-check that we have no
  527. * pending interrupts that might have come in before we reached the
  528. * restart section of code, and restart the exit so those can be
  529. * handled.
  530. *
  531. * If there are none, it is be possible that the interrupt still
  532. * has PACA_IRQ_HARD_DIS set, which needs to be cleared for the
  533. * interrupted context. This clear will not clobber a new pending
  534. * interrupt coming in, because we're in the restart section, so
  535. * such would return to the restart location.
  536. */
  537. #ifdef CONFIG_PPC_BOOK3S
  538. lbz r11,PACAIRQHAPPENED(r13)
  539. andi. r11,r11,(~PACA_IRQ_HARD_DIS)@l
  540. bne- interrupt_return_\srr\()_kernel_restart
  541. #endif
  542. li r11,0
  543. stb r11,PACAIRQHAPPENED(r13) // clear the possible HARD_DIS
  544. .Lfast_kernel_interrupt_return_\srr\():
  545. SANITIZE_RESTORE_NVGPRS()
  546. cmpdi cr1,r3,0
  547. #ifdef CONFIG_PPC_BOOK3S
  548. .ifc \srr,srr
  549. lbz r4,PACASRR_VALID(r13)
  550. .else
  551. lbz r4,PACAHSRR_VALID(r13)
  552. .endif
  553. cmpdi r4,0
  554. li r4,0
  555. bne 1f
  556. #endif
  557. ld r11,_NIP(r1)
  558. ld r12,_MSR(r1)
  559. .ifc \srr,srr
  560. mtspr SPRN_SRR0,r11
  561. mtspr SPRN_SRR1,r12
  562. 1:
  563. #ifdef CONFIG_PPC_BOOK3S
  564. stb r4,PACASRR_VALID(r13)
  565. #endif
  566. .else
  567. mtspr SPRN_HSRR0,r11
  568. mtspr SPRN_HSRR1,r12
  569. 1:
  570. #ifdef CONFIG_PPC_BOOK3S
  571. stb r4,PACAHSRR_VALID(r13)
  572. #endif
  573. .endif
  574. DEBUG_SRR_VALID \srr
  575. BEGIN_FTR_SECTION
  576. stdcx. r0,0,r1 /* to clear the reservation */
  577. FTR_SECTION_ELSE
  578. ldarx r0,0,r1
  579. ALT_FTR_SECTION_END_IFCLR(CPU_FTR_STCX_CHECKS_ADDRESS)
  580. ld r3,_LINK(r1)
  581. ld r4,_CTR(r1)
  582. ld r5,_XER(r1)
  583. ld r6,_CCR(r1)
  584. li r0,0
  585. REST_GPRS(7, 12, r1)
  586. mtlr r3
  587. mtctr r4
  588. mtspr SPRN_XER,r5
  589. /*
  590. * Leaving a stale STACK_FRAME_REGS_MARKER on the stack can confuse
  591. * the reliable stack unwinder later on. Clear it.
  592. */
  593. std r0,STACK_INT_FRAME_MARKER(r1)
  594. REST_GPRS(2, 5, r1)
  595. bne- cr1,1f /* emulate stack store */
  596. mtcr r6
  597. REST_GPR(6, r1)
  598. REST_GPR(0, r1)
  599. REST_GPR(1, r1)
  600. .ifc \srr,srr
  601. RFI_TO_KERNEL
  602. .else
  603. HRFI_TO_KERNEL
  604. .endif
  605. b . /* prevent speculative execution */
  606. 1: /*
  607. * Emulate stack store with update. New r1 value was already calculated
  608. * and updated in our interrupt regs by emulate_loadstore, but we can't
  609. * store the previous value of r1 to the stack before re-loading our
  610. * registers from it, otherwise they could be clobbered. Use
  611. * PACA_EXGEN as temporary storage to hold the store data, as
  612. * interrupts are disabled here so it won't be clobbered.
  613. */
  614. mtcr r6
  615. std r9,PACA_EXGEN+0(r13)
  616. addi r9,r1,INT_FRAME_SIZE /* get original r1 */
  617. REST_GPR(6, r1)
  618. REST_GPR(0, r1)
  619. REST_GPR(1, r1)
  620. std r9,0(r1) /* perform store component of stdu */
  621. ld r9,PACA_EXGEN+0(r13)
  622. .ifc \srr,srr
  623. RFI_TO_KERNEL
  624. .else
  625. HRFI_TO_KERNEL
  626. .endif
  627. b . /* prevent speculative execution */
  628. .Linterrupt_return_\srr\()_kernel_rst_end:
  629. #ifdef CONFIG_PPC_BOOK3S
  630. interrupt_return_\srr\()_kernel_restart:
  631. _ASM_NOKPROBE_SYMBOL(interrupt_return_\srr\()_kernel_restart)
  632. GET_PACA(r13)
  633. ld r1,PACA_EXIT_SAVE_R1(r13)
  634. LOAD_PACA_TOC()
  635. addi r3,r1,STACK_INT_FRAME_REGS
  636. li r11,IRQS_ALL_DISABLED
  637. stb r11,PACAIRQSOFTMASK(r13)
  638. bl CFUNC(interrupt_exit_kernel_restart)
  639. std r1,PACA_EXIT_SAVE_R1(r13) /* save r1 for restart */
  640. b .Linterrupt_return_\srr\()_kernel_rst_start
  641. 1:
  642. SOFT_MASK_TABLE(.Linterrupt_return_\srr\()_kernel_rst_start, 1b)
  643. RESTART_TABLE(.Linterrupt_return_\srr\()_kernel_rst_start, .Linterrupt_return_\srr\()_kernel_rst_end, interrupt_return_\srr\()_kernel_restart)
  644. #endif
  645. .endm
  646. interrupt_return_macro srr
  647. #ifdef CONFIG_PPC_BOOK3S
  648. interrupt_return_macro hsrr
  649. .globl __end_soft_masked
  650. __end_soft_masked:
  651. DEFINE_FIXED_SYMBOL(__end_soft_masked, text)
  652. #endif /* CONFIG_PPC_BOOK3S */
  653. #ifdef CONFIG_PPC_BOOK3S
  654. _GLOBAL(ret_from_fork_scv)
  655. bl CFUNC(schedule_tail)
  656. HANDLER_RESTORE_NVGPRS()
  657. li r3,0 /* fork() return value */
  658. b .Lsyscall_vectored_common_exit
  659. #endif
  660. _GLOBAL(ret_from_fork)
  661. bl CFUNC(schedule_tail)
  662. HANDLER_RESTORE_NVGPRS()
  663. li r3,0 /* fork() return value */
  664. b .Lsyscall_exit
  665. _GLOBAL(ret_from_kernel_user_thread)
  666. bl CFUNC(schedule_tail)
  667. mtctr r14
  668. mr r3,r15
  669. #ifdef CONFIG_PPC64_ELF_ABI_V2
  670. mr r12,r14
  671. #endif
  672. bctrl
  673. li r3,0
  674. /*
  675. * It does not matter whether this returns via the scv or sc path
  676. * because it returns as execve() and therefore has no calling ABI
  677. * (i.e., it sets registers according to the exec()ed entry point).
  678. */
  679. b .Lsyscall_exit
  680. _GLOBAL(start_kernel_thread)
  681. bl CFUNC(schedule_tail)
  682. mtctr r14
  683. mr r3,r15
  684. #ifdef CONFIG_PPC64_ELF_ABI_V2
  685. mr r12,r14
  686. #endif
  687. bctrl
  688. /*
  689. * This must not return. We actually want to BUG here, not WARN,
  690. * because BUG will exit the process which is what the kernel thread
  691. * should have done, which may give some hope of continuing.
  692. */
  693. 100: trap
  694. EMIT_BUG_ENTRY 100b,__FILE__,__LINE__,0