/* SPDX-License-Identifier: GPL-2.0 */
/*
 *
 * linux/arch/h8300/kernel/entry.S
 *
 * Yoshinori Sato <ysato@users.sourceforge.jp>
 * David McCullough <davidm@snapgear.com>
 *
 */

/*
 * entry.S
 * include exception/interrupt gateway
 * system call entry
 */

#include <linux/sys.h>
#include <asm/unistd.h>
#include <asm/setup.h>
#include <asm/segment.h>
#include <asm/linkage.h>
#include <asm/asm-offsets.h>
#include <asm/thread_info.h>
#include <asm/errno.h>
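
/*
 * Per-CPU macro definitions.  These hide the differences between the
 * H8/300H and H8S cores: SHLL2/SHLR2 shift a register left/right by
 * two bits (H8S has a two-bit shift form, H8/300H shifts one bit at a
 * time), SAVEREGS pushes er0-er3 (on H8S via stm.l) while RESTOREREGS
 * pops only er3/er2 because er1 and er0 are restored separately in
 * RESTORE_ALL, and SAVEEXR/RESTOREEXR copy the EXR register, which
 * exists only on the H8S.
 */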
#if defined(CONFIG_CPU_H8300H)
#define USERRET 8
INTERRUPTS = 64
        .h8300h
        .macro  SHLL2 reg
        shll.l  \reg
        shll.l  \reg
        .endm
        .macro  SHLR2 reg
        shlr.l  \reg
        shlr.l  \reg
        .endm
        .macro  SAVEREGS
        mov.l   er0,@-sp
        mov.l   er1,@-sp
        mov.l   er2,@-sp
        mov.l   er3,@-sp
        .endm
        .macro  RESTOREREGS
        mov.l   @sp+,er3
        mov.l   @sp+,er2
        .endm
        .macro  SAVEEXR
        .endm
        .macro  RESTOREEXR
        .endm
#endif
#if defined(CONFIG_CPU_H8S)
#define USERRET 10
#define USEREXR 8
INTERRUPTS = 128
        .h8300s
        .macro  SHLL2 reg
        shll.l  #2,\reg
        .endm
        .macro  SHLR2 reg
        shlr.l  #2,\reg
        .endm
        .macro  SAVEREGS
        stm.l   er0-er3,@-sp
        .endm
        .macro  RESTOREREGS
        ldm.l   @sp+,er2-er3
        .endm
        .macro  SAVEEXR
        mov.w   @(USEREXR:16,er0),r1
        mov.w   r1,@(LEXR-LER3:16,sp)   /* copy EXR */
        .endm
        .macro  RESTOREEXR
        mov.w   @(LEXR-LER1:16,sp),r1   /* restore EXR */
        mov.b   r1l,r1h
        mov.w   r1,@(USEREXR:16,er0)
        .endm
#endif

/* CPU context save/restore macros. */

        .macro  SAVE_ALL
        mov.l   er0,@-sp
        stc     ccr,r0l                 /* check kernel mode */
        btst    #4,r0l
        bne     5f
        /* user mode */
        mov.l   sp,@_sw_usp
        mov.l   @sp,er0                 /* restore saved er0 */
        orc     #0x10,ccr               /* switch kernel stack */
        mov.l   @_sw_ksp,sp
        sub.l   #(LRET-LORIG),sp        /* allocate LORIG - LRET */
        SAVEREGS
        mov.l   @_sw_usp,er0
        mov.l   @(USERRET:16,er0),er1   /* copy the RET addr */
        mov.l   er1,@(LRET-LER3:16,sp)
        SAVEEXR
        mov.l   @(LORIG-LER3:16,sp),er0
        mov.l   er0,@(LER0-LER3:16,sp)  /* copy ER0 */
        mov.w   e1,r1                   /* e1 high byte = ccr */
        and     #0xef,r1h               /* mask kernel-mode flag */
        bra     6f
5:
        /* kernel mode */
        mov.l   @sp,er0                 /* restore saved er0 */
        subs    #2,sp                   /* set dummy ccr */
        subs    #4,sp                   /* set dummy sp */
        SAVEREGS
        mov.w   @(LRET-LER3:16,sp),r1   /* copy old ccr */
6:
        mov.b   r1h,r1l
        mov.b   #0,r1h
        mov.w   r1,@(LCCR-LER3:16,sp)   /* set ccr */
        mov.l   @_sw_usp,er2
        mov.l   er2,@(LSP-LER3:16,sp)   /* set usp */
        mov.l   er6,@-sp                /* syscall arg #6 */
        mov.l   er5,@-sp                /* syscall arg #5 */
        mov.l   er4,@-sp                /* syscall arg #4 */
        .endm                           /* r1 = ccr */
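
/*
 * After SAVE_ALL the kernel stack holds the saved register frame
 * (struct pt_regs); the L* offsets used above - LER0..LVEC, plus LEXR
 * on H8S - come from asm-offsets via <asm/asm-offsets.h>.  On entry
 * from user mode the user stack pointer is parked in _sw_usp and the
 * frame is built on the kernel stack taken from _sw_ksp; CCR bit 4
 * distinguishes the two cases.
 */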

        .macro  RESTORE_ALL
        mov.l   @sp+,er4
        mov.l   @sp+,er5
        mov.l   @sp+,er6
        RESTOREREGS
        mov.w   @(LCCR-LER1:16,sp),r0   /* check kernel mode */
        btst    #4,r0l
        bne     7f
        orc     #0xc0,ccr
        mov.l   @(LSP-LER1:16,sp),er0
        mov.l   @(LER0-LER1:16,sp),er1  /* restore ER0 */
        mov.l   er1,@er0
        RESTOREEXR
        mov.w   @(LCCR-LER1:16,sp),r1   /* restore the RET addr */
        mov.b   r1l,r1h
        mov.b   @(LRET+1-LER1:16,sp),r1l
        mov.w   r1,e1
        mov.w   @(LRET+2-LER1:16,sp),r1
        mov.l   er1,@(USERRET:16,er0)
        mov.l   @sp+,er1
        add.l   #(LRET-LER1),sp         /* remove LORIG - LRET */
        mov.l   sp,@_sw_ksp
        andc    #0xef,ccr               /* switch to user mode */
        mov.l   er0,sp
        bra     8f
7:
        mov.l   @sp+,er1
        add.l   #10,sp
8:
        mov.l   @sp+,er0
        adds    #4,sp                   /* remove the sw created LVEC */
        rte
        .endm
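
/*
 * RESTORE_ALL unwinds the frame built by SAVE_ALL and returns with
 * rte.  If the saved CCR says the frame came from user mode, the
 * kernel stack pointer is written back to _sw_ksp, the saved return
 * address is copied back to the user stack, and sp is switched to the
 * user stack before the return.
 */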

        .globl  _system_call
        .globl  ret_from_exception
        .globl  ret_from_fork
        .globl  ret_from_kernel_thread
        .globl  ret_from_interrupt
        .globl  _interrupt_redirect_table
        .globl  _sw_ksp,_sw_usp
        .globl  _resume
        .globl  _interrupt_entry
        .globl  _trace_break
        .globl  _nmi

#if defined(CONFIG_ROMKERNEL)
        .section .int_redirect,"ax"
_interrupt_redirect_table:
#if defined(CONFIG_CPU_H8300H)
        .rept   7
        .long   0
        .endr
#endif
#if defined(CONFIG_CPU_H8S)
        .rept   5
        .long   0
        .endr
        jmp     @_trace_break
        .long   0
#endif
        jsr     @_interrupt_entry       /* NMI */
        jmp     @_system_call           /* TRAPA #0 (System call) */
        .long   0
#if defined(CONFIG_KGDB)
        jmp     @_kgdb_trap
#else
        .long   0
#endif
        jmp     @_trace_break           /* TRAPA #3 (breakpoint) */
        .rept   INTERRUPTS-12
        jsr     @_interrupt_entry
        .endr
#endif
#if defined(CONFIG_RAMKERNEL)
        .globl  _interrupt_redirect_table
        .section .bss
_interrupt_redirect_table:
        .space  4
#endif
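
/*
 * Common interrupt entry.  Each redirect table slot is a 4-byte
 * "jsr @_interrupt_entry", so the return address pushed by the jsr
 * identifies the slot: subtracting the table base, dividing by 4 and
 * subtracting 1 (the pushed address points past the entry) yields the
 * vector number handed to do_IRQ.
 */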

        .section .text
        .align  2
_interrupt_entry:
        SAVE_ALL
        /* r1l is saved ccr */
        mov.l   sp,er0
        add.l   #LVEC,er0
        btst    #4,r1l
        bne     1f
        /* user LVEC */
        mov.l   @_sw_usp,er0
        adds    #4,er0
1:
        mov.l   @er0,er0                /* LVEC address */
#if defined(CONFIG_ROMKERNEL)
        sub.l   #_interrupt_redirect_table,er0
#endif
#if defined(CONFIG_RAMKERNEL)
        mov.l   @_interrupt_redirect_table,er1
        sub.l   er1,er0
#endif
        SHLR2   er0
        dec.l   #1,er0
        mov.l   sp,er1
        subs    #4,er1                  /* adjust ret_pc */
#if defined(CONFIG_CPU_H8S)
        orc     #7,exr
#endif
        jsr     @do_IRQ
        jmp     @ret_from_interrupt
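
/*
 * System call entry (TRAPA #0).  Userspace passes the syscall number
 * in er0 and the first three arguments in er1-er3; er4-er6 are pushed
 * by SAVE_ALL as arguments 4-6.  The handler is looked up in
 * _sys_call_table and called with the saved er1-er3 reloaded into
 * er0-er2.
 */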

_system_call:
        subs    #4,sp                   /* dummy LVEC */
        SAVE_ALL
        /* er0: syscall nr */
        andc    #0xbf,ccr
        mov.l   er0,er4

        /* save top of frame */
        mov.l   sp,er0
        jsr     @set_esp0
        andc    #0x3f,ccr
        mov.l   sp,er2
        and.w   #0xe000,r2
        mov.l   @(TI_FLAGS:16,er2),er2
        and.w   #_TIF_WORK_SYSCALL_MASK,r2
        beq     1f
        mov.l   sp,er0
        jsr     @do_syscall_trace_enter
1:
        cmp.l   #__NR_syscalls,er4
        bcc     badsys
        SHLL2   er4
        mov.l   #_sys_call_table,er0
        add.l   er4,er0
        mov.l   @er0,er4
        beq     ret_from_exception:16
        mov.l   @(LER1:16,sp),er0
        mov.l   @(LER2:16,sp),er1
        mov.l   @(LER3:16,sp),er2
        jsr     @er4
        mov.l   er0,@(LER0:16,sp)       /* save the return value */
        mov.l   sp,er2
        and.w   #0xe000,r2
        mov.l   @(TI_FLAGS:16,er2),er2
        and.w   #_TIF_WORK_SYSCALL_MASK,r2
        beq     2f
        mov.l   sp,er0
        jsr     @do_syscall_trace_leave
2:
        orc     #0xc0,ccr
        bra     resume_userspace

badsys:
        mov.l   #-ENOSYS,er0
        mov.l   er0,@(LER0:16,sp)
        bra     resume_userspace

#if !defined(CONFIG_PREEMPT)
#define resume_kernel restore_all
#endif
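
/*
 * Common return paths.  resume_userspace loops until TI_FLAGS is
 * clean: reschedule when TIF_NEED_RESCHED is set, otherwise handle
 * pending signal/notification work, then fall through to restore_all.
 * Without CONFIG_PREEMPT a return to kernel mode goes straight to
 * restore_all; with it, resume_kernel may call preempt_schedule_irq
 * first.
 */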

ret_from_exception:
#if defined(CONFIG_PREEMPT)
        orc     #0xc0,ccr
#endif
ret_from_interrupt:
        mov.b   @(LCCR+1:16,sp),r0l
        btst    #4,r0l
        bne     resume_kernel:16        /* return from kernel */
resume_userspace:
        andc    #0xbf,ccr
        mov.l   sp,er4
        and.w   #0xe000,r4              /* er4 <- current thread info */
        mov.l   @(TI_FLAGS:16,er4),er1
        and.l   #_TIF_WORK_MASK,er1
        beq     restore_all:8
work_pending:
        btst    #TIF_NEED_RESCHED,r1l
        bne     work_resched:8
        /* work notifysig */
        mov.l   sp,er0
        subs    #4,er0                  /* er0: pt_regs */
        jsr     @do_notify_resume
        bra     resume_userspace:8
work_resched:
        mov.l   sp,er0
        jsr     @set_esp0
        jsr     @schedule
        bra     resume_userspace:8
restore_all:
        RESTORE_ALL                     /* Does RTE */

#if defined(CONFIG_PREEMPT)
resume_kernel:
        mov.l   @(TI_PRE_COUNT:16,er4),er0
        bne     restore_all:8
need_resched:
        mov.l   @(TI_FLAGS:16,er4),er0
        btst    #TIF_NEED_RESCHED,r0l
        beq     restore_all:8
        mov.b   @(LCCR+1:16,sp),r0l     /* Interrupt Enabled? */
        bmi     restore_all:8
        mov.l   sp,er0
        jsr     @set_esp0
        jsr     @preempt_schedule_irq
        bra     need_resched:8
#endif
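
/*
 * Fork return paths.  schedule_tail is called with the previous task
 * (handed over in er2), then the normal exception return is taken.  A
 * new kernel thread additionally fetches its argument from the saved
 * er4 slot and its entry function from the saved er5 slot, and calls
 * the function directly.
 */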

ret_from_fork:
        mov.l   er2,er0
        jsr     @schedule_tail
        jmp     @ret_from_exception

ret_from_kernel_thread:
        mov.l   er2,er0
        jsr     @schedule_tail
        mov.l   @(LER4:16,sp),er0
        mov.l   @(LER5:16,sp),er1
        jsr     @er1
        jmp     @ret_from_exception

_resume:
        /*
         * Beware - when entering resume, prev (the current task's
         * thread_struct) is in er0 and next (the new task's
         * thread_struct) is in er1, so don't change these registers
         * until their contents are no longer needed.
         */
        /* save ccr */
        sub.w   r3,r3
        stc     ccr,r3l
        mov.w   r3,@(THREAD_CCR+2:16,er0)

        /* disable interrupts */
        orc     #0xc0,ccr
        mov.l   @_sw_usp,er3
        mov.l   er3,@(THREAD_USP:16,er0)
        mov.l   sp,@(THREAD_KSP:16,er0)

        /* Skip address space switching if they are the same. */
        /* FIXME: what did we hack out of here, this does nothing! */

        mov.l   @(THREAD_USP:16,er1),er0
        mov.l   er0,@_sw_usp
        mov.l   @(THREAD_KSP:16,er1),sp

        /* restore status register */
        mov.w   @(THREAD_CCR+2:16,er1),r3
        ldc     r3l,ccr
        rts
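
/*
 * Trace/breakpoint trap.  LORIG is set to -1, presumably so the frame
 * is not treated as a system call.  The word preceding the saved user
 * PC is compared against 0x5730 (the trapa #3 opcode); when it does
 * not match, the saved PC is rewound by 2 before trace_trap is
 * called.
 */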
_trace_break:
        subs    #4,sp
        SAVE_ALL
        sub.l   er1,er1
        dec.l   #1,er1
        mov.l   er1,@(LORIG,sp)
        mov.l   sp,er0
        jsr     @set_esp0
        mov.l   @_sw_usp,er0
        mov.l   @er0,er1
        mov.w   @(-2:16,er1),r2
        cmp.w   #0x5730,r2
        beq     1f
        subs    #2,er1
        mov.l   er1,@er0
1:
        and.w   #0xff,e1
        mov.l   er1,er0
        jsr     @trace_trap
        jmp     @ret_from_exception
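
/*
 * NMI entry.  Build a fake LVEC pointing at redirect table entry 8 -
 * the address a jsr from the NMI slot (entry 7) would have pushed -
 * then reuse the common _interrupt_entry path.
 */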
_nmi:
        subs    #4,sp
        mov.l   er0,@-sp
        mov.l   @_interrupt_redirect_table,er0
        add.l   #8*4,er0
        mov.l   er0,@(4,sp)
        mov.l   @sp+,er0
        jmp     @_interrupt_entry

#if defined(CONFIG_KGDB)
_kgdb_trap:
        subs    #4,sp
        SAVE_ALL
        mov.l   sp,er0
        add.l   #LRET,er0
        mov.l   er0,@(LSP,sp)
        jsr     @set_esp0
        mov.l   sp,er0
        subs    #4,er0
        jsr     @h8300_kgdb_trap
        jmp     @ret_from_exception
#endif

        .section .bss
_sw_ksp:
        .space  4
_sw_usp:
        .space  4

        .end