/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (C) 2012 Regents of the University of California
 */

#include <asm/asm-offsets.h>
#include <asm/asm.h>
#include <linux/init.h>
#include <linux/linkage.h>
#include <asm/thread_info.h>
#include <asm/page.h>
#include <asm/pgtable.h>
#include <asm/csr.h>
#include <asm/hwcap.h>
#include <asm/image.h>
#include <asm/scs.h>
#include <asm/xip_fixup.h>
#include "efi-header.S"
  18. __HEAD
  19. SYM_CODE_START(_start)
  20. /*
  21. * Image header expected by Linux boot-loaders. The image header data
  22. * structure is described in asm/image.h.
  23. * Do not modify it without modifying the structure and all bootloaders
  24. * that expects this header format!!
  25. */
  26. #ifdef CONFIG_EFI
  27. /*
  28. * This instruction decodes to "MZ" ASCII required by UEFI.
  29. */
  30. c.li s4,-13
  31. j _start_kernel
  32. #else
  33. /* jump to start kernel */
  34. j _start_kernel
  35. /* reserved */
  36. .word 0
  37. #endif
  38. .balign 8
  39. #ifdef CONFIG_RISCV_M_MODE
  40. /* Image load offset (0MB) from start of RAM for M-mode */
  41. .dword 0
  42. #else
  43. #if __riscv_xlen == 64
  44. /* Image load offset(2MB) from start of RAM */
  45. .dword 0x200000
  46. #else
  47. /* Image load offset(4MB) from start of RAM */
  48. .dword 0x400000
  49. #endif
  50. #endif
  51. /* Effective size of kernel image */
  52. .dword _end - _start
  53. .dword __HEAD_FLAGS
  54. .word RISCV_HEADER_VERSION
  55. .word 0
  56. .dword 0
  57. .ascii RISCV_IMAGE_MAGIC
  58. .balign 4
  59. .ascii RISCV_IMAGE_MAGIC2
  60. #ifdef CONFIG_EFI
  61. .word pe_head_start - _start
  62. pe_head_start:
  63. __EFI_PE_HEADER
  64. #else
  65. .word 0
  66. #endif
  67. .align 2
  68. #ifdef CONFIG_MMU
  69. .global relocate_enable_mmu
  70. relocate_enable_mmu:
  71. /* Relocate return address */
  72. la a1, kernel_map
  73. XIP_FIXUP_OFFSET a1
  74. REG_L a1, KERNEL_MAP_VIRT_ADDR(a1)
  75. la a2, _start
  76. sub a1, a1, a2
  77. add ra, ra, a1
  78. /* Point stvec to virtual address of intruction after satp write */
  79. la a2, 1f
  80. add a2, a2, a1
  81. csrw CSR_TVEC, a2
  82. /* Compute satp for kernel page tables, but don't load it yet */
  83. srl a2, a0, PAGE_SHIFT
  84. la a1, satp_mode
  85. XIP_FIXUP_OFFSET a1
  86. REG_L a1, 0(a1)
  87. or a2, a2, a1
  88. /*
  89. * Load trampoline page directory, which will cause us to trap to
  90. * stvec if VA != PA, or simply fall through if VA == PA. We need a
  91. * full fence here because setup_vm() just wrote these PTEs and we need
  92. * to ensure the new translations are in use.
  93. */
  94. la a0, trampoline_pg_dir
  95. XIP_FIXUP_OFFSET a0
  96. srl a0, a0, PAGE_SHIFT
  97. or a0, a0, a1
  98. sfence.vma
  99. csrw CSR_SATP, a0
  100. .align 2
  101. 1:
  102. /* Set trap vector to spin forever to help debug */
  103. la a0, .Lsecondary_park
  104. csrw CSR_TVEC, a0
  105. /* Reload the global pointer */
  106. load_global_pointer
  107. /*
  108. * Switch to kernel page tables. A full fence is necessary in order to
  109. * avoid using the trampoline translations, which are only correct for
  110. * the first superpage. Fetching the fence is guaranteed to work
  111. * because that first superpage is translated the same way.
  112. */
  113. csrw CSR_SATP, a2
  114. sfence.vma
  115. ret
  116. #endif /* CONFIG_MMU */
  117. #ifdef CONFIG_SMP
  118. .global secondary_start_sbi
  119. secondary_start_sbi:
  120. /* Mask all interrupts */
  121. csrw CSR_IE, zero
  122. csrw CSR_IP, zero
  123. /* Load the global pointer */
  124. load_global_pointer
  125. /*
  126. * Disable FPU & VECTOR to detect illegal usage of
  127. * floating point or vector in kernel space
  128. */
  129. li t0, SR_FS_VS
  130. csrc CSR_STATUS, t0
  131. /* Set trap vector to spin forever to help debug */
  132. la a3, .Lsecondary_park
  133. csrw CSR_TVEC, a3
  134. /* a0 contains the hartid & a1 contains boot data */
  135. li a2, SBI_HART_BOOT_TASK_PTR_OFFSET
  136. XIP_FIXUP_OFFSET a2
  137. add a2, a2, a1
  138. REG_L tp, (a2)
  139. li a3, SBI_HART_BOOT_STACK_PTR_OFFSET
  140. XIP_FIXUP_OFFSET a3
  141. add a3, a3, a1
  142. REG_L sp, (a3)
  143. .Lsecondary_start_common:
  144. #ifdef CONFIG_MMU
  145. /* Enable virtual memory and relocate to virtual address */
  146. la a0, swapper_pg_dir
  147. XIP_FIXUP_OFFSET a0
  148. call relocate_enable_mmu
  149. #endif
  150. call .Lsetup_trap_vector
  151. scs_load_current
  152. call smp_callin
  153. #endif /* CONFIG_SMP */
  154. .align 2
  155. .Lsecondary_park:
  156. /*
  157. * Park this hart if we:
  158. * - have too many harts on CONFIG_RISCV_BOOT_SPINWAIT
  159. * - receive an early trap, before setup_trap_vector finished
  160. * - fail in smp_callin(), as a successful one wouldn't return
  161. */
  162. wfi
  163. j .Lsecondary_park
  164. .align 2
  165. .Lsetup_trap_vector:
  166. /* Set trap vector to exception handler */
  167. la a0, handle_exception
  168. csrw CSR_TVEC, a0
  169. /*
  170. * Set sup0 scratch register to 0, indicating to exception vector that
  171. * we are presently executing in kernel.
  172. */
  173. csrw CSR_SCRATCH, zero
  174. ret
  175. SYM_CODE_END(_start)
  176. SYM_CODE_START(_start_kernel)
  177. /* Mask all interrupts */
  178. csrw CSR_IE, zero
  179. csrw CSR_IP, zero
  180. #ifdef CONFIG_RISCV_M_MODE
  181. /* flush the instruction cache */
  182. fence.i
  183. /* Reset all registers except ra, a0, a1 */
  184. call reset_regs
  185. /*
  186. * Setup a PMP to permit access to all of memory. Some machines may
  187. * not implement PMPs, so we set up a quick trap handler to just skip
  188. * touching the PMPs on any trap.
  189. */
  190. la a0, .Lpmp_done
  191. csrw CSR_TVEC, a0
  192. li a0, -1
  193. csrw CSR_PMPADDR0, a0
  194. li a0, (PMP_A_NAPOT | PMP_R | PMP_W | PMP_X)
  195. csrw CSR_PMPCFG0, a0
  196. .align 2
  197. .Lpmp_done:
  198. /*
  199. * The hartid in a0 is expected later on, and we have no firmware
  200. * to hand it to us.
  201. */
  202. csrr a0, CSR_MHARTID
  203. #endif /* CONFIG_RISCV_M_MODE */
  204. /* Load the global pointer */
  205. load_global_pointer
  206. /*
  207. * Disable FPU & VECTOR to detect illegal usage of
  208. * floating point or vector in kernel space
  209. */
  210. li t0, SR_FS_VS
  211. csrc CSR_STATUS, t0
  212. #ifdef CONFIG_RISCV_BOOT_SPINWAIT
  213. li t0, CONFIG_NR_CPUS
  214. blt a0, t0, .Lgood_cores
  215. tail .Lsecondary_park
  216. .Lgood_cores:
  217. /* The lottery system is only required for spinwait booting method */
  218. #ifndef CONFIG_XIP_KERNEL
  219. /* Pick one hart to run the main boot sequence */
  220. la a3, hart_lottery
  221. li a2, 1
  222. amoadd.w a3, a2, (a3)
  223. bnez a3, .Lsecondary_start
  224. #else
  225. /* hart_lottery in flash contains a magic number */
  226. la a3, hart_lottery
  227. mv a2, a3
  228. XIP_FIXUP_OFFSET a2
  229. XIP_FIXUP_FLASH_OFFSET a3
  230. lw t1, (a3)
  231. amoswap.w t0, t1, (a2)
  232. /* first time here if hart_lottery in RAM is not set */
  233. beq t0, t1, .Lsecondary_start
  234. #endif /* CONFIG_XIP */
  235. #endif /* CONFIG_RISCV_BOOT_SPINWAIT */
  236. #ifdef CONFIG_XIP_KERNEL
  237. la sp, _end + THREAD_SIZE
  238. XIP_FIXUP_OFFSET sp
  239. mv s0, a0
  240. mv s1, a1
  241. call __copy_data
  242. /* Restore a0 & a1 copy */
  243. mv a0, s0
  244. mv a1, s1
  245. #endif
  246. #ifndef CONFIG_XIP_KERNEL
  247. /* Clear BSS for flat non-ELF images */
  248. la a3, __bss_start
  249. la a4, __bss_stop
  250. ble a4, a3, .Lclear_bss_done
  251. .Lclear_bss:
  252. REG_S zero, (a3)
  253. add a3, a3, RISCV_SZPTR
  254. blt a3, a4, .Lclear_bss
  255. .Lclear_bss_done:
  256. #endif
  257. la a2, boot_cpu_hartid
  258. XIP_FIXUP_OFFSET a2
  259. REG_S a0, (a2)
  260. /* Initialize page tables and relocate to virtual addresses */
  261. la tp, init_task
  262. la sp, init_thread_union + THREAD_SIZE
  263. XIP_FIXUP_OFFSET sp
  264. addi sp, sp, -PT_SIZE_ON_STACK
  265. scs_load_init_stack
  266. #ifdef CONFIG_BUILTIN_DTB
  267. la a0, __dtb_start
  268. XIP_FIXUP_OFFSET a0
  269. #else
  270. mv a0, a1
  271. #endif /* CONFIG_BUILTIN_DTB */
  272. /* Set trap vector to spin forever to help debug */
  273. la a3, .Lsecondary_park
  274. csrw CSR_TVEC, a3
  275. call setup_vm
  276. #ifdef CONFIG_MMU
  277. la a0, early_pg_dir
  278. XIP_FIXUP_OFFSET a0
  279. call relocate_enable_mmu
  280. #endif /* CONFIG_MMU */
  281. call .Lsetup_trap_vector
  282. /* Restore C environment */
  283. la tp, init_task
  284. la sp, init_thread_union + THREAD_SIZE
  285. addi sp, sp, -PT_SIZE_ON_STACK
  286. scs_load_current
  287. #ifdef CONFIG_KASAN
  288. call kasan_early_init
  289. #endif
  290. /* Start the kernel */
  291. call soc_early_init
  292. tail start_kernel
  293. #ifdef CONFIG_RISCV_BOOT_SPINWAIT
  294. .Lsecondary_start:
  295. /* Set trap vector to spin forever to help debug */
  296. la a3, .Lsecondary_park
  297. csrw CSR_TVEC, a3
  298. slli a3, a0, LGREG
  299. la a1, __cpu_spinwait_stack_pointer
  300. XIP_FIXUP_OFFSET a1
  301. la a2, __cpu_spinwait_task_pointer
  302. XIP_FIXUP_OFFSET a2
  303. add a1, a3, a1
  304. add a2, a3, a2
  305. /*
  306. * This hart didn't win the lottery, so we wait for the winning hart to
  307. * get far enough along the boot process that it should continue.
  308. */
  309. .Lwait_for_cpu_up:
  310. /* FIXME: We should WFI to save some energy here. */
  311. REG_L sp, (a1)
  312. REG_L tp, (a2)
  313. beqz sp, .Lwait_for_cpu_up
  314. beqz tp, .Lwait_for_cpu_up
  315. fence
  316. tail .Lsecondary_start_common
  317. #endif /* CONFIG_RISCV_BOOT_SPINWAIT */
  318. SYM_CODE_END(_start_kernel)
  319. #ifdef CONFIG_RISCV_M_MODE
  320. SYM_CODE_START_LOCAL(reset_regs)
  321. li sp, 0
  322. li gp, 0
  323. li tp, 0
  324. li t0, 0
  325. li t1, 0
  326. li t2, 0
  327. li s0, 0
  328. li s1, 0
  329. li a2, 0
  330. li a3, 0
  331. li a4, 0
  332. li a5, 0
  333. li a6, 0
  334. li a7, 0
  335. li s2, 0
  336. li s3, 0
  337. li s4, 0
  338. li s5, 0
  339. li s6, 0
  340. li s7, 0
  341. li s8, 0
  342. li s9, 0
  343. li s10, 0
  344. li s11, 0
  345. li t3, 0
  346. li t4, 0
  347. li t5, 0
  348. li t6, 0
  349. csrw CSR_SCRATCH, 0
  350. #ifdef CONFIG_FPU
  351. csrr t0, CSR_MISA
  352. andi t0, t0, (COMPAT_HWCAP_ISA_F | COMPAT_HWCAP_ISA_D)
  353. beqz t0, .Lreset_regs_done_fpu
  354. li t1, SR_FS
  355. csrs CSR_STATUS, t1
  356. fmv.s.x f0, zero
  357. fmv.s.x f1, zero
  358. fmv.s.x f2, zero
  359. fmv.s.x f3, zero
  360. fmv.s.x f4, zero
  361. fmv.s.x f5, zero
  362. fmv.s.x f6, zero
  363. fmv.s.x f7, zero
  364. fmv.s.x f8, zero
  365. fmv.s.x f9, zero
  366. fmv.s.x f10, zero
  367. fmv.s.x f11, zero
  368. fmv.s.x f12, zero
  369. fmv.s.x f13, zero
  370. fmv.s.x f14, zero
  371. fmv.s.x f15, zero
  372. fmv.s.x f16, zero
  373. fmv.s.x f17, zero
  374. fmv.s.x f18, zero
  375. fmv.s.x f19, zero
  376. fmv.s.x f20, zero
  377. fmv.s.x f21, zero
  378. fmv.s.x f22, zero
  379. fmv.s.x f23, zero
  380. fmv.s.x f24, zero
  381. fmv.s.x f25, zero
  382. fmv.s.x f26, zero
  383. fmv.s.x f27, zero
  384. fmv.s.x f28, zero
  385. fmv.s.x f29, zero
  386. fmv.s.x f30, zero
  387. fmv.s.x f31, zero
  388. csrw fcsr, 0
  389. /* note that the caller must clear SR_FS */
  390. .Lreset_regs_done_fpu:
  391. #endif /* CONFIG_FPU */
  392. #ifdef CONFIG_RISCV_ISA_V
  393. csrr t0, CSR_MISA
  394. li t1, COMPAT_HWCAP_ISA_V
  395. and t0, t0, t1
  396. beqz t0, .Lreset_regs_done_vector
  397. /*
  398. * Clear vector registers and reset vcsr
  399. * VLMAX has a defined value, VLEN is a constant,
  400. * and this form of vsetvli is defined to set vl to VLMAX.
  401. */
  402. li t1, SR_VS
  403. csrs CSR_STATUS, t1
  404. csrs CSR_VCSR, x0
  405. vsetvli t1, x0, e8, m8, ta, ma
  406. vmv.v.i v0, 0
  407. vmv.v.i v8, 0
  408. vmv.v.i v16, 0
  409. vmv.v.i v24, 0
  410. /* note that the caller must clear SR_VS */
  411. .Lreset_regs_done_vector:
  412. #endif /* CONFIG_RISCV_ISA_V */
  413. ret
  414. SYM_CODE_END(reset_regs)
  415. #endif /* CONFIG_RISCV_M_MODE */