lowlevel.S
  1. /* SPDX-License-Identifier: GPL-2.0+ */
  2. /*
  3. * (C) Copyright 2014-2015 Freescale Semiconductor
  4. *
  5. * Extracted from armv8/start.S
  6. */
  7. #include <config.h>
  8. #include <linux/linkage.h>
  9. #include <asm/gic.h>
  10. #include <asm/macro.h>
  11. #include <asm/arch-fsl-layerscape/soc.h>
  12. #ifdef CONFIG_MP
  13. #include <asm/arch/mp.h>
  14. #endif
  15. #ifdef CONFIG_FSL_LSCH3
  16. #include <asm/arch-fsl-layerscape/immap_lsch3.h>
  17. #endif
  18. #include <asm/u-boot.h>
/*
 * get_gic_offset - return the GIC base addresses for this SoC revision
 *
 * For LS1043A rev1.0 the GIC base address is 4K-aligned.
 * For LS1043A rev1.1, if DCFG_GIC400_ALIGN[GIC_ADDR_BIT] is set the GIC
 * base address is 4K-aligned, otherwise it is 64K-aligned.
 *
 * Output:
 *   x0: base address of GICD
 *   x1: base address of GICC (written only when CONFIG_GICV2)
 * Clobbers: x2, x3, x4, condition flags
 */
ENTRY(get_gic_offset)
	ldr	x0, =GICD_BASE			/* default: 4K-aligned GICD */
#ifdef CONFIG_GICV2
	ldr	x1, =GICC_BASE			/* default: 4K-aligned GICC */
#endif
#ifdef CONFIG_HAS_FEATURE_GIC64K_ALIGN
	ldr	x2, =DCFG_CCSR_SVR
	ldr	w2, [x2]
	rev	w2, w2				/* register is big-endian: byte-swap */
	lsr	w3, w2, #16			/* w3 = device-ID field of SVR */
	ldr	w4, =SVR_DEV(SVR_LS1043A)
	cmp	w3, w4
	b.ne	1f				/* not LS1043A: keep 4K bases */
	ands	w2, w2, #0xff			/* w2 = revision field */
	cmp	w2, #REV1_0
	b.eq	1f				/* rev1.0: always 4K-aligned */
	ldr	x2, =SCFG_GIC400_ALIGN
	ldr	w2, [x2]
	rev	w2, w2				/* register is big-endian: byte-swap */
	tbnz	w2, #GIC_ADDR_BIT, 1f		/* bit set: stay 4K-aligned */
	ldr	x0, =GICD_BASE_64K		/* otherwise use 64K-aligned bases */
#ifdef CONFIG_GICV2
	ldr	x1, =GICC_BASE_64K
#endif
1:
#endif
	ret
ENDPROC(get_gic_offset)
/*
 * smp_kick_all_cpus - release the secondary cores
 *
 * Lets the GIC driver kick the secondaries with SGI 0.  No-op when
 * neither GICv2 nor GICv3 is configured.
 * Clobbers: x29 (holds LR across the calls) plus whatever
 * get_gic_offset and gic_kick_secondary_cpus clobber.
 */
ENTRY(smp_kick_all_cpus)
	/* Kick secondary cpus up by SGI 0 interrupt */
#if defined(CONFIG_GICV2) || defined(CONFIG_GICV3)
	mov	x29, lr			/* Save LR */
	bl	get_gic_offset		/* x0 = GICD (x1 = GICC on GICv2) */
	bl	gic_kick_secondary_cpus
	mov	lr, x29			/* Restore LR */
#endif
	ret
ENDPROC(smp_kick_all_cpus)
/*
 * lowlevel_init - early platform init, called from the armv8 start code
 *
 * EL3-only work (skipped at EL2/EL1 via switch_el): CCN-504 interconnect
 * setup (aux control, DVM domain, RN-I QoS), SMMU page size, secure GIC
 * initialization, TZPC/TZASC non-secure access setup and LS1046A L2 RAM
 * latencies.  With CONFIG_MP && CONFIG_ARMV8_MULTIENTRY, slave cores
 * branch to secondary_boot_func and do not come back here.
 * LR is preserved in x29; many other registers are clobbered.
 */
ENTRY(lowlevel_init)
	mov	x29, lr			/* Save LR */
	switch_el x1, 1f, 100f, 100f	/* skip if not in EL3 */
1:
#if defined (CONFIG_SYS_FSL_HAS_CCN504)
	/* Set Wuo bit for RN-I 20 */
#ifdef CONFIG_ARCH_LS2080A
	ldr	x0, =CCI_AUX_CONTROL_BASE(20)
	ldr	x1, =0x00000010
	bl	ccn504_set_aux
	/*
	 * Set forced-order mode in RNI-6, RNI-20
	 * This is required for performance optimization on LS2088A
	 * LS2080A family does not support setting forced-order mode,
	 * so skip this operation for LS2080A family
	 */
	bl	get_svr
	lsr	w0, w0, #16		/* keep device-ID field of SVR */
	ldr	w1, =SVR_DEV(SVR_LS2080A)
	cmp	w0, w1
	b.eq	1f			/* LS2080A: skip forced-order setup */
	ldr	x0, =CCI_AUX_CONTROL_BASE(6)
	ldr	x1, =0x00000020
	bl	ccn504_set_aux
	ldr	x0, =CCI_AUX_CONTROL_BASE(20)
	ldr	x1, =0x00000020
	bl	ccn504_set_aux
1:
#endif
	/* Add fully-coherent masters to DVM domain */
	ldr	x0, =CCI_MN_BASE
	ldr	x1, =CCI_MN_RNF_NODEID_LIST
	ldr	x2, =CCI_MN_DVM_DOMAIN_CTL_SET
	bl	ccn504_add_masters_to_dvm
	/*
	 * Set all RN-I ports to QoS of 15: program 0x00FF000C into the
	 * S0/S1/S2 QoS control registers of RN-I ports 0, 2, 6, 12, 16
	 * and 20 (one ccn504_set_qos call per register).
	 */
	ldr	x0, =CCI_S0_QOS_CONTROL_BASE(0)
	ldr	x1, =0x00FF000C
	bl	ccn504_set_qos
	ldr	x0, =CCI_S1_QOS_CONTROL_BASE(0)
	ldr	x1, =0x00FF000C
	bl	ccn504_set_qos
	ldr	x0, =CCI_S2_QOS_CONTROL_BASE(0)
	ldr	x1, =0x00FF000C
	bl	ccn504_set_qos
	ldr	x0, =CCI_S0_QOS_CONTROL_BASE(2)
	ldr	x1, =0x00FF000C
	bl	ccn504_set_qos
	ldr	x0, =CCI_S1_QOS_CONTROL_BASE(2)
	ldr	x1, =0x00FF000C
	bl	ccn504_set_qos
	ldr	x0, =CCI_S2_QOS_CONTROL_BASE(2)
	ldr	x1, =0x00FF000C
	bl	ccn504_set_qos
	ldr	x0, =CCI_S0_QOS_CONTROL_BASE(6)
	ldr	x1, =0x00FF000C
	bl	ccn504_set_qos
	ldr	x0, =CCI_S1_QOS_CONTROL_BASE(6)
	ldr	x1, =0x00FF000C
	bl	ccn504_set_qos
	ldr	x0, =CCI_S2_QOS_CONTROL_BASE(6)
	ldr	x1, =0x00FF000C
	bl	ccn504_set_qos
	ldr	x0, =CCI_S0_QOS_CONTROL_BASE(12)
	ldr	x1, =0x00FF000C
	bl	ccn504_set_qos
	ldr	x0, =CCI_S1_QOS_CONTROL_BASE(12)
	ldr	x1, =0x00FF000C
	bl	ccn504_set_qos
	ldr	x0, =CCI_S2_QOS_CONTROL_BASE(12)
	ldr	x1, =0x00FF000C
	bl	ccn504_set_qos
	ldr	x0, =CCI_S0_QOS_CONTROL_BASE(16)
	ldr	x1, =0x00FF000C
	bl	ccn504_set_qos
	ldr	x0, =CCI_S1_QOS_CONTROL_BASE(16)
	ldr	x1, =0x00FF000C
	bl	ccn504_set_qos
	ldr	x0, =CCI_S2_QOS_CONTROL_BASE(16)
	ldr	x1, =0x00FF000C
	bl	ccn504_set_qos
	ldr	x0, =CCI_S0_QOS_CONTROL_BASE(20)
	ldr	x1, =0x00FF000C
	bl	ccn504_set_qos
	ldr	x0, =CCI_S1_QOS_CONTROL_BASE(20)
	ldr	x1, =0x00FF000C
	bl	ccn504_set_qos
	ldr	x0, =CCI_S2_QOS_CONTROL_BASE(20)
	ldr	x1, =0x00FF000C
	bl	ccn504_set_qos
#endif /* CONFIG_SYS_FSL_HAS_CCN504 */

#ifdef SMMU_BASE
	/* Set the SMMU page size in the sACR register */
	ldr	x1, =SMMU_BASE
	ldr	w0, [x1, #0x10]
	orr	w0, w0, #1 << 16	/* set sACR.pagesize to indicate 64K page */
	str	w0, [x1, #0x10]
#endif

	/* Initialize GIC Secure Bank Status */
#if defined(CONFIG_GICV2) || defined(CONFIG_GICV3)
	branch_if_slave x0, 1f		/* only the master inits the distributor */
	bl	get_gic_offset
	bl	gic_init_secure
1:
#ifdef CONFIG_GICV3
	ldr	x0, =GICR_BASE		/* per-CPU redistributor init */
	bl	gic_init_secure_percpu
#elif defined(CONFIG_GICV2)
	bl	get_gic_offset		/* x0 = GICD, x1 = GICC */
	bl	gic_init_secure_percpu
#endif
#endif

100:
	branch_if_master x0, x1, 2f
#if defined(CONFIG_MP) && defined(CONFIG_ARMV8_MULTIENTRY)
	/* Slave cores park in the spin table and never return */
	ldr	x0, =secondary_boot_func
	blr	x0
#endif

2:
	switch_el x1, 1f, 100f, 100f	/* skip if not in EL3 */
1:
#ifdef CONFIG_FSL_TZPC_BP147
	/* Set Non Secure access for all devices protected via TZPC */
	ldr	x1, =TZPCDECPROT_0_SET_BASE /* Decode Protection-0 Set Reg */
	/*
	 * NOTE(review): w0 is not loaded here before the orr, so bits left
	 * over from earlier code are written along with bit 3 — the
	 * register is a "set" register, so extra 1-bits open additional
	 * devices to NS; confirm this is intentional.
	 */
	orr	w0, w0, #1 << 3		/* DCFG_RESET is accessible from NS world */
	str	w0, [x1]
	isb
	dsb	sy
#endif

#ifdef CONFIG_FSL_TZASC_400
	/*
	 * LS2080 and its personalities does not support TZASC
	 * So skip TZASC related operations
	 */
	bl	get_svr
	lsr	w0, w0, #16		/* keep device-ID field of SVR */
	ldr	w1, =SVR_DEV(SVR_LS2080A)
	cmp	w0, w1
	b.eq	1f
	/* Set TZASC so that:
	 * a. We use only Region0 whose global secure write/read is EN
	 * b. We use only Region0 whose NSAID write/read is EN
	 *
	 * NOTE: As per the CCSR map doc, TZASC 3 and TZASC 4 are just
	 * placeholders.
	 */
	/*
	 * tzasc_prog: program TZASC instance \xreg (0-based): open the
	 * Filter-0 gate, enable Region-0 secure global read/write and all
	 * NSAID read/write.  Instances are 0x10000 apart from TZASC1_BASE.
	 * Clobbers x0, x1, x12, x14, x16.
	 */
	.macro tzasc_prog, xreg
	mov	x12, TZASC1_BASE
	mov	x16, #0x10000		/* stride between TZASC instances */
	mul	x14, \xreg, x16
	add	x14, x14, x12		/* x14 = this instance's base */
	mov	x1, #0x8
	add	x1, x1, x14
	ldr	w0, [x1]		/* Filter 0 Gate Keeper Register */
	orr	w0, w0, #1 << 0		/* Set open_request for Filter 0 */
	str	w0, [x1]
	mov	x1, #0x110
	add	x1, x1, x14
	ldr	w0, [x1]		/* Region-0 Attributes Register */
	orr	w0, w0, #1 << 31	/* Set Sec global write en, Bit[31] */
	orr	w0, w0, #1 << 30	/* Set Sec global read en, Bit[30] */
	str	w0, [x1]
	mov	x1, #0x114
	add	x1, x1, x14
	ldr	w0, [x1]		/* Region-0 Access Register */
	mov	w0, #0xFFFFFFFF		/* Set nsaid_wr_en and nsaid_rd_en */
	str	w0, [x1]
	.endm
#ifdef CONFIG_FSL_TZASC_1
	mov	x13, #0			/* instance 0 = TZASC 1 */
	tzasc_prog	x13
#endif
#ifdef CONFIG_FSL_TZASC_2
	mov	x13, #1			/* instance 1 = TZASC 2 */
	tzasc_prog	x13
#endif
	isb
	dsb	sy
#endif
100:
1:
#ifdef CONFIG_ARCH_LS1046A
	switch_el x1, 1f, 100f, 100f	/* skip if not in EL3 */
1:
	/* Initialize the L2 RAM latency */
	mrs	x1, S3_1_c11_c0_2	/* IMPLEMENTATION DEFINED L2 control reg */
	mov	x0, #0x1C7		/* mask of both latency fields */
	/* Clear L2 Tag RAM latency and L2 Data RAM latency */
	bic	x1, x1, x0
	/* Set L2 data ram latency bits [2:0] */
	orr	x1, x1, #0x2
	/* set L2 tag ram latency bits [8:6] */
	orr	x1, x1, #0x80
	msr	S3_1_c11_c0_2, x1
	isb
100:
#endif

#if defined(CONFIG_FSL_LSCH2) && !defined(CONFIG_SPL_BUILD)
	bl	fsl_ocram_init		/* scrub OCRAM + clear ECC status */
#endif
	mov	lr, x29			/* Restore LR */
	ret
ENDPROC(lowlevel_init)
  268. #if defined(CONFIG_FSL_LSCH2) && !defined(CONFIG_SPL_BUILD)
/*
 * fsl_ocram_init - scrub OCRAM and clear latched ECC error status
 *
 * Keeps LR in x28 (x29 is already in use by lowlevel_init's LR save).
 */
ENTRY(fsl_ocram_init)
	mov	x28, lr			/* Save LR */
	bl	fsl_clear_ocram		/* zero-fill the whole OCRAM */
	bl	fsl_ocram_clear_ecc_err	/* clear ECC status set by the scrub */
	mov	lr, x28			/* Restore LR */
	ret
ENDPROC(fsl_ocram_init)
  276. ENTRY(fsl_clear_ocram)
  277. /* Clear OCRAM */
  278. ldr x0, =CONFIG_SYS_FSL_OCRAM_BASE
  279. ldr x1, =(CONFIG_SYS_FSL_OCRAM_BASE + CONFIG_SYS_FSL_OCRAM_SIZE)
  280. mov x2, #0
  281. clear_loop:
  282. str x2, [x0]
  283. add x0, x0, #8
  284. cmp x0, x1
  285. b.lo clear_loop
  286. ret
  287. ENDPROC(fsl_clear_ocram)
  288. ENTRY(fsl_ocram_clear_ecc_err)
  289. /* OCRAM1/2 ECC status bit */
  290. mov w1, #0x60
  291. ldr x0, =DCSR_DCFG_SBEESR2
  292. str w1, [x0]
  293. ldr x0, =DCSR_DCFG_MBEESR2
  294. str w1, [x0]
  295. ret
  296. ENDPROC(fsl_ocram_init)
  297. #endif
  298. #ifdef CONFIG_FSL_LSCH3
/*
 * get_svr - read the System Version Register from CCSR
 * Out: w0 = SVR value as read (no byte swap applied here)
 * Clobbers: x1
 */
.globl get_svr
get_svr:
	ldr	x1, =FSL_LSCH3_SVR
	ldr	w0, [x1]
	ret
  304. #endif
  305. #ifdef CONFIG_SYS_FSL_HAS_CCN504
/*
 * hnf_pstate_poll - wait for all 8 HN-F nodes to report a power state
 *
 * In:  x0 = desired HNF_PSTATE_STATUS value
 * Out: x0 = 0 for success, 1 for timeout
 * Clobbers x1, x2, x3, x4, x6, x7
 *
 * Polls HNF0..HNF7 PSTATE_STATUS (0x0420_0018, nodes 0x10000 apart).
 * A single deadline of 1200 generic-timer ticks after entry (per the
 * original comment, ~100 us) covers the whole walk, not each node.
 */
hnf_pstate_poll:
	mov	x1, x0			/* x1 = status value to wait for */
	mov	x7, #0			/* flag for timeout */
	mrs	x3, cntpct_el0		/* read timer */
	add	x3, x3, #1200		/* timeout after 100 microseconds */
	mov	x0, #0x18
	movk	x0, #0x420, lsl #16	/* HNF0_PSTATE_STATUS */
	mov	w6, #8			/* HN-F node count */
1:
	ldr	x2, [x0]
	cmp	x2, x1			/* check status */
	b.eq	2f			/* this node ready: move on */
	mrs	x4, cntpct_el0
	cmp	x4, x3
	b.ls	1b			/* deadline not reached: poll again */
	mov	x7, #1			/* timeout */
	b	3f
2:
	add	x0, x0, #0x10000	/* move to next node */
	subs	w6, w6, #1
	cbnz	w6, 1b
3:
	mov	x0, x7			/* return 0 = ok, 1 = timeout */
	ret
/*
 * hnf_set_pstate - request a power state on all 8 HN-F nodes
 *
 * In: x0 = desired state, OR-ed into HNF_PSTATE_REQ[1:0] of each node
 * Clobbers x1, x2, x6 (and x0, used as the register cursor)
 */
hnf_set_pstate:
	mov	x1, x0			/* x1 = requested pstate */
	/* power state to SFONLY */
	mov	w6, #8			/* HN-F node count */
	mov	x0, #0x10
	movk	x0, #0x420, lsl #16	/* HNF0_PSTATE_REQ */
1:	/* set pstate to sfonly */
	ldr	x2, [x0]
	and	x2, x2, #0xfffffffffffffffc /* & HNFPSTAT_MASK */
	orr	x2, x2, x1		/* merge in requested state */
	str	x2, [x0]
	add	x0, x0, #0x10000	/* move to next node */
	subs	w6, w6, #1
	cbnz	w6, 1b
	ret
ENTRY(__asm_flush_l3_dcache)
	/*
	 * Flush the CCN-504 L3 by driving all HN-F nodes through the
	 * SFONLY (snoop-filter only) and then FAM power states, polling
	 * for completion after each request.
	 * Return status in x0
	 *   success 0
	 *   timeout 1 for setting SFONLY, 2 for FAM, 3 for both
	 * Clobbers x1-x8 (via helpers) and x29 (LR save).
	 */
	mov	x29, lr
	mov	x8, #0			/* x8 accumulates the status code */
	dsb	sy
	mov	x0, #0x1		/* HNFPSTAT_SFONLY */
	bl	hnf_set_pstate
	mov	x0, #0x4		/* SFONLY status */
	bl	hnf_pstate_poll
	cbz	x0, 1f
	mov	x8, #1			/* timeout */
1:
	dsb	sy
	mov	x0, #0x3		/* HNFPSTAT_FAM */
	bl	hnf_set_pstate
	mov	x0, #0xc		/* FAM status */
	bl	hnf_pstate_poll
	cbz	x0, 1f
	add	x8, x8, #0x2		/* FAM timed out as well */
1:
	mov	x0, x8
	mov	lr, x29
	ret
ENDPROC(__asm_flush_l3_dcache)
  377. #endif /* CONFIG_SYS_FSL_HAS_CCN504 */
  378. #ifdef CONFIG_MP
	/* Keep literals not used by the secondary boot code outside it */
	.ltorg
	/* Using 64 bit alignment since the spin table is accessed as data */
	.align 4
	.global secondary_boot_code
	/* Secondary Boot Code starts here */
secondary_boot_code:
	.global __spin_table
__spin_table:
	/* One entry of SPIN_TABLE_ELEM_SIZE bytes per CPU; the boot code
	 * below indexes it with LPID * 64, so SPIN_TABLE_ELEM_SIZE is
	 * presumably 64 — confirm against mp.h. */
	.space CONFIG_MAX_CPUS*SPIN_TABLE_ELEM_SIZE
	.align 2
/*
 * secondary_boot_func - park a secondary core in the spin table
 *
 * Each slave core computes its spin-table slot from MPIDR_EL1, records
 * its id and status, waits for an SGI + a nonzero jump address in its
 * slot, then drops to EL2 (and optionally EL1) at that address.
 * Does not return.
 */
ENTRY(secondary_boot_func)
	/*
	 * MPIDR_EL1 Fields:
	 * MPIDR[1:0] = AFF0_CPUID <- Core ID (0,1)
	 * MPIDR[7:2] = AFF0_RES
	 * MPIDR[15:8] = AFF1_CLUSTERID <- Cluster ID (0,1,2,3)
	 * MPIDR[23:16] = AFF2_CLUSTERID
	 * MPIDR[24] = MT
	 * MPIDR[29:25] = RES0
	 * MPIDR[30] = U
	 * MPIDR[31] = ME
	 * MPIDR[39:32] = AFF3
	 *
	 * Linear Processor ID (LPID) calculation from MPIDR_EL1:
	 * (We only use AFF0_CPUID and AFF1_CLUSTERID for now
	 * until AFF2_CLUSTERID and AFF3 have non-zero values)
	 *
	 * LPID = MPIDR[15:8] | MPIDR[1:0]
	 */
	mrs	x0, mpidr_el1
	ubfm	x1, x0, #8, #15		/* x1 = AFF1 (cluster id) */
	ubfm	x2, x0, #0, #1		/* x2 = AFF0 (core id) */
	orr	x10, x2, x1, lsl #2	/* x10 has LPID */
	ubfm	x9, x0, #0, #15		/* x9 contains MPIDR[15:0] */
	/*
	 * offset of the spin table element for this core from start of spin
	 * table (each elem is padded to 64 bytes)
	 */
	lsl	x1, x10, #6
	ldr	x0, =__spin_table
	/* physical address of this cpus spin table element */
	add	x11, x1, x0
	ldr	x0, =__real_cntfrq
	ldr	x0, [x0]
	msr	cntfrq_el0, x0		/* set with real frequency */
	str	x9, [x11, #16]		/* LPID */
	mov	x4, #1
	str	x4, [x11, #8]		/* STATUS */
	dsb	sy			/* publish slot before waiting */
#if defined(CONFIG_GICV3)
	gic_wait_for_interrupt_m x0
#elif defined(CONFIG_GICV2)
	bl	get_gic_offset
	mov	x0, x1			/* x0 = GICC base for the wait macro */
	gic_wait_for_interrupt_m x0, w1
#endif

slave_cpu:
	wfe
	ldr	x0, [x11]		/* jump address from spin-table slot */
	cbz	x0, slave_cpu		/* still zero: keep waiting */
#ifndef CONFIG_ARMV8_SWITCH_TO_EL1
	mrs	x1, sctlr_el2
#else
	mrs	x1, sctlr_el1
#endif
	tbz	x1, #25, cpu_is_le	/* SCTLR.EE clear: already LE */
	rev	x0, x0			/* BE to LE conversion */
cpu_is_le:
	ldr	x5, [x11, #24]		/* nonzero requests AArch32 entry */
	cbz	x5, 1f
#ifdef CONFIG_ARMV8_SWITCH_TO_EL1
	adr	x4, secondary_switch_to_el1
	ldr	x5, =ES_TO_AARCH64
#else
	ldr	x4, [x11]
	ldr	x5, =ES_TO_AARCH32
#endif
	bl	secondary_switch_to_el2
1:
#ifdef CONFIG_ARMV8_SWITCH_TO_EL1
	adr	x4, secondary_switch_to_el1
#else
	ldr	x4, [x11]
#endif
	ldr	x5, =ES_TO_AARCH64
	bl	secondary_switch_to_el2
ENDPROC(secondary_boot_func)
/*
 * secondary_switch_to_el2 - drop from EL3 to EL2 on a secondary core
 * In: x4 = entry point, x5 = target execution state (ES_TO_AARCH32/64)
 * Returns immediately if the core is not in EL3; otherwise the switch
 * macro transfers control to x4 and does not return.
 */
ENTRY(secondary_switch_to_el2)
	switch_el x6, 1f, 0f, 0f
0:	ret				/* not in EL3: nothing to do */
1:	armv8_switch_to_el2_m x4, x5, x6
ENDPROC(secondary_switch_to_el2)
/*
 * secondary_switch_to_el1 - drop from EL2 to EL1 on a secondary core
 *
 * Recomputes this core's spin-table slot from MPIDR_EL1 (same LPID
 * derivation as secondary_boot_func), reloads the jump address into x4
 * and the requested execution state into x5, then performs the switch.
 * Note the AArch32 path deliberately falls through / branches into the
 * shared switch_to_el1 tail below.
 */
ENTRY(secondary_switch_to_el1)
	mrs	x0, mpidr_el1
	ubfm	x1, x0, #8, #15		/* x1 = AFF1 (cluster id) */
	ubfm	x2, x0, #0, #1		/* x2 = AFF0 (core id) */
	orr	x10, x2, x1, lsl #2	/* x10 has LPID */
	lsl	x1, x10, #6		/* 64-byte spin-table elements */
	ldr	x0, =__spin_table
	/* physical address of this cpus spin table element */
	add	x11, x1, x0
	ldr	x4, [x11]		/* x4 = entry address from the slot */
	ldr	x5, [x11, #24]		/* nonzero requests AArch32 entry */
	cbz	x5, 2f
	ldr	x5, =ES_TO_AARCH32
	bl	switch_to_el1
2:	ldr	x5, =ES_TO_AARCH64	/* falls through into switch_to_el1 */
switch_to_el1:
	switch_el x6, 0f, 1f, 0f
0:	ret				/* not in EL2: nothing to do */
1:	armv8_switch_to_el1_m x4, x5, x6
ENDPROC(secondary_switch_to_el1)
	/* Ensure that the literals used by the secondary boot code are
	 * assembled within it (this is required so that we can protect
	 * this area with a single memreserve region
	 */
	.ltorg
	/* 64 bit alignment for elements accessed as data */
	.align 4
	.global __real_cntfrq
__real_cntfrq:
	/* Generic-timer frequency programmed into cntfrq_el0 by the
	 * secondary boot path above. */
	.quad COUNTER_FREQUENCY
	.globl __secondary_boot_code_size
	.type __secondary_boot_code_size, %object
	/* Secondary Boot Code ends here */
__secondary_boot_code_size:
	/* Byte size of the region to memreserve (spin table + code). */
	.quad .-secondary_boot_code
  507. #endif