/* head-nommu.S */
/*
 * linux/arch/arm/kernel/head-nommu.S
 *
 * Copyright (C) 1994-2002 Russell King
 * Copyright (C) 2003-2006 Hyok S. Choi
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * Common kernel startup code (no-MMU / non-paged memory management)
 *
 */
#include <linux/linkage.h>
#include <linux/init.h>
#include <linux/errno.h>
#include <asm/assembler.h>
#include <asm/ptrace.h>
#include <asm/asm-offsets.h>
#include <asm/memory.h>
#include <asm/cp15.h>
#include <asm/thread_info.h>
#include <asm/v7m.h>
#include <asm/mpu.h>
#include <asm/page.h>
/*
 * Kernel startup entry point.
 * ---------------------------
 *
 * This is normally called from the decompressor code.  The requirements
 * are: MMU = off, D-cache = off, I-cache = don't care, r0 = 0,
 * r1 = machine nr.
 *
 * See linux/arch/arm/tools/mach-types for the complete list of machine
 * numbers for r1.
 *
 */
  38. __HEAD
  39. #ifdef CONFIG_CPU_THUMBONLY
  40. .thumb
  41. ENTRY(stext)
  42. #else
  43. .arm
  44. ENTRY(stext)
  45. THUMB( badr r9, 1f ) @ Kernel is always entered in ARM.
  46. THUMB( bx r9 ) @ If this is a Thumb-2 kernel,
  47. THUMB( .thumb ) @ switch to Thumb now.
  48. THUMB(1: )
  49. #endif
  50. #ifdef CONFIG_ARM_VIRT_EXT
  51. bl __hyp_stub_install
  52. #endif
  53. @ ensure svc mode and all interrupts masked
  54. safe_svcmode_maskall r9
  55. @ and irqs disabled
  56. #if defined(CONFIG_CPU_CP15)
  57. mrc p15, 0, r9, c0, c0 @ get processor id
  58. #elif defined(CONFIG_CPU_V7M)
  59. ldr r9, =BASEADDR_V7M_SCB
  60. ldr r9, [r9, V7M_SCB_CPUID]
  61. #else
  62. ldr r9, =CONFIG_PROCESSOR_ID
  63. #endif
  64. bl __lookup_processor_type @ r5=procinfo r9=cpuid
  65. movs r10, r5 @ invalid processor (r5=0)?
  66. beq __error_p @ yes, error 'p'
  67. #ifdef CONFIG_ARM_MPU
  68. bl __setup_mpu
  69. #endif
  70. badr lr, 1f @ return (PIC) address
  71. ldr r12, [r10, #PROCINFO_INITFUNC]
  72. add r12, r12, r10
  73. ret r12
  74. 1: ldr lr, =__mmap_switched
  75. b __after_proc_init
  76. ENDPROC(stext)
  77. #ifdef CONFIG_SMP
  78. .text
  79. ENTRY(secondary_startup)
  80. /*
  81. * Common entry point for secondary CPUs.
  82. *
  83. * Ensure that we're in SVC mode, and IRQs are disabled. Lookup
  84. * the processor type - there is no need to check the machine type
  85. * as it has already been validated by the primary processor.
  86. */
  87. #ifdef CONFIG_ARM_VIRT_EXT
  88. bl __hyp_stub_install_secondary
  89. #endif
  90. safe_svcmode_maskall r9
  91. #ifndef CONFIG_CPU_CP15
  92. ldr r9, =CONFIG_PROCESSOR_ID
  93. #else
  94. mrc p15, 0, r9, c0, c0 @ get processor id
  95. #endif
  96. bl __lookup_processor_type @ r5=procinfo r9=cpuid
  97. movs r10, r5 @ invalid processor?
  98. beq __error_p @ yes, error 'p'
  99. ldr r7, __secondary_data
  100. #ifdef CONFIG_ARM_MPU
  101. bl __secondary_setup_mpu @ Initialize the MPU
  102. #endif
  103. badr lr, 1f @ return (PIC) address
  104. ldr r12, [r10, #PROCINFO_INITFUNC]
  105. add r12, r12, r10
  106. ret r12
  107. 1: bl __after_proc_init
  108. ldr sp, [r7, #12] @ set up the stack pointer
  109. mov fp, #0
  110. b secondary_start_kernel
  111. ENDPROC(secondary_startup)
  112. .type __secondary_data, %object
  113. __secondary_data:
  114. .long secondary_data
  115. #endif /* CONFIG_SMP */
  116. /*
  117. * Set the Control Register and Read the process ID.
  118. */
  119. .text
  120. __after_proc_init:
  121. M_CLASS(movw r12, #:lower16:BASEADDR_V7M_SCB)
  122. M_CLASS(movt r12, #:upper16:BASEADDR_V7M_SCB)
  123. #ifdef CONFIG_ARM_MPU
  124. M_CLASS(ldr r3, [r12, 0x50])
  125. AR_CLASS(mrc p15, 0, r3, c0, c1, 4) @ Read ID_MMFR0
  126. and r3, r3, #(MMFR0_PMSA) @ PMSA field
  127. teq r3, #(MMFR0_PMSAv7) @ PMSA v7
  128. beq 1f
  129. teq r3, #(MMFR0_PMSAv8) @ PMSA v8
  130. /*
  131. * Memory region attributes for PMSAv8:
  132. *
  133. * n = AttrIndx[2:0]
  134. * n MAIR
  135. * DEVICE_nGnRnE 000 00000000
  136. * NORMAL 001 11111111
  137. */
  138. ldreq r3, =PMSAv8_MAIR(0x00, PMSAv8_RGN_DEVICE_nGnRnE) | \
  139. PMSAv8_MAIR(0xff, PMSAv8_RGN_NORMAL)
  140. AR_CLASS(mcreq p15, 0, r3, c10, c2, 0) @ MAIR 0
  141. M_CLASS(streq r3, [r12, #PMSAv8_MAIR0])
  142. moveq r3, #0
  143. AR_CLASS(mcreq p15, 0, r3, c10, c2, 1) @ MAIR 1
  144. M_CLASS(streq r3, [r12, #PMSAv8_MAIR1])
  145. 1:
  146. #endif
  147. #ifdef CONFIG_CPU_CP15
  148. /*
  149. * CP15 system control register value returned in r0 from
  150. * the CPU init function.
  151. */
  152. #ifdef CONFIG_ARM_MPU
  153. biceq r0, r0, #CR_BR @ Disable the 'default mem-map'
  154. orreq r0, r0, #CR_M @ Set SCTRL.M (MPU on)
  155. #endif
  156. #if defined(CONFIG_ALIGNMENT_TRAP) && __LINUX_ARM_ARCH__ < 6
  157. orr r0, r0, #CR_A
  158. #else
  159. bic r0, r0, #CR_A
  160. #endif
  161. #ifdef CONFIG_CPU_DCACHE_DISABLE
  162. bic r0, r0, #CR_C
  163. #endif
  164. #ifdef CONFIG_CPU_BPREDICT_DISABLE
  165. bic r0, r0, #CR_Z
  166. #endif
  167. #ifdef CONFIG_CPU_ICACHE_DISABLE
  168. bic r0, r0, #CR_I
  169. #endif
  170. mcr p15, 0, r0, c1, c0, 0 @ write control reg
  171. instr_sync
  172. #elif defined (CONFIG_CPU_V7M)
  173. #ifdef CONFIG_ARM_MPU
  174. ldreq r3, [r12, MPU_CTRL]
  175. biceq r3, #MPU_CTRL_PRIVDEFENA
  176. orreq r3, #MPU_CTRL_ENABLE
  177. streq r3, [r12, MPU_CTRL]
  178. isb
  179. #endif
  180. /* For V7M systems we want to modify the CCR similarly to the SCTLR */
  181. #ifdef CONFIG_CPU_DCACHE_DISABLE
  182. bic r0, r0, #V7M_SCB_CCR_DC
  183. #endif
  184. #ifdef CONFIG_CPU_BPREDICT_DISABLE
  185. bic r0, r0, #V7M_SCB_CCR_BP
  186. #endif
  187. #ifdef CONFIG_CPU_ICACHE_DISABLE
  188. bic r0, r0, #V7M_SCB_CCR_IC
  189. #endif
  190. str r0, [r12, V7M_SCB_CCR]
  191. /* Pass exc_ret to __mmap_switched */
  192. mov r0, r10
  193. #endif /* CONFIG_CPU_CP15 elif CONFIG_CPU_V7M */
  194. ret lr
  195. ENDPROC(__after_proc_init)
  196. .ltorg
  197. #ifdef CONFIG_ARM_MPU
  198. #ifndef CONFIG_CPU_V7M
  199. /* Set which MPU region should be programmed */
  200. .macro set_region_nr tmp, rgnr, unused
  201. mov \tmp, \rgnr @ Use static region numbers
  202. mcr p15, 0, \tmp, c6, c2, 0 @ Write RGNR
  203. .endm
  204. /* Setup a single MPU region, either D or I side (D-side for unified) */
  205. .macro setup_region bar, acr, sr, side = PMSAv7_DATA_SIDE, unused
  206. mcr p15, 0, \bar, c6, c1, (0 + \side) @ I/DRBAR
  207. mcr p15, 0, \acr, c6, c1, (4 + \side) @ I/DRACR
  208. mcr p15, 0, \sr, c6, c1, (2 + \side) @ I/DRSR
  209. .endm
  210. #else
  211. .macro set_region_nr tmp, rgnr, base
  212. mov \tmp, \rgnr
  213. str \tmp, [\base, #PMSAv7_RNR]
  214. .endm
  215. .macro setup_region bar, acr, sr, unused, base
  216. lsl \acr, \acr, #16
  217. orr \acr, \acr, \sr
  218. str \bar, [\base, #PMSAv7_RBAR]
  219. str \acr, [\base, #PMSAv7_RASR]
  220. .endm
  221. #endif
  222. /*
  223. * Setup the MPU and initial MPU Regions. We create the following regions:
  224. * Region 0: Use this for probing the MPU details, so leave disabled.
  225. * Region 1: Background region - covers the whole of RAM as strongly ordered
  226. * Region 2: Normal, Shared, cacheable for RAM. From PHYS_OFFSET, size from r6
  227. * Region 3: Normal, shared, inaccessible from PL0 to protect the vectors page
  228. *
  229. * r6: Value to be written to DRSR (and IRSR if required) for PMSAv7_RAM_REGION
  230. */
  231. __HEAD
  232. ENTRY(__setup_mpu)
  233. /* Probe for v7 PMSA compliance */
  234. M_CLASS(movw r12, #:lower16:BASEADDR_V7M_SCB)
  235. M_CLASS(movt r12, #:upper16:BASEADDR_V7M_SCB)
  236. AR_CLASS(mrc p15, 0, r0, c0, c1, 4) @ Read ID_MMFR0
  237. M_CLASS(ldr r0, [r12, 0x50])
  238. and r0, r0, #(MMFR0_PMSA) @ PMSA field
  239. teq r0, #(MMFR0_PMSAv7) @ PMSA v7
  240. beq __setup_pmsa_v7
  241. teq r0, #(MMFR0_PMSAv8) @ PMSA v8
  242. beq __setup_pmsa_v8
  243. ret lr
  244. ENDPROC(__setup_mpu)
  245. ENTRY(__setup_pmsa_v7)
  246. /* Calculate the size of a region covering just the kernel */
  247. ldr r5, =PLAT_PHYS_OFFSET @ Region start: PHYS_OFFSET
  248. ldr r6, =(_end) @ Cover whole kernel
  249. sub r6, r6, r5 @ Minimum size of region to map
  250. clz r6, r6 @ Region size must be 2^N...
  251. rsb r6, r6, #31 @ ...so round up region size
  252. lsl r6, r6, #PMSAv7_RSR_SZ @ Put size in right field
  253. orr r6, r6, #(1 << PMSAv7_RSR_EN) @ Set region enabled bit
  254. /* Determine whether the D/I-side memory map is unified. We set the
  255. * flags here and continue to use them for the rest of this function */
  256. AR_CLASS(mrc p15, 0, r0, c0, c0, 4) @ MPUIR
  257. M_CLASS(ldr r0, [r12, #MPU_TYPE])
  258. ands r5, r0, #MPUIR_DREGION_SZMASK @ 0 size d region => No MPU
  259. bxeq lr
  260. tst r0, #MPUIR_nU @ MPUIR_nU = 0 for unified
  261. /* Setup second region first to free up r6 */
  262. set_region_nr r0, #PMSAv7_RAM_REGION, r12
  263. isb
  264. /* Full access from PL0, PL1, shared for CONFIG_SMP, cacheable */
  265. ldr r0, =PLAT_PHYS_OFFSET @ RAM starts at PHYS_OFFSET
  266. ldr r5,=(PMSAv7_AP_PL1RW_PL0RW | PMSAv7_RGN_NORMAL)
  267. setup_region r0, r5, r6, PMSAv7_DATA_SIDE, r12 @ PHYS_OFFSET, shared, enabled
  268. beq 1f @ Memory-map not unified
  269. setup_region r0, r5, r6, PMSAv7_INSTR_SIDE, r12 @ PHYS_OFFSET, shared, enabled
  270. 1: isb
  271. /* First/background region */
  272. set_region_nr r0, #PMSAv7_BG_REGION, r12
  273. isb
  274. /* Execute Never, strongly ordered, inaccessible to PL0, rw PL1 */
  275. mov r0, #0 @ BG region starts at 0x0
  276. ldr r5,=(PMSAv7_ACR_XN | PMSAv7_RGN_STRONGLY_ORDERED | PMSAv7_AP_PL1RW_PL0NA)
  277. mov r6, #PMSAv7_RSR_ALL_MEM @ 4GB region, enabled
  278. setup_region r0, r5, r6, PMSAv7_DATA_SIDE, r12 @ 0x0, BG region, enabled
  279. beq 2f @ Memory-map not unified
  280. setup_region r0, r5, r6, PMSAv7_INSTR_SIDE r12 @ 0x0, BG region, enabled
  281. 2: isb
  282. #ifdef CONFIG_XIP_KERNEL
  283. set_region_nr r0, #PMSAv7_ROM_REGION, r12
  284. isb
  285. ldr r5,=(PMSAv7_AP_PL1RO_PL0NA | PMSAv7_RGN_NORMAL)
  286. ldr r0, =CONFIG_XIP_PHYS_ADDR @ ROM start
  287. ldr r6, =(_exiprom) @ ROM end
  288. sub r6, r6, r0 @ Minimum size of region to map
  289. clz r6, r6 @ Region size must be 2^N...
  290. rsb r6, r6, #31 @ ...so round up region size
  291. lsl r6, r6, #PMSAv7_RSR_SZ @ Put size in right field
  292. orr r6, r6, #(1 << PMSAv7_RSR_EN) @ Set region enabled bit
  293. setup_region r0, r5, r6, PMSAv7_DATA_SIDE, r12 @ XIP_PHYS_ADDR, shared, enabled
  294. beq 3f @ Memory-map not unified
  295. setup_region r0, r5, r6, PMSAv7_INSTR_SIDE, r12 @ XIP_PHYS_ADDR, shared, enabled
  296. 3: isb
  297. #endif
  298. ret lr
  299. ENDPROC(__setup_pmsa_v7)
  300. ENTRY(__setup_pmsa_v8)
  301. mov r0, #0
  302. AR_CLASS(mcr p15, 0, r0, c6, c2, 1) @ PRSEL
  303. M_CLASS(str r0, [r12, #PMSAv8_RNR])
  304. isb
  305. #ifdef CONFIG_XIP_KERNEL
  306. ldr r5, =CONFIG_XIP_PHYS_ADDR @ ROM start
  307. ldr r6, =(_exiprom) @ ROM end
  308. sub r6, r6, #1
  309. bic r6, r6, #(PMSAv8_MINALIGN - 1)
  310. orr r5, r5, #(PMSAv8_AP_PL1RW_PL0NA | PMSAv8_RGN_SHARED)
  311. orr r6, r6, #(PMSAv8_LAR_IDX(PMSAv8_RGN_NORMAL) | PMSAv8_LAR_EN)
  312. AR_CLASS(mcr p15, 0, r5, c6, c8, 0) @ PRBAR0
  313. AR_CLASS(mcr p15, 0, r6, c6, c8, 1) @ PRLAR0
  314. M_CLASS(str r5, [r12, #PMSAv8_RBAR_A(0)])
  315. M_CLASS(str r6, [r12, #PMSAv8_RLAR_A(0)])
  316. #endif
  317. ldr r5, =KERNEL_START
  318. ldr r6, =KERNEL_END
  319. sub r6, r6, #1
  320. bic r6, r6, #(PMSAv8_MINALIGN - 1)
  321. orr r5, r5, #(PMSAv8_AP_PL1RW_PL0NA | PMSAv8_RGN_SHARED)
  322. orr r6, r6, #(PMSAv8_LAR_IDX(PMSAv8_RGN_NORMAL) | PMSAv8_LAR_EN)
  323. AR_CLASS(mcr p15, 0, r5, c6, c8, 4) @ PRBAR1
  324. AR_CLASS(mcr p15, 0, r6, c6, c8, 5) @ PRLAR1
  325. M_CLASS(str r5, [r12, #PMSAv8_RBAR_A(1)])
  326. M_CLASS(str r6, [r12, #PMSAv8_RLAR_A(1)])
  327. /* Setup Background: 0x0 - min(KERNEL_START, XIP_PHYS_ADDR) */
  328. #ifdef CONFIG_XIP_KERNEL
  329. ldr r6, =KERNEL_START
  330. ldr r5, =CONFIG_XIP_PHYS_ADDR
  331. cmp r6, r5
  332. movcs r6, r5
  333. #else
  334. ldr r6, =KERNEL_START
  335. #endif
  336. cmp r6, #0
  337. beq 1f
  338. mov r5, #0
  339. sub r6, r6, #1
  340. bic r6, r6, #(PMSAv8_MINALIGN - 1)
  341. orr r5, r5, #(PMSAv8_AP_PL1RW_PL0NA | PMSAv8_RGN_SHARED | PMSAv8_BAR_XN)
  342. orr r6, r6, #(PMSAv8_LAR_IDX(PMSAv8_RGN_DEVICE_nGnRnE) | PMSAv8_LAR_EN)
  343. AR_CLASS(mcr p15, 0, r5, c6, c9, 0) @ PRBAR2
  344. AR_CLASS(mcr p15, 0, r6, c6, c9, 1) @ PRLAR2
  345. M_CLASS(str r5, [r12, #PMSAv8_RBAR_A(2)])
  346. M_CLASS(str r6, [r12, #PMSAv8_RLAR_A(2)])
  347. 1:
  348. /* Setup Background: max(KERNEL_END, _exiprom) - 0xffffffff */
  349. #ifdef CONFIG_XIP_KERNEL
  350. ldr r5, =KERNEL_END
  351. ldr r6, =(_exiprom)
  352. cmp r5, r6
  353. movcc r5, r6
  354. #else
  355. ldr r5, =KERNEL_END
  356. #endif
  357. mov r6, #0xffffffff
  358. bic r6, r6, #(PMSAv8_MINALIGN - 1)
  359. orr r5, r5, #(PMSAv8_AP_PL1RW_PL0NA | PMSAv8_RGN_SHARED | PMSAv8_BAR_XN)
  360. orr r6, r6, #(PMSAv8_LAR_IDX(PMSAv8_RGN_DEVICE_nGnRnE) | PMSAv8_LAR_EN)
  361. AR_CLASS(mcr p15, 0, r5, c6, c9, 4) @ PRBAR3
  362. AR_CLASS(mcr p15, 0, r6, c6, c9, 5) @ PRLAR3
  363. M_CLASS(str r5, [r12, #PMSAv8_RBAR_A(3)])
  364. M_CLASS(str r6, [r12, #PMSAv8_RLAR_A(3)])
  365. #ifdef CONFIG_XIP_KERNEL
  366. /* Setup Background: min(_exiprom, KERNEL_END) - max(KERNEL_START, XIP_PHYS_ADDR) */
  367. ldr r5, =(_exiprom)
  368. ldr r6, =KERNEL_END
  369. cmp r5, r6
  370. movcs r5, r6
  371. ldr r6, =KERNEL_START
  372. ldr r0, =CONFIG_XIP_PHYS_ADDR
  373. cmp r6, r0
  374. movcc r6, r0
  375. sub r6, r6, #1
  376. bic r6, r6, #(PMSAv8_MINALIGN - 1)
  377. orr r5, r5, #(PMSAv8_AP_PL1RW_PL0NA | PMSAv8_RGN_SHARED | PMSAv8_BAR_XN)
  378. orr r6, r6, #(PMSAv8_LAR_IDX(PMSAv8_RGN_DEVICE_nGnRnE) | PMSAv8_LAR_EN)
  379. #ifdef CONFIG_CPU_V7M
  380. /* There is no alias for n == 4 */
  381. mov r0, #4
  382. str r0, [r12, #PMSAv8_RNR] @ PRSEL
  383. isb
  384. str r5, [r12, #PMSAv8_RBAR_A(0)]
  385. str r6, [r12, #PMSAv8_RLAR_A(0)]
  386. #else
  387. mcr p15, 0, r5, c6, c10, 0 @ PRBAR4
  388. mcr p15, 0, r6, c6, c10, 1 @ PRLAR4
  389. #endif
  390. #endif
  391. ret lr
  392. ENDPROC(__setup_pmsa_v8)
  393. #ifdef CONFIG_SMP
  394. /*
  395. * r6: pointer at mpu_rgn_info
  396. */
  397. .text
  398. ENTRY(__secondary_setup_mpu)
  399. /* Use MPU region info supplied by __cpu_up */
  400. ldr r6, [r7] @ get secondary_data.mpu_rgn_info
  401. /* Probe for v7 PMSA compliance */
  402. mrc p15, 0, r0, c0, c1, 4 @ Read ID_MMFR0
  403. and r0, r0, #(MMFR0_PMSA) @ PMSA field
  404. teq r0, #(MMFR0_PMSAv7) @ PMSA v7
  405. beq __secondary_setup_pmsa_v7
  406. teq r0, #(MMFR0_PMSAv8) @ PMSA v8
  407. beq __secondary_setup_pmsa_v8
  408. b __error_p
  409. ENDPROC(__secondary_setup_mpu)
  410. /*
  411. * r6: pointer at mpu_rgn_info
  412. */
  413. ENTRY(__secondary_setup_pmsa_v7)
  414. /* Determine whether the D/I-side memory map is unified. We set the
  415. * flags here and continue to use them for the rest of this function */
  416. mrc p15, 0, r0, c0, c0, 4 @ MPUIR
  417. ands r5, r0, #MPUIR_DREGION_SZMASK @ 0 size d region => No MPU
  418. beq __error_p
  419. ldr r4, [r6, #MPU_RNG_INFO_USED]
  420. mov r5, #MPU_RNG_SIZE
  421. add r3, r6, #MPU_RNG_INFO_RNGS
  422. mla r3, r4, r5, r3
  423. 1:
  424. tst r0, #MPUIR_nU @ MPUIR_nU = 0 for unified
  425. sub r3, r3, #MPU_RNG_SIZE
  426. sub r4, r4, #1
  427. set_region_nr r0, r4
  428. isb
  429. ldr r0, [r3, #MPU_RGN_DRBAR]
  430. ldr r6, [r3, #MPU_RGN_DRSR]
  431. ldr r5, [r3, #MPU_RGN_DRACR]
  432. setup_region r0, r5, r6, PMSAv7_DATA_SIDE
  433. beq 2f
  434. setup_region r0, r5, r6, PMSAv7_INSTR_SIDE
  435. 2: isb
  436. mrc p15, 0, r0, c0, c0, 4 @ Reevaluate the MPUIR
  437. cmp r4, #0
  438. bgt 1b
  439. ret lr
  440. ENDPROC(__secondary_setup_pmsa_v7)
  441. ENTRY(__secondary_setup_pmsa_v8)
  442. ldr r4, [r6, #MPU_RNG_INFO_USED]
  443. #ifndef CONFIG_XIP_KERNEL
  444. add r4, r4, #1
  445. #endif
  446. mov r5, #MPU_RNG_SIZE
  447. add r3, r6, #MPU_RNG_INFO_RNGS
  448. mla r3, r4, r5, r3
  449. 1:
  450. sub r3, r3, #MPU_RNG_SIZE
  451. sub r4, r4, #1
  452. mcr p15, 0, r4, c6, c2, 1 @ PRSEL
  453. isb
  454. ldr r5, [r3, #MPU_RGN_PRBAR]
  455. ldr r6, [r3, #MPU_RGN_PRLAR]
  456. mcr p15, 0, r5, c6, c3, 0 @ PRBAR
  457. mcr p15, 0, r6, c6, c3, 1 @ PRLAR
  458. cmp r4, #0
  459. bgt 1b
  460. ret lr
  461. ENDPROC(__secondary_setup_pmsa_v8)
  462. #endif /* CONFIG_SMP */
  463. #endif /* CONFIG_ARM_MPU */
  464. #include "head-common.S"