proc.S

/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Based on arch/arm/mm/proc.S
 *
 * Copyright (C) 2001 Deep Blue Solutions Ltd.
 * Copyright (C) 2012 ARM Ltd.
 * Author: Catalin Marinas <catalin.marinas@arm.com>
 */

#include <linux/init.h>
#include <linux/linkage.h>
#include <linux/pgtable.h>
#include <linux/cfi_types.h>
#include <asm/assembler.h>
#include <asm/asm-offsets.h>
#include <asm/asm_pointer_auth.h>
#include <asm/hwcap.h>
#include <asm/kernel-pgtable.h>
#include <asm/pgtable-hwdef.h>
#include <asm/cpufeature.h>
#include <asm/alternative.h>
#include <asm/smp.h>
#include <asm/sysreg.h>

#ifdef CONFIG_ARM64_64K_PAGES
#define TCR_TG_FLAGS	TCR_TG0_64K | TCR_TG1_64K
#elif defined(CONFIG_ARM64_16K_PAGES)
#define TCR_TG_FLAGS	TCR_TG0_16K | TCR_TG1_16K
#else /* CONFIG_ARM64_4K_PAGES */
#define TCR_TG_FLAGS	TCR_TG0_4K | TCR_TG1_4K
#endif

#ifdef CONFIG_RANDOMIZE_BASE
#define TCR_KASLR_FLAGS	TCR_NFD1
#else
#define TCR_KASLR_FLAGS	0
#endif

/* PTWs cacheable, inner/outer WBWA */
#define TCR_CACHE_FLAGS	TCR_IRGN_WBWA | TCR_ORGN_WBWA

#ifdef CONFIG_KASAN_SW_TAGS
#define TCR_KASAN_SW_FLAGS	TCR_TBI1 | TCR_TBID1
#else
#define TCR_KASAN_SW_FLAGS	0
#endif

#ifdef CONFIG_KASAN_HW_TAGS
#define TCR_MTE_FLAGS	TCR_TCMA1 | TCR_TBI1 | TCR_TBID1
#elif defined(CONFIG_ARM64_MTE)
/*
 * The mte_zero_clear_page_tags() implementation uses DC GZVA, which relies on
 * TBI being enabled at EL1.
 */
#define TCR_MTE_FLAGS	TCR_TBI1 | TCR_TBID1
#else
#define TCR_MTE_FLAGS	0
#endif

/*
 * Default MAIR_EL1. MT_NORMAL_TAGGED is initially mapped as Normal memory and
 * changed during mte_cpu_setup to Normal Tagged if the system supports MTE.
 */
#define MAIR_EL1_SET							\
	(MAIR_ATTRIDX(MAIR_ATTR_DEVICE_nGnRnE, MT_DEVICE_nGnRnE) |	\
	 MAIR_ATTRIDX(MAIR_ATTR_DEVICE_nGnRE, MT_DEVICE_nGnRE) |	\
	 MAIR_ATTRIDX(MAIR_ATTR_NORMAL_NC, MT_NORMAL_NC) |		\
	 MAIR_ATTRIDX(MAIR_ATTR_NORMAL, MT_NORMAL) |			\
	 MAIR_ATTRIDX(MAIR_ATTR_NORMAL, MT_NORMAL_TAGGED))
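
/*
 * Note: each MAIR_EL1 attribute field is one byte wide, so
 * MAIR_ATTRIDX(attr, idx) simply places "attr" at bits [idx * 8 + 7 : idx * 8];
 * the MT_* indices above select which byte of the register each memory type's
 * attribute encoding occupies.
 */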

#ifdef CONFIG_CPU_PM
/**
 * cpu_do_suspend - save CPU registers context
 *
 * x0: virtual address of context pointer
 *
 * This must be kept in sync with struct cpu_suspend_ctx in <asm/suspend.h>.
 */
SYM_FUNC_START(cpu_do_suspend)
	mrs	x2, tpidr_el0
	mrs	x3, tpidrro_el0
	mrs	x4, contextidr_el1
	mrs	x5, osdlr_el1
	mrs	x6, cpacr_el1
	mrs	x7, tcr_el1
	mrs	x8, vbar_el1
	mrs	x9, mdscr_el1
	mrs	x10, oslsr_el1
	mrs	x11, sctlr_el1
	get_this_cpu_offset x12
	mrs	x13, sp_el0
	stp	x2, x3, [x0]
	stp	x4, x5, [x0, #16]
	stp	x6, x7, [x0, #32]
	stp	x8, x9, [x0, #48]
	stp	x10, x11, [x0, #64]
	stp	x12, x13, [x0, #80]
	/*
	 * Save x18 as it may be used as a platform register, e.g. by shadow
	 * call stack.
	 */
	str	x18, [x0, #96]
	ret
SYM_FUNC_END(cpu_do_suspend)
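
/*
 * The save area written above is, in effect (byte offsets):
 *
 *   [ 0] tpidr_el0        [ 8] tpidrro_el0
 *   [16] contextidr_el1   [24] osdlr_el1
 *   [32] cpacr_el1        [40] tcr_el1
 *   [48] vbar_el1         [56] mdscr_el1
 *   [64] oslsr_el1        [72] sctlr_el1
 *   [80] per-CPU offset   [88] sp_el0
 *   [96] x18
 *
 * i.e. thirteen 64-bit slots, which must match struct cpu_suspend_ctx.
 */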

/**
 * cpu_do_resume - restore CPU register context
 *
 * x0: Address of context pointer
 */
SYM_FUNC_START(cpu_do_resume)
	ldp	x2, x3, [x0]
	ldp	x4, x5, [x0, #16]
	ldp	x6, x8, [x0, #32]
	ldp	x9, x10, [x0, #48]
	ldp	x11, x12, [x0, #64]
	ldp	x13, x14, [x0, #80]

	/*
	 * Restore x18, as it may be used as a platform register, and clear
	 * the buffer to minimize the risk of exposure when used for shadow
	 * call stack.
	 */
	ldr	x18, [x0, #96]
	str	xzr, [x0, #96]
	msr	tpidr_el0, x2
	msr	tpidrro_el0, x3
	msr	contextidr_el1, x4
	msr	cpacr_el1, x6

	/* Don't change t0sz here, mask those bits when restoring */
	mrs	x7, tcr_el1
	bfi	x8, x7, TCR_T0SZ_OFFSET, TCR_TxSZ_WIDTH

	msr	tcr_el1, x8
	msr	vbar_el1, x9
	msr	mdscr_el1, x10
	msr	sctlr_el1, x12
	set_this_cpu_offset x13
	msr	sp_el0, x14
	/*
	 * Restore oslsr_el1 by writing oslar_el1
	 */
	msr	osdlr_el1, x5
	ubfx	x11, x11, #1, #1
	msr	oslar_el1, x11
	reset_pmuserenr_el0 x0			// Disable PMU access from EL0
	reset_amuserenr_el0 x0			// Disable AMU access from EL0

alternative_if ARM64_HAS_RAS_EXTN
	msr_s	SYS_DISR_EL1, xzr
alternative_else_nop_endif

	ptrauth_keys_install_kernel_nosync x14, x1, x2, x3
	isb
	ret
SYM_FUNC_END(cpu_do_resume)
#endif

	.pushsection ".idmap.text", "a"

.macro	__idmap_cpu_set_reserved_ttbr1, tmp1, tmp2
	adrp	\tmp1, reserved_pg_dir
	phys_to_ttbr \tmp2, \tmp1
	offset_ttbr1 \tmp2, \tmp1
	msr	ttbr1_el1, \tmp2
	isb
	tlbi	vmalle1
	dsb	nsh
	isb
.endm
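
/*
 * reserved_pg_dir is an empty page table, so once it is installed and the
 * TLB has been invalidated, no kernel-VA translation remains live and no
 * new walks can be started through the old tables. The caller is then free
 * to write a new TTBR1_EL1 value.
 */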

/*
 * void idmap_cpu_replace_ttbr1(phys_addr_t ttbr1)
 *
 * This is the low-level counterpart to cpu_replace_ttbr1, and should not be
 * called by anything else. It can only be executed from a TTBR0 mapping.
 */
SYM_TYPED_FUNC_START(idmap_cpu_replace_ttbr1)
	__idmap_cpu_set_reserved_ttbr1 x1, x3

	offset_ttbr1 x0, x3
	msr	ttbr1_el1, x0
	isb

	ret
SYM_FUNC_END(idmap_cpu_replace_ttbr1)
SYM_FUNC_ALIAS(__pi_idmap_cpu_replace_ttbr1, idmap_cpu_replace_ttbr1)
	.popsection

#ifdef CONFIG_UNMAP_KERNEL_AT_EL0

#define KPTI_NG_PTE_FLAGS	(PTE_ATTRINDX(MT_NORMAL) | PTE_TYPE_PAGE | \
				 PTE_AF | PTE_SHARED | PTE_UXN | PTE_WRITE)

	.pushsection ".idmap.text", "a"

	.macro	pte_to_phys, phys, pte
	and	\phys, \pte, #PTE_ADDR_LOW
#ifdef CONFIG_ARM64_PA_BITS_52
	and	\pte, \pte, #PTE_ADDR_HIGH
	orr	\phys, \phys, \pte, lsl #PTE_ADDR_HIGH_SHIFT
#endif
	.endm
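
	/*
	 * With 52-bit physical addresses, the top PA bits do not live in the
	 * regular address field of the descriptor: they are recovered from
	 * the PTE_ADDR_HIGH bits and shifted up by PTE_ADDR_HIGH_SHIFT before
	 * being merged into \phys above.
	 */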

	.macro	kpti_mk_tbl_ng, type, num_entries
	add	end_\type\()p, cur_\type\()p, #\num_entries * 8
.Ldo_\type:
	ldr	\type, [cur_\type\()p], #8	// Load the entry and advance
	tbz	\type, #0, .Lnext_\type	// Skip invalid and
	tbnz	\type, #11, .Lnext_\type	// non-global entries
	orr	\type, \type, #PTE_NG		// Same bit for blocks and pages
	str	\type, [cur_\type\()p, #-8]	// Update the entry
	.ifnc	\type, pte
	tbnz	\type, #1, .Lderef_\type
	.endif
.Lnext_\type:
	cmp	cur_\type\()p, end_\type\()p
	b.ne	.Ldo_\type
	.endm
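
	/*
	 * In short: walk \num_entries descriptors at this level, set the nG
	 * bit (bit 11) in every valid, global entry and, for levels above the
	 * PTE level, branch to .Lderef_<level> whenever bit 1 indicates a
	 * table descriptor so the next level receives the same treatment.
	 */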

	/*
	 * Dereference the current table entry and map it into the temporary
	 * fixmap slot associated with the current level.
	 */
	.macro	kpti_map_pgtbl, type, level
	str	xzr, [temp_pte, #8 * (\level + 2)]	// break before make
	dsb	nshst
	add	pte, temp_pte, #PAGE_SIZE * (\level + 2)
	lsr	pte, pte, #12
	tlbi	vaae1, pte
	dsb	nsh
	isb

	phys_to_pte pte, cur_\type\()p
	add	cur_\type\()p, temp_pte, #PAGE_SIZE * (\level + 2)
	orr	pte, pte, pte_flags
	str	pte, [temp_pte, #8 * (\level + 2)]
	dsb	nshst
	.endm
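
	/*
	 * This is a classic break-before-make sequence: the fixmap slot for
	 * this level is first cleared and its TLB entry invalidated, and only
	 * then is the new mapping of the next page table written, so the slot
	 * never transiently maps two different tables at once.
	 */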

/*
 * void __kpti_install_ng_mappings(int cpu, int num_secondaries, phys_addr_t temp_pgd,
 *				   unsigned long temp_pte_va)
 *
 * Called exactly once from stop_machine context by each CPU found during boot.
 */
	.pushsection ".data", "aw", %progbits
SYM_DATA(__idmap_kpti_flag, .long 1)
	.popsection
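
/*
 * __idmap_kpti_flag implements a simple rendezvous: it starts at 1, each
 * secondary atomically increments it once it has uninstalled swapper, the
 * boot CPU waits for it to reach num_cpus (i.e. for every secondary to have
 * checked in) before rewriting swapper, and finally clears it to release the
 * secondaries.
 */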

SYM_TYPED_FUNC_START(idmap_kpti_install_ng_mappings)
	cpu		.req	w0
	temp_pte	.req	x0
	num_cpus	.req	w1
	pte_flags	.req	x1
	temp_pgd_phys	.req	x2
	swapper_ttb	.req	x3
	flag_ptr	.req	x4
	cur_pgdp	.req	x5
	end_pgdp	.req	x6
	pgd		.req	x7
	cur_pudp	.req	x8
	end_pudp	.req	x9
	cur_pmdp	.req	x11
	end_pmdp	.req	x12
	cur_ptep	.req	x14
	end_ptep	.req	x15
	pte		.req	x16
	valid		.req	x17
	cur_p4dp	.req	x19
	end_p4dp	.req	x20

	mov	x5, x3				// preserve temp_pte arg
	mrs	swapper_ttb, ttbr1_el1
	adr_l	flag_ptr, __idmap_kpti_flag

	cbnz	cpu, __idmap_kpti_secondary

#if CONFIG_PGTABLE_LEVELS > 4
	stp	x29, x30, [sp, #-32]!
	mov	x29, sp
	stp	x19, x20, [sp, #16]
#endif

	/* We're the boot CPU. Wait for the others to catch up */
	sevl
1:	wfe
	ldaxr	w17, [flag_ptr]
	eor	w17, w17, num_cpus
	cbnz	w17, 1b

	/* Switch to the temporary page tables on this CPU only */
	__idmap_cpu_set_reserved_ttbr1 x8, x9
	offset_ttbr1 temp_pgd_phys, x8
	msr	ttbr1_el1, temp_pgd_phys
	isb

	mov	temp_pte, x5
	mov_q	pte_flags, KPTI_NG_PTE_FLAGS

	/* Everybody is enjoying the idmap, so we can rewrite swapper. */

#ifdef CONFIG_ARM64_LPA2
	/*
	 * If LPA2 support is configured, but 52-bit virtual addressing is not
	 * enabled at runtime, we will fall back to one level of paging less,
	 * and so we have to walk swapper_pg_dir as if we dereferenced its
	 * address from a PGD level entry, and terminate the PGD level loop
	 * right after.
	 */
	adrp	pgd, swapper_pg_dir		// walk &swapper_pg_dir at the next level
	mov	cur_pgdp, end_pgdp		// must be equal to terminate the PGD loop
alternative_if_not ARM64_HAS_VA52
	b	.Lderef_pgd			// skip to the next level
alternative_else_nop_endif
	/*
	 * LPA2 based 52-bit virtual addressing requires 52-bit physical
	 * addressing to be enabled as well. In this case, the shareability
	 * bits are repurposed as physical address bits, and should not be
	 * set in pte_flags.
	 */
	bic	pte_flags, pte_flags, #PTE_SHARED
#endif

	/* PGD */
	adrp		cur_pgdp, swapper_pg_dir
	kpti_map_pgtbl	pgd, -1
	kpti_mk_tbl_ng	pgd, PTRS_PER_PGD

	/* Ensure all the updated entries are visible to secondary CPUs */
	dsb	ishst

	/* We're done: fire up swapper_pg_dir again */
	__idmap_cpu_set_reserved_ttbr1 x8, x9
	msr	ttbr1_el1, swapper_ttb
	isb

	/* Set the flag to zero to indicate that we're all done */
	str	wzr, [flag_ptr]
#if CONFIG_PGTABLE_LEVELS > 4
	ldp	x19, x20, [sp, #16]
	ldp	x29, x30, [sp], #32
#endif
	ret

.Lderef_pgd:
	/* P4D */
	.if		CONFIG_PGTABLE_LEVELS > 4
	p4d		.req	x30
	pte_to_phys	cur_p4dp, pgd
	kpti_map_pgtbl	p4d, 0
	kpti_mk_tbl_ng	p4d, PTRS_PER_P4D
	b		.Lnext_pgd
	.else		/* CONFIG_PGTABLE_LEVELS <= 4 */
	p4d		.req	pgd
	.set		.Lnext_p4d, .Lnext_pgd
	.endif

.Lderef_p4d:
	/* PUD */
	.if		CONFIG_PGTABLE_LEVELS > 3
	pud		.req	x10
	pte_to_phys	cur_pudp, p4d
	kpti_map_pgtbl	pud, 1
	kpti_mk_tbl_ng	pud, PTRS_PER_PUD
	b		.Lnext_p4d
	.else		/* CONFIG_PGTABLE_LEVELS <= 3 */
	pud		.req	pgd
	.set		.Lnext_pud, .Lnext_pgd
	.endif

.Lderef_pud:
	/* PMD */
	.if		CONFIG_PGTABLE_LEVELS > 2
	pmd		.req	x13
	pte_to_phys	cur_pmdp, pud
	kpti_map_pgtbl	pmd, 2
	kpti_mk_tbl_ng	pmd, PTRS_PER_PMD
	b		.Lnext_pud
	.else		/* CONFIG_PGTABLE_LEVELS <= 2 */
	pmd		.req	pgd
	.set		.Lnext_pmd, .Lnext_pgd
	.endif

.Lderef_pmd:
	/* PTE */
	pte_to_phys	cur_ptep, pmd
	kpti_map_pgtbl	pte, 3
	kpti_mk_tbl_ng	pte, PTRS_PER_PTE
	b		.Lnext_pmd

	.unreq	cpu
	.unreq	temp_pte
	.unreq	num_cpus
	.unreq	pte_flags
	.unreq	temp_pgd_phys
	.unreq	cur_pgdp
	.unreq	end_pgdp
	.unreq	pgd
	.unreq	cur_pudp
	.unreq	end_pudp
	.unreq	pud
	.unreq	cur_pmdp
	.unreq	end_pmdp
	.unreq	pmd
	.unreq	cur_ptep
	.unreq	end_ptep
	.unreq	pte
	.unreq	valid
	.unreq	cur_p4dp
	.unreq	end_p4dp
	.unreq	p4d

	/* Secondary CPUs end up here */
__idmap_kpti_secondary:
	/* Uninstall swapper before surgery begins */
	__idmap_cpu_set_reserved_ttbr1 x16, x17
	/* Increment the flag to let the boot CPU know we're ready */
1:	ldxr	w16, [flag_ptr]
	add	w16, w16, #1
	stxr	w17, w16, [flag_ptr]
	cbnz	w17, 1b

	/* Wait for the boot CPU to finish messing around with swapper */
	sevl
1:	wfe
	ldxr	w16, [flag_ptr]
	cbnz	w16, 1b

	/* All done, act like nothing happened */
	msr	ttbr1_el1, swapper_ttb
	isb
	ret

	.unreq	swapper_ttb
	.unreq	flag_ptr
SYM_FUNC_END(idmap_kpti_install_ng_mappings)
	.popsection
#endif

/*
 *	__cpu_setup
 *
 *	Initialise the processor for turning the MMU on.
 *
 * Output:
 *	Return in x0 the value of the SCTLR_EL1 register.
 */
	.pushsection ".idmap.text", "a"
SYM_FUNC_START(__cpu_setup)
	tlbi	vmalle1				// Invalidate local TLB
	dsb	nsh

	msr	cpacr_el1, xzr			// Reset cpacr_el1
	mov	x1, #1 << 12			// Reset mdscr_el1 and disable
	msr	mdscr_el1, x1			// access to the DCC from EL0
	reset_pmuserenr_el0 x1			// Disable PMU access from EL0
	reset_amuserenr_el0 x1			// Disable AMU access from EL0

	/*
	 * Default values for VMSA control registers. These will be adjusted
	 * below depending on detected CPU features.
	 */
	mair	.req	x17
	tcr	.req	x16
	mov_q	mair, MAIR_EL1_SET
	mov_q	tcr, TCR_T0SZ(IDMAP_VA_BITS) | TCR_T1SZ(VA_BITS_MIN) | TCR_CACHE_FLAGS | \
		     TCR_SHARED | TCR_TG_FLAGS | TCR_KASLR_FLAGS | TCR_ASID16 | \
		     TCR_TBI0 | TCR_A1 | TCR_KASAN_SW_FLAGS | TCR_MTE_FLAGS
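
	/*
	 * Of the fields above: T0SZ/T1SZ size the two VA ranges, TCR_A1
	 * selects the ASID from TTBR1_EL1, TCR_ASID16 requests 16-bit ASIDs,
	 * and TCR_TBI0 enables top-byte-ignore for userspace (TTBR0)
	 * addresses; the remaining flags come from the Kconfig-dependent
	 * macros defined at the top of this file.
	 */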
	tcr_clear_errata_bits tcr, x9, x5

#ifdef CONFIG_ARM64_VA_BITS_52
	mov		x9, #64 - VA_BITS
alternative_if ARM64_HAS_VA52
	tcr_set_t1sz	tcr, x9
#ifdef CONFIG_ARM64_LPA2
	orr		tcr, tcr, #TCR_DS
#endif
alternative_else_nop_endif
#endif

	/*
	 * Set the IPS bits in TCR_EL1.
	 */
	tcr_compute_pa_size tcr, #TCR_IPS_SHIFT, x5, x6
#ifdef CONFIG_ARM64_HW_AFDBM
	/*
	 * Enable hardware update of the Access Flags bit.
	 * Hardware dirty bit management is enabled later,
	 * via capabilities.
	 */
	mrs	x9, ID_AA64MMFR1_EL1
	and	x9, x9, ID_AA64MMFR1_EL1_HAFDBS_MASK
	cbz	x9, 1f
	orr	tcr, tcr, #TCR_HA		// hardware Access flag update
1:
#endif	/* CONFIG_ARM64_HW_AFDBM */
	msr	mair_el1, mair
	msr	tcr_el1, tcr

	mrs_s	x1, SYS_ID_AA64MMFR3_EL1
	ubfx	x1, x1, #ID_AA64MMFR3_EL1_S1PIE_SHIFT, #4
	cbz	x1, .Lskip_indirection

	/*
	 * The PROT_* macros describing the various memory types may resolve to
	 * C expressions if they include the PTE_MAYBE_* macros, and so they
	 * can only be used from C code. The PIE_E* constants below are also
	 * defined in terms of those macros, but will mask out those
	 * PTE_MAYBE_* constants, whether they are set or not. So #define them
	 * as 0x0 here so we can evaluate the PIE_E* constants in asm context.
	 */
#define PTE_MAYBE_NG		0
#define PTE_MAYBE_SHARED	0

	mov_q	x0, PIE_E0
	msr	REG_PIRE0_EL1, x0
	mov_q	x0, PIE_E1
	msr	REG_PIR_EL1, x0

#undef PTE_MAYBE_NG
#undef PTE_MAYBE_SHARED

	mov	x0, TCR2_EL1x_PIE
	msr	REG_TCR2_EL1, x0
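
	/*
	 * With FEAT_S1PIE present, PIRE0_EL1 and PIR_EL1 hold the EL0 and EL1
	 * permission-indirection tables, and setting the PIE bit in TCR2_EL1
	 * above switches stage 1 translation over to using them.
	 */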

.Lskip_indirection:

	/*
	 * Prepare SCTLR
	 */
	mov_q	x0, INIT_SCTLR_EL1_MMU_ON
	ret					// return to head.S

	.unreq	mair
	.unreq	tcr
SYM_FUNC_END(__cpu_setup)