- /*
- * Low-level CPU initialisation
- * Based on arch/arm/kernel/head.S
- *
- * Copyright (C) 1994-2002 Russell King
- * Copyright (C) 2003-2012 ARM Ltd.
- * Authors: Catalin Marinas <catalin.marinas@arm.com>
- * Will Deacon <will.deacon@arm.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program. If not, see <http://www.gnu.org/licenses/>.
- */
- #include <linux/linkage.h>
- #include <linux/init.h>
- #include <linux/irqchip/arm-gic-v3.h>
- #include <asm/assembler.h>
- #include <asm/boot.h>
- #include <asm/ptrace.h>
- #include <asm/asm-offsets.h>
- #include <asm/cache.h>
- #include <asm/cputype.h>
- #include <asm/elf.h>
- #include <asm/kernel-pgtable.h>
- #include <asm/kvm_arm.h>
- #include <asm/memory.h>
- #include <asm/pgtable-hwdef.h>
- #include <asm/pgtable.h>
- #include <asm/page.h>
- #include <asm/smp.h>
- #include <asm/sysreg.h>
- #include <asm/thread_info.h>
- #include <asm/virt.h>
- #include "efi-header.S"
- #define __PHYS_OFFSET (KERNEL_START - TEXT_OFFSET)
- #if (TEXT_OFFSET & 0xfff) != 0
- #error TEXT_OFFSET must be at least 4KB aligned
- #elif (PAGE_OFFSET & 0x1fffff) != 0
- #error PAGE_OFFSET must be at least 2MB aligned
- #elif TEXT_OFFSET > 0x1fffff
- #error TEXT_OFFSET must be less than 2MB
- #endif
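- /*
- * As a worked example, the default arm64 TEXT_OFFSET of 0x80000 satisfies
- * these constraints: 0x80000 & 0xfff == 0 (4KB aligned) and
- * 0x80000 <= 0x1fffff (below 2MB).
- */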
- /*
- * Kernel startup entry point.
- * ---------------------------
- *
- * The requirements are:
- * MMU = off, D-cache = off, I-cache = on or off,
- * x0 = physical address to the FDT blob.
- *
- * This code is mostly position independent so you call this at
- * __pa(PAGE_OFFSET + TEXT_OFFSET).
- *
- * Note that the callee-saved registers are used for storing variables
- * that are useful before the MMU is enabled. The allocations are described
- * in the entry routines.
- */
- __HEAD
- _head:
- /*
- * DO NOT MODIFY. Image header expected by Linux boot-loaders.
- */
- #ifdef CONFIG_EFI
- /*
- * This add instruction has no meaningful effect except that
- * its opcode forms the magic "MZ" signature required by UEFI.
- */
- add x13, x18, #0x16
- b stext
- #else
- b stext // branch to kernel start, magic
- .long 0 // reserved
- #endif
- le64sym _kernel_offset_le // Image load offset from start of RAM, little-endian
- le64sym _kernel_size_le // Effective size of kernel image, little-endian
- le64sym _kernel_flags_le // Informative flags, little-endian
- .quad 0 // reserved
- .quad 0 // reserved
- .quad 0 // reserved
- .ascii "ARM\x64" // Magic number
- #ifdef CONFIG_EFI
- .long pe_header - _head // Offset to the PE header.
- pe_header:
- __EFI_PE_HEADER
- #else
- .long 0 // reserved
- #endif
- __INIT
- /*
- * The following callee-saved general purpose registers are used on the
- * primary low-level boot path:
- *
- * Register Scope Purpose
- * x21 stext() .. start_kernel() FDT pointer passed at boot in x0
- * x23 stext() .. start_kernel() physical misalignment/KASLR offset
- * x28 __create_page_tables() callee preserved temp register
- * x19/x20 __primary_switch() callee preserved temp registers
- */
- ENTRY(stext)
- bl preserve_boot_args
- bl el2_setup // Drop to EL1, w0=cpu_boot_mode
- adrp x23, __PHYS_OFFSET
- and x23, x23, MIN_KIMG_ALIGN - 1 // KASLR offset, defaults to 0
- bl set_cpu_boot_mode_flag
- bl __create_page_tables
- /*
- * The following calls CPU setup code, see arch/arm64/mm/proc.S for
- * details.
- * On return, the CPU will be ready for the MMU to be turned on and
- * the TCR will have been set.
- */
- bl __cpu_setup // initialise processor
- b __primary_switch
- ENDPROC(stext)
- /*
- * Preserve the arguments passed by the bootloader in x0 .. x3
- */
- preserve_boot_args:
- mov x21, x0 // x21=FDT
- adr_l x0, boot_args // record the contents of
- stp x21, x1, [x0] // x0 .. x3 at kernel entry
- stp x2, x3, [x0, #16]
- dmb sy // needed before dc ivac with
- // MMU off
- mov x1, #0x20 // 4 x 8 bytes
- b __inval_dcache_area // tail call
- ENDPROC(preserve_boot_args)
- /*
- * Macro to create a table entry to the next page.
- *
- * tbl: page table address
- * virt: virtual address
- * shift: #imm page table shift
- * ptrs: #imm pointers per table page
- *
- * Preserves: virt
- * Corrupts: ptrs, tmp1, tmp2
- * Returns: tbl -> next level table page address
- */
- .macro create_table_entry, tbl, virt, shift, ptrs, tmp1, tmp2
- add \tmp1, \tbl, #PAGE_SIZE
- phys_to_pte \tmp2, \tmp1
- orr \tmp2, \tmp2, #PMD_TYPE_TABLE // address of next table and entry type
- lsr \tmp1, \virt, #\shift
- sub \ptrs, \ptrs, #1
- and \tmp1, \tmp1, \ptrs // table index
- str \tmp2, [\tbl, \tmp1, lsl #3]
- add \tbl, \tbl, #PAGE_SIZE // next level table page
- .endm
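- /*
- * A typical invocation, as used for the extended ID map further down, is:
- *
- *   mov x4, EXTRA_PTRS
- *   create_table_entry x0, x3, EXTRA_SHIFT, x4, x5, x6
- *
- * with the table in x0, the virtual address in x3 and x5/x6 as scratch; on
- * return x0 points at the next level table page.
- */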
- /*
- * Macro to populate page table entries; these entries can be pointers to the
- * next level or last-level entries pointing to physical memory.
- *
- * tbl: page table address
- * rtbl: pointer to page table or physical memory
- * index: start index to write
- * eindex: end index to write - [index, eindex] written to
- * flags: flags for pagetable entry to or in
- * inc: increment to rtbl between each entry
- * tmp1: temporary variable
- *
- * Preserves: tbl, eindex, flags, inc
- * Corrupts: index, tmp1
- * Returns: rtbl
- */
- .macro populate_entries, tbl, rtbl, index, eindex, flags, inc, tmp1
- .Lpe\@: phys_to_pte \tmp1, \rtbl
- orr \tmp1, \tmp1, \flags // tmp1 = table entry
- str \tmp1, [\tbl, \index, lsl #3]
- add \rtbl, \rtbl, \inc // rtbl = pa next level
- add \index, \index, #1
- cmp \index, \eindex
- b.ls .Lpe\@
- .endm
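- /*
- * For instance, with index = 1, eindex = 3 and inc = PAGE_SIZE the loop
- * writes entries 1, 2 and 3 of tbl, pointing at rtbl, rtbl + PAGE_SIZE and
- * rtbl + 2 * PAGE_SIZE respectively (both ends of the range are inclusive).
- */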
- /*
- * Compute indices of table entries from virtual address range. If multiple entries
- * were needed in the previous page table level then the next page table level is assumed
- * to be composed of multiple pages. (This effectively scales the end index).
- *
- * vstart: virtual address of start of range
- * vend: virtual address of end of range
- * shift: shift used to transform virtual address into index
- * ptrs: number of entries in page table
- * istart: index in table corresponding to vstart
- * iend: index in table corresponding to vend
- * count: On entry: how many extra entries were required in previous level, scales
- * our end index.
- * On exit: returns how many extra entries required for next page table level
- *
- * Preserves: vstart, vend, shift, ptrs
- * Returns: istart, iend, count
- */
- .macro compute_indices, vstart, vend, shift, ptrs, istart, iend, count
- lsr \iend, \vend, \shift
- mov \istart, \ptrs
- sub \istart, \istart, #1
- and \iend, \iend, \istart // iend = (vend >> shift) & (ptrs - 1)
- mov \istart, \ptrs
- mul \istart, \istart, \count
- add \iend, \iend, \istart // iend += count * ptrs
- // our entries span multiple tables
- lsr \istart, \vstart, \shift
- mov \count, \ptrs
- sub \count, \count, #1
- and \istart, \istart, \count
- sub \count, \iend, \istart
- .endm
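- /*
- * A worked example, assuming 4KB pages with shift = 21 and ptrs = 512:
- * mapping vstart = 0x40200000 .. vend = 0x40400000 with count = 0 on entry
- * gives istart = (0x40200000 >> 21) & 511 = 1, iend = (0x40400000 >> 21)
- * & 511 = 2 and count = iend - istart = 1, i.e. one extra entry is needed
- * at this level and the next level is assumed to span two table pages.
- */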
- /*
- * Map memory for specified virtual address range. Each level of page table needed supports
- * multiple entries. If a level requires n entries the next page table level is assumed to be
- * formed from n pages.
- *
- * tbl: location of page table
- * rtbl: address to be used for first level page table entry (typically tbl + PAGE_SIZE)
- * vstart: start address to map
- * vend: end address to map - we map [vstart, vend]
- * flags: flags to use to map last level entries
- * phys: physical address corresponding to vstart - physical memory is contiguous
- * pgds: the number of pgd entries
- *
- * Temporaries: istart, iend, tmp, count, sv - these need to be different registers
- * Preserves: vstart, vend, flags
- * Corrupts: tbl, rtbl, istart, iend, tmp, count, sv
- */
- .macro map_memory, tbl, rtbl, vstart, vend, flags, phys, pgds, istart, iend, tmp, count, sv
- add \rtbl, \tbl, #PAGE_SIZE
- mov \sv, \rtbl
- mov \count, #0
- compute_indices \vstart, \vend, #PGDIR_SHIFT, \pgds, \istart, \iend, \count
- populate_entries \tbl, \rtbl, \istart, \iend, #PMD_TYPE_TABLE, #PAGE_SIZE, \tmp
- mov \tbl, \sv
- mov \sv, \rtbl
- #if SWAPPER_PGTABLE_LEVELS > 3
- compute_indices \vstart, \vend, #PUD_SHIFT, #PTRS_PER_PUD, \istart, \iend, \count
- populate_entries \tbl, \rtbl, \istart, \iend, #PMD_TYPE_TABLE, #PAGE_SIZE, \tmp
- mov \tbl, \sv
- mov \sv, \rtbl
- #endif
- #if SWAPPER_PGTABLE_LEVELS > 2
- compute_indices \vstart, \vend, #SWAPPER_TABLE_SHIFT, #PTRS_PER_PMD, \istart, \iend, \count
- populate_entries \tbl, \rtbl, \istart, \iend, #PMD_TYPE_TABLE, #PAGE_SIZE, \tmp
- mov \tbl, \sv
- #endif
- compute_indices \vstart, \vend, #SWAPPER_BLOCK_SHIFT, #PTRS_PER_PTE, \istart, \iend, \count
- bic \count, \phys, #SWAPPER_BLOCK_SIZE - 1
- populate_entries \tbl, \count, \istart, \iend, \flags, #SWAPPER_BLOCK_SIZE, \tmp
- .endm
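- /*
- * Both call sites below follow the same pattern, e.g. for the identity map:
- *
- *   map_memory x0, x1, x3, x6, x7, x3, x4, x10, x11, x12, x13, x14
- *
- * with x0 = idmap_pg_dir, x3 = start address (virtual == physical for the
- * ID map), x6 = end address, x7 = SWAPPER_MM_MMUFLAGS, x4 = number of pgd
- * entries and x10-x14 as temporaries.
- */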
- /*
- * Setup the initial page tables. We only setup the barest amount which is
- * required to get the kernel running. The following sections are required:
- * - identity mapping to enable the MMU (low address, TTBR0)
- * - first few MB of the kernel linear mapping to jump to once the MMU has
- * been enabled
- */
- __create_page_tables:
- mov x28, lr
- /*
- * Invalidate the idmap and swapper page tables to avoid potential
- * dirty cache lines being evicted.
- */
- adrp x0, idmap_pg_dir
- adrp x1, swapper_pg_end
- sub x1, x1, x0
- bl __inval_dcache_area
- /*
- * Clear the idmap and swapper page tables.
- */
- adrp x0, idmap_pg_dir
- adrp x1, swapper_pg_end
- sub x1, x1, x0
- 1: stp xzr, xzr, [x0], #16
- stp xzr, xzr, [x0], #16
- stp xzr, xzr, [x0], #16
- stp xzr, xzr, [x0], #16
- subs x1, x1, #64
- b.ne 1b
- mov x7, SWAPPER_MM_MMUFLAGS
- /*
- * Create the identity mapping.
- */
- adrp x0, idmap_pg_dir
- adrp x3, __idmap_text_start // __pa(__idmap_text_start)
- /*
- * VA_BITS may be too small to allow for an ID mapping to be created
- * that covers system RAM if that is located sufficiently high in the
- * physical address space. So for the ID map, use an extended virtual
- * range in that case, and configure an additional translation level
- * if needed.
- *
- * Calculate the maximum allowed value for TCR_EL1.T0SZ so that the
- * entire ID map region can be mapped. As T0SZ == (64 - #bits used),
- * this number conveniently equals the number of leading zeroes in
- * the physical address of __idmap_text_end.
- */
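- /*
- * For example, if __idmap_text_end were at physical address 0x8000000000
- * (bit 39 set), clz would give 24, so T0SZ = 24, i.e. a 40-bit ID map.
- * With a default VA_BITS of 39 (T0SZ = 25) the comparison below fails and
- * the VA range is extended; with VA_BITS = 48 (T0SZ = 16) it is not.
- */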
- adrp x5, __idmap_text_end
- clz x5, x5
- cmp x5, TCR_T0SZ(VA_BITS) // default T0SZ small enough?
- b.ge 1f // .. then skip VA range extension
- adr_l x6, idmap_t0sz
- str x5, [x6]
- dmb sy
- dc ivac, x6 // Invalidate potentially stale cache line
- #if (VA_BITS < 48)
- #define EXTRA_SHIFT (PGDIR_SHIFT + PAGE_SHIFT - 3)
- #define EXTRA_PTRS (1 << (PHYS_MASK_SHIFT - EXTRA_SHIFT))
- /*
- * If VA_BITS < 48, we have to configure an additional table level.
- * First, we have to verify our assumption that the current value of
- * VA_BITS was chosen such that all translation levels are fully
- * utilised, and that lowering T0SZ will always result in an additional
- * translation level to be configured.
- */
- #if VA_BITS != EXTRA_SHIFT
- #error "Mismatch between VA_BITS and page size/number of translation levels"
- #endif
- mov x4, EXTRA_PTRS
- create_table_entry x0, x3, EXTRA_SHIFT, x4, x5, x6
- #else
- /*
- * If VA_BITS == 48, we don't have to configure an additional
- * translation level, but the top-level table has more entries.
- */
- mov x4, #1 << (PHYS_MASK_SHIFT - PGDIR_SHIFT)
- str_l x4, idmap_ptrs_per_pgd, x5
- #endif
- 1:
- ldr_l x4, idmap_ptrs_per_pgd
- mov x5, x3 // __pa(__idmap_text_start)
- adr_l x6, __idmap_text_end // __pa(__idmap_text_end)
- map_memory x0, x1, x3, x6, x7, x3, x4, x10, x11, x12, x13, x14
- /*
- * Map the kernel image (starting with PHYS_OFFSET).
- */
- adrp x0, swapper_pg_dir
- mov_q x5, KIMAGE_VADDR + TEXT_OFFSET // compile time __va(_text)
- add x5, x5, x23 // add KASLR displacement
- mov x4, PTRS_PER_PGD
- adrp x6, _end // runtime __pa(_end)
- adrp x3, _text // runtime __pa(_text)
- sub x6, x6, x3 // _end - _text
- add x6, x6, x5 // runtime __va(_end)
- map_memory x0, x1, x5, x6, x7, x3, x4, x10, x11, x12, x13, x14
- /*
- * Since the page tables have been populated with non-cacheable
- * accesses (MMU disabled), invalidate the idmap and swapper page
- * tables again to remove any speculatively loaded cache lines.
- */
- adrp x0, idmap_pg_dir
- adrp x1, swapper_pg_end
- sub x1, x1, x0
- dmb sy
- bl __inval_dcache_area
- ret x28
- ENDPROC(__create_page_tables)
- .ltorg
- /*
- * The following fragment of code is executed with the MMU enabled.
- *
- * x0 = __PHYS_OFFSET
- */
- __primary_switched:
- adrp x4, init_thread_union
- add sp, x4, #THREAD_SIZE
- adr_l x5, init_task
- msr sp_el0, x5 // Save thread_info
- adr_l x8, vectors // load VBAR_EL1 with virtual
- msr vbar_el1, x8 // vector table address
- isb
- stp xzr, x30, [sp, #-16]!
- mov x29, sp
- str_l x21, __fdt_pointer, x5 // Save FDT pointer
- ldr_l x4, kimage_vaddr // Save the offset between
- sub x4, x4, x0 // the kernel virtual and
- str_l x4, kimage_voffset, x5 // physical mappings
- // Clear BSS
- adr_l x0, __bss_start
- mov x1, xzr
- adr_l x2, __bss_stop
- sub x2, x2, x0
- bl __pi_memset
- dsb ishst // Make zero page visible to PTW
- #ifdef CONFIG_KASAN
- bl kasan_early_init
- #endif
- #ifdef CONFIG_RANDOMIZE_BASE
- tst x23, ~(MIN_KIMG_ALIGN - 1) // already running randomized?
- b.ne 0f
- mov x0, x21 // pass FDT address in x0
- bl kaslr_early_init // parse FDT for KASLR options
- cbz x0, 0f // KASLR disabled? just proceed
- orr x23, x23, x0 // record KASLR offset
- ldp x29, x30, [sp], #16 // we must enable KASLR, return
- ret // to __primary_switch()
- 0:
- #endif
- add sp, sp, #16
- mov x29, #0
- mov x30, #0
- b start_kernel
- ENDPROC(__primary_switched)
- /*
- * end early head section, begin head code that is also used for
- * hotplug and needs to have the same protections as the text region
- */
- .section ".idmap.text","awx"
- ENTRY(kimage_vaddr)
- .quad _text - TEXT_OFFSET
- /*
- * If we're fortunate enough to boot at EL2, ensure that the world is
- * sane before dropping to EL1.
- *
- * Returns either BOOT_CPU_MODE_EL1 or BOOT_CPU_MODE_EL2 in w0 if
- * booted in EL1 or EL2 respectively.
- */
- ENTRY(el2_setup)
- msr SPsel, #1 // We want to use SP_EL{1,2}
- mrs x0, CurrentEL
- cmp x0, #CurrentEL_EL2
- b.eq 1f
- mov_q x0, (SCTLR_EL1_RES1 | ENDIAN_SET_EL1)
- msr sctlr_el1, x0
- mov w0, #BOOT_CPU_MODE_EL1 // This cpu booted in EL1
- isb
- ret
- 1: mov_q x0, (SCTLR_EL2_RES1 | ENDIAN_SET_EL2)
- msr sctlr_el2, x0
- #ifdef CONFIG_ARM64_VHE
- /*
- * Check for VHE being present. For the rest of the EL2 setup,
- * x2 being non-zero indicates that we do have VHE, and that the
- * kernel is intended to run at EL2.
- */
- mrs x2, id_aa64mmfr1_el1
- ubfx x2, x2, #8, #4
- #else
- mov x2, xzr
- #endif
- /* Hyp configuration. */
- mov_q x0, HCR_HOST_NVHE_FLAGS
- cbz x2, set_hcr
- mov_q x0, HCR_HOST_VHE_FLAGS
- set_hcr:
- msr hcr_el2, x0
- isb
- /*
- * Allow Non-secure EL1 and EL0 to access physical timer and counter.
- * This is not necessary for VHE, since the host kernel runs in EL2,
- * and EL0 accesses are configured at a later stage of the boot process.
- * Note that when HCR_EL2.E2H == 1, CNTHCTL_EL2 has the same bit layout
- * as CNTKCTL_EL1, and CNTKCTL_EL1 accessing instructions are redefined
- * to access CNTHCTL_EL2. This allows the kernel designed to run at EL1
- * to transparently mess with the EL0 bits via CNTKCTL_EL1 access in
- * EL2.
- */
- cbnz x2, 1f
- mrs x0, cnthctl_el2
- orr x0, x0, #3 // Enable EL1 physical timers
- msr cnthctl_el2, x0
- 1:
- msr cntvoff_el2, xzr // Clear virtual offset
- #ifdef CONFIG_ARM_GIC_V3
- /* GICv3 system register access */
- mrs x0, id_aa64pfr0_el1
- ubfx x0, x0, #24, #4
- cbz x0, 3f
- mrs_s x0, SYS_ICC_SRE_EL2
- orr x0, x0, #ICC_SRE_EL2_SRE // Set ICC_SRE_EL2.SRE==1
- orr x0, x0, #ICC_SRE_EL2_ENABLE // Set ICC_SRE_EL2.Enable==1
- msr_s SYS_ICC_SRE_EL2, x0
- isb // Make sure SRE is now set
- mrs_s x0, SYS_ICC_SRE_EL2 // Read SRE back,
- tbz x0, #0, 3f // and check that it sticks
- msr_s SYS_ICH_HCR_EL2, xzr // Reset ICH_HCR_EL2 to defaults
- 3:
- #endif
- /* Populate ID registers. */
- mrs x0, midr_el1
- mrs x1, mpidr_el1
- msr vpidr_el2, x0
- msr vmpidr_el2, x1
- #ifdef CONFIG_COMPAT
- msr hstr_el2, xzr // Disable CP15 traps to EL2
- #endif
- /* EL2 debug */
- mrs x1, id_aa64dfr0_el1 // Check ID_AA64DFR0_EL1 PMUVer
- sbfx x0, x1, #8, #4
- cmp x0, #1
- b.lt 4f // Skip if no PMU present
- mrs x0, pmcr_el0 // Disable debug access traps
- ubfx x0, x0, #11, #5 // to EL2 and allow access to
- 4:
- csel x3, xzr, x0, lt // all PMU counters from EL1
- /* Statistical profiling */
- ubfx x0, x1, #32, #4 // Check ID_AA64DFR0_EL1 PMSVer
- cbz x0, 7f // Skip if SPE not present
- cbnz x2, 6f // VHE?
- mrs_s x4, SYS_PMBIDR_EL1 // If SPE available at EL2,
- and x4, x4, #(1 << SYS_PMBIDR_EL1_P_SHIFT)
- cbnz x4, 5f // then permit sampling of physical
- mov x4, #(1 << SYS_PMSCR_EL2_PCT_SHIFT | \
- 1 << SYS_PMSCR_EL2_PA_SHIFT)
- msr_s SYS_PMSCR_EL2, x4 // addresses and physical counter
- 5:
- mov x1, #(MDCR_EL2_E2PB_MASK << MDCR_EL2_E2PB_SHIFT)
- orr x3, x3, x1 // If we don't have VHE, then
- b 7f // use EL1&0 translation.
- 6: // For VHE, use EL2 translation
- orr x3, x3, #MDCR_EL2_TPMS // and disable access from EL1
- 7:
- msr mdcr_el2, x3 // Configure debug traps
- /* LORegions */
- mrs x1, id_aa64mmfr1_el1
- ubfx x0, x1, #ID_AA64MMFR1_LOR_SHIFT, 4
- cbz x0, 1f
- msr_s SYS_LORC_EL1, xzr
- 1:
- /* Stage-2 translation */
- msr vttbr_el2, xzr
- cbz x2, install_el2_stub
- mov w0, #BOOT_CPU_MODE_EL2 // This CPU booted in EL2
- isb
- ret
- install_el2_stub:
- /*
- * When VHE is not in use, early init of EL2 and EL1 needs to be
- * done here.
- * When VHE _is_ in use, EL1 will not be used in the host and
- * requires no configuration, and all non-hyp-specific EL2 setup
- * will be done via the _EL1 system register aliases in __cpu_setup.
- */
- mov_q x0, (SCTLR_EL1_RES1 | ENDIAN_SET_EL1)
- msr sctlr_el1, x0
- /* Coprocessor traps. */
- mov x0, #0x33ff
- msr cptr_el2, x0 // Disable copro. traps to EL2
- /* SVE register access */
- mrs x1, id_aa64pfr0_el1
- ubfx x1, x1, #ID_AA64PFR0_SVE_SHIFT, #4
- cbz x1, 7f
- bic x0, x0, #CPTR_EL2_TZ // Also disable SVE traps
- msr cptr_el2, x0 // Disable copro. traps to EL2
- isb
- mov x1, #ZCR_ELx_LEN_MASK // SVE: Enable full vector
- msr_s SYS_ZCR_EL2, x1 // length for EL1.
- /* Hypervisor stub */
- 7: adr_l x0, __hyp_stub_vectors
- msr vbar_el2, x0
- /* spsr */
- mov x0, #(PSR_F_BIT | PSR_I_BIT | PSR_A_BIT | PSR_D_BIT |\
- PSR_MODE_EL1h)
- msr spsr_el2, x0
- msr elr_el2, lr
- mov w0, #BOOT_CPU_MODE_EL2 // This CPU booted in EL2
- eret
- ENDPROC(el2_setup)
- /*
- * Sets the __boot_cpu_mode flag depending on the CPU boot mode passed
- * in w0. See arch/arm64/include/asm/virt.h for more info.
- */
- set_cpu_boot_mode_flag:
- adr_l x1, __boot_cpu_mode
- cmp w0, #BOOT_CPU_MODE_EL2
- b.ne 1f
- add x1, x1, #4
- 1: str w0, [x1] // This CPU has booted in EL1
- dmb sy
- dc ivac, x1 // Invalidate potentially stale cache line
- ret
- ENDPROC(set_cpu_boot_mode_flag)
- /*
- * These values are written with the MMU off, but read with the MMU on.
- * Writers will invalidate the corresponding address, discarding up to a
- * 'Cache Writeback Granule' (CWG) worth of data. The linker script ensures
- * sufficient alignment that the CWG doesn't overlap another section.
- */
- .pushsection ".mmuoff.data.write", "aw"
- /*
- * We need to find out the CPU boot mode long after boot, so we need to
- * store it in a writable variable.
- *
- * This is not in .bss, because we set it sufficiently early that the boot-time
- * zeroing of .bss would clobber it.
- */
- ENTRY(__boot_cpu_mode)
- .long BOOT_CPU_MODE_EL2
- .long BOOT_CPU_MODE_EL1
- /*
- * The booting CPU updates the failure status in @__early_cpu_boot_status,
- * with the MMU turned off.
- */
- ENTRY(__early_cpu_boot_status)
- .quad 0
- .popsection
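- /*
- * Writers to the variables above follow the pattern used by
- * set_cpu_boot_mode_flag and update_early_cpu_boot_status:
- *
- *   str w0, [x1]
- *   dmb sy
- *   dc ivac, x1 // Invalidate potentially stale cache line
- *
- * so that a value written with the MMU off is visible to readers running
- * with the MMU and caches on.
- */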
- /*
- * This provides a "holding pen" for platforms to hold all secondary
- * cores are held until we're ready for them to initialise.
- */
- ENTRY(secondary_holding_pen)
- bl el2_setup // Drop to EL1, w0=cpu_boot_mode
- bl set_cpu_boot_mode_flag
- mrs x0, mpidr_el1
- mov_q x1, MPIDR_HWID_BITMASK
- and x0, x0, x1
- adr_l x3, secondary_holding_pen_release
- pen: ldr x4, [x3]
- cmp x4, x0
- b.eq secondary_startup
- wfe
- b pen
- ENDPROC(secondary_holding_pen)
- /*
- * Secondary entry point that jumps straight into the kernel. Only to
- * be used where CPUs are brought online dynamically by the kernel.
- */
- ENTRY(secondary_entry)
- bl el2_setup // Drop to EL1
- bl set_cpu_boot_mode_flag
- b secondary_startup
- ENDPROC(secondary_entry)
- secondary_startup:
- /*
- * Common entry point for secondary CPUs.
- */
- bl __cpu_secondary_check52bitva
- bl __cpu_setup // initialise processor
- bl __enable_mmu
- ldr x8, =__secondary_switched
- br x8
- ENDPROC(secondary_startup)
- __secondary_switched:
- adr_l x5, vectors
- msr vbar_el1, x5
- isb
- adr_l x0, secondary_data
- ldr x1, [x0, #CPU_BOOT_STACK] // get secondary_data.stack
- mov sp, x1
- ldr x2, [x0, #CPU_BOOT_TASK]
- msr sp_el0, x2
- mov x29, #0
- mov x30, #0
- b secondary_start_kernel
- ENDPROC(__secondary_switched)
- /*
- * The booting CPU updates the failure status in @__early_cpu_boot_status,
- * with the MMU turned off.
- *
- * update_early_cpu_boot_status status, tmp1, tmp2
- * - Corrupts tmp1, tmp2
- * - Writes 'status' to __early_cpu_boot_status and makes sure
- * it is committed to memory.
- */
- .macro update_early_cpu_boot_status status, tmp1, tmp2
- mov \tmp2, #\status
- adr_l \tmp1, __early_cpu_boot_status
- str \tmp2, [\tmp1]
- dmb sy
- dc ivac, \tmp1 // Invalidate potentially stale cache line
- .endm
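- /*
- * For example, __enable_mmu below reports success with
- *
- *   update_early_cpu_boot_status 0, x1, x2
- *
- * while a CPU that cannot continue records CPU_STUCK_IN_KERNEL instead.
- */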
- /*
- * Enable the MMU.
- *
- * x0 = SCTLR_EL1 value for turning on the MMU.
- *
- * Returns to the caller via x30/lr. This requires the caller to be covered
- * by the .idmap.text section.
- *
- * Checks if the selected granule size is supported by the CPU.
- * If it isn't, park the CPU.
- */
- ENTRY(__enable_mmu)
- mrs x1, ID_AA64MMFR0_EL1
- ubfx x2, x1, #ID_AA64MMFR0_TGRAN_SHIFT, 4
- cmp x2, #ID_AA64MMFR0_TGRAN_SUPPORTED
- b.ne __no_granule_support
- update_early_cpu_boot_status 0, x1, x2
- adrp x1, idmap_pg_dir
- adrp x2, swapper_pg_dir
- phys_to_ttbr x3, x1
- phys_to_ttbr x4, x2
- msr ttbr0_el1, x3 // load TTBR0
- msr ttbr1_el1, x4 // load TTBR1
- isb
- msr sctlr_el1, x0
- isb
- /*
- * Invalidate the local I-cache so that any instructions fetched
- * speculatively from the PoC are discarded, since they may have
- * been dynamically patched at the PoU.
- */
- ic iallu
- dsb nsh
- isb
- ret
- ENDPROC(__enable_mmu)
- ENTRY(__cpu_secondary_check52bitva)
- #ifdef CONFIG_ARM64_52BIT_VA
- ldr_l x0, vabits_user
- cmp x0, #52
- b.ne 2f
- mrs_s x0, SYS_ID_AA64MMFR2_EL1
- and x0, x0, #(0xf << ID_AA64MMFR2_LVA_SHIFT)
- cbnz x0, 2f
- adr_l x0, va52mismatch
- mov w1, #1
- strb w1, [x0]
- dmb sy
- dc ivac, x0 // Invalidate potentially stale cache line
- update_early_cpu_boot_status CPU_STUCK_IN_KERNEL, x0, x1
- 1: wfe
- wfi
- b 1b
- #endif
- 2: ret
- ENDPROC(__cpu_secondary_check52bitva)
- __no_granule_support:
- /* Indicate that this CPU can't boot and is stuck in the kernel */
- update_early_cpu_boot_status CPU_STUCK_IN_KERNEL, x1, x2
- 1:
- wfe
- wfi
- b 1b
- ENDPROC(__no_granule_support)
- #ifdef CONFIG_RELOCATABLE
- __relocate_kernel:
- /*
- * Iterate over each entry in the relocation table, and apply the
- * relocations in place.
- */
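- /*
- * In C terms, each R_AARCH64_RELATIVE entry is applied roughly as:
- *
- *   *(u64 *)(rela->r_offset + kaslr_offset) = rela->r_addend + kaslr_offset;
- *
- * where kaslr_offset is the displacement held in x23.
- */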
- ldr w9, =__rela_offset // offset to reloc table
- ldr w10, =__rela_size // size of reloc table
- mov_q x11, KIMAGE_VADDR // default virtual offset
- add x11, x11, x23 // actual virtual offset
- add x9, x9, x11 // __va(.rela)
- add x10, x9, x10 // __va(.rela) + sizeof(.rela)
- 0: cmp x9, x10
- b.hs 1f
- ldp x11, x12, [x9], #24
- ldr x13, [x9, #-8]
- cmp w12, #R_AARCH64_RELATIVE
- b.ne 0b
- add x13, x13, x23 // relocate
- str x13, [x11, x23]
- b 0b
- 1: ret
- ENDPROC(__relocate_kernel)
- #endif
- __primary_switch:
- #ifdef CONFIG_RANDOMIZE_BASE
- mov x19, x0 // preserve new SCTLR_EL1 value
- mrs x20, sctlr_el1 // preserve old SCTLR_EL1 value
- #endif
- bl __enable_mmu
- #ifdef CONFIG_RELOCATABLE
- bl __relocate_kernel
- #ifdef CONFIG_RANDOMIZE_BASE
- ldr x8, =__primary_switched
- adrp x0, __PHYS_OFFSET
- blr x8
- /*
- * If we return here, we have a KASLR displacement in x23 which we need
- * to take into account by discarding the current kernel mapping and
- * creating a new one.
- */
- pre_disable_mmu_workaround
- msr sctlr_el1, x20 // disable the MMU
- isb
- bl __create_page_tables // recreate kernel mapping
- tlbi vmalle1 // Remove any stale TLB entries
- dsb nsh
- isb
- msr sctlr_el1, x19 // re-enable the MMU
- isb
- ic iallu // flush instructions fetched
- dsb nsh // via old mapping
- isb
- bl __relocate_kernel
- #endif
- #endif
- ldr x8, =__primary_switched
- adrp x0, __PHYS_OFFSET
- br x8
- ENDPROC(__primary_switch)