// SPDX-License-Identifier: GPL-2.0
/*
 * head64.c - prepare to run common code
 *
 * Copyright (C) 2000 Andrea Arcangeli <andrea@suse.de> SuSE
 */
#define DISABLE_BRANCH_PROFILING

/* cpu_feature_enabled() cannot be used this early */
#define USE_EARLY_PGTABLE_L5

#include <linux/init.h>
#include <linux/linkage.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/percpu.h>
#include <linux/start_kernel.h>
#include <linux/io.h>
#include <linux/memblock.h>
#include <linux/mem_encrypt.h>

#include <asm/processor.h>
#include <asm/proto.h>
#include <asm/smp.h>
#include <asm/setup.h>
#include <asm/desc.h>
#include <asm/pgtable.h>
#include <asm/tlbflush.h>
#include <asm/sections.h>
#include <asm/kdebug.h>
#include <asm/e820/api.h>
#include <asm/bios_ebda.h>
#include <asm/bootparam_utils.h>
#include <asm/microcode.h>
#include <asm/kasan.h>
#include <asm/fixmap.h>

/*
 * Manage page tables very early on.
 */
extern pmd_t early_dynamic_pgts[EARLY_DYNAMIC_PAGE_TABLES][PTRS_PER_PMD];
static unsigned int __initdata next_early_pgt;
pmdval_t early_pmd_flags = __PAGE_KERNEL_LARGE & ~(_PAGE_GLOBAL | _PAGE_NX);

#ifdef CONFIG_X86_5LEVEL
unsigned int __pgtable_l5_enabled __ro_after_init;
unsigned int pgdir_shift __ro_after_init = 39;
EXPORT_SYMBOL(pgdir_shift);
unsigned int ptrs_per_p4d __ro_after_init = 1;
EXPORT_SYMBOL(ptrs_per_p4d);
#endif

#ifdef CONFIG_DYNAMIC_MEMORY_LAYOUT
unsigned long page_offset_base __ro_after_init = __PAGE_OFFSET_BASE_L4;
EXPORT_SYMBOL(page_offset_base);
unsigned long vmalloc_base __ro_after_init = __VMALLOC_BASE_L4;
EXPORT_SYMBOL(vmalloc_base);
unsigned long vmemmap_base __ro_after_init = __VMEMMAP_BASE_L4;
EXPORT_SYMBOL(vmemmap_base);
#endif

#define __head	__section(.head.text)
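
/*
 * At this point the kernel still runs from the identity-mapped physical
 * load address: the page tables set up by the boot path map the image at
 * physaddr, while every global symbol was linked at a __START_KERNEL_map
 * based virtual address. fixup_pointer() rebases a link-time pointer into
 * that physical window. Illustrative (hypothetical) numbers: with _text
 * linked at 0xffffffff81000000 and the kernel loaded at 0x1000000, a
 * global linked at 0xffffffff81001000 is accessed at 0x1001000.
 */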
static void __head *fixup_pointer(void *ptr, unsigned long physaddr)
{
	return ptr - (void *)_text + (void *)physaddr;
}

static unsigned long __head *fixup_long(void *ptr, unsigned long physaddr)
{
	return fixup_pointer(ptr, physaddr);
}
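
/*
 * When 5-level paging (LA57) is in use the paging geometry changes: the
 * top-level shift grows from 39 to 48 bits, the p4d level holds 512
 * entries instead of being folded away, and the direct map, vmalloc and
 * vmemmap areas move to their _L5 bases. check_la57_support() publishes
 * those values, again through fixup_*() because the variables are globals.
 */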
#ifdef CONFIG_X86_5LEVEL
static unsigned int __head *fixup_int(void *ptr, unsigned long physaddr)
{
	return fixup_pointer(ptr, physaddr);
}

static bool __head check_la57_support(unsigned long physaddr)
{
	/*
	 * 5-level paging is detected and enabled at kernel decompression
	 * stage. Only check if it has been enabled there.
	 */
	if (!(native_read_cr4() & X86_CR4_LA57))
		return false;

	*fixup_int(&__pgtable_l5_enabled, physaddr) = 1;
	*fixup_int(&pgdir_shift, physaddr) = 48;
	*fixup_int(&ptrs_per_p4d, physaddr) = 512;
	*fixup_long(&page_offset_base, physaddr) = __PAGE_OFFSET_BASE_L5;
	*fixup_long(&vmalloc_base, physaddr) = __VMALLOC_BASE_L5;
	*fixup_long(&vmemmap_base, physaddr) = __VMEMMAP_BASE_L5;

	return true;
}
#else
static bool __head check_la57_support(unsigned long physaddr)
{
	return false;
}
#endif

/*
 * Code in __startup_64() can be relocated during execution, but the compiler
 * doesn't have to generate PC-relative relocations when accessing globals from
 * that function. Clang actually does not generate them, which leads to
 * boot-time crashes. To work around this problem, every global pointer must
 * be adjusted using fixup_pointer().
 */
unsigned long __head __startup_64(unsigned long physaddr,
				  struct boot_params *bp)
{
	unsigned long vaddr, vaddr_end;
	unsigned long load_delta, *p;
	unsigned long pgtable_flags;
	pgdval_t *pgd;
	p4dval_t *p4d;
	pudval_t *pud;
	pmdval_t *pmd, pmd_entry;
	pteval_t *mask_ptr;
	bool la57;
	int i;
	unsigned int *next_pgt_ptr;

	la57 = check_la57_support(physaddr);

	/* Is the address too large? */
	if (physaddr >> MAX_PHYSMEM_BITS)
		for (;;);

	/*
	 * Compute the delta between the address I am compiled to run at
	 * and the address I am actually running at.
	 */
	load_delta = physaddr - (unsigned long)(_text - __START_KERNEL_map);

	/* Is the address not 2M aligned? */
	if (load_delta & ~PMD_PAGE_MASK)
		for (;;);
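
	/*
	 * Both checks above can only hang on failure: there is no console
	 * and no exception handling this early, so an impossible load
	 * address simply leaves the CPU spinning.
	 */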

	/* Activate Secure Memory Encryption (SME) if supported and enabled */
	sme_enable(bp);

	/* Include the SME encryption mask in the fixup value */
	load_delta += sme_get_me_mask();

	/* Fixup the physical addresses in the page table */
	pgd = fixup_pointer(&early_top_pgt, physaddr);
	p = pgd + pgd_index(__START_KERNEL_map);
	if (la57)
		*p = (unsigned long)level4_kernel_pgt;
	else
		*p = (unsigned long)level3_kernel_pgt;
	*p += _PAGE_TABLE_NOENC - __START_KERNEL_map + load_delta;

	if (la57) {
		p4d = fixup_pointer(&level4_kernel_pgt, physaddr);
		p4d[511] += load_delta;
	}
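
	/*
	 * level3_kernel_pgt covers the top of the kernel address space:
	 * entry 510 points at level2_kernel_pgt (the kernel image mapping)
	 * and entry 511 at level2_fixmap_pgt, so both get the same delta.
	 */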
	pud = fixup_pointer(&level3_kernel_pgt, physaddr);
	pud[510] += load_delta;
	pud[511] += load_delta;

	pmd = fixup_pointer(level2_fixmap_pgt, physaddr);
	for (i = FIXMAP_PMD_TOP; i > FIXMAP_PMD_TOP - FIXMAP_PMD_NUM; i--)
		pmd[i] += load_delta;

	/*
	 * Set up the identity mapping for the switchover.  These
	 * entries should *NOT* have the global bit set!  This also
	 * creates a bunch of nonsense entries but that is fine --
	 * it avoids problems around wraparound.
	 */
	next_pgt_ptr = fixup_pointer(&next_early_pgt, physaddr);
	pud = fixup_pointer(early_dynamic_pgts[(*next_pgt_ptr)++], physaddr);
	pmd = fixup_pointer(early_dynamic_pgts[(*next_pgt_ptr)++], physaddr);

	pgtable_flags = _KERNPG_TABLE_NOENC + sme_get_me_mask();

	if (la57) {
		p4d = fixup_pointer(early_dynamic_pgts[(*next_pgt_ptr)++],
				    physaddr);

		i = (physaddr >> PGDIR_SHIFT) % PTRS_PER_PGD;
		pgd[i + 0] = (pgdval_t)p4d + pgtable_flags;
		pgd[i + 1] = (pgdval_t)p4d + pgtable_flags;

		i = physaddr >> P4D_SHIFT;
		p4d[(i + 0) % PTRS_PER_P4D] = (pgdval_t)pud + pgtable_flags;
		p4d[(i + 1) % PTRS_PER_P4D] = (pgdval_t)pud + pgtable_flags;
	} else {
		i = (physaddr >> PGDIR_SHIFT) % PTRS_PER_PGD;
		pgd[i + 0] = (pgdval_t)pud + pgtable_flags;
		pgd[i + 1] = (pgdval_t)pud + pgtable_flags;
	}

	i = physaddr >> PUD_SHIFT;
	pud[(i + 0) % PTRS_PER_PUD] = (pudval_t)pmd + pgtable_flags;
	pud[(i + 1) % PTRS_PER_PUD] = (pudval_t)pmd + pgtable_flags;
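
	/*
	 * Two consecutive entries are written at every level because the
	 * image may straddle an entry boundary at that level; covering the
	 * neighbouring entry as well guarantees the whole kernel is mapped
	 * no matter where physaddr falls within an entry's range.
	 */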

	pmd_entry = __PAGE_KERNEL_LARGE_EXEC & ~_PAGE_GLOBAL;
	/* Filter out unsupported __PAGE_KERNEL_* bits: */
	mask_ptr = fixup_pointer(&__supported_pte_mask, physaddr);
	pmd_entry &= *mask_ptr;
	pmd_entry += sme_get_me_mask();
	pmd_entry += physaddr;

	for (i = 0; i < DIV_ROUND_UP(_end - _text, PMD_SIZE); i++) {
		int idx = i + (physaddr >> PMD_SHIFT);

		pmd[idx % PTRS_PER_PMD] = pmd_entry + i * PMD_SIZE;
	}
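
	/*
	 * Illustrative arithmetic with hypothetical numbers: for a 40 MiB
	 * image loaded at 0x1000000, the loop above writes
	 * DIV_ROUND_UP(40 MiB, 2 MiB) = 20 pmd entries starting at index
	 * (0x1000000 >> PMD_SHIFT) % 512 = 8, each one a 2 MiB large page.
	 */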

	/*
	 * Fixup the kernel text+data virtual addresses. Note that
	 * we might write invalid pmds, when the kernel is relocated
	 * cleanup_highmap() fixes this up along with the mappings
	 * beyond _end.
	 *
	 * Only the region occupied by the kernel image has so far
	 * been checked against the table of usable memory regions
	 * provided by the firmware, so invalidate pages outside that
	 * region. A page table entry that maps to a reserved area of
	 * memory would allow processor speculation into that area,
	 * and on some hardware (particularly the UV platform) even
	 * speculative access to some reserved areas is caught as an
	 * error, causing the BIOS to halt the system.
	 */
	pmd = fixup_pointer(level2_kernel_pgt, physaddr);

	/* invalidate pages before the kernel image */
	for (i = 0; i < pmd_index((unsigned long)_text); i++)
		pmd[i] &= ~_PAGE_PRESENT;

	/* fixup pages that are part of the kernel image */
	for (; i <= pmd_index((unsigned long)_end); i++)
		if (pmd[i] & _PAGE_PRESENT)
			pmd[i] += load_delta;

	/* invalidate pages after the kernel image */
	for (; i < PTRS_PER_PMD; i++)
		pmd[i] &= ~_PAGE_PRESENT;

	/*
	 * Fixup phys_base - remove the memory encryption mask to obtain
	 * the true physical address.
	 */
	*fixup_long(&phys_base, physaddr) += load_delta - sme_get_me_mask();

	/* Encrypt the kernel and related (if SME is active) */
	sme_encrypt_kernel(bp);

	/*
	 * Clear the memory encryption mask from the .bss..decrypted section.
	 * The bss section will be memset to zero later in the initialization so
	 * there is no need to zero it after changing the memory encryption
	 * attribute.
	 */
	if (mem_encrypt_active()) {
		vaddr = (unsigned long)__start_bss_decrypted;
		vaddr_end = (unsigned long)__end_bss_decrypted;

		for (; vaddr < vaddr_end; vaddr += PMD_SIZE) {
			i = pmd_index(vaddr);
			pmd[i] -= sme_get_me_mask();
		}
	}

	/*
	 * Return the SME encryption mask (if SME is active) to be used as a
	 * modifier for the initial pgdir entry programmed into CR3.
	 */
	return sme_get_me_mask();
}

unsigned long __startup_secondary_64(void)
{
	/*
	 * Return the SME encryption mask (if SME is active) to be used as a
	 * modifier for the initial pgdir entry programmed into CR3.
	 */
	return sme_get_me_mask();
}

/* Wipe all early page tables except for the kernel symbol map */
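/*
 * The memset below clears only PTRS_PER_PGD - 1 entries: the last slot,
 * entry 511, holds the __START_KERNEL_map mapping of the kernel image and
 * must survive the CR3 reload so the currently executing code stays mapped.
 */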
static void __init reset_early_page_tables(void)
{
	memset(early_top_pgt, 0, sizeof(pgd_t) * (PTRS_PER_PGD - 1));
	next_early_pgt = 0;
	write_cr3(__sme_pa_nodebug(early_top_pgt));
}

/* Create a new PMD entry */
int __init __early_make_pgtable(unsigned long address, pmdval_t pmd)
{
	unsigned long physaddr = address - __PAGE_OFFSET;
	pgdval_t pgd, *pgd_p;
	p4dval_t p4d, *p4d_p;
	pudval_t pud, *pud_p;
	pmdval_t *pmd_p;

	/* Invalid address or early pgt is done? */
	if (physaddr >= MAXMEM || read_cr3_pa() != __pa_nodebug(early_top_pgt))
		return -1;
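
	/*
	 * Each missing level below is carved out of the fixed pool of
	 * early_dynamic_pgts. If the pool runs dry, reset_early_page_tables()
	 * wipes everything and the walk restarts at the 'again' label;
	 * mappings discarded that way are simply rebuilt on later faults.
	 */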
again:
	pgd_p = &early_top_pgt[pgd_index(address)].pgd;
	pgd = *pgd_p;

	/*
	 * The use of __START_KERNEL_map rather than __PAGE_OFFSET here is
	 * critical -- __PAGE_OFFSET would point us back into the dynamic
	 * range and we might end up looping forever...
	 */
	if (!pgtable_l5_enabled())
		p4d_p = pgd_p;
	else if (pgd)
		p4d_p = (p4dval_t *)((pgd & PTE_PFN_MASK) + __START_KERNEL_map - phys_base);
	else {
		if (next_early_pgt >= EARLY_DYNAMIC_PAGE_TABLES) {
			reset_early_page_tables();
			goto again;
		}

		p4d_p = (p4dval_t *)early_dynamic_pgts[next_early_pgt++];
		memset(p4d_p, 0, sizeof(*p4d_p) * PTRS_PER_P4D);
		*pgd_p = (pgdval_t)p4d_p - __START_KERNEL_map + phys_base + _KERNPG_TABLE;
	}

	p4d_p += p4d_index(address);
	p4d = *p4d_p;

	if (p4d)
		pud_p = (pudval_t *)((p4d & PTE_PFN_MASK) + __START_KERNEL_map - phys_base);
	else {
		if (next_early_pgt >= EARLY_DYNAMIC_PAGE_TABLES) {
			reset_early_page_tables();
			goto again;
		}

		pud_p = (pudval_t *)early_dynamic_pgts[next_early_pgt++];
		memset(pud_p, 0, sizeof(*pud_p) * PTRS_PER_PUD);
		*p4d_p = (p4dval_t)pud_p - __START_KERNEL_map + phys_base + _KERNPG_TABLE;
	}

	pud_p += pud_index(address);
	pud = *pud_p;

	if (pud)
		pmd_p = (pmdval_t *)((pud & PTE_PFN_MASK) + __START_KERNEL_map - phys_base);
	else {
		if (next_early_pgt >= EARLY_DYNAMIC_PAGE_TABLES) {
			reset_early_page_tables();
			goto again;
		}

		pmd_p = (pmdval_t *)early_dynamic_pgts[next_early_pgt++];
		memset(pmd_p, 0, sizeof(*pmd_p) * PTRS_PER_PMD);
		*pud_p = (pudval_t)pmd_p - __START_KERNEL_map + phys_base + _KERNPG_TABLE;
	}

	pmd_p[pmd_index(address)] = pmd;

	return 0;
}
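
/*
 * Called from the early page-fault path to build direct-map
 * (__PAGE_OFFSET based) translations on demand; the faulting address is
 * backed by a 2 MiB large page carrying early_pmd_flags.
 */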
int __init early_make_pgtable(unsigned long address)
{
	unsigned long physaddr = address - __PAGE_OFFSET;
	pmdval_t pmd;

	pmd = (physaddr & PMD_MASK) + early_pmd_flags;

	return __early_make_pgtable(address, pmd);
}

/*
 * Don't add a printk in there. printk relies on the PDA which is not
 * initialized yet.
 */
static void __init clear_bss(void)
{
	memset(__bss_start, 0,
	       (unsigned long) __bss_stop - (unsigned long) __bss_start);
}
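
/*
 * The boot protocol splits the command-line pointer: the low 32 bits live
 * in hdr.cmd_line_ptr and, for loaders placing it above 4 GiB, the high 32
 * bits in boot_params.ext_cmd_line_ptr.
 */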
static unsigned long get_cmd_line_ptr(void)
{
	unsigned long cmd_line_ptr = boot_params.hdr.cmd_line_ptr;

	cmd_line_ptr |= (u64)boot_params.ext_cmd_line_ptr << 32;

	return cmd_line_ptr;
}

static void __init copy_bootdata(char *real_mode_data)
{
	char *command_line;
	unsigned long cmd_line_ptr;

	/*
	 * If SME is active, this will create decrypted mappings of the
	 * boot data in advance of the copy operations.
	 */
	sme_map_bootdata(real_mode_data);

	memcpy(&boot_params, real_mode_data, sizeof(boot_params));
	sanitize_boot_params(&boot_params);
	cmd_line_ptr = get_cmd_line_ptr();
	if (cmd_line_ptr) {
		command_line = __va(cmd_line_ptr);
		memcpy(boot_command_line, command_line, COMMAND_LINE_SIZE);
	}

	/*
	 * The old boot data is no longer needed and won't be reserved,
	 * freeing up that memory for use by the system. If SME is active,
	 * we need to remove the mappings that were created so that the
	 * memory doesn't remain mapped as decrypted.
	 */
	sme_unmap_bootdata(real_mode_data);
}

asmlinkage __visible void __init x86_64_start_kernel(char *real_mode_data)
{
	/*
	 * Build-time sanity checks on the kernel image and module
	 * area mappings. (these are purely build-time and produce no code)
	 */
	BUILD_BUG_ON(MODULES_VADDR < __START_KERNEL_map);
	BUILD_BUG_ON(MODULES_VADDR - __START_KERNEL_map < KERNEL_IMAGE_SIZE);
	BUILD_BUG_ON(MODULES_LEN + KERNEL_IMAGE_SIZE > 2*PUD_SIZE);
	BUILD_BUG_ON((__START_KERNEL_map & ~PMD_MASK) != 0);
	BUILD_BUG_ON((MODULES_VADDR & ~PMD_MASK) != 0);
	BUILD_BUG_ON(!(MODULES_VADDR > __START_KERNEL));
	MAYBE_BUILD_BUG_ON(!(((MODULES_END - 1) & PGDIR_MASK) ==
			     (__START_KERNEL & PGDIR_MASK)));
	BUILD_BUG_ON(__fix_to_virt(__end_of_fixed_addresses) <= MODULES_END);

	cr4_init_shadow();

	/* Kill off the identity-map trampoline */
	reset_early_page_tables();

	clear_bss();

	clear_page(init_top_pgt);

	/*
	 * SME support may update early_pmd_flags to include the memory
	 * encryption mask, so it needs to be called before anything
	 * that may generate a page fault.
	 */
	sme_early_init();

	kasan_early_init();

	idt_setup_early_handler();

	copy_bootdata(__va(real_mode_data));

	/*
	 * Load microcode early on BSP.
	 */
	load_ucode_bsp();
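
	/*
	 * Only the last pgd slot is carried over into the final top-level
	 * table: entry 511 covers __START_KERNEL_map, i.e. the kernel image
	 * mapping fixed up in __startup_64(). The rest is rebuilt properly
	 * later, once memory initialization builds the real direct map.
	 */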
	/* set init_top_pgt kernel high mapping */
	init_top_pgt[511] = early_top_pgt[511];

	x86_64_start_reservations(real_mode_data);
}

void __init x86_64_start_reservations(char *real_mode_data)
{
	/* version is always non-zero if the boot data has already been copied */
	if (!boot_params.hdr.version)
		copy_bootdata(__va(real_mode_data));

	x86_early_init_platform_quirks();

	switch (boot_params.hdr.hardware_subarch) {
	case X86_SUBARCH_INTEL_MID:
		x86_intel_mid_early_setup();
		break;
	default:
		break;
	}

	start_kernel();
}