  1. // SPDX-License-Identifier: GPL-2.0
  2. /*
  3. * x86_64 specific EFI support functions
  4. * Based on Extensible Firmware Interface Specification version 1.0
  5. *
  6. * Copyright (C) 2005-2008 Intel Co.
  7. * Fenghua Yu <fenghua.yu@intel.com>
  8. * Bibo Mao <bibo.mao@intel.com>
  9. * Chandramouli Narayanan <mouli@linux.intel.com>
  10. * Huang Ying <ying.huang@intel.com>
  11. *
  12. * Code to convert EFI to E820 map has been implemented in elilo bootloader
  13. * based on a EFI patch by Edgar Hucek. Based on the E820 map, the page table
  14. * is setup appropriately for EFI runtime code.
  15. * - mouli 06/14/2007.
  16. *
  17. */
  18. #define pr_fmt(fmt) "efi: " fmt
  19. #include <linux/kernel.h>
  20. #include <linux/init.h>
  21. #include <linux/mm.h>
  22. #include <linux/types.h>
  23. #include <linux/spinlock.h>
  24. #include <linux/memblock.h>
  25. #include <linux/ioport.h>
  26. #include <linux/mc146818rtc.h>
  27. #include <linux/efi.h>
  28. #include <linux/export.h>
  29. #include <linux/uaccess.h>
  30. #include <linux/io.h>
  31. #include <linux/reboot.h>
  32. #include <linux/slab.h>
  33. #include <linux/ucs2_string.h>
  34. #include <linux/cc_platform.h>
  35. #include <linux/sched/task.h>
  36. #include <asm/setup.h>
  37. #include <asm/page.h>
  38. #include <asm/e820/api.h>
  39. #include <asm/tlbflush.h>
  40. #include <asm/proto.h>
  41. #include <asm/efi.h>
  42. #include <asm/cacheflush.h>
  43. #include <asm/fixmap.h>
  44. #include <asm/realmode.h>
  45. #include <asm/time.h>
  46. #include <asm/pgalloc.h>
  47. #include <asm/sev.h>
  48. /*
  49. * We allocate runtime services regions top-down, starting from -4G, i.e.
  50. * 0xffff_ffff_0000_0000 and limit EFI VA mapping space to 64G.
  51. */
/* Next free VA for the top-down runtime-region allocation described above. */
static u64 efi_va = EFI_VA_START;

/* The mm that was active before efi_enter_mm() switched to efi_mm. */
static struct mm_struct *efi_prev_mm;
  54. /*
  55. * We need our own copy of the higher levels of the page tables
  56. * because we want to avoid inserting EFI region mappings (EFI_VA_END
  57. * to EFI_VA_START) into the standard kernel page tables. Everything
  58. * else can be shared, see efi_sync_low_kernel_mappings().
  59. *
  60. * We don't want the pgd on the pgd_list and cannot use pgd_alloc() for the
  61. * allocation.
  62. */
/*
 * Allocate the dedicated EFI pgd and pre-populate the p4d/pud levels
 * covering EFI_VA_END, then install it as efi_mm's page-table root.
 *
 * Returns 0 on success, -ENOMEM on allocation failure; partially
 * allocated levels are freed again on the error paths.
 */
int __init efi_alloc_page_tables(void)
{
	pgd_t *pgd, *efi_pgd;
	p4d_t *p4d;
	pud_t *pud;
	gfp_t gfp_mask;

	gfp_mask = GFP_KERNEL | __GFP_ZERO;
	efi_pgd = (pgd_t *)__get_free_pages(gfp_mask, PGD_ALLOCATION_ORDER);
	if (!efi_pgd)
		goto fail;

	pgd = efi_pgd + pgd_index(EFI_VA_END);
	p4d = p4d_alloc(&init_mm, pgd, EFI_VA_END);
	if (!p4d)
		goto free_pgd;

	pud = pud_alloc(&init_mm, p4d, EFI_VA_END);
	if (!pud)
		goto free_p4d;

	efi_mm.pgd = efi_pgd;
	mm_init_cpumask(&efi_mm);
	init_new_context(NULL, &efi_mm);

	return 0;

free_p4d:
	/*
	 * With 5-level paging the p4d page was allocated separately and
	 * hangs off the pgd entry; free it before dropping the pgd.
	 */
	if (pgtable_l5_enabled())
		free_page((unsigned long)pgd_page_vaddr(*pgd));
free_pgd:
	free_pages((unsigned long)efi_pgd, PGD_ALLOCATION_ORDER);
fail:
	return -ENOMEM;
}
  92. /*
  93. * Add low kernel mappings for passing arguments to EFI functions.
  94. */
void efi_sync_low_kernel_mappings(void)
{
	unsigned num_entries;
	pgd_t *pgd_k, *pgd_efi;
	p4d_t *p4d_k, *p4d_efi;
	pud_t *pud_k, *pud_efi;
	pgd_t *efi_pgd = efi_mm.pgd;

	/* Share the PGD entries from PAGE_OFFSET up to the EFI region. */
	pgd_efi = efi_pgd + pgd_index(PAGE_OFFSET);
	pgd_k = pgd_offset_k(PAGE_OFFSET);

	num_entries = pgd_index(EFI_VA_END) - pgd_index(PAGE_OFFSET);
	memcpy(pgd_efi, pgd_k, sizeof(pgd_t) * num_entries);

	/* Share the P4D entries below EFI_VA_END within its PGD entry. */
	pgd_efi = efi_pgd + pgd_index(EFI_VA_END);
	pgd_k = pgd_offset_k(EFI_VA_END);
	p4d_efi = p4d_offset(pgd_efi, 0);
	p4d_k = p4d_offset(pgd_k, 0);

	num_entries = p4d_index(EFI_VA_END);
	memcpy(p4d_efi, p4d_k, sizeof(p4d_t) * num_entries);

	/*
	 * We share all the PUD entries apart from those that map the
	 * EFI regions. Copy around them.
	 */
	BUILD_BUG_ON((EFI_VA_START & ~PUD_MASK) != 0);
	BUILD_BUG_ON((EFI_VA_END & ~PUD_MASK) != 0);

	p4d_efi = p4d_offset(pgd_efi, EFI_VA_END);
	p4d_k = p4d_offset(pgd_k, EFI_VA_END);
	pud_efi = pud_offset(p4d_efi, 0);
	pud_k = pud_offset(p4d_k, 0);

	/* PUD entries below EFI_VA_END ... */
	num_entries = pud_index(EFI_VA_END);
	memcpy(pud_efi, pud_k, sizeof(pud_t) * num_entries);

	/* ... and PUD entries from EFI_VA_START upwards. */
	pud_efi = pud_offset(p4d_efi, EFI_VA_START);
	pud_k = pud_offset(p4d_k, EFI_VA_START);

	num_entries = PTRS_PER_PUD - pud_index(EFI_VA_START);
	memcpy(pud_efi, pud_k, sizeof(pud_t) * num_entries);
}
  129. /*
  130. * Wrapper for slow_virt_to_phys() that handles NULL addresses.
  131. */
  132. static inline phys_addr_t
  133. virt_to_phys_or_null_size(void *va, unsigned long size)
  134. {
  135. phys_addr_t pa;
  136. if (!va)
  137. return 0;
  138. if (virt_addr_valid(va))
  139. return virt_to_phys(va);
  140. pa = slow_virt_to_phys(va);
  141. /* check if the object crosses a page boundary */
  142. if (WARN_ON((pa ^ (pa + size - 1)) & PAGE_MASK))
  143. return 0;
  144. return pa;
  145. }
/* Shorthand that derives the object size from the pointed-to type. */
#define virt_to_phys_or_null(addr) \
	virt_to_phys_or_null_size((addr), sizeof(*(addr)))
/*
 * Prepare the EFI page tables for the SetVirtualAddressMap() call:
 * ident-map the new memory map, physical page 0 and the SEV-ES GHCBs
 * and, when booting in mixed mode, additionally map kernel rodata plus
 * the 64-bit return trampoline and allocate a below-4GB stack.
 *
 * Returns 0 on success, 1 on any mapping or allocation failure.
 */
int __init efi_setup_page_tables(unsigned long pa_memmap, unsigned num_pages)
{
	extern const u8 __efi64_thunk_ret_tramp[];
	unsigned long pfn, text, pf, rodata, tramp;
	struct page *page;
	unsigned npages;
	pgd_t *pgd = efi_mm.pgd;

	/*
	 * It can happen that the physical address of new_memmap lands in memory
	 * which is not mapped in the EFI page table. Therefore we need to go
	 * and ident-map those pages containing the map before calling
	 * phys_efi_set_virtual_address_map().
	 */
	pfn = pa_memmap >> PAGE_SHIFT;
	pf = _PAGE_NX | _PAGE_RW | _PAGE_ENC;
	if (kernel_map_pages_in_pgd(pgd, pfn, pa_memmap, num_pages, pf)) {
		pr_err("Error ident-mapping new memmap (0x%lx)!\n", pa_memmap);
		return 1;
	}

	/*
	 * Certain firmware versions are way too sentimental and still believe
	 * they are exclusive and unquestionable owners of the first physical page,
	 * even though they explicitly mark it as EFI_CONVENTIONAL_MEMORY
	 * (but then write-access it later during SetVirtualAddressMap()).
	 *
	 * Create a 1:1 mapping for this page, to avoid triple faults during early
	 * boot with such firmware. We are free to hand this page to the BIOS,
	 * as trim_bios_range() will reserve the first page and isolate it away
	 * from memory allocators anyway.
	 */
	if (kernel_map_pages_in_pgd(pgd, 0x0, 0x0, 1, pf)) {
		pr_err("Failed to create 1:1 mapping for the first page!\n");
		return 1;
	}

	/*
	 * When SEV-ES is active, the GHCB as set by the kernel will be used
	 * by firmware. Create a 1:1 unencrypted mapping for each GHCB.
	 */
	if (sev_es_efi_map_ghcbs(pgd)) {
		pr_err("Failed to create 1:1 mapping for the GHCBs!\n");
		return 1;
	}

	/*
	 * When making calls to the firmware everything needs to be 1:1
	 * mapped and addressable with 32-bit pointers. Map the kernel
	 * text and allocate a new stack because we can't rely on the
	 * stack pointer being < 4GB.
	 */
	if (!efi_is_mixed())
		return 0;

	page = alloc_page(GFP_KERNEL|__GFP_DMA32);
	if (!page) {
		pr_err("Unable to allocate EFI runtime stack < 4GB\n");
		return 1;
	}

	efi_mixed_mode_stack_pa = page_to_phys(page + 1); /* stack grows down */

	/* Remove the 1:1 mapping of the kernel text ... */
	npages = (_etext - _text) >> PAGE_SHIFT;
	text = __pa(_text);

	if (kernel_unmap_pages_in_pgd(pgd, text, npages)) {
		pr_err("Failed to unmap kernel text 1:1 mapping\n");
		return 1;
	}

	/* ... and map rodata read-only and non-executable instead. */
	npages = (__end_rodata - __start_rodata) >> PAGE_SHIFT;
	rodata = __pa(__start_rodata);
	pfn = rodata >> PAGE_SHIFT;

	pf = _PAGE_NX | _PAGE_ENC;
	if (kernel_map_pages_in_pgd(pgd, pfn, rodata, npages, pf)) {
		pr_err("Failed to map kernel rodata 1:1\n");
		return 1;
	}

	/* The return trampoline must stay executable: no _PAGE_NX here. */
	tramp = __pa(__efi64_thunk_ret_tramp);
	pfn = tramp >> PAGE_SHIFT;

	pf = _PAGE_ENC;
	if (kernel_map_pages_in_pgd(pgd, pfn, tramp, 1, pf)) {
		pr_err("Failed to map mixed mode return trampoline\n");
		return 1;
	}

	return 0;
}
/*
 * Map memory descriptor @md at virtual address @va in the EFI page
 * tables, deriving the page protections from the descriptor's type
 * and attribute bits.
 */
static void __init __map_region(efi_memory_desc_t *md, u64 va)
{
	unsigned long flags = _PAGE_RW;
	unsigned long pfn;
	pgd_t *pgd = efi_mm.pgd;

	/*
	 * EFI_RUNTIME_SERVICES_CODE regions typically cover PE/COFF
	 * executable images in memory that consist of both R-X and
	 * RW- sections, so we cannot apply read-only or non-exec
	 * permissions just yet. However, modern EFI systems provide
	 * a memory attributes table that describes those sections
	 * with the appropriate restricted permissions, which are
	 * applied in efi_runtime_update_mappings() below. All other
	 * regions can be mapped non-executable at this point, with
	 * the exception of boot services code regions, but those will
	 * be unmapped again entirely in efi_free_boot_services().
	 */
	if (md->type != EFI_BOOT_SERVICES_CODE &&
	    md->type != EFI_RUNTIME_SERVICES_CODE)
		flags |= _PAGE_NX;

	/* Non write-back regions are mapped with caching disabled. */
	if (!(md->attribute & EFI_MEMORY_WB))
		flags |= _PAGE_PCD;

	/* Under guest memory encryption, map encrypted -- except MMIO. */
	if (cc_platform_has(CC_ATTR_GUEST_MEM_ENCRYPT) &&
	    md->type != EFI_MEMORY_MAPPED_IO)
		flags |= _PAGE_ENC;

	pfn = md->phys_addr >> PAGE_SHIFT;
	if (kernel_map_pages_in_pgd(pgd, pfn, va, md->num_pages, flags))
		pr_warn("Error mapping PA 0x%llx -> VA 0x%llx!\n",
			md->phys_addr, va);
}
/*
 * Create the runtime mappings for @md and record the chosen virtual
 * address in md->virt_addr.  A 1:1 mapping is always installed as
 * well; in mixed mode that 1:1 address doubles as the virtual address.
 */
void __init efi_map_region(efi_memory_desc_t *md)
{
	unsigned long size = md->num_pages << PAGE_SHIFT;
	u64 pa = md->phys_addr;

	/*
	 * Make sure the 1:1 mappings are present as a catch-all for b0rked
	 * firmware which doesn't update all internal pointers after switching
	 * to virtual mode and would otherwise crap on us.
	 */
	__map_region(md, md->phys_addr);

	/*
	 * Enforce the 1:1 mapping as the default virtual address when
	 * booting in EFI mixed mode, because even though we may be
	 * running a 64-bit kernel, the firmware may only be 32-bit.
	 */
	if (efi_is_mixed()) {
		md->virt_addr = md->phys_addr;
		return;
	}

	/* Allocate top-down: carve the region out below the current efi_va. */
	efi_va -= size;

	/* Is PA 2M-aligned? */
	if (!(pa & (PMD_SIZE - 1))) {
		efi_va &= PMD_MASK;
	} else {
		u64 pa_offset = pa & (PMD_SIZE - 1);
		u64 prev_va = efi_va;

		/* get us the same offset within this 2M page */
		efi_va = (efi_va & PMD_MASK) + pa_offset;

		/* Moving up to the offset may overlap; back off one 2M page. */
		if (efi_va > prev_va)
			efi_va -= PMD_SIZE;
	}

	/* Ran out of the 64G EFI VA window: leave md->virt_addr untouched. */
	if (efi_va < EFI_VA_END) {
		pr_warn(FW_WARN "VA address range overflow!\n");
		return;
	}

	/* Do the VA map */
	__map_region(md, efi_va);
	md->virt_addr = efi_va;
}
  296. /*
  297. * kexec kernel will use efi_map_region_fixed to map efi runtime memory ranges.
  298. * md->virt_addr is the original virtual address which had been mapped in kexec
  299. * 1st kernel.
  300. */
void __init efi_map_region_fixed(efi_memory_desc_t *md)
{
	/* Install both the 1:1 mapping and the inherited VA mapping. */
	__map_region(md, md->phys_addr);
	__map_region(md, md->virt_addr);
}
/*
 * Stash the physical address of the EFI setup_data payload: the data
 * follows immediately after the setup_data header.  @data_len is unused.
 */
void __init parse_efi_setup(u64 phys_addr, u32 data_len)
{
	efi_setup = phys_addr + sizeof(struct setup_data);
}
  310. static int __init efi_update_mappings(efi_memory_desc_t *md, unsigned long pf)
  311. {
  312. unsigned long pfn;
  313. pgd_t *pgd = efi_mm.pgd;
  314. int err1, err2;
  315. /* Update the 1:1 mapping */
  316. pfn = md->phys_addr >> PAGE_SHIFT;
  317. err1 = kernel_map_pages_in_pgd(pgd, pfn, md->phys_addr, md->num_pages, pf);
  318. if (err1) {
  319. pr_err("Error while updating 1:1 mapping PA 0x%llx -> VA 0x%llx!\n",
  320. md->phys_addr, md->virt_addr);
  321. }
  322. err2 = kernel_map_pages_in_pgd(pgd, pfn, md->virt_addr, md->num_pages, pf);
  323. if (err2) {
  324. pr_err("Error while updating VA mapping PA 0x%llx -> VA 0x%llx!\n",
  325. md->phys_addr, md->virt_addr);
  326. }
  327. return err1 || err2;
  328. }
/*
 * Default to disabling IBT around EFI runtime calls; cleared only when
 * the firmware's memory attribute table indicates IBT support (see
 * efi_update_mem_attr() / efi_runtime_update_mappings()).
 */
bool efi_disable_ibt_for_runtime __ro_after_init = true;
/*
 * Callback for efi_memattr_apply_permissions(): translate the EFI
 * attributes of @md into page-protection flags and update both of its
 * mappings.  Also tracks whether every region advertises IBT support.
 */
static int __init efi_update_mem_attr(struct mm_struct *mm, efi_memory_desc_t *md,
				      bool has_ibt)
{
	unsigned long pf = 0;

	/* A single region without IBT support disables IBT for all calls. */
	efi_disable_ibt_for_runtime |= !has_ibt;

	if (md->attribute & EFI_MEMORY_XP)
		pf |= _PAGE_NX;

	if (!(md->attribute & EFI_MEMORY_RO))
		pf |= _PAGE_RW;

	if (cc_platform_has(CC_ATTR_GUEST_MEM_ENCRYPT))
		pf |= _PAGE_ENC;

	return efi_update_mappings(md, pf);
}
/*
 * Tighten the protections of the EFI runtime mappings: prefer the EFI
 * Memory Attribute Table when present, otherwise fall back to the
 * attribute bits in the memory map (EFI_PROPERTIES_TABLE style).
 */
void __init efi_runtime_update_mappings(void)
{
	efi_memory_desc_t *md;

	/*
	 * Use the EFI Memory Attribute Table for mapping permissions if it
	 * exists, since it is intended to supersede EFI_PROPERTIES_TABLE.
	 */
	if (efi_enabled(EFI_MEM_ATTR)) {
		efi_disable_ibt_for_runtime = false;
		efi_memattr_apply_permissions(NULL, efi_update_mem_attr);
		return;
	}

	/*
	 * EFI_MEMORY_ATTRIBUTES_TABLE is intended to replace
	 * EFI_PROPERTIES_TABLE. So, use EFI_PROPERTIES_TABLE to update
	 * permissions only if EFI_MEMORY_ATTRIBUTES_TABLE is not
	 * published by the firmware. Even if we find a buggy implementation of
	 * EFI_MEMORY_ATTRIBUTES_TABLE, don't fall back to
	 * EFI_PROPERTIES_TABLE, because of the same reason.
	 */
	if (!efi_enabled(EFI_NX_PE_DATA))
		return;

	for_each_efi_memory_desc(md) {
		unsigned long pf = 0;

		if (!(md->attribute & EFI_MEMORY_RUNTIME))
			continue;

		/* Non write-back regions are mapped with caching disabled. */
		if (!(md->attribute & EFI_MEMORY_WB))
			pf |= _PAGE_PCD;

		/* XP regions and runtime data are mapped non-executable. */
		if ((md->attribute & EFI_MEMORY_XP) ||
		    (md->type == EFI_RUNTIME_SERVICES_DATA))
			pf |= _PAGE_NX;

		/* RO regions and runtime code stay read-only. */
		if (!(md->attribute & EFI_MEMORY_RO) &&
		    (md->type != EFI_RUNTIME_SERVICES_CODE))
			pf |= _PAGE_RW;

		if (cc_platform_has(CC_ATTR_GUEST_MEM_ENCRYPT))
			pf |= _PAGE_ENC;

		efi_update_mappings(md, pf);
	}
}
/* Dump the EFI page tables when CONFIG_EFI_PGT_DUMP is enabled. */
void __init efi_dump_pagetable(void)
{
#ifdef CONFIG_EFI_PGT_DUMP
	ptdump_walk_pgd_level(NULL, &efi_mm);
#endif
}
  388. /*
  389. * Makes the calling thread switch to/from efi_mm context. Can be used
  390. * in a kernel thread and user context. Preemption needs to remain disabled
  391. * while the EFI-mm is borrowed. mmgrab()/mmdrop() is not used because the mm
  392. * can not change under us.
  393. * It should be ensured that there are no concurrent calls to this function.
  394. */
static void efi_enter_mm(void)
{
	/* Remember the borrowed mm so efi_leave_mm() can restore it. */
	efi_prev_mm = current->active_mm;
	current->active_mm = &efi_mm;
	switch_mm(efi_prev_mm, &efi_mm, NULL);
}
/* Counterpart of efi_enter_mm(): restore the previously active mm. */
static void efi_leave_mm(void)
{
	current->active_mm = efi_prev_mm;
	switch_mm(&efi_mm, efi_prev_mm, NULL);
}
/*
 * Prepare the calling context for an EFI runtime-service call: sync the
 * kernel mappings into the EFI pgd, begin an EFI FPU section, fence
 * branch speculation and switch to efi_mm.  Must be paired with
 * arch_efi_call_virt_teardown().
 */
void arch_efi_call_virt_setup(void)
{
	efi_sync_low_kernel_mappings();
	efi_fpu_begin();
	firmware_restrict_branch_speculation_start();
	efi_enter_mm();
}
/* Undo arch_efi_call_virt_setup(), in reverse order. */
void arch_efi_call_virt_teardown(void)
{
	efi_leave_mm();
	firmware_restrict_branch_speculation_end();
	efi_fpu_end();
}
  419. static DEFINE_SPINLOCK(efi_runtime_lock);
  420. /*
  421. * DS and ES contain user values. We need to save them.
  422. * The 32-bit EFI code needs a valid DS, ES, and SS. There's no
  423. * need to save the old SS: __KERNEL_DS is always acceptable.
  424. */
#define __efi_thunk(func, ...)						\
({									\
	unsigned short __ds, __es;					\
	efi_status_t ____s;						\
									\
	savesegment(ds, __ds);						\
	savesegment(es, __es);						\
									\
	loadsegment(ss, __KERNEL_DS);					\
	loadsegment(ds, __KERNEL_DS);					\
	loadsegment(es, __KERNEL_DS);					\
									\
	/* Call the 32-bit firmware entry point via the 64-bit thunk. */\
	____s = efi64_thunk(efi.runtime->mixed_mode.func, __VA_ARGS__);	\
									\
	loadsegment(ds, __ds);						\
	loadsegment(es, __es);						\
									\
	/*								\
	 * The firmware returned a 32-bit status: if its error bit	\
	 * (bit 31) is set, clear it and set the 64-bit error bit	\
	 * (bit 63) instead, so callers see a valid efi_status_t.	\
	 */								\
	____s ^= (____s & BIT(31)) | (____s & BIT_ULL(31)) << 32;	\
	____s;								\
})
  445. /*
  446. * Switch to the EFI page tables early so that we can access the 1:1
  447. * runtime services mappings which are not mapped in any other page
  448. * tables.
  449. *
  450. * Also, disable interrupts because the IDT points to 64-bit handlers,
  451. * which aren't going to function correctly when we switch to 32-bit.
  452. */
/* Full thunked call: __efi_thunk() wrapped in setup/teardown. */
#define efi_thunk(func...)						\
({									\
	efi_status_t __s;						\
									\
	arch_efi_call_virt_setup();					\
									\
	__s = __efi_thunk(func);					\
									\
	arch_efi_call_virt_teardown();					\
									\
	__s;								\
})
/*
 * Mixed-mode SetVirtualAddressMap(): a one-shot __init call performed
 * with interrupts disabled and efi_mm entered manually (rather than
 * through the efi_thunk() wrapper).
 */
static efi_status_t __init __no_sanitize_address
efi_thunk_set_virtual_address_map(unsigned long memory_map_size,
				  unsigned long descriptor_size,
				  u32 descriptor_version,
				  efi_memory_desc_t *virtual_map)
{
	efi_status_t status;
	unsigned long flags;

	efi_sync_low_kernel_mappings();
	local_irq_save(flags);

	efi_enter_mm();

	status = __efi_thunk(set_virtual_address_map, memory_map_size,
			     descriptor_size, descriptor_version, virtual_map);

	efi_leave_mm();
	local_irq_restore(flags);

	return status;
}
/* Time services are not provided through the mixed-mode thunks. */
static efi_status_t efi_thunk_get_time(efi_time_t *tm, efi_time_cap_t *tc)
{
	return EFI_UNSUPPORTED;
}
/* See efi_thunk_get_time(): time services are unsupported in mixed mode. */
static efi_status_t efi_thunk_set_time(efi_time_t *tm)
{
	return EFI_UNSUPPORTED;
}
/* Wakeup-timer services are unsupported in mixed mode. */
static efi_status_t
efi_thunk_get_wakeup_time(efi_bool_t *enabled, efi_bool_t *pending,
			  efi_time_t *tm)
{
	return EFI_UNSUPPORTED;
}
/* Wakeup-timer services are unsupported in mixed mode. */
static efi_status_t
efi_thunk_set_wakeup_time(efi_bool_t enabled, efi_time_t *tm)
{
	return EFI_UNSUPPORTED;
}
/*
 * Byte size of the UCS-2 variable @name (bounded by EFI_VAR_NAME_LEN),
 * plus one extra byte.
 */
static unsigned long efi_name_size(efi_char16_t *name)
{
	return ucs2_strsize(name, EFI_VAR_NAME_LEN) + 1;
}
/*
 * Mixed-mode GetVariable(): the 32-bit firmware cannot take 64-bit
 * pointers, so every argument is converted to a (u32) physical address
 * under efi_runtime_lock.  The vendor GUID is first staged in an
 * 8-byte-aligned stack buffer before its address is translated.
 */
static efi_status_t
efi_thunk_get_variable(efi_char16_t *name, efi_guid_t *vendor,
		       u32 *attr, unsigned long *data_size, void *data)
{
	u8 buf[24] __aligned(8);
	efi_guid_t *vnd = PTR_ALIGN((efi_guid_t *)buf, sizeof(*vnd));
	efi_status_t status;
	u32 phys_name, phys_vendor, phys_attr;
	u32 phys_data_size, phys_data;
	unsigned long flags;

	spin_lock_irqsave(&efi_runtime_lock, flags);

	*vnd = *vendor;

	phys_data_size = virt_to_phys_or_null(data_size);
	phys_vendor = virt_to_phys_or_null(vnd);
	phys_name = virt_to_phys_or_null_size(name, efi_name_size(name));
	phys_attr = virt_to_phys_or_null(attr);
	phys_data = virt_to_phys_or_null_size(data, *data_size);

	/* A translation of 0 (e.g. a page-crossing object) is rejected. */
	if (!phys_name || (data && !phys_data))
		status = EFI_INVALID_PARAMETER;
	else
		status = efi_thunk(get_variable, phys_name, phys_vendor,
				   phys_attr, phys_data_size, phys_data);

	spin_unlock_irqrestore(&efi_runtime_lock, flags);

	return status;
}
  530. static efi_status_t
  531. efi_thunk_set_variable(efi_char16_t *name, efi_guid_t *vendor,
  532. u32 attr, unsigned long data_size, void *data)
  533. {
  534. u8 buf[24] __aligned(8);
  535. efi_guid_t *vnd = PTR_ALIGN((efi_guid_t *)buf, sizeof(*vnd));
  536. u32 phys_name, phys_vendor, phys_data;
  537. efi_status_t status;
  538. unsigned long flags;
  539. spin_lock_irqsave(&efi_runtime_lock, flags);
  540. *vnd = *vendor;
  541. phys_name = virt_to_phys_or_null_size(name, efi_name_size(name));
  542. phys_vendor = virt_to_phys_or_null(vnd);
  543. phys_data = virt_to_phys_or_null_size(data, data_size);
  544. if (!phys_name || (data && !phys_data))
  545. status = EFI_INVALID_PARAMETER;
  546. else
  547. status = efi_thunk(set_variable, phys_name, phys_vendor,
  548. attr, data_size, phys_data);
  549. spin_unlock_irqrestore(&efi_runtime_lock, flags);
  550. return status;
  551. }
/*
 * Non-blocking variant of efi_thunk_set_variable(): bails out with
 * EFI_NOT_READY instead of waiting when efi_runtime_lock is contended.
 */
static efi_status_t
efi_thunk_set_variable_nonblocking(efi_char16_t *name, efi_guid_t *vendor,
				   u32 attr, unsigned long data_size,
				   void *data)
{
	u8 buf[24] __aligned(8);
	efi_guid_t *vnd = PTR_ALIGN((efi_guid_t *)buf, sizeof(*vnd));
	u32 phys_name, phys_vendor, phys_data;
	efi_status_t status;
	unsigned long flags;

	if (!spin_trylock_irqsave(&efi_runtime_lock, flags))
		return EFI_NOT_READY;

	*vnd = *vendor;

	phys_name = virt_to_phys_or_null_size(name, efi_name_size(name));
	phys_vendor = virt_to_phys_or_null(vnd);
	phys_data = virt_to_phys_or_null_size(data, data_size);

	/* A translation of 0 (e.g. a page-crossing object) is rejected. */
	if (!phys_name || (data && !phys_data))
		status = EFI_INVALID_PARAMETER;
	else
		status = efi_thunk(set_variable, phys_name, phys_vendor,
				   attr, data_size, phys_data);

	spin_unlock_irqrestore(&efi_runtime_lock, flags);

	return status;
}
/*
 * Mixed-mode GetNextVariableName(): arguments are translated to 32-bit
 * physical addresses as in the other variable thunks.  The firmware
 * updates the GUID in the aligned stack copy, so it is copied back to
 * the caller's @vendor afterwards.
 */
static efi_status_t
efi_thunk_get_next_variable(unsigned long *name_size,
			    efi_char16_t *name,
			    efi_guid_t *vendor)
{
	u8 buf[24] __aligned(8);
	efi_guid_t *vnd = PTR_ALIGN((efi_guid_t *)buf, sizeof(*vnd));
	efi_status_t status;
	u32 phys_name_size, phys_name, phys_vendor;
	unsigned long flags;

	spin_lock_irqsave(&efi_runtime_lock, flags);

	*vnd = *vendor;

	phys_name_size = virt_to_phys_or_null(name_size);
	phys_vendor = virt_to_phys_or_null(vnd);
	phys_name = virt_to_phys_or_null_size(name, *name_size);

	if (!phys_name)
		status = EFI_INVALID_PARAMETER;
	else
		status = efi_thunk(get_next_variable, phys_name_size,
				   phys_name, phys_vendor);

	spin_unlock_irqrestore(&efi_runtime_lock, flags);

	/* Propagate the (possibly updated) GUID back to the caller. */
	*vendor = *vnd;

	return status;
}
/* GetNextHighMonotonicCount() is not provided through the thunks. */
static efi_status_t
efi_thunk_get_next_high_mono_count(u32 *count)
{
	return EFI_UNSUPPORTED;
}
/*
 * Mixed-mode ResetSystem(): the optional @data argument is translated
 * to a 32-bit physical address before handing it to the firmware.
 * Does not return on success.
 */
static void
efi_thunk_reset_system(int reset_type, efi_status_t status,
		       unsigned long data_size, efi_char16_t *data)
{
	u32 phys_data;
	unsigned long flags;

	spin_lock_irqsave(&efi_runtime_lock, flags);

	phys_data = virt_to_phys_or_null_size(data, data_size);

	efi_thunk(reset_system, reset_type, status, data_size, phys_data);

	spin_unlock_irqrestore(&efi_runtime_lock, flags);
}
static efi_status_t
efi_thunk_update_capsule(efi_capsule_header_t **capsules,
			 unsigned long count, unsigned long sg_list)
{
	/*
	 * To properly support this function we would need to repackage
	 * 'capsules' because the firmware doesn't understand 64-bit
	 * pointers.
	 */
	return EFI_UNSUPPORTED;
}
/*
 * Mixed-mode QueryVariableInfo(): only available on EFI 2.00+ firmware;
 * the output pointers are translated to 32-bit physical addresses
 * before the thunked call.
 */
static efi_status_t
efi_thunk_query_variable_info(u32 attr, u64 *storage_space,
			      u64 *remaining_space,
			      u64 *max_variable_size)
{
	efi_status_t status;
	u32 phys_storage, phys_remaining, phys_max;
	unsigned long flags;

	if (efi.runtime_version < EFI_2_00_SYSTEM_TABLE_REVISION)
		return EFI_UNSUPPORTED;

	spin_lock_irqsave(&efi_runtime_lock, flags);

	phys_storage = virt_to_phys_or_null(storage_space);
	phys_remaining = virt_to_phys_or_null(remaining_space);
	phys_max = virt_to_phys_or_null(max_variable_size);

	status = efi_thunk(query_variable_info, attr, phys_storage,
			   phys_remaining, phys_max);

	spin_unlock_irqrestore(&efi_runtime_lock, flags);

	return status;
}
/*
 * Non-blocking variant of efi_thunk_query_variable_info(): returns
 * EFI_NOT_READY instead of waiting when efi_runtime_lock is contended.
 */
static efi_status_t
efi_thunk_query_variable_info_nonblocking(u32 attr, u64 *storage_space,
					  u64 *remaining_space,
					  u64 *max_variable_size)
{
	efi_status_t status;
	u32 phys_storage, phys_remaining, phys_max;
	unsigned long flags;

	if (efi.runtime_version < EFI_2_00_SYSTEM_TABLE_REVISION)
		return EFI_UNSUPPORTED;

	if (!spin_trylock_irqsave(&efi_runtime_lock, flags))
		return EFI_NOT_READY;

	phys_storage = virt_to_phys_or_null(storage_space);
	phys_remaining = virt_to_phys_or_null(remaining_space);
	phys_max = virt_to_phys_or_null(max_variable_size);

	status = efi_thunk(query_variable_info, attr, phys_storage,
			   phys_remaining, phys_max);

	spin_unlock_irqrestore(&efi_runtime_lock, flags);

	return status;
}
static efi_status_t
efi_thunk_query_capsule_caps(efi_capsule_header_t **capsules,
			     unsigned long count, u64 *max_size,
			     int *reset_type)
{
	/*
	 * To properly support this function we would need to repackage
	 * 'capsules' because the firmware doesn't understand 64-bit
	 * pointers.
	 */
	return EFI_UNSUPPORTED;
}
/*
 * Install the mixed-mode thunk wrappers as the EFI runtime service
 * entry points.  A no-op unless CONFIG_EFI_MIXED is enabled.
 */
void __init efi_thunk_runtime_setup(void)
{
	if (!IS_ENABLED(CONFIG_EFI_MIXED))
		return;

	efi.get_time = efi_thunk_get_time;
	efi.set_time = efi_thunk_set_time;
	efi.get_wakeup_time = efi_thunk_get_wakeup_time;
	efi.set_wakeup_time = efi_thunk_set_wakeup_time;
	efi.get_variable = efi_thunk_get_variable;
	efi.get_next_variable = efi_thunk_get_next_variable;
	efi.set_variable = efi_thunk_set_variable;
	efi.set_variable_nonblocking = efi_thunk_set_variable_nonblocking;
	efi.get_next_high_mono_count = efi_thunk_get_next_high_mono_count;
	efi.reset_system = efi_thunk_reset_system;
	efi.query_variable_info = efi_thunk_query_variable_info;
	efi.query_variable_info_nonblocking = efi_thunk_query_variable_info_nonblocking;
	efi.update_capsule = efi_thunk_update_capsule;
	efi.query_capsule_caps = efi_thunk_query_capsule_caps;
}
/*
 * Switch the firmware into virtual addressing mode.  In mixed mode this
 * delegates to the 32-bit thunk variant.  @systab_phys is dereferenced
 * through its physical address (valid while efi_mm is active), and the
 * runtime services pointer is re-read from the system table afterwards
 * to pick up the virtually remapped value.
 */
efi_status_t __init __no_sanitize_address
efi_set_virtual_address_map(unsigned long memory_map_size,
			    unsigned long descriptor_size,
			    u32 descriptor_version,
			    efi_memory_desc_t *virtual_map,
			    unsigned long systab_phys)
{
	const efi_system_table_t *systab = (efi_system_table_t *)systab_phys;
	efi_status_t status;
	unsigned long flags;

	if (efi_is_mixed())
		return efi_thunk_set_virtual_address_map(memory_map_size,
							 descriptor_size,
							 descriptor_version,
							 virtual_map);
	efi_enter_mm();

	efi_fpu_begin();

	/* Disable interrupts around EFI calls: */
	local_irq_save(flags);
	status = arch_efi_call_virt(efi.runtime, set_virtual_address_map,
				    memory_map_size, descriptor_size,
				    descriptor_version, virtual_map);
	local_irq_restore(flags);

	efi_fpu_end();

	/* grab the virtually remapped EFI runtime services table pointer */
	efi.runtime = READ_ONCE(systab->runtime);

	efi_leave_mm();

	return status;
}