  1. // SPDX-License-Identifier: GPL-2.0+
  2. /*
  3. * EFI application memory management
  4. *
  5. * Copyright (c) 2016 Alexander Graf
  6. */
  7. #define LOG_CATEGORY LOGC_EFI
  8. #include <common.h>
  9. #include <efi_loader.h>
  10. #include <init.h>
  11. #include <log.h>
  12. #include <malloc.h>
  13. #include <mapmem.h>
  14. #include <watchdog.h>
  15. #include <asm/cache.h>
  16. #include <asm/global_data.h>
  17. #include <linux/list_sort.h>
  18. #include <linux/sizes.h>
DECLARE_GLOBAL_DATA_PTR;

/* Magic number identifying memory allocated from pool */
#define EFI_ALLOC_POOL_MAGIC 0x1fe67ddf6491caa2

/*
 * Map key handed out by efi_get_memory_map(); incremented in
 * efi_add_memory_map_pg() whenever the memory map changes.
 */
efi_uintn_t efi_memory_map_key;

/**
 * struct efi_mem_list - entry in the EFI memory map list
 *
 * @link: link in the global efi_mem list
 * @desc: UEFI memory descriptor for this region
 */
struct efi_mem_list {
	struct list_head link;
	struct efi_mem_desc desc;
};

/* Negative return codes of efi_mem_carve_out() (non-negative = pages carved) */
#define EFI_CARVE_NO_OVERLAP -1
#define EFI_CARVE_LOOP_AGAIN -2
#define EFI_CARVE_OVERLAPS_NONRAM -3
#define EFI_CARVE_OUT_OF_RESOURCES -4

/* This list contains all memory map items */
static LIST_HEAD(efi_mem);

#ifdef CONFIG_EFI_LOADER_BOUNCE_BUFFER
/* Bounce buffer allocated below 4 GiB in efi_memory_init() */
void *efi_bounce_buffer;
#endif
/**
 * struct efi_pool_allocation - memory block allocated from pool
 *
 * @num_pages: number of pages allocated
 * @checksum: checksum of the header, computed by checksum()
 * @data: allocated pool memory, returned to the caller of AllocatePool()
 *
 * U-Boot services each UEFI AllocatePool() request as a separate
 * (multiple) page allocation. We have to track the number of pages
 * to be able to free the correct amount later.
 *
 * The checksum calculated in function checksum() is used in FreePool() to avoid
 * freeing memory not allocated by AllocatePool() and duplicate freeing.
 *
 * EFI requires 8 byte alignment for pool allocations, so we can
 * prepend each allocation with these header fields.
 */
struct efi_pool_allocation {
	u64 num_pages;
	u64 checksum;
	char data[] __aligned(ARCH_DMA_MINALIGN);
};
  58. /**
  59. * checksum() - calculate checksum for memory allocated from pool
  60. *
  61. * @alloc: allocation header
  62. * Return: checksum, always non-zero
  63. */
  64. static u64 checksum(struct efi_pool_allocation *alloc)
  65. {
  66. u64 addr = (uintptr_t)alloc;
  67. u64 ret = (addr >> 32) ^ (addr << 32) ^ alloc->num_pages ^
  68. EFI_ALLOC_POOL_MAGIC;
  69. if (!ret)
  70. ++ret;
  71. return ret;
  72. }
  73. /**
  74. * efi_mem_cmp() - comparator function for sorting memory map
  75. *
  76. * Sorts the memory list from highest address to lowest address
  77. *
  78. * When allocating memory we should always start from the highest
  79. * address chunk, so sort the memory list such that the first list
  80. * iterator gets the highest address and goes lower from there.
  81. *
  82. * @priv: unused
  83. * @a: first memory area
  84. * @b: second memory area
  85. * Return: 1 if @a is before @b, -1 if @b is before @a, 0 if equal
  86. */
  87. static int efi_mem_cmp(void *priv, struct list_head *a, struct list_head *b)
  88. {
  89. struct efi_mem_list *mema = list_entry(a, struct efi_mem_list, link);
  90. struct efi_mem_list *memb = list_entry(b, struct efi_mem_list, link);
  91. if (mema->desc.physical_start == memb->desc.physical_start)
  92. return 0;
  93. else if (mema->desc.physical_start < memb->desc.physical_start)
  94. return 1;
  95. else
  96. return -1;
  97. }
  98. /**
  99. * desc_get_end() - get end address of memory area
  100. *
  101. * @desc: memory descriptor
  102. * Return: end address + 1
  103. */
  104. static uint64_t desc_get_end(struct efi_mem_desc *desc)
  105. {
  106. return desc->physical_start + (desc->num_pages << EFI_PAGE_SHIFT);
  107. }
  108. /**
  109. * efi_mem_sort() - sort memory map
  110. *
  111. * Sort the memory map and then try to merge adjacent memory areas.
  112. */
  113. static void efi_mem_sort(void)
  114. {
  115. struct list_head *lhandle;
  116. struct efi_mem_list *prevmem = NULL;
  117. bool merge_again = true;
  118. list_sort(NULL, &efi_mem, efi_mem_cmp);
  119. /* Now merge entries that can be merged */
  120. while (merge_again) {
  121. merge_again = false;
  122. list_for_each(lhandle, &efi_mem) {
  123. struct efi_mem_list *lmem;
  124. struct efi_mem_desc *prev = &prevmem->desc;
  125. struct efi_mem_desc *cur;
  126. uint64_t pages;
  127. lmem = list_entry(lhandle, struct efi_mem_list, link);
  128. if (!prevmem) {
  129. prevmem = lmem;
  130. continue;
  131. }
  132. cur = &lmem->desc;
  133. if ((desc_get_end(cur) == prev->physical_start) &&
  134. (prev->type == cur->type) &&
  135. (prev->attribute == cur->attribute)) {
  136. /* There is an existing map before, reuse it */
  137. pages = cur->num_pages;
  138. prev->num_pages += pages;
  139. prev->physical_start -= pages << EFI_PAGE_SHIFT;
  140. prev->virtual_start -= pages << EFI_PAGE_SHIFT;
  141. list_del(&lmem->link);
  142. free(lmem);
  143. merge_again = true;
  144. break;
  145. }
  146. prevmem = lmem;
  147. }
  148. }
  149. }
/**
 * efi_mem_carve_out() - unmap memory region
 *
 * @map: memory map
 * @carve_desc: memory region to unmap
 * @overlap_only_ram: the carved out region may only overlap RAM
 * Return: the number of overlapping pages which have been
 * removed from the map,
 * EFI_CARVE_NO_OVERLAP, if the regions don't overlap,
 * EFI_CARVE_OVERLAPS_NONRAM, if the carve and map overlap,
 * and the map contains anything but free ram
 * (only when overlap_only_ram is true),
 * EFI_CARVE_LOOP_AGAIN, if the mapping list should be
 * traversed again, as it has been altered.
 * EFI_CARVE_OUT_OF_RESOURCES, if no memory is available
 * for splitting the entry
 *
 * Unmaps all memory occupied by the carve_desc region from the list entry
 * pointed to by map.
 *
 * In case of EFI_CARVE_OVERLAPS_NONRAM it is the callers responsibility
 * to re-add the already carved out pages to the mapping.
 */
static s64 efi_mem_carve_out(struct efi_mem_list *map,
			     struct efi_mem_desc *carve_desc,
			     bool overlap_only_ram)
{
	struct efi_mem_list *newmap;
	struct efi_mem_desc *map_desc = &map->desc;
	uint64_t map_start = map_desc->physical_start;
	uint64_t map_end = map_start + (map_desc->num_pages << EFI_PAGE_SHIFT);
	uint64_t carve_start = carve_desc->physical_start;
	uint64_t carve_end = carve_start +
			     (carve_desc->num_pages << EFI_PAGE_SHIFT);

	/* check whether we're overlapping */
	if ((carve_end <= map_start) || (carve_start >= map_end))
		return EFI_CARVE_NO_OVERLAP;

	/* We're overlapping with non-RAM, warn the caller if desired */
	if (overlap_only_ram && (map_desc->type != EFI_CONVENTIONAL_MEMORY))
		return EFI_CARVE_OVERLAPS_NONRAM;

	/* Sanitize carve_start and carve_end to lie within our bounds */
	carve_start = max(carve_start, map_start);
	carve_end = min(carve_end, map_end);

	/* Carving at the beginning of our map? Just move it! */
	if (carve_start == map_start) {
		if (map_end == carve_end) {
			/* Full overlap, just remove map */
			list_del(&map->link);
			free(map);
		} else {
			/* Partial overlap at the start: shift the map up */
			map->desc.physical_start = carve_end;
			map->desc.virtual_start = carve_end;
			map->desc.num_pages = (map_end - carve_end)
					      >> EFI_PAGE_SHIFT;
		}

		return (carve_end - carve_start) >> EFI_PAGE_SHIFT;
	}

	/*
	 * Overlapping maps, just split the list map at carve_start,
	 * it will get moved or removed in the next iteration.
	 *
	 * [ map_desc |__carve_start__| newmap ]
	 */

	/* Create a new map from [ carve_start ... map_end ] */
	newmap = calloc(1, sizeof(*newmap));
	if (!newmap)
		return EFI_CARVE_OUT_OF_RESOURCES;
	newmap->desc = map->desc;
	newmap->desc.physical_start = carve_start;
	newmap->desc.virtual_start = carve_start;
	newmap->desc.num_pages = (map_end - carve_start) >> EFI_PAGE_SHIFT;
	/* Insert before current entry (descending address order) */
	list_add_tail(&newmap->link, &map->link);

	/* Shrink the map to [ map_start ... carve_start ] */
	map_desc->num_pages = (carve_start - map_start) >> EFI_PAGE_SHIFT;

	/* The split entry still overlaps; caller must iterate again */
	return EFI_CARVE_LOOP_AGAIN;
}
/**
 * efi_add_memory_map_pg() - add pages to the memory map
 *
 * @start: start address, must be a multiple of EFI_PAGE_SIZE
 * @pages: number of pages to add
 * @memory_type: type of memory added
 * @overlap_only_ram: region may only overlap RAM
 * Return: status code
 *
 * The new region is carved out of every overlapping existing entry and
 * then inserted; afterwards the map is re-sorted and merged, and the
 * memory-map-change event group is signaled.
 */
static efi_status_t efi_add_memory_map_pg(u64 start, u64 pages,
					  int memory_type,
					  bool overlap_only_ram)
{
	struct list_head *lhandle;
	struct efi_mem_list *newlist;
	bool carve_again;
	uint64_t carved_pages = 0;
	struct efi_event *evt;

	EFI_PRINT("%s: 0x%llx 0x%llx %d %s\n", __func__,
		  start, pages, memory_type, overlap_only_ram ? "yes" : "no");

	if (memory_type >= EFI_MAX_MEMORY_TYPE)
		return EFI_INVALID_PARAMETER;

	if (!pages)
		return EFI_SUCCESS;

	/* Any map change invalidates previously handed out map keys */
	++efi_memory_map_key;
	newlist = calloc(1, sizeof(*newlist));
	if (!newlist)
		return EFI_OUT_OF_RESOURCES;
	newlist->desc.type = memory_type;
	newlist->desc.physical_start = start;
	newlist->desc.virtual_start = start;
	newlist->desc.num_pages = pages;

	/* Derive the memory attributes from the memory type */
	switch (memory_type) {
	case EFI_RUNTIME_SERVICES_CODE:
	case EFI_RUNTIME_SERVICES_DATA:
		newlist->desc.attribute = EFI_MEMORY_WB | EFI_MEMORY_RUNTIME;
		break;
	case EFI_MMAP_IO:
		newlist->desc.attribute = EFI_MEMORY_RUNTIME;
		break;
	default:
		newlist->desc.attribute = EFI_MEMORY_WB;
		break;
	}

	/* Add our new map */
	do {
		carve_again = false;
		list_for_each(lhandle, &efi_mem) {
			struct efi_mem_list *lmem;
			s64 r;

			lmem = list_entry(lhandle, struct efi_mem_list, link);
			r = efi_mem_carve_out(lmem, &newlist->desc,
					      overlap_only_ram);
			switch (r) {
			case EFI_CARVE_OUT_OF_RESOURCES:
				free(newlist);
				return EFI_OUT_OF_RESOURCES;
			case EFI_CARVE_OVERLAPS_NONRAM:
				/*
				 * The user requested to only have RAM overlaps,
				 * but we hit a non-RAM region. Error out.
				 */
				free(newlist);
				return EFI_NO_MAPPING;
			case EFI_CARVE_NO_OVERLAP:
				/* Just ignore this list entry */
				break;
			case EFI_CARVE_LOOP_AGAIN:
				/*
				 * We split an entry, but need to loop through
				 * the list again to actually carve it.
				 */
				carve_again = true;
				break;
			default:
				/* We carved a number of pages */
				carved_pages += r;
				carve_again = true;
				break;
			}

			if (carve_again) {
				/* The list changed, we need to start over */
				break;
			}
		}
	} while (carve_again);

	if (overlap_only_ram && (carved_pages != pages)) {
		/*
		 * The payload wanted to have RAM overlaps, but we overlapped
		 * with an unallocated region. Error out.
		 */
		free(newlist);
		return EFI_NO_MAPPING;
	}

	/* Add our new map */
	list_add_tail(&newlist->link, &efi_mem);

	/* And make sure memory is listed in descending order */
	efi_mem_sort();

	/* Notify that the memory map was changed */
	list_for_each_entry(evt, &efi_events, link) {
		if (evt->group &&
		    !guidcmp(evt->group,
			     &efi_guid_event_group_memory_map_change)) {
			efi_signal_event(evt);
			break;
		}
	}

	return EFI_SUCCESS;
}
  334. /**
  335. * efi_add_memory_map() - add memory area to the memory map
  336. *
  337. * @start: start address of the memory area
  338. * @size: length in bytes of the memory area
  339. * @memory_type: type of memory added
  340. *
  341. * Return: status code
  342. *
  343. * This function automatically aligns the start and size of the memory area
  344. * to EFI_PAGE_SIZE.
  345. */
  346. efi_status_t efi_add_memory_map(u64 start, u64 size, int memory_type)
  347. {
  348. u64 pages;
  349. pages = efi_size_in_pages(size + (start & EFI_PAGE_MASK));
  350. start &= ~EFI_PAGE_MASK;
  351. return efi_add_memory_map_pg(start, pages, memory_type, false);
  352. }
  353. /**
  354. * efi_check_allocated() - validate address to be freed
  355. *
  356. * Check that the address is within allocated memory:
  357. *
  358. * * The address must be in a range of the memory map.
  359. * * The address may not point to EFI_CONVENTIONAL_MEMORY.
  360. *
  361. * Page alignment is not checked as this is not a requirement of
  362. * efi_free_pool().
  363. *
  364. * @addr: address of page to be freed
  365. * @must_be_allocated: return success if the page is allocated
  366. * Return: status code
  367. */
  368. static efi_status_t efi_check_allocated(u64 addr, bool must_be_allocated)
  369. {
  370. struct efi_mem_list *item;
  371. list_for_each_entry(item, &efi_mem, link) {
  372. u64 start = item->desc.physical_start;
  373. u64 end = start + (item->desc.num_pages << EFI_PAGE_SHIFT);
  374. if (addr >= start && addr < end) {
  375. if (must_be_allocated ^
  376. (item->desc.type == EFI_CONVENTIONAL_MEMORY))
  377. return EFI_SUCCESS;
  378. else
  379. return EFI_NOT_FOUND;
  380. }
  381. }
  382. return EFI_NOT_FOUND;
  383. }
/**
 * efi_find_free_memory() - find free memory pages
 *
 * Walk the memory map (sorted in descending address order) and return
 * the highest free-RAM address at which @len bytes fit below @max_addr.
 *
 * @len: size of memory area needed
 * @max_addr: highest address to allocate
 * Return: pointer to free memory area or 0 if none was found
 */
static uint64_t efi_find_free_memory(uint64_t len, uint64_t max_addr)
{
	struct list_head *lhandle;

	/*
	 * Prealign input max address, so we simplify our matching
	 * logic below and can just reuse it as return pointer.
	 */
	max_addr &= ~EFI_PAGE_MASK;

	list_for_each(lhandle, &efi_mem) {
		struct efi_mem_list *lmem = list_entry(lhandle,
			struct efi_mem_list, link);
		struct efi_mem_desc *desc = &lmem->desc;
		uint64_t desc_len = desc->num_pages << EFI_PAGE_SHIFT;
		uint64_t desc_end = desc->physical_start + desc_len;
		uint64_t curmax = min(max_addr, desc_end);
		/* Candidate: highest start so the area still ends at curmax */
		uint64_t ret = curmax - len;

		/* We only take memory from free RAM */
		if (desc->type != EFI_CONVENTIONAL_MEMORY)
			continue;

		/* Out of bounds for max_addr */
		if ((ret + len) > max_addr)
			continue;

		/* Out of bounds for upper map limit */
		if ((ret + len) > desc_end)
			continue;

		/* Out of bounds for lower map limit */
		if (ret < desc->physical_start)
			continue;

		/* Return the highest address in this map within bounds */
		return ret;
	}

	return 0;
}
/**
 * efi_allocate_pages - allocate memory pages
 *
 * Implements the UEFI AllocatePages() boot service.
 *
 * @type: type of allocation to be performed
 * @memory_type: usage type of the allocated memory
 * @pages: number of pages to be allocated
 * @memory: on entry the max/exact address (depending on @type),
 *          on exit the address of the allocated memory
 * Return: status code
 */
efi_status_t efi_allocate_pages(enum efi_allocate_type type,
				enum efi_memory_type memory_type,
				efi_uintn_t pages, uint64_t *memory)
{
	u64 len;
	efi_status_t ret;
	uint64_t addr;

	/* Check import parameters */
	if (memory_type >= EFI_PERSISTENT_MEMORY_TYPE &&
	    memory_type <= 0x6FFFFFFF)
		return EFI_INVALID_PARAMETER;
	if (!memory)
		return EFI_INVALID_PARAMETER;
	len = (u64)pages << EFI_PAGE_SHIFT;
	/* Catch possible overflow on 64bit systems */
	if (sizeof(efi_uintn_t) == sizeof(u64) &&
	    (len >> EFI_PAGE_SHIFT) != (u64)pages)
		return EFI_OUT_OF_RESOURCES;

	switch (type) {
	case EFI_ALLOCATE_ANY_PAGES:
		/* Any page */
		addr = efi_find_free_memory(len, -1ULL);
		if (!addr)
			return EFI_OUT_OF_RESOURCES;
		break;
	case EFI_ALLOCATE_MAX_ADDRESS:
		/* Max address */
		addr = efi_find_free_memory(len, *memory);
		if (!addr)
			return EFI_OUT_OF_RESOURCES;
		break;
	case EFI_ALLOCATE_ADDRESS:
		if (*memory & EFI_PAGE_MASK)
			return EFI_NOT_FOUND;
		/* Exact address, reserve it. The addr is already in *memory. */
		ret = efi_check_allocated(*memory, false);
		if (ret != EFI_SUCCESS)
			return EFI_NOT_FOUND;
		addr = *memory;
		break;
	default:
		/* UEFI doesn't specify other allocation types */
		return EFI_INVALID_PARAMETER;
	}

	/* Reserve that map in our memory maps */
	ret = efi_add_memory_map_pg(addr, pages, memory_type, true);
	if (ret != EFI_SUCCESS)
		/* Map would overlap, bail out */
		return EFI_OUT_OF_RESOURCES;

	*memory = addr;

	return EFI_SUCCESS;
}
  485. /**
  486. * efi_free_pages() - free memory pages
  487. *
  488. * @memory: start of the memory area to be freed
  489. * @pages: number of pages to be freed
  490. * Return: status code
  491. */
  492. efi_status_t efi_free_pages(uint64_t memory, efi_uintn_t pages)
  493. {
  494. efi_status_t ret;
  495. ret = efi_check_allocated(memory, true);
  496. if (ret != EFI_SUCCESS)
  497. return ret;
  498. /* Sanity check */
  499. if (!memory || (memory & EFI_PAGE_MASK) || !pages) {
  500. printf("%s: illegal free 0x%llx, 0x%zx\n", __func__,
  501. memory, pages);
  502. return EFI_INVALID_PARAMETER;
  503. }
  504. ret = efi_add_memory_map_pg(memory, pages, EFI_CONVENTIONAL_MEMORY,
  505. false);
  506. if (ret != EFI_SUCCESS)
  507. return EFI_NOT_FOUND;
  508. return ret;
  509. }
/**
 * efi_alloc_aligned_pages() - allocate aligned memory pages
 *
 * Over-allocates by (align - EFI_PAGE_SIZE) pages, then frees the unused
 * pages before and after the aligned region.
 *
 * @len: len in bytes
 * @memory_type: usage type of the allocated memory
 * @align: alignment in bytes, must be zero or a power of two
 * Return: aligned memory or NULL on failure
 */
void *efi_alloc_aligned_pages(u64 len, int memory_type, size_t align)
{
	u64 req_pages = efi_size_in_pages(len);
	/* Extra slack pages so an aligned region of req_pages always fits */
	u64 true_pages = req_pages + efi_size_in_pages(align) - 1;
	u64 free_pages;
	u64 aligned_mem;
	efi_status_t r;
	u64 mem;

	/* align must be zero or a power of two */
	if (align & (align - 1))
		return NULL;

	/* Check for overflow */
	if (true_pages < req_pages)
		return NULL;

	/* Page granularity already satisfies small alignments */
	if (align < EFI_PAGE_SIZE) {
		r = efi_allocate_pages(EFI_ALLOCATE_ANY_PAGES, memory_type,
				       req_pages, &mem);
		return (r == EFI_SUCCESS) ? (void *)(uintptr_t)mem : NULL;
	}

	r = efi_allocate_pages(EFI_ALLOCATE_ANY_PAGES, memory_type,
			       true_pages, &mem);
	if (r != EFI_SUCCESS)
		return NULL;

	aligned_mem = ALIGN(mem, align);
	/* Free pages before alignment */
	free_pages = efi_size_in_pages(aligned_mem - mem);
	if (free_pages)
		efi_free_pages(mem, free_pages);

	/* Free trailing pages */
	free_pages = true_pages - (req_pages + free_pages);
	if (free_pages) {
		mem = aligned_mem + req_pages * EFI_PAGE_SIZE;
		efi_free_pages(mem, free_pages);
	}

	return (void *)(uintptr_t)aligned_mem;
}
  554. /**
  555. * efi_allocate_pool - allocate memory from pool
  556. *
  557. * @pool_type: type of the pool from which memory is to be allocated
  558. * @size: number of bytes to be allocated
  559. * @buffer: allocated memory
  560. * Return: status code
  561. */
  562. efi_status_t efi_allocate_pool(enum efi_memory_type pool_type, efi_uintn_t size, void **buffer)
  563. {
  564. efi_status_t r;
  565. u64 addr;
  566. struct efi_pool_allocation *alloc;
  567. u64 num_pages = efi_size_in_pages(size +
  568. sizeof(struct efi_pool_allocation));
  569. if (!buffer)
  570. return EFI_INVALID_PARAMETER;
  571. if (size == 0) {
  572. *buffer = NULL;
  573. return EFI_SUCCESS;
  574. }
  575. r = efi_allocate_pages(EFI_ALLOCATE_ANY_PAGES, pool_type, num_pages,
  576. &addr);
  577. if (r == EFI_SUCCESS) {
  578. alloc = (struct efi_pool_allocation *)(uintptr_t)addr;
  579. alloc->num_pages = num_pages;
  580. alloc->checksum = checksum(alloc);
  581. *buffer = alloc->data;
  582. }
  583. return r;
  584. }
  585. /**
  586. * efi_alloc() - allocate boot services data pool memory
  587. *
  588. * Allocate memory from pool and zero it out.
  589. *
  590. * @size: number of bytes to allocate
  591. * Return: pointer to allocated memory or NULL
  592. */
  593. void *efi_alloc(size_t size)
  594. {
  595. void *buf;
  596. if (efi_allocate_pool(EFI_BOOT_SERVICES_DATA, size, &buf) !=
  597. EFI_SUCCESS) {
  598. log_err("out of memory");
  599. return NULL;
  600. }
  601. memset(buf, 0, size);
  602. return buf;
  603. }
/**
 * efi_free_pool() - free memory from pool
 *
 * The header written by efi_allocate_pool() is validated via its
 * checksum before the pages are released.
 *
 * @buffer: start of memory to be freed
 * Return: status code
 */
efi_status_t efi_free_pool(void *buffer)
{
	efi_status_t ret;
	struct efi_pool_allocation *alloc;

	if (!buffer)
		return EFI_INVALID_PARAMETER;

	ret = efi_check_allocated((uintptr_t)buffer, true);
	if (ret != EFI_SUCCESS)
		return ret;

	/* Recover the allocation header prepended by efi_allocate_pool() */
	alloc = container_of(buffer, struct efi_pool_allocation, data);

	/* Check that this memory was allocated by efi_allocate_pool() */
	if (((uintptr_t)alloc & EFI_PAGE_MASK) ||
	    alloc->checksum != checksum(alloc)) {
		printf("%s: illegal free 0x%p\n", __func__, buffer);
		return EFI_INVALID_PARAMETER;
	}
	/* Avoid double free: zero is never a valid checksum, see checksum() */
	alloc->checksum = 0;

	ret = efi_free_pages((uintptr_t)alloc, alloc->num_pages);

	return ret;
}
/**
 * efi_get_memory_map() - get map describing memory usage.
 *
 * @memory_map_size: on entry the size, in bytes, of the memory map buffer,
 * on exit the size of the copied memory map
 * @memory_map: buffer to which the memory map is written
 * @map_key: key for the memory map
 * @descriptor_size: size of an individual memory descriptor
 * @descriptor_version: version number of the memory descriptor structure
 * Return: status code
 */
efi_status_t efi_get_memory_map(efi_uintn_t *memory_map_size,
				struct efi_mem_desc *memory_map,
				efi_uintn_t *map_key,
				efi_uintn_t *descriptor_size,
				uint32_t *descriptor_version)
{
	efi_uintn_t map_size = 0;
	int map_entries = 0;
	struct list_head *lhandle;
	efi_uintn_t provided_map_size;

	if (!memory_map_size)
		return EFI_INVALID_PARAMETER;

	provided_map_size = *memory_map_size;

	/* Count the entries to compute the required buffer size */
	list_for_each(lhandle, &efi_mem)
		map_entries++;

	map_size = map_entries * sizeof(struct efi_mem_desc);

	*memory_map_size = map_size;

	if (descriptor_size)
		*descriptor_size = sizeof(struct efi_mem_desc);

	if (descriptor_version)
		*descriptor_version = EFI_MEMORY_DESCRIPTOR_VERSION;

	/* Size is reported first, so callers can retry with a larger buffer */
	if (provided_map_size < map_size)
		return EFI_BUFFER_TOO_SMALL;

	if (!memory_map)
		return EFI_INVALID_PARAMETER;

	/* Copy list into array */
	/*
	 * The list is kept in descending address order; fill the array from
	 * its last slot backwards to return the map in ascending order.
	 */
	memory_map = &memory_map[map_entries - 1];
	list_for_each(lhandle, &efi_mem) {
		struct efi_mem_list *lmem;

		lmem = list_entry(lhandle, struct efi_mem_list, link);
		*memory_map = lmem->desc;
		memory_map--;
	}

	if (map_key)
		*map_key = efi_memory_map_key;

	return EFI_SUCCESS;
}
  680. /**
  681. * efi_get_memory_map_alloc() - allocate map describing memory usage
  682. *
  683. * The caller is responsible for calling FreePool() if the call succeeds.
  684. *
  685. * @map_size: size of the memory map
  686. * @memory_map: buffer to which the memory map is written
  687. * Return: status code
  688. */
  689. efi_status_t efi_get_memory_map_alloc(efi_uintn_t *map_size,
  690. struct efi_mem_desc **memory_map)
  691. {
  692. efi_status_t ret;
  693. *memory_map = NULL;
  694. *map_size = 0;
  695. ret = efi_get_memory_map(map_size, *memory_map, NULL, NULL, NULL);
  696. if (ret == EFI_BUFFER_TOO_SMALL) {
  697. *map_size += sizeof(struct efi_mem_desc); /* for the map */
  698. ret = efi_allocate_pool(EFI_BOOT_SERVICES_DATA, *map_size,
  699. (void **)memory_map);
  700. if (ret != EFI_SUCCESS)
  701. return ret;
  702. ret = efi_get_memory_map(map_size, *memory_map,
  703. NULL, NULL, NULL);
  704. if (ret != EFI_SUCCESS) {
  705. efi_free_pool(*memory_map);
  706. *memory_map = NULL;
  707. }
  708. }
  709. return ret;
  710. }
  711. /**
  712. * efi_add_conventional_memory_map() - add a RAM memory area to the map
  713. *
  714. * @ram_start: start address of a RAM memory area
  715. * @ram_end: end address of a RAM memory area
  716. * @ram_top: max address to be used as conventional memory
  717. * Return: status code
  718. */
  719. efi_status_t efi_add_conventional_memory_map(u64 ram_start, u64 ram_end,
  720. u64 ram_top)
  721. {
  722. u64 pages;
  723. /* Remove partial pages */
  724. ram_end &= ~EFI_PAGE_MASK;
  725. ram_start = (ram_start + EFI_PAGE_MASK) & ~EFI_PAGE_MASK;
  726. if (ram_end <= ram_start) {
  727. /* Invalid mapping */
  728. return EFI_INVALID_PARAMETER;
  729. }
  730. pages = (ram_end - ram_start) >> EFI_PAGE_SHIFT;
  731. efi_add_memory_map_pg(ram_start, pages,
  732. EFI_CONVENTIONAL_MEMORY, false);
  733. /*
  734. * Boards may indicate to the U-Boot memory core that they
  735. * can not support memory above ram_top. Let's honor this
  736. * in the efi_loader subsystem too by declaring any memory
  737. * above ram_top as "already occupied by firmware".
  738. */
  739. if (ram_top < ram_start) {
  740. /* ram_top is before this region, reserve all */
  741. efi_add_memory_map_pg(ram_start, pages,
  742. EFI_BOOT_SERVICES_DATA, true);
  743. } else if (ram_top < ram_end) {
  744. /* ram_top is inside this region, reserve parts */
  745. pages = (ram_end - ram_top) >> EFI_PAGE_SHIFT;
  746. efi_add_memory_map_pg(ram_top, pages,
  747. EFI_BOOT_SERVICES_DATA, true);
  748. }
  749. return EFI_SUCCESS;
  750. }
/**
 * efi_add_known_memory() - add memory banks to map
 *
 * Registers all DRAM banks from gd->bd->bi_dram[] as conventional
 * memory, capped at gd->ram_top.
 *
 * This function may be overridden for specific architectures.
 */
__weak void efi_add_known_memory(void)
{
	u64 ram_top = gd->ram_top & ~EFI_PAGE_MASK;
	int i;

	/*
	 * ram_top is just outside mapped memory. So use an offset of one for
	 * mapping the sandbox address.
	 */
	ram_top = (uintptr_t)map_sysmem(ram_top - 1, 0) + 1;

	/* Fix for 32bit targets with ram_top at 4G */
	if (!ram_top)
		ram_top = 0x100000000ULL;

	/* Add RAM */
	for (i = 0; i < CONFIG_NR_DRAM_BANKS; i++) {
		u64 ram_end, ram_start;

		ram_start = (uintptr_t)map_sysmem(gd->bd->bi_dram[i].start, 0);
		ram_end = ram_start + gd->bd->bi_dram[i].size;

		efi_add_conventional_memory_map(ram_start, ram_end, ram_top);
	}
}
/**
 * add_u_boot_and_runtime() - add U-Boot code to memory map
 *
 * Add memory regions for U-Boot's memory and for the runtime services code.
 */
static void add_u_boot_and_runtime(void)
{
	unsigned long runtime_start, runtime_end, runtime_pages;
	unsigned long runtime_mask = EFI_PAGE_MASK;
	unsigned long uboot_start, uboot_pages;
	unsigned long uboot_stack_size = CONFIG_STACK_SIZE;

	/* Add U-Boot: from below the stack pointer up to ram_top */
	uboot_start = ((uintptr_t)map_sysmem(gd->start_addr_sp, 0) -
		       uboot_stack_size) & ~EFI_PAGE_MASK;
	uboot_pages = ((uintptr_t)map_sysmem(gd->ram_top - 1, 0) -
		       uboot_start + EFI_PAGE_MASK) >> EFI_PAGE_SHIFT;
	efi_add_memory_map_pg(uboot_start, uboot_pages, EFI_BOOT_SERVICES_CODE,
			      false);

#if defined(__aarch64__)
	/*
	 * Runtime Services must be 64KiB aligned according to the
	 * "AArch64 Platforms" section in the UEFI spec (2.7+).
	 */
	runtime_mask = SZ_64K - 1;
#endif

	/*
	 * Add Runtime Services. We mark surrounding boottime code as runtime as
	 * well to fulfill the runtime alignment constraints but avoid padding.
	 */
	runtime_start = (ulong)&__efi_runtime_start & ~runtime_mask;
	runtime_end = (ulong)&__efi_runtime_stop;
	/* Round the end up to the alignment boundary as well */
	runtime_end = (runtime_end + runtime_mask) & ~runtime_mask;
	runtime_pages = (runtime_end - runtime_start) >> EFI_PAGE_SHIFT;
	efi_add_memory_map_pg(runtime_start, runtime_pages,
			      EFI_RUNTIME_SERVICES_CODE, false);
}
/**
 * efi_memory_init() - set up the initial EFI memory map
 *
 * Adds the known RAM banks, the U-Boot and runtime services regions,
 * and optionally a bounce buffer below 4 GiB.
 *
 * Return: 0 on success, -1 if the bounce buffer could not be allocated
 */
int efi_memory_init(void)
{
	efi_add_known_memory();

	add_u_boot_and_runtime();

#ifdef CONFIG_EFI_LOADER_BOUNCE_BUFFER
	/* Request a 32bit 64MB bounce buffer region */
	uint64_t efi_bounce_buffer_addr = 0xffffffff;

	if (efi_allocate_pages(EFI_ALLOCATE_MAX_ADDRESS, EFI_BOOT_SERVICES_DATA,
			       (64 * 1024 * 1024) >> EFI_PAGE_SHIFT,
			       &efi_bounce_buffer_addr) != EFI_SUCCESS)
		return -1;

	efi_bounce_buffer = (void*)(uintptr_t)efi_bounce_buffer_addr;
#endif

	return 0;
}