// SPDX-License-Identifier: GPL-2.0+
/*
 * (C) Copyright 2018 Simon Goldschmidt
 */
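
/*
 * Unit tests for the lmb (logical memory blocks) allocator. Each test
 * builds a fresh struct lmb, adds one or two simulated RAM banks and
 * checks the memory and reserved region lists after every reserve,
 * alloc and free operation.
 */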

#include <common.h>
#include <dm.h>
#include <lmb.h>
#include <log.h>
#include <malloc.h>
#include <dm/test.h>
#include <test/test.h>
#include <test/ut.h>

static inline bool lmb_is_nomap(struct lmb_property *m)
{
        return m->flags & LMB_NOMAP;
}

static int check_lmb(struct unit_test_state *uts, struct lmb *lmb,
                     phys_addr_t ram_base, phys_size_t ram_size,
                     unsigned long num_reserved,
                     phys_addr_t base1, phys_size_t size1,
                     phys_addr_t base2, phys_size_t size2,
                     phys_addr_t base3, phys_size_t size3)
{
        if (ram_size) {
                ut_asserteq(lmb->memory.cnt, 1);
                ut_asserteq(lmb->memory.region[0].base, ram_base);
                ut_asserteq(lmb->memory.region[0].size, ram_size);
        }

        ut_asserteq(lmb->reserved.cnt, num_reserved);
        if (num_reserved > 0) {
                ut_asserteq(lmb->reserved.region[0].base, base1);
                ut_asserteq(lmb->reserved.region[0].size, size1);
        }
        if (num_reserved > 1) {
                ut_asserteq(lmb->reserved.region[1].base, base2);
                ut_asserteq(lmb->reserved.region[1].size, size2);
        }
        if (num_reserved > 2) {
                ut_asserteq(lmb->reserved.region[2].base, base3);
                ut_asserteq(lmb->reserved.region[2].size, size3);
        }
        return 0;
}
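
/*
 * Assert helper: fail the current test unless the lmb state matches the
 * given RAM region and up to three reserved regions; unused base/size
 * pairs are passed as 0.
 */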
#define ASSERT_LMB(lmb, ram_base, ram_size, num_reserved, base1, size1, \
                   base2, size2, base3, size3) \
        ut_assert(!check_lmb(uts, lmb, ram_base, ram_size, \
                  num_reserved, base1, size1, base2, size2, base3, \
                  size3))

/*
 * Test helper function that reserves 64 KiB somewhere in the simulated RAM and
 * then does some alloc + free tests.
 */
static int test_multi_alloc(struct unit_test_state *uts, const phys_addr_t ram,
                            const phys_size_t ram_size, const phys_addr_t ram0,
                            const phys_size_t ram0_size,
                            const phys_addr_t alloc_64k_addr)
{
        const phys_addr_t ram_end = ram + ram_size;
        const phys_addr_t alloc_64k_end = alloc_64k_addr + 0x10000;
        struct lmb lmb;
        long ret;
        phys_addr_t a, a2, b, b2, c, d;

        /* check for overflow */
        ut_assert(ram_end == 0 || ram_end > ram);
        ut_assert(alloc_64k_end > alloc_64k_addr);
        /* check input addresses + size */
        ut_assert(alloc_64k_addr >= ram + 8);
        ut_assert(alloc_64k_end <= ram_end - 8);

        lmb_init(&lmb);

        if (ram0_size) {
                ret = lmb_add(&lmb, ram0, ram0_size);
                ut_asserteq(ret, 0);
        }

        ret = lmb_add(&lmb, ram, ram_size);
        ut_asserteq(ret, 0);

        if (ram0_size) {
                ut_asserteq(lmb.memory.cnt, 2);
                ut_asserteq(lmb.memory.region[0].base, ram0);
                ut_asserteq(lmb.memory.region[0].size, ram0_size);
                ut_asserteq(lmb.memory.region[1].base, ram);
                ut_asserteq(lmb.memory.region[1].size, ram_size);
        } else {
                ut_asserteq(lmb.memory.cnt, 1);
                ut_asserteq(lmb.memory.region[0].base, ram);
                ut_asserteq(lmb.memory.region[0].size, ram_size);
        }

        /* reserve 64KiB somewhere */
        ret = lmb_reserve(&lmb, alloc_64k_addr, 0x10000);
        ut_asserteq(ret, 0);
        ASSERT_LMB(&lmb, 0, 0, 1, alloc_64k_addr, 0x10000,
                   0, 0, 0, 0);

        /* allocate somewhere, should be at the end of RAM */
        a = lmb_alloc(&lmb, 4, 1);
        ut_asserteq(a, ram_end - 4);
        ASSERT_LMB(&lmb, 0, 0, 2, alloc_64k_addr, 0x10000,
                   ram_end - 4, 4, 0, 0);
        /* alloc below end of reserved region -> below reserved region */
        b = lmb_alloc_base(&lmb, 4, 1, alloc_64k_end);
        ut_asserteq(b, alloc_64k_addr - 4);
        ASSERT_LMB(&lmb, 0, 0, 2,
                   alloc_64k_addr - 4, 0x10000 + 4, ram_end - 4, 4, 0, 0);

        /* 2nd time */
        c = lmb_alloc(&lmb, 4, 1);
        ut_asserteq(c, ram_end - 8);
        ASSERT_LMB(&lmb, 0, 0, 2,
                   alloc_64k_addr - 4, 0x10000 + 4, ram_end - 8, 8, 0, 0);
        d = lmb_alloc_base(&lmb, 4, 1, alloc_64k_end);
        ut_asserteq(d, alloc_64k_addr - 8);
        ASSERT_LMB(&lmb, 0, 0, 2,
                   alloc_64k_addr - 8, 0x10000 + 8, ram_end - 8, 8, 0, 0);

        ret = lmb_free(&lmb, a, 4);
        ut_asserteq(ret, 0);
        ASSERT_LMB(&lmb, 0, 0, 2,
                   alloc_64k_addr - 8, 0x10000 + 8, ram_end - 8, 4, 0, 0);
        /* allocate again to ensure we get the same address */
        a2 = lmb_alloc(&lmb, 4, 1);
        ut_asserteq(a, a2);
        ASSERT_LMB(&lmb, 0, 0, 2,
                   alloc_64k_addr - 8, 0x10000 + 8, ram_end - 8, 8, 0, 0);
        ret = lmb_free(&lmb, a2, 4);
        ut_asserteq(ret, 0);
        ASSERT_LMB(&lmb, 0, 0, 2,
                   alloc_64k_addr - 8, 0x10000 + 8, ram_end - 8, 4, 0, 0);

        ret = lmb_free(&lmb, b, 4);
        ut_asserteq(ret, 0);
        ASSERT_LMB(&lmb, 0, 0, 3,
                   alloc_64k_addr - 8, 4, alloc_64k_addr, 0x10000,
                   ram_end - 8, 4);
        /* allocate again to ensure we get the same address */
        b2 = lmb_alloc_base(&lmb, 4, 1, alloc_64k_end);
        ut_asserteq(b, b2);
        ASSERT_LMB(&lmb, 0, 0, 2,
                   alloc_64k_addr - 8, 0x10000 + 8, ram_end - 8, 4, 0, 0);
        ret = lmb_free(&lmb, b2, 4);
        ut_asserteq(ret, 0);
        ASSERT_LMB(&lmb, 0, 0, 3,
                   alloc_64k_addr - 8, 4, alloc_64k_addr, 0x10000,
                   ram_end - 8, 4);

        ret = lmb_free(&lmb, c, 4);
        ut_asserteq(ret, 0);
        ASSERT_LMB(&lmb, 0, 0, 2,
                   alloc_64k_addr - 8, 4, alloc_64k_addr, 0x10000, 0, 0);
        ret = lmb_free(&lmb, d, 4);
        ut_asserteq(ret, 0);
        ASSERT_LMB(&lmb, 0, 0, 1, alloc_64k_addr, 0x10000,
                   0, 0, 0, 0);

        if (ram0_size) {
                ut_asserteq(lmb.memory.cnt, 2);
                ut_asserteq(lmb.memory.region[0].base, ram0);
                ut_asserteq(lmb.memory.region[0].size, ram0_size);
                ut_asserteq(lmb.memory.region[1].base, ram);
                ut_asserteq(lmb.memory.region[1].size, ram_size);
        } else {
                ut_asserteq(lmb.memory.cnt, 1);
                ut_asserteq(lmb.memory.region[0].base, ram);
                ut_asserteq(lmb.memory.region[0].size, ram_size);
        }

        return 0;
}
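
/*
 * Thin wrappers that run test_multi_alloc() on one 512 MiB bank, or on
 * two 512 MiB banks.
 */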
static int test_multi_alloc_512mb(struct unit_test_state *uts,
                                  const phys_addr_t ram)
{
        return test_multi_alloc(uts, ram, 0x20000000, 0, 0, ram + 0x10000000);
}

static int test_multi_alloc_512mb_x2(struct unit_test_state *uts,
                                     const phys_addr_t ram,
                                     const phys_addr_t ram0)
{
        return test_multi_alloc(uts, ram, 0x20000000, ram0, 0x20000000,
                                ram + 0x10000000);
}

/* Create a memory region with one reserved region and allocate */
static int lib_test_lmb_simple(struct unit_test_state *uts)
{
        int ret;

        /* simulate 512 MiB RAM beginning at 1GiB */
        ret = test_multi_alloc_512mb(uts, 0x40000000);
        if (ret)
                return ret;

        /* simulate 512 MiB RAM beginning at 3.5GiB */
        return test_multi_alloc_512mb(uts, 0xE0000000);
}
DM_TEST(lib_test_lmb_simple, UT_TESTF_SCAN_PDATA | UT_TESTF_SCAN_FDT);

/* Create two memory regions with one reserved region and allocate */
static int lib_test_lmb_simple_x2(struct unit_test_state *uts)
{
        int ret;

        /* simulate 512 MiB RAM beginning at 2GiB and 1GiB */
        ret = test_multi_alloc_512mb_x2(uts, 0x80000000, 0x40000000);
        if (ret)
                return ret;

        /* simulate 512 MiB RAM beginning at 3.5GiB and 1GiB */
        return test_multi_alloc_512mb_x2(uts, 0xE0000000, 0x40000000);
}
DM_TEST(lib_test_lmb_simple_x2, UT_TESTF_SCAN_PDATA | UT_TESTF_SCAN_FDT);

/* Simulate 512 MiB RAM, allocate some blocks that fit/don't fit */
static int test_bigblock(struct unit_test_state *uts, const phys_addr_t ram)
{
        const phys_size_t ram_size = 0x20000000;
        const phys_size_t big_block_size = 0x10000000;
        const phys_addr_t ram_end = ram + ram_size;
        const phys_addr_t alloc_64k_addr = ram + 0x10000000;
        struct lmb lmb;
        long ret;
        phys_addr_t a, b;

        /* check for overflow */
        ut_assert(ram_end == 0 || ram_end > ram);

        lmb_init(&lmb);

        ret = lmb_add(&lmb, ram, ram_size);
        ut_asserteq(ret, 0);

        /* reserve 64KiB in the middle of RAM */
        ret = lmb_reserve(&lmb, alloc_64k_addr, 0x10000);
        ut_asserteq(ret, 0);
        ASSERT_LMB(&lmb, ram, ram_size, 1, alloc_64k_addr, 0x10000,
                   0, 0, 0, 0);

        /* allocate a big block, should be below reserved */
        a = lmb_alloc(&lmb, big_block_size, 1);
        ut_asserteq(a, ram);
        ASSERT_LMB(&lmb, ram, ram_size, 1, a,
                   big_block_size + 0x10000, 0, 0, 0, 0);
        /* allocate a 2nd big block */
        /* This should fail, printing an error */
        b = lmb_alloc(&lmb, big_block_size, 1);
        ut_asserteq(b, 0);
        ASSERT_LMB(&lmb, ram, ram_size, 1, a,
                   big_block_size + 0x10000, 0, 0, 0, 0);

        ret = lmb_free(&lmb, a, big_block_size);
        ut_asserteq(ret, 0);
        ASSERT_LMB(&lmb, ram, ram_size, 1, alloc_64k_addr, 0x10000,
                   0, 0, 0, 0);

        /* allocate a block that is too big */
        /* This should fail, printing an error */
        a = lmb_alloc(&lmb, ram_size, 1);
        ut_asserteq(a, 0);
        ASSERT_LMB(&lmb, ram, ram_size, 1, alloc_64k_addr, 0x10000,
                   0, 0, 0, 0);

        return 0;
}

static int lib_test_lmb_big(struct unit_test_state *uts)
{
        int ret;

        /* simulate 512 MiB RAM beginning at 1GiB */
        ret = test_bigblock(uts, 0x40000000);
        if (ret)
                return ret;

        /* simulate 512 MiB RAM beginning at 3.5GiB */
        return test_bigblock(uts, 0xE0000000);
}
DM_TEST(lib_test_lmb_big, UT_TESTF_SCAN_PDATA | UT_TESTF_SCAN_FDT);

/* Simulate 512 MiB RAM, allocate a block without previous reservation */
static int test_noreserved(struct unit_test_state *uts, const phys_addr_t ram,
                           const phys_size_t alloc_size, const ulong align)
{
        const phys_size_t ram_size = 0x20000000;
        const phys_addr_t ram_end = ram + ram_size;
        struct lmb lmb;
        long ret;
        phys_addr_t a, b;
        const phys_size_t alloc_size_aligned = (alloc_size + align - 1) &
                ~(align - 1);

        /* check for overflow */
        ut_assert(ram_end == 0 || ram_end > ram);

        lmb_init(&lmb);

        ret = lmb_add(&lmb, ram, ram_size);
        ut_asserteq(ret, 0);
        ASSERT_LMB(&lmb, ram, ram_size, 0, 0, 0, 0, 0, 0, 0);

        /* allocate a block */
        a = lmb_alloc(&lmb, alloc_size, align);
        ut_assert(a != 0);
        ASSERT_LMB(&lmb, ram, ram_size, 1, ram + ram_size - alloc_size_aligned,
                   alloc_size, 0, 0, 0, 0);
        /* allocate another block */
        b = lmb_alloc(&lmb, alloc_size, align);
        ut_assert(b != 0);
        if (alloc_size == alloc_size_aligned) {
                ASSERT_LMB(&lmb, ram, ram_size, 1, ram + ram_size -
                           (alloc_size_aligned * 2), alloc_size * 2, 0, 0, 0,
                           0);
        } else {
                ASSERT_LMB(&lmb, ram, ram_size, 2, ram + ram_size -
                           (alloc_size_aligned * 2), alloc_size, ram + ram_size
                           - alloc_size_aligned, alloc_size, 0, 0);
        }
        /* and free them */
        ret = lmb_free(&lmb, b, alloc_size);
        ut_asserteq(ret, 0);
        ASSERT_LMB(&lmb, ram, ram_size, 1, ram + ram_size - alloc_size_aligned,
                   alloc_size, 0, 0, 0, 0);
        ret = lmb_free(&lmb, a, alloc_size);
        ut_asserteq(ret, 0);
        ASSERT_LMB(&lmb, ram, ram_size, 0, 0, 0, 0, 0, 0, 0);

        /* allocate a block with base */
        b = lmb_alloc_base(&lmb, alloc_size, align, ram_end);
        ut_assert(a == b);
        ASSERT_LMB(&lmb, ram, ram_size, 1, ram + ram_size - alloc_size_aligned,
                   alloc_size, 0, 0, 0, 0);
        /* and free it */
        ret = lmb_free(&lmb, b, alloc_size);
        ut_asserteq(ret, 0);
        ASSERT_LMB(&lmb, ram, ram_size, 0, 0, 0, 0, 0, 0, 0);

        return 0;
}

static int lib_test_lmb_noreserved(struct unit_test_state *uts)
{
        int ret;

        /* simulate 512 MiB RAM beginning at 1GiB */
        ret = test_noreserved(uts, 0x40000000, 4, 1);
        if (ret)
                return ret;

        /* simulate 512 MiB RAM beginning at 3.5GiB */
        return test_noreserved(uts, 0xE0000000, 4, 1);
}
DM_TEST(lib_test_lmb_noreserved, UT_TESTF_SCAN_PDATA | UT_TESTF_SCAN_FDT);
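
/*
 * Like lib_test_lmb_noreserved(), but with an allocation size (5) that
 * is smaller than the alignment (8), so consecutive allocations leave
 * padding and cannot merge into one reserved region.
 */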
static int lib_test_lmb_unaligned_size(struct unit_test_state *uts)
{
        int ret;

        /* simulate 512 MiB RAM beginning at 1GiB */
        ret = test_noreserved(uts, 0x40000000, 5, 8);
        if (ret)
                return ret;

        /* simulate 512 MiB RAM beginning at 3.5GiB */
        return test_noreserved(uts, 0xE0000000, 5, 8);
}
DM_TEST(lib_test_lmb_unaligned_size, UT_TESTF_SCAN_PDATA | UT_TESTF_SCAN_FDT);

/*
 * Simulate a RAM that starts at 0 and allocate down to address 0, which must
 * fail as '0' means failure for the lmb_alloc functions.
 */
static int lib_test_lmb_at_0(struct unit_test_state *uts)
{
        const phys_addr_t ram = 0;
        const phys_size_t ram_size = 0x20000000;
        struct lmb lmb;
        long ret;
        phys_addr_t a, b;

        lmb_init(&lmb);

        ret = lmb_add(&lmb, ram, ram_size);
        ut_asserteq(ret, 0);

        /* allocate nearly everything */
        a = lmb_alloc(&lmb, ram_size - 4, 1);
        ut_asserteq(a, ram + 4);
        ASSERT_LMB(&lmb, ram, ram_size, 1, a, ram_size - 4,
                   0, 0, 0, 0);
        /* allocate the rest */
        /* This should fail as the allocated address would be 0 */
        b = lmb_alloc(&lmb, 4, 1);
        ut_asserteq(b, 0);
        /* check that this was an error by checking lmb */
        ASSERT_LMB(&lmb, ram, ram_size, 1, a, ram_size - 4,
                   0, 0, 0, 0);
        /* check that this was an error by freeing b */
        ret = lmb_free(&lmb, b, 4);
        ut_asserteq(ret, -1);
        ASSERT_LMB(&lmb, ram, ram_size, 1, a, ram_size - 4,
                   0, 0, 0, 0);

        ret = lmb_free(&lmb, a, ram_size - 4);
        ut_asserteq(ret, 0);
        ASSERT_LMB(&lmb, ram, ram_size, 0, 0, 0, 0, 0, 0, 0);

        return 0;
}
DM_TEST(lib_test_lmb_at_0, UT_TESTF_SCAN_PDATA | UT_TESTF_SCAN_FDT);

/* Check that calling lmb_reserve with overlapping regions fails. */
static int lib_test_lmb_overlapping_reserve(struct unit_test_state *uts)
{
        const phys_addr_t ram = 0x40000000;
        const phys_size_t ram_size = 0x20000000;
        struct lmb lmb;
        long ret;

        lmb_init(&lmb);

        ret = lmb_add(&lmb, ram, ram_size);
        ut_asserteq(ret, 0);

        ret = lmb_reserve(&lmb, 0x40010000, 0x10000);
        ut_asserteq(ret, 0);
        ASSERT_LMB(&lmb, ram, ram_size, 1, 0x40010000, 0x10000,
                   0, 0, 0, 0);
        /* reserving an overlapping region should fail */
        ret = lmb_reserve(&lmb, 0x40011000, 0x10000);
        ut_asserteq(ret, -1);
        ASSERT_LMB(&lmb, ram, ram_size, 1, 0x40010000, 0x10000,
                   0, 0, 0, 0);
        /* reserve a 3rd region, leaving a gap after the 1st */
        ret = lmb_reserve(&lmb, 0x40030000, 0x10000);
        ut_asserteq(ret, 0);
        ASSERT_LMB(&lmb, ram, ram_size, 2, 0x40010000, 0x10000,
                   0x40030000, 0x10000, 0, 0);
        /* reserve a 2nd region in the gap; all three should merge */
        ret = lmb_reserve(&lmb, 0x40020000, 0x10000);
        ut_assert(ret >= 0);
        ASSERT_LMB(&lmb, ram, ram_size, 1, 0x40010000, 0x30000,
                   0, 0, 0, 0);

        return 0;
}
DM_TEST(lib_test_lmb_overlapping_reserve,
        UT_TESTF_SCAN_PDATA | UT_TESTF_SCAN_FDT);

/*
 * Simulate 512 MiB RAM, reserve 3 blocks, allocate addresses in between.
 * Expect addresses outside the memory range to fail.
 */
static int test_alloc_addr(struct unit_test_state *uts, const phys_addr_t ram)
{
        const phys_size_t ram_size = 0x20000000;
        const phys_addr_t ram_end = ram + ram_size;
        const phys_addr_t alloc_addr_a = ram + 0x8000000;
        const phys_addr_t alloc_addr_b = ram + 0x8000000 * 2;
        const phys_addr_t alloc_addr_c = ram + 0x8000000 * 3;
        struct lmb lmb;
        long ret;
        phys_addr_t a, b, c, d, e;

        /* check for overflow */
        ut_assert(ram_end == 0 || ram_end > ram);

        lmb_init(&lmb);

        ret = lmb_add(&lmb, ram, ram_size);
        ut_asserteq(ret, 0);

        /* reserve 3 blocks */
        ret = lmb_reserve(&lmb, alloc_addr_a, 0x10000);
        ut_asserteq(ret, 0);
        ret = lmb_reserve(&lmb, alloc_addr_b, 0x10000);
        ut_asserteq(ret, 0);
        ret = lmb_reserve(&lmb, alloc_addr_c, 0x10000);
        ut_asserteq(ret, 0);
        ASSERT_LMB(&lmb, ram, ram_size, 3, alloc_addr_a, 0x10000,
                   alloc_addr_b, 0x10000, alloc_addr_c, 0x10000);

        /* allocate blocks */
        a = lmb_alloc_addr(&lmb, ram, alloc_addr_a - ram);
        ut_asserteq(a, ram);
        ASSERT_LMB(&lmb, ram, ram_size, 3, ram, 0x8010000,
                   alloc_addr_b, 0x10000, alloc_addr_c, 0x10000);
        b = lmb_alloc_addr(&lmb, alloc_addr_a + 0x10000,
                           alloc_addr_b - alloc_addr_a - 0x10000);
        ut_asserteq(b, alloc_addr_a + 0x10000);
        ASSERT_LMB(&lmb, ram, ram_size, 2, ram, 0x10010000,
                   alloc_addr_c, 0x10000, 0, 0);
        c = lmb_alloc_addr(&lmb, alloc_addr_b + 0x10000,
                           alloc_addr_c - alloc_addr_b - 0x10000);
        ut_asserteq(c, alloc_addr_b + 0x10000);
        ASSERT_LMB(&lmb, ram, ram_size, 1, ram, 0x18010000,
                   0, 0, 0, 0);
        d = lmb_alloc_addr(&lmb, alloc_addr_c + 0x10000,
                           ram_end - alloc_addr_c - 0x10000);
        ut_asserteq(d, alloc_addr_c + 0x10000);
        ASSERT_LMB(&lmb, ram, ram_size, 1, ram, ram_size,
                   0, 0, 0, 0);

        /* allocating anything else should fail */
        e = lmb_alloc(&lmb, 1, 1);
        ut_asserteq(e, 0);
        ASSERT_LMB(&lmb, ram, ram_size, 1, ram, ram_size,
                   0, 0, 0, 0);

        ret = lmb_free(&lmb, d, ram_end - alloc_addr_c - 0x10000);
        ut_asserteq(ret, 0);

        /* allocate at 3 points in free range */
        d = lmb_alloc_addr(&lmb, ram_end - 4, 4);
        ut_asserteq(d, ram_end - 4);
        ASSERT_LMB(&lmb, ram, ram_size, 2, ram, 0x18010000,
                   d, 4, 0, 0);
        ret = lmb_free(&lmb, d, 4);
        ut_asserteq(ret, 0);
        ASSERT_LMB(&lmb, ram, ram_size, 1, ram, 0x18010000,
                   0, 0, 0, 0);

        d = lmb_alloc_addr(&lmb, ram_end - 128, 4);
        ut_asserteq(d, ram_end - 128);
        ASSERT_LMB(&lmb, ram, ram_size, 2, ram, 0x18010000,
                   d, 4, 0, 0);
        ret = lmb_free(&lmb, d, 4);
        ut_asserteq(ret, 0);
        ASSERT_LMB(&lmb, ram, ram_size, 1, ram, 0x18010000,
                   0, 0, 0, 0);

        d = lmb_alloc_addr(&lmb, alloc_addr_c + 0x10000, 4);
        ut_asserteq(d, alloc_addr_c + 0x10000);
        ASSERT_LMB(&lmb, ram, ram_size, 1, ram, 0x18010004,
                   0, 0, 0, 0);
        ret = lmb_free(&lmb, d, 4);
        ut_asserteq(ret, 0);
        ASSERT_LMB(&lmb, ram, ram_size, 1, ram, 0x18010000,
                   0, 0, 0, 0);

        /* allocate at the bottom */
        ret = lmb_free(&lmb, a, alloc_addr_a - ram);
        ut_asserteq(ret, 0);
        ASSERT_LMB(&lmb, ram, ram_size, 1, ram + 0x8000000, 0x10010000,
                   0, 0, 0, 0);
        d = lmb_alloc_addr(&lmb, ram, 4);
        ut_asserteq(d, ram);
        ASSERT_LMB(&lmb, ram, ram_size, 2, d, 4,
                   ram + 0x8000000, 0x10010000, 0, 0);

        /* check that allocating outside memory fails */
        if (ram_end != 0) {
                ret = lmb_alloc_addr(&lmb, ram_end, 1);
                ut_asserteq(ret, 0);
        }
        if (ram != 0) {
                ret = lmb_alloc_addr(&lmb, ram - 1, 1);
                ut_asserteq(ret, 0);
        }

        return 0;
}

static int lib_test_lmb_alloc_addr(struct unit_test_state *uts)
{
        int ret;

        /* simulate 512 MiB RAM beginning at 1GiB */
        ret = test_alloc_addr(uts, 0x40000000);
        if (ret)
                return ret;

        /* simulate 512 MiB RAM beginning at 3.5GiB */
        return test_alloc_addr(uts, 0xE0000000);
}
DM_TEST(lib_test_lmb_alloc_addr, UT_TESTF_SCAN_PDATA | UT_TESTF_SCAN_FDT);

/* Simulate 512 MiB RAM, reserve 3 blocks, check addresses in between */
static int test_get_unreserved_size(struct unit_test_state *uts,
                                    const phys_addr_t ram)
{
        const phys_size_t ram_size = 0x20000000;
        const phys_addr_t ram_end = ram + ram_size;
        const phys_addr_t alloc_addr_a = ram + 0x8000000;
        const phys_addr_t alloc_addr_b = ram + 0x8000000 * 2;
        const phys_addr_t alloc_addr_c = ram + 0x8000000 * 3;
        struct lmb lmb;
        long ret;
        phys_size_t s;

        /* check for overflow */
        ut_assert(ram_end == 0 || ram_end > ram);

        lmb_init(&lmb);

        ret = lmb_add(&lmb, ram, ram_size);
        ut_asserteq(ret, 0);

        /* reserve 3 blocks */
        ret = lmb_reserve(&lmb, alloc_addr_a, 0x10000);
        ut_asserteq(ret, 0);
        ret = lmb_reserve(&lmb, alloc_addr_b, 0x10000);
        ut_asserteq(ret, 0);
        ret = lmb_reserve(&lmb, alloc_addr_c, 0x10000);
        ut_asserteq(ret, 0);
        ASSERT_LMB(&lmb, ram, ram_size, 3, alloc_addr_a, 0x10000,
                   alloc_addr_b, 0x10000, alloc_addr_c, 0x10000);

        /* check addresses in between blocks */
        s = lmb_get_free_size(&lmb, ram);
        ut_asserteq(s, alloc_addr_a - ram);
        s = lmb_get_free_size(&lmb, ram + 0x10000);
        ut_asserteq(s, alloc_addr_a - ram - 0x10000);
        s = lmb_get_free_size(&lmb, alloc_addr_a - 4);
        ut_asserteq(s, 4);

        s = lmb_get_free_size(&lmb, alloc_addr_a + 0x10000);
        ut_asserteq(s, alloc_addr_b - alloc_addr_a - 0x10000);
        s = lmb_get_free_size(&lmb, alloc_addr_a + 0x20000);
        ut_asserteq(s, alloc_addr_b - alloc_addr_a - 0x20000);
        s = lmb_get_free_size(&lmb, alloc_addr_b - 4);
        ut_asserteq(s, 4);

        s = lmb_get_free_size(&lmb, alloc_addr_c + 0x10000);
        ut_asserteq(s, ram_end - alloc_addr_c - 0x10000);
        s = lmb_get_free_size(&lmb, alloc_addr_c + 0x20000);
        ut_asserteq(s, ram_end - alloc_addr_c - 0x20000);
        s = lmb_get_free_size(&lmb, ram_end - 4);
        ut_asserteq(s, 4);

        return 0;
}

static int lib_test_lmb_get_free_size(struct unit_test_state *uts)
{
        int ret;

        /* simulate 512 MiB RAM beginning at 1GiB */
        ret = test_get_unreserved_size(uts, 0x40000000);
        if (ret)
                return ret;

        /* simulate 512 MiB RAM beginning at 3.5GiB */
        return test_get_unreserved_size(uts, 0xE0000000);
}
DM_TEST(lib_test_lmb_get_free_size,
        UT_TESTF_SCAN_PDATA | UT_TESTF_SCAN_FDT);
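
/*
 * Fill the lmb memory and reserved lists up to their fixed capacity of
 * CONFIG_LMB_MAX_REGIONS entries and check that adding one more region
 * fails. Only built when CONFIG_LMB_USE_MAX_REGIONS is enabled.
 */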
#ifdef CONFIG_LMB_USE_MAX_REGIONS
static int lib_test_lmb_max_regions(struct unit_test_state *uts)
{
        const phys_addr_t ram = 0x00000000;
        /*
         * All of 32bit memory space will contain regions for this test, so
         * we need to scale ram_size (which in this case is the size of the lmb
         * region) to match.
         */
        const phys_size_t ram_size = ((0xFFFFFFFF >> CONFIG_LMB_MAX_REGIONS)
                + 1) * CONFIG_LMB_MAX_REGIONS;
        const phys_size_t blk_size = 0x10000;
        phys_addr_t offset;
        struct lmb lmb;
        int ret, i;

        lmb_init(&lmb);

        ut_asserteq(lmb.memory.cnt, 0);
        ut_asserteq(lmb.memory.max, CONFIG_LMB_MAX_REGIONS);
        ut_asserteq(lmb.reserved.cnt, 0);
        ut_asserteq(lmb.reserved.max, CONFIG_LMB_MAX_REGIONS);

        /* Add CONFIG_LMB_MAX_REGIONS memory regions */
        for (i = 0; i < CONFIG_LMB_MAX_REGIONS; i++) {
                offset = ram + 2 * i * ram_size;
                ret = lmb_add(&lmb, offset, ram_size);
                ut_asserteq(ret, 0);
        }
        ut_asserteq(lmb.memory.cnt, CONFIG_LMB_MAX_REGIONS);
        ut_asserteq(lmb.reserved.cnt, 0);

        /* error for the (CONFIG_LMB_MAX_REGIONS + 1)th memory region */
        offset = ram + 2 * (CONFIG_LMB_MAX_REGIONS + 1) * ram_size;
        ret = lmb_add(&lmb, offset, ram_size);
        ut_asserteq(ret, -1);

        ut_asserteq(lmb.memory.cnt, CONFIG_LMB_MAX_REGIONS);
        ut_asserteq(lmb.reserved.cnt, 0);

        /* reserve CONFIG_LMB_MAX_REGIONS regions */
        for (i = 0; i < CONFIG_LMB_MAX_REGIONS; i++) {
                offset = ram + 2 * i * blk_size;
                ret = lmb_reserve(&lmb, offset, blk_size);
                ut_asserteq(ret, 0);
        }
        ut_asserteq(lmb.memory.cnt, CONFIG_LMB_MAX_REGIONS);
        ut_asserteq(lmb.reserved.cnt, CONFIG_LMB_MAX_REGIONS);

        /* error for the (CONFIG_LMB_MAX_REGIONS + 1)th reserved region */
        offset = ram + 2 * (CONFIG_LMB_MAX_REGIONS + 1) * blk_size;
        ret = lmb_reserve(&lmb, offset, blk_size);
        ut_asserteq(ret, -1);

        ut_asserteq(lmb.memory.cnt, CONFIG_LMB_MAX_REGIONS);
        ut_asserteq(lmb.reserved.cnt, CONFIG_LMB_MAX_REGIONS);

        /* check each region */
        for (i = 0; i < CONFIG_LMB_MAX_REGIONS; i++)
                ut_asserteq(lmb.memory.region[i].base, ram + 2 * i * ram_size);
        for (i = 0; i < CONFIG_LMB_MAX_REGIONS; i++)
                ut_asserteq(lmb.reserved.region[i].base, ram + 2 * i * blk_size);

        return 0;
}
DM_TEST(lib_test_lmb_max_regions,
        UT_TESTF_SCAN_PDATA | UT_TESTF_SCAN_FDT);
#endif /* CONFIG_LMB_USE_MAX_REGIONS */
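
/* Check lmb_reserve_flags(): flag matching and region merging rules */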
static int lib_test_lmb_flags(struct unit_test_state *uts)
{
        const phys_addr_t ram = 0x40000000;
        const phys_size_t ram_size = 0x20000000;
        struct lmb lmb;
        long ret;

        lmb_init(&lmb);

        ret = lmb_add(&lmb, ram, ram_size);
        ut_asserteq(ret, 0);

        /* reserve, same flag */
        ret = lmb_reserve_flags(&lmb, 0x40010000, 0x10000, LMB_NOMAP);
        ut_asserteq(ret, 0);
        ASSERT_LMB(&lmb, ram, ram_size, 1, 0x40010000, 0x10000,
                   0, 0, 0, 0);

        /* reserve again, same flag */
        ret = lmb_reserve_flags(&lmb, 0x40010000, 0x10000, LMB_NOMAP);
        ut_asserteq(ret, 0);
        ASSERT_LMB(&lmb, ram, ram_size, 1, 0x40010000, 0x10000,
                   0, 0, 0, 0);

        /* reserve again, new flag */
        ret = lmb_reserve_flags(&lmb, 0x40010000, 0x10000, LMB_NONE);
        ut_asserteq(ret, -1);
        ASSERT_LMB(&lmb, ram, ram_size, 1, 0x40010000, 0x10000,
                   0, 0, 0, 0);

        ut_asserteq(lmb_is_nomap(&lmb.reserved.region[0]), 1);

        /* merge after */
        ret = lmb_reserve_flags(&lmb, 0x40020000, 0x10000, LMB_NOMAP);
        ut_asserteq(ret, 1);
        ASSERT_LMB(&lmb, ram, ram_size, 1, 0x40010000, 0x20000,
                   0, 0, 0, 0);

        /* merge before */
        ret = lmb_reserve_flags(&lmb, 0x40000000, 0x10000, LMB_NOMAP);
        ut_asserteq(ret, 1);
        ASSERT_LMB(&lmb, ram, ram_size, 1, 0x40000000, 0x30000,
                   0, 0, 0, 0);

        ut_asserteq(lmb_is_nomap(&lmb.reserved.region[0]), 1);

        ret = lmb_reserve_flags(&lmb, 0x40030000, 0x10000, LMB_NONE);
        ut_asserteq(ret, 0);
        ASSERT_LMB(&lmb, ram, ram_size, 2, 0x40000000, 0x30000,
                   0x40030000, 0x10000, 0, 0);

        ut_asserteq(lmb_is_nomap(&lmb.reserved.region[0]), 1);
        ut_asserteq(lmb_is_nomap(&lmb.reserved.region[1]), 0);

        /* check that the old API reserves with LMB_NONE */
        ret = lmb_reserve(&lmb, 0x40040000, 0x10000);
        ut_asserteq(ret, 1);
        ASSERT_LMB(&lmb, ram, ram_size, 2, 0x40000000, 0x30000,
                   0x40030000, 0x20000, 0, 0);

        ut_asserteq(lmb_is_nomap(&lmb.reserved.region[0]), 1);
        ut_asserteq(lmb_is_nomap(&lmb.reserved.region[1]), 0);

        ret = lmb_reserve_flags(&lmb, 0x40070000, 0x10000, LMB_NOMAP);
        ut_asserteq(ret, 0);
        ASSERT_LMB(&lmb, ram, ram_size, 3, 0x40000000, 0x30000,
                   0x40030000, 0x20000, 0x40070000, 0x10000);

        ret = lmb_reserve_flags(&lmb, 0x40050000, 0x10000, LMB_NOMAP);
        ut_asserteq(ret, 0);
        ASSERT_LMB(&lmb, ram, ram_size, 4, 0x40000000, 0x30000,
                   0x40030000, 0x20000, 0x40050000, 0x10000);

        /* merge with 2 adjacent regions */
        ret = lmb_reserve_flags(&lmb, 0x40060000, 0x10000, LMB_NOMAP);
        ut_asserteq(ret, 2);
        ASSERT_LMB(&lmb, ram, ram_size, 3, 0x40000000, 0x30000,
                   0x40030000, 0x20000, 0x40050000, 0x30000);

        ut_asserteq(lmb_is_nomap(&lmb.reserved.region[0]), 1);
        ut_asserteq(lmb_is_nomap(&lmb.reserved.region[1]), 0);
        ut_asserteq(lmb_is_nomap(&lmb.reserved.region[2]), 1);

        return 0;
}
DM_TEST(lib_test_lmb_flags,
        UT_TESTF_SCAN_PDATA | UT_TESTF_SCAN_FDT);