drm_buddy_test.c 24 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
3704705706707708709710711712713714715716717718719720721722723724725726727728729730731732733734735736737738739740741742743744745746747748749750751752753754755756757758759760761762763764765766767768769770771772773774775776777778
  1. // SPDX-License-Identifier: MIT
  2. /*
  3. * Copyright © 2019 Intel Corporation
  4. * Copyright © 2022 Maíra Canal <mairacanal@riseup.net>
  5. */
  6. #include <kunit/test.h>
  7. #include <linux/prime_numbers.h>
  8. #include <linux/sched/signal.h>
  9. #include <linux/sizes.h>
  10. #include <drm/drm_buddy.h>
  11. #include "../lib/drm_random.h"
/* Seed shared by all randomised cases; set once (non-zero) in drm_buddy_suite_init(). */
static unsigned int random_seed;
  13. static inline u64 get_size(int order, u64 chunk_size)
  14. {
  15. return (1 << order) * chunk_size;
  16. }
/*
 * drm_test_buddy_alloc_range_bias - exercise DRM_BUDDY_RANGE_ALLOCATION.
 *
 * Three phases: (1) split the space into uniform 1M bias windows and
 * allocate within each in random order, checking bad requests fail and
 * valid ones succeed; (2) a free-form bias window that randomly grows in
 * both directions until it covers the whole space; (3) a biased
 * DRM_BUDDY_CLEAR_ALLOCATION on a fresh mm, which must fall back to
 * dirty blocks since nothing has been freed as cleared yet.
 */
static void drm_test_buddy_alloc_range_bias(struct kunit *test)
{
	u32 mm_size, size, ps, bias_size, bias_start, bias_end, bias_rem;
	DRM_RND_STATE(prng, random_seed);
	unsigned int i, count, *order;
	struct drm_buddy_block *block;
	unsigned long flags;
	struct drm_buddy mm;
	LIST_HEAD(allocated);

	bias_size = SZ_1M;
	/* Random power-of-two page size, clamped to at least 4K. */
	ps = roundup_pow_of_two(prandom_u32_state(&prng) % bias_size);
	ps = max(SZ_4K, ps);

	mm_size = (SZ_8M-1) & ~(ps-1); /* Multiple roots */

	kunit_info(test, "mm_size=%u, ps=%u\n", mm_size, ps);

	KUNIT_ASSERT_FALSE_MSG(test, drm_buddy_init(&mm, mm_size, ps),
			       "buddy_init failed\n");

	count = mm_size / bias_size;
	order = drm_random_order(count, &prng);
	KUNIT_EXPECT_TRUE(test, order);

	/*
	 * Idea is to split the address space into uniform bias ranges, and then
	 * in some random order allocate within each bias, using various
	 * patterns within. This should detect if allocations leak out from a
	 * given bias, for example.
	 */
	for (i = 0; i < count; i++) {
		LIST_HEAD(tmp);
		u32 size; /* NOTE(review): shadows the outer 'size' (used only after the loop) */

		bias_start = order[i] * bias_size;
		bias_end = bias_start + bias_size;
		bias_rem = bias_size;

		/* internal round_up too big */
		KUNIT_ASSERT_TRUE_MSG(test,
				      drm_buddy_alloc_blocks(&mm, bias_start,
							     bias_end, bias_size + ps, bias_size,
							     &allocated,
							     DRM_BUDDY_RANGE_ALLOCATION),
				      "buddy_alloc failed with bias(%x-%x), size=%u, ps=%u\n",
				      bias_start, bias_end, bias_size, bias_size);

		/* size too big */
		KUNIT_ASSERT_TRUE_MSG(test,
				      drm_buddy_alloc_blocks(&mm, bias_start,
							     bias_end, bias_size + ps, ps,
							     &allocated,
							     DRM_BUDDY_RANGE_ALLOCATION),
				      "buddy_alloc didn't fail with bias(%x-%x), size=%u, ps=%u\n",
				      bias_start, bias_end, bias_size + ps, ps);

		/* bias range too small for size */
		KUNIT_ASSERT_TRUE_MSG(test,
				      drm_buddy_alloc_blocks(&mm, bias_start + ps,
							     bias_end, bias_size, ps,
							     &allocated,
							     DRM_BUDDY_RANGE_ALLOCATION),
				      "buddy_alloc didn't fail with bias(%x-%x), size=%u, ps=%u\n",
				      bias_start + ps, bias_end, bias_size, ps);

		/* bias misaligned */
		KUNIT_ASSERT_TRUE_MSG(test,
				      drm_buddy_alloc_blocks(&mm, bias_start + ps,
							     bias_end - ps,
							     bias_size >> 1, bias_size >> 1,
							     &allocated,
							     DRM_BUDDY_RANGE_ALLOCATION),
				      "buddy_alloc h didn't fail with bias(%x-%x), size=%u, ps=%u\n",
				      bias_start + ps, bias_end - ps, bias_size >> 1, bias_size >> 1);

		/* single big page */
		KUNIT_ASSERT_FALSE_MSG(test,
				       drm_buddy_alloc_blocks(&mm, bias_start,
							      bias_end, bias_size, bias_size,
							      &tmp,
							      DRM_BUDDY_RANGE_ALLOCATION),
				       "buddy_alloc i failed with bias(%x-%x), size=%u, ps=%u\n",
				       bias_start, bias_end, bias_size, bias_size);
		drm_buddy_free_list(&mm, &tmp, 0);

		/* single page with internal round_up */
		KUNIT_ASSERT_FALSE_MSG(test,
				       drm_buddy_alloc_blocks(&mm, bias_start,
							      bias_end, ps, bias_size,
							      &tmp,
							      DRM_BUDDY_RANGE_ALLOCATION),
				       "buddy_alloc failed with bias(%x-%x), size=%u, ps=%u\n",
				       bias_start, bias_end, ps, bias_size);
		drm_buddy_free_list(&mm, &tmp, 0);

		/* random size within */
		size = max(round_up(prandom_u32_state(&prng) % bias_rem, ps), ps);
		if (size) /* always true: size >= ps >= SZ_4K by construction */
			KUNIT_ASSERT_FALSE_MSG(test,
					       drm_buddy_alloc_blocks(&mm, bias_start,
								      bias_end, size, ps,
								      &tmp,
								      DRM_BUDDY_RANGE_ALLOCATION),
					       "buddy_alloc failed with bias(%x-%x), size=%u, ps=%u\n",
					       bias_start, bias_end, size, ps);

		bias_rem -= size;
		/* too big for current avail */
		KUNIT_ASSERT_TRUE_MSG(test,
				      drm_buddy_alloc_blocks(&mm, bias_start,
							     bias_end, bias_rem + ps, ps,
							     &allocated,
							     DRM_BUDDY_RANGE_ALLOCATION),
				      "buddy_alloc didn't fail with bias(%x-%x), size=%u, ps=%u\n",
				      bias_start, bias_end, bias_rem + ps, ps);

		if (bias_rem) {
			/* random fill of the remainder */
			size = max(round_up(prandom_u32_state(&prng) % bias_rem, ps), ps);
			size = max(size, ps); /* NOTE(review): redundant, size is already >= ps */

			KUNIT_ASSERT_FALSE_MSG(test,
					       drm_buddy_alloc_blocks(&mm, bias_start,
								      bias_end, size, ps,
								      &allocated,
								      DRM_BUDDY_RANGE_ALLOCATION),
					       "buddy_alloc failed with bias(%x-%x), size=%u, ps=%u\n",
					       bias_start, bias_end, size, ps);
			/*
			 * Intentionally allow some space to be left
			 * unallocated, and ideally not always on the bias
			 * boundaries.
			 */
			drm_buddy_free_list(&mm, &tmp, 0);
		} else {
			list_splice_tail(&tmp, &allocated);
		}
	}

	kfree(order);
	drm_buddy_free_list(&mm, &allocated, 0);
	drm_buddy_fini(&mm);

	/*
	 * Something more free-form. Idea is to pick a random starting bias
	 * range within the address space and then start filling it up. Also
	 * randomly grow the bias range in both directions as we go along. This
	 * should give us bias start/end which is not always uniform like above,
	 * and in some cases will require the allocator to jump over already
	 * allocated nodes in the middle of the address space.
	 */
	KUNIT_ASSERT_FALSE_MSG(test, drm_buddy_init(&mm, mm_size, ps),
			       "buddy_init failed\n");

	bias_start = round_up(prandom_u32_state(&prng) % (mm_size - ps), ps);
	bias_end = round_up(bias_start + prandom_u32_state(&prng) % (mm_size - bias_start), ps);
	bias_end = max(bias_end, bias_start + ps);
	bias_rem = bias_end - bias_start;

	do {
		u32 size = max(round_up(prandom_u32_state(&prng) % bias_rem, ps), ps);

		KUNIT_ASSERT_FALSE_MSG(test,
				       drm_buddy_alloc_blocks(&mm, bias_start,
							      bias_end, size, ps,
							      &allocated,
							      DRM_BUDDY_RANGE_ALLOCATION),
				       "buddy_alloc failed with bias(%x-%x), size=%u, ps=%u\n",
				       bias_start, bias_end, size, ps);
		bias_rem -= size;

		/*
		 * Try to randomly grow the bias range in both directions, or
		 * only one, or perhaps don't grow at all.
		 */
		do {
			u32 old_bias_start = bias_start;
			u32 old_bias_end = bias_end;

			if (bias_start)
				bias_start -= round_up(prandom_u32_state(&prng) % bias_start, ps);
			if (bias_end != mm_size)
				bias_end += round_up(prandom_u32_state(&prng) % (mm_size - bias_end), ps);

			bias_rem += old_bias_start - bias_start;
			bias_rem += bias_end - old_bias_end;
		} while (!bias_rem && (bias_start || bias_end != mm_size));
	} while (bias_rem);

	/* Loops can only exit once the bias window covers the whole space. */
	KUNIT_ASSERT_EQ(test, bias_start, 0);
	KUNIT_ASSERT_EQ(test, bias_end, mm_size);
	KUNIT_ASSERT_TRUE_MSG(test,
			      drm_buddy_alloc_blocks(&mm, bias_start, bias_end,
						     ps, ps,
						     &allocated,
						     DRM_BUDDY_RANGE_ALLOCATION),
			      "buddy_alloc passed with bias(%x-%x), size=%u\n",
			      bias_start, bias_end, ps);

	drm_buddy_free_list(&mm, &allocated, 0);
	drm_buddy_fini(&mm);

	/*
	 * Allocate cleared blocks in the bias range when the DRM buddy's clear avail is
	 * zero. This will validate the bias range allocation in scenarios like system boot
	 * when no cleared blocks are available and exercise the fallback path too. The resulting
	 * blocks should always be dirty.
	 */
	KUNIT_ASSERT_FALSE_MSG(test, drm_buddy_init(&mm, mm_size, ps),
			       "buddy_init failed\n");

	bias_start = round_up(prandom_u32_state(&prng) % (mm_size - ps), ps);
	bias_end = round_up(bias_start + prandom_u32_state(&prng) % (mm_size - bias_start), ps);
	bias_end = max(bias_end, bias_start + ps);
	bias_rem = bias_end - bias_start;

	flags = DRM_BUDDY_CLEAR_ALLOCATION | DRM_BUDDY_RANGE_ALLOCATION;
	size = max(round_up(prandom_u32_state(&prng) % bias_rem, ps), ps);

	KUNIT_ASSERT_FALSE_MSG(test,
			       drm_buddy_alloc_blocks(&mm, bias_start,
						      bias_end, size, ps,
						      &allocated,
						      flags),
			       "buddy_alloc failed with bias(%x-%x), size=%u, ps=%u\n",
			       bias_start, bias_end, size, ps);

	/* Nothing was ever freed as cleared, so every block must be dirty. */
	list_for_each_entry(block, &allocated, link)
		KUNIT_EXPECT_EQ(test, drm_buddy_block_is_clear(block), false);

	drm_buddy_free_list(&mm, &allocated, 0);
	drm_buddy_fini(&mm);
}
/*
 * drm_test_buddy_alloc_clear - exercise DRM_BUDDY_CLEAR_ALLOCATION and the
 * dirty/clear bookkeeping: freed-as-cleared pages satisfy clear requests,
 * force-merged blocks come back dirty, and a non power-of-two mm exercises
 * the multi-root force merge at fini time.
 */
static void drm_test_buddy_alloc_clear(struct kunit *test)
{
	unsigned long n_pages, total, i = 0;
	DRM_RND_STATE(prng, random_seed);
	const unsigned long ps = SZ_4K;
	struct drm_buddy_block *block;
	const int max_order = 12;
	LIST_HEAD(allocated);
	struct drm_buddy mm;
	unsigned int order;
	u32 mm_size, size;
	LIST_HEAD(dirty);
	LIST_HEAD(clean);

	mm_size = SZ_4K << max_order;
	KUNIT_EXPECT_FALSE(test, drm_buddy_init(&mm, mm_size, ps));

	KUNIT_EXPECT_EQ(test, mm.max_order, max_order);

	/*
	 * Idea is to allocate and free some random portion of the address space,
	 * returning those pages as non-dirty and randomly alternate between
	 * requesting dirty and non-dirty pages (not going over the limit
	 * we freed as non-dirty), putting that into two separate lists.
	 * Loop over both lists at the end checking that the dirty list
	 * is indeed all dirty pages and vice versa. Free it all again,
	 * keeping the dirty/clear status.
	 */
	KUNIT_ASSERT_FALSE_MSG(test, drm_buddy_alloc_blocks(&mm, 0, mm_size,
							    5 * ps, ps, &allocated,
							    DRM_BUDDY_TOPDOWN_ALLOCATION),
			       "buddy_alloc hit an error size=%lu\n", 5 * ps);
	/* Return the 5 pages as cleared, seeding the clear-avail pool. */
	drm_buddy_free_list(&mm, &allocated, DRM_BUDDY_CLEARED);

	n_pages = 10;
	do {
		unsigned long flags;
		struct list_head *list;
		int slot = i % 2;

		/* Alternate: even iterations want dirty, odd want cleared. */
		if (slot == 0) {
			list = &dirty;
			flags = 0;
		} else {
			list = &clean;
			flags = DRM_BUDDY_CLEAR_ALLOCATION;
		}

		KUNIT_ASSERT_FALSE_MSG(test, drm_buddy_alloc_blocks(&mm, 0, mm_size,
								    ps, ps, list,
								    flags),
				       "buddy_alloc hit an error size=%lu\n", ps);
	} while (++i < n_pages);

	list_for_each_entry(block, &clean, link)
		KUNIT_EXPECT_EQ(test, drm_buddy_block_is_clear(block), true);

	list_for_each_entry(block, &dirty, link)
		KUNIT_EXPECT_EQ(test, drm_buddy_block_is_clear(block), false);

	drm_buddy_free_list(&mm, &clean, DRM_BUDDY_CLEARED);

	/*
	 * Trying to go over the clear limit for some allocation.
	 * The allocation should never fail with reasonable page-size.
	 */
	KUNIT_ASSERT_FALSE_MSG(test, drm_buddy_alloc_blocks(&mm, 0, mm_size,
							    10 * ps, ps, &clean,
							    DRM_BUDDY_CLEAR_ALLOCATION),
			       "buddy_alloc hit an error size=%lu\n", 10 * ps);

	drm_buddy_free_list(&mm, &clean, DRM_BUDDY_CLEARED);
	drm_buddy_free_list(&mm, &dirty, 0);
	drm_buddy_fini(&mm);

	KUNIT_EXPECT_FALSE(test, drm_buddy_init(&mm, mm_size, ps));

	/*
	 * Create a new mm. Intentionally fragment the address space by creating
	 * two alternating lists. Free both lists, one as dirty the other as clean.
	 * Try to allocate double the previous size with matching min_page_size. The
	 * allocation should never fail as it calls the force_merge. Also check that
	 * the page is always dirty after force_merge. Free the page as dirty, then
	 * repeat the whole thing, increment the order until we hit the max_order.
	 */
	i = 0;
	n_pages = mm_size / ps;
	do {
		struct list_head *list;
		int slot = i % 2;

		if (slot == 0)
			list = &dirty;
		else
			list = &clean;

		KUNIT_ASSERT_FALSE_MSG(test, drm_buddy_alloc_blocks(&mm, 0, mm_size,
								    ps, ps, list, 0),
				       "buddy_alloc hit an error size=%lu\n", ps);
	} while (++i < n_pages);

	/* Every other page is cleared, its neighbour dirty: merges must mix. */
	drm_buddy_free_list(&mm, &clean, DRM_BUDDY_CLEARED);
	drm_buddy_free_list(&mm, &dirty, 0);

	order = 1;
	do {
		size = SZ_4K << order;

		KUNIT_ASSERT_FALSE_MSG(test, drm_buddy_alloc_blocks(&mm, 0, mm_size,
								    size, size, &allocated,
								    DRM_BUDDY_CLEAR_ALLOCATION),
				       "buddy_alloc hit an error size=%u\n", size);
		total = 0;
		list_for_each_entry(block, &allocated, link) {
			/* A force-merged (dirty+clear) block must report dirty. */
			if (size != mm_size)
				KUNIT_EXPECT_EQ(test, drm_buddy_block_is_clear(block), false);
			total += drm_buddy_block_size(&mm, block);
		}
		KUNIT_EXPECT_EQ(test, total, size);

		drm_buddy_free_list(&mm, &allocated, 0);
	} while (++order <= max_order);

	drm_buddy_fini(&mm);

	/*
	 * Create a new mm with a non power-of-two size. Allocate a random size, free as
	 * cleared and then call fini. This will ensure the multi-root force merge during
	 * fini.
	 */
	mm_size = 12 * SZ_4K;
	size = max(round_up(prandom_u32_state(&prng) % mm_size, ps), ps);
	KUNIT_EXPECT_FALSE(test, drm_buddy_init(&mm, mm_size, ps));
	KUNIT_ASSERT_FALSE_MSG(test, drm_buddy_alloc_blocks(&mm, 0, mm_size,
							    size, ps, &allocated,
							    DRM_BUDDY_TOPDOWN_ALLOCATION),
			       "buddy_alloc hit an error size=%u\n", size);
	drm_buddy_free_list(&mm, &allocated, DRM_BUDDY_CLEARED);
	drm_buddy_fini(&mm);
}
/*
 * drm_test_buddy_alloc_contiguous - exercise DRM_BUDDY_CONTIGUOUS_ALLOCATION,
 * in particular the try_harder path that has to combine free blocks which
 * are adjacent but not buddies.
 */
static void drm_test_buddy_alloc_contiguous(struct kunit *test)
{
	const unsigned long ps = SZ_4K, mm_size = 16 * 3 * SZ_4K;
	unsigned long i, n_pages, total;
	struct drm_buddy_block *block;
	struct drm_buddy mm;
	LIST_HEAD(left);
	LIST_HEAD(middle);
	LIST_HEAD(right);
	LIST_HEAD(allocated);

	KUNIT_EXPECT_FALSE(test, drm_buddy_init(&mm, mm_size, ps));

	/*
	 * Idea is to fragment the address space by alternating block
	 * allocations between three different lists; one for left, middle and
	 * right. We can then free a list to simulate fragmentation. In
	 * particular we want to exercise the DRM_BUDDY_CONTIGUOUS_ALLOCATION,
	 * including the try_harder path.
	 */
	i = 0;
	n_pages = mm_size / ps;
	do {
		struct list_head *list;
		int slot = i % 3;

		if (slot == 0)
			list = &left;
		else if (slot == 1)
			list = &middle;
		else
			list = &right;
		KUNIT_ASSERT_FALSE_MSG(test,
				       drm_buddy_alloc_blocks(&mm, 0, mm_size,
							      ps, ps, list, 0),
				       "buddy_alloc hit an error size=%lu\n",
				       ps);
	} while (++i < n_pages);

	/* Fully allocated: a 3-page contiguous request must fail. */
	KUNIT_ASSERT_TRUE_MSG(test, drm_buddy_alloc_blocks(&mm, 0, mm_size,
							   3 * ps, ps, &allocated,
							   DRM_BUDDY_CONTIGUOUS_ALLOCATION),
			      "buddy_alloc didn't error size=%lu\n", 3 * ps);

	drm_buddy_free_list(&mm, &middle, 0);
	/* Freeing 'middle' leaves only isolated single-page holes. */
	KUNIT_ASSERT_TRUE_MSG(test, drm_buddy_alloc_blocks(&mm, 0, mm_size,
							   3 * ps, ps, &allocated,
							   DRM_BUDDY_CONTIGUOUS_ALLOCATION),
			      "buddy_alloc didn't error size=%lu\n", 3 * ps);
	KUNIT_ASSERT_TRUE_MSG(test, drm_buddy_alloc_blocks(&mm, 0, mm_size,
							   2 * ps, ps, &allocated,
							   DRM_BUDDY_CONTIGUOUS_ALLOCATION),
			      "buddy_alloc didn't error size=%lu\n", 2 * ps);

	drm_buddy_free_list(&mm, &right, 0);
	/* Now middle+right pairs are free, but 3 contiguous still impossible. */
	KUNIT_ASSERT_TRUE_MSG(test, drm_buddy_alloc_blocks(&mm, 0, mm_size,
							   3 * ps, ps, &allocated,
							   DRM_BUDDY_CONTIGUOUS_ALLOCATION),
			      "buddy_alloc didn't error size=%lu\n", 3 * ps);
	/*
	 * At this point we should have enough contiguous space for 2 blocks,
	 * however they are never buddies (since we freed middle and right) so
	 * will require the try_harder logic to find them.
	 */
	KUNIT_ASSERT_FALSE_MSG(test, drm_buddy_alloc_blocks(&mm, 0, mm_size,
							    2 * ps, ps, &allocated,
							    DRM_BUDDY_CONTIGUOUS_ALLOCATION),
			       "buddy_alloc hit an error size=%lu\n", 2 * ps);

	drm_buddy_free_list(&mm, &left, 0);
	KUNIT_ASSERT_FALSE_MSG(test, drm_buddy_alloc_blocks(&mm, 0, mm_size,
							    3 * ps, ps, &allocated,
							    DRM_BUDDY_CONTIGUOUS_ALLOCATION),
			       "buddy_alloc hit an error size=%lu\n", 3 * ps);

	/* The two successful contiguous allocations: 2 pages + 3 pages. */
	total = 0;
	list_for_each_entry(block, &allocated, link)
		total += drm_buddy_block_size(&mm, block);

	KUNIT_ASSERT_EQ(test, total, ps * 2 + ps * 3);

	drm_buddy_free_list(&mm, &allocated, 0);
	drm_buddy_fini(&mm);
}
/*
 * drm_test_buddy_alloc_pathological - drive the mm into a 50% fragmented
 * state: repeatedly allocate one block of each order below 'top', pin the
 * final page in 'holes', and verify that nothing of order 'top' (or, at the
 * end, anything larger than chunk_size) can be allocated.
 */
static void drm_test_buddy_alloc_pathological(struct kunit *test)
{
	u64 mm_size, size, start = 0;
	struct drm_buddy_block *block;
	const int max_order = 3;
	unsigned long flags = 0;
	int order, top;
	struct drm_buddy mm;
	LIST_HEAD(blocks);
	LIST_HEAD(holes);
	LIST_HEAD(tmp);

	/*
	 * Create a pot-sized mm, then allocate one of each possible
	 * order within. This should leave the mm with exactly one
	 * page left. Free the largest block, then whittle down again.
	 * Eventually we will have a fully 50% fragmented mm.
	 */

	mm_size = SZ_4K << max_order;
	KUNIT_ASSERT_FALSE_MSG(test, drm_buddy_init(&mm, mm_size, SZ_4K),
			       "buddy_init failed\n");

	KUNIT_EXPECT_EQ(test, mm.max_order, max_order);

	for (top = max_order; top; top--) {
		/* Make room by freeing the largest allocated block */
		block = list_first_entry_or_null(&blocks, typeof(*block), link);
		if (block) {
			list_del(&block->link);
			drm_buddy_free_block(&mm, block);
		}

		/* One allocation per order, from top-1 down to 0. */
		for (order = top; order--;) {
			size = get_size(order, mm.chunk_size);
			KUNIT_ASSERT_FALSE_MSG(test, drm_buddy_alloc_blocks(&mm, start,
									    mm_size, size, size,
									    &tmp, flags),
					       "buddy_alloc hit -ENOMEM with order=%d, top=%d\n",
					       order, top);

			block = list_first_entry_or_null(&tmp, struct drm_buddy_block, link);
			KUNIT_ASSERT_TRUE_MSG(test, block, "alloc_blocks has no blocks\n");

			list_move_tail(&block->link, &blocks);
		}

		/* There should be one final page for this sub-allocation */
		size = get_size(0, mm.chunk_size);
		KUNIT_ASSERT_FALSE_MSG(test, drm_buddy_alloc_blocks(&mm, start, mm_size,
								    size, size, &tmp, flags),
				       "buddy_alloc hit -ENOMEM for hole\n");

		block = list_first_entry_or_null(&tmp, struct drm_buddy_block, link);
		KUNIT_ASSERT_TRUE_MSG(test, block, "alloc_blocks has no blocks\n");

		list_move_tail(&block->link, &holes);

		/* No contiguous 2^top chunk may remain at this point. */
		size = get_size(top, mm.chunk_size);
		KUNIT_ASSERT_TRUE_MSG(test, drm_buddy_alloc_blocks(&mm, start, mm_size,
								   size, size, &tmp, flags),
				      "buddy_alloc unexpectedly succeeded at top-order %d/%d, it should be full!",
				      top, max_order);
	}

	drm_buddy_free_list(&mm, &holes, 0);

	/* Nothing larger than blocks of chunk_size now available */
	for (order = 1; order <= max_order; order++) {
		size = get_size(order, mm.chunk_size);
		KUNIT_ASSERT_TRUE_MSG(test, drm_buddy_alloc_blocks(&mm, start, mm_size,
								   size, size, &tmp, flags),
				      "buddy_alloc unexpectedly succeeded at order %d, it should be full!",
				      order);
	}

	/*
	 * NOTE(review): drm_buddy_free_list() above appears to have already
	 * consumed 'holes', which would make this splice a no-op — confirm
	 * whether it can simply be dropped.
	 */
	list_splice_tail(&holes, &blocks);
	drm_buddy_free_list(&mm, &blocks, 0);
	drm_buddy_fini(&mm);
}
/*
 * drm_test_buddy_alloc_pessimistic - fill a pot-sized mm with one block of
 * every order plus the final page, verify it is then completely full, then
 * free in increasing size and check that each free makes the next larger
 * order allocatable, finishing with the whole mm available again.
 */
static void drm_test_buddy_alloc_pessimistic(struct kunit *test)
{
	u64 mm_size, size, start = 0;
	struct drm_buddy_block *block, *bn;
	const unsigned int max_order = 16;
	unsigned long flags = 0;
	struct drm_buddy mm;
	unsigned int order;
	LIST_HEAD(blocks);
	LIST_HEAD(tmp);

	/*
	 * Create a pot-sized mm, then allocate one of each possible
	 * order within. This should leave the mm with exactly one
	 * page left.
	 */

	mm_size = SZ_4K << max_order;
	KUNIT_ASSERT_FALSE_MSG(test, drm_buddy_init(&mm, mm_size, SZ_4K),
			       "buddy_init failed\n");
	KUNIT_EXPECT_EQ(test, mm.max_order, max_order);

	for (order = 0; order < max_order; order++) {
		size = get_size(order, mm.chunk_size);
		KUNIT_ASSERT_FALSE_MSG(test, drm_buddy_alloc_blocks(&mm, start, mm_size,
								    size, size, &tmp, flags),
				       "buddy_alloc hit -ENOMEM with order=%d\n",
				       order);

		block = list_first_entry_or_null(&tmp, struct drm_buddy_block, link);
		KUNIT_ASSERT_TRUE_MSG(test, block, "alloc_blocks has no blocks\n");

		list_move_tail(&block->link, &blocks);
	}

	/* And now the last remaining block available */
	size = get_size(0, mm.chunk_size);
	KUNIT_ASSERT_FALSE_MSG(test, drm_buddy_alloc_blocks(&mm, start, mm_size,
							    size, size, &tmp, flags),
			       "buddy_alloc hit -ENOMEM on final alloc\n");

	block = list_first_entry_or_null(&tmp, struct drm_buddy_block, link);
	KUNIT_ASSERT_TRUE_MSG(test, block, "alloc_blocks has no blocks\n");

	list_move_tail(&block->link, &blocks);

	/* Should be completely full! */
	for (order = max_order; order--;) {
		size = get_size(order, mm.chunk_size);
		KUNIT_ASSERT_TRUE_MSG(test, drm_buddy_alloc_blocks(&mm, start, mm_size,
								   size, size, &tmp, flags),
				      "buddy_alloc unexpectedly succeeded, it should be full!");
	}

	/* Free the last page allocated above (the tail of 'blocks'). */
	block = list_last_entry(&blocks, typeof(*block), link);
	list_del(&block->link);
	drm_buddy_free_block(&mm, block);

	/* As we free in increasing size, we make available larger blocks */
	order = 1;
	list_for_each_entry_safe(block, bn, &blocks, link) {
		list_del(&block->link);
		drm_buddy_free_block(&mm, block);

		/* Enough has now merged for an order-'order' block to fit. */
		size = get_size(order, mm.chunk_size);
		KUNIT_ASSERT_FALSE_MSG(test, drm_buddy_alloc_blocks(&mm, start, mm_size,
								    size, size, &tmp, flags),
				       "buddy_alloc hit -ENOMEM with order=%d\n",
				       order);

		block = list_first_entry_or_null(&tmp, struct drm_buddy_block, link);
		KUNIT_ASSERT_TRUE_MSG(test, block, "alloc_blocks has no blocks\n");

		/* Return it straight away so the merge chain can continue. */
		list_del(&block->link);
		drm_buddy_free_block(&mm, block);
		order++;
	}

	/* To confirm, now the whole mm should be available */
	size = get_size(max_order, mm.chunk_size);
	KUNIT_ASSERT_FALSE_MSG(test, drm_buddy_alloc_blocks(&mm, start, mm_size,
							    size, size, &tmp, flags),
			       "buddy_alloc (realloc) hit -ENOMEM with order=%d\n",
			       max_order);

	block = list_first_entry_or_null(&tmp, struct drm_buddy_block, link);
	KUNIT_ASSERT_TRUE_MSG(test, block, "alloc_blocks has no blocks\n");

	list_del(&block->link);
	drm_buddy_free_block(&mm, block);
	drm_buddy_free_list(&mm, &blocks, 0);
	drm_buddy_fini(&mm);
}
  553. static void drm_test_buddy_alloc_optimistic(struct kunit *test)
  554. {
  555. u64 mm_size, size, start = 0;
  556. struct drm_buddy_block *block;
  557. unsigned long flags = 0;
  558. const int max_order = 16;
  559. struct drm_buddy mm;
  560. LIST_HEAD(blocks);
  561. LIST_HEAD(tmp);
  562. int order;
  563. /*
  564. * Create a mm with one block of each order available, and
  565. * try to allocate them all.
  566. */
  567. mm_size = SZ_4K * ((1 << (max_order + 1)) - 1);
  568. KUNIT_ASSERT_FALSE_MSG(test, drm_buddy_init(&mm, mm_size, SZ_4K),
  569. "buddy_init failed\n");
  570. KUNIT_EXPECT_EQ(test, mm.max_order, max_order);
  571. for (order = 0; order <= max_order; order++) {
  572. size = get_size(order, mm.chunk_size);
  573. KUNIT_ASSERT_FALSE_MSG(test, drm_buddy_alloc_blocks(&mm, start, mm_size,
  574. size, size, &tmp, flags),
  575. "buddy_alloc hit -ENOMEM with order=%d\n",
  576. order);
  577. block = list_first_entry_or_null(&tmp, struct drm_buddy_block, link);
  578. KUNIT_ASSERT_TRUE_MSG(test, block, "alloc_blocks has no blocks\n");
  579. list_move_tail(&block->link, &blocks);
  580. }
  581. /* Should be completely full! */
  582. size = get_size(0, mm.chunk_size);
  583. KUNIT_ASSERT_TRUE_MSG(test, drm_buddy_alloc_blocks(&mm, start, mm_size,
  584. size, size, &tmp, flags),
  585. "buddy_alloc unexpectedly succeeded, it should be full!");
  586. drm_buddy_free_list(&mm, &blocks, 0);
  587. drm_buddy_fini(&mm);
  588. }
  589. static void drm_test_buddy_alloc_limit(struct kunit *test)
  590. {
  591. u64 size = U64_MAX, start = 0;
  592. struct drm_buddy_block *block;
  593. unsigned long flags = 0;
  594. LIST_HEAD(allocated);
  595. struct drm_buddy mm;
  596. KUNIT_EXPECT_FALSE(test, drm_buddy_init(&mm, size, SZ_4K));
  597. KUNIT_EXPECT_EQ_MSG(test, mm.max_order, DRM_BUDDY_MAX_ORDER,
  598. "mm.max_order(%d) != %d\n", mm.max_order,
  599. DRM_BUDDY_MAX_ORDER);
  600. size = mm.chunk_size << mm.max_order;
  601. KUNIT_EXPECT_FALSE(test, drm_buddy_alloc_blocks(&mm, start, size, size,
  602. mm.chunk_size, &allocated, flags));
  603. block = list_first_entry_or_null(&allocated, struct drm_buddy_block, link);
  604. KUNIT_EXPECT_TRUE(test, block);
  605. KUNIT_EXPECT_EQ_MSG(test, drm_buddy_block_order(block), mm.max_order,
  606. "block order(%d) != %d\n",
  607. drm_buddy_block_order(block), mm.max_order);
  608. KUNIT_EXPECT_EQ_MSG(test, drm_buddy_block_size(&mm, block),
  609. BIT_ULL(mm.max_order) * mm.chunk_size,
  610. "block size(%llu) != %llu\n",
  611. drm_buddy_block_size(&mm, block),
  612. BIT_ULL(mm.max_order) * mm.chunk_size);
  613. drm_buddy_free_list(&mm, &allocated, 0);
  614. drm_buddy_fini(&mm);
  615. }
  616. static int drm_buddy_suite_init(struct kunit_suite *suite)
  617. {
  618. while (!random_seed)
  619. random_seed = get_random_u32();
  620. kunit_info(suite, "Testing DRM buddy manager, with random_seed=0x%x\n",
  621. random_seed);
  622. return 0;
  623. }
/* Table of all drm_buddy KUnit cases; the empty entry terminates the list. */
static struct kunit_case drm_buddy_tests[] = {
	KUNIT_CASE(drm_test_buddy_alloc_limit),
	KUNIT_CASE(drm_test_buddy_alloc_optimistic),
	KUNIT_CASE(drm_test_buddy_alloc_pessimistic),
	KUNIT_CASE(drm_test_buddy_alloc_pathological),
	KUNIT_CASE(drm_test_buddy_alloc_contiguous),
	KUNIT_CASE(drm_test_buddy_alloc_clear),
	KUNIT_CASE(drm_test_buddy_alloc_range_bias),
	{}
};
/* Suite definition; suite_init seeds the shared RNG before any case runs. */
static struct kunit_suite drm_buddy_test_suite = {
	.name = "drm_buddy",
	.suite_init = drm_buddy_suite_init,
	.test_cases = drm_buddy_tests,
};

kunit_test_suite(drm_buddy_test_suite);

MODULE_AUTHOR("Intel Corporation");
MODULE_DESCRIPTION("Kunit test for drm_buddy functions");
MODULE_LICENSE("GPL");