// SPDX-License-Identifier: MIT
/*
 * Copyright © 2021 Intel Corporation
 */

#include <linux/kmemleak.h>
#include <linux/module.h>
#include <linux/sizes.h>

#include <drm/drm_buddy.h>

static struct kmem_cache *slab_blocks;

static struct drm_buddy_block *drm_block_alloc(struct drm_buddy *mm,
					       struct drm_buddy_block *parent,
					       unsigned int order,
					       u64 offset)
{
	struct drm_buddy_block *block;

	BUG_ON(order > DRM_BUDDY_MAX_ORDER);

	block = kmem_cache_zalloc(slab_blocks, GFP_KERNEL);
	if (!block)
		return NULL;

	block->header = offset;
	block->header |= order;
	block->parent = parent;

	BUG_ON(block->header & DRM_BUDDY_HEADER_UNUSED);
	return block;
}

static void drm_block_free(struct drm_buddy *mm,
			   struct drm_buddy_block *block)
{
	kmem_cache_free(slab_blocks, block);
}

static void list_insert_sorted(struct drm_buddy *mm,
			       struct drm_buddy_block *block)
{
	struct drm_buddy_block *node;
	struct list_head *head;

	head = &mm->free_list[drm_buddy_block_order(block)];
	if (list_empty(head)) {
		list_add(&block->link, head);
		return;
	}

	list_for_each_entry(node, head, link)
		if (drm_buddy_block_offset(block) < drm_buddy_block_offset(node))
			break;

	__list_add(&block->link, node->link.prev, &node->link);
}

static void clear_reset(struct drm_buddy_block *block)
{
	block->header &= ~DRM_BUDDY_HEADER_CLEAR;
}

static void mark_cleared(struct drm_buddy_block *block)
{
	block->header |= DRM_BUDDY_HEADER_CLEAR;
}

static void mark_allocated(struct drm_buddy_block *block)
{
	block->header &= ~DRM_BUDDY_HEADER_STATE;
	block->header |= DRM_BUDDY_ALLOCATED;

	list_del(&block->link);
}

static void mark_free(struct drm_buddy *mm,
		      struct drm_buddy_block *block)
{
	block->header &= ~DRM_BUDDY_HEADER_STATE;
	block->header |= DRM_BUDDY_FREE;

	list_insert_sorted(mm, block);
}

static void mark_split(struct drm_buddy_block *block)
{
	block->header &= ~DRM_BUDDY_HEADER_STATE;
	block->header |= DRM_BUDDY_SPLIT;

	list_del(&block->link);
}

static inline bool overlaps(u64 s1, u64 e1, u64 s2, u64 e2)
{
	return s1 <= e2 && e1 >= s2;
}

static inline bool contains(u64 s1, u64 e1, u64 s2, u64 e2)
{
	return s1 <= s2 && e1 >= e2;
}

static struct drm_buddy_block *
__get_buddy(struct drm_buddy_block *block)
{
	struct drm_buddy_block *parent;

	parent = block->parent;
	if (!parent)
		return NULL;

	if (parent->left == block)
		return parent->right;

	return parent->left;
}

static unsigned int __drm_buddy_free(struct drm_buddy *mm,
				     struct drm_buddy_block *block,
				     bool force_merge)
{
	struct drm_buddy_block *parent;
	unsigned int order;

	while ((parent = block->parent)) {
		struct drm_buddy_block *buddy;

		buddy = __get_buddy(block);

		if (!drm_buddy_block_is_free(buddy))
			break;

		if (!force_merge) {
			/*
			 * Check the clear state of the block and its buddy,
			 * and exit the loop if the states differ.
			 */
			if (drm_buddy_block_is_clear(block) !=
			    drm_buddy_block_is_clear(buddy))
				break;

			if (drm_buddy_block_is_clear(block))
				mark_cleared(parent);
		}

		list_del(&buddy->link);
		if (force_merge && drm_buddy_block_is_clear(buddy))
			mm->clear_avail -= drm_buddy_block_size(mm, buddy);

		drm_block_free(mm, block);
		drm_block_free(mm, buddy);

		block = parent;
	}

	order = drm_buddy_block_order(block);
	mark_free(mm, block);

	return order;
}

static int __force_merge(struct drm_buddy *mm,
			 u64 start,
			 u64 end,
			 unsigned int min_order)
{
	unsigned int order;
	int i;

	if (!min_order)
		return -ENOMEM;

	if (min_order > mm->max_order)
		return -EINVAL;

	for (i = min_order - 1; i >= 0; i--) {
		struct drm_buddy_block *block, *prev;

		list_for_each_entry_safe_reverse(block, prev, &mm->free_list[i], link) {
			struct drm_buddy_block *buddy;
			u64 block_start, block_end;

			if (!block->parent)
				continue;

			block_start = drm_buddy_block_offset(block);
			block_end = block_start + drm_buddy_block_size(mm, block) - 1;

			if (!contains(start, end, block_start, block_end))
				continue;

			buddy = __get_buddy(block);
			if (!drm_buddy_block_is_free(buddy))
				continue;

			WARN_ON(drm_buddy_block_is_clear(block) ==
				drm_buddy_block_is_clear(buddy));

			/*
			 * If the prev block is the same as the buddy, don't
			 * access it in the next iteration, since the buddy
			 * block is freed as part of the free function.
			 */
			if (prev == buddy)
				prev = list_prev_entry(prev, link);

			list_del(&block->link);
			if (drm_buddy_block_is_clear(block))
				mm->clear_avail -= drm_buddy_block_size(mm, block);

			order = __drm_buddy_free(mm, block, true);
			if (order >= min_order)
				return 0;
		}
	}

	return -ENOMEM;
}

/**
 * drm_buddy_init - init memory manager
 *
 * @mm: DRM buddy manager to initialize
 * @size: size in bytes to manage
 * @chunk_size: minimum page size in bytes for our allocations
 *
 * Initializes the memory manager and its resources.
 *
 * Returns:
 * 0 on success, error code on failure.
 */
int drm_buddy_init(struct drm_buddy *mm, u64 size, u64 chunk_size)
{
	unsigned int i;
	u64 offset;

	if (size < chunk_size)
		return -EINVAL;

	if (chunk_size < SZ_4K)
		return -EINVAL;

	if (!is_power_of_2(chunk_size))
		return -EINVAL;

	size = round_down(size, chunk_size);

	mm->size = size;
	mm->avail = size;
	mm->clear_avail = 0;
	mm->chunk_size = chunk_size;
	mm->max_order = ilog2(size) - ilog2(chunk_size);

	BUG_ON(mm->max_order > DRM_BUDDY_MAX_ORDER);

	mm->free_list = kmalloc_array(mm->max_order + 1,
				      sizeof(struct list_head),
				      GFP_KERNEL);
	if (!mm->free_list)
		return -ENOMEM;

	for (i = 0; i <= mm->max_order; ++i)
		INIT_LIST_HEAD(&mm->free_list[i]);

	mm->n_roots = hweight64(size);

	mm->roots = kmalloc_array(mm->n_roots,
				  sizeof(struct drm_buddy_block *),
				  GFP_KERNEL);
	if (!mm->roots)
		goto out_free_list;

	offset = 0;
	i = 0;

	/*
	 * Split into power-of-two blocks, in case we are given a size that is
	 * not itself a power-of-two.
	 */
	do {
		struct drm_buddy_block *root;
		unsigned int order;
		u64 root_size;

		order = ilog2(size) - ilog2(chunk_size);
		root_size = chunk_size << order;

		root = drm_block_alloc(mm, NULL, order, offset);
		if (!root)
			goto out_free_roots;

		mark_free(mm, root);

		BUG_ON(i > mm->max_order);
		BUG_ON(drm_buddy_block_size(mm, root) < chunk_size);

		mm->roots[i] = root;

		offset += root_size;
		size -= root_size;
		i++;
	} while (size);

	return 0;

out_free_roots:
	while (i--)
		drm_block_free(mm, mm->roots[i]);
	kfree(mm->roots);
out_free_list:
	kfree(mm->free_list);
	return -ENOMEM;
}
EXPORT_SYMBOL(drm_buddy_init);

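/*
 * Illustrative usage sketch (hypothetical driver code, not taken from an
 * in-tree user): bringing up a buddy manager over a 1 GiB region with a
 * 4 KiB minimum chunk, and tearing it down again. Callers are expected to
 * provide their own locking around all allocator entry points.
 *
 *	struct drm_buddy mm;
 *	int err;
 *
 *	err = drm_buddy_init(&mm, SZ_1G, SZ_4K);
 *	if (err)
 *		return err;
 *	...
 *	drm_buddy_fini(&mm);
 */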
/**
 * drm_buddy_fini - tear down the memory manager
 *
 * @mm: DRM buddy manager to free
 *
 * Cleanup memory manager resources and the freelist
 */
void drm_buddy_fini(struct drm_buddy *mm)
{
	u64 root_size, size, start;
	unsigned int order;
	int i;

	size = mm->size;

	for (i = 0; i < mm->n_roots; ++i) {
		order = ilog2(size) - ilog2(mm->chunk_size);
		start = drm_buddy_block_offset(mm->roots[i]);
		__force_merge(mm, start, start + size, order);

		WARN_ON(!drm_buddy_block_is_free(mm->roots[i]));
		drm_block_free(mm, mm->roots[i]);

		root_size = mm->chunk_size << order;
		size -= root_size;
	}

	WARN_ON(mm->avail != mm->size);

	kfree(mm->roots);
	kfree(mm->free_list);
}
EXPORT_SYMBOL(drm_buddy_fini);

static int split_block(struct drm_buddy *mm,
		       struct drm_buddy_block *block)
{
	unsigned int block_order = drm_buddy_block_order(block) - 1;
	u64 offset = drm_buddy_block_offset(block);

	BUG_ON(!drm_buddy_block_is_free(block));
	BUG_ON(!drm_buddy_block_order(block));

	block->left = drm_block_alloc(mm, block, block_order, offset);
	if (!block->left)
		return -ENOMEM;

	block->right = drm_block_alloc(mm, block, block_order,
				       offset + (mm->chunk_size << block_order));
	if (!block->right) {
		drm_block_free(mm, block->left);
		return -ENOMEM;
	}

	mark_free(mm, block->left);
	mark_free(mm, block->right);

	if (drm_buddy_block_is_clear(block)) {
		mark_cleared(block->left);
		mark_cleared(block->right);
		clear_reset(block);
	}

	mark_split(block);

	return 0;
}

/**
 * drm_get_buddy - get buddy address
 *
 * @block: DRM buddy block
 *
 * Returns the corresponding buddy block for @block, or NULL
 * if this is a root block and can't be merged further.
 * Requires some kind of locking to protect against
 * any concurrent allocate and free operations.
 */
struct drm_buddy_block *
drm_get_buddy(struct drm_buddy_block *block)
{
	return __get_buddy(block);
}
EXPORT_SYMBOL(drm_get_buddy);

/**
 * drm_buddy_reset_clear - reset blocks clear state
 *
 * @mm: DRM buddy manager
 * @is_clear: blocks clear state
 *
 * Reset the clear state based on @is_clear value for each block
 * in the freelist.
 */
void drm_buddy_reset_clear(struct drm_buddy *mm, bool is_clear)
{
	u64 root_size, size, start;
	unsigned int order;
	int i;

	size = mm->size;
	for (i = 0; i < mm->n_roots; ++i) {
		order = ilog2(size) - ilog2(mm->chunk_size);
		start = drm_buddy_block_offset(mm->roots[i]);
		__force_merge(mm, start, start + size, order);

		root_size = mm->chunk_size << order;
		size -= root_size;
	}

	for (i = 0; i <= mm->max_order; ++i) {
		struct drm_buddy_block *block;

		list_for_each_entry_reverse(block, &mm->free_list[i], link) {
			if (is_clear != drm_buddy_block_is_clear(block)) {
				if (is_clear) {
					mark_cleared(block);
					mm->clear_avail += drm_buddy_block_size(mm, block);
				} else {
					clear_reset(block);
					mm->clear_avail -= drm_buddy_block_size(mm, block);
				}
			}
		}
	}
}
EXPORT_SYMBOL(drm_buddy_reset_clear);

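/*
 * Illustrative usage sketch (hypothetical scenario): after a device-wide
 * memory clear, a driver could mark every free block as cleared so that
 * later DRM_BUDDY_CLEAR_ALLOCATION requests are served without clearing
 * again:
 *
 *	drm_buddy_reset_clear(&mm, true);
 */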
/**
 * drm_buddy_free_block - free a block
 *
 * @mm: DRM buddy manager
 * @block: block to be freed
 */
void drm_buddy_free_block(struct drm_buddy *mm,
			  struct drm_buddy_block *block)
{
	BUG_ON(!drm_buddy_block_is_allocated(block));
	mm->avail += drm_buddy_block_size(mm, block);
	if (drm_buddy_block_is_clear(block))
		mm->clear_avail += drm_buddy_block_size(mm, block);

	__drm_buddy_free(mm, block, false);
}
EXPORT_SYMBOL(drm_buddy_free_block);

static void __drm_buddy_free_list(struct drm_buddy *mm,
				  struct list_head *objects,
				  bool mark_clear,
				  bool mark_dirty)
{
	struct drm_buddy_block *block, *on;

	WARN_ON(mark_dirty && mark_clear);

	list_for_each_entry_safe(block, on, objects, link) {
		if (mark_clear)
			mark_cleared(block);
		else if (mark_dirty)
			clear_reset(block);
		drm_buddy_free_block(mm, block);
		cond_resched();
	}
	INIT_LIST_HEAD(objects);
}

static void drm_buddy_free_list_internal(struct drm_buddy *mm,
					 struct list_head *objects)
{
	/*
	 * Don't touch the clear/dirty bit, since allocation is still internal
	 * at this point. For example we might have just failed part of the
	 * allocation.
	 */
	__drm_buddy_free_list(mm, objects, false, false);
}

/**
 * drm_buddy_free_list - free blocks
 *
 * @mm: DRM buddy manager
 * @objects: input list head to free blocks
 * @flags: optional flags like DRM_BUDDY_CLEARED
 */
void drm_buddy_free_list(struct drm_buddy *mm,
			 struct list_head *objects,
			 unsigned int flags)
{
	bool mark_clear = flags & DRM_BUDDY_CLEARED;

	__drm_buddy_free_list(mm, objects, mark_clear, !mark_clear);
}
EXPORT_SYMBOL(drm_buddy_free_list);

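/*
 * Illustrative usage sketch (hypothetical caller): if the driver has just
 * cleared the memory backing the blocks, it can hand them back with the
 * cleared state preserved; passing 0 instead marks them dirty on free:
 *
 *	drm_buddy_free_list(&mm, &objects, DRM_BUDDY_CLEARED);
 */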
static bool block_incompatible(struct drm_buddy_block *block, unsigned int flags)
{
	bool needs_clear = flags & DRM_BUDDY_CLEAR_ALLOCATION;

	return needs_clear != drm_buddy_block_is_clear(block);
}

static struct drm_buddy_block *
__alloc_range_bias(struct drm_buddy *mm,
		   u64 start, u64 end,
		   unsigned int order,
		   unsigned long flags,
		   bool fallback)
{
	u64 req_size = mm->chunk_size << order;
	struct drm_buddy_block *block;
	struct drm_buddy_block *buddy;
	LIST_HEAD(dfs);
	int err;
	int i;

	end = end - 1;

	for (i = 0; i < mm->n_roots; ++i)
		list_add_tail(&mm->roots[i]->tmp_link, &dfs);

	do {
		u64 block_start;
		u64 block_end;

		block = list_first_entry_or_null(&dfs,
						 struct drm_buddy_block,
						 tmp_link);
		if (!block)
			break;

		list_del(&block->tmp_link);

		if (drm_buddy_block_order(block) < order)
			continue;

		block_start = drm_buddy_block_offset(block);
		block_end = block_start + drm_buddy_block_size(mm, block) - 1;

		if (!overlaps(start, end, block_start, block_end))
			continue;

		if (drm_buddy_block_is_allocated(block))
			continue;

		if (block_start < start || block_end > end) {
			u64 adjusted_start = max(block_start, start);
			u64 adjusted_end = min(block_end, end);

			if (round_down(adjusted_end + 1, req_size) <=
			    round_up(adjusted_start, req_size))
				continue;
		}

		if (!fallback && block_incompatible(block, flags))
			continue;

		if (contains(start, end, block_start, block_end) &&
		    order == drm_buddy_block_order(block)) {
			/*
			 * Find the free block within the range.
			 */
			if (drm_buddy_block_is_free(block))
				return block;

			continue;
		}

		if (!drm_buddy_block_is_split(block)) {
			err = split_block(mm, block);
			if (unlikely(err))
				goto err_undo;
		}

		list_add(&block->right->tmp_link, &dfs);
		list_add(&block->left->tmp_link, &dfs);
	} while (1);

	return ERR_PTR(-ENOSPC);

err_undo:
	/*
	 * We really don't want to leave around a bunch of split blocks, since
	 * bigger is better, so make sure we merge everything back before we
	 * free the allocated blocks.
	 */
	buddy = __get_buddy(block);
	if (buddy &&
	    (drm_buddy_block_is_free(block) &&
	     drm_buddy_block_is_free(buddy)))
		__drm_buddy_free(mm, block, false);
	return ERR_PTR(err);
}

static struct drm_buddy_block *
__drm_buddy_alloc_range_bias(struct drm_buddy *mm,
			     u64 start, u64 end,
			     unsigned int order,
			     unsigned long flags)
{
	struct drm_buddy_block *block;
	bool fallback = false;

	block = __alloc_range_bias(mm, start, end, order,
				   flags, fallback);
	if (IS_ERR(block))
		return __alloc_range_bias(mm, start, end, order,
					  flags, !fallback);

	return block;
}

static struct drm_buddy_block *
get_maxblock(struct drm_buddy *mm, unsigned int order,
	     unsigned long flags)
{
	struct drm_buddy_block *max_block = NULL, *block = NULL;
	unsigned int i;

	for (i = order; i <= mm->max_order; ++i) {
		struct drm_buddy_block *tmp_block;

		list_for_each_entry_reverse(tmp_block, &mm->free_list[i], link) {
			if (block_incompatible(tmp_block, flags))
				continue;

			block = tmp_block;
			break;
		}

		if (!block)
			continue;

		if (!max_block) {
			max_block = block;
			continue;
		}

		if (drm_buddy_block_offset(block) >
		    drm_buddy_block_offset(max_block)) {
			max_block = block;
		}
	}

	return max_block;
}

static struct drm_buddy_block *
alloc_from_freelist(struct drm_buddy *mm,
		    unsigned int order,
		    unsigned long flags)
{
	struct drm_buddy_block *block = NULL;
	unsigned int tmp;
	int err;

	if (flags & DRM_BUDDY_TOPDOWN_ALLOCATION) {
		block = get_maxblock(mm, order, flags);
		if (block)
			/* Store the obtained block order */
			tmp = drm_buddy_block_order(block);
	} else {
		for (tmp = order; tmp <= mm->max_order; ++tmp) {
			struct drm_buddy_block *tmp_block;

			list_for_each_entry_reverse(tmp_block, &mm->free_list[tmp], link) {
				if (block_incompatible(tmp_block, flags))
					continue;

				block = tmp_block;
				break;
			}

			if (block)
				break;
		}
	}

	if (!block) {
		/* Fallback method */
		for (tmp = order; tmp <= mm->max_order; ++tmp) {
			if (!list_empty(&mm->free_list[tmp])) {
				block = list_last_entry(&mm->free_list[tmp],
							struct drm_buddy_block,
							link);
				if (block)
					break;
			}
		}

		if (!block)
			return ERR_PTR(-ENOSPC);
	}

	BUG_ON(!drm_buddy_block_is_free(block));

	while (tmp != order) {
		err = split_block(mm, block);
		if (unlikely(err))
			goto err_undo;

		block = block->right;
		tmp--;
	}
	return block;

err_undo:
	if (tmp != order)
		__drm_buddy_free(mm, block, false);
	return ERR_PTR(err);
}

static int __alloc_range(struct drm_buddy *mm,
			 struct list_head *dfs,
			 u64 start, u64 size,
			 struct list_head *blocks,
			 u64 *total_allocated_on_err)
{
	struct drm_buddy_block *block;
	struct drm_buddy_block *buddy;
	u64 total_allocated = 0;
	LIST_HEAD(allocated);
	u64 end;
	int err;

	end = start + size - 1;

	do {
		u64 block_start;
		u64 block_end;

		block = list_first_entry_or_null(dfs,
						 struct drm_buddy_block,
						 tmp_link);
		if (!block)
			break;

		list_del(&block->tmp_link);

		block_start = drm_buddy_block_offset(block);
		block_end = block_start + drm_buddy_block_size(mm, block) - 1;

		if (!overlaps(start, end, block_start, block_end))
			continue;

		if (drm_buddy_block_is_allocated(block)) {
			err = -ENOSPC;
			goto err_free;
		}

		if (contains(start, end, block_start, block_end)) {
			if (drm_buddy_block_is_free(block)) {
				mark_allocated(block);
				total_allocated += drm_buddy_block_size(mm, block);
				mm->avail -= drm_buddy_block_size(mm, block);
				if (drm_buddy_block_is_clear(block))
					mm->clear_avail -= drm_buddy_block_size(mm, block);
				list_add_tail(&block->link, &allocated);
				continue;
			} else if (!mm->clear_avail) {
				err = -ENOSPC;
				goto err_free;
			}
		}

		if (!drm_buddy_block_is_split(block)) {
			err = split_block(mm, block);
			if (unlikely(err))
				goto err_undo;
		}

		list_add(&block->right->tmp_link, dfs);
		list_add(&block->left->tmp_link, dfs);
	} while (1);

	if (total_allocated < size) {
		err = -ENOSPC;
		goto err_free;
	}

	list_splice_tail(&allocated, blocks);

	return 0;

err_undo:
	/*
	 * We really don't want to leave around a bunch of split blocks, since
	 * bigger is better, so make sure we merge everything back before we
	 * free the allocated blocks.
	 */
	buddy = __get_buddy(block);
	if (buddy &&
	    (drm_buddy_block_is_free(block) &&
	     drm_buddy_block_is_free(buddy)))
		__drm_buddy_free(mm, block, false);

err_free:
	if (err == -ENOSPC && total_allocated_on_err) {
		list_splice_tail(&allocated, blocks);
		*total_allocated_on_err = total_allocated;
	} else {
		drm_buddy_free_list_internal(mm, &allocated);
	}

	return err;
}

static int __drm_buddy_alloc_range(struct drm_buddy *mm,
				   u64 start,
				   u64 size,
				   u64 *total_allocated_on_err,
				   struct list_head *blocks)
{
	LIST_HEAD(dfs);
	int i;

	for (i = 0; i < mm->n_roots; ++i)
		list_add_tail(&mm->roots[i]->tmp_link, &dfs);

	return __alloc_range(mm, &dfs, start, size,
			     blocks, total_allocated_on_err);
}

static int __alloc_contig_try_harder(struct drm_buddy *mm,
				     u64 size,
				     u64 min_block_size,
				     struct list_head *blocks)
{
	u64 rhs_offset, lhs_offset, lhs_size, filled;
	struct drm_buddy_block *block;
	struct list_head *list;
	LIST_HEAD(blocks_lhs);
	unsigned long pages;
	unsigned int order;
	u64 modify_size;
	int err;

	modify_size = rounddown_pow_of_two(size);
	pages = modify_size >> ilog2(mm->chunk_size);
	order = fls(pages) - 1;
	if (order == 0)
		return -ENOSPC;

	list = &mm->free_list[order];
	if (list_empty(list))
		return -ENOSPC;

	list_for_each_entry_reverse(block, list, link) {
		/* Allocate blocks traversing RHS */
		rhs_offset = drm_buddy_block_offset(block);
		err = __drm_buddy_alloc_range(mm, rhs_offset, size,
					      &filled, blocks);
		if (!err || err != -ENOSPC)
			return err;

		lhs_size = max((size - filled), min_block_size);
		if (!IS_ALIGNED(lhs_size, min_block_size))
			lhs_size = round_up(lhs_size, min_block_size);

		/* Allocate blocks traversing LHS */
		lhs_offset = drm_buddy_block_offset(block) - lhs_size;
		err = __drm_buddy_alloc_range(mm, lhs_offset, lhs_size,
					      NULL, &blocks_lhs);
		if (!err) {
			list_splice(&blocks_lhs, blocks);
			return 0;
		} else if (err != -ENOSPC) {
			drm_buddy_free_list_internal(mm, blocks);
			return err;
		}
		/* Free blocks for the next iteration */
		drm_buddy_free_list_internal(mm, blocks);
	}

	return -ENOSPC;
}

/**
 * drm_buddy_block_trim - free unused pages
 *
 * @mm: DRM buddy manager
 * @start: start address to begin the trimming.
 * @new_size: original size requested
 * @blocks: Input and output list of allocated blocks.
 * MUST contain single block as input to be trimmed.
 * On success will contain the newly allocated blocks
 * making up the @new_size. Blocks always appear in
 * ascending order
 *
 * For contiguous allocation, we round up the size to the nearest
 * power of two; drivers consume the *actual* size, so the remaining
 * portions are unused and can optionally be freed with this function.
 *
 * Returns:
 * 0 on success, error code on failure.
 */
int drm_buddy_block_trim(struct drm_buddy *mm,
			 u64 *start,
			 u64 new_size,
			 struct list_head *blocks)
{
	struct drm_buddy_block *parent;
	struct drm_buddy_block *block;
	u64 block_start, block_end;
	LIST_HEAD(dfs);
	u64 new_start;
	int err;

	if (!list_is_singular(blocks))
		return -EINVAL;

	block = list_first_entry(blocks,
				 struct drm_buddy_block,
				 link);

	block_start = drm_buddy_block_offset(block);
	block_end = block_start + drm_buddy_block_size(mm, block);

	if (WARN_ON(!drm_buddy_block_is_allocated(block)))
		return -EINVAL;

	if (new_size > drm_buddy_block_size(mm, block))
		return -EINVAL;

	if (!new_size || !IS_ALIGNED(new_size, mm->chunk_size))
		return -EINVAL;

	if (new_size == drm_buddy_block_size(mm, block))
		return 0;

	new_start = block_start;
	if (start) {
		new_start = *start;

		if (new_start < block_start)
			return -EINVAL;

		if (!IS_ALIGNED(new_start, mm->chunk_size))
			return -EINVAL;

		if (range_overflows(new_start, new_size, block_end))
			return -EINVAL;
	}

	list_del(&block->link);
	mark_free(mm, block);
	mm->avail += drm_buddy_block_size(mm, block);
	if (drm_buddy_block_is_clear(block))
		mm->clear_avail += drm_buddy_block_size(mm, block);

	/* Prevent recursively freeing this node */
	parent = block->parent;
	block->parent = NULL;

	list_add(&block->tmp_link, &dfs);
	err = __alloc_range(mm, &dfs, new_start, new_size, blocks, NULL);
	if (err) {
		mark_allocated(block);
		mm->avail -= drm_buddy_block_size(mm, block);
		if (drm_buddy_block_is_clear(block))
			mm->clear_avail -= drm_buddy_block_size(mm, block);
		list_add(&block->link, blocks);
	}

	block->parent = parent;
	return err;
}
EXPORT_SYMBOL(drm_buddy_block_trim);

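/*
 * Illustrative usage sketch (hypothetical values): a contiguous request of
 * 5 MiB is rounded up to an 8 MiB block; the driver can return the unused
 * tail by trimming the (single) block back down to the size it consumes:
 *
 *	err = drm_buddy_block_trim(&mm, NULL, 5 * SZ_1M, &blocks);
 */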
static struct drm_buddy_block *
__drm_buddy_alloc_blocks(struct drm_buddy *mm,
			 u64 start, u64 end,
			 unsigned int order,
			 unsigned long flags)
{
	if (flags & DRM_BUDDY_RANGE_ALLOCATION)
		/* Allocate traversing within the range */
		return __drm_buddy_alloc_range_bias(mm, start, end,
						    order, flags);
	else
		/* Allocate from freelist */
		return alloc_from_freelist(mm, order, flags);
}

/**
 * drm_buddy_alloc_blocks - allocate power-of-two blocks
 *
 * @mm: DRM buddy manager to allocate from
 * @start: start of the allowed range for this block
 * @end: end of the allowed range for this block
 * @size: size of the allocation in bytes
 * @min_block_size: alignment of the allocation
 * @blocks: output list head to add allocated blocks
 * @flags: DRM_BUDDY_*_ALLOCATION flags
 *
 * alloc_range_bias() is called when there are range limitations; it
 * traverses the tree and returns the desired block.
 *
 * alloc_from_freelist() is called when *no* range restrictions
 * are enforced; it picks the block from the freelist.
 *
 * Returns:
 * 0 on success, error code on failure.
 */
int drm_buddy_alloc_blocks(struct drm_buddy *mm,
			   u64 start, u64 end, u64 size,
			   u64 min_block_size,
			   struct list_head *blocks,
			   unsigned long flags)
{
	struct drm_buddy_block *block = NULL;
	u64 original_size, original_min_size;
	unsigned int min_order, order;
	LIST_HEAD(allocated);
	unsigned long pages;
	int err;

	if (size < mm->chunk_size)
		return -EINVAL;

	if (min_block_size < mm->chunk_size)
		return -EINVAL;

	if (!is_power_of_2(min_block_size))
		return -EINVAL;

	if (!IS_ALIGNED(start | end | size, mm->chunk_size))
		return -EINVAL;

	if (end > mm->size)
		return -EINVAL;

	if (range_overflows(start, size, mm->size))
		return -EINVAL;

	/* Actual range allocation */
	if (start + size == end) {
		if (!IS_ALIGNED(start | end, min_block_size))
			return -EINVAL;

		return __drm_buddy_alloc_range(mm, start, size, NULL, blocks);
	}

	original_size = size;
	original_min_size = min_block_size;

	/* Roundup the size to power of 2 */
	if (flags & DRM_BUDDY_CONTIGUOUS_ALLOCATION) {
		size = roundup_pow_of_two(size);
		min_block_size = size;
	/* Align size value to min_block_size */
	} else if (!IS_ALIGNED(size, min_block_size)) {
		size = round_up(size, min_block_size);
	}

	pages = size >> ilog2(mm->chunk_size);
	order = fls(pages) - 1;
	min_order = ilog2(min_block_size) - ilog2(mm->chunk_size);

	do {
		order = min(order, (unsigned int)fls(pages) - 1);
		BUG_ON(order > mm->max_order);
		BUG_ON(order < min_order);

		do {
			block = __drm_buddy_alloc_blocks(mm, start,
							 end,
							 order,
							 flags);
			if (!IS_ERR(block))
				break;

			if (order-- == min_order) {
				/* Try allocation through force merge method */
				if (mm->clear_avail &&
				    !__force_merge(mm, start, end, min_order)) {
					block = __drm_buddy_alloc_blocks(mm, start,
									 end,
									 min_order,
									 flags);
					if (!IS_ERR(block)) {
						order = min_order;
						break;
					}
				}

				/*
				 * Try contiguous block allocation through
				 * try harder method.
				 */
				if (flags & DRM_BUDDY_CONTIGUOUS_ALLOCATION &&
				    !(flags & DRM_BUDDY_RANGE_ALLOCATION))
					return __alloc_contig_try_harder(mm,
									 original_size,
									 original_min_size,
									 blocks);
				err = -ENOSPC;
				goto err_free;
			}
		} while (1);

		mark_allocated(block);
		mm->avail -= drm_buddy_block_size(mm, block);
		if (drm_buddy_block_is_clear(block))
			mm->clear_avail -= drm_buddy_block_size(mm, block);
		kmemleak_update_trace(block);
		list_add_tail(&block->link, &allocated);

		pages -= BIT(order);

		if (!pages)
			break;
	} while (1);

	/* Trim the allocated block to the required size */
	if (!(flags & DRM_BUDDY_TRIM_DISABLE) &&
	    original_size != size) {
		struct list_head *trim_list;
		LIST_HEAD(temp);
		u64 trim_size;

		trim_list = &allocated;
		trim_size = original_size;

		if (!list_is_singular(&allocated)) {
			block = list_last_entry(&allocated, typeof(*block), link);
			list_move(&block->link, &temp);
			trim_list = &temp;
			trim_size = drm_buddy_block_size(mm, block) -
				(size - original_size);
		}

		drm_buddy_block_trim(mm,
				     NULL,
				     trim_size,
				     trim_list);

		if (!list_empty(&temp))
			list_splice_tail(trim_list, &allocated);
	}

	list_splice_tail(&allocated, blocks);
	return 0;

err_free:
	drm_buddy_free_list_internal(mm, &allocated);
	return err;
}
EXPORT_SYMBOL(drm_buddy_alloc_blocks);

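/*
 * Illustrative usage sketch (hypothetical driver code): allocating 8 MiB
 * of physically contiguous space from anywhere in the managed range, then
 * releasing it. The list head owns the resulting blocks until they are
 * freed back to the manager:
 *
 *	LIST_HEAD(blocks);
 *	int err;
 *
 *	err = drm_buddy_alloc_blocks(&mm, 0, mm.size, SZ_8M, SZ_4K,
 *				     &blocks,
 *				     DRM_BUDDY_CONTIGUOUS_ALLOCATION);
 *	if (err)
 *		return err;
 *	...
 *	drm_buddy_free_list(&mm, &blocks, 0);
 */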
/**
 * drm_buddy_block_print - print block information
 *
 * @mm: DRM buddy manager
 * @block: DRM buddy block
 * @p: DRM printer to use
 */
void drm_buddy_block_print(struct drm_buddy *mm,
			   struct drm_buddy_block *block,
			   struct drm_printer *p)
{
	u64 start = drm_buddy_block_offset(block);
	u64 size = drm_buddy_block_size(mm, block);

	drm_printf(p, "%#018llx-%#018llx: %llu\n", start, start + size, size);
}
EXPORT_SYMBOL(drm_buddy_block_print);

/**
 * drm_buddy_print - print allocator state
 *
 * @mm: DRM buddy manager
 * @p: DRM printer to use
 */
void drm_buddy_print(struct drm_buddy *mm, struct drm_printer *p)
{
	int order;

	drm_printf(p, "chunk_size: %lluKiB, total: %lluMiB, free: %lluMiB, clear_free: %lluMiB\n",
		   mm->chunk_size >> 10, mm->size >> 20, mm->avail >> 20, mm->clear_avail >> 20);

	for (order = mm->max_order; order >= 0; order--) {
		struct drm_buddy_block *block;
		u64 count = 0, free;

		list_for_each_entry(block, &mm->free_list[order], link) {
			BUG_ON(!drm_buddy_block_is_free(block));
			count++;
		}

		drm_printf(p, "order-%2d ", order);

		free = count * (mm->chunk_size << order);
		if (free < SZ_1M)
			drm_printf(p, "free: %8llu KiB", free >> 10);
		else
			drm_printf(p, "free: %8llu MiB", free >> 20);

		drm_printf(p, ", blocks: %llu\n", count);
	}
}
EXPORT_SYMBOL(drm_buddy_print);

static void drm_buddy_module_exit(void)
{
	kmem_cache_destroy(slab_blocks);
}

static int __init drm_buddy_module_init(void)
{
	slab_blocks = KMEM_CACHE(drm_buddy_block, 0);
	if (!slab_blocks)
		return -ENOMEM;

	return 0;
}

module_init(drm_buddy_module_init);
module_exit(drm_buddy_module_exit);

MODULE_DESCRIPTION("DRM Buddy Allocator");
MODULE_LICENSE("Dual MIT/GPL");