ttm_resource.c 24 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
3704705706707708709710711712713714715716717718719720721722723724725726727728729730731732733734735736737738739740741742743744745746747748749750751752753754755756757758759760761762763764765766767768769770771772773774775776777778779780781782783784785786787788789790791792793794795796797798799800801802803804805806807808809810811812813814815816817818819820821822823824825826827828829830831832833834835836837838839840841842843844845846847848849850851852853854855856857858859860861862863864865866867868869870871872873874875876877878879880881
  1. /*
  2. * Copyright 2020 Advanced Micro Devices, Inc.
  3. *
  4. * Permission is hereby granted, free of charge, to any person obtaining a
  5. * copy of this software and associated documentation files (the "Software"),
  6. * to deal in the Software without restriction, including without limitation
  7. * the rights to use, copy, modify, merge, publish, distribute, sublicense,
  8. * and/or sell copies of the Software, and to permit persons to whom the
  9. * Software is furnished to do so, subject to the following conditions:
  10. *
  11. * The above copyright notice and this permission notice shall be included in
  12. * all copies or substantial portions of the Software.
  13. *
  14. * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
  15. * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
  16. * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
  17. * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
  18. * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
  19. * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
  20. * OTHER DEALINGS IN THE SOFTWARE.
  21. *
  22. * Authors: Christian König
  23. */
  24. #include <linux/debugfs.h>
  25. #include <linux/io-mapping.h>
  26. #include <linux/iosys-map.h>
  27. #include <linux/scatterlist.h>
  28. #include <drm/ttm/ttm_bo.h>
  29. #include <drm/ttm/ttm_placement.h>
  30. #include <drm/ttm/ttm_resource.h>
  31. #include <drm/drm_util.h>
  32. /* Detach the cursor from the bulk move list*/
  33. static void
  34. ttm_resource_cursor_clear_bulk(struct ttm_resource_cursor *cursor)
  35. {
  36. lockdep_assert_held(&cursor->man->bdev->lru_lock);
  37. cursor->bulk = NULL;
  38. list_del_init(&cursor->bulk_link);
  39. }
/* Move the cursor to the end of the bulk move list it's in */
static void ttm_resource_cursor_move_bulk_tail(struct ttm_lru_bulk_move *bulk,
					       struct ttm_resource_cursor *cursor)
{
	struct ttm_lru_bulk_move_pos *pos;

	lockdep_assert_held(&cursor->man->bdev->lru_lock);

	/* Cursor claims membership of a different bulk move: just detach it. */
	if (WARN_ON_ONCE(bulk != cursor->bulk)) {
		list_del_init(&cursor->bulk_link);
		return;
	}

	pos = &bulk->pos[cursor->mem_type][cursor->priority];
	/* Park the hitch right behind the last resource of the bulk range. */
	if (pos->last)
		list_move(&cursor->hitch.link, &pos->last->lru.link);
	ttm_resource_cursor_clear_bulk(cursor);
}
  55. /* Move all cursors attached to a bulk move to its end */
  56. static void ttm_bulk_move_adjust_cursors(struct ttm_lru_bulk_move *bulk)
  57. {
  58. struct ttm_resource_cursor *cursor, *next;
  59. list_for_each_entry_safe(cursor, next, &bulk->cursor_list, bulk_link)
  60. ttm_resource_cursor_move_bulk_tail(bulk, cursor);
  61. }
  62. /* Remove a cursor from an empty bulk move list */
  63. static void ttm_bulk_move_drop_cursors(struct ttm_lru_bulk_move *bulk)
  64. {
  65. struct ttm_resource_cursor *cursor, *next;
  66. list_for_each_entry_safe(cursor, next, &bulk->cursor_list, bulk_link)
  67. ttm_resource_cursor_clear_bulk(cursor);
  68. }
/**
 * ttm_resource_cursor_fini() - Finalize the LRU list cursor usage
 * @cursor: The struct ttm_resource_cursor to finalize.
 *
 * The function pulls the LRU list cursor off any lists it was previously
 * attached to. Needs to be called with the LRU lock held. The function
 * can be called multiple times after each other.
 */
void ttm_resource_cursor_fini(struct ttm_resource_cursor *cursor)
{
	lockdep_assert_held(&cursor->man->bdev->lru_lock);
	list_del_init(&cursor->hitch.link);
	ttm_resource_cursor_clear_bulk(cursor);
}
/**
 * ttm_lru_bulk_move_init - initialize a bulk move structure
 * @bulk: the structure to init
 *
 * Clears all bulk move positions and initializes the list of
 * cursors attached to the bulk move.
 */
void ttm_lru_bulk_move_init(struct ttm_lru_bulk_move *bulk)
{
	memset(bulk, 0, sizeof(*bulk));
	INIT_LIST_HEAD(&bulk->cursor_list);
}
EXPORT_SYMBOL(ttm_lru_bulk_move_init);
/**
 * ttm_lru_bulk_move_fini - finalize a bulk move structure
 * @bdev: The struct ttm_device
 * @bulk: the structure to finalize
 *
 * Sanity checks that bulk moves don't have any
 * resources left and hence no cursors attached.
 */
void ttm_lru_bulk_move_fini(struct ttm_device *bdev,
			    struct ttm_lru_bulk_move *bulk)
{
	spin_lock(&bdev->lru_lock);
	/* Detaching cursors requires the LRU lock. */
	ttm_bulk_move_drop_cursors(bulk);
	spin_unlock(&bdev->lru_lock);
}
EXPORT_SYMBOL(ttm_lru_bulk_move_fini);
/**
 * ttm_lru_bulk_move_tail - bulk move range of resources to the LRU tail.
 *
 * @bulk: bulk move structure
 *
 * Bulk move BOs to the LRU tail, only valid to use when driver makes sure that
 * resource order never changes. Should be called with &ttm_device.lru_lock held.
 */
void ttm_lru_bulk_move_tail(struct ttm_lru_bulk_move *bulk)
{
	unsigned i, j;

	/* Keep attached cursors valid across the bulk list moves below. */
	ttm_bulk_move_adjust_cursors(bulk);
	for (i = 0; i < TTM_NUM_MEM_TYPES; ++i) {
		for (j = 0; j < TTM_MAX_BO_PRIORITY; ++j) {
			struct ttm_lru_bulk_move_pos *pos = &bulk->pos[i][j];
			struct ttm_resource_manager *man;

			/* No resources for this mem_type/priority slot. */
			if (!pos->first)
				continue;

			lockdep_assert_held(&pos->first->bo->bdev->lru_lock);
			dma_resv_assert_held(pos->first->bo->base.resv);
			dma_resv_assert_held(pos->last->bo->base.resv);

			man = ttm_manager_type(pos->first->bo->bdev, i);
			/* Move the whole [first, last] range in one operation. */
			list_bulk_move_tail(&man->lru[j], &pos->first->lru.link,
					    &pos->last->lru.link);
		}
	}
}
EXPORT_SYMBOL(ttm_lru_bulk_move_tail);
  139. /* Return the bulk move pos object for this resource */
  140. static struct ttm_lru_bulk_move_pos *
  141. ttm_lru_bulk_move_pos(struct ttm_lru_bulk_move *bulk, struct ttm_resource *res)
  142. {
  143. return &bulk->pos[res->mem_type][res->bo->priority];
  144. }
  145. /* Return the previous resource on the list (skip over non-resource list items) */
  146. static struct ttm_resource *ttm_lru_prev_res(struct ttm_resource *cur)
  147. {
  148. struct ttm_lru_item *lru = &cur->lru;
  149. do {
  150. lru = list_prev_entry(lru, link);
  151. } while (!ttm_lru_item_is_res(lru));
  152. return ttm_lru_item_to_res(lru);
  153. }
  154. /* Return the next resource on the list (skip over non-resource list items) */
  155. static struct ttm_resource *ttm_lru_next_res(struct ttm_resource *cur)
  156. {
  157. struct ttm_lru_item *lru = &cur->lru;
  158. do {
  159. lru = list_next_entry(lru, link);
  160. } while (!ttm_lru_item_is_res(lru));
  161. return ttm_lru_item_to_res(lru);
  162. }
/* Move the resource to the tail of the bulk move range */
static void ttm_lru_bulk_move_pos_tail(struct ttm_lru_bulk_move_pos *pos,
				       struct ttm_resource *res)
{
	if (pos->last != res) {
		/* Keep the range boundary valid when moving its first member. */
		if (pos->first == res)
			pos->first = ttm_lru_next_res(res);
		list_move(&res->lru.link, &pos->last->lru.link);
		pos->last = res;
	}
}
  174. /* Add the resource to a bulk_move cursor */
  175. static void ttm_lru_bulk_move_add(struct ttm_lru_bulk_move *bulk,
  176. struct ttm_resource *res)
  177. {
  178. struct ttm_lru_bulk_move_pos *pos = ttm_lru_bulk_move_pos(bulk, res);
  179. if (!pos->first) {
  180. pos->first = res;
  181. pos->last = res;
  182. } else {
  183. WARN_ON(pos->first->bo->base.resv != res->bo->base.resv);
  184. ttm_lru_bulk_move_pos_tail(pos, res);
  185. }
  186. }
/* Remove the resource from a bulk_move range */
static void ttm_lru_bulk_move_del(struct ttm_lru_bulk_move *bulk,
				  struct ttm_resource *res)
{
	struct ttm_lru_bulk_move_pos *pos = ttm_lru_bulk_move_pos(bulk, res);

	if (unlikely(WARN_ON(!pos->first || !pos->last) ||
		     (pos->first == res && pos->last == res))) {
		/* Range is corrupt, or @res was its only member: empty it. */
		pos->first = NULL;
		pos->last = NULL;
	} else if (pos->first == res) {
		pos->first = ttm_lru_next_res(res);
	} else if (pos->last == res) {
		pos->last = ttm_lru_prev_res(res);
	} else {
		/* @res is in the middle: move it out so [first, last] stays contiguous. */
		list_move(&res->lru.link, &pos->last->lru.link);
	}
}
  204. /* Add the resource to a bulk move if the BO is configured for it */
  205. void ttm_resource_add_bulk_move(struct ttm_resource *res,
  206. struct ttm_buffer_object *bo)
  207. {
  208. if (bo->bulk_move && !bo->pin_count)
  209. ttm_lru_bulk_move_add(bo->bulk_move, res);
  210. }
  211. /* Remove the resource from a bulk move if the BO is configured for it */
  212. void ttm_resource_del_bulk_move(struct ttm_resource *res,
  213. struct ttm_buffer_object *bo)
  214. {
  215. if (bo->bulk_move && !bo->pin_count)
  216. ttm_lru_bulk_move_del(bo->bulk_move, res);
  217. }
/* Move a resource to the LRU or bulk tail */
void ttm_resource_move_to_lru_tail(struct ttm_resource *res)
{
	struct ttm_buffer_object *bo = res->bo;
	struct ttm_device *bdev = bo->bdev;

	lockdep_assert_held(&bo->bdev->lru_lock);

	if (bo->pin_count) {
		/* Pinned BOs are tracked on the device-global pinned list. */
		list_move_tail(&res->lru.link, &bdev->pinned);
	} else if (bo->bulk_move) {
		/* Keep the resource inside its bulk move range. */
		struct ttm_lru_bulk_move_pos *pos =
			ttm_lru_bulk_move_pos(bo->bulk_move, res);

		ttm_lru_bulk_move_pos_tail(pos, res);
	} else {
		struct ttm_resource_manager *man;

		man = ttm_manager_type(bdev, res->mem_type);
		list_move_tail(&res->lru.link, &man->lru[bo->priority]);
	}
}
/**
 * ttm_resource_init - resource object constructor
 * @bo: buffer object this resource is allocated for
 * @place: placement of the resource
 * @res: the resource object to initialize
 *
 * Initialize a new resource object. Counterpart of ttm_resource_fini().
 */
void ttm_resource_init(struct ttm_buffer_object *bo,
		       const struct ttm_place *place,
		       struct ttm_resource *res)
{
	struct ttm_resource_manager *man;

	res->start = 0;
	res->size = bo->base.size;
	res->mem_type = place->mem_type;
	res->placement = place->flags;
	res->bus.addr = NULL;
	res->bus.offset = 0;
	res->bus.is_iomem = false;
	res->bus.caching = ttm_cached;
	res->bo = bo;

	man = ttm_manager_type(bo->bdev, place->mem_type);
	spin_lock(&bo->bdev->lru_lock);
	/* Pinned BOs go on the device list, others on the manager's LRU. */
	if (bo->pin_count)
		list_add_tail(&res->lru.link, &bo->bdev->pinned);
	else
		list_add_tail(&res->lru.link, &man->lru[bo->priority]);
	man->usage += res->size;
	spin_unlock(&bo->bdev->lru_lock);
}
EXPORT_SYMBOL(ttm_resource_init);
/**
 * ttm_resource_fini - resource destructor
 * @man: the resource manager this resource belongs to
 * @res: the resource to clean up
 *
 * Should be used by resource manager backends to clean up the TTM resource
 * objects before freeing the underlying structure. Makes sure the resource is
 * removed from the LRU before destruction.
 * Counterpart of ttm_resource_init().
 */
void ttm_resource_fini(struct ttm_resource_manager *man,
		       struct ttm_resource *res)
{
	struct ttm_device *bdev = man->bdev;

	spin_lock(&bdev->lru_lock);
	list_del_init(&res->lru.link);
	/* The resource no longer counts towards the manager's usage. */
	man->usage -= res->size;
	spin_unlock(&bdev->lru_lock);
}
EXPORT_SYMBOL(ttm_resource_fini);
/*
 * Allocate a resource for @bo from the manager backing @place and, on
 * success, add it to the BO's bulk move if one is configured.
 *
 * Return: 0 on success, negative error code from the manager's alloc hook
 * otherwise.
 */
int ttm_resource_alloc(struct ttm_buffer_object *bo,
		       const struct ttm_place *place,
		       struct ttm_resource **res_ptr)
{
	struct ttm_resource_manager *man =
		ttm_manager_type(bo->bdev, place->mem_type);
	int ret;

	ret = man->func->alloc(man, bo, place, res_ptr);
	if (ret)
		return ret;

	spin_lock(&bo->bdev->lru_lock);
	ttm_resource_add_bulk_move(*res_ptr, bo);
	spin_unlock(&bo->bdev->lru_lock);
	return 0;
}
EXPORT_SYMBOL_FOR_TESTS_ONLY(ttm_resource_alloc);
/*
 * Free a resource: remove it from @bo's bulk move (if any) and hand it back
 * to the owning resource manager. *@res is set to NULL on return; a NULL
 * *@res is silently ignored.
 */
void ttm_resource_free(struct ttm_buffer_object *bo, struct ttm_resource **res)
{
	struct ttm_resource_manager *man;

	if (!*res)
		return;

	spin_lock(&bo->bdev->lru_lock);
	ttm_resource_del_bulk_move(*res, bo);
	spin_unlock(&bo->bdev->lru_lock);
	man = ttm_manager_type(bo->bdev, (*res)->mem_type);
	man->func->free(man, *res);
	*res = NULL;
}
EXPORT_SYMBOL(ttm_resource_free);
/**
 * ttm_resource_intersects - test for intersection
 *
 * @bdev: TTM device structure
 * @res: The resource to test
 * @place: The placement to test
 * @size: How many bytes the new allocation needs.
 *
 * Test if @res intersects with @place and @size. Used for testing if evictions
 * are valuable or not.
 *
 * Returns true if the res placement intersects with @place and @size.
 */
bool ttm_resource_intersects(struct ttm_device *bdev,
			     struct ttm_resource *res,
			     const struct ttm_place *place,
			     size_t size)
{
	struct ttm_resource_manager *man;

	if (!res)
		return false;

	man = ttm_manager_type(bdev, res->mem_type);
	/* Without a place or an intersects hook, conservatively report overlap. */
	if (!place || !man->func->intersects)
		return true;

	return man->func->intersects(man, res, place, size);
}
/**
 * ttm_resource_compatible - check if resource is compatible with placement
 *
 * @res: the resource to check
 * @placement: the placement to check against
 * @evicting: true if the caller is doing evictions
 *
 * Returns true if the placement is compatible.
 */
bool ttm_resource_compatible(struct ttm_resource *res,
			     struct ttm_placement *placement,
			     bool evicting)
{
	struct ttm_buffer_object *bo = res->bo;
	struct ttm_device *bdev = bo->bdev;
	unsigned i;

	/* Temporary placements are never considered compatible. */
	if (res->placement & TTM_PL_FLAG_TEMPORARY)
		return false;

	for (i = 0; i < placement->num_placement; i++) {
		const struct ttm_place *place = &placement->placement[i];
		struct ttm_resource_manager *man;

		if (res->mem_type != place->mem_type)
			continue;

		/*
		 * When evicting, skip placements that are only desired for
		 * allocation; otherwise skip pure eviction fallbacks.
		 */
		if (place->flags & (evicting ? TTM_PL_FLAG_DESIRED :
				    TTM_PL_FLAG_FALLBACK))
			continue;

		/* A contiguous placement requires a contiguous resource. */
		if (place->flags & TTM_PL_FLAG_CONTIGUOUS &&
		    !(res->placement & TTM_PL_FLAG_CONTIGUOUS))
			continue;

		man = ttm_manager_type(bdev, res->mem_type);
		/* Let the manager backend veto the match. */
		if (man->func->compatible &&
		    !man->func->compatible(man, res, place, bo->base.size))
			continue;

		return true;
	}
	return false;
}
/* Assign @bo as the owner of @res under the LRU lock. */
void ttm_resource_set_bo(struct ttm_resource *res,
			 struct ttm_buffer_object *bo)
{
	spin_lock(&bo->bdev->lru_lock);
	res->bo = bo;
	spin_unlock(&bo->bdev->lru_lock);
}
  387. /**
  388. * ttm_resource_manager_init
  389. *
  390. * @man: memory manager object to init
  391. * @bdev: ttm device this manager belongs to
  392. * @size: size of managed resources in arbitrary units
  393. *
  394. * Initialise core parts of a manager object.
  395. */
  396. void ttm_resource_manager_init(struct ttm_resource_manager *man,
  397. struct ttm_device *bdev,
  398. uint64_t size)
  399. {
  400. unsigned i;
  401. spin_lock_init(&man->move_lock);
  402. man->bdev = bdev;
  403. man->size = size;
  404. man->usage = 0;
  405. for (i = 0; i < TTM_MAX_BO_PRIORITY; ++i)
  406. INIT_LIST_HEAD(&man->lru[i]);
  407. man->move = NULL;
  408. }
  409. EXPORT_SYMBOL(ttm_resource_manager_init);
/**
 * ttm_resource_manager_evict_all
 *
 * @bdev: device to use
 * @man: manager to use
 *
 * Evict all the objects out of a memory manager until it is empty.
 * Part of memory manager cleanup sequence.
 *
 * Return: 0 on success, negative error code otherwise.
 */
int ttm_resource_manager_evict_all(struct ttm_device *bdev,
				   struct ttm_resource_manager *man)
{
	struct ttm_operation_ctx ctx = {
		.interruptible = false,
		.no_wait_gpu = false,
		.force_alloc = true
	};
	struct dma_fence *fence;
	int ret;

	/* Evict one BO at a time until ttm_bo_evict_first() reports an error. */
	do {
		ret = ttm_bo_evict_first(bdev, man, &ctx);
		cond_resched();
	} while (!ret);

	/* -ENOENT simply means the manager ran empty. */
	if (ret && ret != -ENOENT)
		return ret;

	/* Wait for any move still in flight on this manager. */
	spin_lock(&man->move_lock);
	fence = dma_fence_get(man->move);
	spin_unlock(&man->move_lock);

	if (fence) {
		ret = dma_fence_wait(fence, false);
		dma_fence_put(fence);
		if (ret)
			return ret;
	}

	return 0;
}
EXPORT_SYMBOL(ttm_resource_manager_evict_all);
  447. /**
  448. * ttm_resource_manager_usage
  449. *
  450. * @man: A memory manager object.
  451. *
  452. * Return how many resources are currently used.
  453. */
  454. uint64_t ttm_resource_manager_usage(struct ttm_resource_manager *man)
  455. {
  456. uint64_t usage;
  457. spin_lock(&man->bdev->lru_lock);
  458. usage = man->usage;
  459. spin_unlock(&man->bdev->lru_lock);
  460. return usage;
  461. }
  462. EXPORT_SYMBOL(ttm_resource_manager_usage);
/**
 * ttm_resource_manager_debug
 *
 * @man: manager type to dump.
 * @p: printer to use for debug.
 */
void ttm_resource_manager_debug(struct ttm_resource_manager *man,
				struct drm_printer *p)
{
	drm_printf(p, " use_type: %d\n", man->use_type);
	drm_printf(p, " use_tt: %d\n", man->use_tt);
	drm_printf(p, " size: %llu\n", man->size);
	drm_printf(p, " usage: %llu\n", ttm_resource_manager_usage(man));
	/* Let the backend append manager-specific details. */
	if (man->func->debug)
		man->func->debug(man, p);
}
EXPORT_SYMBOL(ttm_resource_manager_debug);
/*
 * Update the cursor's bulk move bookkeeping for the next resource reached
 * during iteration: attach the cursor to that resource's bulk move (if any),
 * or detach it when the resource has none.
 */
static void
ttm_resource_cursor_check_bulk(struct ttm_resource_cursor *cursor,
			       struct ttm_lru_item *next_lru)
{
	struct ttm_resource *next = ttm_lru_item_to_res(next_lru);
	struct ttm_lru_bulk_move *bulk = NULL;
	struct ttm_buffer_object *bo = next->bo;

	lockdep_assert_held(&cursor->man->bdev->lru_lock);
	bulk = bo->bulk_move;

	/* Only rewire when the bulk move association actually changed. */
	if (cursor->bulk != bulk) {
		if (bulk) {
			list_move_tail(&cursor->bulk_link, &bulk->cursor_list);
			cursor->mem_type = next->mem_type;
		} else {
			list_del_init(&cursor->bulk_link);
		}
		cursor->bulk = bulk;
	}
}
/**
 * ttm_resource_manager_first() - Start iterating over the resources
 * of a resource manager
 * @man: resource manager to iterate over
 * @cursor: cursor to record the position
 *
 * Initializes the cursor and starts iterating. When done iterating,
 * the caller must explicitly call ttm_resource_cursor_fini().
 *
 * Return: The first resource from the resource manager.
 */
struct ttm_resource *
ttm_resource_manager_first(struct ttm_resource_manager *man,
			   struct ttm_resource_cursor *cursor)
{
	lockdep_assert_held(&man->bdev->lru_lock);

	cursor->priority = 0;
	cursor->man = man;
	ttm_lru_item_init(&cursor->hitch, TTM_LRU_HITCH);
	INIT_LIST_HEAD(&cursor->bulk_link);
	/* Insert the hitch at the head of the highest-priority LRU list. */
	list_add(&cursor->hitch.link, &man->lru[cursor->priority]);

	return ttm_resource_manager_next(cursor);
}
/**
 * ttm_resource_manager_next() - Continue iterating over the resource manager
 * resources
 * @cursor: cursor to record the position
 *
 * Return: the next resource from the resource manager.
 */
struct ttm_resource *
ttm_resource_manager_next(struct ttm_resource_cursor *cursor)
{
	struct ttm_resource_manager *man = cursor->man;
	struct ttm_lru_item *lru;

	lockdep_assert_held(&man->bdev->lru_lock);

	for (;;) {
		/* Continue from the hitch, skipping non-resource list items. */
		lru = &cursor->hitch;
		list_for_each_entry_continue(lru, &man->lru[cursor->priority], link) {
			if (ttm_lru_item_is_res(lru)) {
				ttm_resource_cursor_check_bulk(cursor, lru);
				/* Park the hitch behind the returned resource. */
				list_move(&cursor->hitch.link, &lru->link);
				return ttm_lru_item_to_res(lru);
			}
		}

		/* Current priority list exhausted: fall through to the next. */
		if (++cursor->priority >= TTM_MAX_BO_PRIORITY)
			break;

		list_move(&cursor->hitch.link, &man->lru[cursor->priority]);
		ttm_resource_cursor_clear_bulk(cursor);
	}

	/* Iteration done: detach the cursor from all lists. */
	ttm_resource_cursor_fini(cursor);

	return NULL;
}
  552. /**
  553. * ttm_lru_first_res_or_null() - Return the first resource on an lru list
  554. * @head: The list head of the lru list.
  555. *
  556. * Return: Pointer to the first resource on the lru list or NULL if
  557. * there is none.
  558. */
  559. struct ttm_resource *ttm_lru_first_res_or_null(struct list_head *head)
  560. {
  561. struct ttm_lru_item *lru;
  562. list_for_each_entry(lru, head, link) {
  563. if (ttm_lru_item_is_res(lru))
  564. return ttm_lru_item_to_res(lru);
  565. }
  566. return NULL;
  567. }
/*
 * Map page @i of the sg_table-backed io region, caching the current
 * scatterlist entry so sequential accesses don't rescan the table.
 */
static void ttm_kmap_iter_iomap_map_local(struct ttm_kmap_iter *iter,
					  struct iosys_map *dmap,
					  pgoff_t i)
{
	struct ttm_kmap_iter_iomap *iter_io =
		container_of(iter, typeof(*iter_io), base);
	void __iomem *addr;

retry:
	/* Advance the cached sg entry until it covers page @i. */
	while (i >= iter_io->cache.end) {
		iter_io->cache.sg = iter_io->cache.sg ?
			sg_next(iter_io->cache.sg) : iter_io->st->sgl;
		iter_io->cache.i = iter_io->cache.end;
		iter_io->cache.end += sg_dma_len(iter_io->cache.sg) >>
			PAGE_SHIFT;
		iter_io->cache.offs = sg_dma_address(iter_io->cache.sg) -
			iter_io->start;
	}

	/* @i lies before the cached entry: reset and rescan from the head. */
	if (i < iter_io->cache.i) {
		iter_io->cache.end = 0;
		iter_io->cache.sg = NULL;
		goto retry;
	}

	addr = io_mapping_map_local_wc(iter_io->iomap, iter_io->cache.offs +
				       (((resource_size_t)i - iter_io->cache.i)
					<< PAGE_SHIFT));
	iosys_map_set_vaddr_iomem(dmap, addr);
}
/* Tear down a local mapping created by ttm_kmap_iter_iomap_map_local(). */
static void ttm_kmap_iter_iomap_unmap_local(struct ttm_kmap_iter *iter,
					    struct iosys_map *map)
{
	io_mapping_unmap_local(map->vaddr_iomem);
}
/* Kmap iterator ops for io memory described by an io_mapping + sg_table. */
static const struct ttm_kmap_iter_ops ttm_kmap_iter_io_ops = {
	.map_local = ttm_kmap_iter_iomap_map_local,
	.unmap_local = ttm_kmap_iter_iomap_unmap_local,
	.maps_tt = false,
};
  605. /**
  606. * ttm_kmap_iter_iomap_init - Initialize a struct ttm_kmap_iter_iomap
  607. * @iter_io: The struct ttm_kmap_iter_iomap to initialize.
  608. * @iomap: The struct io_mapping representing the underlying linear io_memory.
  609. * @st: sg_table into @iomap, representing the memory of the struct
  610. * ttm_resource.
  611. * @start: Offset that needs to be subtracted from @st to make
  612. * sg_dma_address(st->sgl) - @start == 0 for @iomap start.
  613. *
  614. * Return: Pointer to the embedded struct ttm_kmap_iter.
  615. */
  616. struct ttm_kmap_iter *
  617. ttm_kmap_iter_iomap_init(struct ttm_kmap_iter_iomap *iter_io,
  618. struct io_mapping *iomap,
  619. struct sg_table *st,
  620. resource_size_t start)
  621. {
  622. iter_io->base.ops = &ttm_kmap_iter_io_ops;
  623. iter_io->iomap = iomap;
  624. iter_io->st = st;
  625. iter_io->start = start;
  626. memset(&iter_io->cache, 0, sizeof(iter_io->cache));
  627. return &iter_io->base;
  628. }
  629. EXPORT_SYMBOL(ttm_kmap_iter_iomap_init);
  630. /**
  631. * DOC: Linear io iterator
  632. *
  633. * This code should die in the not too near future. Best would be if we could
  634. * make io-mapping use memremap for all io memory, and have memremap
  635. * implement a kmap_local functionality. We could then strip a huge amount of
  636. * code. These linear io iterators are implemented to mimic old functionality,
  637. * and they don't use kmap_local semantics at all internally. Rather ioremap or
  638. * friends, and at least on 32-bit they add global TLB flushes and points
  639. * of failure.
  640. */
  641. static void ttm_kmap_iter_linear_io_map_local(struct ttm_kmap_iter *iter,
  642. struct iosys_map *dmap,
  643. pgoff_t i)
  644. {
  645. struct ttm_kmap_iter_linear_io *iter_io =
  646. container_of(iter, typeof(*iter_io), base);
  647. *dmap = iter_io->dmap;
  648. iosys_map_incr(dmap, i * PAGE_SIZE);
  649. }
/* Kmap iterator ops for linearly mapped io memory; no unmap_local hook. */
static const struct ttm_kmap_iter_ops ttm_kmap_iter_linear_io_ops = {
	.map_local = ttm_kmap_iter_linear_io_map_local,
	.maps_tt = false,
};
/**
 * ttm_kmap_iter_linear_io_init - Initialize an iterator for linear io memory
 * @iter_io: The iterator to initialize
 * @bdev: The TTM device
 * @mem: The ttm resource representing the iomap.
 *
 * This function is for internal TTM use only. It sets up a memcpy kmap iterator
 * pointing at a linear chunk of io memory.
 *
 * Return: A pointer to the embedded struct ttm_kmap_iter or error pointer on
 * failure.
 */
struct ttm_kmap_iter *
ttm_kmap_iter_linear_io_init(struct ttm_kmap_iter_linear_io *iter_io,
			     struct ttm_device *bdev,
			     struct ttm_resource *mem)
{
	int ret;

	ret = ttm_mem_io_reserve(bdev, mem);
	if (ret)
		goto out_err;

	/* Only real io memory can be iterated linearly here. */
	if (!mem->bus.is_iomem) {
		ret = -EINVAL;
		goto out_io_free;
	}

	if (mem->bus.addr) {
		/* Already kernel-mapped by the driver; nothing to unmap later. */
		iosys_map_set_vaddr(&iter_io->dmap, mem->bus.addr);
		iter_io->needs_unmap = false;
	} else {
		iter_io->needs_unmap = true;
		memset(&iter_io->dmap, 0, sizeof(iter_io->dmap));
		if (mem->bus.caching == ttm_write_combined)
			iosys_map_set_vaddr_iomem(&iter_io->dmap,
						  ioremap_wc(mem->bus.offset,
							     mem->size));
		else if (mem->bus.caching == ttm_cached)
			iosys_map_set_vaddr(&iter_io->dmap,
					    memremap(mem->bus.offset, mem->size,
						     MEMREMAP_WB |
						     MEMREMAP_WT |
						     MEMREMAP_WC));

		/* If uncached requested or if mapping cached or wc failed */
		if (iosys_map_is_null(&iter_io->dmap))
			iosys_map_set_vaddr_iomem(&iter_io->dmap,
						  ioremap(mem->bus.offset,
							  mem->size));

		if (iosys_map_is_null(&iter_io->dmap)) {
			ret = -ENOMEM;
			goto out_io_free;
		}
	}

	iter_io->base.ops = &ttm_kmap_iter_linear_io_ops;
	return &iter_io->base;

out_io_free:
	ttm_mem_io_free(bdev, mem);
out_err:
	return ERR_PTR(ret);
}
/**
 * ttm_kmap_iter_linear_io_fini - Clean up an iterator for linear io memory
 * @iter_io: The iterator to clean up
 * @bdev: The TTM device
 * @mem: The ttm resource representing the iomap.
 *
 * This function is for internal TTM use only. It cleans up a memcpy kmap
 * iterator initialized by ttm_kmap_iter_linear_io_init.
 */
void
ttm_kmap_iter_linear_io_fini(struct ttm_kmap_iter_linear_io *iter_io,
			     struct ttm_device *bdev,
			     struct ttm_resource *mem)
{
	/* Only unmap mappings this iterator created itself. */
	if (iter_io->needs_unmap && iosys_map_is_set(&iter_io->dmap)) {
		if (iter_io->dmap.is_iomem)
			iounmap(iter_io->dmap.vaddr_iomem);
		else
			memunmap(iter_io->dmap.vaddr);
	}

	ttm_mem_io_free(bdev, mem);
}
#if defined(CONFIG_DEBUG_FS)

/* debugfs show callback: dump manager statistics via a seq_file printer. */
static int ttm_resource_manager_show(struct seq_file *m, void *unused)
{
	struct ttm_resource_manager *man =
		(struct ttm_resource_manager *)m->private;
	struct drm_printer p = drm_seq_file_printer(m);

	ttm_resource_manager_debug(man, &p);
	return 0;
}
DEFINE_SHOW_ATTRIBUTE(ttm_resource_manager);

#endif
  745. /**
  746. * ttm_resource_manager_create_debugfs - Create debugfs entry for specified
  747. * resource manager.
  748. * @man: The TTM resource manager for which the debugfs stats file be creates
  749. * @parent: debugfs directory in which the file will reside
  750. * @name: The filename to create.
  751. *
  752. * This function setups up a debugfs file that can be used to look
  753. * at debug statistics of the specified ttm_resource_manager.
  754. */
  755. void ttm_resource_manager_create_debugfs(struct ttm_resource_manager *man,
  756. struct dentry * parent,
  757. const char *name)
  758. {
  759. #if defined(CONFIG_DEBUG_FS)
  760. debugfs_create_file(name, 0444, parent, man, &ttm_resource_manager_fops);
  761. #endif
  762. }
  763. EXPORT_SYMBOL(ttm_resource_manager_create_debugfs);