/* SPDX-License-Identifier: GPL-2.0 OR MIT */
/**************************************************************************
 *
 * Copyright (c) 2006-2009 VMware, Inc., Palo Alto, CA., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/
/*
 * Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com>
 */

#define pr_fmt(fmt) "[TTM] " fmt

#include <linux/sched.h>
#include <linux/pagemap.h>
#include <linux/shmem_fs.h>
#include <linux/file.h>
#include <drm/drm_cache.h>
#include <drm/ttm/ttm_bo_driver.h>
#include <drm/ttm/ttm_page_alloc.h>
#include <drm/ttm/ttm_set_memory.h>

/**
 * Allocates a ttm structure for the given BO.
 */
int ttm_tt_create(struct ttm_buffer_object *bo, bool zero_alloc)
{
        struct ttm_bo_device *bdev = bo->bdev;
        uint32_t page_flags = 0;

        reservation_object_assert_held(bo->resv);

        if (bdev->need_dma32)
                page_flags |= TTM_PAGE_FLAG_DMA32;

        if (bdev->no_retry)
                page_flags |= TTM_PAGE_FLAG_NO_RETRY;

        switch (bo->type) {
        case ttm_bo_type_device:
                if (zero_alloc)
                        page_flags |= TTM_PAGE_FLAG_ZERO_ALLOC;
                break;
        case ttm_bo_type_kernel:
                break;
        case ttm_bo_type_sg:
                page_flags |= TTM_PAGE_FLAG_SG;
                break;
        default:
                bo->ttm = NULL;
                pr_err("Illegal buffer object type\n");
                return -EINVAL;
        }

        bo->ttm = bdev->driver->ttm_tt_create(bo, page_flags);
        if (unlikely(bo->ttm == NULL))
                return -ENOMEM;

        return 0;
}

/**
 * Allocates storage for pointers to the pages that back the ttm.
 */
static int ttm_tt_alloc_page_directory(struct ttm_tt *ttm)
{
        ttm->pages = kvmalloc_array(ttm->num_pages, sizeof(void*),
                                    GFP_KERNEL | __GFP_ZERO);
        if (!ttm->pages)
                return -ENOMEM;
        return 0;
}
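
/**
 * Allocates storage for the page pointers and the DMA addresses of a
 * ttm_dma_tt in a single array; the DMA addresses start right after
 * the page pointers.
 */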
static int ttm_dma_tt_alloc_page_directory(struct ttm_dma_tt *ttm)
{
        ttm->ttm.pages = kvmalloc_array(ttm->ttm.num_pages,
                                        sizeof(*ttm->ttm.pages) +
                                        sizeof(*ttm->dma_address),
                                        GFP_KERNEL | __GFP_ZERO);
        if (!ttm->ttm.pages)
                return -ENOMEM;

        ttm->dma_address = (void *)(ttm->ttm.pages + ttm->ttm.num_pages);
        return 0;
}
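
/**
 * Allocates storage for the DMA addresses only; SG-backed ttms take
 * their page pointers from the scatter-gather table instead.
 */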
static int ttm_sg_tt_alloc_page_directory(struct ttm_dma_tt *ttm)
{
        ttm->dma_address = kvmalloc_array(ttm->ttm.num_pages,
                                          sizeof(*ttm->dma_address),
                                          GFP_KERNEL | __GFP_ZERO);
        if (!ttm->dma_address)
                return -ENOMEM;
        return 0;
}
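
/**
 * Moves a single page to the caching state c_new, transitioning through
 * write-back first when the page leaves a non-default state. Highmem
 * pages have no linear kernel mapping to adjust, so they are skipped.
 */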
static int ttm_tt_set_page_caching(struct page *p,
                                   enum ttm_caching_state c_old,
                                   enum ttm_caching_state c_new)
{
        int ret = 0;

        if (PageHighMem(p))
                return 0;

        if (c_old != tt_cached) {
                /* p isn't in the default caching state, set it to
                 * writeback first to free its current memtype. */
                ret = ttm_set_pages_wb(p, 1);
                if (ret)
                        return ret;
        }

        if (c_new == tt_wc)
                ret = ttm_set_pages_wc(p, 1);
        else if (c_new == tt_uncached)
                ret = ttm_set_pages_uc(p, 1);

        return ret;
}

/*
 * Change caching policy for the linear kernel map
 * for a range of pages in a ttm.
 */
static int ttm_tt_set_caching(struct ttm_tt *ttm,
                              enum ttm_caching_state c_state)
{
        int i, j;
        struct page *cur_page;
        int ret;

        if (ttm->caching_state == c_state)
                return 0;

        if (ttm->state == tt_unpopulated) {
                /* Change caching but don't populate */
                ttm->caching_state = c_state;
                return 0;
        }

        if (ttm->caching_state == tt_cached)
                drm_clflush_pages(ttm->pages, ttm->num_pages);

        for (i = 0; i < ttm->num_pages; ++i) {
                cur_page = ttm->pages[i];
                if (likely(cur_page != NULL)) {
                        ret = ttm_tt_set_page_caching(cur_page,
                                                      ttm->caching_state,
                                                      c_state);
                        if (unlikely(ret != 0))
                                goto out_err;
                }
        }

        ttm->caching_state = c_state;

        return 0;

out_err:
        for (j = 0; j < i; ++j) {
                cur_page = ttm->pages[j];
                if (likely(cur_page != NULL)) {
                        (void)ttm_tt_set_page_caching(cur_page, c_state,
                                                      ttm->caching_state);
                }
        }

        return ret;
}
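
/**
 * Maps the TTM_PL_FLAG_WC / TTM_PL_FLAG_UNCACHED placement flags to a
 * caching state and applies it to all pages of the ttm.
 */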
int ttm_tt_set_placement_caching(struct ttm_tt *ttm, uint32_t placement)
{
        enum ttm_caching_state state;

        if (placement & TTM_PL_FLAG_WC)
                state = tt_wc;
        else if (placement & TTM_PL_FLAG_UNCACHED)
                state = tt_uncached;
        else
                state = tt_cached;

        return ttm_tt_set_caching(ttm, state);
}
EXPORT_SYMBOL(ttm_tt_set_placement_caching);
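
/**
 * Unbinds and unpopulates the ttm, releases non-persistent swap
 * storage, and finally calls the backend destructor.
 */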
void ttm_tt_destroy(struct ttm_tt *ttm)
{
        if (ttm == NULL)
                return;

        ttm_tt_unbind(ttm);

        if (ttm->state == tt_unbound)
                ttm_tt_unpopulate(ttm);

        if (!(ttm->page_flags & TTM_PAGE_FLAG_PERSISTENT_SWAP) &&
            ttm->swap_storage)
                fput(ttm->swap_storage);

        ttm->swap_storage = NULL;
        ttm->func->destroy(ttm);
}
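
/**
 * Initializes the fields common to all ttm flavours from the backing BO.
 */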
void ttm_tt_init_fields(struct ttm_tt *ttm, struct ttm_buffer_object *bo,
                        uint32_t page_flags)
{
        ttm->bdev = bo->bdev;
        ttm->num_pages = bo->num_pages;
        ttm->caching_state = tt_cached;
        ttm->page_flags = page_flags;
        ttm->state = tt_unpopulated;
        ttm->swap_storage = NULL;
        ttm->sg = bo->sg;
}
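
/**
 * Initializes a plain ttm and allocates its page directory.
 */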
int ttm_tt_init(struct ttm_tt *ttm, struct ttm_buffer_object *bo,
                uint32_t page_flags)
{
        ttm_tt_init_fields(ttm, bo, page_flags);

        if (ttm_tt_alloc_page_directory(ttm)) {
                pr_err("Failed allocating page table\n");
                return -ENOMEM;
        }
        return 0;
}
EXPORT_SYMBOL(ttm_tt_init);
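
/**
 * Frees the page directory of a plain ttm.
 */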
void ttm_tt_fini(struct ttm_tt *ttm)
{
        kvfree(ttm->pages);
        ttm->pages = NULL;
}
EXPORT_SYMBOL(ttm_tt_fini);
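
/**
 * Initializes a DMA-capable ttm, allocating page pointers and DMA
 * addresses in one combined directory.
 */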
int ttm_dma_tt_init(struct ttm_dma_tt *ttm_dma, struct ttm_buffer_object *bo,
                    uint32_t page_flags)
{
        struct ttm_tt *ttm = &ttm_dma->ttm;

        ttm_tt_init_fields(ttm, bo, page_flags);

        INIT_LIST_HEAD(&ttm_dma->pages_list);
        if (ttm_dma_tt_alloc_page_directory(ttm_dma)) {
                pr_err("Failed allocating page table\n");
                return -ENOMEM;
        }
        return 0;
}
EXPORT_SYMBOL(ttm_dma_tt_init);
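
/**
 * Initializes a ttm that may be SG-backed; SG ttms only need DMA
 * addresses, all others get the combined directory.
 */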
int ttm_sg_tt_init(struct ttm_dma_tt *ttm_dma, struct ttm_buffer_object *bo,
                   uint32_t page_flags)
{
        struct ttm_tt *ttm = &ttm_dma->ttm;
        int ret;

        ttm_tt_init_fields(ttm, bo, page_flags);

        INIT_LIST_HEAD(&ttm_dma->pages_list);
        if (page_flags & TTM_PAGE_FLAG_SG)
                ret = ttm_sg_tt_alloc_page_directory(ttm_dma);
        else
                ret = ttm_dma_tt_alloc_page_directory(ttm_dma);
        if (ret) {
                pr_err("Failed allocating page table\n");
                return -ENOMEM;
        }
        return 0;
}
EXPORT_SYMBOL(ttm_sg_tt_init);
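
/**
 * Frees whichever directory ttm_dma_tt_init or ttm_sg_tt_init allocated:
 * the combined array hangs off ttm->pages, the SG variant off dma_address.
 */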
void ttm_dma_tt_fini(struct ttm_dma_tt *ttm_dma)
{
        struct ttm_tt *ttm = &ttm_dma->ttm;

        if (ttm->pages)
                kvfree(ttm->pages);
        else
                kvfree(ttm_dma->dma_address);
        ttm->pages = NULL;
        ttm_dma->dma_address = NULL;
}
EXPORT_SYMBOL(ttm_dma_tt_fini);
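
/**
 * Unbinds the ttm through the backend unbind hook, if currently bound.
 */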
void ttm_tt_unbind(struct ttm_tt *ttm)
{
        int ret;

        if (ttm->state == tt_bound) {
                ret = ttm->func->unbind(ttm);
                BUG_ON(ret);
                ttm->state = tt_unbound;
        }
}
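
/**
 * Populates the ttm if necessary and binds it through the backend bind
 * hook, moving it to the tt_bound state.
 */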
int ttm_tt_bind(struct ttm_tt *ttm, struct ttm_mem_reg *bo_mem,
                struct ttm_operation_ctx *ctx)
{
        int ret = 0;

        if (!ttm)
                return -EINVAL;

        if (ttm->state == tt_bound)
                return 0;

        ret = ttm_tt_populate(ttm, ctx);
        if (ret)
                return ret;

        ret = ttm->func->bind(ttm, bo_mem);
        if (unlikely(ret != 0))
                return ret;

        ttm->state = tt_bound;

        return 0;
}
EXPORT_SYMBOL(ttm_tt_bind);
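
/**
 * Copies the ttm's pages back in from its shmem swap storage, then
 * drops the storage unless it is persistent.
 */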
int ttm_tt_swapin(struct ttm_tt *ttm)
{
        struct address_space *swap_space;
        struct file *swap_storage;
        struct page *from_page;
        struct page *to_page;
        int i;
        int ret = -ENOMEM;

        swap_storage = ttm->swap_storage;
        BUG_ON(swap_storage == NULL);

        swap_space = swap_storage->f_mapping;

        for (i = 0; i < ttm->num_pages; ++i) {
                gfp_t gfp_mask = mapping_gfp_mask(swap_space);

                gfp_mask |= (ttm->page_flags & TTM_PAGE_FLAG_NO_RETRY ? __GFP_RETRY_MAYFAIL : 0);
                from_page = shmem_read_mapping_page_gfp(swap_space, i, gfp_mask);

                if (IS_ERR(from_page)) {
                        ret = PTR_ERR(from_page);
                        goto out_err;
                }
                to_page = ttm->pages[i];
                if (unlikely(to_page == NULL))
                        goto out_err;

                copy_highpage(to_page, from_page);
                put_page(from_page);
        }

        if (!(ttm->page_flags & TTM_PAGE_FLAG_PERSISTENT_SWAP))
                fput(swap_storage);
        ttm->swap_storage = NULL;
        ttm->page_flags &= ~TTM_PAGE_FLAG_SWAPPED;

        return 0;

out_err:
        return ret;
}
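
/**
 * Copies the ttm's pages out to shmem storage (newly allocated unless a
 * persistent file is given) and unpopulates the ttm.
 */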
int ttm_tt_swapout(struct ttm_tt *ttm, struct file *persistent_swap_storage)
{
        struct address_space *swap_space;
        struct file *swap_storage;
        struct page *from_page;
        struct page *to_page;
        int i;
        int ret = -ENOMEM;

        BUG_ON(ttm->state != tt_unbound && ttm->state != tt_unpopulated);
        BUG_ON(ttm->caching_state != tt_cached);

        if (!persistent_swap_storage) {
                swap_storage = shmem_file_setup("ttm swap",
                                                ttm->num_pages << PAGE_SHIFT,
                                                0);
                if (IS_ERR(swap_storage)) {
                        pr_err("Failed allocating swap storage\n");
                        return PTR_ERR(swap_storage);
                }
        } else {
                swap_storage = persistent_swap_storage;
        }

        swap_space = swap_storage->f_mapping;

        for (i = 0; i < ttm->num_pages; ++i) {
                gfp_t gfp_mask = mapping_gfp_mask(swap_space);

                gfp_mask |= (ttm->page_flags & TTM_PAGE_FLAG_NO_RETRY ? __GFP_RETRY_MAYFAIL : 0);

                from_page = ttm->pages[i];
                if (unlikely(from_page == NULL))
                        continue;

                to_page = shmem_read_mapping_page_gfp(swap_space, i, gfp_mask);
                if (IS_ERR(to_page)) {
                        ret = PTR_ERR(to_page);
                        goto out_err;
                }
                copy_highpage(to_page, from_page);
                set_page_dirty(to_page);
                mark_page_accessed(to_page);
                put_page(to_page);
        }

        ttm_tt_unpopulate(ttm);
        ttm->swap_storage = swap_storage;
        ttm->page_flags |= TTM_PAGE_FLAG_SWAPPED;
        if (persistent_swap_storage)
                ttm->page_flags |= TTM_PAGE_FLAG_PERSISTENT_SWAP;

        return 0;

out_err:
        if (!persistent_swap_storage)
                fput(swap_storage);

        return ret;
}
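
/**
 * Sets the mapping of each backing page to the device address space;
 * SG-backed ttms are skipped.
 */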
static void ttm_tt_add_mapping(struct ttm_tt *ttm)
{
        pgoff_t i;

        if (ttm->page_flags & TTM_PAGE_FLAG_SG)
                return;

        for (i = 0; i < ttm->num_pages; ++i)
                ttm->pages[i]->mapping = ttm->bdev->dev_mapping;
}
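
/**
 * Allocates backing pages via the driver hook, or the default page pool
 * when the driver does not provide one.
 */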
int ttm_tt_populate(struct ttm_tt *ttm, struct ttm_operation_ctx *ctx)
{
        int ret;

        if (ttm->state != tt_unpopulated)
                return 0;

        if (ttm->bdev->driver->ttm_tt_populate)
                ret = ttm->bdev->driver->ttm_tt_populate(ttm, ctx);
        else
                ret = ttm_pool_populate(ttm, ctx);
        if (!ret)
                ttm_tt_add_mapping(ttm);
        return ret;
}
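
/**
 * Clears the mapping and index of each backing page before the pages
 * are released; SG-backed ttms are skipped.
 */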
static void ttm_tt_clear_mapping(struct ttm_tt *ttm)
{
        pgoff_t i;
        struct page **page = ttm->pages;

        if (ttm->page_flags & TTM_PAGE_FLAG_SG)
                return;

        for (i = 0; i < ttm->num_pages; ++i) {
                (*page)->mapping = NULL;
                (*page++)->index = 0;
        }
}
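
/**
 * Releases the backing pages via the driver hook or the default page pool.
 */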
void ttm_tt_unpopulate(struct ttm_tt *ttm)
{
        if (ttm->state == tt_unpopulated)
                return;

        ttm_tt_clear_mapping(ttm);
        if (ttm->bdev->driver->ttm_tt_unpopulate)
                ttm->bdev->driver->ttm_tt_unpopulate(ttm);
        else
                ttm_pool_unpopulate(ttm);
}