  1. // SPDX-License-Identifier: GPL-2.0 AND MIT
  2. /*
  3. * Copyright © 2023 Intel Corporation
  4. */
  5. #include <drm/ttm/ttm_tt.h>
  6. #include "ttm_kunit_helpers.h"
/* Fallback placement in the always-available system domain. */
static const struct ttm_place sys_place = {
	.fpfn = 0,
	.lpfn = 0,
	.mem_type = TTM_PL_SYSTEM,
	.flags = TTM_PL_FLAG_FALLBACK,
};

/* Placement in the first mock domain, used by bad_evict_flags(). */
static const struct ttm_place mock1_place = {
	.fpfn = 0,
	.lpfn = 0,
	.mem_type = TTM_PL_MOCK1,
	.flags = TTM_PL_FLAG_FALLBACK,
};

/* Placement in the second mock domain, eviction target for TT objects. */
static const struct ttm_place mock2_place = {
	.fpfn = 0,
	.lpfn = 0,
	.mem_type = TTM_PL_MOCK2,
	.flags = TTM_PL_FLAG_FALLBACK,
};

static struct ttm_placement sys_placement = {
	.num_placement = 1,
	.placement = &sys_place,
};

/* "Bad" on purpose: evicting into MOCK1 lets tests exercise failure paths. */
static struct ttm_placement bad_placement = {
	.num_placement = 1,
	.placement = &mock1_place,
};

static struct ttm_placement mock_placement = {
	.num_placement = 1,
	.placement = &mock2_place,
};
  37. static struct ttm_tt *ttm_tt_simple_create(struct ttm_buffer_object *bo, u32 page_flags)
  38. {
  39. struct ttm_tt *tt;
  40. tt = kzalloc(sizeof(*tt), GFP_KERNEL);
  41. ttm_tt_init(tt, bo, page_flags, ttm_cached, 0);
  42. return tt;
  43. }
/* Counterpart of ttm_tt_simple_create(): release the ttm_tt allocation. */
static void ttm_tt_simple_destroy(struct ttm_device *bdev, struct ttm_tt *ttm)
{
	kfree(ttm);
}
/*
 * Minimal ->move implementation mirroring the structure real drivers use:
 *
 *  - a BO with no resource, or an unpopulated system-domain BO, needs no
 *    data transfer and just adopts the new resource ("null" move);
 *  - VRAM -> SYSTEM must bounce through TT, signalled with -EMULTIHOP so
 *    TTM restarts the move via the @hop placement;
 *  - SYSTEM <-> TT moves swap resources without copying;
 *  - anything else falls back to a memcpy move.
 */
static int mock_move(struct ttm_buffer_object *bo, bool evict,
		     struct ttm_operation_ctx *ctx,
		     struct ttm_resource *new_mem,
		     struct ttm_place *hop)
{
	struct ttm_resource *old_mem = bo->resource;

	/* Nothing to copy yet: just take over the new resource. */
	if (!old_mem || (old_mem->mem_type == TTM_PL_SYSTEM && !bo->ttm)) {
		ttm_bo_move_null(bo, new_mem);
		return 0;
	}

	/* VRAM -> SYSTEM requires a temporary hop through TT. */
	if (bo->resource->mem_type == TTM_PL_VRAM &&
	    new_mem->mem_type == TTM_PL_SYSTEM) {
		hop->mem_type = TTM_PL_TT;
		hop->flags = TTM_PL_FLAG_TEMPORARY;
		hop->fpfn = 0;
		hop->lpfn = 0;
		return -EMULTIHOP;
	}

	/* SYSTEM <-> TT needs no data copy, only a resource swap. */
	if ((old_mem->mem_type == TTM_PL_SYSTEM &&
	     new_mem->mem_type == TTM_PL_TT) ||
	    (old_mem->mem_type == TTM_PL_TT &&
	     new_mem->mem_type == TTM_PL_SYSTEM)) {
		ttm_bo_move_null(bo, new_mem);
		return 0;
	}

	return ttm_bo_move_memcpy(bo, ctx, new_mem);
}
  75. static void mock_evict_flags(struct ttm_buffer_object *bo,
  76. struct ttm_placement *placement)
  77. {
  78. switch (bo->resource->mem_type) {
  79. case TTM_PL_VRAM:
  80. case TTM_PL_SYSTEM:
  81. *placement = sys_placement;
  82. break;
  83. case TTM_PL_TT:
  84. *placement = mock_placement;
  85. break;
  86. case TTM_PL_MOCK1:
  87. /* Purge objects coming from this domain */
  88. break;
  89. }
  90. }
/*
 * ->evict_flags that always selects the MOCK1 placement, regardless of the
 * BO's current domain — used by tests that need eviction to go wrong.
 */
static void bad_evict_flags(struct ttm_buffer_object *bo,
			    struct ttm_placement *placement)
{
	*placement = bad_placement;
}
  96. static int ttm_device_kunit_init_with_funcs(struct ttm_test_devices *priv,
  97. struct ttm_device *ttm,
  98. bool use_dma_alloc,
  99. bool use_dma32,
  100. struct ttm_device_funcs *funcs)
  101. {
  102. struct drm_device *drm = priv->drm;
  103. int err;
  104. err = ttm_device_init(ttm, funcs, drm->dev,
  105. drm->anon_inode->i_mapping,
  106. drm->vma_offset_manager,
  107. use_dma_alloc, use_dma32);
  108. return err;
  109. }
/* Default, well-behaved device function table for TTM KUnit tests. */
struct ttm_device_funcs ttm_dev_funcs = {
	.ttm_tt_create = ttm_tt_simple_create,
	.ttm_tt_destroy = ttm_tt_simple_destroy,
	.move = mock_move,
	.eviction_valuable = ttm_bo_eviction_valuable,
	.evict_flags = mock_evict_flags,
};
EXPORT_SYMBOL_GPL(ttm_dev_funcs);
/*
 * Initialize @ttm with the default (well-behaved) function table.
 * Returns 0 on success or a negative errno.
 */
int ttm_device_kunit_init(struct ttm_test_devices *priv,
			  struct ttm_device *ttm,
			  bool use_dma_alloc,
			  bool use_dma32)
{
	return ttm_device_kunit_init_with_funcs(priv, ttm, use_dma_alloc,
						use_dma32, &ttm_dev_funcs);
}
EXPORT_SYMBOL_GPL(ttm_device_kunit_init);
/* Function table with deliberately broken eviction placement selection. */
struct ttm_device_funcs ttm_dev_funcs_bad_evict = {
	.ttm_tt_create = ttm_tt_simple_create,
	.ttm_tt_destroy = ttm_tt_simple_destroy,
	.move = mock_move,
	.eviction_valuable = ttm_bo_eviction_valuable,
	.evict_flags = bad_evict_flags,
};
EXPORT_SYMBOL_GPL(ttm_dev_funcs_bad_evict);
/*
 * Initialize @ttm with the bad-eviction function table, for tests that
 * need eviction to pick an invalid placement. Returns 0 or negative errno.
 */
int ttm_device_kunit_init_bad_evict(struct ttm_test_devices *priv,
				    struct ttm_device *ttm,
				    bool use_dma_alloc,
				    bool use_dma32)
{
	return ttm_device_kunit_init_with_funcs(priv, ttm, use_dma_alloc,
						use_dma32, &ttm_dev_funcs_bad_evict);
}
EXPORT_SYMBOL_GPL(ttm_device_kunit_init_bad_evict);
  144. struct ttm_buffer_object *ttm_bo_kunit_init(struct kunit *test,
  145. struct ttm_test_devices *devs,
  146. size_t size,
  147. struct dma_resv *obj)
  148. {
  149. struct drm_gem_object gem_obj = { };
  150. struct ttm_buffer_object *bo;
  151. int err;
  152. bo = kunit_kzalloc(test, sizeof(*bo), GFP_KERNEL);
  153. KUNIT_ASSERT_NOT_NULL(test, bo);
  154. bo->base = gem_obj;
  155. if (obj)
  156. bo->base.resv = obj;
  157. err = drm_gem_object_init(devs->drm, &bo->base, size);
  158. KUNIT_ASSERT_EQ(test, err, 0);
  159. bo->bdev = devs->ttm_dev;
  160. bo->destroy = dummy_ttm_bo_destroy;
  161. kref_init(&bo->kref);
  162. return bo;
  163. }
  164. EXPORT_SYMBOL_GPL(ttm_bo_kunit_init);
/*
 * Allocate (test-managed) a ttm_place with the given memory type and
 * flags; fpfn/lpfn stay 0 (no range restriction). Asserts on allocation
 * failure, so the returned pointer is always valid.
 */
struct ttm_place *ttm_place_kunit_init(struct kunit *test, u32 mem_type, u32 flags)
{
	struct ttm_place *place;

	place = kunit_kzalloc(test, sizeof(*place), GFP_KERNEL);
	KUNIT_ASSERT_NOT_NULL(test, place);

	place->mem_type = mem_type;
	place->flags = flags;

	return place;
}
EXPORT_SYMBOL_GPL(ttm_place_kunit_init);
/*
 * BO destroy callback for test objects: release the embedded GEM object.
 * The bo memory itself is kunit-managed, so it is not freed here.
 */
void dummy_ttm_bo_destroy(struct ttm_buffer_object *bo)
{
	drm_gem_object_release(&bo->base);
}
EXPORT_SYMBOL_GPL(dummy_ttm_bo_destroy);
/*
 * Allocate the test-device bundle and populate its struct device and DRM
 * device via the DRM KUnit helpers. The TTM device is left NULL; use
 * ttm_test_devices_all() when one is needed. Asserts on any failure.
 */
struct ttm_test_devices *ttm_test_devices_basic(struct kunit *test)
{
	struct ttm_test_devices *devs;

	devs = kunit_kzalloc(test, sizeof(*devs), GFP_KERNEL);
	KUNIT_ASSERT_NOT_NULL(test, devs);

	devs->dev = drm_kunit_helper_alloc_device(test);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, devs->dev);

	/* Set mask for alloc_coherent mappings to enable ttm_pool_alloc testing */
	devs->dev->coherent_dma_mask = -1;

	devs->drm = __drm_kunit_helper_alloc_drm_device(test, devs->dev,
							sizeof(*devs->drm), 0,
							DRIVER_GEM);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, devs->drm);

	return devs;
}
EXPORT_SYMBOL_GPL(ttm_test_devices_basic);
/*
 * Like ttm_test_devices_basic(), but additionally allocates and
 * initializes a TTM device (default funcs, no DMA alloc, no DMA32).
 * Asserts on any failure, so the returned bundle is fully usable.
 */
struct ttm_test_devices *ttm_test_devices_all(struct kunit *test)
{
	struct ttm_test_devices *devs;
	struct ttm_device *ttm_dev;
	int err;

	devs = ttm_test_devices_basic(test);

	ttm_dev = kunit_kzalloc(test, sizeof(*ttm_dev), GFP_KERNEL);
	KUNIT_ASSERT_NOT_NULL(test, ttm_dev);

	err = ttm_device_kunit_init(devs, ttm_dev, false, false);
	KUNIT_ASSERT_EQ(test, err, 0);
	devs->ttm_dev = ttm_dev;

	return devs;
}
EXPORT_SYMBOL_GPL(ttm_test_devices_all);
/*
 * Tear down a device bundle: finalize the TTM device if one was set up,
 * then release the kunit-managed struct device. The kunit allocations
 * themselves are reclaimed automatically at test exit.
 */
void ttm_test_devices_put(struct kunit *test, struct ttm_test_devices *devs)
{
	if (devs->ttm_dev)
		ttm_device_fini(devs->ttm_dev);

	drm_kunit_helper_free_device(test, devs->dev);
}
EXPORT_SYMBOL_GPL(ttm_test_devices_put);
  217. int ttm_test_devices_init(struct kunit *test)
  218. {
  219. struct ttm_test_devices *priv;
  220. priv = kunit_kzalloc(test, sizeof(*priv), GFP_KERNEL);
  221. KUNIT_ASSERT_NOT_NULL(test, priv);
  222. priv = ttm_test_devices_basic(test);
  223. test->priv = priv;
  224. return 0;
  225. }
  226. EXPORT_SYMBOL_GPL(ttm_test_devices_init);
  227. int ttm_test_devices_all_init(struct kunit *test)
  228. {
  229. struct ttm_test_devices *priv;
  230. priv = kunit_kzalloc(test, sizeof(*priv), GFP_KERNEL);
  231. KUNIT_ASSERT_NOT_NULL(test, priv);
  232. priv = ttm_test_devices_all(test);
  233. test->priv = priv;
  234. return 0;
  235. }
  236. EXPORT_SYMBOL_GPL(ttm_test_devices_all_init);
/* KUnit suite exit hook matching the *_init hooks above. */
void ttm_test_devices_fini(struct kunit *test)
{
	ttm_test_devices_put(test, test->priv);
}
EXPORT_SYMBOL_GPL(ttm_test_devices_fini);
  242. MODULE_DESCRIPTION("TTM KUnit test helper functions");
  243. MODULE_LICENSE("GPL and additional rights");