nouveau_ttm.c

// SPDX-License-Identifier: GPL-2.0 OR MIT
/*
 * Copyright (c) 2007-2008 Tungsten Graphics, Inc., Cedar Park, TX., USA,
 * Copyright (c) 2009 VMware, Inc., Palo Alto, CA., USA,
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sub license,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 */
#include "nouveau_drv.h"
#include "nouveau_gem.h"
#include "nouveau_mem.h"
#include "nouveau_ttm.h"

#include <drm/drm_legacy.h>

#include <core/tegra.h>
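
/*
 * TTM memory-type manager callbacks.  Per-type state is created lazily by
 * the get_node hooks further down, so init/takedown have nothing to do and
 * the debug hook prints nothing; the stubs exist only to satisfy the
 * ttm_mem_type_manager_func interface.
 */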
static int
nouveau_manager_init(struct ttm_mem_type_manager *man, unsigned long psize)
{
        return 0;
}

static int
nouveau_manager_fini(struct ttm_mem_type_manager *man)
{
        return 0;
}

static void
nouveau_manager_del(struct ttm_mem_type_manager *man, struct ttm_mem_reg *reg)
{
        nouveau_mem_del(reg);
}

static void
nouveau_manager_debug(struct ttm_mem_type_manager *man,
                      struct drm_printer *printer)
{
}
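
/*
 * VRAM allocation.  A -ENOSPC result from nouveau_mem_vram() is reported
 * to TTM as success with reg->mm_node left NULL, which TTM interprets as
 * "no space in this placement": it then tries the buffer's next acceptable
 * placement instead of failing the validation outright.
 */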
static int
nouveau_vram_manager_new(struct ttm_mem_type_manager *man,
                         struct ttm_buffer_object *bo,
                         const struct ttm_place *place,
                         struct ttm_mem_reg *reg)
{
        struct nouveau_bo *nvbo = nouveau_bo(bo);
        struct nouveau_drm *drm = nouveau_bdev(bo->bdev);
        int ret;

        if (drm->client.device.info.ram_size == 0)
                return -ENOMEM;

        ret = nouveau_mem_new(&drm->master, nvbo->kind, nvbo->comp, reg);
        if (ret)
                return ret;

        ret = nouveau_mem_vram(reg, nvbo->contig, nvbo->page);
        if (ret) {
                nouveau_mem_del(reg);
                if (ret == -ENOSPC) {
                        reg->mm_node = NULL;
                        return 0;
                }
                return ret;
        }

        return 0;
}

const struct ttm_mem_type_manager_func nouveau_vram_manager = {
        .init = nouveau_manager_init,
        .takedown = nouveau_manager_fini,
        .get_node = nouveau_vram_manager_new,
        .put_node = nouveau_manager_del,
        .debug = nouveau_manager_debug,
};
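
/*
 * GART allocation on NV50 and newer.  No aperture offset is reserved here;
 * reg->start is cleared and the real virtual-memory mapping is presumably
 * set up later, when the buffer is bound.
 */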
static int
nouveau_gart_manager_new(struct ttm_mem_type_manager *man,
                         struct ttm_buffer_object *bo,
                         const struct ttm_place *place,
                         struct ttm_mem_reg *reg)
{
        struct nouveau_bo *nvbo = nouveau_bo(bo);
        struct nouveau_drm *drm = nouveau_bdev(bo->bdev);
        int ret;

        ret = nouveau_mem_new(&drm->master, nvbo->kind, nvbo->comp, reg);
        if (ret)
                return ret;

        reg->start = 0;
        return 0;
}

const struct ttm_mem_type_manager_func nouveau_gart_manager = {
        .init = nouveau_manager_init,
        .takedown = nouveau_manager_fini,
        .get_node = nouveau_gart_manager_new,
        .put_node = nouveau_manager_del,
        .debug = nouveau_manager_debug
};
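
/*
 * GART allocation on pre-NV50 hardware, where the aperture offset must be
 * reserved up front: a region of 4 KiB pages (page shift 12) is taken from
 * the owning client's VMM and its address becomes reg->start.
 */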
static int
nv04_gart_manager_new(struct ttm_mem_type_manager *man,
                      struct ttm_buffer_object *bo,
                      const struct ttm_place *place,
                      struct ttm_mem_reg *reg)
{
        struct nouveau_bo *nvbo = nouveau_bo(bo);
        struct nouveau_drm *drm = nouveau_bdev(bo->bdev);
        struct nouveau_mem *mem;
        int ret;

        ret = nouveau_mem_new(&drm->master, nvbo->kind, nvbo->comp, reg);
        mem = nouveau_mem(reg);
        if (ret)
                return ret;

        ret = nvif_vmm_get(&mem->cli->vmm.vmm, PTES, false, 12, 0,
                           reg->num_pages << PAGE_SHIFT, &mem->vma[0]);
        if (ret) {
                nouveau_mem_del(reg);
                if (ret == -ENOSPC) {
                        reg->mm_node = NULL;
                        return 0;
                }
                return ret;
        }

        reg->start = mem->vma[0].addr >> PAGE_SHIFT;
        return 0;
}

const struct ttm_mem_type_manager_func nv04_gart_manager = {
        .init = nouveau_manager_init,
        .takedown = nouveau_manager_fini,
        .get_node = nv04_gart_manager_new,
        .put_node = nouveau_manager_del,
        .debug = nouveau_manager_debug
};
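
/*
 * File offsets below DRM_FILE_PAGE_OFFSET belong to the legacy DRM map
 * space; everything at or above it is a TTM buffer object.  Legacy
 * mappings are only honoured when legacy-context support is compiled in.
 */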
int
nouveau_ttm_mmap(struct file *filp, struct vm_area_struct *vma)
{
        struct drm_file *file_priv = filp->private_data;
        struct nouveau_drm *drm = nouveau_drm(file_priv->minor->dev);

        if (unlikely(vma->vm_pgoff < DRM_FILE_PAGE_OFFSET)) {
#if defined(CONFIG_NOUVEAU_LEGACY_CTX_SUPPORT)
                return drm_legacy_mmap(filp, vma);
#else
                return -EINVAL;
#endif
        }

        return ttm_bo_mmap(filp, vma, &drm->ttm.bdev);
}
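
/*
 * TTM's memory-accounting and buffer-object globals are refcounted
 * singletons shared by every TTM driver in the system:
 * drm_global_item_ref() creates the object on first use and only bumps a
 * refcount afterwards.  A minimal sketch of the pattern (my_init and
 * my_release are hypothetical driver callbacks, not names from this file):
 *
 *      struct drm_global_reference ref = {
 *              .global_type = DRM_GLOBAL_TTM_MEM,
 *              .size = sizeof(struct ttm_mem_global),
 *              .init = &my_init,
 *              .release = &my_release,
 *      };
 *      int ret = drm_global_item_ref(&ref);
 *
 * On success ref.object points at the shared ttm_mem_global, and the
 * reference is dropped again with drm_global_item_unref(&ref).
 */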
static int
nouveau_ttm_mem_global_init(struct drm_global_reference *ref)
{
        return ttm_mem_global_init(ref->object);
}

static void
nouveau_ttm_mem_global_release(struct drm_global_reference *ref)
{
        ttm_mem_global_release(ref->object);
}

int
nouveau_ttm_global_init(struct nouveau_drm *drm)
{
        struct drm_global_reference *global_ref;
        int ret;

        global_ref = &drm->ttm.mem_global_ref;
        global_ref->global_type = DRM_GLOBAL_TTM_MEM;
        global_ref->size = sizeof(struct ttm_mem_global);
        global_ref->init = &nouveau_ttm_mem_global_init;
        global_ref->release = &nouveau_ttm_mem_global_release;

        ret = drm_global_item_ref(global_ref);
        if (unlikely(ret != 0)) {
                DRM_ERROR("Failed setting up TTM memory accounting\n");
                drm->ttm.mem_global_ref.release = NULL;
                return ret;
        }

        drm->ttm.bo_global_ref.mem_glob = global_ref->object;
        global_ref = &drm->ttm.bo_global_ref.ref;
        global_ref->global_type = DRM_GLOBAL_TTM_BO;
        global_ref->size = sizeof(struct ttm_bo_global);
        global_ref->init = &ttm_bo_global_init;
        global_ref->release = &ttm_bo_global_release;

        ret = drm_global_item_ref(global_ref);
        if (unlikely(ret != 0)) {
                DRM_ERROR("Failed setting up TTM BO subsystem\n");
                drm_global_item_unref(&drm->ttm.mem_global_ref);
                drm->ttm.mem_global_ref.release = NULL;
                return ret;
        }

        return 0;
}

void
nouveau_ttm_global_release(struct nouveau_drm *drm)
{
        if (drm->ttm.mem_global_ref.release == NULL)
                return;

        drm_global_item_unref(&drm->ttm.bo_global_ref.ref);
        drm_global_item_unref(&drm->ttm.mem_global_ref);
        drm->ttm.mem_global_ref.release = NULL;
}
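
/*
 * Cache the MMU type indices used for host (system) memory: one coherent
 * and one non-coherent variant each for plain (kind == 0) and kind'd
 * allocations.  nvif_mmu_type() returns a negative index when the device
 * exposes no matching memory type, which is reported here as -ENOSYS.
 */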
static int
nouveau_ttm_init_host(struct nouveau_drm *drm, u8 kind)
{
        struct nvif_mmu *mmu = &drm->client.mmu;
        int typei;

        typei = nvif_mmu_type(mmu, NVIF_MEM_HOST | NVIF_MEM_MAPPABLE |
                                   kind | NVIF_MEM_COHERENT);
        if (typei < 0)
                return -ENOSYS;

        drm->ttm.type_host[!!kind] = typei;

        typei = nvif_mmu_type(mmu, NVIF_MEM_HOST | NVIF_MEM_MAPPABLE | kind);
        if (typei < 0)
                return -ENOSYS;

        drm->ttm.type_ncoh[!!kind] = typei;
        return 0;
}
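
/*
 * Main TTM setup: resolve the host and VRAM memory types, inherit any AGP
 * aperture discovered by the PCI layer, create the TTM globals and the BO
 * device, then size the VRAM and GART ranges that the managers above
 * carve up.
 */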
int
nouveau_ttm_init(struct nouveau_drm *drm)
{
        struct nvkm_device *device = nvxx_device(&drm->client.device);
        struct nvkm_pci *pci = device->pci;
        struct nvif_mmu *mmu = &drm->client.mmu;
        struct drm_device *dev = drm->dev;
        int typei, ret;

        ret = nouveau_ttm_init_host(drm, 0);
        if (ret)
                return ret;

        if (drm->client.device.info.family >= NV_DEVICE_INFO_V0_TESLA &&
            drm->client.device.info.chipset != 0x50) {
                ret = nouveau_ttm_init_host(drm, NVIF_MEM_KIND);
                if (ret)
                        return ret;
        }

        if (drm->client.device.info.platform != NV_DEVICE_INFO_V0_SOC &&
            drm->client.device.info.family >= NV_DEVICE_INFO_V0_TESLA) {
                typei = nvif_mmu_type(mmu, NVIF_MEM_VRAM | NVIF_MEM_MAPPABLE |
                                           NVIF_MEM_KIND |
                                           NVIF_MEM_COMP |
                                           NVIF_MEM_DISP);
                if (typei < 0)
                        return -ENOSYS;

                drm->ttm.type_vram = typei;
        } else {
                drm->ttm.type_vram = -1;
        }

        if (pci && pci->agp.bridge) {
                drm->agp.bridge = pci->agp.bridge;
                drm->agp.base = pci->agp.base;
                drm->agp.size = pci->agp.size;
                drm->agp.cma = pci->agp.cma;
        }

        ret = nouveau_ttm_global_init(drm);
        if (ret)
                return ret;

        ret = ttm_bo_device_init(&drm->ttm.bdev,
                                 drm->ttm.bo_global_ref.ref.object,
                                 &nouveau_bo_driver,
                                 dev->anon_inode->i_mapping,
                                 DRM_FILE_PAGE_OFFSET,
                                 drm->client.mmu.dmabits <= 32 ? true : false);
        if (ret) {
                NV_ERROR(drm, "error initialising bo driver, %d\n", ret);
                return ret;
        }
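
        /*
         * The VRAM aperture (PCI resource 1, i.e. BAR1) is mapped
         * write-combined below: arch_io_reserve_memtype_wc() claims the
         * range and arch_phys_wc_add() adds an MTRR on platforms that
         * still need one.  Both are undone in nouveau_ttm_fini().
         */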
        /* VRAM init */
        drm->gem.vram_available = drm->client.device.info.ram_user;

        arch_io_reserve_memtype_wc(device->func->resource_addr(device, 1),
                                   device->func->resource_size(device, 1));

        ret = ttm_bo_init_mm(&drm->ttm.bdev, TTM_PL_VRAM,
                             drm->gem.vram_available >> PAGE_SHIFT);
        if (ret) {
                NV_ERROR(drm, "VRAM mm init failed, %d\n", ret);
                return ret;
        }

        drm->ttm.mtrr = arch_phys_wc_add(device->func->resource_addr(device, 1),
                                         device->func->resource_size(device, 1));

        /* GART init */
        if (!drm->agp.bridge) {
                drm->gem.gart_available = drm->client.vmm.vmm.limit;
        } else {
                drm->gem.gart_available = drm->agp.size;
        }

        ret = ttm_bo_init_mm(&drm->ttm.bdev, TTM_PL_TT,
                             drm->gem.gart_available >> PAGE_SHIFT);
        if (ret) {
                NV_ERROR(drm, "GART mm init failed, %d\n", ret);
                return ret;
        }

        NV_INFO(drm, "VRAM: %d MiB\n", (u32)(drm->gem.vram_available >> 20));
        NV_INFO(drm, "GART: %d MiB\n", (u32)(drm->gem.gart_available >> 20));
        return 0;
}
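
/*
 * Teardown mirrors nouveau_ttm_init(): drain and destroy the two managed
 * ranges, release the BO device and the TTM globals, then undo the
 * write-combine setup on BAR1.
 */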
void
nouveau_ttm_fini(struct nouveau_drm *drm)
{
        struct nvkm_device *device = nvxx_device(&drm->client.device);

        ttm_bo_clean_mm(&drm->ttm.bdev, TTM_PL_VRAM);
        ttm_bo_clean_mm(&drm->ttm.bdev, TTM_PL_TT);

        ttm_bo_device_release(&drm->ttm.bdev);

        nouveau_ttm_global_release(drm);

        arch_phys_wc_del(drm->ttm.mtrr);
        drm->ttm.mtrr = 0;
        arch_io_free_memtype_wc(device->func->resource_addr(device, 1),
                                device->func->resource_size(device, 1));
}