// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2016-2018 Etnaviv Project
 */

#include <linux/platform_device.h>
#include <linux/sizes.h>
#include <linux/slab.h>
#include <linux/dma-mapping.h>
#include <linux/bitops.h>

#include "etnaviv_cmdbuf.h"
#include "etnaviv_gpu.h"
#include "etnaviv_mmu.h"
#include "etnaviv_iommu.h"
#include "state.xml.h"
#include "state_hi.xml.h"

#define MMUv2_PTE_PRESENT		BIT(0)
#define MMUv2_PTE_EXCEPTION		BIT(1)
#define MMUv2_PTE_WRITEABLE		BIT(2)

#define MMUv2_MTLB_MASK			0xffc00000
#define MMUv2_MTLB_SHIFT		22
#define MMUv2_STLB_MASK			0x003ff000
#define MMUv2_STLB_SHIFT		12

#define MMUv2_MAX_STLB_ENTRIES		1024

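/*
 * MMUv2 uses a two-level pagetable: a 32-bit GPU virtual address splits
 * into a 10-bit master TLB (MTLB) index, a 10-bit slave TLB (STLB) index
 * and a 12-bit page offset, covering 4 GiB with 4K pages. For example,
 * iova 0x12345678 decomposes into MTLB entry 0x048, STLB entry 0x345 and
 * page offset 0x678.
 */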
struct etnaviv_iommuv2_domain {
	struct etnaviv_iommu_domain base;
	/* P(age) T(able) A(rray) */
	u64 *pta_cpu;
	dma_addr_t pta_dma;
	/* M(aster) TLB aka first level pagetable */
	u32 *mtlb_cpu;
	dma_addr_t mtlb_dma;
	/* S(lave) TLB aka second level pagetable */
	u32 *stlb_cpu[MMUv2_MAX_STLB_ENTRIES];
	dma_addr_t stlb_dma[MMUv2_MAX_STLB_ENTRIES];
};

static struct etnaviv_iommuv2_domain *
to_etnaviv_domain(struct etnaviv_iommu_domain *domain)
{
	return container_of(domain, struct etnaviv_iommuv2_domain, base);
}

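/*
 * Slave TLBs are allocated lazily: on first use of an MTLB slot a 4K
 * STLB page is allocated, filled with exception entries and hooked up
 * in the master TLB.
 */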
static int
etnaviv_iommuv2_ensure_stlb(struct etnaviv_iommuv2_domain *etnaviv_domain,
			    int stlb)
{
	if (etnaviv_domain->stlb_cpu[stlb])
		return 0;

	etnaviv_domain->stlb_cpu[stlb] =
			dma_alloc_wc(etnaviv_domain->base.dev, SZ_4K,
				     &etnaviv_domain->stlb_dma[stlb],
				     GFP_KERNEL);

	if (!etnaviv_domain->stlb_cpu[stlb])
		return -ENOMEM;

	memset32(etnaviv_domain->stlb_cpu[stlb], MMUv2_PTE_EXCEPTION,
		 SZ_4K / sizeof(u32));

	etnaviv_domain->mtlb_cpu[stlb] = etnaviv_domain->stlb_dma[stlb] |
					 MMUv2_PTE_PRESENT;

	return 0;
}

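/*
 * Map a single 4K page: the PTE carries the low 32 bits of the physical
 * address plus the present/writeable flags. With a 64-bit phys_addr_t,
 * physical address bits 32-39 are stashed in PTE bits 4-11.
 */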
static int etnaviv_iommuv2_map(struct etnaviv_iommu_domain *domain,
			       unsigned long iova, phys_addr_t paddr,
			       size_t size, int prot)
{
	struct etnaviv_iommuv2_domain *etnaviv_domain =
			to_etnaviv_domain(domain);
	int mtlb_entry, stlb_entry, ret;
	u32 entry = lower_32_bits(paddr) | MMUv2_PTE_PRESENT;

	if (size != SZ_4K)
		return -EINVAL;

	if (IS_ENABLED(CONFIG_PHYS_ADDR_T_64BIT))
		entry |= (upper_32_bits(paddr) & 0xff) << 4;

	if (prot & ETNAVIV_PROT_WRITE)
		entry |= MMUv2_PTE_WRITEABLE;

	mtlb_entry = (iova & MMUv2_MTLB_MASK) >> MMUv2_MTLB_SHIFT;
	stlb_entry = (iova & MMUv2_STLB_MASK) >> MMUv2_STLB_SHIFT;

	ret = etnaviv_iommuv2_ensure_stlb(etnaviv_domain, mtlb_entry);
	if (ret)
		return ret;

	etnaviv_domain->stlb_cpu[mtlb_entry][stlb_entry] = entry;

	return 0;
}

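/*
 * Unmap does not tear down the STLB page; the PTE is just reset to the
 * exception marker, so stray accesses to the address fault again.
 */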
static size_t etnaviv_iommuv2_unmap(struct etnaviv_iommu_domain *domain,
				    unsigned long iova, size_t size)
{
	struct etnaviv_iommuv2_domain *etnaviv_domain =
			to_etnaviv_domain(domain);
	int mtlb_entry, stlb_entry;

	if (size != SZ_4K)
		return -EINVAL;

	mtlb_entry = (iova & MMUv2_MTLB_MASK) >> MMUv2_MTLB_SHIFT;
	stlb_entry = (iova & MMUv2_STLB_MASK) >> MMUv2_STLB_SHIFT;

	etnaviv_domain->stlb_cpu[mtlb_entry][stlb_entry] = MMUv2_PTE_EXCEPTION;

	return SZ_4K;
}

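/*
 * Allocate the static parts of the domain: the scratch page used as the
 * safe target for faulting accesses, the pagetable array (PTA) needed in
 * secure mode and the master TLB, initially with all entries marked as
 * exceptions.
 */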
static int etnaviv_iommuv2_init(struct etnaviv_iommuv2_domain *etnaviv_domain)
{
	int ret;

	/* allocate scratch page */
	etnaviv_domain->base.bad_page_cpu =
			dma_alloc_wc(etnaviv_domain->base.dev, SZ_4K,
				     &etnaviv_domain->base.bad_page_dma,
				     GFP_KERNEL);
	if (!etnaviv_domain->base.bad_page_cpu) {
		ret = -ENOMEM;
		goto fail_mem;
	}

	memset32(etnaviv_domain->base.bad_page_cpu, 0xdead55aa,
		 SZ_4K / sizeof(u32));

	etnaviv_domain->pta_cpu = dma_alloc_wc(etnaviv_domain->base.dev,
					       SZ_4K, &etnaviv_domain->pta_dma,
					       GFP_KERNEL);
	if (!etnaviv_domain->pta_cpu) {
		ret = -ENOMEM;
		goto fail_mem;
	}

	etnaviv_domain->mtlb_cpu = dma_alloc_wc(etnaviv_domain->base.dev,
						SZ_4K, &etnaviv_domain->mtlb_dma,
						GFP_KERNEL);
	if (!etnaviv_domain->mtlb_cpu) {
		ret = -ENOMEM;
		goto fail_mem;
	}

	memset32(etnaviv_domain->mtlb_cpu, MMUv2_PTE_EXCEPTION,
		 MMUv2_MAX_STLB_ENTRIES);

	return 0;

fail_mem:
	if (etnaviv_domain->base.bad_page_cpu)
		dma_free_wc(etnaviv_domain->base.dev, SZ_4K,
			    etnaviv_domain->base.bad_page_cpu,
			    etnaviv_domain->base.bad_page_dma);

	if (etnaviv_domain->pta_cpu)
		dma_free_wc(etnaviv_domain->base.dev, SZ_4K,
			    etnaviv_domain->pta_cpu, etnaviv_domain->pta_dma);

	if (etnaviv_domain->mtlb_cpu)
		dma_free_wc(etnaviv_domain->base.dev, SZ_4K,
			    etnaviv_domain->mtlb_cpu, etnaviv_domain->mtlb_dma);

	return ret;
}

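/*
 * Free all DMA allocations, including any lazily allocated slave TLBs,
 * before releasing the domain itself.
 */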
static void etnaviv_iommuv2_domain_free(struct etnaviv_iommu_domain *domain)
{
	struct etnaviv_iommuv2_domain *etnaviv_domain =
			to_etnaviv_domain(domain);
	int i;

	dma_free_wc(etnaviv_domain->base.dev, SZ_4K,
		    etnaviv_domain->base.bad_page_cpu,
		    etnaviv_domain->base.bad_page_dma);

	dma_free_wc(etnaviv_domain->base.dev, SZ_4K,
		    etnaviv_domain->pta_cpu, etnaviv_domain->pta_dma);

	dma_free_wc(etnaviv_domain->base.dev, SZ_4K,
		    etnaviv_domain->mtlb_cpu, etnaviv_domain->mtlb_dma);

	for (i = 0; i < MMUv2_MAX_STLB_ENTRIES; i++) {
		if (etnaviv_domain->stlb_cpu[i])
			dma_free_wc(etnaviv_domain->base.dev, SZ_4K,
				    etnaviv_domain->stlb_cpu[i],
				    etnaviv_domain->stlb_dma[i]);
	}

	vfree(etnaviv_domain);
}

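/*
 * A pagetable dump covers one 4K page for the MTLB plus one 4K page per
 * present STLB; dump must therefore only advance the buffer pointer for
 * present entries to stay in sync with dump_size.
 */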
static size_t etnaviv_iommuv2_dump_size(struct etnaviv_iommu_domain *domain)
{
	struct etnaviv_iommuv2_domain *etnaviv_domain =
			to_etnaviv_domain(domain);
	size_t dump_size = SZ_4K;
	int i;

	for (i = 0; i < MMUv2_MAX_STLB_ENTRIES; i++)
		if (etnaviv_domain->mtlb_cpu[i] & MMUv2_PTE_PRESENT)
			dump_size += SZ_4K;

	return dump_size;
}

static void etnaviv_iommuv2_dump(struct etnaviv_iommu_domain *domain, void *buf)
{
	struct etnaviv_iommuv2_domain *etnaviv_domain =
			to_etnaviv_domain(domain);
	int i;

	memcpy(buf, etnaviv_domain->mtlb_cpu, SZ_4K);
	buf += SZ_4K;
	for (i = 0; i < MMUv2_MAX_STLB_ENTRIES; i++)
		if (etnaviv_domain->mtlb_cpu[i] & MMUv2_PTE_PRESENT) {
			memcpy(buf, etnaviv_domain->stlb_cpu[i], SZ_4K);
			buf += SZ_4K;
		}
}

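/*
 * Restore the MMU state after a GPU reset in non-secure mode: the MTLB
 * and safe (scratch page) addresses are programmed through a command
 * stream run by the FE, then translation is enabled via MMIO.
 */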
static void etnaviv_iommuv2_restore_nonsec(struct etnaviv_gpu *gpu)
{
	struct etnaviv_iommuv2_domain *etnaviv_domain =
			to_etnaviv_domain(gpu->mmu->domain);
	u16 prefetch;

	/* If the MMU is already enabled the state is still there. */
	if (gpu_read(gpu, VIVS_MMUv2_CONTROL) & VIVS_MMUv2_CONTROL_ENABLE)
		return;

	prefetch = etnaviv_buffer_config_mmuv2(gpu,
				(u32)etnaviv_domain->mtlb_dma,
				(u32)etnaviv_domain->base.bad_page_dma);
	etnaviv_gpu_start_fe(gpu, (u32)etnaviv_cmdbuf_get_pa(&gpu->buffer),
			     prefetch);
	etnaviv_gpu_wait_idle(gpu, 100);

	gpu_write(gpu, VIVS_MMUv2_CONTROL, VIVS_MMUv2_CONTROL_ENABLE);
}

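/*
 * In secure mode the MTLB address is not programmed directly: PTA entry 0
 * points at the MTLB and the FE is asked to load the PTA before the
 * secure MMU control register enables translation.
 */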
static void etnaviv_iommuv2_restore_sec(struct etnaviv_gpu *gpu)
{
	struct etnaviv_iommuv2_domain *etnaviv_domain =
			to_etnaviv_domain(gpu->mmu->domain);
	u16 prefetch;

	/* If the MMU is already enabled the state is still there. */
	if (gpu_read(gpu, VIVS_MMUv2_SEC_CONTROL) & VIVS_MMUv2_SEC_CONTROL_ENABLE)
		return;

	gpu_write(gpu, VIVS_MMUv2_PTA_ADDRESS_LOW,
		  lower_32_bits(etnaviv_domain->pta_dma));
	gpu_write(gpu, VIVS_MMUv2_PTA_ADDRESS_HIGH,
		  upper_32_bits(etnaviv_domain->pta_dma));
	gpu_write(gpu, VIVS_MMUv2_PTA_CONTROL, VIVS_MMUv2_PTA_CONTROL_ENABLE);

	gpu_write(gpu, VIVS_MMUv2_NONSEC_SAFE_ADDR_LOW,
		  lower_32_bits(etnaviv_domain->base.bad_page_dma));
	gpu_write(gpu, VIVS_MMUv2_SEC_SAFE_ADDR_LOW,
		  lower_32_bits(etnaviv_domain->base.bad_page_dma));
	gpu_write(gpu, VIVS_MMUv2_SAFE_ADDRESS_CONFIG,
		  VIVS_MMUv2_SAFE_ADDRESS_CONFIG_NON_SEC_SAFE_ADDR_HIGH(
		  upper_32_bits(etnaviv_domain->base.bad_page_dma)) |
		  VIVS_MMUv2_SAFE_ADDRESS_CONFIG_SEC_SAFE_ADDR_HIGH(
		  upper_32_bits(etnaviv_domain->base.bad_page_dma)));

	etnaviv_domain->pta_cpu[0] = etnaviv_domain->mtlb_dma |
				     VIVS_MMUv2_CONFIGURATION_MODE_MODE4_K;

	/* trigger a PTA load through the FE */
	prefetch = etnaviv_buffer_config_pta(gpu);
	etnaviv_gpu_start_fe(gpu, (u32)etnaviv_cmdbuf_get_pa(&gpu->buffer),
			     prefetch);
	etnaviv_gpu_wait_idle(gpu, 100);

	gpu_write(gpu, VIVS_MMUv2_SEC_CONTROL, VIVS_MMUv2_SEC_CONTROL_ENABLE);
}

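/* Dispatch the restore according to the security mode the GPU runs in. */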
void etnaviv_iommuv2_restore(struct etnaviv_gpu *gpu)
{
	switch (gpu->sec_mode) {
	case ETNA_SEC_NONE:
		etnaviv_iommuv2_restore_nonsec(gpu);
		break;
	case ETNA_SEC_KERNEL:
		etnaviv_iommuv2_restore_sec(gpu);
		break;
	default:
		WARN(1, "unhandled GPU security mode\n");
		break;
	}
}

static const struct etnaviv_iommu_domain_ops etnaviv_iommuv2_ops = {
	.free = etnaviv_iommuv2_domain_free,
	.map = etnaviv_iommuv2_map,
	.unmap = etnaviv_iommuv2_unmap,
	.dump_size = etnaviv_iommuv2_dump_size,
	.dump = etnaviv_iommuv2_dump,
};

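/* Allocate a MMUv2 domain spanning the full 4 GiB GPU virtual address space. */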
struct etnaviv_iommu_domain *
etnaviv_iommuv2_domain_alloc(struct etnaviv_gpu *gpu)
{
	struct etnaviv_iommuv2_domain *etnaviv_domain;
	struct etnaviv_iommu_domain *domain;
	int ret;

	etnaviv_domain = vzalloc(sizeof(*etnaviv_domain));
	if (!etnaviv_domain)
		return NULL;

	domain = &etnaviv_domain->base;

	domain->dev = gpu->dev;
	domain->base = 0;
	domain->size = (u64)SZ_1G * 4;
	domain->ops = &etnaviv_iommuv2_ops;

	ret = etnaviv_iommuv2_init(etnaviv_domain);
	if (ret)
		goto out_free;

	return &etnaviv_domain->base;

out_free:
	vfree(etnaviv_domain);
	return NULL;
}