etnaviv_iommu.c

// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2014-2018 Etnaviv Project
 */

#include <linux/platform_device.h>
#include <linux/sizes.h>
#include <linux/slab.h>
#include <linux/dma-mapping.h>
#include <linux/bitops.h>

#include "etnaviv_gpu.h"
#include "etnaviv_mmu.h"
#include "etnaviv_iommu.h"
#include "state_hi.xml.h"
#define PT_SIZE		SZ_2M
#define PT_ENTRIES	(PT_SIZE / sizeof(u32))

#define GPU_MEM_START	0x80000000
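/*
 * The v1 MMU uses a single flat page table: PT_SIZE (2 MiB) holds 512Ki
 * 32-bit entries, each translating one 4 KiB page, which covers 2 GiB of
 * GPU address space starting at GPU_MEM_START.
 */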
struct etnaviv_iommuv1_domain {
	struct etnaviv_iommu_domain base;
	u32 *pgtable_cpu;
	dma_addr_t pgtable_dma;
};
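/* Convert the generic domain pointer back to the v1-specific wrapper. */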
static struct etnaviv_iommuv1_domain *
to_etnaviv_domain(struct etnaviv_iommu_domain *domain)
{
	return container_of(domain, struct etnaviv_iommuv1_domain, base);
}
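/*
 * Allocate the scratch "bad" page and the page table itself, then point
 * every entry at the bad page so that stray GPU accesses to unmapped
 * addresses hit a recognizable 0xdead55aa pattern instead of random memory.
 */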
static int __etnaviv_iommu_init(struct etnaviv_iommuv1_domain *etnaviv_domain)
{
	u32 *p;
	int i;

	etnaviv_domain->base.bad_page_cpu =
			dma_alloc_wc(etnaviv_domain->base.dev, SZ_4K,
				     &etnaviv_domain->base.bad_page_dma,
				     GFP_KERNEL);
	if (!etnaviv_domain->base.bad_page_cpu)
		return -ENOMEM;

	p = etnaviv_domain->base.bad_page_cpu;
	for (i = 0; i < SZ_4K / 4; i++)
		*p++ = 0xdead55aa;

	etnaviv_domain->pgtable_cpu = dma_alloc_wc(etnaviv_domain->base.dev,
						   PT_SIZE,
						   &etnaviv_domain->pgtable_dma,
						   GFP_KERNEL);
	if (!etnaviv_domain->pgtable_cpu) {
		dma_free_wc(etnaviv_domain->base.dev, SZ_4K,
			    etnaviv_domain->base.bad_page_cpu,
			    etnaviv_domain->base.bad_page_dma);
		return -ENOMEM;
	}

	memset32(etnaviv_domain->pgtable_cpu, etnaviv_domain->base.bad_page_dma,
		 PT_ENTRIES);

	return 0;
}
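/* Free the page table and the bad page before releasing the domain itself. */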
static void etnaviv_iommuv1_domain_free(struct etnaviv_iommu_domain *domain)
{
	struct etnaviv_iommuv1_domain *etnaviv_domain =
			to_etnaviv_domain(domain);

	dma_free_wc(etnaviv_domain->base.dev, PT_SIZE,
		    etnaviv_domain->pgtable_cpu, etnaviv_domain->pgtable_dma);

	dma_free_wc(etnaviv_domain->base.dev, SZ_4K,
		    etnaviv_domain->base.bad_page_cpu,
		    etnaviv_domain->base.bad_page_dma);

	kfree(etnaviv_domain);
}
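/*
 * Install a single translation. The v1 MMU only supports 4 KiB pages, so
 * the entry index is simply the page offset from the start of the GPU
 * memory window.
 */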
static int etnaviv_iommuv1_map(struct etnaviv_iommu_domain *domain,
			       unsigned long iova, phys_addr_t paddr,
			       size_t size, int prot)
{
	struct etnaviv_iommuv1_domain *etnaviv_domain = to_etnaviv_domain(domain);
	unsigned int index = (iova - GPU_MEM_START) / SZ_4K;

	if (size != SZ_4K)
		return -EINVAL;

	etnaviv_domain->pgtable_cpu[index] = paddr;

	return 0;
}
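/*
 * Tear down a translation by pointing the entry back at the bad page,
 * rather than leaving a stale physical address in the table.
 */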
static size_t etnaviv_iommuv1_unmap(struct etnaviv_iommu_domain *domain,
	unsigned long iova, size_t size)
{
	struct etnaviv_iommuv1_domain *etnaviv_domain =
			to_etnaviv_domain(domain);
	unsigned int index = (iova - GPU_MEM_START) / SZ_4K;

	if (size != SZ_4K)
		return -EINVAL;

	etnaviv_domain->pgtable_cpu[index] = etnaviv_domain->base.bad_page_dma;

	return SZ_4K;
}
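/* A coredump of this domain is a verbatim copy of the flat page table. */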
static size_t etnaviv_iommuv1_dump_size(struct etnaviv_iommu_domain *domain)
{
	return PT_SIZE;
}

static void etnaviv_iommuv1_dump(struct etnaviv_iommu_domain *domain, void *buf)
{
	struct etnaviv_iommuv1_domain *etnaviv_domain =
			to_etnaviv_domain(domain);

	memcpy(buf, etnaviv_domain->pgtable_cpu, PT_SIZE);
}
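/*
 * Reprogram the MC when the GPU is (re)initialized: first the memory
 * window base for each engine, then the page table address for each
 * engine's MMU.
 */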
void etnaviv_iommuv1_restore(struct etnaviv_gpu *gpu)
{
	struct etnaviv_iommuv1_domain *etnaviv_domain =
			to_etnaviv_domain(gpu->mmu->domain);
	u32 pgtable;

	/* set base addresses */
	gpu_write(gpu, VIVS_MC_MEMORY_BASE_ADDR_RA, gpu->memory_base);
	gpu_write(gpu, VIVS_MC_MEMORY_BASE_ADDR_FE, gpu->memory_base);
	gpu_write(gpu, VIVS_MC_MEMORY_BASE_ADDR_TX, gpu->memory_base);
	gpu_write(gpu, VIVS_MC_MEMORY_BASE_ADDR_PEZ, gpu->memory_base);
	gpu_write(gpu, VIVS_MC_MEMORY_BASE_ADDR_PE, gpu->memory_base);

	/* set page table address in MC */
	pgtable = (u32)etnaviv_domain->pgtable_dma;

	gpu_write(gpu, VIVS_MC_MMU_FE_PAGE_TABLE, pgtable);
	gpu_write(gpu, VIVS_MC_MMU_TX_PAGE_TABLE, pgtable);
	gpu_write(gpu, VIVS_MC_MMU_PE_PAGE_TABLE, pgtable);
	gpu_write(gpu, VIVS_MC_MMU_PEZ_PAGE_TABLE, pgtable);
	gpu_write(gpu, VIVS_MC_MMU_RA_PAGE_TABLE, pgtable);
}
static const struct etnaviv_iommu_domain_ops etnaviv_iommuv1_ops = {
	.free = etnaviv_iommuv1_domain_free,
	.map = etnaviv_iommuv1_map,
	.unmap = etnaviv_iommuv1_unmap,
	.dump_size = etnaviv_iommuv1_dump_size,
	.dump = etnaviv_iommuv1_dump,
};
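/* Allocate a v1 MMU domain covering the 2 GiB window above GPU_MEM_START. */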
struct etnaviv_iommu_domain *
etnaviv_iommuv1_domain_alloc(struct etnaviv_gpu *gpu)
{
	struct etnaviv_iommuv1_domain *etnaviv_domain;
	struct etnaviv_iommu_domain *domain;
	int ret;

	etnaviv_domain = kzalloc(sizeof(*etnaviv_domain), GFP_KERNEL);
	if (!etnaviv_domain)
		return NULL;

	domain = &etnaviv_domain->base;

	domain->dev = gpu->dev;
	domain->base = GPU_MEM_START;
	domain->size = PT_ENTRIES * SZ_4K;
	domain->ops = &etnaviv_iommuv1_ops;

	ret = __etnaviv_iommu_init(etnaviv_domain);
	if (ret)
		goto out_free;

	return &etnaviv_domain->base;

out_free:
	kfree(etnaviv_domain);
	return NULL;
}