etnaviv_dump.c

// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2015-2018 Etnaviv Project
 */

#include <linux/devcoredump.h>
#include "etnaviv_cmdbuf.h"
#include "etnaviv_dump.h"
#include "etnaviv_gem.h"
#include "etnaviv_gpu.h"
#include "etnaviv_mmu.h"
#include "etnaviv_sched.h"
#include "state.xml.h"
#include "state_hi.xml.h"
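
/*
 * Dump state on the first hang only: the flag is cleared once a dump has
 * been taken and must be re-armed by writing 1 to the dump_core module
 * parameter before the next hang will be captured.
 */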
static bool etnaviv_dump_core = true;
module_param_named(dump_core, etnaviv_dump_core, bool, 0600);
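
/*
 * Iterator over the coredump file under construction: @hdr points at the
 * next object header to fill, @data at the next payload position, and
 * @start at the beginning of the file, used to compute file offsets.
 */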
struct core_dump_iterator {
	void *start;
	struct etnaviv_dump_object_header *hdr;
	void *data;
};
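
/* Registers snapshotted into the ETDUMP_BUF_REG section of the dump. */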
static const unsigned short etnaviv_dump_registers[] = {
	VIVS_HI_AXI_STATUS,
	VIVS_HI_CLOCK_CONTROL,
	VIVS_HI_IDLE_STATE,
	VIVS_HI_AXI_CONFIG,
	VIVS_HI_INTR_ENBL,
	VIVS_HI_CHIP_IDENTITY,
	VIVS_HI_CHIP_FEATURE,
	VIVS_HI_CHIP_MODEL,
	VIVS_HI_CHIP_REV,
	VIVS_HI_CHIP_DATE,
	VIVS_HI_CHIP_TIME,
	VIVS_HI_CHIP_MINOR_FEATURE_0,
	VIVS_HI_CACHE_CONTROL,
	VIVS_HI_AXI_CONTROL,
	VIVS_PM_POWER_CONTROLS,
	VIVS_PM_MODULE_CONTROLS,
	VIVS_PM_MODULE_STATUS,
	VIVS_PM_PULSE_EATER,
	VIVS_MC_MMU_FE_PAGE_TABLE,
	VIVS_MC_MMU_TX_PAGE_TABLE,
	VIVS_MC_MMU_PE_PAGE_TABLE,
	VIVS_MC_MMU_PEZ_PAGE_TABLE,
	VIVS_MC_MMU_RA_PAGE_TABLE,
	VIVS_MC_DEBUG_MEMORY,
	VIVS_MC_MEMORY_BASE_ADDR_RA,
	VIVS_MC_MEMORY_BASE_ADDR_FE,
	VIVS_MC_MEMORY_BASE_ADDR_TX,
	VIVS_MC_MEMORY_BASE_ADDR_PEZ,
	VIVS_MC_MEMORY_BASE_ADDR_PE,
	VIVS_MC_MEMORY_TIMING_CONTROL,
	VIVS_MC_BUS_CONFIG,
	VIVS_FE_DMA_STATUS,
	VIVS_FE_DMA_DEBUG_STATE,
	VIVS_FE_DMA_ADDRESS,
	VIVS_FE_DMA_LOW,
	VIVS_FE_DMA_HIGH,
	VIVS_FE_AUTO_FLUSH,
};
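
/*
 * Close out the object whose payload ends at @data_end: fill in the
 * current header, then step the iterator to the next header/payload pair.
 */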
static void etnaviv_core_dump_header(struct core_dump_iterator *iter,
	u32 type, void *data_end)
{
	struct etnaviv_dump_object_header *hdr = iter->hdr;

	hdr->magic = cpu_to_le32(ETDUMP_MAGIC);
	hdr->type = cpu_to_le32(type);
	hdr->file_offset = cpu_to_le32(iter->data - iter->start);
	hdr->file_size = cpu_to_le32(data_end - iter->data);

	iter->hdr++;
	iter->data += hdr->file_size;
}
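
/* Snapshot the registers listed above into an ETDUMP_BUF_REG object. */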
static void etnaviv_core_dump_registers(struct core_dump_iterator *iter,
	struct etnaviv_gpu *gpu)
{
	struct etnaviv_dump_registers *reg = iter->data;
	unsigned int i;

	for (i = 0; i < ARRAY_SIZE(etnaviv_dump_registers); i++, reg++) {
		reg->reg = etnaviv_dump_registers[i];
		reg->value = gpu_read(gpu, etnaviv_dump_registers[i]);
	}

	etnaviv_core_dump_header(iter, ETDUMP_BUF_REG, reg);
}
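
/* Copy the MMU page tables into an ETDUMP_BUF_MMU object. */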
static void etnaviv_core_dump_mmu(struct core_dump_iterator *iter,
	struct etnaviv_gpu *gpu, size_t mmu_size)
{
	etnaviv_iommu_dump(gpu->mmu, iter->data);

	etnaviv_core_dump_header(iter, ETDUMP_BUF_MMU, iter->data + mmu_size);
}
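
/*
 * Copy an arbitrary memory block of @size bytes into the dump, recording
 * the GPU address @iova it was mapped at in the object header.
 */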
static void etnaviv_core_dump_mem(struct core_dump_iterator *iter, u32 type,
	void *ptr, size_t size, u64 iova)
{
	memcpy(iter->data, ptr, size);

	iter->hdr->iova = cpu_to_le64(iova);

	etnaviv_core_dump_header(iter, type, iter->data + size);
}
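
/*
 * Build the devcoredump file for a hung GPU.  As laid out below, all
 * object headers come first, followed by the payloads in order: registers,
 * MMU state, kernel ring buffer, active command buffers, the physical page
 * map of the buffer objects (bomap), the buffer objects themselves, and an
 * end marker.  The total size is computed up front so the whole file can
 * be allocated in one go.
 */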
void etnaviv_core_dump(struct etnaviv_gpu *gpu)
{
	struct core_dump_iterator iter;
	struct etnaviv_vram_mapping *vram;
	struct etnaviv_gem_object *obj;
	struct etnaviv_gem_submit *submit;
	struct drm_sched_job *s_job;
	unsigned int n_obj, n_bomap_pages;
	size_t file_size, mmu_size;
	__le64 *bomap, *bomap_start;

	/* Only catch the first event, or when manually re-armed */
	if (!etnaviv_dump_core)
		return;
	etnaviv_dump_core = false;

	mutex_lock(&gpu->mmu->lock);

	mmu_size = etnaviv_iommu_dump_size(gpu->mmu);

	/* We always dump registers, mmu, ring and end marker */
	n_obj = 4;
	n_bomap_pages = 0;
	file_size = ARRAY_SIZE(etnaviv_dump_registers) *
			sizeof(struct etnaviv_dump_registers) +
		    mmu_size + gpu->buffer.size;

	/* Add in the active command buffers */
	spin_lock(&gpu->sched.job_list_lock);
	list_for_each_entry(s_job, &gpu->sched.ring_mirror_list, node) {
		submit = to_etnaviv_submit(s_job);
		file_size += submit->cmdbuf.size;
		n_obj++;
	}
	spin_unlock(&gpu->sched.job_list_lock);

	/* Add in the active buffer objects */
	list_for_each_entry(vram, &gpu->mmu->mappings, mmu_node) {
		if (!vram->use)
			continue;

		obj = vram->object;
		file_size += obj->base.size;
		n_bomap_pages += obj->base.size >> PAGE_SHIFT;
		n_obj++;
	}

	/* If we have any buffer objects, add a bomap object */
	if (n_bomap_pages) {
		file_size += n_bomap_pages * sizeof(__le64);
		n_obj++;
	}

	/* Add the size of the headers */
	file_size += sizeof(*iter.hdr) * n_obj;

	/* Allocate the file in vmalloc memory, it's likely to be big */
	iter.start = __vmalloc(file_size, GFP_KERNEL | __GFP_NOWARN | __GFP_NORETRY,
			       PAGE_KERNEL);
	if (!iter.start) {
		mutex_unlock(&gpu->mmu->lock);
		dev_warn(gpu->dev, "failed to allocate devcoredump file\n");
		return;
	}

	/* Point the data member after the headers */
	iter.hdr = iter.start;
	iter.data = &iter.hdr[n_obj];

	memset(iter.hdr, 0, iter.data - iter.start);

	etnaviv_core_dump_registers(&iter, gpu);
	etnaviv_core_dump_mmu(&iter, gpu, mmu_size);
	etnaviv_core_dump_mem(&iter, ETDUMP_BUF_RING, gpu->buffer.vaddr,
			      gpu->buffer.size,
			      etnaviv_cmdbuf_get_va(&gpu->buffer));

	spin_lock(&gpu->sched.job_list_lock);
	list_for_each_entry(s_job, &gpu->sched.ring_mirror_list, node) {
		submit = to_etnaviv_submit(s_job);
		etnaviv_core_dump_mem(&iter, ETDUMP_BUF_CMD,
				      submit->cmdbuf.vaddr, submit->cmdbuf.size,
				      etnaviv_cmdbuf_get_va(&submit->cmdbuf));
	}
	spin_unlock(&gpu->sched.job_list_lock);

	/* Reserve space for the bomap */
	if (n_bomap_pages) {
		bomap_start = bomap = iter.data;
		memset(bomap, 0, sizeof(*bomap) * n_bomap_pages);
		etnaviv_core_dump_header(&iter, ETDUMP_BUF_BOMAP,
					 bomap + n_bomap_pages);
	} else {
		/* Silence warning */
		bomap_start = bomap = NULL;
	}

	list_for_each_entry(vram, &gpu->mmu->mappings, mmu_node) {
		struct page **pages;
		void *vaddr;

		if (vram->use == 0)
			continue;

		obj = vram->object;

		mutex_lock(&obj->lock);
		pages = etnaviv_gem_get_pages(obj);
		mutex_unlock(&obj->lock);
		if (!IS_ERR(pages)) {
			int j;

			iter.hdr->data[0] = bomap - bomap_start;

			for (j = 0; j < obj->base.size >> PAGE_SHIFT; j++)
				*bomap++ = cpu_to_le64(page_to_phys(*pages++));
		}

		iter.hdr->iova = cpu_to_le64(vram->iova);

		vaddr = etnaviv_gem_vmap(&obj->base);
		if (vaddr)
			memcpy(iter.data, vaddr, obj->base.size);

		etnaviv_core_dump_header(&iter, ETDUMP_BUF_BO, iter.data +
					 obj->base.size);
	}

	mutex_unlock(&gpu->mmu->lock);

	etnaviv_core_dump_header(&iter, ETDUMP_BUF_END, iter.data);

	dev_coredumpv(gpu->dev, iter.start, iter.data - iter.start, GFP_KERNEL);
}