/* SPDX-License-Identifier: GPL-2.0-only */
/*
 *  linux/arch/arm/mm/cache-v4wt.S
 *
 *  Copyright (C) 1997-2002 Russell King
 *
 *  ARMv4 write-through cache operations support.
 *
 *  We assume that the write buffer is not enabled.
 */
#include <linux/linkage.h>
#include <linux/init.h>
#include <linux/cfi_types.h>
#include <asm/assembler.h>
#include <asm/page.h>
#include "proc-macros.S"
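
/*
 * The v4wt_* routines below implement the kernel's CPU cache operations
 * for ARMv4 write-through caches.  In the complete file they are
 * typically collected into a cpu_cache_fns table by the
 * define_cache_functions helper from "proc-macros.S"; that wiring lies
 * outside this excerpt and is assumed rather than shown here.
 */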

/*
 * The size of one data cache line.
 */
#define CACHE_DLINESIZE	32

/*
 * The number of data cache segments.
 */
#define CACHE_DSEGMENTS	8

/*
 * The number of lines in a cache segment.
 */
#define CACHE_DENTRIES	64

/*
 * This is the size at which it becomes more efficient to
 * clean the whole cache, rather than using the individual
 * cache line maintenance instructions.
 *
 * *** This needs benchmarking
 */
#define CACHE_DLIMIT	16384
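
/*
 * Sanity check on the geometry above: CACHE_DSEGMENTS * CACHE_DENTRIES *
 * CACHE_DLINESIZE = 8 * 64 * 32 = 16384 bytes, so CACHE_DLIMIT as chosen
 * here corresponds to the full data cache size implied by these
 * constants.  The exact crossover point is a tuning assumption, per the
 * benchmarking note above.
 */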

/*
 * flush_icache_all()
 *
 * Unconditionally clean and invalidate the entire icache.
 */
SYM_TYPED_FUNC_START(v4wt_flush_icache_all)
	mov	r0, #0
	mcr	p15, 0, r0, c7, c5, 0		@ invalidate I cache
	ret	lr
SYM_FUNC_END(v4wt_flush_icache_all)

/*
 * flush_user_cache_all()
 *
 * Invalidate all cache entries in a particular address
 * space.
 */
SYM_FUNC_ALIAS(v4wt_flush_user_cache_all, v4wt_flush_kern_cache_all)
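
/*
 * The alias above works because an ARMv4 VIVT cache offers no
 * maintenance operation scoped to a single address space: flushing
 * "all entries for one mm" can only be done by flushing the whole
 * cache, so the user and kernel variants end up being the same routine.
 */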

/*
 * flush_kern_cache_all()
 *
 * Clean and invalidate the entire cache.
 */
SYM_TYPED_FUNC_START(v4wt_flush_kern_cache_all)
	mov	r2, #VM_EXEC			@ ensure the I cache is invalidated too
	mov	ip, #0
__flush_whole_cache:				@ also entered from v4wt_flush_user_cache_range
	tst	r2, #VM_EXEC
	mcrne	p15, 0, ip, c7, c5, 0		@ invalidate I cache
	mcr	p15, 0, ip, c7, c6, 0		@ invalidate D cache
	ret	lr
SYM_FUNC_END(v4wt_flush_kern_cache_all)

/*
 * flush_user_cache_range(start, end, flags)
 *
 * Clean and invalidate a range of cache entries in the specified
 * address space.
 *
 * - start - start address (inclusive, page aligned)
 * - end   - end address (exclusive, page aligned)
 * - flags - vm_area_struct flags describing address space
 */
SYM_TYPED_FUNC_START(v4wt_flush_user_cache_range)
	sub	r3, r1, r0			@ calculate total size
	cmp	r3, #CACHE_DLIMIT
	bhs	__flush_whole_cache		@ range too large: flush the whole cache
1:	mcr	p15, 0, r0, c7, c6, 1		@ invalidate D entry
	tst	r2, #VM_EXEC
	mcrne	p15, 0, r0, c7, c5, 1		@ invalidate I entry
	add	r0, r0, #CACHE_DLINESIZE
	cmp	r0, r1
	blo	1b
	ret	lr
SYM_FUNC_END(v4wt_flush_user_cache_range)

/*
 * coherent_kern_range(start, end)
 *
 * Ensure coherency between the Icache and the Dcache in the
 * region described by start and end.  If you have non-snooping
 * Harvard caches, you need to implement this function.
 *
 * - start - virtual start address
 * - end   - virtual end address
 */
SYM_TYPED_FUNC_START(v4wt_coherent_kern_range)
#ifdef CONFIG_CFI_CLANG /* Fallthrough if !CFI */
	b	v4wt_coherent_user_range
#endif
SYM_FUNC_END(v4wt_coherent_kern_range)

/*
 * coherent_user_range(start, end)
 *
 * Ensure coherency between the Icache and the Dcache in the
 * region described by start and end.  If you have non-snooping
 * Harvard caches, you need to implement this function.
 *
 * - start - virtual start address
 * - end   - virtual end address
 */
SYM_TYPED_FUNC_START(v4wt_coherent_user_range)
	bic	r0, r0, #CACHE_DLINESIZE - 1	@ align start down to a cache line
1:	mcr	p15, 0, r0, c7, c5, 1		@ invalidate I entry
	add	r0, r0, #CACHE_DLINESIZE
	cmp	r0, r1
	blo	1b
	mov	r0, #0				@ return 0 (success)
	ret	lr
SYM_FUNC_END(v4wt_coherent_user_range)

/*
 * flush_kern_dcache_area(void *addr, size_t size)
 *
 * Ensure no D cache aliasing occurs, either with itself or
 * the I cache.
 *
 * - addr - kernel address
 * - size - region size
 */
SYM_TYPED_FUNC_START(v4wt_flush_kern_dcache_area)
	mov	r2, #0
	mcr	p15, 0, r2, c7, c5, 0		@ invalidate I cache
	add	r1, r0, r1			@ convert size to an end address
	b	v4wt_dma_inv_range		@ invalidate the D cache lines
SYM_FUNC_END(v4wt_flush_kern_dcache_area)

/*
 * dma_inv_range(start, end)
 *
 * Invalidate (discard) the specified virtual address range.
 * May not write back any entries.  If 'start' or 'end'
 * are not cache line aligned, those lines must be written
 * back.
 *
 * - start - virtual start address
 * - end   - virtual end address
 */
v4wt_dma_inv_range:
	bic	r0, r0, #CACHE_DLINESIZE - 1
1:	mcr	p15, 0, r0, c7, c6, 1		@ invalidate D entry
	add	r0, r0, #CACHE_DLINESIZE
	cmp	r0, r1
	blo	1b
	ret	lr
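
/*
 * On a write-through cache no line is ever dirty, so there is nothing
 * that could be written back here: plain invalidation satisfies both
 * the dma_inv_range contract above and the dma_flush_range
 * (clean+invalidate) contract below, which is why v4wt_dma_flush_range
 * simply branches to v4wt_dma_inv_range.
 */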

/*
 * dma_flush_range(start, end)
 *
 * Clean and invalidate the specified virtual address range.
 *
 * - start - virtual start address
 * - end   - virtual end address
 */
SYM_TYPED_FUNC_START(v4wt_dma_flush_range)
	b	v4wt_dma_inv_range
SYM_FUNC_END(v4wt_dma_flush_range)

/*
 * dma_unmap_area(start, size, dir)
 * - start - kernel virtual start address
 * - size  - size of region
 * - dir   - DMA direction
 */
SYM_TYPED_FUNC_START(v4wt_dma_unmap_area)
	add	r1, r1, r0			@ convert size to an end address
	teq	r2, #DMA_TO_DEVICE
	bne	v4wt_dma_inv_range		@ invalidate unless data only went to the device
	ret	lr
SYM_FUNC_END(v4wt_dma_unmap_area)

/*
 * dma_map_area(start, size, dir)
 * - start - kernel virtual start address
 * - size  - size of region
 * - dir   - DMA direction
 */
SYM_TYPED_FUNC_START(v4wt_dma_map_area)
	ret	lr
SYM_FUNC_END(v4wt_dma_map_area)
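
/*
 * dma_map_area() can be a no-op here because a write-through cache
 * never holds data that main memory lacks, so the cleaning step a
 * write-back implementation would need before a DMA transfer is
 * unnecessary.  Invalidation is still required after DMA from a
 * device, which is handled by dma_unmap_area() above.
 */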