/* coherency_ll.S — Marvell Armada XP coherency fabric low-level helpers */
  1. /* SPDX-License-Identifier: GPL-2.0-only */
  2. /*
  3. * Coherency fabric: low level functions
  4. *
  5. * Copyright (C) 2012 Marvell
  6. *
  7. * Gregory CLEMENT <gregory.clement@free-electrons.com>
  8. *
  9. * This file implements the assembly function to add a CPU to the
  10. * coherency fabric. This function is called by each of the secondary
  11. * CPUs during their early boot in an SMP kernel, this why this
  12. * function have to callable from assembly. It can also be called by a
  13. * primary CPU from C code during its boot.
  14. */
  15. #include <linux/linkage.h>
  16. #define ARMADA_XP_CFB_CTL_REG_OFFSET 0x0
  17. #define ARMADA_XP_CFB_CFG_REG_OFFSET 0x4
  18. #include <asm/assembler.h>
  19. #include <asm/cp15.h>
  20. .arch armv7-a
  21. .text
  22. /*
  23. * Returns the coherency base address in r1 (r0 is untouched), or 0 if
  24. * the coherency fabric is not enabled.
  25. */
  26. ENTRY(ll_get_coherency_base)
  27. mrc p15, 0, r1, c1, c0, 0
  28. tst r1, #CR_M @ Check MMU bit enabled
  29. bne 1f
  30. /*
  31. * MMU is disabled, use the physical address of the coherency
  32. * base address, (or 0x0 if the coherency fabric is not mapped)
  33. */
  34. adr r1, 3f
  35. ldr r3, [r1]
  36. ldr r1, [r1, r3]
  37. b 2f
  38. 1:
  39. /*
  40. * MMU is enabled, use the virtual address of the coherency
  41. * base address.
  42. */
  43. ldr r1, =coherency_base
  44. ldr r1, [r1]
  45. 2:
  46. ret lr
  47. ENDPROC(ll_get_coherency_base)
  48. /*
  49. * Returns the coherency CPU mask in r3 (r0 is untouched). This
  50. * coherency CPU mask can be used with the coherency fabric
  51. * configuration and control registers. Note that the mask is already
  52. * endian-swapped as appropriate so that the calling functions do not
  53. * have to care about endianness issues while accessing the coherency
  54. * fabric registers
  55. */
  56. ENTRY(ll_get_coherency_cpumask)
  57. mrc p15, 0, r3, cr0, cr0, 5
  58. and r3, r3, #15
  59. mov r2, #(1 << 24)
  60. lsl r3, r2, r3
  61. ARM_BE8(rev r3, r3)
  62. ret lr
  63. ENDPROC(ll_get_coherency_cpumask)
  64. /*
  65. * ll_add_cpu_to_smp_group(), ll_enable_coherency() and
  66. * ll_disable_coherency() use the strex/ldrex instructions while the
  67. * MMU can be disabled. The Armada XP SoC has an exclusive monitor
  68. * that tracks transactions to Device and/or SO memory and thanks to
  69. * that, exclusive transactions are functional even when the MMU is
  70. * disabled.
  71. */
  72. ENTRY(ll_add_cpu_to_smp_group)
  73. /*
  74. * As r0 is not modified by ll_get_coherency_base() and
  75. * ll_get_coherency_cpumask(), we use it to temporarly save lr
  76. * and avoid it being modified by the branch and link
  77. * calls. This function is used very early in the secondary
  78. * CPU boot, and no stack is available at this point.
  79. */
  80. mov r0, lr
  81. bl ll_get_coherency_base
  82. /* Bail out if the coherency is not enabled */
  83. cmp r1, #0
  84. reteq r0
  85. bl ll_get_coherency_cpumask
  86. mov lr, r0
  87. add r0, r1, #ARMADA_XP_CFB_CFG_REG_OFFSET
  88. 1:
  89. ldrex r2, [r0]
  90. orr r2, r2, r3
  91. strex r1, r2, [r0]
  92. cmp r1, #0
  93. bne 1b
  94. ret lr
  95. ENDPROC(ll_add_cpu_to_smp_group)
  96. ENTRY(ll_enable_coherency)
  97. /*
  98. * As r0 is not modified by ll_get_coherency_base() and
  99. * ll_get_coherency_cpumask(), we use it to temporarly save lr
  100. * and avoid it being modified by the branch and link
  101. * calls. This function is used very early in the secondary
  102. * CPU boot, and no stack is available at this point.
  103. */
  104. mov r0, lr
  105. bl ll_get_coherency_base
  106. /* Bail out if the coherency is not enabled */
  107. cmp r1, #0
  108. reteq r0
  109. bl ll_get_coherency_cpumask
  110. mov lr, r0
  111. add r0, r1, #ARMADA_XP_CFB_CTL_REG_OFFSET
  112. 1:
  113. ldrex r2, [r0]
  114. orr r2, r2, r3
  115. strex r1, r2, [r0]
  116. cmp r1, #0
  117. bne 1b
  118. dsb
  119. mov r0, #0
  120. ret lr
  121. ENDPROC(ll_enable_coherency)
  122. ENTRY(ll_disable_coherency)
  123. /*
  124. * As r0 is not modified by ll_get_coherency_base() and
  125. * ll_get_coherency_cpumask(), we use it to temporarly save lr
  126. * and avoid it being modified by the branch and link
  127. * calls. This function is used very early in the secondary
  128. * CPU boot, and no stack is available at this point.
  129. */
  130. mov r0, lr
  131. bl ll_get_coherency_base
  132. /* Bail out if the coherency is not enabled */
  133. cmp r1, #0
  134. reteq r0
  135. bl ll_get_coherency_cpumask
  136. mov lr, r0
  137. add r0, r1, #ARMADA_XP_CFB_CTL_REG_OFFSET
  138. 1:
  139. ldrex r2, [r0]
  140. bic r2, r2, r3
  141. strex r1, r2, [r0]
  142. cmp r1, #0
  143. bne 1b
  144. dsb
  145. ret lr
  146. ENDPROC(ll_disable_coherency)
  147. .align 2
  148. 3:
  149. .long coherency_phys_base - .