sleep33xx.S

/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Low level suspend code for AM33XX SoCs
 *
 * Copyright (C) 2012-2018 Texas Instruments Incorporated - http://www.ti.com/
 *	Dave Gerlach, Vaibhav Bedia
 */

#include <generated/ti-pm-asm-offsets.h>
#include <linux/linkage.h>
#include <linux/platform_data/pm33xx.h>
#include <linux/ti-emif-sram.h>
#include <asm/assembler.h>
#include <asm/memory.h>

#include "iomap.h"
#include "cm33xx.h"

#define AM33XX_CM_CLKCTRL_MODULESTATE_DISABLED	0x00030000
#define AM33XX_CM_CLKCTRL_MODULEMODE_DISABLE	0x0003
#define AM33XX_CM_CLKCTRL_MODULEMODE_ENABLE	0x0002

/* replicated define because linux/bitops.h cannot be included in assembly */
#define BIT(nr)			(1 << (nr))

	.arm
	.align 3

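/*
 * am33xx_do_wfi is copied to and run from SRAM. On entry r0 holds the
 * wfi_flags bitmask describing how much context will be lost. If the
 * suspend attempt is aborted by a late interrupt the routine returns 1;
 * a real resume re-enters the kernel through cpu_resume with r0 = 0.
 */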
ENTRY(am33xx_do_wfi)
	stmfd	sp!, {r4 - r11, lr}	@ save registers on stack

	/* Save wfi_flags arg to data space */
	mov	r4, r0
	adr	r3, am33xx_pm_ro_sram_data
	ldr	r2, [r3, #AMX3_PM_RO_SRAM_DATA_VIRT_OFFSET]
	str	r4, [r2, #AMX3_PM_WFI_FLAGS_OFFSET]

	/* Only flush cache if we know we are losing MPU context */
	tst	r4, #WFI_FLAG_FLUSH_CACHE
	beq	cache_skip_flush

	/*
	 * Flush all data from the L1 and L2 data cache before disabling
	 * SCTLR.C bit.
	 */
	ldr	r1, kernel_flush
	blx	r1

	/*
	 * Clear the SCTLR.C bit to prevent further data cache
	 * allocation. Clearing SCTLR.C would make all the data accesses
	 * strongly ordered and would not hit the cache.
	 */
	mrc	p15, 0, r0, c1, c0, 0
	bic	r0, r0, #(1 << 2)	@ Disable the C bit
	mcr	p15, 0, r0, c1, c0, 0
	isb

	/*
	 * Invalidate L1 and L2 data cache.
	 */
	ldr	r1, kernel_flush
	blx	r1

	adr	r3, am33xx_pm_ro_sram_data
	ldr	r2, [r3, #AMX3_PM_RO_SRAM_DATA_VIRT_OFFSET]
	ldr	r4, [r2, #AMX3_PM_WFI_FLAGS_OFFSET]

cache_skip_flush:
	/* Check if we want self refresh */
	tst	r4, #WFI_FLAG_SELF_REFRESH
	beq	emif_skip_enter_sr

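	/* Put DDR into self refresh via the EMIF PM helper table in SRAM */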
	adr	r9, am33xx_emif_sram_table

	ldr	r3, [r9, #EMIF_PM_ENTER_SR_OFFSET]
	blx	r3

emif_skip_enter_sr:
	/* Only necessary if PER is losing context */
	tst	r4, #WFI_FLAG_SAVE_EMIF
	beq	emif_skip_save

	ldr	r3, [r9, #EMIF_PM_SAVE_CONTEXT_OFFSET]
	blx	r3

emif_skip_save:
	/* We can only disable EMIF if we have entered self refresh */
	tst	r4, #WFI_FLAG_SELF_REFRESH
	beq	emif_skip_disable

	/* Disable EMIF */
	ldr	r1, virt_emif_clkctrl
	ldr	r2, [r1]
	bic	r2, r2, #AM33XX_CM_CLKCTRL_MODULEMODE_DISABLE
	str	r2, [r1]

	ldr	r1, virt_emif_clkctrl
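	/* Poll CLKCTRL until the IDLEST field reports the module as DISABLED */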
wait_emif_disable:
	ldr	r2, [r1]
	mov	r3, #AM33XX_CM_CLKCTRL_MODULESTATE_DISABLED
	cmp	r2, r3
	bne	wait_emif_disable

emif_skip_disable:
	tst	r4, #WFI_FLAG_WAKE_M3
	beq	wkup_m3_skip

	/*
	 * For the MPU WFI to be registered as an interrupt
	 * to WKUP_M3, MPU_CLKCTRL.MODULEMODE needs to be set
	 * to DISABLED
	 */
	ldr	r1, virt_mpu_clkctrl
	ldr	r2, [r1]
	bic	r2, r2, #AM33XX_CM_CLKCTRL_MODULEMODE_DISABLE
	str	r2, [r1]

wkup_m3_skip:
	/*
	 * Execute an ISB instruction to ensure that all of the
	 * CP15 register changes have been committed.
	 */
	isb

	/*
	 * Execute a barrier instruction to ensure that all cache,
	 * TLB and branch predictor maintenance operations issued
	 * have completed.
	 */
	dsb
	dmb

	/*
	 * Execute a WFI instruction and wait until the
	 * STANDBYWFI output is asserted to indicate that the
	 * CPU is in idle and low power state. CPU can speculatively
	 * prefetch the instructions so add NOPs after WFI. Thirteen
	 * NOPs as per Cortex-A8 pipeline.
	 */
	wfi

	nop
	nop
	nop
	nop
	nop
	nop
	nop
	nop
	nop
	nop
	nop
	nop
	nop

	/* We come here in case of an abort due to a late interrupt */

	/* Set MPU_CLKCTRL.MODULEMODE back to ENABLE */
	ldr	r1, virt_mpu_clkctrl
	mov	r2, #AM33XX_CM_CLKCTRL_MODULEMODE_ENABLE
	str	r2, [r1]

	/* Re-enable EMIF */
	ldr	r1, virt_emif_clkctrl
	mov	r2, #AM33XX_CM_CLKCTRL_MODULEMODE_ENABLE
	str	r2, [r1]
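	/* Wait until the EMIF module reads back as fully enabled again */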
wait_emif_enable:
	ldr	r3, [r1]
	cmp	r2, r3
	bne	wait_emif_enable

	/* Only necessary if we actually entered self refresh */
	tst	r4, #WFI_FLAG_SELF_REFRESH
	beq	emif_skip_exit_sr_abt

	adr	r9, am33xx_emif_sram_table
	ldr	r1, [r9, #EMIF_PM_ABORT_SR_OFFSET]
	blx	r1

emif_skip_exit_sr_abt:
	tst	r4, #WFI_FLAG_FLUSH_CACHE
	beq	cache_skip_restore

	/*
	 * Set SCTLR.C bit to allow data cache allocation
	 */
	mrc	p15, 0, r0, c1, c0, 0
	orr	r0, r0, #(1 << 2)	@ Enable the C bit
	mcr	p15, 0, r0, c1, c0, 0
	isb

cache_skip_restore:
	/* Let the suspend code know about the abort */
	mov	r0, #1
	ldmfd	sp!, {r4 - r11, pc}	@ restore regs and return
ENDPROC(am33xx_do_wfi)

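/* Offset used to locate the deep sleep resume entry within the code copied to SRAM */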
	.align
ENTRY(am33xx_resume_offset)
	.word . - am33xx_do_wfi

ENTRY(am33xx_resume_from_deep_sleep)
	/* Re-enable EMIF */
	ldr	r0, phys_emif_clkctrl
	mov	r1, #AM33XX_CM_CLKCTRL_MODULEMODE_ENABLE
	str	r1, [r0]
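	/* Spin until the write to CLKCTRL has taken effect */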
wait_emif_enable1:
	ldr	r2, [r0]
	cmp	r1, r2
	bne	wait_emif_enable1

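	/* Restore the saved EMIF context and take DDR out of self refresh */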
	adr	r9, am33xx_emif_sram_table

	ldr	r1, [r9, #EMIF_PM_RESTORE_CONTEXT_OFFSET]
	blx	r1

	ldr	r1, [r9, #EMIF_PM_EXIT_SR_OFFSET]
	blx	r1

resume_to_ddr:
	/* We are back. Branch to the common CPU resume routine */
	mov	r0, #0
	ldr	pc, resume_addr
ENDPROC(am33xx_resume_from_deep_sleep)

/*
 * Local variables
 */
	.align
kernel_flush:
	.word	v7_flush_dcache_all
virt_mpu_clkctrl:
	.word	AM33XX_CM_MPU_MPU_CLKCTRL
virt_emif_clkctrl:
	.word	AM33XX_CM_PER_EMIF_CLKCTRL
phys_emif_clkctrl:
	.word	(AM33XX_CM_BASE + AM33XX_CM_PER_MOD + \
		 AM33XX_CM_PER_EMIF_CLKCTRL_OFFSET)

	.align 3
/* DDR related defines */
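/*
 * Space reserved for the EMIF PM function table; it is filled at runtime
 * with the entry points exported by the ti-emif-sram code.
 */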
am33xx_emif_sram_table:
	.space EMIF_PM_FUNCTIONS_SIZE

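/*
 * Table of code and data addresses handed to the pm33xx platform code so
 * that it can copy this routine and its data into SRAM.
 */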
ENTRY(am33xx_pm_sram)
	.word am33xx_do_wfi
	.word am33xx_do_wfi_sz
	.word am33xx_resume_offset
	.word am33xx_emif_sram_table
	.word am33xx_pm_ro_sram_data

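/* Physical address of the generic cpu_resume routine (DDR starts at 0x80000000 on AM335x) */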
resume_addr:
	.word	cpu_resume - PAGE_OFFSET + 0x80000000
	.align 3

ENTRY(am33xx_pm_ro_sram_data)
	.space AMX3_PM_RO_SRAM_DATA_SIZE

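/* Size of the am33xx_do_wfi code, used when copying it into SRAM */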
ENTRY(am33xx_do_wfi_sz)
	.word	. - am33xx_do_wfi