  1. /*
  2. * OMAP4+ CPU idle Routines
  3. *
  4. * Copyright (C) 2011-2013 Texas Instruments, Inc.
  5. * Santosh Shilimkar <santosh.shilimkar@ti.com>
  6. * Rajendra Nayak <rnayak@ti.com>
  7. *
  8. * This program is free software; you can redistribute it and/or modify
  9. * it under the terms of the GNU General Public License version 2 as
  10. * published by the Free Software Foundation.
  11. */
  12. #include <linux/sched.h>
  13. #include <linux/cpuidle.h>
  14. #include <linux/cpu_pm.h>
  15. #include <linux/export.h>
  16. #include <linux/tick.h>
  17. #include <asm/cpuidle.h>
  18. #include "common.h"
  19. #include "pm.h"
  20. #include "prm.h"
  21. #include "soc.h"
  22. #include "clockdomain.h"
#define MAX_CPUS	2

/* Machine specific information */
struct idle_statedata {
	u32 cpu_state;		/* target power state for the CPUx powerdomain */
	u32 mpu_logic_state;	/* MPU logic state while the domain is in retention */
	u32 mpu_state;		/* target power state for the MPU powerdomain */
	u32 mpu_state_vote;	/* CPUs currently voting for this MPU state (SMP path) */
};
/*
 * OMAP4 C-state table, indexed by cpuidle state index.
 * Entries mirror C1/C2/C3 of omap4_idle_driver below.
 */
static struct idle_statedata omap4_idle_data[] = {
	{
		/* C1: CPUx ON, MPUSS ON */
		.cpu_state = PWRDM_POWER_ON,
		.mpu_state = PWRDM_POWER_ON,
		.mpu_logic_state = PWRDM_POWER_RET,
	},
	{
		/* C2: CPUx OFF, MPUSS CSWR (retention, logic retained) */
		.cpu_state = PWRDM_POWER_OFF,
		.mpu_state = PWRDM_POWER_RET,
		.mpu_logic_state = PWRDM_POWER_RET,
	},
	{
		/* C3: CPUx OFF, MPUSS OSWR (retention, logic off) */
		.cpu_state = PWRDM_POWER_OFF,
		.mpu_state = PWRDM_POWER_RET,
		.mpu_logic_state = PWRDM_POWER_OFF,
	},
};
/*
 * OMAP5 C-state table, indexed by cpuidle state index.
 * Entries mirror C1/C2 of omap5_idle_driver below.
 */
static struct idle_statedata omap5_idle_data[] = {
	{
		/* C1: CPUx WFI, MPUSS ON */
		.cpu_state = PWRDM_POWER_ON,
		.mpu_state = PWRDM_POWER_ON,
		.mpu_logic_state = PWRDM_POWER_ON,
	},
	{
		/* C2: CPUx CSWR, MPUSS CSWR */
		.cpu_state = PWRDM_POWER_RET,
		.mpu_state = PWRDM_POWER_RET,
		.mpu_logic_state = PWRDM_POWER_RET,
	},
};
/* Power/clock domain handles, resolved once in omap4_idle_init() */
static struct powerdomain *mpu_pd, *cpu_pd[MAX_CPUS];
static struct clockdomain *cpu_clkdm[MAX_CPUS];

/* Rendezvous barrier for the coupled-idle fail/exit path */
static atomic_t abort_barrier;

/* Per-CPU flag: set after the CPU has been through omap4_enter_lowpower() */
static bool cpu_done[MAX_CPUS];

/* Active SoC's C-state table; defaults to OMAP4, switched at init */
static struct idle_statedata *state_ptr = &omap4_idle_data[0];

/* Serializes the MPU-state voting in omap_enter_idle_smp() */
static DEFINE_RAW_SPINLOCK(mpu_lock);
/* Private functions */

/**
 * omap_enter_idle_[simple/coupled] - OMAP4PLUS cpuidle entry functions
 * @dev: cpuidle device
 * @drv: cpuidle driver
 * @index: the index of state to be entered
 *
 * Called from the CPUidle framework to program the device to the
 * specified low power state selected by the governor.
 * Returns the amount of time spent in the low power state.
 * NOTE(review): the functions actually return @index (the entered state),
 * per the current cpuidle ->enter() convention.
 */
static int omap_enter_idle_simple(struct cpuidle_device *dev,
				  struct cpuidle_driver *drv,
				  int index)
{
	/* C1: plain WFI on the local CPU, no power-domain reprogramming. */
	omap_do_wfi();
	return index;
}
/*
 * omap_enter_idle_smp - OMAP5 SMP C-state entry with MPU-state voting.
 *
 * Each CPU entering this state takes a vote under mpu_lock; the MPU
 * powerdomain is only programmed to the (deeper) target state once
 * every online CPU has voted for it.
 */
static int omap_enter_idle_smp(struct cpuidle_device *dev,
			       struct cpuidle_driver *drv,
			       int index)
{
	struct idle_statedata *cx = state_ptr + index;
	unsigned long flag;

	raw_spin_lock_irqsave(&mpu_lock, flag);
	cx->mpu_state_vote++;
	/* Last CPU in: all CPUs agree, so program the MPU domain. */
	if (cx->mpu_state_vote == num_online_cpus()) {
		pwrdm_set_logic_retst(mpu_pd, cx->mpu_logic_state);
		omap_set_pwrdm_state(mpu_pd, cx->mpu_state);
	}
	raw_spin_unlock_irqrestore(&mpu_lock, flag);

	omap4_enter_lowpower(dev->cpu, cx->cpu_state);

	raw_spin_lock_irqsave(&mpu_lock, flag);
	/*
	 * First CPU out: the vote count is still full here, so this CPU
	 * restores the MPU domain to ON before dropping its vote.
	 */
	if (cx->mpu_state_vote == num_online_cpus())
		omap_set_pwrdm_state(mpu_pd, PWRDM_POWER_ON);
	cx->mpu_state_vote--;
	raw_spin_unlock_irqrestore(&mpu_lock, flag);

	return index;
}
/*
 * omap_enter_idle_coupled - OMAP4 coupled C-state entry (C2/C3).
 *
 * Both CPUs enter here together under the cpuidle "coupled" framework:
 * CPU0 programs the MPUSS state and must be the last one down and the
 * one to wake CPU1 back up.
 */
static int omap_enter_idle_coupled(struct cpuidle_device *dev,
				   struct cpuidle_driver *drv,
				   int index)
{
	struct idle_statedata *cx = state_ptr + index;
	u32 mpuss_can_lose_context = 0;

	/*
	 * CPU0 has to wait and stay ON until CPU1 is OFF state.
	 * This is necessary to honour hardware recommendation
	 * of triggering all the possible low power modes once CPU1 is
	 * out of coherency and in OFF mode.
	 */
	if (dev->cpu == 0 && cpumask_test_cpu(1, cpu_online_mask)) {
		while (pwrdm_read_pwrst(cpu_pd[1]) != PWRDM_POWER_OFF) {
			cpu_relax();

			/*
			 * CPU1 could have already entered & exited idle
			 * without hitting off because of a wakeup
			 * or a failed attempt to hit off mode. Check for
			 * that here, otherwise we could spin forever
			 * waiting for CPU1 off.
			 */
			if (cpu_done[1])
				goto fail;
		}
	}

	/* MPUSS context is lost only in OSWR: domain RET with logic OFF. */
	mpuss_can_lose_context = (cx->mpu_state == PWRDM_POWER_RET) &&
				 (cx->mpu_logic_state == PWRDM_POWER_OFF);

	/* Enter broadcast mode for periodic timers */
	tick_broadcast_enable();

	/* Enter broadcast mode for one-shot timers */
	tick_broadcast_enter();

	/*
	 * Call idle CPU PM enter notifier chain so that
	 * VFP and per CPU interrupt context is saved.
	 */
	cpu_pm_enter();

	if (dev->cpu == 0) {
		pwrdm_set_logic_retst(mpu_pd, cx->mpu_logic_state);
		omap_set_pwrdm_state(mpu_pd, cx->mpu_state);

		/*
		 * Call idle CPU cluster PM enter notifier chain
		 * to save GIC and wakeupgen context.
		 */
		if (mpuss_can_lose_context)
			cpu_cluster_pm_enter();
	}

	omap4_enter_lowpower(dev->cpu, cx->cpu_state);
	cpu_done[dev->cpu] = true;

	/* Wakeup CPU1 only if it is not offlined */
	if (dev->cpu == 0 && cpumask_test_cpu(1, cpu_online_mask)) {

		/*
		 * ROM SMP-boot erratum workaround: disable the GIC
		 * distributor around CPU1's wakeup when context was lost.
		 */
		if (IS_PM44XX_ERRATUM(PM_OMAP4_ROM_SMP_BOOT_ERRATUM_GICD) &&
		    mpuss_can_lose_context)
			gic_dist_disable();

		/* Keep CPU1's clockdomain active while forcing it back ON. */
		clkdm_deny_idle(cpu_clkdm[1]);
		omap_set_pwrdm_state(cpu_pd[1], PWRDM_POWER_ON);
		clkdm_allow_idle(cpu_clkdm[1]);

		if (IS_PM44XX_ERRATUM(PM_OMAP4_ROM_SMP_BOOT_ERRATUM_GICD) &&
		    mpuss_can_lose_context) {
			/*
			 * Busy-wait until the distributor has been
			 * re-enabled (by CPU1's wakeup path — see the
			 * erratum handling in the MPUSS PM code), then
			 * retrigger any timer interrupt that may have
			 * been swallowed while it was off.
			 */
			while (gic_dist_disabled()) {
				udelay(1);
				cpu_relax();
			}
			gic_timer_retrigger();
		}
	}

	/*
	 * Call idle CPU PM exit notifier chain to restore
	 * VFP and per CPU IRQ context.
	 */
	cpu_pm_exit();

	/*
	 * Call idle CPU cluster PM exit notifier chain
	 * to restore GIC and wakeupgen context.
	 */
	if (dev->cpu == 0 && mpuss_can_lose_context)
		cpu_cluster_pm_exit();

	tick_broadcast_exit();

fail:
	/* Rendezvous with the other CPU so neither races back into idle. */
	cpuidle_coupled_parallel_barrier(dev, &abort_barrier);
	cpu_done[dev->cpu] = false;

	return index;
}
/*
 * OMAP4 cpuidle driver. State order must match omap4_idle_data[];
 * latencies/residencies are in microseconds (cpuidle convention).
 * C1 doubles as the safe fallback state (safe_state_index = 0).
 */
static struct cpuidle_driver omap4_idle_driver = {
	.name				= "omap4_idle",
	.owner				= THIS_MODULE,
	.states = {
		{
			/* C1 - CPU0 ON + CPU1 ON + MPU ON */
			.exit_latency = 2 + 2,
			.target_residency = 5,
			.enter = omap_enter_idle_simple,
			.name = "C1",
			.desc = "CPUx ON, MPUSS ON"
		},
		{
			/* C2 - CPU0 OFF + CPU1 OFF + MPU CSWR */
			.exit_latency = 328 + 440,
			.target_residency = 960,
			.flags = CPUIDLE_FLAG_COUPLED,
			.enter = omap_enter_idle_coupled,
			.name = "C2",
			.desc = "CPUx OFF, MPUSS CSWR",
		},
		{
			/* C3 - CPU0 OFF + CPU1 OFF + MPU OSWR */
			.exit_latency = 460 + 518,
			.target_residency = 1100,
			.flags = CPUIDLE_FLAG_COUPLED,
			.enter = omap_enter_idle_coupled,
			.name = "C3",
			.desc = "CPUx OFF, MPUSS OSWR",
		},
	},
	.state_count = ARRAY_SIZE(omap4_idle_data),
	.safe_state_index = 0,
};
/*
 * OMAP5 cpuidle driver. State order must match omap5_idle_data[];
 * latencies/residencies are in microseconds (cpuidle convention).
 * C2 uses the per-CPU voting path rather than coupled idle.
 */
static struct cpuidle_driver omap5_idle_driver = {
	.name				= "omap5_idle",
	.owner				= THIS_MODULE,
	.states = {
		{
			/* C1 - CPU0 ON + CPU1 ON + MPU ON */
			.exit_latency = 2 + 2,
			.target_residency = 5,
			.enter = omap_enter_idle_simple,
			.name = "C1",
			.desc = "CPUx WFI, MPUSS ON"
		},
		{
			/* C2 - CPU0 RET + CPU1 RET + MPU CSWR */
			.exit_latency = 48 + 60,
			.target_residency = 100,
			.flags = CPUIDLE_FLAG_TIMER_STOP,
			.enter = omap_enter_idle_smp,
			.name = "C2",
			.desc = "CPUx CSWR, MPUSS CSWR",
		},
	},
	.state_count = ARRAY_SIZE(omap5_idle_data),
	.safe_state_index = 0,
};
  247. /* Public functions */
  248. /**
  249. * omap4_idle_init - Init routine for OMAP4+ idle
  250. *
  251. * Registers the OMAP4+ specific cpuidle driver to the cpuidle
  252. * framework with the valid set of states.
  253. */
  254. int __init omap4_idle_init(void)
  255. {
  256. struct cpuidle_driver *idle_driver;
  257. if (soc_is_omap54xx()) {
  258. state_ptr = &omap5_idle_data[0];
  259. idle_driver = &omap5_idle_driver;
  260. } else {
  261. state_ptr = &omap4_idle_data[0];
  262. idle_driver = &omap4_idle_driver;
  263. }
  264. mpu_pd = pwrdm_lookup("mpu_pwrdm");
  265. cpu_pd[0] = pwrdm_lookup("cpu0_pwrdm");
  266. cpu_pd[1] = pwrdm_lookup("cpu1_pwrdm");
  267. if ((!mpu_pd) || (!cpu_pd[0]) || (!cpu_pd[1]))
  268. return -ENODEV;
  269. cpu_clkdm[0] = clkdm_lookup("mpu0_clkdm");
  270. cpu_clkdm[1] = clkdm_lookup("mpu1_clkdm");
  271. if (!cpu_clkdm[0] || !cpu_clkdm[1])
  272. return -ENODEV;
  273. return cpuidle_register(idle_driver, cpu_online_mask);
  274. }