/*
 * arch/arm/common/mcpm_entry.c -- entry point for multi-cluster PM
 *
 * Created by: Nicolas Pitre, March 2012
 * Copyright: (C) 2012-2013 Linaro Limited
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/export.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/irqflags.h>
#include <linux/cpu_pm.h>

#include <asm/mcpm.h>
#include <asm/cacheflush.h>
#include <asm/idmap.h>
#include <asm/cputype.h>
#include <asm/suspend.h>

/*
 * The public API for this code is documented in arch/arm/include/asm/mcpm.h.
 * For a comprehensive description of the main algorithm used here, please
 * see Documentation/arm/cluster-pm-race-avoidance.txt.
 */

struct sync_struct mcpm_sync;

/*
 * __mcpm_cpu_going_down: Indicates that the cpu is being torn down.
 *    This must be called at the point of committing to teardown of a CPU.
 *    The CPU cache (SCTLR.C bit) is expected to still be active.
 */
static void __mcpm_cpu_going_down(unsigned int cpu, unsigned int cluster)
{
        mcpm_sync.clusters[cluster].cpus[cpu].cpu = CPU_GOING_DOWN;
        sync_cache_w(&mcpm_sync.clusters[cluster].cpus[cpu].cpu);
}

/*
 * __mcpm_cpu_down: Indicates that cpu teardown is complete and that the
 *    cluster can be torn down without disrupting this CPU.
 *    To avoid deadlocks, this must be called before a CPU is powered down.
 *    The CPU cache (SCTLR.C bit) is expected to be off.
 *    However, the L2 cache might or might not be active.
 */
static void __mcpm_cpu_down(unsigned int cpu, unsigned int cluster)
{
        dmb();
        mcpm_sync.clusters[cluster].cpus[cpu].cpu = CPU_DOWN;
        sync_cache_w(&mcpm_sync.clusters[cluster].cpus[cpu].cpu);
        sev();
}

/*
 * __mcpm_outbound_leave_critical: Leave the cluster teardown critical section.
 * @state: the final state of the cluster:
 *     CLUSTER_UP: no destructive teardown was done and the cluster has been
 *         restored to the previous state (CPU cache still active); or
 *     CLUSTER_DOWN: the cluster has been torn down, ready for power-off
 *         (CPU cache disabled, L2 cache either enabled or disabled).
 */
static void __mcpm_outbound_leave_critical(unsigned int cluster, int state)
{
        dmb();
        mcpm_sync.clusters[cluster].cluster = state;
        sync_cache_w(&mcpm_sync.clusters[cluster].cluster);
        sev();
}

/*
 * __mcpm_outbound_enter_critical: Enter the cluster teardown critical section.
 * This function should be called by the last man, after local CPU teardown
 * is complete.  CPU cache expected to be active.
 *
 * Returns:
 *     false: the critical section was not entered because an inbound CPU was
 *         observed, or the cluster is already being set up;
 *     true: the critical section was entered: it is now safe to tear down the
 *         cluster.
 */
static bool __mcpm_outbound_enter_critical(unsigned int cpu, unsigned int cluster)
{
        unsigned int i;
        struct mcpm_sync_struct *c = &mcpm_sync.clusters[cluster];

        /* Warn inbound CPUs that the cluster is being torn down: */
        c->cluster = CLUSTER_GOING_DOWN;
        sync_cache_w(&c->cluster);

        /* Back out if the inbound cluster is already in the critical region: */
        sync_cache_r(&c->inbound);
        if (c->inbound == INBOUND_COMING_UP)
                goto abort;

        /*
         * Wait for all CPUs to get out of the GOING_DOWN state, so that local
         * teardown is complete on each CPU before tearing down the cluster.
         *
         * If any CPU has been woken up again from the DOWN state, then we
         * shouldn't be taking the cluster down at all: abort in that case.
         */
        sync_cache_r(&c->cpus);
        for (i = 0; i < MAX_CPUS_PER_CLUSTER; i++) {
                int cpustate;

                if (i == cpu)
                        continue;

                while (1) {
                        cpustate = c->cpus[i].cpu;
                        if (cpustate != CPU_GOING_DOWN)
                                break;
                        wfe();
                        sync_cache_r(&c->cpus[i].cpu);
                }

                switch (cpustate) {
                case CPU_DOWN:
                        continue;
                default:
                        goto abort;
                }
        }

        return true;

abort:
        __mcpm_outbound_leave_critical(cluster, CLUSTER_UP);
        return false;
}

static int __mcpm_cluster_state(unsigned int cluster)
{
        sync_cache_r(&mcpm_sync.clusters[cluster].cluster);
        return mcpm_sync.clusters[cluster].cluster;
}

extern unsigned long mcpm_entry_vectors[MAX_NR_CLUSTERS][MAX_CPUS_PER_CLUSTER];

void mcpm_set_entry_vector(unsigned cpu, unsigned cluster, void *ptr)
{
        unsigned long val = ptr ? __pa_symbol(ptr) : 0;

        mcpm_entry_vectors[cluster][cpu] = val;
        sync_cache_w(&mcpm_entry_vectors[cluster][cpu]);
}

extern unsigned long mcpm_entry_early_pokes[MAX_NR_CLUSTERS][MAX_CPUS_PER_CLUSTER][2];

void mcpm_set_early_poke(unsigned cpu, unsigned cluster,
                         unsigned long poke_phys_addr, unsigned long poke_val)
{
        unsigned long *poke = &mcpm_entry_early_pokes[cluster][cpu][0];

        poke[0] = poke_phys_addr;
        poke[1] = poke_val;
        __sync_cache_range_w(poke, 2 * sizeof(*poke));
}

static const struct mcpm_platform_ops *platform_ops;

int __init mcpm_platform_register(const struct mcpm_platform_ops *ops)
{
        if (platform_ops)
                return -EBUSY;
        platform_ops = ops;
        return 0;
}

bool mcpm_is_available(void)
{
        return (platform_ops) ? true : false;
}
EXPORT_SYMBOL_GPL(mcpm_is_available);
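
/*
 * Illustrative sketch, not part of the original file: a platform backend
 * provides a struct mcpm_platform_ops and registers it before SMP bringup,
 * typically from an early initcall.  All example_* names below are
 * hypothetical; the struct fields and callback signatures are the ones used
 * elsewhere in this file, and this minimal set covers the hooks that the
 * power up/down paths call unconditionally.
 */
static int example_cpu_powerup(unsigned int cpu, unsigned int cluster)
{
        /* Ask the platform power controller to release this CPU. */
        return 0;
}

static int example_cluster_powerup(unsigned int cluster)
{
        /* Power up cluster-level resources (L2, coherency interconnect). */
        return 0;
}

static void example_cpu_powerdown_prepare(unsigned int cpu, unsigned int cluster)
{
        /* Arm the power controller to cut this CPU's power once it hits WFI. */
}

static void example_cluster_powerdown_prepare(unsigned int cluster)
{
        /* Arm the power controller to cut cluster power once all CPUs are down. */
}

static void example_cpu_cache_disable(void)
{
        /* Flush and disable this CPU's cache and exit coherency. */
}

static void example_cluster_cache_disable(void)
{
        /* Flush and disable all cache levels owned by the cluster. */
}

static const struct mcpm_platform_ops example_power_ops = {
        .cpu_powerup               = example_cpu_powerup,
        .cluster_powerup           = example_cluster_powerup,
        .cpu_powerdown_prepare     = example_cpu_powerdown_prepare,
        .cluster_powerdown_prepare = example_cluster_powerdown_prepare,
        .cpu_cache_disable         = example_cpu_cache_disable,
        .cluster_cache_disable     = example_cluster_cache_disable,
};

/* A real backend would wire this up via an early initcall. */
static int __init __maybe_unused example_mcpm_register(void)
{
        return mcpm_platform_register(&example_power_ops);
}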

/*
 * We can't use regular spinlocks. In the switcher case, it is possible
 * for an outbound CPU to call power_down() after its inbound counterpart
 * is already live using the same logical CPU number which trips lockdep
 * debugging.
 */
static arch_spinlock_t mcpm_lock = __ARCH_SPIN_LOCK_UNLOCKED;

static int mcpm_cpu_use_count[MAX_NR_CLUSTERS][MAX_CPUS_PER_CLUSTER];

static inline bool mcpm_cluster_unused(unsigned int cluster)
{
        int i, cnt;

        for (i = 0, cnt = 0; i < MAX_CPUS_PER_CLUSTER; i++)
                cnt |= mcpm_cpu_use_count[cluster][i];
        return !cnt;
}

int mcpm_cpu_power_up(unsigned int cpu, unsigned int cluster)
{
        bool cpu_is_down, cluster_is_down;
        int ret = 0;

        pr_debug("%s: cpu %u cluster %u\n", __func__, cpu, cluster);
        if (!platform_ops)
                return -EUNATCH; /* try not to shadow power_up errors */
        might_sleep();

        /*
         * Since this is called with IRQs enabled, and no arch_spin_lock_irq
         * variant exists, we need to disable IRQs manually here.
         */
        local_irq_disable();
        arch_spin_lock(&mcpm_lock);

        cpu_is_down = !mcpm_cpu_use_count[cluster][cpu];
        cluster_is_down = mcpm_cluster_unused(cluster);

        mcpm_cpu_use_count[cluster][cpu]++;
        /*
         * The only possible values are:
         * 0 = CPU down
         * 1 = CPU (still) up
         * 2 = CPU requested to be up before it had a chance
         *     to actually make itself down.
         * Any other value is a bug.
         */
        BUG_ON(mcpm_cpu_use_count[cluster][cpu] != 1 &&
               mcpm_cpu_use_count[cluster][cpu] != 2);

        if (cluster_is_down)
                ret = platform_ops->cluster_powerup(cluster);
        if (cpu_is_down && !ret)
                ret = platform_ops->cpu_powerup(cpu, cluster);

        arch_spin_unlock(&mcpm_lock);
        local_irq_enable();
        return ret;
}
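
/*
 * Illustrative sketch, not part of the original file: how a secondary-CPU
 * boot path would typically use the calls above.  The ARM SMP glue normally
 * derives the physical cpu/cluster pair from the logical CPU's MPIDR; here
 * they are simply passed in, and example_power_up_cpu() is a hypothetical
 * helper.
 */
static int __maybe_unused example_power_up_cpu(unsigned int pcpu,
                                               unsigned int pcluster,
                                               void (*entry)(void))
{
        /*
         * Publish the entry point first: mcpm_set_entry_vector() makes it
         * visible to a CPU that comes up with its caches off, so it must be
         * in place before the power-up request is issued.
         */
        mcpm_set_entry_vector(pcpu, pcluster, entry);
        return mcpm_cpu_power_up(pcpu, pcluster);
}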

typedef typeof(cpu_reset) phys_reset_t;

void mcpm_cpu_power_down(void)
{
        unsigned int mpidr, cpu, cluster;
        bool cpu_going_down, last_man;
        phys_reset_t phys_reset;

        mpidr = read_cpuid_mpidr();
        cpu = MPIDR_AFFINITY_LEVEL(mpidr, 0);
        cluster = MPIDR_AFFINITY_LEVEL(mpidr, 1);
        pr_debug("%s: cpu %u cluster %u\n", __func__, cpu, cluster);
        if (WARN_ON_ONCE(!platform_ops))
                return;
        BUG_ON(!irqs_disabled());

        setup_mm_for_reboot();

        __mcpm_cpu_going_down(cpu, cluster);
        arch_spin_lock(&mcpm_lock);
        BUG_ON(__mcpm_cluster_state(cluster) != CLUSTER_UP);

        mcpm_cpu_use_count[cluster][cpu]--;
        BUG_ON(mcpm_cpu_use_count[cluster][cpu] != 0 &&
               mcpm_cpu_use_count[cluster][cpu] != 1);
        cpu_going_down = !mcpm_cpu_use_count[cluster][cpu];
        last_man = mcpm_cluster_unused(cluster);

        if (last_man && __mcpm_outbound_enter_critical(cpu, cluster)) {
                platform_ops->cpu_powerdown_prepare(cpu, cluster);
                platform_ops->cluster_powerdown_prepare(cluster);
                arch_spin_unlock(&mcpm_lock);
                platform_ops->cluster_cache_disable();
                __mcpm_outbound_leave_critical(cluster, CLUSTER_DOWN);
        } else {
                if (cpu_going_down)
                        platform_ops->cpu_powerdown_prepare(cpu, cluster);
                arch_spin_unlock(&mcpm_lock);
                /*
                 * If cpu_going_down is false here, that means a power_up
                 * request raced ahead of us.  Even if we do not want to
                 * shut this CPU down, the caller still expects execution
                 * to return through the system resume entry path, like
                 * when the WFI is aborted due to a new IRQ or the like.
                 * So let's continue with cache cleaning in all cases.
                 */
                platform_ops->cpu_cache_disable();
        }

        __mcpm_cpu_down(cpu, cluster);

        /* Now we are prepared for power-down, do it: */
        if (cpu_going_down)
                wfi();

        /*
         * It is possible for a power_up request to happen concurrently
         * with a power_down request for the same CPU.  In this case the
         * CPU might not be able to actually enter a powered-down state
         * with the WFI instruction if the power_up request has removed
         * the required reset condition.  We must perform a re-entry in
         * the kernel as if the power_up method had just deasserted reset
         * on the CPU.
         */
        phys_reset = (phys_reset_t)(unsigned long)__pa_symbol(cpu_reset);
        phys_reset(__pa_symbol(mcpm_entry_point), false);

        /* should never get here */
        BUG();
}

int mcpm_wait_for_cpu_powerdown(unsigned int cpu, unsigned int cluster)
{
        int ret;

        if (WARN_ON_ONCE(!platform_ops || !platform_ops->wait_for_powerdown))
                return -EUNATCH;

        ret = platform_ops->wait_for_powerdown(cpu, cluster);
        if (ret)
                pr_warn("%s: cpu %u, cluster %u failed to power down (%d)\n",
                        __func__, cpu, cluster, ret);

        return ret;
}
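
/*
 * Illustrative sketch, not part of the original file: on the CPU hot-unplug
 * path, the dying CPU calls mcpm_cpu_power_down() on itself with IRQs
 * disabled, while another CPU may use mcpm_wait_for_cpu_powerdown() to
 * confirm the hardware really lost power.  The example_* names are
 * hypothetical.
 */
static void __maybe_unused example_cpu_die(unsigned int pcpu, unsigned int pcluster)
{
        /* Clear the entry vector so a spurious wake-up cannot use a stale one. */
        mcpm_set_entry_vector(pcpu, pcluster, NULL);

        /*
         * Never returns to the caller: the CPU either loses power or
         * re-enters the kernel through mcpm_entry_point.
         */
        mcpm_cpu_power_down();
}

static int __maybe_unused example_cpu_kill(unsigned int pcpu, unsigned int pcluster)
{
        /* 0 means the platform reported the CPU as powered down. */
        return mcpm_wait_for_cpu_powerdown(pcpu, pcluster);
}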

void mcpm_cpu_suspend(void)
{
        if (WARN_ON_ONCE(!platform_ops))
                return;

        /* Some platforms might have to enable special resume modes, etc. */
        if (platform_ops->cpu_suspend_prepare) {
                unsigned int mpidr = read_cpuid_mpidr();
                unsigned int cpu = MPIDR_AFFINITY_LEVEL(mpidr, 0);
                unsigned int cluster = MPIDR_AFFINITY_LEVEL(mpidr, 1);

                arch_spin_lock(&mcpm_lock);
                platform_ops->cpu_suspend_prepare(cpu, cluster);
                arch_spin_unlock(&mcpm_lock);
        }
        mcpm_cpu_power_down();
}
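
/*
 * Illustrative sketch, not part of the original file: a cpuidle or suspend
 * backend would typically invoke mcpm_cpu_suspend() from the finisher
 * passed to cpu_suspend(), after pointing the entry vector at cpu_resume so
 * the woken CPU re-enters the kernel through the MCPM early boot path.
 * cpu_suspend()/cpu_resume() come from <asm/suspend.h>; the example_* name
 * is hypothetical.
 */
static int __maybe_unused example_suspend_finisher(unsigned long arg)
{
        unsigned int mpidr = read_cpuid_mpidr();
        unsigned int cpu = MPIDR_AFFINITY_LEVEL(mpidr, 0);
        unsigned int cluster = MPIDR_AFFINITY_LEVEL(mpidr, 1);

        mcpm_set_entry_vector(cpu, cluster, cpu_resume);
        mcpm_cpu_suspend();

        /* Not expected to be reached; non-zero reports failure to cpu_suspend(). */
        return 1;
}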

int mcpm_cpu_powered_up(void)
{
        unsigned int mpidr, cpu, cluster;
        bool cpu_was_down, first_man;
        unsigned long flags;

        if (!platform_ops)
                return -EUNATCH;

        mpidr = read_cpuid_mpidr();
        cpu = MPIDR_AFFINITY_LEVEL(mpidr, 0);
        cluster = MPIDR_AFFINITY_LEVEL(mpidr, 1);
        local_irq_save(flags);
        arch_spin_lock(&mcpm_lock);

        cpu_was_down = !mcpm_cpu_use_count[cluster][cpu];
        first_man = mcpm_cluster_unused(cluster);

        if (first_man && platform_ops->cluster_is_up)
                platform_ops->cluster_is_up(cluster);
        if (cpu_was_down)
                mcpm_cpu_use_count[cluster][cpu] = 1;
        if (platform_ops->cpu_is_up)
                platform_ops->cpu_is_up(cpu, cluster);

        arch_spin_unlock(&mcpm_lock);
        local_irq_restore(flags);

        return 0;
}
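
/*
 * Illustrative sketch, not part of the original file: mcpm_cpu_powered_up()
 * is meant to be called by a CPU once it is fully back in the kernel, e.g.
 * from the SMP secondary-init hook or after cpu_suspend() returns on the
 * resume path, so the use counts and the platform backend learn that the
 * CPU (and possibly its cluster) is up again.
 */
static void __maybe_unused example_secondary_init(void)
{
        if (mcpm_cpu_powered_up())
                BUG();
}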

#ifdef CONFIG_ARM_CPU_SUSPEND

static int __init nocache_trampoline(unsigned long _arg)
{
        void (*cache_disable)(void) = (void *)_arg;
        unsigned int mpidr = read_cpuid_mpidr();
        unsigned int cpu = MPIDR_AFFINITY_LEVEL(mpidr, 0);
        unsigned int cluster = MPIDR_AFFINITY_LEVEL(mpidr, 1);
        phys_reset_t phys_reset;

        mcpm_set_entry_vector(cpu, cluster, cpu_resume_no_hyp);
        setup_mm_for_reboot();

        __mcpm_cpu_going_down(cpu, cluster);
        BUG_ON(!__mcpm_outbound_enter_critical(cpu, cluster));
        cache_disable();
        __mcpm_outbound_leave_critical(cluster, CLUSTER_DOWN);
        __mcpm_cpu_down(cpu, cluster);

        phys_reset = (phys_reset_t)(unsigned long)__pa_symbol(cpu_reset);
        phys_reset(__pa_symbol(mcpm_entry_point), false);
        BUG();
}

int __init mcpm_loopback(void (*cache_disable)(void))
{
        int ret;

        /*
         * We're going to soft-restart the current CPU through the
         * low-level MCPM code by leveraging the suspend/resume
         * infrastructure.  Let's play it safe by using cpu_pm_enter()
         * in case the CPU init code path resets the VFP or similar.
         */
        local_irq_disable();
        local_fiq_disable();
        ret = cpu_pm_enter();
        if (!ret) {
                ret = cpu_suspend((unsigned long)cache_disable, nocache_trampoline);
                cpu_pm_exit();
        }
        local_fiq_enable();
        local_irq_enable();
        if (ret)
                pr_err("%s returned %d\n", __func__, ret);
        return ret;
}
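
/*
 * Illustrative sketch, not part of the original file: a platform backend
 * can run the boot CPU once through the low-level MCPM cycle at init time,
 * typically so that the coherency/cache setup performed by the early resume
 * path also gets applied to the cluster that booted the kernel.
 * example_cache_off() is a hypothetical callback doing what the backend's
 * cpu_cache_disable() hook would do.
 */
static void example_cache_off(void)
{
        /* Flush and disable this CPU's cache, as on the power-down path. */
}

static int __init __maybe_unused example_mcpm_loopback_once(void)
{
        return mcpm_loopback(example_cache_off);
}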

#endif

extern unsigned long mcpm_power_up_setup_phys;

int __init mcpm_sync_init(
        void (*power_up_setup)(unsigned int affinity_level))
{
        unsigned int i, j, mpidr, this_cluster;

        BUILD_BUG_ON(MCPM_SYNC_CLUSTER_SIZE * MAX_NR_CLUSTERS != sizeof mcpm_sync);
        BUG_ON((unsigned long)&mcpm_sync & (__CACHE_WRITEBACK_GRANULE - 1));

        /*
         * Set initial CPU and cluster states.
         * Only one cluster is assumed to be active at this point.
         */
        for (i = 0; i < MAX_NR_CLUSTERS; i++) {
                mcpm_sync.clusters[i].cluster = CLUSTER_DOWN;
                mcpm_sync.clusters[i].inbound = INBOUND_NOT_COMING_UP;
                for (j = 0; j < MAX_CPUS_PER_CLUSTER; j++)
                        mcpm_sync.clusters[i].cpus[j].cpu = CPU_DOWN;
        }
        mpidr = read_cpuid_mpidr();
        this_cluster = MPIDR_AFFINITY_LEVEL(mpidr, 1);
        for_each_online_cpu(i) {
                mcpm_cpu_use_count[this_cluster][i] = 1;
                mcpm_sync.clusters[this_cluster].cpus[i].cpu = CPU_UP;
        }
        mcpm_sync.clusters[this_cluster].cluster = CLUSTER_UP;
        sync_cache_w(&mcpm_sync);

        if (power_up_setup) {
                mcpm_power_up_setup_phys = __pa_symbol(power_up_setup);
                sync_cache_w(&mcpm_power_up_setup_phys);
        }

        return 0;
}
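
/*
 * Illustrative sketch, not part of the original file: the expected bring-up
 * order for a backend is (1) register its mcpm_platform_ops, (2) call
 * mcpm_sync_init() with its power_up_setup helper so the early entry code
 * can restore coherency, and only then (3) let SMP bringup and cpuidle use
 * the mcpm_cpu_* calls above.  The example_* names are hypothetical; a real
 * power_up_setup is normally written in assembly because it runs before the
 * MMU and caches are enabled, so the empty C stub below only stands in for
 * it.
 */
static void example_power_up_setup(unsigned int affinity_level)
{
        /* Enable coherency for the CPU (level 0) or the cluster (level 1). */
}

static int __init __maybe_unused example_mcpm_sync_setup(void)
{
        /*
         * The backend's mcpm_platform_ops must already be registered (see
         * mcpm_platform_register() above) before CPUs start relying on this.
         */
        return mcpm_sync_init(example_power_up_setup);
}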