cpuidle-riscv-sbi.c 13 KB

123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163164165166167168169170171172173174175176177178179180181182183184185186187188189190191192193194195196197198199200201202203204205206207208209210211212213214215216217218219220221222223224225226227228229230231232233234235236237238239240241242243244245246247248249250251252253254255256257258259260261262263264265266267268269270271272273274275276277278279280281282283284285286287288289290291292293294295296297298299300301302303304305306307308309310311312313314315316317318319320321322323324325326327328329330331332333334335336337338339340341342343344345346347348349350351352353354355356357358359360361362363364365366367368369370371372373374375376377378379380381382383384385386387388389390391392393394395396397398399400401402403404405406407408409410411412413414415416417418419420421422423424425426427428429430431432433434435436437438439440441442443444445446447448449450451452453454455456457458459460461462463464465466467468469470471472473474475476477478479480481482483484485486487488489490491492493494495496497498499500501502503504505506507508509510511512513514515516517518519520521522523524525526527528529530531532533534535536537538539540541542543544545546547548549550551552553554555556557558559560561562563564565566567568569570571572573574575576577578579580581582583584585
  1. // SPDX-License-Identifier: GPL-2.0-only
  2. /*
  3. * RISC-V SBI CPU idle driver.
  4. *
  5. * Copyright (c) 2021 Western Digital Corporation or its affiliates.
  6. * Copyright (c) 2022 Ventana Micro Systems Inc.
  7. */
  8. #define pr_fmt(fmt) "cpuidle-riscv-sbi: " fmt
  9. #include <linux/cleanup.h>
  10. #include <linux/cpuhotplug.h>
  11. #include <linux/cpuidle.h>
  12. #include <linux/cpumask.h>
  13. #include <linux/cpu_pm.h>
  14. #include <linux/cpu_cooling.h>
  15. #include <linux/kernel.h>
  16. #include <linux/module.h>
  17. #include <linux/of.h>
  18. #include <linux/slab.h>
  19. #include <linux/platform_device.h>
  20. #include <linux/pm_domain.h>
  21. #include <linux/pm_runtime.h>
  22. #include <asm/cpuidle.h>
  23. #include <asm/sbi.h>
  24. #include <asm/smp.h>
  25. #include <asm/suspend.h>
  26. #include "dt_idle_states.h"
  27. #include "dt_idle_genpd.h"
/* Per-CPU cpuidle bookkeeping for the SBI HSM back-end. */
struct sbi_cpuidle_data {
	u32 *states;		/* SBI suspend parameter per idle state index. */
	struct device *dev;	/* PM domain device from dt_idle_attach_cpu(), or NULL. */
};
/* Domain idle state selected by the CPU PM domain for the next suspend. */
struct sbi_domain_state {
	bool available;		/* True once @state has been set and not yet consumed. */
	u32 state;		/* SBI suspend parameter chosen by the domain. */
};
static DEFINE_PER_CPU_READ_MOSTLY(struct sbi_cpuidle_data, sbi_cpuidle_data);
static DEFINE_PER_CPU(struct sbi_domain_state, domain_state);

/* True when every possible CPU has power-domains/-names in DT (OSI mode). */
static bool sbi_cpuidle_use_osi;
/* True when the hierarchical topology is in use; enables hotplug callbacks. */
static bool sbi_cpuidle_use_cpuhp;
/* Set by sync_state once the whole PM domain topology has been probed. */
static bool sbi_cpuidle_pd_allow_domain_state;
  41. static inline void sbi_set_domain_state(u32 state)
  42. {
  43. struct sbi_domain_state *data = this_cpu_ptr(&domain_state);
  44. data->available = true;
  45. data->state = state;
  46. }
  47. static inline u32 sbi_get_domain_state(void)
  48. {
  49. struct sbi_domain_state *data = this_cpu_ptr(&domain_state);
  50. return data->state;
  51. }
  52. static inline void sbi_clear_domain_state(void)
  53. {
  54. struct sbi_domain_state *data = this_cpu_ptr(&domain_state);
  55. data->available = false;
  56. }
  57. static inline bool sbi_is_domain_state_available(void)
  58. {
  59. struct sbi_domain_state *data = this_cpu_ptr(&domain_state);
  60. return data->available;
  61. }
  62. static __cpuidle int sbi_cpuidle_enter_state(struct cpuidle_device *dev,
  63. struct cpuidle_driver *drv, int idx)
  64. {
  65. u32 *states = __this_cpu_read(sbi_cpuidle_data.states);
  66. u32 state = states[idx];
  67. if (state & SBI_HSM_SUSP_NON_RET_BIT)
  68. return CPU_PM_CPU_IDLE_ENTER_PARAM(riscv_sbi_hart_suspend, idx, state);
  69. else
  70. return CPU_PM_CPU_IDLE_ENTER_RETENTION_PARAM(riscv_sbi_hart_suspend,
  71. idx, state);
  72. }
/*
 * Enter an idle state through the hierarchical PM domain topology.
 * Notifies CPU PM, drops the runtime PM (or genpd, for s2idle) reference so
 * the domain may pick a shared state, then suspends the hart.
 * Returns the entered state index, or -1 on failure.
 */
static __cpuidle int __sbi_enter_domain_idle_state(struct cpuidle_device *dev,
						   struct cpuidle_driver *drv, int idx,
						   bool s2idle)
{
	struct sbi_cpuidle_data *data = this_cpu_ptr(&sbi_cpuidle_data);
	u32 *states = data->states;
	struct device *pd_dev = data->dev;
	u32 state;
	int ret;

	ret = cpu_pm_enter();
	if (ret)
		return -1;

	/* Do runtime PM to manage a hierarchical CPU topology. */
	if (s2idle)
		dev_pm_genpd_suspend(pd_dev);
	else
		pm_runtime_put_sync_suspend(pd_dev);

	ct_cpuidle_enter();

	/* Prefer the state the PM domain selected, if one was recorded. */
	if (sbi_is_domain_state_available())
		state = sbi_get_domain_state();
	else
		state = states[idx];

	ret = riscv_sbi_hart_suspend(state) ? -1 : idx;

	ct_cpuidle_exit();

	if (s2idle)
		dev_pm_genpd_resume(pd_dev);
	else
		pm_runtime_get_sync(pd_dev);

	cpu_pm_exit();

	/* Clear the domain state to start fresh when back from idle. */
	sbi_clear_domain_state();

	return ret;
}
/* Regular (runtime) idle entry via the hierarchical PM domain topology. */
static int sbi_enter_domain_idle_state(struct cpuidle_device *dev,
				       struct cpuidle_driver *drv, int idx)
{
	return __sbi_enter_domain_idle_state(dev, drv, idx, false);
}
/* Suspend-to-idle entry via the hierarchical PM domain topology. */
static int sbi_enter_s2idle_domain_idle_state(struct cpuidle_device *dev,
					      struct cpuidle_driver *drv,
					      int idx)
{
	return __sbi_enter_domain_idle_state(dev, drv, idx, true);
}
  117. static int sbi_cpuidle_cpuhp_up(unsigned int cpu)
  118. {
  119. struct device *pd_dev = __this_cpu_read(sbi_cpuidle_data.dev);
  120. if (pd_dev)
  121. pm_runtime_get_sync(pd_dev);
  122. return 0;
  123. }
  124. static int sbi_cpuidle_cpuhp_down(unsigned int cpu)
  125. {
  126. struct device *pd_dev = __this_cpu_read(sbi_cpuidle_data.dev);
  127. if (pd_dev) {
  128. pm_runtime_put_sync(pd_dev);
  129. /* Clear domain state to start fresh at next online. */
  130. sbi_clear_domain_state();
  131. }
  132. return 0;
  133. }
  134. static void sbi_idle_init_cpuhp(void)
  135. {
  136. int err;
  137. if (!sbi_cpuidle_use_cpuhp)
  138. return;
  139. err = cpuhp_setup_state_nocalls(CPUHP_AP_CPU_PM_STARTING,
  140. "cpuidle/sbi:online",
  141. sbi_cpuidle_cpuhp_up,
  142. sbi_cpuidle_cpuhp_down);
  143. if (err)
  144. pr_warn("Failed %d while setup cpuhp state\n", err);
  145. }
/* DT idle-state match table; .data is the enter callback installed by
 * dt_init_idle_driver() for each matched state. */
static const struct of_device_id sbi_cpuidle_state_match[] = {
	{ .compatible = "riscv,idle-state",
	  .data = sbi_cpuidle_enter_state },
	{ },
};
  151. static int sbi_dt_parse_state_node(struct device_node *np, u32 *state)
  152. {
  153. int err = of_property_read_u32(np, "riscv,sbi-suspend-param", state);
  154. if (err) {
  155. pr_warn("%pOF missing riscv,sbi-suspend-param property\n", np);
  156. return err;
  157. }
  158. if (!riscv_sbi_suspend_state_is_valid(*state)) {
  159. pr_warn("Invalid SBI suspend state %#x\n", *state);
  160. return -EINVAL;
  161. }
  162. return 0;
  163. }
/*
 * Attach @cpu to its DT-described PM domain and redirect the deepest idle
 * state to the domain-aware enter callbacks.
 * Returns 0 on success or when the hierarchical topology is not used,
 * a negative errno on attach failure.
 */
static int sbi_dt_cpu_init_topology(struct cpuidle_driver *drv,
				    struct sbi_cpuidle_data *data,
				    unsigned int state_count, int cpu)
{
	/* Currently limit the hierarchical topology to be used in OSI mode. */
	if (!sbi_cpuidle_use_osi)
		return 0;

	/* NULL means no "sbi" power-domain is described for this CPU. */
	data->dev = dt_idle_attach_cpu(cpu, "sbi");
	if (IS_ERR_OR_NULL(data->dev))
		return PTR_ERR_OR_ZERO(data->dev);

	/*
	 * Using the deepest state for the CPU to trigger a potential selection
	 * of a shared state for the domain, assumes the domain states are all
	 * deeper states.
	 */
	drv->states[state_count - 1].flags |= CPUIDLE_FLAG_RCU_IDLE;
	drv->states[state_count - 1].enter = sbi_enter_domain_idle_state;
	drv->states[state_count - 1].enter_s2idle =
		sbi_enter_s2idle_domain_idle_state;
	/* Side effect: turns on the hotplug callbacks for all CPUs. */
	sbi_cpuidle_use_cpuhp = true;

	return 0;
}
/*
 * Parse the DT idle states of @cpu and store their SBI suspend parameters
 * in the per-CPU data. Index 0 (WFI) keeps parameter 0.
 * Returns 0 on success, a negative errno otherwise.
 */
static int sbi_cpuidle_dt_init_states(struct device *dev,
				      struct cpuidle_driver *drv,
				      unsigned int cpu,
				      unsigned int state_count)
{
	struct sbi_cpuidle_data *data = per_cpu_ptr(&sbi_cpuidle_data, cpu);
	struct device_node *state_node;
	u32 *states;
	int i, ret;

	/* __free(): node reference dropped automatically on every return. */
	struct device_node *cpu_node __free(device_node) = of_cpu_device_node_get(cpu);
	if (!cpu_node)
		return -ENODEV;

	states = devm_kcalloc(dev, state_count, sizeof(*states), GFP_KERNEL);
	if (!states)
		return -ENOMEM;

	/* Parse SBI specific details from state DT nodes */
	for (i = 1; i < state_count; i++) {
		state_node = of_get_cpu_state_node(cpu_node, i - 1);
		if (!state_node)
			break;

		ret = sbi_dt_parse_state_node(state_node, &states[i]);
		of_node_put(state_node);

		if (ret)
			return ret;

		pr_debug("sbi-state %#x index %d\n", states[i], i);
	}
	/* Fewer state nodes than the driver counted means inconsistent DT. */
	if (i != state_count)
		return -ENODEV;

	/* Initialize optional data, used for the hierarchical topology. */
	ret = sbi_dt_cpu_init_topology(drv, data, state_count, cpu);
	if (ret < 0)
		return ret;

	/* Store states in the per-cpu struct. */
	data->states = states;

	return 0;
}
/* Detach @cpu from its PM domain and disable the hotplug callbacks. */
static void sbi_cpuidle_deinit_cpu(int cpu)
{
	struct sbi_cpuidle_data *data = per_cpu_ptr(&sbi_cpuidle_data, cpu);

	dt_idle_detach_cpu(data->dev);
	/*
	 * NOTE(review): this clears the flag globally even though only @cpu
	 * is deinitialized here — presumably fine because callers tear down
	 * all CPUs on failure; confirm before reusing in other paths.
	 */
	sbi_cpuidle_use_cpuhp = false;
}
/*
 * Build and register a cpuidle driver for @cpu: architectural WFI as state
 * index 0, plus the DT-described SBI suspend states.
 * Returns 0 on success, a negative errno otherwise.
 */
static int sbi_cpuidle_init_cpu(struct device *dev, int cpu)
{
	struct cpuidle_driver *drv;
	unsigned int state_count = 0;
	int ret = 0;

	drv = devm_kzalloc(dev, sizeof(*drv), GFP_KERNEL);
	if (!drv)
		return -ENOMEM;

	drv->name = "sbi_cpuidle";
	drv->owner = THIS_MODULE;
	drv->cpumask = (struct cpumask *)cpumask_of(cpu);

	/* RISC-V architectural WFI to be represented as state index 0. */
	drv->states[0].enter = sbi_cpuidle_enter_state;
	drv->states[0].exit_latency = 1;
	drv->states[0].target_residency = 1;
	drv->states[0].power_usage = UINT_MAX;
	strcpy(drv->states[0].name, "WFI");
	strcpy(drv->states[0].desc, "RISC-V WFI");

	/*
	 * If no DT idle states are detected (ret == 0) let the driver
	 * initialization fail accordingly since there is no reason to
	 * initialize the idle driver if only wfi is supported, the
	 * default architectural back-end already executes wfi
	 * on idle entry.
	 */
	ret = dt_init_idle_driver(drv, sbi_cpuidle_state_match, 1);
	if (ret <= 0) {
		pr_debug("HART%ld: failed to parse DT idle states\n",
			 cpuid_to_hartid_map(cpu));
		return ret ? : -ENODEV;
	}
	state_count = ret + 1; /* Include WFI state as well */

	/* Initialize idle states from DT. */
	ret = sbi_cpuidle_dt_init_states(dev, drv, cpu, state_count);
	if (ret) {
		pr_err("HART%ld: failed to init idle states\n",
		       cpuid_to_hartid_map(cpu));
		return ret;
	}

	ret = cpuidle_register(drv, NULL);
	if (ret)
		goto deinit;

	/* Hook the driver into the thermal cooling framework (best effort). */
	cpuidle_cooling_register(drv);

	return 0;
deinit:
	sbi_cpuidle_deinit_cpu(cpu);
	return ret;
}
/* Driver sync_state callback: unblocks domain-state selection. */
static void sbi_cpuidle_domain_sync_state(struct device *dev)
{
	/*
	 * All devices have now been attached/probed to the PM domain
	 * topology, hence it's fine to allow domain states to be picked.
	 */
	sbi_cpuidle_pd_allow_domain_state = true;
}
  284. #ifdef CONFIG_DT_IDLE_GENPD
  285. static int sbi_cpuidle_pd_power_off(struct generic_pm_domain *pd)
  286. {
  287. struct genpd_power_state *state = &pd->states[pd->state_idx];
  288. u32 *pd_state;
  289. if (!state->data)
  290. return 0;
  291. if (!sbi_cpuidle_pd_allow_domain_state)
  292. return -EBUSY;
  293. /* OSI mode is enabled, set the corresponding domain state. */
  294. pd_state = state->data;
  295. sbi_set_domain_state(*pd_state);
  296. return 0;
  297. }
/* Registered genpd provider, kept on a list so it can be torn down later. */
struct sbi_pd_provider {
	struct list_head link;
	struct device_node *node;	/* Provider DT node (holds a reference). */
};

static LIST_HEAD(sbi_pd_providers);
/*
 * Create, initialize and register a generic PM domain for DT node @np and
 * record it on sbi_pd_providers for later removal.
 * Returns 0 on success, a negative errno otherwise.
 */
static int sbi_pd_init(struct device_node *np)
{
	struct generic_pm_domain *pd;
	struct sbi_pd_provider *pd_provider;
	struct dev_power_governor *pd_gov;
	int ret = -ENOMEM;

	pd = dt_idle_pd_alloc(np, sbi_dt_parse_state_node);
	if (!pd)
		goto out;

	pd_provider = kzalloc(sizeof(*pd_provider), GFP_KERNEL);
	if (!pd_provider)
		goto free_pd;

	pd->flags |= GENPD_FLAG_IRQ_SAFE | GENPD_FLAG_CPU_DOMAIN;

	/* Allow power off when OSI is available. */
	if (sbi_cpuidle_use_osi)
		pd->power_off = sbi_cpuidle_pd_power_off;
	else
		pd->flags |= GENPD_FLAG_ALWAYS_ON;

	/* Use governor for CPU PM domains if it has some states to manage. */
	pd_gov = pd->states ? &pm_domain_cpu_gov : NULL;

	ret = pm_genpd_init(pd, pd_gov, false);
	if (ret)
		goto free_pd_prov;

	ret = of_genpd_add_provider_simple(np, pd);
	if (ret)
		goto remove_pd;

	pd_provider->node = of_node_get(np);
	list_add(&pd_provider->link, &sbi_pd_providers);

	pr_debug("init PM domain %s\n", pd->name);
	return 0;

	/* Unwind in reverse acquisition order. */
remove_pd:
	pm_genpd_remove(pd);
free_pd_prov:
	kfree(pd_provider);
free_pd:
	dt_idle_pd_free(pd);
out:
	pr_err("failed to init PM domain ret=%d %pOF\n", ret, np);
	return ret;
}
/* Tear down every PM domain previously registered by sbi_pd_init(). */
static void sbi_pd_remove(void)
{
	struct sbi_pd_provider *pd_provider, *it;
	struct generic_pm_domain *genpd;

	list_for_each_entry_safe(pd_provider, it, &sbi_pd_providers, link) {
		/* Unregister the provider before removing its domain. */
		of_genpd_del_provider(pd_provider->node);

		genpd = of_genpd_remove_last(pd_provider->node);
		if (!IS_ERR(genpd))
			kfree(genpd);

		/* Drop the node reference taken in sbi_pd_init(). */
		of_node_put(pd_provider->node);
		list_del(&pd_provider->link);
		kfree(pd_provider);
	}
}
/*
 * Create a PM domain for each provider child of @np and link the domains
 * into the CPU topology. Returns 0 on success or when no domains are
 * described (flat topology), a negative errno otherwise.
 */
static int sbi_genpd_probe(struct device_node *np)
{
	int ret = 0, pd_count = 0;

	if (!np)
		return -ENODEV;

	/*
	 * Parse child nodes for the "#power-domain-cells" property and
	 * initialize a genpd/genpd-of-provider pair when it's found.
	 */
	for_each_child_of_node_scoped(np, node) {
		if (!of_property_present(node, "#power-domain-cells"))
			continue;

		ret = sbi_pd_init(node);
		if (ret)
			goto remove_pd;

		pd_count++;
	}

	/* Bail out if not using the hierarchical CPU topology. */
	if (!pd_count)
		goto no_pd;

	/* Link genpd masters/subdomains to model the CPU topology. */
	ret = dt_idle_pd_init_topology(np);
	if (ret)
		goto remove_pd;

	return 0;

remove_pd:
	/* Roll back any domains created before the failure. */
	sbi_pd_remove();
	pr_err("failed to create CPU PM domains ret=%d\n", ret);
no_pd:
	return ret;
}
  388. #else
/* Stub when CONFIG_DT_IDLE_GENPD is disabled: no PM domains to create. */
static inline int sbi_genpd_probe(struct device_node *np)
{
	return 0;
}
  393. #endif
/*
 * Platform probe: detect OSI support from the CPU DT nodes, populate the
 * PM domains, then register a cpuidle driver per possible CPU.
 */
static int sbi_cpuidle_probe(struct platform_device *pdev)
{
	int cpu, ret;
	struct cpuidle_driver *drv;
	struct cpuidle_device *dev;
	struct device_node *pds_node;

	/* Detect OSI support based on CPU DT nodes */
	sbi_cpuidle_use_osi = true;
	for_each_possible_cpu(cpu) {
		struct device_node *np __free(device_node) = of_cpu_device_node_get(cpu);

		/* OSI requires every CPU to reference a named power domain. */
		if (np &&
		    of_property_present(np, "power-domains") &&
		    of_property_present(np, "power-domain-names")) {
			continue;
		} else {
			sbi_cpuidle_use_osi = false;
			break;
		}
	}

	/* Populate generic power domains from DT nodes */
	pds_node = of_find_node_by_path("/cpus/power-domains");
	if (pds_node) {
		ret = sbi_genpd_probe(pds_node);
		of_node_put(pds_node);
		if (ret)
			return ret;
	}

	/* Initialize CPU idle driver for each CPU */
	for_each_possible_cpu(cpu) {
		ret = sbi_cpuidle_init_cpu(&pdev->dev, cpu);
		if (ret) {
			pr_debug("HART%ld: idle driver init failed\n",
				 cpuid_to_hartid_map(cpu));
			goto out_fail;
		}
	}

	/* Setup CPU hotplug notifiers */
	sbi_idle_init_cpuhp();

	pr_info("idle driver registered for all CPUs\n");

	return 0;

out_fail:
	/*
	 * Unwind the CPUs initialized above.
	 * NOTE(review): presumably assumes possible CPU ids are contiguous
	 * from 0; a sparse mask would visit never-initialized ids — confirm.
	 */
	while (--cpu >= 0) {
		dev = per_cpu(cpuidle_devices, cpu);
		drv = cpuidle_get_cpu_driver(dev);
		cpuidle_unregister(drv);
		sbi_cpuidle_deinit_cpu(cpu);
	}

	return ret;
}
/* Bound to the "sbi-cpuidle" platform device registered at init time. */
static struct platform_driver sbi_cpuidle_driver = {
	.probe = sbi_cpuidle_probe,
	.driver = {
		.name = "sbi-cpuidle",
		.sync_state = sbi_cpuidle_domain_sync_state,
	},
};
  450. static int __init sbi_cpuidle_init(void)
  451. {
  452. int ret;
  453. struct platform_device *pdev;
  454. if (!riscv_sbi_hsm_is_supported())
  455. return 0;
  456. ret = platform_driver_register(&sbi_cpuidle_driver);
  457. if (ret)
  458. return ret;
  459. pdev = platform_device_register_simple("sbi-cpuidle",
  460. -1, NULL, 0);
  461. if (IS_ERR(pdev)) {
  462. platform_driver_unregister(&sbi_cpuidle_driver);
  463. return PTR_ERR(pdev);
  464. }
  465. return 0;
  466. }
  467. device_initcall(sbi_cpuidle_init);