gdsc.c

// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2015, 2017-2018, 2022, The Linux Foundation. All rights reserved.
 */

#include <linux/bitops.h>
#include <linux/delay.h>
#include <linux/err.h>
#include <linux/export.h>
#include <linux/jiffies.h>
#include <linux/kernel.h>
#include <linux/ktime.h>
#include <linux/pm_domain.h>
#include <linux/regmap.h>
#include <linux/regulator/consumer.h>
#include <linux/reset-controller.h>
#include <linux/slab.h>

#include "gdsc.h"

#define PWR_ON_MASK		BIT(31)
#define EN_REST_WAIT_MASK	GENMASK_ULL(23, 20)
#define EN_FEW_WAIT_MASK	GENMASK_ULL(19, 16)
#define CLK_DIS_WAIT_MASK	GENMASK_ULL(15, 12)
#define SW_OVERRIDE_MASK	BIT(2)
#define HW_CONTROL_MASK		BIT(1)
#define SW_COLLAPSE_MASK	BIT(0)
#define GMEM_CLAMP_IO_MASK	BIT(0)
#define GMEM_RESET_MASK		BIT(4)

/* CFG_GDSCR */
#define GDSC_POWER_UP_COMPLETE		BIT(16)
#define GDSC_POWER_DOWN_COMPLETE	BIT(15)
#define GDSC_RETAIN_FF_ENABLE		BIT(11)
#define CFG_GDSCR_OFFSET		0x4

/* Wait 2^n CXO cycles between all states. Here, n=2 (4 cycles). */
#define EN_REST_WAIT_VAL	0x2
#define EN_FEW_WAIT_VAL		0x8
#define CLK_DIS_WAIT_VAL	0x2

/* Transition delay shifts */
#define EN_REST_WAIT_SHIFT	20
#define EN_FEW_WAIT_SHIFT	16
#define CLK_DIS_WAIT_SHIFT	12

#define RETAIN_MEM	BIT(14)
#define RETAIN_PERIPH	BIT(13)

#define STATUS_POLL_TIMEOUT_US	1500
#define TIMEOUT_US		500

#define domain_to_gdsc(domain) container_of(domain, struct gdsc, pd)

enum gdsc_status {
	GDSC_OFF,
	GDSC_ON
};

/* Returns 1 if GDSC status is status, 0 if not, and < 0 on error */
static int gdsc_check_status(struct gdsc *sc, enum gdsc_status status)
{
	unsigned int reg;
	u32 val;
	int ret;

	if (sc->flags & POLL_CFG_GDSCR)
		reg = sc->gdscr + CFG_GDSCR_OFFSET;
	else if (sc->gds_hw_ctrl)
		reg = sc->gds_hw_ctrl;
	else
		reg = sc->gdscr;

	ret = regmap_read(sc->regmap, reg, &val);
	if (ret)
		return ret;

	if (sc->flags & POLL_CFG_GDSCR) {
		switch (status) {
		case GDSC_ON:
			return !!(val & GDSC_POWER_UP_COMPLETE);
		case GDSC_OFF:
			return !!(val & GDSC_POWER_DOWN_COMPLETE);
		}
	}

	switch (status) {
	case GDSC_ON:
		return !!(val & PWR_ON_MASK);
	case GDSC_OFF:
		return !(val & PWR_ON_MASK);
	}

	return -EINVAL;
}
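
/* Hand the GDSC over to hardware control (en = true) or return it to software control. */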
static int gdsc_hwctrl(struct gdsc *sc, bool en)
{
	u32 val = en ? HW_CONTROL_MASK : 0;

	return regmap_update_bits(sc->regmap, sc->gdscr, HW_CONTROL_MASK, val);
}
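
/* Poll until the GDSC reports the requested status, or STATUS_POLL_TIMEOUT_US elapses. */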
static int gdsc_poll_status(struct gdsc *sc, enum gdsc_status status)
{
	ktime_t start;

	start = ktime_get();
	do {
		if (gdsc_check_status(sc, status))
			return 0;
	} while (ktime_us_delta(ktime_get(), start) < STATUS_POLL_TIMEOUT_US);

	if (gdsc_check_status(sc, status))
		return 0;

	return -ETIMEDOUT;
}
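
/*
 * Set or clear the collapse vote: use the dedicated collapse vote register when
 * one is provided, otherwise the SW_COLLAPSE bit in the GDSCR itself.
 */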
static int gdsc_update_collapse_bit(struct gdsc *sc, bool val)
{
	u32 reg, mask;
	int ret;

	if (sc->collapse_mask) {
		reg = sc->collapse_ctrl;
		mask = sc->collapse_mask;
	} else {
		reg = sc->gdscr;
		mask = SW_COLLAPSE_MASK;
	}

	ret = regmap_update_bits(sc->regmap, reg, mask, val ? mask : 0);
	if (ret)
		return ret;

	return 0;
}
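
/*
 * Switch the GDSC on or off, enabling or disabling the parent supply as
 * needed, and poll for the transition to complete (unless polling is skipped
 * for a votable GDSC being disabled).
 */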
static int gdsc_toggle_logic(struct gdsc *sc, enum gdsc_status status,
		bool wait)
{
	int ret;

	if (status == GDSC_ON && sc->rsupply) {
		ret = regulator_enable(sc->rsupply);
		if (ret < 0)
			return ret;
	}

	ret = gdsc_update_collapse_bit(sc, status == GDSC_OFF);

	/* If disabling votable gdscs, don't poll on status */
	if ((sc->flags & VOTABLE) && status == GDSC_OFF && !wait) {
		/*
		 * Add a short delay here to ensure that an enable
		 * right after it was disabled does not put it in an
		 * unknown state
		 */
		udelay(TIMEOUT_US);
		return 0;
	}

	if (sc->gds_hw_ctrl) {
		/*
		 * The gds hw controller asserts/de-asserts the status bit soon
		 * after it receives a power on/off request from a master.
		 * The controller then takes around 8 xo cycles to start its
		 * internal state machine and update the status bit. During
		 * this time, the status bit does not reflect the true status
		 * of the core.
		 * Add a delay of 1 us between writing to the SW_COLLAPSE bit
		 * and polling the status bit.
		 */
		udelay(1);
	}

	ret = gdsc_poll_status(sc, status);
	WARN(ret, "%s status stuck at 'o%s'", sc->pd.name, status ? "ff" : "n");

	if (!ret && status == GDSC_OFF && sc->rsupply) {
		ret = regulator_disable(sc->rsupply);
		if (ret < 0)
			return ret;
	}

	return ret;
}
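
/* Propagate deassert/assert to every reset line associated with this GDSC. */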
static inline int gdsc_deassert_reset(struct gdsc *sc)
{
	int i;

	for (i = 0; i < sc->reset_count; i++)
		sc->rcdev->ops->deassert(sc->rcdev, sc->resets[i]);
	return 0;
}

static inline int gdsc_assert_reset(struct gdsc *sc)
{
	int i;

	for (i = 0; i < sc->reset_count; i++)
		sc->rcdev->ops->assert(sc->rcdev, sc->resets[i]);
	return 0;
}
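
/* Set or clear the memory (and, unless NO_RET_PERIPH, peripheral) retention bits in each CXC register. */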
static inline void gdsc_force_mem_on(struct gdsc *sc)
{
	int i;
	u32 mask = RETAIN_MEM;

	if (!(sc->flags & NO_RET_PERIPH))
		mask |= RETAIN_PERIPH;

	for (i = 0; i < sc->cxc_count; i++)
		regmap_update_bits(sc->regmap, sc->cxcs[i], mask, mask);
}

static inline void gdsc_clear_mem_on(struct gdsc *sc)
{
	int i;
	u32 mask = RETAIN_MEM;

	if (!(sc->flags & NO_RET_PERIPH))
		mask |= RETAIN_PERIPH;

	for (i = 0; i < sc->cxc_count; i++)
		regmap_update_bits(sc->regmap, sc->cxcs[i], mask, 0);
}

static inline void gdsc_deassert_clamp_io(struct gdsc *sc)
{
	regmap_update_bits(sc->regmap, sc->clamp_io_ctrl,
			   GMEM_CLAMP_IO_MASK, 0);
}

static inline void gdsc_assert_clamp_io(struct gdsc *sc)
{
	regmap_update_bits(sc->regmap, sc->clamp_io_ctrl,
			   GMEM_CLAMP_IO_MASK, 1);
}

static inline void gdsc_assert_reset_aon(struct gdsc *sc)
{
	regmap_update_bits(sc->regmap, sc->clamp_io_ctrl,
			   GMEM_RESET_MASK, 1);
	udelay(1);
	regmap_update_bits(sc->regmap, sc->clamp_io_ctrl,
			   GMEM_RESET_MASK, 0);
}
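
/* Enable flip-flop retention so register contents survive power collapse. */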
static void gdsc_retain_ff_on(struct gdsc *sc)
{
	u32 mask = GDSC_RETAIN_FF_ENABLE;

	regmap_update_bits(sc->regmap, sc->gdscr, mask, mask);
}
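
/* genpd power_on callback: bring the GDSC (and any associated resets/clamps) up. */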
static int gdsc_enable(struct generic_pm_domain *domain)
{
	struct gdsc *sc = domain_to_gdsc(domain);
	int ret;

	if (sc->pwrsts == PWRSTS_ON)
		return gdsc_deassert_reset(sc);

	if (sc->flags & SW_RESET) {
		gdsc_assert_reset(sc);
		udelay(1);
		gdsc_deassert_reset(sc);
	}

	if (sc->flags & CLAMP_IO) {
		if (sc->flags & AON_RESET)
			gdsc_assert_reset_aon(sc);
		gdsc_deassert_clamp_io(sc);
	}

	ret = gdsc_toggle_logic(sc, GDSC_ON, false);
	if (ret)
		return ret;

	if (sc->pwrsts & PWRSTS_OFF)
		gdsc_force_mem_on(sc);

	/*
	 * If clocks to this power domain were already on, they will take an
	 * additional 4 clock cycles to re-enable after the power domain is
	 * enabled. Delay to account for this. A delay is also needed to ensure
	 * clocks are not enabled within 400ns of enabling power to the
	 * memories.
	 */
	udelay(1);

	/* Turn on HW trigger mode if supported */
	if (sc->flags & HW_CTRL) {
		ret = gdsc_hwctrl(sc, true);
		if (ret)
			return ret;
		/*
		 * Wait for the GDSC to go through a power down and
		 * up cycle. In case a firmware ends up polling status
		 * bits for the gdsc, it might read an 'on' status before
		 * the GDSC can finish the power cycle.
		 * We wait 1us before returning to ensure the firmware
		 * can't immediately poll the status bits.
		 */
		udelay(1);
	}

	if (sc->flags & RETAIN_FF_ENABLE)
		gdsc_retain_ff_on(sc);

	return 0;
}
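
/* genpd power_off callback: power the GDSC down, or leave it on for retention-only domains. */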
static int gdsc_disable(struct generic_pm_domain *domain)
{
	struct gdsc *sc = domain_to_gdsc(domain);
	int ret;

	if (sc->pwrsts == PWRSTS_ON)
		return gdsc_assert_reset(sc);

	/* Turn off HW trigger mode if supported */
	if (sc->flags & HW_CTRL) {
		ret = gdsc_hwctrl(sc, false);
		if (ret < 0)
			return ret;
		/*
		 * Wait for the GDSC to go through a power down and
		 * up cycle. In case we end up polling status
		 * bits for the gdsc before the power cycle is completed
		 * it might read an 'on' status wrongly.
		 */
		udelay(1);

		ret = gdsc_poll_status(sc, GDSC_ON);
		if (ret)
			return ret;
	}

	if (sc->pwrsts & PWRSTS_OFF)
		gdsc_clear_mem_on(sc);

	/*
	 * If the GDSC supports only a Retention state, apart from ON,
	 * leave it in ON state.
	 * There is no SW control to transition the GDSC into
	 * Retention state. This happens in HW when the parent
	 * domain goes down to a Low power state
	 */
	if (sc->pwrsts == PWRSTS_RET_ON)
		return 0;

	ret = gdsc_toggle_logic(sc, GDSC_OFF, domain->synced_poweroff);
	if (ret)
		return ret;

	if (sc->flags & CLAMP_IO)
		gdsc_assert_clamp_io(sc);

	return 0;
}
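
/* genpd set_hwmode_dev callback: switch the GDSC between HW and SW control. */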
static int gdsc_set_hwmode(struct generic_pm_domain *domain, struct device *dev, bool mode)
{
	struct gdsc *sc = domain_to_gdsc(domain);
	int ret;

	ret = gdsc_hwctrl(sc, mode);
	if (ret)
		return ret;

	/*
	 * Wait for the GDSC to go through a power down and
	 * up cycle. If we poll the status register before the
	 * power cycle is finished we might read incorrect values.
	 */
	udelay(1);

	/*
	 * When the GDSC is switched to HW mode, HW can disable the GDSC.
	 * When the GDSC is switched back to SW mode, the GDSC will be enabled
	 * again, hence we need to poll for GDSC to complete the power up.
	 */
	if (!mode)
		return gdsc_poll_status(sc, GDSC_ON);

	return 0;
}
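
/* genpd get_hwmode_dev callback: report whether the GDSC is currently under HW control. */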
static bool gdsc_get_hwmode(struct generic_pm_domain *domain, struct device *dev)
{
	struct gdsc *sc = domain_to_gdsc(domain);
	u32 val;

	regmap_read(sc->regmap, sc->gdscr, &val);

	return !!(val & HW_CONTROL_MASK);
}
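
/*
 * Program the GDSC wait times, synchronize the kernel's view of the domain
 * with the current hardware state, and register it with the genpd framework.
 */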
static int gdsc_init(struct gdsc *sc)
{
	u32 mask, val;
	int on, ret;

	/*
	 * Disable HW trigger: collapse/restore occur based on register writes.
	 * Disable SW override: Use hardware state-machine for sequencing.
	 * Configure wait time between states.
	 */
	mask = HW_CONTROL_MASK | SW_OVERRIDE_MASK |
	       EN_REST_WAIT_MASK | EN_FEW_WAIT_MASK | CLK_DIS_WAIT_MASK;

	if (!sc->en_rest_wait_val)
		sc->en_rest_wait_val = EN_REST_WAIT_VAL;
	if (!sc->en_few_wait_val)
		sc->en_few_wait_val = EN_FEW_WAIT_VAL;
	if (!sc->clk_dis_wait_val)
		sc->clk_dis_wait_val = CLK_DIS_WAIT_VAL;

	val = sc->en_rest_wait_val << EN_REST_WAIT_SHIFT |
		sc->en_few_wait_val << EN_FEW_WAIT_SHIFT |
		sc->clk_dis_wait_val << CLK_DIS_WAIT_SHIFT;

	ret = regmap_update_bits(sc->regmap, sc->gdscr, mask, val);
	if (ret)
		return ret;

	/* Force gdsc ON if only ON state is supported */
	if (sc->pwrsts == PWRSTS_ON) {
		ret = gdsc_toggle_logic(sc, GDSC_ON, false);
		if (ret)
			return ret;
	}

	on = gdsc_check_status(sc, GDSC_ON);
	if (on < 0)
		return on;

	if (on) {
		/* The regulator must be on, sync the kernel state */
		if (sc->rsupply) {
			ret = regulator_enable(sc->rsupply);
			if (ret < 0)
				return ret;
		}

		/*
		 * Votable GDSCs can be ON due to Vote from other masters.
		 * If a Votable GDSC is ON, make sure we have a Vote.
		 */
		if (sc->flags & VOTABLE) {
			ret = gdsc_update_collapse_bit(sc, false);
			if (ret)
				goto err_disable_supply;
		}

		/* Turn on HW trigger mode if supported */
		if (sc->flags & HW_CTRL) {
			ret = gdsc_hwctrl(sc, true);
			if (ret < 0)
				goto err_disable_supply;
		}

		/*
		 * Make sure the retain bit is set if the GDSC is already on,
		 * otherwise we end up turning off the GDSC and destroying all
		 * the register contents that we thought we were saving.
		 */
		if (sc->flags & RETAIN_FF_ENABLE)
			gdsc_retain_ff_on(sc);
	} else if (sc->flags & ALWAYS_ON) {
		/* If ALWAYS_ON GDSCs are not ON, turn them ON */
		gdsc_enable(&sc->pd);
		on = true;
	}

	if (on || (sc->pwrsts & PWRSTS_RET))
		gdsc_force_mem_on(sc);
	else
		gdsc_clear_mem_on(sc);

	if (sc->flags & ALWAYS_ON)
		sc->pd.flags |= GENPD_FLAG_ALWAYS_ON;
	if (!sc->pd.power_off)
		sc->pd.power_off = gdsc_disable;
	if (!sc->pd.power_on)
		sc->pd.power_on = gdsc_enable;

	if (sc->flags & HW_CTRL_TRIGGER) {
		sc->pd.set_hwmode_dev = gdsc_set_hwmode;
		sc->pd.get_hwmode_dev = gdsc_get_hwmode;
	}

	ret = pm_genpd_init(&sc->pd, NULL, !on);
	if (ret)
		goto err_disable_supply;

	return 0;

err_disable_supply:
	if (on && sc->rsupply)
		regulator_disable(sc->rsupply);

	return ret;
}
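
/*
 * Initialize every GDSC described in desc, wire up parent/child power domains
 * and register them all as a genpd provider for the device's OF node.
 */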
int gdsc_register(struct gdsc_desc *desc,
		  struct reset_controller_dev *rcdev, struct regmap *regmap)
{
	int i, ret;
	struct genpd_onecell_data *data;
	struct device *dev = desc->dev;
	struct gdsc **scs = desc->scs;
	size_t num = desc->num;

	data = devm_kzalloc(dev, sizeof(*data), GFP_KERNEL);
	if (!data)
		return -ENOMEM;

	data->domains = devm_kcalloc(dev, num, sizeof(*data->domains),
				     GFP_KERNEL);
	if (!data->domains)
		return -ENOMEM;

	for (i = 0; i < num; i++) {
		if (!scs[i] || !scs[i]->supply)
			continue;

		scs[i]->rsupply = devm_regulator_get_optional(dev, scs[i]->supply);
		if (IS_ERR(scs[i]->rsupply)) {
			ret = PTR_ERR(scs[i]->rsupply);
			if (ret != -ENODEV)
				return ret;

			scs[i]->rsupply = NULL;
		}
	}

	data->num_domains = num;
	for (i = 0; i < num; i++) {
		if (!scs[i])
			continue;
		scs[i]->regmap = regmap;
		scs[i]->rcdev = rcdev;
		ret = gdsc_init(scs[i]);
		if (ret)
			return ret;
		data->domains[i] = &scs[i]->pd;
	}

	/* Add subdomains */
	for (i = 0; i < num; i++) {
		if (!scs[i])
			continue;
		if (scs[i]->parent)
			pm_genpd_add_subdomain(scs[i]->parent, &scs[i]->pd);
		else if (!IS_ERR_OR_NULL(dev->pm_domain))
			pm_genpd_add_subdomain(pd_to_genpd(dev->pm_domain), &scs[i]->pd);
	}

	return of_genpd_add_provider_onecell(dev->of_node, data);
}
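
/* Tear down the subdomain links and remove the genpd provider. */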
void gdsc_unregister(struct gdsc_desc *desc)
{
	int i;
	struct device *dev = desc->dev;
	struct gdsc **scs = desc->scs;
	size_t num = desc->num;

	/* Remove subdomains */
	for (i = 0; i < num; i++) {
		if (!scs[i])
			continue;
		if (scs[i]->parent)
			pm_genpd_remove_subdomain(scs[i]->parent, &scs[i]->pd);
		else if (!IS_ERR_OR_NULL(dev->pm_domain))
			pm_genpd_remove_subdomain(pd_to_genpd(dev->pm_domain), &scs[i]->pd);
	}
	of_genpd_del_provider(dev->of_node);
}

/*
 * On SDM845+ the GPU GX domain is *almost* entirely controlled by the GMU
 * running in the CX domain so the CPU doesn't need to know anything about the
 * GX domain EXCEPT....
 *
 * Hardware constraints dictate that the GX be powered down before the CX. If
 * the GMU crashes it could leave the GX on. In order to successfully bring back
 * the device the CPU needs to disable the GX headswitch. There being no sane
 * way to reach in and touch that register from deep inside the GPU driver, we
 * need to set up the infrastructure so that the GPU driver can ensure the GX
 * is off during this super special case. We do this by defining a GX gdsc with
 * a dummy enable function and a "default" disable function.
 *
 * This allows us to attach with genpd_dev_pm_attach_by_name() in the GPU
 * driver. During power up, nothing will happen from the CPU (and the GMU will
 * power up normally), but during power down this will ensure that the GX
 * domain is *really* off - this gives us a semi-standard way of doing what we
 * need.
 */
int gdsc_gx_do_nothing_enable(struct generic_pm_domain *domain)
{
	struct gdsc *sc = domain_to_gdsc(domain);
	int ret = 0;

	/* Enable the parent supply, when controlled through the regulator framework. */
	if (sc->rsupply)
		ret = regulator_enable(sc->rsupply);

	/* Do nothing with the GDSC itself */

	return ret;
}
EXPORT_SYMBOL_GPL(gdsc_gx_do_nothing_enable);