clk-cbf-8996.c

// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2022, 2023 Linaro Ltd.
 */
#include <linux/bitfield.h>
#include <linux/clk.h>
#include <linux/clk-provider.h>
#include <linux/interconnect-clk.h>
#include <linux/interconnect-provider.h>
#include <linux/of.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/regmap.h>

#include <dt-bindings/interconnect/qcom,msm8996-cbf.h>

#include "clk-alpha-pll.h"
#include "clk-regmap.h"

/* Need to match the order of clocks in DT binding */
enum {
	DT_XO,
	DT_APCS_AUX,
};
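
/* Parent indices of the CBF mux, matching cbf_mux_parent_data[] below */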
enum {
	CBF_XO_INDEX,
	CBF_PLL_INDEX,
	CBF_DIV_INDEX,
	CBF_APCS_AUX_INDEX,
};

#define DIV_THRESHOLD		600000000

#define CBF_MUX_OFFSET		0x18
#define CBF_MUX_PARENT_MASK				GENMASK(1, 0)
#define CBF_MUX_AUTO_CLK_SEL_ALWAYS_ON_MASK		GENMASK(5, 4)
#define CBF_MUX_AUTO_CLK_SEL_ALWAYS_ON_GPLL0_SEL	\
	FIELD_PREP(CBF_MUX_AUTO_CLK_SEL_ALWAYS_ON_MASK, 0x03)
#define CBF_MUX_AUTO_CLK_SEL_BIT			BIT(6)

#define CBF_PLL_OFFSET 0xf000
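
/*
 * Initial CBF PLL configuration: L = 72 against the 19.2 MHz XO reference,
 * with the post divider programmed for the /2 output that cbf_pll_postdiv
 * below models.
 */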
static struct alpha_pll_config cbfpll_config = {
	.l = 72,
	.config_ctl_val = 0x200d4828,
	.config_ctl_hi_val = 0x006,
	.test_ctl_val = 0x1c000000,
	.test_ctl_hi_val = 0x00004000,
	.pre_div_mask = BIT(12),
	.post_div_mask = 0x3 << 8,
	.post_div_val = 0x1 << 8,
	.main_output_mask = BIT(0),
	.early_output_mask = BIT(3),
};
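
/* The CBF PLL: a Huayra-type alpha PLL controlled through the hardware FSM */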
static struct clk_alpha_pll cbf_pll = {
	.offset = CBF_PLL_OFFSET,
	.regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_HUAYRA_APSS],
	.flags = SUPPORTS_DYNAMIC_UPDATE | SUPPORTS_FSM_MODE,
	.clkr.hw.init = &(struct clk_init_data){
		.name = "cbf_pll",
		.parent_data = (const struct clk_parent_data[]) {
			{ .index = DT_XO, },
		},
		.num_parents = 1,
		.ops = &clk_alpha_pll_hwfsm_ops,
	},
};
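
/*
 * Fixed-factor /2 clock modelling the CBF PLL post divider; the probe
 * function switches it to /4 on MSM8996 Pro.
 */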
static struct clk_fixed_factor cbf_pll_postdiv = {
	.mult = 1,
	.div = 2,
	.hw.init = &(struct clk_init_data){
		.name = "cbf_pll_postdiv",
		.parent_hws = (const struct clk_hw*[]){
			&cbf_pll.clkr.hw
		},
		.num_parents = 1,
		.ops = &clk_fixed_factor_ops,
		.flags = CLK_SET_RATE_PARENT,
	},
};

static const struct clk_parent_data cbf_mux_parent_data[] = {
	{ .index = DT_XO },
	{ .hw = &cbf_pll.clkr.hw },
	{ .hw = &cbf_pll_postdiv.hw },
	{ .index = DT_APCS_AUX },
};
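
/*
 * The CBF mux: a regmap-backed mux clock with a rate-change notifier that
 * switches to PLL/2 ahead of certain PLL rate changes (see
 * cbf_clk_notifier_cb()).
 */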
struct clk_cbf_8996_mux {
	u32 reg;
	struct notifier_block nb;
	struct clk_regmap clkr;
};

static struct clk_cbf_8996_mux *to_clk_cbf_8996_mux(struct clk_regmap *clkr)
{
	return container_of(clkr, struct clk_cbf_8996_mux, clkr);
}

static int cbf_clk_notifier_cb(struct notifier_block *nb, unsigned long event,
			       void *data);

static u8 clk_cbf_8996_mux_get_parent(struct clk_hw *hw)
{
	struct clk_regmap *clkr = to_clk_regmap(hw);
	struct clk_cbf_8996_mux *mux = to_clk_cbf_8996_mux(clkr);
	u32 val;

	regmap_read(clkr->regmap, mux->reg, &val);

	return FIELD_GET(CBF_MUX_PARENT_MASK, val);
}

static int clk_cbf_8996_mux_set_parent(struct clk_hw *hw, u8 index)
{
	struct clk_regmap *clkr = to_clk_regmap(hw);
	struct clk_cbf_8996_mux *mux = to_clk_cbf_8996_mux(clkr);
	u32 val;

	val = FIELD_PREP(CBF_MUX_PARENT_MASK, index);

	return regmap_update_bits(clkr->regmap, mux->reg, CBF_MUX_PARENT_MASK, val);
}
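
/*
 * Rates below DIV_THRESHOLD are served from the divided PLL output
 * (CBF_DIV_INDEX), higher rates from the PLL directly (CBF_PLL_INDEX);
 * anything below DIV_THRESHOLD / postdiv is rejected.
 */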
static int clk_cbf_8996_mux_determine_rate(struct clk_hw *hw,
					   struct clk_rate_request *req)
{
	struct clk_hw *parent;

	if (req->rate < (DIV_THRESHOLD / cbf_pll_postdiv.div))
		return -EINVAL;

	if (req->rate < DIV_THRESHOLD)
		parent = clk_hw_get_parent_by_index(hw, CBF_DIV_INDEX);
	else
		parent = clk_hw_get_parent_by_index(hw, CBF_PLL_INDEX);

	if (!parent)
		return -EINVAL;

	req->best_parent_rate = clk_hw_round_rate(parent, req->rate);
	req->best_parent_hw = parent;

	return 0;
}

static const struct clk_ops clk_cbf_8996_mux_ops = {
	.set_parent = clk_cbf_8996_mux_set_parent,
	.get_parent = clk_cbf_8996_mux_get_parent,
	.determine_rate = clk_cbf_8996_mux_determine_rate,
};

static struct clk_cbf_8996_mux cbf_mux = {
	.reg = CBF_MUX_OFFSET,
	.nb.notifier_call = cbf_clk_notifier_cb,
	.clkr.hw.init = &(struct clk_init_data) {
		.name = "cbf_mux",
		.parent_data = cbf_mux_parent_data,
		.num_parents = ARRAY_SIZE(cbf_mux_parent_data),
		.ops = &clk_cbf_8996_mux_ops,
		/* CPU clock is critical and should never be gated */
		.flags = CLK_SET_RATE_PARENT | CLK_IS_CRITICAL,
	},
};
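
/*
 * Notifier on the CBF mux clock: a PRE_RATE_CHANGE that crosses below
 * DIV_THRESHOLD moves the mux to PLL/2 by hand, before the framework lowers
 * the PLL rate; ABORT_RATE_CHANGE reverts that switch.
 */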
static int cbf_clk_notifier_cb(struct notifier_block *nb, unsigned long event,
			       void *data)
{
	struct clk_notifier_data *cnd = data;

	switch (event) {
	case PRE_RATE_CHANGE:
		/*
		 * Avoid overvolting. clk_core_set_rate_nolock() walks from top
		 * to bottom, so it will change the rate of the PLL before
		 * changing the parent of the mux. This can result in the mux
		 * getting clocked at twice the expected rate.
		 *
		 * Manually switch to PLL/2 here.
		 */
		if (cnd->old_rate > DIV_THRESHOLD &&
		    cnd->new_rate < DIV_THRESHOLD)
			clk_cbf_8996_mux_set_parent(&cbf_mux.clkr.hw, CBF_DIV_INDEX);
		break;
	case ABORT_RATE_CHANGE:
		/* Revert manual change */
		if (cnd->new_rate < DIV_THRESHOLD &&
		    cnd->old_rate > DIV_THRESHOLD)
			clk_cbf_8996_mux_set_parent(&cbf_mux.clkr.hw, CBF_PLL_INDEX);
		break;
	default:
		break;
	}

	return notifier_from_errno(0);
}

static struct clk_hw *cbf_msm8996_hw_clks[] = {
	&cbf_pll_postdiv.hw,
};

static struct clk_regmap *cbf_msm8996_clks[] = {
	&cbf_pll.clkr,
	&cbf_mux.clkr,
};

static const struct regmap_config cbf_msm8996_regmap_config = {
	.reg_bits = 32,
	.reg_stride = 4,
	.val_bits = 32,
	.max_register = 0x10000,
	.fast_io = true,
	.val_format_endian = REGMAP_ENDIAN_LITTLE,
};
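
/*
 * When CONFIG_INTERCONNECT is enabled, the CBF clock is exposed as a simple
 * clk-based interconnect provider, so that bandwidth votes on the
 * MASTER_CBF_M4M/SLAVE_CBF_M4M path translate into CBF rate changes.
 * Otherwise the clock stays at the rate programmed during probe.
 */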
#ifdef CONFIG_INTERCONNECT

/* Random ID that doesn't clash with main qnoc and OSM */
#define CBF_MASTER_NODE 2000

static int qcom_msm8996_cbf_icc_register(struct platform_device *pdev, struct clk_hw *cbf_hw)
{
	struct device *dev = &pdev->dev;
	struct clk *clk = devm_clk_hw_get_clk(dev, cbf_hw, "cbf");
	const struct icc_clk_data data[] = {
		{
			.clk = clk,
			.name = "cbf",
			.master_id = MASTER_CBF_M4M,
			.slave_id = SLAVE_CBF_M4M,
		},
	};
	struct icc_provider *provider;

	provider = icc_clk_register(dev, CBF_MASTER_NODE, ARRAY_SIZE(data), data);
	if (IS_ERR(provider))
		return PTR_ERR(provider);

	platform_set_drvdata(pdev, provider);

	return 0;
}

static void qcom_msm8996_cbf_icc_remove(struct platform_device *pdev)
{
	struct icc_provider *provider = platform_get_drvdata(pdev);

	icc_clk_unregister(provider);
}
#define qcom_msm8996_cbf_icc_sync_state icc_sync_state
#else
static int qcom_msm8996_cbf_icc_register(struct platform_device *pdev, struct clk_hw *cbf_hw)
{
	dev_warn(&pdev->dev, "CONFIG_INTERCONNECT is disabled, CBF clock is fixed\n");

	return 0;
}
#define qcom_msm8996_cbf_icc_remove(pdev) { }
#define qcom_msm8996_cbf_icc_sync_state NULL
#endif
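
/*
 * Probe parks the CBF on GPLL0 as a safe source, programs and locks the CBF
 * PLL, enables automatic clock selection, switches the CBF back to the PLL
 * and only then registers the clocks, the rate-change notifier and the
 * interconnect provider.
 */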
static int qcom_msm8996_cbf_probe(struct platform_device *pdev)
{
	void __iomem *base;
	struct regmap *regmap;
	struct device *dev = &pdev->dev;
	int i, ret;

	base = devm_platform_ioremap_resource(pdev, 0);
	if (IS_ERR(base))
		return PTR_ERR(base);

	regmap = devm_regmap_init_mmio(dev, base, &cbf_msm8996_regmap_config);
	if (IS_ERR(regmap))
		return PTR_ERR(regmap);

	/* Run the CBF clock at 300 MHz from GPLL0 */
	regmap_write(regmap, CBF_MUX_OFFSET, 0x3);

	/* Ensure the write goes through before PLLs are reconfigured */
	udelay(5);

	/* Set the auto clock sel always-on source to GPLL0/2 (300 MHz) */
	regmap_update_bits(regmap, CBF_MUX_OFFSET,
			   CBF_MUX_AUTO_CLK_SEL_ALWAYS_ON_MASK,
			   CBF_MUX_AUTO_CLK_SEL_ALWAYS_ON_GPLL0_SEL);

	clk_alpha_pll_configure(&cbf_pll, regmap, &cbfpll_config);

	/* Wait for the PLL(s) to lock */
	udelay(50);

	/* Enable auto clock selection for CBF */
	regmap_update_bits(regmap, CBF_MUX_OFFSET,
			   CBF_MUX_AUTO_CLK_SEL_BIT,
			   CBF_MUX_AUTO_CLK_SEL_BIT);

	/* Ensure the write goes through before muxes are switched */
	udelay(5);

	/* Switch CBF to use the primary PLL */
	regmap_update_bits(regmap, CBF_MUX_OFFSET, CBF_MUX_PARENT_MASK, 0x1);
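
	/* MSM8996 Pro parts use a deeper (/4) post divider on the CBF PLL */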
	if (of_device_is_compatible(dev->of_node, "qcom,msm8996pro-cbf")) {
		cbfpll_config.post_div_val = 0x3 << 8;
		cbf_pll_postdiv.div = 4;
	}

	for (i = 0; i < ARRAY_SIZE(cbf_msm8996_hw_clks); i++) {
		ret = devm_clk_hw_register(dev, cbf_msm8996_hw_clks[i]);
		if (ret)
			return ret;
	}

	for (i = 0; i < ARRAY_SIZE(cbf_msm8996_clks); i++) {
		ret = devm_clk_register_regmap(dev, cbf_msm8996_clks[i]);
		if (ret)
			return ret;
	}

	ret = devm_clk_notifier_register(dev, cbf_mux.clkr.hw.clk, &cbf_mux.nb);
	if (ret)
		return ret;

	ret = devm_of_clk_add_hw_provider(dev, of_clk_hw_simple_get, &cbf_mux.clkr.hw);
	if (ret)
		return ret;

	return qcom_msm8996_cbf_icc_register(pdev, &cbf_mux.clkr.hw);
}

static void qcom_msm8996_cbf_remove(struct platform_device *pdev)
{
	qcom_msm8996_cbf_icc_remove(pdev);
}

static const struct of_device_id qcom_msm8996_cbf_match_table[] = {
	{ .compatible = "qcom,msm8996-cbf" },
	{ .compatible = "qcom,msm8996pro-cbf" },
	{ /* sentinel */ },
};
MODULE_DEVICE_TABLE(of, qcom_msm8996_cbf_match_table);

static struct platform_driver qcom_msm8996_cbf_driver = {
	.probe = qcom_msm8996_cbf_probe,
	.remove = qcom_msm8996_cbf_remove,
	.driver = {
		.name = "qcom-msm8996-cbf",
		.of_match_table = qcom_msm8996_cbf_match_table,
		.sync_state = qcom_msm8996_cbf_icc_sync_state,
	},
};

/* Register early so the CBF clock is set up before the other cores use it */
static int __init qcom_msm8996_cbf_init(void)
{
	return platform_driver_register(&qcom_msm8996_cbf_driver);
}
postcore_initcall(qcom_msm8996_cbf_init);

static void __exit qcom_msm8996_cbf_exit(void)
{
	platform_driver_unregister(&qcom_msm8996_cbf_driver);
}
module_exit(qcom_msm8996_cbf_exit);

MODULE_DESCRIPTION("QCOM MSM8996 CPU Bus Fabric Clock Driver");
MODULE_LICENSE("GPL");