  1. // SPDX-License-Identifier: GPL-2.0-only
  2. /*
  3. * Synopsys HSDK SDP Generic PLL clock driver
  4. *
  5. * Copyright (C) 2017 Synopsys
  6. */
  7. #include <linux/clk-provider.h>
  8. #include <linux/delay.h>
  9. #include <linux/device.h>
  10. #include <linux/err.h>
  11. #include <linux/io.h>
  12. #include <linux/of.h>
  13. #include <linux/of_address.h>
  14. #include <linux/platform_device.h>
  15. #include <linux/slab.h>
/* CGU PLL register offsets within this PLL's register window */
#define CGU_PLL_CTRL 0x000 /* ARC PLL control register */
#define CGU_PLL_STATUS 0x004 /* ARC PLL status register */
#define CGU_PLL_FMEAS 0x008 /* ARC PLL frequency measurement register */
#define CGU_PLL_MON 0x00C /* ARC PLL monitor register */

/* CGU_PLL_CTRL divider field positions and masks */
#define CGU_PLL_CTRL_ODIV_SHIFT 2
#define CGU_PLL_CTRL_IDIV_SHIFT 4
#define CGU_PLL_CTRL_FBDIV_SHIFT 9
#define CGU_PLL_CTRL_BAND_SHIFT 20

#define CGU_PLL_CTRL_ODIV_MASK GENMASK(3, CGU_PLL_CTRL_ODIV_SHIFT)
#define CGU_PLL_CTRL_IDIV_MASK GENMASK(8, CGU_PLL_CTRL_IDIV_SHIFT)
#define CGU_PLL_CTRL_FBDIV_MASK GENMASK(15, CGU_PLL_CTRL_FBDIV_SHIFT)

/* CGU_PLL_CTRL control bits */
#define CGU_PLL_CTRL_PD BIT(0) /* power down the PLL */
#define CGU_PLL_CTRL_BYPASS BIT(1) /* pass reference clock straight through */

/* CGU_PLL_STATUS bits */
#define CGU_PLL_STATUS_LOCK BIT(0)
#define CGU_PLL_STATUS_ERR BIT(1)

/* Fixed relock wait after reprogramming (see *_update_rate helpers) */
#define HSDK_PLL_MAX_LOCK_TIME 100 /* 100 us */

/* Each PLL has exactly one reference clock input */
#define CGU_PLL_SOURCE_MAX 1

/* Core interface-clock divider handling (see hsdk_pll_core_update_rate) */
#define CORE_IF_CLK_THRESHOLD_HZ 500000000
#define CREG_CORE_IF_CLK_DIV_1 0x0
#define CREG_CORE_IF_CLK_DIV_2 0x1
/*
 * One rate-table entry: the divider register fields that produce @rate
 * from the reference clock (encoding documented in hsdk_pll_recalc_rate).
 */
struct hsdk_pll_cfg {
	u32 rate;	/* target output rate, Hz; 0 terminates a table */
	u32 idiv;	/* input divider field: divider = idiv + 1 */
	u32 fbdiv;	/* feedback divider field: divider = 2 * (fbdiv + 1) */
	u32 odiv;	/* output divider field: divider = 2^odiv */
	u32 band;	/* band select field, written as-is to CGU_PLL_CTRL */
	u32 bypass;	/* non-zero: bypass the PLL instead of using dividers */
};
/*
 * Rate table shared by the core PLL and the generic ("sdt") PLLs
 * (referenced by both core_pll_devdata and sdt_pll_devdata below).
 * Zero-rate entry terminates the table.
 */
static const struct hsdk_pll_cfg asdt_pll_cfg[] = {
	{ 100000000, 0, 11, 3, 0, 0 },
	{ 133000000, 0, 15, 3, 0, 0 },
	{ 200000000, 1, 47, 3, 0, 0 },
	{ 233000000, 1, 27, 2, 0, 0 },
	{ 300000000, 1, 35, 2, 0, 0 },
	{ 333000000, 1, 39, 2, 0, 0 },
	{ 400000000, 1, 47, 2, 0, 0 },
	{ 500000000, 0, 14, 1, 0, 0 },
	{ 600000000, 0, 17, 1, 0, 0 },
	{ 700000000, 0, 20, 1, 0, 0 },
	{ 800000000, 0, 23, 1, 0, 0 },
	{ 900000000, 1, 26, 0, 0, 0 },
	{ 1000000000, 1, 29, 0, 0, 0 },
	{ 1100000000, 1, 32, 0, 0, 0 },
	{ 1200000000, 1, 35, 0, 0, 0 },
	{ 1300000000, 1, 38, 0, 0, 0 },
	{ 1400000000, 1, 41, 0, 0, 0 },
	{ 1500000000, 1, 44, 0, 0, 0 },
	{ 1600000000, 1, 47, 0, 0, 0 },
	{}
};
/*
 * HDMI PLL rate table. The 27 MHz entry uses bypass mode (reference clock
 * passed through); the rest program real divider settings.
 */
static const struct hsdk_pll_cfg hdmi_pll_cfg[] = {
	{ 27000000, 0, 0, 0, 0, 1 },
	{ 148500000, 0, 21, 3, 0, 0 },
	{ 297000000, 0, 21, 2, 0, 0 },
	{ 540000000, 0, 19, 1, 0, 0 },
	{ 594000000, 0, 21, 1, 0, 0 },
	{}
};
/* Per-PLL driver state */
struct hsdk_pll_clk {
	struct clk_hw hw;	/* common clk framework handle */
	void __iomem *regs;	/* PLL register window (CGU_PLL_*) */
	void __iomem *spec_regs; /* extra regs; core PLL: interface-clock divider */
	const struct hsdk_pll_devdata *pll_devdata; /* rate table + update hook */
	struct device *dev;	/* NULL for the early OF_DECLARE core PLL path */
};
/*
 * Per-variant PLL description: which rate table the PLL supports and the
 * callback that programs one of those table entries into hardware.
 */
struct hsdk_pll_devdata {
	const struct hsdk_pll_cfg *pll_cfg; /* zero-terminated rate table */
	/* Program @cfg for @rate; returns 0 or negative errno */
	int (*update_rate)(struct hsdk_pll_clk *clk, unsigned long rate,
			   const struct hsdk_pll_cfg *cfg);
};
/* Forward declarations: the devdata tables below reference these callbacks. */
static int hsdk_pll_core_update_rate(struct hsdk_pll_clk *, unsigned long,
				     const struct hsdk_pll_cfg *);
static int hsdk_pll_comm_update_rate(struct hsdk_pll_clk *, unsigned long,
				     const struct hsdk_pll_cfg *);

/* Core (CPU) PLL: shares the asdt table but also manages the CREG divider */
static const struct hsdk_pll_devdata core_pll_devdata = {
	.pll_cfg = asdt_pll_cfg,
	.update_rate = hsdk_pll_core_update_rate,
};

/* Generic system PLL: same table, plain update sequence */
static const struct hsdk_pll_devdata sdt_pll_devdata = {
	.pll_cfg = asdt_pll_cfg,
	.update_rate = hsdk_pll_comm_update_rate,
};

/* HDMI PLL: own table, plain update sequence */
static const struct hsdk_pll_devdata hdmi_pll_devdata = {
	.pll_cfg = hdmi_pll_cfg,
	.update_rate = hsdk_pll_comm_update_rate,
};
/* Write @val to the PLL register at offset @reg. */
static inline void hsdk_pll_write(struct hsdk_pll_clk *clk, u32 reg, u32 val)
{
	iowrite32(val, clk->regs + reg);
}
/* Read the PLL register at offset @reg. */
static inline u32 hsdk_pll_read(struct hsdk_pll_clk *clk, u32 reg)
{
	return ioread32(clk->regs + reg);
}
  110. static inline void hsdk_pll_set_cfg(struct hsdk_pll_clk *clk,
  111. const struct hsdk_pll_cfg *cfg)
  112. {
  113. u32 val = 0;
  114. if (cfg->bypass) {
  115. val = hsdk_pll_read(clk, CGU_PLL_CTRL);
  116. val |= CGU_PLL_CTRL_BYPASS;
  117. } else {
  118. /* Powerdown and Bypass bits should be cleared */
  119. val |= cfg->idiv << CGU_PLL_CTRL_IDIV_SHIFT;
  120. val |= cfg->fbdiv << CGU_PLL_CTRL_FBDIV_SHIFT;
  121. val |= cfg->odiv << CGU_PLL_CTRL_ODIV_SHIFT;
  122. val |= cfg->band << CGU_PLL_CTRL_BAND_SHIFT;
  123. }
  124. dev_dbg(clk->dev, "write configuration: %#x\n", val);
  125. hsdk_pll_write(clk, CGU_PLL_CTRL, val);
  126. }
/* True when the status register reports PLL lock. */
static inline bool hsdk_pll_is_locked(struct hsdk_pll_clk *clk)
{
	return !!(hsdk_pll_read(clk, CGU_PLL_STATUS) & CGU_PLL_STATUS_LOCK);
}
/* True when the status register reports a PLL error. */
static inline bool hsdk_pll_is_err(struct hsdk_pll_clk *clk)
{
	return !!(hsdk_pll_read(clk, CGU_PLL_STATUS) & CGU_PLL_STATUS_ERR);
}
/* Recover driver state from the embedded clk_hw handle. */
static inline struct hsdk_pll_clk *to_hsdk_pll_clk(struct clk_hw *hw)
{
	return container_of(hw, struct hsdk_pll_clk, hw);
}
  139. static unsigned long hsdk_pll_recalc_rate(struct clk_hw *hw,
  140. unsigned long parent_rate)
  141. {
  142. u32 val;
  143. u64 rate;
  144. u32 idiv, fbdiv, odiv;
  145. struct hsdk_pll_clk *clk = to_hsdk_pll_clk(hw);
  146. val = hsdk_pll_read(clk, CGU_PLL_CTRL);
  147. dev_dbg(clk->dev, "current configuration: %#x\n", val);
  148. /* Check if PLL is bypassed */
  149. if (val & CGU_PLL_CTRL_BYPASS)
  150. return parent_rate;
  151. /* Check if PLL is disabled */
  152. if (val & CGU_PLL_CTRL_PD)
  153. return 0;
  154. /* input divider = reg.idiv + 1 */
  155. idiv = 1 + ((val & CGU_PLL_CTRL_IDIV_MASK) >> CGU_PLL_CTRL_IDIV_SHIFT);
  156. /* fb divider = 2*(reg.fbdiv + 1) */
  157. fbdiv = 2 * (1 + ((val & CGU_PLL_CTRL_FBDIV_MASK) >> CGU_PLL_CTRL_FBDIV_SHIFT));
  158. /* output divider = 2^(reg.odiv) */
  159. odiv = 1 << ((val & CGU_PLL_CTRL_ODIV_MASK) >> CGU_PLL_CTRL_ODIV_SHIFT);
  160. rate = (u64)parent_rate * fbdiv;
  161. do_div(rate, idiv * odiv);
  162. return rate;
  163. }
  164. static long hsdk_pll_round_rate(struct clk_hw *hw, unsigned long rate,
  165. unsigned long *prate)
  166. {
  167. int i;
  168. unsigned long best_rate;
  169. struct hsdk_pll_clk *clk = to_hsdk_pll_clk(hw);
  170. const struct hsdk_pll_cfg *pll_cfg = clk->pll_devdata->pll_cfg;
  171. if (pll_cfg[0].rate == 0)
  172. return -EINVAL;
  173. best_rate = pll_cfg[0].rate;
  174. for (i = 1; pll_cfg[i].rate != 0; i++) {
  175. if (abs(rate - pll_cfg[i].rate) < abs(rate - best_rate))
  176. best_rate = pll_cfg[i].rate;
  177. }
  178. dev_dbg(clk->dev, "chosen best rate: %lu\n", best_rate);
  179. return best_rate;
  180. }
  181. static int hsdk_pll_comm_update_rate(struct hsdk_pll_clk *clk,
  182. unsigned long rate,
  183. const struct hsdk_pll_cfg *cfg)
  184. {
  185. hsdk_pll_set_cfg(clk, cfg);
  186. /*
  187. * Wait until CGU relocks and check error status.
  188. * If after timeout CGU is unlocked yet return error.
  189. */
  190. udelay(HSDK_PLL_MAX_LOCK_TIME);
  191. if (!hsdk_pll_is_locked(clk))
  192. return -ETIMEDOUT;
  193. if (hsdk_pll_is_err(clk))
  194. return -EINVAL;
  195. return 0;
  196. }
  197. static int hsdk_pll_core_update_rate(struct hsdk_pll_clk *clk,
  198. unsigned long rate,
  199. const struct hsdk_pll_cfg *cfg)
  200. {
  201. /*
  202. * When core clock exceeds 500MHz, the divider for the interface
  203. * clock must be programmed to div-by-2.
  204. */
  205. if (rate > CORE_IF_CLK_THRESHOLD_HZ)
  206. iowrite32(CREG_CORE_IF_CLK_DIV_2, clk->spec_regs);
  207. hsdk_pll_set_cfg(clk, cfg);
  208. /*
  209. * Wait until CGU relocks and check error status.
  210. * If after timeout CGU is unlocked yet return error.
  211. */
  212. udelay(HSDK_PLL_MAX_LOCK_TIME);
  213. if (!hsdk_pll_is_locked(clk))
  214. return -ETIMEDOUT;
  215. if (hsdk_pll_is_err(clk))
  216. return -EINVAL;
  217. /*
  218. * Program divider to div-by-1 if we succesfuly set core clock below
  219. * 500MHz threshold.
  220. */
  221. if (rate <= CORE_IF_CLK_THRESHOLD_HZ)
  222. iowrite32(CREG_CORE_IF_CLK_DIV_1, clk->spec_regs);
  223. return 0;
  224. }
  225. static int hsdk_pll_set_rate(struct clk_hw *hw, unsigned long rate,
  226. unsigned long parent_rate)
  227. {
  228. int i;
  229. struct hsdk_pll_clk *clk = to_hsdk_pll_clk(hw);
  230. const struct hsdk_pll_cfg *pll_cfg = clk->pll_devdata->pll_cfg;
  231. for (i = 0; pll_cfg[i].rate != 0; i++) {
  232. if (pll_cfg[i].rate == rate) {
  233. return clk->pll_devdata->update_rate(clk, rate,
  234. &pll_cfg[i]);
  235. }
  236. }
  237. dev_err(clk->dev, "invalid rate=%ld, parent_rate=%ld\n", rate,
  238. parent_rate);
  239. return -EINVAL;
  240. }
/*
 * Common clk_ops for all HSDK PLL variants: rates come only from the
 * devdata rate table; round_rate snaps to the nearest entry and set_rate
 * accepts exact table entries only.
 */
static const struct clk_ops hsdk_pll_ops = {
	.recalc_rate = hsdk_pll_recalc_rate,
	.round_rate = hsdk_pll_round_rate,
	.set_rate = hsdk_pll_set_rate,
};
  246. static int hsdk_pll_clk_probe(struct platform_device *pdev)
  247. {
  248. int ret;
  249. const char *parent_name;
  250. unsigned int num_parents;
  251. struct hsdk_pll_clk *pll_clk;
  252. struct clk_init_data init = { };
  253. struct device *dev = &pdev->dev;
  254. pll_clk = devm_kzalloc(dev, sizeof(*pll_clk), GFP_KERNEL);
  255. if (!pll_clk)
  256. return -ENOMEM;
  257. pll_clk->regs = devm_platform_ioremap_resource(pdev, 0);
  258. if (IS_ERR(pll_clk->regs))
  259. return PTR_ERR(pll_clk->regs);
  260. init.name = dev->of_node->name;
  261. init.ops = &hsdk_pll_ops;
  262. parent_name = of_clk_get_parent_name(dev->of_node, 0);
  263. init.parent_names = &parent_name;
  264. num_parents = of_clk_get_parent_count(dev->of_node);
  265. if (num_parents == 0 || num_parents > CGU_PLL_SOURCE_MAX) {
  266. dev_err(dev, "wrong clock parents number: %u\n", num_parents);
  267. return -EINVAL;
  268. }
  269. init.num_parents = num_parents;
  270. pll_clk->hw.init = &init;
  271. pll_clk->dev = dev;
  272. pll_clk->pll_devdata = of_device_get_match_data(dev);
  273. if (!pll_clk->pll_devdata) {
  274. dev_err(dev, "No OF match data provided\n");
  275. return -EINVAL;
  276. }
  277. ret = devm_clk_hw_register(dev, &pll_clk->hw);
  278. if (ret) {
  279. dev_err(dev, "failed to register %s clock\n", init.name);
  280. return ret;
  281. }
  282. return devm_of_clk_add_hw_provider(dev, of_clk_hw_simple_get,
  283. &pll_clk->hw);
  284. }
  285. static void __init of_hsdk_pll_clk_setup(struct device_node *node)
  286. {
  287. int ret;
  288. const char *parent_name;
  289. unsigned int num_parents;
  290. struct hsdk_pll_clk *pll_clk;
  291. struct clk_init_data init = { };
  292. pll_clk = kzalloc(sizeof(*pll_clk), GFP_KERNEL);
  293. if (!pll_clk)
  294. return;
  295. pll_clk->regs = of_iomap(node, 0);
  296. if (!pll_clk->regs) {
  297. pr_err("failed to map pll registers\n");
  298. goto err_free_pll_clk;
  299. }
  300. pll_clk->spec_regs = of_iomap(node, 1);
  301. if (!pll_clk->spec_regs) {
  302. pr_err("failed to map pll registers\n");
  303. goto err_unmap_comm_regs;
  304. }
  305. init.name = node->name;
  306. init.ops = &hsdk_pll_ops;
  307. parent_name = of_clk_get_parent_name(node, 0);
  308. init.parent_names = &parent_name;
  309. num_parents = of_clk_get_parent_count(node);
  310. if (num_parents > CGU_PLL_SOURCE_MAX) {
  311. pr_err("too much clock parents: %u\n", num_parents);
  312. goto err_unmap_spec_regs;
  313. }
  314. init.num_parents = num_parents;
  315. pll_clk->hw.init = &init;
  316. pll_clk->pll_devdata = &core_pll_devdata;
  317. ret = clk_hw_register(NULL, &pll_clk->hw);
  318. if (ret) {
  319. pr_err("failed to register %pOFn clock\n", node);
  320. goto err_unmap_spec_regs;
  321. }
  322. ret = of_clk_add_hw_provider(node, of_clk_hw_simple_get, &pll_clk->hw);
  323. if (ret) {
  324. pr_err("failed to add hw provider for %pOFn clock\n", node);
  325. goto err_unmap_spec_regs;
  326. }
  327. return;
  328. err_unmap_spec_regs:
  329. iounmap(pll_clk->spec_regs);
  330. err_unmap_comm_regs:
  331. iounmap(pll_clk->regs);
  332. err_free_pll_clk:
  333. kfree(pll_clk);
  334. }
  335. /* Core PLL needed early for ARC cpus timers */
  336. CLK_OF_DECLARE(hsdk_pll_clock, "snps,hsdk-core-pll-clock",
  337. of_hsdk_pll_clk_setup);
/*
 * OF match table for the platform-driver path (generic and HDMI PLLs);
 * the core PLL is registered earlier via CLK_OF_DECLARE instead.
 */
static const struct of_device_id hsdk_pll_clk_id[] = {
	{ .compatible = "snps,hsdk-gp-pll-clock", .data = &sdt_pll_devdata},
	{ .compatible = "snps,hsdk-hdmi-pll-clock", .data = &hdmi_pll_devdata},
	{ }
};

static struct platform_driver hsdk_pll_clk_driver = {
	.driver = {
		.name = "hsdk-gp-pll-clock",
		.of_match_table = hsdk_pll_clk_id,
	},
	.probe = hsdk_pll_clk_probe,
};
builtin_platform_driver(hsdk_pll_clk_driver);