/*
 * Synopsys HSDK SDP Generic PLL clock driver
 *
 * Copyright (C) 2017 Synopsys
 *
 * This file is licensed under the terms of the GNU General Public
 * License version 2. This program is licensed "as is" without any
 * warranty of any kind, whether express or implied.
 */
#include <linux/clk-provider.h>
#include <linux/delay.h>
#include <linux/device.h>
#include <linux/err.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/of_device.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#define CGU_PLL_CTRL			0x000 /* ARC PLL control register */
#define CGU_PLL_STATUS			0x004 /* ARC PLL status register */
#define CGU_PLL_FMEAS			0x008 /* ARC PLL frequency measurement register */
#define CGU_PLL_MON			0x00C /* ARC PLL monitor register */

#define CGU_PLL_CTRL_ODIV_SHIFT		2
#define CGU_PLL_CTRL_IDIV_SHIFT		4
#define CGU_PLL_CTRL_FBDIV_SHIFT	9
#define CGU_PLL_CTRL_BAND_SHIFT		20

#define CGU_PLL_CTRL_ODIV_MASK		GENMASK(3, CGU_PLL_CTRL_ODIV_SHIFT)
#define CGU_PLL_CTRL_IDIV_MASK		GENMASK(8, CGU_PLL_CTRL_IDIV_SHIFT)
#define CGU_PLL_CTRL_FBDIV_MASK		GENMASK(15, CGU_PLL_CTRL_FBDIV_SHIFT)

#define CGU_PLL_CTRL_PD			BIT(0)
#define CGU_PLL_CTRL_BYPASS		BIT(1)

#define CGU_PLL_STATUS_LOCK		BIT(0)
#define CGU_PLL_STATUS_ERR		BIT(1)

#define HSDK_PLL_MAX_LOCK_TIME		100 /* 100 us */

#define CGU_PLL_SOURCE_MAX		1

#define CORE_IF_CLK_THRESHOLD_HZ	500000000
#define CREG_CORE_IF_CLK_DIV_1		0x0
#define CREG_CORE_IF_CLK_DIV_2		0x1
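
/*
 * One supported PLL setting: the target output rate plus the raw idiv,
 * fbdiv, odiv and band field values that get programmed into CGU_PLL_CTRL.
 */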
struct hsdk_pll_cfg {
	u32 rate;
	u32 idiv;
	u32 fbdiv;
	u32 odiv;
	u32 band;
};
static const struct hsdk_pll_cfg asdt_pll_cfg[] = {
	{ 100000000,  0, 11, 3, 0 },
	{ 133000000,  0, 15, 3, 0 },
	{ 200000000,  1, 47, 3, 0 },
	{ 233000000,  1, 27, 2, 0 },
	{ 300000000,  1, 35, 2, 0 },
	{ 333000000,  1, 39, 2, 0 },
	{ 400000000,  1, 47, 2, 0 },
	{ 500000000,  0, 14, 1, 0 },
	{ 600000000,  0, 17, 1, 0 },
	{ 700000000,  0, 20, 1, 0 },
	{ 800000000,  0, 23, 1, 0 },
	{ 900000000,  1, 26, 0, 0 },
	{ 1000000000, 1, 29, 0, 0 },
	{ 1100000000, 1, 32, 0, 0 },
	{ 1200000000, 1, 35, 0, 0 },
	{ 1300000000, 1, 38, 0, 0 },
	{ 1400000000, 1, 41, 0, 0 },
	{ 1500000000, 1, 44, 0, 0 },
	{ 1600000000, 1, 47, 0, 0 },
	{}
};

static const struct hsdk_pll_cfg hdmi_pll_cfg[] = {
	{ 297000000, 0, 21, 2, 0 },
	{ 540000000, 0, 19, 1, 0 },
	{ 594000000, 0, 21, 1, 0 },
	{}
};
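
/*
 * Runtime state of one PLL instance:
 * @hw:          clk framework handle
 * @regs:        CGU PLL register base (CGU_PLL_*)
 * @spec_regs:   CREG register holding the core interface clock divider;
 *               only mapped for the core PLL (second DT register range)
 * @pll_devdata: per-PLL-type rate table and update callback
 * @dev:         associated device (not set for the early-registered core PLL)
 */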
struct hsdk_pll_clk {
	struct clk_hw hw;
	void __iomem *regs;
	void __iomem *spec_regs;
	const struct hsdk_pll_devdata *pll_devdata;
	struct device *dev;
};
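
/*
 * Per-PLL-type data: the table of supported rates and the callback used to
 * apply a new rate (the core PLL needs extra handling for the CPU interface
 * clock divider, see hsdk_pll_core_update_rate()).
 */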
struct hsdk_pll_devdata {
	const struct hsdk_pll_cfg *pll_cfg;
	int (*update_rate)(struct hsdk_pll_clk *clk, unsigned long rate,
			   const struct hsdk_pll_cfg *cfg);
};

static int hsdk_pll_core_update_rate(struct hsdk_pll_clk *, unsigned long,
				     const struct hsdk_pll_cfg *);
static int hsdk_pll_comm_update_rate(struct hsdk_pll_clk *, unsigned long,
				     const struct hsdk_pll_cfg *);

static const struct hsdk_pll_devdata core_pll_devdata = {
	.pll_cfg = asdt_pll_cfg,
	.update_rate = hsdk_pll_core_update_rate,
};

static const struct hsdk_pll_devdata sdt_pll_devdata = {
	.pll_cfg = asdt_pll_cfg,
	.update_rate = hsdk_pll_comm_update_rate,
};

static const struct hsdk_pll_devdata hdmi_pll_devdata = {
	.pll_cfg = hdmi_pll_cfg,
	.update_rate = hsdk_pll_comm_update_rate,
};
static inline void hsdk_pll_write(struct hsdk_pll_clk *clk, u32 reg, u32 val)
{
	iowrite32(val, clk->regs + reg);
}

static inline u32 hsdk_pll_read(struct hsdk_pll_clk *clk, u32 reg)
{
	return ioread32(clk->regs + reg);
}

static inline void hsdk_pll_set_cfg(struct hsdk_pll_clk *clk,
				    const struct hsdk_pll_cfg *cfg)
{
	u32 val = 0;

	/* Powerdown and Bypass bits should be cleared */
	val |= cfg->idiv << CGU_PLL_CTRL_IDIV_SHIFT;
	val |= cfg->fbdiv << CGU_PLL_CTRL_FBDIV_SHIFT;
	val |= cfg->odiv << CGU_PLL_CTRL_ODIV_SHIFT;
	val |= cfg->band << CGU_PLL_CTRL_BAND_SHIFT;

	dev_dbg(clk->dev, "write configuration: %#x\n", val);

	hsdk_pll_write(clk, CGU_PLL_CTRL, val);
}

static inline bool hsdk_pll_is_locked(struct hsdk_pll_clk *clk)
{
	return !!(hsdk_pll_read(clk, CGU_PLL_STATUS) & CGU_PLL_STATUS_LOCK);
}

static inline bool hsdk_pll_is_err(struct hsdk_pll_clk *clk)
{
	return !!(hsdk_pll_read(clk, CGU_PLL_STATUS) & CGU_PLL_STATUS_ERR);
}

static inline struct hsdk_pll_clk *to_hsdk_pll_clk(struct clk_hw *hw)
{
	return container_of(hw, struct hsdk_pll_clk, hw);
}
static unsigned long hsdk_pll_recalc_rate(struct clk_hw *hw,
					  unsigned long parent_rate)
{
	u32 val;
	u64 rate;
	u32 idiv, fbdiv, odiv;
	struct hsdk_pll_clk *clk = to_hsdk_pll_clk(hw);

	val = hsdk_pll_read(clk, CGU_PLL_CTRL);

	dev_dbg(clk->dev, "current configuration: %#x\n", val);

	/* Check if PLL is disabled */
	if (val & CGU_PLL_CTRL_PD)
		return 0;

	/* Check if PLL is bypassed */
	if (val & CGU_PLL_CTRL_BYPASS)
		return parent_rate;

	/* input divider = reg.idiv + 1 */
	idiv = 1 + ((val & CGU_PLL_CTRL_IDIV_MASK) >> CGU_PLL_CTRL_IDIV_SHIFT);
	/* fb divider = 2*(reg.fbdiv + 1) */
	fbdiv = 2 * (1 + ((val & CGU_PLL_CTRL_FBDIV_MASK) >> CGU_PLL_CTRL_FBDIV_SHIFT));
	/* output divider = 2^(reg.odiv) */
	odiv = 1 << ((val & CGU_PLL_CTRL_ODIV_MASK) >> CGU_PLL_CTRL_ODIV_SHIFT);
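
	/*
	 * rate = parent_rate * fbdiv / (idiv * odiv). For example, assuming
	 * the 33.33 MHz reference clock used on HSDK, the table entry
	 * { 1000000000, 1, 29, 0, 0 } gives idiv = 2, fbdiv = 60, odiv = 1,
	 * i.e. 33333333 Hz * 60 / (2 * 1) = 999999990 Hz (~1 GHz).
	 */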
	rate = (u64)parent_rate * fbdiv;
	do_div(rate, idiv * odiv);

	return rate;
}
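
/*
 * Only the rates listed in the per-PLL configuration table can be
 * programmed, so round to the closest table entry.
 */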
static long hsdk_pll_round_rate(struct clk_hw *hw, unsigned long rate,
				unsigned long *prate)
{
	int i;
	unsigned long best_rate;
	struct hsdk_pll_clk *clk = to_hsdk_pll_clk(hw);
	const struct hsdk_pll_cfg *pll_cfg = clk->pll_devdata->pll_cfg;

	if (pll_cfg[0].rate == 0)
		return -EINVAL;

	best_rate = pll_cfg[0].rate;

	for (i = 1; pll_cfg[i].rate != 0; i++) {
		if (abs(rate - pll_cfg[i].rate) < abs(rate - best_rate))
			best_rate = pll_cfg[i].rate;
	}

	dev_dbg(clk->dev, "chosen best rate: %lu\n", best_rate);

	return best_rate;
}
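
/*
 * Update a generic (non-core) PLL: program the new divider settings and
 * wait for the PLL to re-lock.
 */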
static int hsdk_pll_comm_update_rate(struct hsdk_pll_clk *clk,
				     unsigned long rate,
				     const struct hsdk_pll_cfg *cfg)
{
	hsdk_pll_set_cfg(clk, cfg);

	/*
	 * Wait until the CGU relocks and check the error status.
	 * If the CGU is still not locked after the timeout, return an error.
	 */
	udelay(HSDK_PLL_MAX_LOCK_TIME);
	if (!hsdk_pll_is_locked(clk))
		return -ETIMEDOUT;

	if (hsdk_pll_is_err(clk))
		return -EINVAL;

	return 0;
}
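
/*
 * Update the core PLL: besides reprogramming the PLL itself, the core
 * interface clock divider in CREG has to be adjusted. The divider is
 * switched to div-by-2 before raising the PLL rate and back to div-by-1
 * only after a lower rate has been applied, so the interface clock stays
 * below the 500 MHz threshold while the rate changes.
 */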
static int hsdk_pll_core_update_rate(struct hsdk_pll_clk *clk,
				     unsigned long rate,
				     const struct hsdk_pll_cfg *cfg)
{
	/*
	 * When the core clock exceeds 500 MHz, the divider for the interface
	 * clock must be programmed to div-by-2.
	 */
	if (rate > CORE_IF_CLK_THRESHOLD_HZ)
		iowrite32(CREG_CORE_IF_CLK_DIV_2, clk->spec_regs);

	hsdk_pll_set_cfg(clk, cfg);

	/*
	 * Wait until the CGU relocks and check the error status.
	 * If the CGU is still not locked after the timeout, return an error.
	 */
	udelay(HSDK_PLL_MAX_LOCK_TIME);
	if (!hsdk_pll_is_locked(clk))
		return -ETIMEDOUT;

	if (hsdk_pll_is_err(clk))
		return -EINVAL;

	/*
	 * Program the divider to div-by-1 if we successfully set the core
	 * clock below the 500 MHz threshold.
	 */
	if (rate <= CORE_IF_CLK_THRESHOLD_HZ)
		iowrite32(CREG_CORE_IF_CLK_DIV_1, clk->spec_regs);

	return 0;
}
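
/* Only rates that exactly match a configuration table entry are accepted. */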
static int hsdk_pll_set_rate(struct clk_hw *hw, unsigned long rate,
			     unsigned long parent_rate)
{
	int i;
	struct hsdk_pll_clk *clk = to_hsdk_pll_clk(hw);
	const struct hsdk_pll_cfg *pll_cfg = clk->pll_devdata->pll_cfg;

	for (i = 0; pll_cfg[i].rate != 0; i++) {
		if (pll_cfg[i].rate == rate) {
			return clk->pll_devdata->update_rate(clk, rate,
							     &pll_cfg[i]);
		}
	}

	dev_err(clk->dev, "invalid rate=%lu, parent_rate=%lu\n", rate,
		parent_rate);

	return -EINVAL;
}
static const struct clk_ops hsdk_pll_ops = {
	.recalc_rate = hsdk_pll_recalc_rate,
	.round_rate = hsdk_pll_round_rate,
	.set_rate = hsdk_pll_set_rate,
};
static int hsdk_pll_clk_probe(struct platform_device *pdev)
{
	int ret;
	struct resource *mem;
	const char *parent_name;
	unsigned int num_parents;
	struct hsdk_pll_clk *pll_clk;
	struct clk_init_data init = { };
	struct device *dev = &pdev->dev;

	pll_clk = devm_kzalloc(dev, sizeof(*pll_clk), GFP_KERNEL);
	if (!pll_clk)
		return -ENOMEM;

	mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	pll_clk->regs = devm_ioremap_resource(dev, mem);
	if (IS_ERR(pll_clk->regs))
		return PTR_ERR(pll_clk->regs);

	init.name = dev->of_node->name;
	init.ops = &hsdk_pll_ops;
	parent_name = of_clk_get_parent_name(dev->of_node, 0);
	init.parent_names = &parent_name;
	num_parents = of_clk_get_parent_count(dev->of_node);
	if (num_parents == 0 || num_parents > CGU_PLL_SOURCE_MAX) {
		dev_err(dev, "wrong number of clock parents: %u\n", num_parents);
		return -EINVAL;
	}
	init.num_parents = num_parents;

	pll_clk->hw.init = &init;
	pll_clk->dev = dev;
	pll_clk->pll_devdata = of_device_get_match_data(dev);

	if (!pll_clk->pll_devdata) {
		dev_err(dev, "No OF match data provided\n");
		return -EINVAL;
	}

	ret = devm_clk_hw_register(dev, &pll_clk->hw);
	if (ret) {
		dev_err(dev, "failed to register %s clock\n", init.name);
		return ret;
	}

	return of_clk_add_hw_provider(dev->of_node, of_clk_hw_simple_get,
				      &pll_clk->hw);
}
static int hsdk_pll_clk_remove(struct platform_device *pdev)
{
	of_clk_del_provider(pdev->dev.of_node);
	return 0;
}
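
/*
 * Early registration path for the core PLL: the CGU PLL registers and the
 * CREG interface clock divider register are mapped straight from DT, since
 * the clock is needed before the platform driver infrastructure is up
 * (see the CLK_OF_DECLARE() below).
 */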
static void __init of_hsdk_pll_clk_setup(struct device_node *node)
{
	int ret;
	const char *parent_name;
	unsigned int num_parents;
	struct hsdk_pll_clk *pll_clk;
	struct clk_init_data init = { };

	pll_clk = kzalloc(sizeof(*pll_clk), GFP_KERNEL);
	if (!pll_clk)
		return;

	pll_clk->regs = of_iomap(node, 0);
	if (!pll_clk->regs) {
		pr_err("failed to map pll registers\n");
		goto err_free_pll_clk;
	}

	pll_clk->spec_regs = of_iomap(node, 1);
	if (!pll_clk->spec_regs) {
		pr_err("failed to map pll special registers\n");
		goto err_unmap_comm_regs;
	}

	init.name = node->name;
	init.ops = &hsdk_pll_ops;
	parent_name = of_clk_get_parent_name(node, 0);
	init.parent_names = &parent_name;
	num_parents = of_clk_get_parent_count(node);
	if (num_parents > CGU_PLL_SOURCE_MAX) {
		pr_err("too many clock parents: %u\n", num_parents);
		goto err_unmap_spec_regs;
	}
	init.num_parents = num_parents;

	pll_clk->hw.init = &init;
	pll_clk->pll_devdata = &core_pll_devdata;

	ret = clk_hw_register(NULL, &pll_clk->hw);
	if (ret) {
		pr_err("failed to register %s clock\n", node->name);
		goto err_unmap_spec_regs;
	}

	ret = of_clk_add_hw_provider(node, of_clk_hw_simple_get, &pll_clk->hw);
	if (ret) {
		pr_err("failed to add hw provider for %s clock\n", node->name);
		goto err_unmap_spec_regs;
	}

	return;

err_unmap_spec_regs:
	iounmap(pll_clk->spec_regs);
err_unmap_comm_regs:
	iounmap(pll_clk->regs);
err_free_pll_clk:
	kfree(pll_clk);
}
/* The core PLL is needed early for the ARC CPU timers */
CLK_OF_DECLARE(hsdk_pll_clock, "snps,hsdk-core-pll-clock",
	       of_hsdk_pll_clk_setup);

static const struct of_device_id hsdk_pll_clk_id[] = {
	{ .compatible = "snps,hsdk-gp-pll-clock", .data = &sdt_pll_devdata },
	{ .compatible = "snps,hsdk-hdmi-pll-clock", .data = &hdmi_pll_devdata },
	{ }
};

static struct platform_driver hsdk_pll_clk_driver = {
	.driver = {
		.name = "hsdk-gp-pll-clock",
		.of_match_table = hsdk_pll_clk_id,
	},
	.probe = hsdk_pll_clk_probe,
	.remove = hsdk_pll_clk_remove,
};
builtin_platform_driver(hsdk_pll_clk_driver);