clk-highbank.c

// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright 2011-2012 Calxeda, Inc.
 */
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/err.h>
#include <linux/clk-provider.h>
#include <linux/io.h>
#include <linux/of.h>
#include <linux/of_address.h>

#define HB_PLL_LOCK_500		0x20000000
#define HB_PLL_LOCK		0x10000000
#define HB_PLL_DIVF_SHIFT	20
#define HB_PLL_DIVF_MASK	0x0ff00000
#define HB_PLL_DIVQ_SHIFT	16
#define HB_PLL_DIVQ_MASK	0x00070000
#define HB_PLL_DIVR_SHIFT	8
#define HB_PLL_DIVR_MASK	0x00001f00
#define HB_PLL_RANGE_SHIFT	4
#define HB_PLL_RANGE_MASK	0x00000070
#define HB_PLL_BYPASS		0x00000008
#define HB_PLL_RESET		0x00000004
#define HB_PLL_EXT_BYPASS	0x00000002
#define HB_PLL_EXT_ENA		0x00000001

#define HB_PLL_VCO_MIN_FREQ	2133000000
#define HB_PLL_MAX_FREQ		HB_PLL_VCO_MIN_FREQ
#define HB_PLL_MIN_FREQ		(HB_PLL_VCO_MIN_FREQ / 64)

#define HB_A9_BCLK_DIV_MASK	0x00000006
#define HB_A9_BCLK_DIV_SHIFT	1
#define HB_A9_PCLK_DIV		0x00000001
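
/*
 * Each hb_clk wraps one clock-control word in the Highbank system
 * register block; ->reg points at that word once hb_clk_init() below
 * has mapped the "calxeda,hb-sregs" region and applied the offset.
 */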
struct hb_clk {
	struct clk_hw	hw;
	void __iomem	*reg;
};

#define to_hb_clk(p) container_of(p, struct hb_clk, hw)
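
/*
 * Preparing the PLL releases it from reset, then busy-waits until both
 * lock status bits (HB_PLL_LOCK and HB_PLL_LOCK_500) read back as set.
 */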
static int clk_pll_prepare(struct clk_hw *hwclk)
{
	struct hb_clk *hbclk = to_hb_clk(hwclk);
	u32 reg;

	reg = readl(hbclk->reg);
	reg &= ~HB_PLL_RESET;
	writel(reg, hbclk->reg);

	while ((readl(hbclk->reg) & HB_PLL_LOCK) == 0)
		;
	while ((readl(hbclk->reg) & HB_PLL_LOCK_500) == 0)
		;

	return 0;
}

static void clk_pll_unprepare(struct clk_hw *hwclk)
{
	struct hb_clk *hbclk = to_hb_clk(hwclk);
	u32 reg;

	reg = readl(hbclk->reg);
	reg |= HB_PLL_RESET;
	writel(reg, hbclk->reg);
}

static int clk_pll_enable(struct clk_hw *hwclk)
{
	struct hb_clk *hbclk = to_hb_clk(hwclk);
	u32 reg;

	reg = readl(hbclk->reg);
	reg |= HB_PLL_EXT_ENA;
	writel(reg, hbclk->reg);

	return 0;
}

static void clk_pll_disable(struct clk_hw *hwclk)
{
	struct hb_clk *hbclk = to_hb_clk(hwclk);
	u32 reg;

	reg = readl(hbclk->reg);
	reg &= ~HB_PLL_EXT_ENA;
	writel(reg, hbclk->reg);
}
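
/*
 * The PLL output is parent_rate * (divf + 1) / 2^divq; if the external
 * bypass bit is set, the reference clock passes through unchanged.
 */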
static unsigned long clk_pll_recalc_rate(struct clk_hw *hwclk,
					 unsigned long parent_rate)
{
	struct hb_clk *hbclk = to_hb_clk(hwclk);
	unsigned long divf, divq, vco_freq, reg;

	reg = readl(hbclk->reg);
	if (reg & HB_PLL_EXT_BYPASS)
		return parent_rate;

	divf = (reg & HB_PLL_DIVF_MASK) >> HB_PLL_DIVF_SHIFT;
	divq = (reg & HB_PLL_DIVQ_MASK) >> HB_PLL_DIVQ_SHIFT;
	vco_freq = parent_rate * (divf + 1);

	return vco_freq / (1 << divq);
}
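
/*
 * Choose divider settings for a target rate: clamp the request to the
 * supported range, pick the smallest divq that keeps the VCO at or
 * above HB_PLL_VCO_MIN_FREQ, then round vco_freq / ref_freq to the
 * nearest integer to get divf + 1.
 */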
static void clk_pll_calc(unsigned long rate, unsigned long ref_freq,
			 u32 *pdivq, u32 *pdivf)
{
	u32 divq, divf;
	unsigned long vco_freq;

	if (rate < HB_PLL_MIN_FREQ)
		rate = HB_PLL_MIN_FREQ;
	if (rate > HB_PLL_MAX_FREQ)
		rate = HB_PLL_MAX_FREQ;

	for (divq = 1; divq <= 6; divq++) {
		if ((rate * (1 << divq)) >= HB_PLL_VCO_MIN_FREQ)
			break;
	}

	vco_freq = rate * (1 << divq);
	divf = (vco_freq + (ref_freq / 2)) / ref_freq;
	divf--;

	*pdivq = divq;
	*pdivf = divf;
}

static long clk_pll_round_rate(struct clk_hw *hwclk, unsigned long rate,
			       unsigned long *parent_rate)
{
	u32 divq, divf;
	unsigned long ref_freq = *parent_rate;

	clk_pll_calc(rate, ref_freq, &divq, &divf);

	return (ref_freq * (divf + 1)) / (1 << divq);
}
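
/*
 * A divf change requires a full re-lock: switch to external bypass,
 * pulse reset around the divider update, wait for both lock bits, then
 * leave bypass. A divq-only change is done under bypass without reset.
 */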
static int clk_pll_set_rate(struct clk_hw *hwclk, unsigned long rate,
			    unsigned long parent_rate)
{
	struct hb_clk *hbclk = to_hb_clk(hwclk);
	u32 divq, divf;
	u32 reg;

	clk_pll_calc(rate, parent_rate, &divq, &divf);

	reg = readl(hbclk->reg);
	if (divf != ((reg & HB_PLL_DIVF_MASK) >> HB_PLL_DIVF_SHIFT)) {
		/* Need to re-lock PLL, so put it into bypass mode */
		reg |= HB_PLL_EXT_BYPASS;
		writel(reg | HB_PLL_EXT_BYPASS, hbclk->reg);

		writel(reg | HB_PLL_RESET, hbclk->reg);
		reg &= ~(HB_PLL_DIVF_MASK | HB_PLL_DIVQ_MASK);
		reg |= (divf << HB_PLL_DIVF_SHIFT) | (divq << HB_PLL_DIVQ_SHIFT);
		writel(reg | HB_PLL_RESET, hbclk->reg);
		writel(reg, hbclk->reg);

		while ((readl(hbclk->reg) & HB_PLL_LOCK) == 0)
			;
		while ((readl(hbclk->reg) & HB_PLL_LOCK_500) == 0)
			;

		reg |= HB_PLL_EXT_ENA;
		reg &= ~HB_PLL_EXT_BYPASS;
	} else {
		writel(reg | HB_PLL_EXT_BYPASS, hbclk->reg);
		reg &= ~HB_PLL_DIVQ_MASK;
		reg |= divq << HB_PLL_DIVQ_SHIFT;
		writel(reg | HB_PLL_EXT_BYPASS, hbclk->reg);
	}
	writel(reg, hbclk->reg);

	return 0;
}

static const struct clk_ops clk_pll_ops = {
	.prepare = clk_pll_prepare,
	.unprepare = clk_pll_unprepare,
	.enable = clk_pll_enable,
	.disable = clk_pll_disable,
	.recalc_rate = clk_pll_recalc_rate,
	.round_rate = clk_pll_round_rate,
	.set_rate = clk_pll_set_rate,
};
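
/*
 * The A9 peripheral clock runs at the CPU clock divided by 4 or 8,
 * selected by HB_A9_PCLK_DIV; only recalc_rate is provided, so the
 * divider is treated as read-only here.
 */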
static unsigned long clk_cpu_periphclk_recalc_rate(struct clk_hw *hwclk,
						   unsigned long parent_rate)
{
	struct hb_clk *hbclk = to_hb_clk(hwclk);
	u32 div = (readl(hbclk->reg) & HB_A9_PCLK_DIV) ? 8 : 4;

	return parent_rate / div;
}

static const struct clk_ops a9periphclk_ops = {
	.recalc_rate = clk_cpu_periphclk_recalc_rate,
};
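
/*
 * The A9 bus clock divider is a two-bit field encoding divide-by-(n + 2),
 * i.e. the CPU clock over 2 through 5.
 */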
static unsigned long clk_cpu_a9bclk_recalc_rate(struct clk_hw *hwclk,
						unsigned long parent_rate)
{
	struct hb_clk *hbclk = to_hb_clk(hwclk);
	u32 div = (readl(hbclk->reg) & HB_A9_BCLK_DIV_MASK) >> HB_A9_BCLK_DIV_SHIFT;

	return parent_rate / (div + 2);
}

static const struct clk_ops a9bclk_ops = {
	.recalc_rate = clk_cpu_a9bclk_recalc_rate,
};
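
/*
 * Peripheral clocks (e.g. eMMC) use a 5-bit divider field: recalc_rate
 * interprets the stored value v as a divide ratio of 2 * (v + 1), and
 * set_rate accepts only even dividers, storing div / 2.
 */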
static unsigned long clk_periclk_recalc_rate(struct clk_hw *hwclk,
					     unsigned long parent_rate)
{
	struct hb_clk *hbclk = to_hb_clk(hwclk);
	u32 div;

	div = readl(hbclk->reg) & 0x1f;
	div++;
	div *= 2;

	return parent_rate / div;
}

static long clk_periclk_round_rate(struct clk_hw *hwclk, unsigned long rate,
				   unsigned long *parent_rate)
{
	u32 div;

	div = *parent_rate / rate;
	div++;
	div &= ~0x1;

	return *parent_rate / div;
}

static int clk_periclk_set_rate(struct clk_hw *hwclk, unsigned long rate,
				unsigned long parent_rate)
{
	struct hb_clk *hbclk = to_hb_clk(hwclk);
	u32 div;

	div = parent_rate / rate;
	if (div & 0x1)
		return -EINVAL;

	writel(div >> 1, hbclk->reg);

	return 0;
}

static const struct clk_ops periclk_ops = {
	.recalc_rate = clk_periclk_recalc_rate,
	.round_rate = clk_periclk_round_rate,
	.set_rate = clk_periclk_set_rate,
};
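
/*
 * Common registration helper: the node's "reg" property is an offset
 * into the shared "calxeda,hb-sregs" register block, which is mapped
 * and then offset to locate this clock's control word. Each clock has
 * at most one parent, taken from the node's first clock specifier.
 */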
static void __init hb_clk_init(struct device_node *node, const struct clk_ops *ops, unsigned long clkflags)
{
	u32 reg;
	struct hb_clk *hb_clk;
	const char *clk_name = node->name;
	const char *parent_name;
	struct clk_init_data init;
	struct device_node *srnp;
	int rc;

	rc = of_property_read_u32(node, "reg", &reg);
	if (WARN_ON(rc))
		return;

	hb_clk = kzalloc(sizeof(*hb_clk), GFP_KERNEL);
	if (WARN_ON(!hb_clk))
		return;

	/* Map system registers */
	srnp = of_find_compatible_node(NULL, NULL, "calxeda,hb-sregs");
	hb_clk->reg = of_iomap(srnp, 0);
	of_node_put(srnp);
	BUG_ON(!hb_clk->reg);
	hb_clk->reg += reg;

	of_property_read_string(node, "clock-output-names", &clk_name);

	init.name = clk_name;
	init.ops = ops;
	init.flags = clkflags;
	parent_name = of_clk_get_parent_name(node, 0);
	init.parent_names = &parent_name;
	init.num_parents = 1;

	hb_clk->hw.init = &init;

	rc = clk_hw_register(NULL, &hb_clk->hw);
	if (WARN_ON(rc)) {
		kfree(hb_clk);
		return;
	}
	of_clk_add_hw_provider(node, of_clk_hw_simple_get, &hb_clk->hw);
}
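
/*
 * One init hook per device-tree compatible string. The A9 bus clock is
 * registered with CLK_IS_CRITICAL so the framework never gates it.
 */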
static void __init hb_pll_init(struct device_node *node)
{
	hb_clk_init(node, &clk_pll_ops, 0);
}
CLK_OF_DECLARE(hb_pll, "calxeda,hb-pll-clock", hb_pll_init);

static void __init hb_a9periph_init(struct device_node *node)
{
	hb_clk_init(node, &a9periphclk_ops, 0);
}
CLK_OF_DECLARE(hb_a9periph, "calxeda,hb-a9periph-clock", hb_a9periph_init);

static void __init hb_a9bus_init(struct device_node *node)
{
	hb_clk_init(node, &a9bclk_ops, CLK_IS_CRITICAL);
}
CLK_OF_DECLARE(hb_a9bus, "calxeda,hb-a9bus-clock", hb_a9bus_init);

static void __init hb_emmc_init(struct device_node *node)
{
	hb_clk_init(node, &periclk_ops, 0);
}
CLK_OF_DECLARE(hb_emmc, "calxeda,hb-emmc-clock", hb_emmc_init);