// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Clock driver for Loongson-1 SoC
 *
 * Copyright (C) 2012-2023 Keguang Zhang <keguang.zhang@gmail.com>
 */
  7. #include <linux/bits.h>
  8. #include <linux/clk-provider.h>
  9. #include <linux/container_of.h>
  10. #include <linux/io.h>
  11. #include <linux/of_address.h>
  12. #include <linux/slab.h>
  13. #include <linux/spinlock.h>
  14. #include <linux/printk.h>
  15. #include <dt-bindings/clock/loongson,ls1x-clk.h>
/* Loongson 1 Clock Register Definitions (offsets from the mapped base) */
#define CLK_PLL_FREQ	0x0	/* PLL multiplier/frequency register */
#define CLK_PLL_DIV	0x4	/* PLL output divider register */

/* Serializes read-modify-write sequences on the shared divider register */
static DEFINE_SPINLOCK(ls1x_clk_div_lock);
/*
 * PLL rate description, consumed by ls1x_pll_recalc_rate():
 * rate = (fixed + int_part + frac_part) * parent_rate >> shift
 */
struct ls1x_clk_pll_data {
	u32 fixed;		/* constant term added to the multiplier */
	u8 shift;		/* right-shift applied to the final product */
	u8 int_shift;		/* bit position of the integer multiplier field */
	u8 int_width;		/* see ls1x_pll_rate_part(): mask spans width + 1 bits */
	u8 frac_shift;		/* bit position of the fractional multiplier field */
	u8 frac_width;		/* 0 disables the fractional term entirely */
};
/* Divider description, consumed by the ls1x_clk_divider_ops callbacks */
struct ls1x_clk_div_data {
	u8 shift;		/* bit position of the divider field */
	u8 width;		/* width of the divider field in bits */
	unsigned long flags;	/* CLK_DIVIDER_* flags for the divider helpers */
	const struct clk_div_table *table;	/* optional val->div table, NULL for linear */
	u8 bypass_shift;	/* bit that bypasses the divider during updates */
	u8 bypass_inv;		/* non-zero: bypass bit is active-low */
	spinlock_t *lock;	/* protect access to DIV registers */
};
/* Common wrapper for both PLL and divider clocks */
struct ls1x_clk {
	void __iomem *reg;	/* filled in at init: mapped base + offset */
	unsigned int offset;	/* register offset from the mapped base */
	struct clk_hw hw;
	const void *data;	/* ls1x_clk_pll_data or ls1x_clk_div_data */
};

#define to_ls1x_clk(_hw) container_of(_hw, struct ls1x_clk, hw)
  44. static inline unsigned long ls1x_pll_rate_part(unsigned int val,
  45. unsigned int shift,
  46. unsigned int width)
  47. {
  48. return (val & GENMASK(shift + width, shift)) >> shift;
  49. }
/*
 * Compute the PLL output rate from the frequency register:
 *   rate = (fixed + int_field + frac_field) * parent_rate >> shift
 * The fractional term is only added when frac_width is non-zero
 * (the LS1B PLL data declares no separate integer field).
 */
static unsigned long ls1x_pll_recalc_rate(struct clk_hw *hw,
					  unsigned long parent_rate)
{
	struct ls1x_clk *ls1x_clk = to_ls1x_clk(hw);
	const struct ls1x_clk_pll_data *d = ls1x_clk->data;
	u32 val, rate;

	val = readl(ls1x_clk->reg);
	rate = d->fixed;
	rate += ls1x_pll_rate_part(val, d->int_shift, d->int_width);
	if (d->frac_width)
		rate += ls1x_pll_rate_part(val, d->frac_shift, d->frac_width);
	/* NOTE(review): 32-bit product — assumes multiplier * parent fits in u32 */
	rate *= parent_rate;
	rate >>= d->shift;

	return rate;
}
/* The PLL is read-only from Linux: only rate readback is implemented */
static const struct clk_ops ls1x_pll_clk_ops = {
	.recalc_rate = ls1x_pll_recalc_rate,
};
  68. static unsigned long ls1x_divider_recalc_rate(struct clk_hw *hw,
  69. unsigned long parent_rate)
  70. {
  71. struct ls1x_clk *ls1x_clk = to_ls1x_clk(hw);
  72. const struct ls1x_clk_div_data *d = ls1x_clk->data;
  73. unsigned int val;
  74. val = readl(ls1x_clk->reg) >> d->shift;
  75. val &= clk_div_mask(d->width);
  76. return divider_recalc_rate(hw, parent_rate, val, d->table,
  77. d->flags, d->width);
  78. }
  79. static long ls1x_divider_round_rate(struct clk_hw *hw, unsigned long rate,
  80. unsigned long *prate)
  81. {
  82. struct ls1x_clk *ls1x_clk = to_ls1x_clk(hw);
  83. const struct ls1x_clk_div_data *d = ls1x_clk->data;
  84. return divider_round_rate(hw, rate, prate, d->table,
  85. d->width, d->flags);
  86. }
/*
 * Program a new divider value.
 *
 * The hardware requires the divider to be bypassed while its field is
 * rewritten, so the sequence is: assert bypass, update the field, then
 * release bypass — three separate read-modify-write cycles, all under
 * the shared register lock.  bypass_inv flips the polarity of the
 * bypass bit for SoCs where it is active-low.
 *
 * Returns 0 on success, or the negative error from divider_get_val()
 * if no register value can represent the requested rate.
 */
static int ls1x_divider_set_rate(struct clk_hw *hw, unsigned long rate,
				 unsigned long parent_rate)
{
	struct ls1x_clk *ls1x_clk = to_ls1x_clk(hw);
	const struct ls1x_clk_div_data *d = ls1x_clk->data;
	int val, div_val;
	unsigned long flags = 0;

	div_val = divider_get_val(rate, parent_rate, d->table,
				  d->width, d->flags);
	if (div_val < 0)
		return div_val;

	spin_lock_irqsave(d->lock, flags);

	/* Bypass the clock */
	val = readl(ls1x_clk->reg);
	if (d->bypass_inv)
		val &= ~BIT(d->bypass_shift);
	else
		val |= BIT(d->bypass_shift);
	writel(val, ls1x_clk->reg);

	/* Rewrite the divider field while the clock is bypassed */
	val = readl(ls1x_clk->reg);
	val &= ~(clk_div_mask(d->width) << d->shift);
	val |= (u32)div_val << d->shift;
	writel(val, ls1x_clk->reg);

	/* Restore the clock */
	val = readl(ls1x_clk->reg);
	if (d->bypass_inv)
		val |= BIT(d->bypass_shift);
	else
		val &= ~BIT(d->bypass_shift);
	writel(val, ls1x_clk->reg);

	spin_unlock_irqrestore(d->lock, flags);

	return 0;
}
/* Full read/write divider: rate readback, rounding and programming */
static const struct clk_ops ls1x_clk_divider_ops = {
	.recalc_rate = ls1x_divider_recalc_rate,
	.round_rate = ls1x_divider_round_rate,
	.set_rate = ls1x_divider_set_rate,
};
/*
 * Declare a PLL clock named _name, read from register _offset.
 * The xtal parent is looked up by fw_name/name; index -1 prevents a
 * positional DT lookup.  See struct ls1x_clk_pll_data for the rate formula.
 */
#define LS1X_CLK_PLL(_name, _offset, _fixed, _shift,			\
		     f_shift, f_width, i_shift, i_width)		\
struct ls1x_clk _name = {						\
	.offset = (_offset),						\
	.data = &(const struct ls1x_clk_pll_data) {			\
		.fixed = (_fixed),					\
		.shift = (_shift),					\
		.int_shift = (i_shift),					\
		.int_width = (i_width),					\
		.frac_shift = (f_shift),				\
		.frac_width = (f_width),				\
	},								\
	.hw.init = &(const struct clk_init_data) {			\
		.name = #_name,						\
		.ops = &ls1x_pll_clk_ops,				\
		.parent_data = &(const struct clk_parent_data) {	\
			.fw_name = "xtal",				\
			.name = "xtal",					\
			.index = -1,					\
		},							\
		.num_parents = 1,					\
	},								\
}
/*
 * Declare a divider clock named _name with parent hw _pname.
 * CLK_GET_RATE_NOCACHE forces a register read on every rate query,
 * since firmware or the bypass sequence may change the field underneath.
 * All dividers share ls1x_clk_div_lock because they live in common registers.
 */
#define LS1X_CLK_DIV(_name, _pname, _offset, _shift, _width,		\
		     _table, _bypass_shift, _bypass_inv, _flags)	\
struct ls1x_clk _name = {						\
	.offset = (_offset),						\
	.data = &(const struct ls1x_clk_div_data){			\
		.shift = (_shift),					\
		.width = (_width),					\
		.table = (_table),					\
		.flags = (_flags),					\
		.bypass_shift = (_bypass_shift),			\
		.bypass_inv = (_bypass_inv),				\
		.lock = &ls1x_clk_div_lock,				\
	},								\
	.hw.init = &(const struct clk_init_data) {			\
		.name = #_name,						\
		.ops = &ls1x_clk_divider_ops,				\
		.parent_hws = (const struct clk_hw *[]) { _pname },	\
		.num_parents = 1,					\
		.flags = CLK_GET_RATE_NOCACHE,				\
	},								\
}
/* LS1B: rate = (12 + frac[5:0]) * xtal / 2; no separate integer field */
static LS1X_CLK_PLL(ls1b_clk_pll, CLK_PLL_FREQ, 12, 1, 0, 5, 0, 0);
/* CPU divider in PLL_DIV bits [23:20], bypass bit 8 */
static LS1X_CLK_DIV(ls1b_clk_cpu, &ls1b_clk_pll.hw, CLK_PLL_DIV,
		    20, 4, NULL, 8, 0,
		    CLK_DIVIDER_ONE_BASED | CLK_DIVIDER_ROUND_CLOSEST);
/* Display controller divider in bits [29:26], bypass bit 12 */
static LS1X_CLK_DIV(ls1b_clk_dc, &ls1b_clk_pll.hw, CLK_PLL_DIV,
		    26, 4, NULL, 12, 0, CLK_DIVIDER_ONE_BASED);
/* AHB divider in bits [17:14], bypass bit 10 */
static LS1X_CLK_DIV(ls1b_clk_ahb, &ls1b_clk_pll.hw, CLK_PLL_DIV,
		    14, 4, NULL, 10, 0, CLK_DIVIDER_ONE_BASED);
/* APB runs at half the AHB rate, no register of its own */
static CLK_FIXED_FACTOR(ls1b_clk_apb, "ls1b_clk_apb", "ls1b_clk_ahb", 2, 1,
			CLK_SET_RATE_PARENT);

static struct clk_hw_onecell_data ls1b_clk_hw_data = {
	.hws = {
		[LS1X_CLKID_PLL] = &ls1b_clk_pll.hw,
		[LS1X_CLKID_CPU] = &ls1b_clk_cpu.hw,
		[LS1X_CLKID_DC] = &ls1b_clk_dc.hw,
		[LS1X_CLKID_AHB] = &ls1b_clk_ahb.hw,
		[LS1X_CLKID_APB] = &ls1b_clk_apb.hw,
	},
	.num = CLK_NR_CLKS,
};
/*
 * LS1C AHB register-value to divisor mapping; values 2 and 3 both
 * select divide-by-3 per this table — TODO confirm against the manual.
 */
static const struct clk_div_table ls1c_ahb_div_table[] = {
	[0] = { .val = 0, .div = 2 },
	[1] = { .val = 1, .div = 4 },
	[2] = { .val = 2, .div = 3 },
	[3] = { .val = 3, .div = 3 },
	[4] = { /* sentinel */ }
};

/* LS1C: rate = (int[23:16] + frac[15:8]) * xtal / 4 */
static LS1X_CLK_PLL(ls1c_clk_pll, CLK_PLL_FREQ, 0, 2, 8, 8, 16, 8);
/* CPU divider in PLL_DIV bits [14:8]; active-low bypass at bit 0 */
static LS1X_CLK_DIV(ls1c_clk_cpu, &ls1c_clk_pll.hw, CLK_PLL_DIV,
		    8, 7, NULL, 0, 1,
		    CLK_DIVIDER_ONE_BASED | CLK_DIVIDER_ROUND_CLOSEST);
/* Display controller divider in bits [30:24]; active-low bypass at bit 4 */
static LS1X_CLK_DIV(ls1c_clk_dc, &ls1c_clk_pll.hw, CLK_PLL_DIV,
		    24, 7, NULL, 4, 1, CLK_DIVIDER_ONE_BASED);
/* AHB divides the CPU clock via the table above, field in PLL_FREQ bits [1:0] */
static LS1X_CLK_DIV(ls1c_clk_ahb, &ls1c_clk_cpu.hw, CLK_PLL_FREQ,
		    0, 2, ls1c_ahb_div_table, 0, 0, CLK_DIVIDER_ALLOW_ZERO);
/* APB equals the AHB rate on LS1C (factor 1/1) */
static CLK_FIXED_FACTOR(ls1c_clk_apb, "ls1c_clk_apb", "ls1c_clk_ahb", 1, 1,
			CLK_SET_RATE_PARENT);

static struct clk_hw_onecell_data ls1c_clk_hw_data = {
	.hws = {
		[LS1X_CLKID_PLL] = &ls1c_clk_pll.hw,
		[LS1X_CLKID_CPU] = &ls1c_clk_cpu.hw,
		[LS1X_CLKID_DC] = &ls1c_clk_dc.hw,
		[LS1X_CLKID_AHB] = &ls1c_clk_ahb.hw,
		[LS1X_CLKID_APB] = &ls1c_clk_apb.hw,
	},
	.num = CLK_NR_CLKS,
};
  216. static void __init ls1x_clk_init(struct device_node *np,
  217. struct clk_hw_onecell_data *hw_data)
  218. {
  219. struct ls1x_clk *ls1x_clk;
  220. void __iomem *reg;
  221. int i, ret;
  222. reg = of_iomap(np, 0);
  223. if (!reg) {
  224. pr_err("Unable to map base for %pOF\n", np);
  225. return;
  226. }
  227. for (i = 0; i < hw_data->num; i++) {
  228. /* array might be sparse */
  229. if (!hw_data->hws[i])
  230. continue;
  231. if (i != LS1X_CLKID_APB) {
  232. ls1x_clk = to_ls1x_clk(hw_data->hws[i]);
  233. ls1x_clk->reg = reg + ls1x_clk->offset;
  234. }
  235. ret = of_clk_hw_register(np, hw_data->hws[i]);
  236. if (ret)
  237. goto err;
  238. }
  239. ret = of_clk_add_hw_provider(np, of_clk_hw_onecell_get, hw_data);
  240. if (!ret)
  241. return;
  242. err:
  243. pr_err("Failed to register %pOF\n", np);
  244. while (--i >= 0)
  245. clk_hw_unregister(hw_data->hws[i]);
  246. iounmap(reg);
  247. }
  248. static void __init ls1b_clk_init(struct device_node *np)
  249. {
  250. return ls1x_clk_init(np, &ls1b_clk_hw_data);
  251. }
  252. static void __init ls1c_clk_init(struct device_node *np)
  253. {
  254. return ls1x_clk_init(np, &ls1c_clk_hw_data);
  255. }
/* Early (pre-driver-model) registration keyed on the DT compatible strings */
CLK_OF_DECLARE(ls1b_clk, "loongson,ls1b-clk", ls1b_clk_init);
CLK_OF_DECLARE(ls1c_clk, "loongson,ls1c-clk", ls1c_clk_init);