/*
 * Copyright (C) 2016 Maxime Ripard
 * Maxime Ripard <maxime.ripard@free-electrons.com>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation; either version 2 of
 * the License, or (at your option) any later version.
 */
#include <linux/clk-provider.h>
#include <linux/io.h>

#include "ccu_frac.h"
#include "ccu_gate.h"
#include "ccu_nm.h"
#include "ccu_sdm.h"
/*
 * Scratch structure for N/M factor searches: callers fill in the
 * allowed ranges, and ccu_nm_find_best() stores the best pair found
 * back into the n and m fields.
 */
struct _ccu_nm {
	unsigned long	n, min_n, max_n;	/* multiplier and its bounds */
	unsigned long	m, min_m, max_m;	/* divider and its bounds */
};
  18. static unsigned long ccu_nm_calc_rate(unsigned long parent,
  19. unsigned long n, unsigned long m)
  20. {
  21. u64 rate = parent;
  22. rate *= n;
  23. do_div(rate, m);
  24. return rate;
  25. }
  26. static void ccu_nm_find_best(unsigned long parent, unsigned long rate,
  27. struct _ccu_nm *nm)
  28. {
  29. unsigned long best_rate = 0;
  30. unsigned long best_n = 0, best_m = 0;
  31. unsigned long _n, _m;
  32. for (_n = nm->min_n; _n <= nm->max_n; _n++) {
  33. for (_m = nm->min_m; _m <= nm->max_m; _m++) {
  34. unsigned long tmp_rate = ccu_nm_calc_rate(parent,
  35. _n, _m);
  36. if (tmp_rate > rate)
  37. continue;
  38. if ((rate - tmp_rate) < (rate - best_rate)) {
  39. best_rate = tmp_rate;
  40. best_n = _n;
  41. best_m = _m;
  42. }
  43. }
  44. }
  45. nm->n = best_n;
  46. nm->m = best_m;
  47. }
  48. static void ccu_nm_disable(struct clk_hw *hw)
  49. {
  50. struct ccu_nm *nm = hw_to_ccu_nm(hw);
  51. return ccu_gate_helper_disable(&nm->common, nm->enable);
  52. }
  53. static int ccu_nm_enable(struct clk_hw *hw)
  54. {
  55. struct ccu_nm *nm = hw_to_ccu_nm(hw);
  56. return ccu_gate_helper_enable(&nm->common, nm->enable);
  57. }
  58. static int ccu_nm_is_enabled(struct clk_hw *hw)
  59. {
  60. struct ccu_nm *nm = hw_to_ccu_nm(hw);
  61. return ccu_gate_helper_is_enabled(&nm->common, nm->enable);
  62. }
/*
 * clk_ops .recalc_rate callback: read the N and M factors back from
 * the hardware register and compute the effective output rate.
 */
static unsigned long ccu_nm_recalc_rate(struct clk_hw *hw,
					unsigned long parent_rate)
{
	struct ccu_nm *nm = hw_to_ccu_nm(hw);
	unsigned long rate;
	unsigned long n, m;
	u32 reg;

	/* Fractional mode, when active, overrides the N/M factors entirely */
	if (ccu_frac_helper_is_enabled(&nm->common, &nm->frac)) {
		rate = ccu_frac_helper_read_rate(&nm->common, &nm->frac);

		if (nm->common.features & CCU_FEATURE_FIXED_POSTDIV)
			rate /= nm->fixed_post_div;

		return rate;
	}

	reg = readl(nm->common.base + nm->common.reg);

	/* Extract N from its bitfield; a factor of 0 is treated as 1 */
	n = reg >> nm->n.shift;
	n &= (1 << nm->n.width) - 1;
	n += nm->n.offset;
	if (!n)
		n++;

	/* Extract M the same way, also guarding against a zero divider */
	m = reg >> nm->m.shift;
	m &= (1 << nm->m.width) - 1;
	m += nm->m.offset;
	if (!m)
		m++;

	/*
	 * With sigma-delta modulation enabled the effective rate comes
	 * from the SDM helper rather than plain parent * N / M.
	 */
	if (ccu_sdm_helper_is_enabled(&nm->common, &nm->sdm))
		rate = ccu_sdm_helper_read_rate(&nm->common, &nm->sdm, m, n);
	else
		rate = ccu_nm_calc_rate(parent_rate, n, m);

	if (nm->common.features & CCU_FEATURE_FIXED_POSTDIV)
		rate /= nm->fixed_post_div;

	return rate;
}
  95. static long ccu_nm_round_rate(struct clk_hw *hw, unsigned long rate,
  96. unsigned long *parent_rate)
  97. {
  98. struct ccu_nm *nm = hw_to_ccu_nm(hw);
  99. struct _ccu_nm _nm;
  100. if (nm->common.features & CCU_FEATURE_FIXED_POSTDIV)
  101. rate *= nm->fixed_post_div;
  102. if (rate < nm->min_rate) {
  103. rate = nm->min_rate;
  104. if (nm->common.features & CCU_FEATURE_FIXED_POSTDIV)
  105. rate /= nm->fixed_post_div;
  106. return rate;
  107. }
  108. if (ccu_frac_helper_has_rate(&nm->common, &nm->frac, rate)) {
  109. if (nm->common.features & CCU_FEATURE_FIXED_POSTDIV)
  110. rate /= nm->fixed_post_div;
  111. return rate;
  112. }
  113. if (ccu_sdm_helper_has_rate(&nm->common, &nm->sdm, rate)) {
  114. if (nm->common.features & CCU_FEATURE_FIXED_POSTDIV)
  115. rate /= nm->fixed_post_div;
  116. return rate;
  117. }
  118. _nm.min_n = nm->n.min ?: 1;
  119. _nm.max_n = nm->n.max ?: 1 << nm->n.width;
  120. _nm.min_m = 1;
  121. _nm.max_m = nm->m.max ?: 1 << nm->m.width;
  122. ccu_nm_find_best(*parent_rate, rate, &_nm);
  123. rate = ccu_nm_calc_rate(*parent_rate, _nm.n, _nm.m);
  124. if (nm->common.features & CCU_FEATURE_FIXED_POSTDIV)
  125. rate /= nm->fixed_post_div;
  126. return rate;
  127. }
/*
 * clk_ops .set_rate callback: program the N and M factors, or hand the
 * rate off to the fractional / sigma-delta helpers when they can
 * produce it, then wait for the PLL to lock.
 */
static int ccu_nm_set_rate(struct clk_hw *hw, unsigned long rate,
			   unsigned long parent_rate)
{
	struct ccu_nm *nm = hw_to_ccu_nm(hw);
	struct _ccu_nm _nm;
	unsigned long flags;
	u32 reg;

	/* Adjust target rate according to post-dividers */
	if (nm->common.features & CCU_FEATURE_FIXED_POSTDIV)
		rate = rate * nm->fixed_post_div;

	if (ccu_frac_helper_has_rate(&nm->common, &nm->frac, rate)) {
		spin_lock_irqsave(nm->common.lock, flags);

		/* most SoCs require M to be 0 if fractional mode is used */
		reg = readl(nm->common.base + nm->common.reg);
		reg &= ~GENMASK(nm->m.width + nm->m.shift - 1, nm->m.shift);
		writel(reg, nm->common.base + nm->common.reg);

		spin_unlock_irqrestore(nm->common.lock, flags);

		ccu_frac_helper_enable(&nm->common, &nm->frac);

		return ccu_frac_helper_set_rate(&nm->common, &nm->frac,
						rate, nm->lock);
	} else {
		/* Not a fractional rate: make sure fractional mode is off */
		ccu_frac_helper_disable(&nm->common, &nm->frac);
	}

	/* A zero min/max means "use the full register field range" */
	_nm.min_n = nm->n.min ?: 1;
	_nm.max_n = nm->n.max ?: 1 << nm->n.width;
	_nm.min_m = 1;
	_nm.max_m = nm->m.max ?: 1 << nm->m.width;

	if (ccu_sdm_helper_has_rate(&nm->common, &nm->sdm, rate)) {
		ccu_sdm_helper_enable(&nm->common, &nm->sdm, rate);

		/* Sigma delta modulation requires specific N and M factors */
		ccu_sdm_helper_get_factors(&nm->common, &nm->sdm, rate,
					   &_nm.m, &_nm.n);
	} else {
		ccu_sdm_helper_disable(&nm->common, &nm->sdm);
		ccu_nm_find_best(parent_rate, rate, &_nm);
	}

	spin_lock_irqsave(nm->common.lock, flags);

	/* Clear both factor fields, then program the new values */
	reg = readl(nm->common.base + nm->common.reg);
	reg &= ~GENMASK(nm->n.width + nm->n.shift - 1, nm->n.shift);
	reg &= ~GENMASK(nm->m.width + nm->m.shift - 1, nm->m.shift);

	reg |= (_nm.n - nm->n.offset) << nm->n.shift;
	reg |= (_nm.m - nm->m.offset) << nm->m.shift;
	writel(reg, nm->common.base + nm->common.reg);

	spin_unlock_irqrestore(nm->common.lock, flags);

	/* Block until the PLL reports lock on the new configuration */
	ccu_helper_wait_for_lock(&nm->common, nm->lock);

	return 0;
}
/*
 * Common clock framework operations for N/M-factor clocks, wired to
 * the gate and factor callbacks defined above.
 */
const struct clk_ops ccu_nm_ops = {
	.disable	= ccu_nm_disable,
	.enable		= ccu_nm_enable,
	.is_enabled	= ccu_nm_is_enabled,

	.recalc_rate	= ccu_nm_recalc_rate,
	.round_rate	= ccu_nm_round_rate,
	.set_rate	= ccu_nm_set_rate,
};