clk-composite-93.c

// SPDX-License-Identifier: GPL-2.0+
/*
 * Copyright 2021 NXP
 *
 * Peng Fan <peng.fan@nxp.com>
 */

#include <linux/clk-provider.h>
#include <linux/errno.h>
#include <linux/export.h>
#include <linux/io.h>
#include <linux/iopoll.h>
#include <linux/slab.h>

#include "clk.h"

#define TIMEOUT_US		500U

#define CCM_DIV_SHIFT		0
#define CCM_DIV_WIDTH		8
#define CCM_MUX_SHIFT		8
#define CCM_MUX_MASK		3
#define CCM_OFF_SHIFT		24
#define CCM_BUSY_SHIFT		28

#define STAT_OFFSET		0x4
#define AUTHEN_OFFSET		0x30
#define TZ_NS_SHIFT		9
#define TZ_NS_MASK		BIT(9)

#define WHITE_LIST_SHIFT	16
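
/*
 * Per the defines above, each clock root has a control register with the
 * divider in bits 7:0, the mux select in bits 9:8 and an OFF (gate) bit
 * at bit 24; a BUSY flag sits at bit 28 of the STAT register at offset
 * 0x4; and the AUTHEN register at offset 0x30 holds the TrustZone
 * non-secure bit (bit 9) plus a per-domain white list starting at bit 16.
 */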

static int imx93_clk_composite_wait_ready(struct clk_hw *hw, void __iomem *reg)
{
	int ret;
	u32 val;

	ret = readl_poll_timeout_atomic(reg + STAT_OFFSET, val, !(val & BIT(CCM_BUSY_SHIFT)),
					0, TIMEOUT_US);
	if (ret)
		pr_err("Slice[%s] busy timeout\n", clk_hw_get_name(hw));

	return ret;
}
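
/*
 * The gate field is an OFF bit: clearing it enables the slice, so the
 * generic gate is registered with CLK_GATE_SET_TO_DISABLE below. Every
 * register write is followed by a poll of the slice's BUSY flag.
 */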
static void imx93_clk_composite_gate_endisable(struct clk_hw *hw, int enable)
{
	struct clk_gate *gate = to_clk_gate(hw);
	unsigned long flags;
	u32 reg;

	if (gate->lock)
		spin_lock_irqsave(gate->lock, flags);

	reg = readl(gate->reg);

	if (enable)
		reg &= ~BIT(gate->bit_idx);
	else
		reg |= BIT(gate->bit_idx);

	writel(reg, gate->reg);

	imx93_clk_composite_wait_ready(hw, gate->reg);

	if (gate->lock)
		spin_unlock_irqrestore(gate->lock, flags);
}

static int imx93_clk_composite_gate_enable(struct clk_hw *hw)
{
	imx93_clk_composite_gate_endisable(hw, 1);

	return 0;
}

static void imx93_clk_composite_gate_disable(struct clk_hw *hw)
{
	/*
	 * Skip disabling the root clock gate if the mcore has booted:
	 * the root clock may still be in use by the mcore.
	 */
	if (mcore_booted)
		return;

	imx93_clk_composite_gate_endisable(hw, 0);
}

static const struct clk_ops imx93_clk_composite_gate_ops = {
	.enable = imx93_clk_composite_gate_enable,
	.disable = imx93_clk_composite_gate_disable,
	.is_enabled = clk_gate_is_enabled,
};
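
/*
 * The divider and mux callbacks below delegate all rate and parent-selection
 * math to the generic clk_divider_ops/clk_mux_ops; only the set_rate and
 * set_parent paths differ, adding the BUSY poll after the register write.
 */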
static unsigned long
imx93_clk_composite_divider_recalc_rate(struct clk_hw *hw, unsigned long parent_rate)
{
	return clk_divider_ops.recalc_rate(hw, parent_rate);
}

static long
imx93_clk_composite_divider_round_rate(struct clk_hw *hw, unsigned long rate, unsigned long *prate)
{
	return clk_divider_ops.round_rate(hw, rate, prate);
}

static int
imx93_clk_composite_divider_determine_rate(struct clk_hw *hw, struct clk_rate_request *req)
{
	return clk_divider_ops.determine_rate(hw, req);
}

static int imx93_clk_composite_divider_set_rate(struct clk_hw *hw, unsigned long rate,
						unsigned long parent_rate)
{
	struct clk_divider *divider = to_clk_divider(hw);
	int value;
	unsigned long flags = 0;
	u32 val;
	int ret;

	value = divider_get_val(rate, parent_rate, divider->table, divider->width, divider->flags);
	if (value < 0)
		return value;

	if (divider->lock)
		spin_lock_irqsave(divider->lock, flags);

	val = readl(divider->reg);
	val &= ~(clk_div_mask(divider->width) << divider->shift);
	val |= (u32)value << divider->shift;
	writel(val, divider->reg);

	ret = imx93_clk_composite_wait_ready(hw, divider->reg);

	if (divider->lock)
		spin_unlock_irqrestore(divider->lock, flags);

	return ret;
}

static const struct clk_ops imx93_clk_composite_divider_ops = {
	.recalc_rate = imx93_clk_composite_divider_recalc_rate,
	.round_rate = imx93_clk_composite_divider_round_rate,
	.determine_rate = imx93_clk_composite_divider_determine_rate,
	.set_rate = imx93_clk_composite_divider_set_rate,
};

static u8 imx93_clk_composite_mux_get_parent(struct clk_hw *hw)
{
	return clk_mux_ops.get_parent(hw);
}

static int imx93_clk_composite_mux_set_parent(struct clk_hw *hw, u8 index)
{
	struct clk_mux *mux = to_clk_mux(hw);
	u32 val = clk_mux_index_to_val(mux->table, mux->flags, index);
	unsigned long flags = 0;
	u32 reg;
	int ret;

	if (mux->lock)
		spin_lock_irqsave(mux->lock, flags);

	reg = readl(mux->reg);
	reg &= ~(mux->mask << mux->shift);
	val = val << mux->shift;
	reg |= val;
	writel(reg, mux->reg);

	ret = imx93_clk_composite_wait_ready(hw, mux->reg);

	if (mux->lock)
		spin_unlock_irqrestore(mux->lock, flags);

	return ret;
}

static int
imx93_clk_composite_mux_determine_rate(struct clk_hw *hw, struct clk_rate_request *req)
{
	return clk_mux_ops.determine_rate(hw, req);
}

static const struct clk_ops imx93_clk_composite_mux_ops = {
	.get_parent = imx93_clk_composite_mux_get_parent,
	.set_parent = imx93_clk_composite_mux_set_parent,
	.determine_rate = imx93_clk_composite_mux_determine_rate,
};
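
/*
 * A root is registered read-only when the AUTHEN register says this
 * domain must not touch it: either the TrustZone non-secure bit is
 * clear, or the domain's white-list bit is not set.
 */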
struct clk_hw *imx93_clk_composite_flags(const char *name, const char * const *parent_names,
					 int num_parents, void __iomem *reg, u32 domain_id,
					 unsigned long flags)
{
	struct clk_hw *hw = ERR_PTR(-ENOMEM), *mux_hw;
	struct clk_hw *div_hw, *gate_hw;
	struct clk_divider *div = NULL;
	struct clk_gate *gate = NULL;
	struct clk_mux *mux = NULL;
	bool clk_ro = false;
	u32 authen;

	mux = kzalloc(sizeof(*mux), GFP_KERNEL);
	if (!mux)
		goto fail;

	mux_hw = &mux->hw;
	mux->reg = reg;
	mux->shift = CCM_MUX_SHIFT;
	mux->mask = CCM_MUX_MASK;
	mux->lock = &imx_ccm_lock;

	div = kzalloc(sizeof(*div), GFP_KERNEL);
	if (!div)
		goto fail;

	div_hw = &div->hw;
	div->reg = reg;
	div->shift = CCM_DIV_SHIFT;
	div->width = CCM_DIV_WIDTH;
	div->lock = &imx_ccm_lock;
	div->flags = CLK_DIVIDER_ROUND_CLOSEST;

	authen = readl(reg + AUTHEN_OFFSET);
	if (!(authen & TZ_NS_MASK) || !(authen & BIT(WHITE_LIST_SHIFT + domain_id)))
		clk_ro = true;

	if (clk_ro) {
		hw = clk_hw_register_composite(NULL, name, parent_names, num_parents,
					       mux_hw, &clk_mux_ro_ops, div_hw,
					       &clk_divider_ro_ops, NULL, NULL, flags);
	} else {
		gate = kzalloc(sizeof(*gate), GFP_KERNEL);
		if (!gate)
			goto fail;

		gate_hw = &gate->hw;
		gate->reg = reg;
		gate->bit_idx = CCM_OFF_SHIFT;
		gate->lock = &imx_ccm_lock;
		gate->flags = CLK_GATE_SET_TO_DISABLE;

		hw = clk_hw_register_composite(NULL, name, parent_names, num_parents,
					       mux_hw, &imx93_clk_composite_mux_ops, div_hw,
					       &imx93_clk_composite_divider_ops, gate_hw,
					       &imx93_clk_composite_gate_ops,
					       flags | CLK_SET_RATE_NO_REPARENT);
	}

	if (IS_ERR(hw))
		goto fail;

	return hw;

fail:
	kfree(gate);
	kfree(div);
	kfree(mux);
	return ERR_CAST(hw);
}
EXPORT_SYMBOL_GPL(imx93_clk_composite_flags);
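
/*
 * Usage sketch (editorial addition, not part of the upstream file): a
 * minimal, hypothetical caller registering one clock root. The parent
 * names, the 0x0a80 root offset and domain id 3 are invented for
 * illustration; real callers take these values from the i.MX93
 * reference manual.
 */
#if 0
static const char * const lpuart1_parents[] = { "osc_24m", "sys_pll_pfd0_div2" };

static struct clk_hw *example_register_root(void __iomem *ccm_base)
{
	return imx93_clk_composite_flags("lpuart1_root", lpuart1_parents,
					 ARRAY_SIZE(lpuart1_parents),
					 ccm_base + 0x0a80, 3,
					 CLK_SET_RATE_PARENT);
}
#endif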