/* clk-lpcg-scu.c */
  1. // SPDX-License-Identifier: GPL-2.0+
  2. /*
  3. * Copyright 2018 NXP
  4. * Dong Aisheng <aisheng.dong@nxp.com>
  5. */
  6. #include <linux/bits.h>
  7. #include <linux/clk-provider.h>
  8. #include <linux/delay.h>
  9. #include <linux/err.h>
  10. #include <linux/io.h>
  11. #include <linux/slab.h>
  12. #include <linux/spinlock.h>
  13. #include <linux/units.h>
  14. #include "clk-scu.h"
  15. static DEFINE_SPINLOCK(imx_lpcg_scu_lock);
  16. #define CLK_GATE_SCU_LPCG_MASK 0x3
  17. #define CLK_GATE_SCU_LPCG_HW_SEL BIT(0)
  18. #define CLK_GATE_SCU_LPCG_SW_SEL BIT(1)
  19. /*
  20. * struct clk_lpcg_scu - Description of LPCG clock
  21. *
  22. * @hw: clk_hw of this LPCG
  23. * @reg: register of this LPCG clock
  24. * @bit_idx: bit index of this LPCG clock
  25. * @hw_gate: HW auto gate enable
  26. *
  27. * This structure describes one LPCG clock
  28. */
  29. struct clk_lpcg_scu {
  30. struct clk_hw hw;
  31. void __iomem *reg;
  32. u8 bit_idx;
  33. bool hw_gate;
  34. /* for state save&restore */
  35. u32 state;
  36. };
  37. #define to_clk_lpcg_scu(_hw) container_of(_hw, struct clk_lpcg_scu, hw)
  38. /* e10858 -LPCG clock gating register synchronization errata */
  39. static void lpcg_e10858_writel(unsigned long rate, void __iomem *reg, u32 val)
  40. {
  41. writel(val, reg);
  42. if (rate >= 24 * HZ_PER_MHZ || rate == 0) {
  43. /*
  44. * The time taken to access the LPCG registers from the AP core
  45. * through the interconnect is longer than the minimum delay
  46. * of 4 clock cycles required by the errata.
  47. * Adding a readl will provide sufficient delay to prevent
  48. * back-to-back writes.
  49. */
  50. readl(reg);
  51. } else {
  52. /*
  53. * For clocks running below 24MHz, wait a minimum of
  54. * 4 clock cycles.
  55. */
  56. ndelay(4 * (DIV_ROUND_UP(1000 * HZ_PER_MHZ, rate)));
  57. }
  58. }
  59. static int clk_lpcg_scu_enable(struct clk_hw *hw)
  60. {
  61. struct clk_lpcg_scu *clk = to_clk_lpcg_scu(hw);
  62. unsigned long flags;
  63. u32 reg, val;
  64. spin_lock_irqsave(&imx_lpcg_scu_lock, flags);
  65. reg = readl_relaxed(clk->reg);
  66. reg &= ~(CLK_GATE_SCU_LPCG_MASK << clk->bit_idx);
  67. val = CLK_GATE_SCU_LPCG_SW_SEL;
  68. if (clk->hw_gate)
  69. val |= CLK_GATE_SCU_LPCG_HW_SEL;
  70. reg |= val << clk->bit_idx;
  71. lpcg_e10858_writel(clk_hw_get_rate(hw), clk->reg, reg);
  72. spin_unlock_irqrestore(&imx_lpcg_scu_lock, flags);
  73. return 0;
  74. }
  75. static void clk_lpcg_scu_disable(struct clk_hw *hw)
  76. {
  77. struct clk_lpcg_scu *clk = to_clk_lpcg_scu(hw);
  78. unsigned long flags;
  79. u32 reg;
  80. spin_lock_irqsave(&imx_lpcg_scu_lock, flags);
  81. reg = readl_relaxed(clk->reg);
  82. reg &= ~(CLK_GATE_SCU_LPCG_MASK << clk->bit_idx);
  83. lpcg_e10858_writel(clk_hw_get_rate(hw), clk->reg, reg);
  84. spin_unlock_irqrestore(&imx_lpcg_scu_lock, flags);
  85. }
  86. static const struct clk_ops clk_lpcg_scu_ops = {
  87. .enable = clk_lpcg_scu_enable,
  88. .disable = clk_lpcg_scu_disable,
  89. };
  90. struct clk_hw *__imx_clk_lpcg_scu(struct device *dev, const char *name,
  91. const char *parent_name, unsigned long flags,
  92. void __iomem *reg, u8 bit_idx, bool hw_gate)
  93. {
  94. struct clk_lpcg_scu *clk;
  95. struct clk_init_data init;
  96. struct clk_hw *hw;
  97. int ret;
  98. clk = kzalloc(sizeof(*clk), GFP_KERNEL);
  99. if (!clk)
  100. return ERR_PTR(-ENOMEM);
  101. clk->reg = reg;
  102. clk->bit_idx = bit_idx;
  103. clk->hw_gate = hw_gate;
  104. init.name = name;
  105. init.ops = &clk_lpcg_scu_ops;
  106. init.flags = CLK_SET_RATE_PARENT | flags;
  107. init.parent_names = parent_name ? &parent_name : NULL;
  108. init.num_parents = parent_name ? 1 : 0;
  109. clk->hw.init = &init;
  110. hw = &clk->hw;
  111. ret = clk_hw_register(dev, hw);
  112. if (ret) {
  113. kfree(clk);
  114. hw = ERR_PTR(ret);
  115. return hw;
  116. }
  117. if (dev)
  118. dev_set_drvdata(dev, clk);
  119. return hw;
  120. }
  121. void imx_clk_lpcg_scu_unregister(struct clk_hw *hw)
  122. {
  123. struct clk_lpcg_scu *clk = to_clk_lpcg_scu(hw);
  124. clk_hw_unregister(&clk->hw);
  125. kfree(clk);
  126. }
  127. static int __maybe_unused imx_clk_lpcg_scu_suspend(struct device *dev)
  128. {
  129. struct clk_lpcg_scu *clk = dev_get_drvdata(dev);
  130. clk->state = readl_relaxed(clk->reg);
  131. dev_dbg(dev, "save lpcg state 0x%x\n", clk->state);
  132. return 0;
  133. }
  134. static int __maybe_unused imx_clk_lpcg_scu_resume(struct device *dev)
  135. {
  136. struct clk_lpcg_scu *clk = dev_get_drvdata(dev);
  137. writel(clk->state, clk->reg);
  138. lpcg_e10858_writel(0, clk->reg, clk->state);
  139. dev_dbg(dev, "restore lpcg state 0x%x\n", clk->state);
  140. return 0;
  141. }
  142. const struct dev_pm_ops imx_clk_lpcg_scu_pm_ops = {
  143. SET_NOIRQ_SYSTEM_SLEEP_PM_OPS(imx_clk_lpcg_scu_suspend,
  144. imx_clk_lpcg_scu_resume)
  145. };