clk-pmc-atom.c

// SPDX-License-Identifier: GPL-2.0-only
/*
 * Intel Atom platform clocks driver for BayTrail and CherryTrail SoCs
 *
 * Copyright (C) 2016, Intel Corporation
 * Author: Irina Tirdea <irina.tirdea@intel.com>
 */

#include <linux/clk-provider.h>
#include <linux/clkdev.h>
#include <linux/err.h>
#include <linux/io.h>
#include <linux/platform_data/x86/clk-pmc-atom.h>
#include <linux/platform_data/x86/pmc_atom.h>
#include <linux/platform_device.h>
#include <linux/slab.h>

#define PLT_CLK_NAME_BASE	"pmc_plt_clk"

struct clk_plt_fixed {
        struct clk_hw *clk;
        struct clk_lookup *lookup;
};

struct clk_plt {
        struct clk_hw hw;
        void __iomem *reg;
        struct clk_lookup *lookup;
        /* protect access to PMC registers */
        spinlock_t lock;
};

#define to_clk_plt(_hw) container_of(_hw, struct clk_plt, hw)

struct clk_plt_data {
        struct clk_plt_fixed **parents;
        u8 nparents;
        struct clk_plt *clks[PMC_CLK_NUM];
        struct clk_lookup *mclk_lookup;
        struct clk_lookup *ether_clk_lookup;
};

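/*
 * Driver-wide state: "parents" holds the fixed-rate source clocks (XTAL and
 * PLL) registered from platform data, and "clks" holds the PMC_CLK_NUM
 * gateable platform clocks exposed to consumers.  The extra "mclk" and
 * "ether_clk" lookups alias pmc_plt_clk_3 and pmc_plt_clk_4 for consumers
 * requesting those connection IDs (e.g. camera sensor and ethernet drivers,
 * respectively).
 *
 * A consumer would typically acquire one of these clocks by con_id; a
 * minimal sketch (hypothetical consumer code, not part of this driver):
 *
 *	struct clk *mclk;
 *
 *	mclk = devm_clk_get(dev, "mclk");
 *	if (IS_ERR(mclk))
 *		return PTR_ERR(mclk);
 *	return clk_prepare_enable(mclk);
 */
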
/* Return an index in parent table */
static inline int plt_reg_to_parent(int reg)
{
        switch (reg & PMC_MASK_CLK_FREQ) {
        default:
        case PMC_CLK_FREQ_XTAL:
                return 0;
        case PMC_CLK_FREQ_PLL:
                return 1;
        }
}

/* Return the register value for a given parent index */
static inline int plt_parent_to_reg(int index)
{
        switch (index) {
        default:
        case 0:
                return PMC_CLK_FREQ_XTAL;
        case 1:
                return PMC_CLK_FREQ_PLL;
        }
}

/* Abstract the hardware status into a simple enabled/disabled value */
static inline int plt_reg_to_enabled(int reg)
{
        switch (reg & PMC_MASK_CLK_CTL) {
        case PMC_CLK_CTL_GATED_ON_D3:
        case PMC_CLK_CTL_FORCE_ON:
                return 1;       /* enabled */
        case PMC_CLK_CTL_FORCE_OFF:
        case PMC_CLK_CTL_RESERVED:
        default:
                return 0;       /* disabled */
        }
}

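/*
 * Read-modify-write of a clock control register: only the bits in "mask"
 * are replaced with the corresponding bits of "val", under the per-clock
 * spinlock so that concurrent enable/disable and reparent operations do
 * not clobber each other.
 */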
static void plt_clk_reg_update(struct clk_plt *clk, u32 mask, u32 val)
{
        u32 tmp;
        unsigned long flags;

        spin_lock_irqsave(&clk->lock, flags);

        tmp = readl(clk->reg);
        tmp = (tmp & ~mask) | (val & mask);
        writel(tmp, clk->reg);

        spin_unlock_irqrestore(&clk->lock, flags);
}

static int plt_clk_set_parent(struct clk_hw *hw, u8 index)
{
        struct clk_plt *clk = to_clk_plt(hw);

        plt_clk_reg_update(clk, PMC_MASK_CLK_FREQ, plt_parent_to_reg(index));

        return 0;
}

static u8 plt_clk_get_parent(struct clk_hw *hw)
{
        struct clk_plt *clk = to_clk_plt(hw);
        u32 value;

        value = readl(clk->reg);

        return plt_reg_to_parent(value);
}

static int plt_clk_enable(struct clk_hw *hw)
{
        struct clk_plt *clk = to_clk_plt(hw);

        plt_clk_reg_update(clk, PMC_MASK_CLK_CTL, PMC_CLK_CTL_FORCE_ON);

        return 0;
}

static void plt_clk_disable(struct clk_hw *hw)
{
        struct clk_plt *clk = to_clk_plt(hw);

        plt_clk_reg_update(clk, PMC_MASK_CLK_CTL, PMC_CLK_CTL_FORCE_OFF);
}

static int plt_clk_is_enabled(struct clk_hw *hw)
{
        struct clk_plt *clk = to_clk_plt(hw);
        u32 value;

        value = readl(clk->reg);

        return plt_reg_to_enabled(value);
}

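/*
 * Each platform clock is a simple gate combined with a two-input mux that
 * selects between the crystal and PLL parents supplied via platform data.
 * The clock has no rate control of its own, so rate requests are resolved
 * purely through parent selection via __clk_mux_determine_rate().
 */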
static const struct clk_ops plt_clk_ops = {
        .enable = plt_clk_enable,
        .disable = plt_clk_disable,
        .is_enabled = plt_clk_is_enabled,
        .get_parent = plt_clk_get_parent,
        .set_parent = plt_clk_set_parent,
        .determine_rate = __clk_mux_determine_rate,
};

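/*
 * Register one gateable platform clock.  The control register for clock
 * "id" lives at PMC base + PMC_CLK_CTL_OFFSET + id * PMC_CLK_CTL_SIZE, and
 * the clock is named "pmc_plt_clk_<id>".  The clk core duplicates
 * init.name during registration, so the temporary kasprintf() buffer is
 * freed on every path.
 */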
static struct clk_plt *plt_clk_register(struct platform_device *pdev, int id,
                                        const struct pmc_clk_data *pmc_data,
                                        const char **parent_names,
                                        int num_parents)
{
        struct clk_plt *pclk;
        struct clk_init_data init;
        int ret;

        pclk = devm_kzalloc(&pdev->dev, sizeof(*pclk), GFP_KERNEL);
        if (!pclk)
                return ERR_PTR(-ENOMEM);

        init.name = kasprintf(GFP_KERNEL, "%s_%d", PLT_CLK_NAME_BASE, id);
        init.ops = &plt_clk_ops;
        init.flags = 0;
        init.parent_names = parent_names;
        init.num_parents = num_parents;
        pclk->hw.init = &init;

        pclk->reg = pmc_data->base + PMC_CLK_CTL_OFFSET + id * PMC_CLK_CTL_SIZE;
        spin_lock_init(&pclk->lock);

        /*
         * On some systems the pmc_plt_clocks are already enabled by the
         * firmware; mark those as critical so that the clock framework
         * does not gate them.
         */
        if (pmc_data->critical && plt_clk_is_enabled(&pclk->hw))
                init.flags |= CLK_IS_CRITICAL;

        ret = devm_clk_hw_register(&pdev->dev, &pclk->hw);
        if (ret) {
                pclk = ERR_PTR(ret);
                goto err_free_init;
        }

        pclk->lookup = clkdev_hw_create(&pclk->hw, init.name, NULL);
        if (!pclk->lookup) {
                pclk = ERR_PTR(-ENOMEM);
                goto err_free_init;
        }

err_free_init:
        kfree(init.name);
        return pclk;
}

static void plt_clk_unregister(struct clk_plt *pclk)
{
        clkdev_drop(pclk->lookup);
}

static struct clk_plt_fixed *plt_clk_register_fixed_rate(struct platform_device *pdev,
                                                          const char *name,
                                                          const char *parent_name,
                                                          unsigned long fixed_rate)
{
        struct clk_plt_fixed *pclk;

        pclk = devm_kzalloc(&pdev->dev, sizeof(*pclk), GFP_KERNEL);
        if (!pclk)
                return ERR_PTR(-ENOMEM);

        pclk->clk = clk_hw_register_fixed_rate(&pdev->dev, name, parent_name,
                                               0, fixed_rate);
        if (IS_ERR(pclk->clk))
                return ERR_CAST(pclk->clk);

        pclk->lookup = clkdev_hw_create(pclk->clk, name, NULL);
        if (!pclk->lookup) {
                clk_hw_unregister_fixed_rate(pclk->clk);
                return ERR_PTR(-ENOMEM);
        }

        return pclk;
}

static void plt_clk_unregister_fixed_rate(struct clk_plt_fixed *pclk)
{
        clkdev_drop(pclk->lookup);
        clk_hw_unregister_fixed_rate(pclk->clk);
}

static void plt_clk_unregister_fixed_rate_loop(struct clk_plt_data *data,
                                               unsigned int i)
{
        while (i--)
                plt_clk_unregister_fixed_rate(data->parents[i]);
}

static void plt_clk_free_parent_names_loop(const char **parent_names,
                                           unsigned int i)
{
        while (i--)
                kfree_const(parent_names[i]);
        kfree(parent_names);
}

static void plt_clk_unregister_loop(struct clk_plt_data *data,
                                    unsigned int i)
{
        while (i--)
                plt_clk_unregister(data->clks[i]);
}

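/*
 * Register the fixed-rate parent clocks described in the platform data
 * (the array is terminated by an entry with a NULL name) and return the
 * matching parent_names array used to initialise each platform clock.
 * The caller owns the returned array and releases it with
 * plt_clk_free_parent_names_loop().
 */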
static const char **plt_clk_register_parents(struct platform_device *pdev,
                                             struct clk_plt_data *data,
                                             const struct pmc_clk *clks)
{
        const char **parent_names;
        unsigned int i;
        int err;
        int nparents = 0;

        data->nparents = 0;
        while (clks[nparents].name)
                nparents++;

        data->parents = devm_kcalloc(&pdev->dev, nparents,
                                     sizeof(*data->parents), GFP_KERNEL);
        if (!data->parents)
                return ERR_PTR(-ENOMEM);

        parent_names = kcalloc(nparents, sizeof(*parent_names),
                               GFP_KERNEL);
        if (!parent_names)
                return ERR_PTR(-ENOMEM);

        for (i = 0; i < nparents; i++) {
                data->parents[i] =
                        plt_clk_register_fixed_rate(pdev, clks[i].name,
                                                    clks[i].parent_name,
                                                    clks[i].freq);
                if (IS_ERR(data->parents[i])) {
                        err = PTR_ERR(data->parents[i]);
                        goto err_unreg;
                }
                parent_names[i] = kstrdup_const(clks[i].name, GFP_KERNEL);
        }
        data->nparents = nparents;

        return parent_names;

err_unreg:
        plt_clk_unregister_fixed_rate_loop(data, i);
        plt_clk_free_parent_names_loop(parent_names, i);
        return ERR_PTR(err);
}

static void plt_clk_unregister_parents(struct clk_plt_data *data)
{
        plt_clk_unregister_fixed_rate_loop(data, data->nparents);
}

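/*
 * Probe order: register the fixed-rate parents from platform data, then the
 * PMC_CLK_NUM platform clocks, then the "mclk" and "ether_clk" aliases.
 * On failure, everything registered so far is torn down in reverse order.
 */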
static int plt_clk_probe(struct platform_device *pdev)
{
        const struct pmc_clk_data *pmc_data;
        const char **parent_names;
        struct clk_plt_data *data;
        unsigned int i;
        int err;

        pmc_data = dev_get_platdata(&pdev->dev);
        if (!pmc_data || !pmc_data->clks)
                return -EINVAL;

        data = devm_kzalloc(&pdev->dev, sizeof(*data), GFP_KERNEL);
        if (!data)
                return -ENOMEM;

        parent_names = plt_clk_register_parents(pdev, data, pmc_data->clks);
        if (IS_ERR(parent_names))
                return PTR_ERR(parent_names);

        for (i = 0; i < PMC_CLK_NUM; i++) {
                data->clks[i] = plt_clk_register(pdev, i, pmc_data,
                                                 parent_names, data->nparents);
                if (IS_ERR(data->clks[i])) {
                        err = PTR_ERR(data->clks[i]);
                        goto err_unreg_clk_plt;
                }
        }

        data->mclk_lookup = clkdev_hw_create(&data->clks[3]->hw, "mclk", NULL);
        if (!data->mclk_lookup) {
                err = -ENOMEM;
                goto err_unreg_clk_plt;
        }

        data->ether_clk_lookup = clkdev_hw_create(&data->clks[4]->hw,
                                                  "ether_clk", NULL);
        if (!data->ether_clk_lookup) {
                err = -ENOMEM;
                goto err_drop_mclk;
        }

        plt_clk_free_parent_names_loop(parent_names, data->nparents);

        platform_set_drvdata(pdev, data);

        return 0;

err_drop_mclk:
        clkdev_drop(data->mclk_lookup);
err_unreg_clk_plt:
        plt_clk_unregister_loop(data, i);
        plt_clk_unregister_parents(data);
        plt_clk_free_parent_names_loop(parent_names, data->nparents);
        return err;
}

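/*
 * Undo probe in reverse order.  The platform clock clk_hw registrations are
 * devm-managed; only the clkdev lookups and the fixed-rate parents need
 * explicit teardown here.
 */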
static void plt_clk_remove(struct platform_device *pdev)
{
        struct clk_plt_data *data;

        data = platform_get_drvdata(pdev);

        clkdev_drop(data->ether_clk_lookup);
        clkdev_drop(data->mclk_lookup);
        plt_clk_unregister_loop(data, PMC_CLK_NUM);
        plt_clk_unregister_parents(data);
}

static struct platform_driver plt_clk_driver = {
        .driver = {
                .name = "clk-pmc-atom",
        },
        .probe = plt_clk_probe,
        .remove = plt_clk_remove,
};
builtin_platform_driver(plt_clk_driver);