// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Ingenic SoC CGU driver
 *
 * Copyright (c) 2013-2015 Imagination Technologies
 * Author: Paul Burton <paul.burton@mips.com>
 */

#include <linux/bitops.h>
#include <linux/clk.h>
#include <linux/clk-provider.h>
#include <linux/clkdev.h>
#include <linux/delay.h>
#include <linux/io.h>
#include <linux/iopoll.h>
#include <linux/math64.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/time.h>

#include "cgu.h"

#define MHZ (1000 * 1000)
/*
 * to_clk_info() - look up the description of a clock
 *
 * Returns the ingenic_cgu_clk_info entry in the owning CGU's clock_info
 * table corresponding to this clock's index.
 */
static inline const struct ingenic_cgu_clk_info *
to_clk_info(struct ingenic_clk *clk)
{
	return &clk->cgu->clock_info[clk->idx];
}
  28. /**
  29. * ingenic_cgu_gate_get() - get the value of clock gate register bit
  30. * @cgu: reference to the CGU whose registers should be read
  31. * @info: info struct describing the gate bit
  32. *
  33. * Retrieves the state of the clock gate bit described by info. The
  34. * caller must hold cgu->lock.
  35. *
  36. * Return: true if the gate bit is set, else false.
  37. */
  38. static inline bool
  39. ingenic_cgu_gate_get(struct ingenic_cgu *cgu,
  40. const struct ingenic_cgu_gate_info *info)
  41. {
  42. return !!(readl(cgu->base + info->reg) & BIT(info->bit))
  43. ^ info->clear_to_gate;
  44. }
  45. /**
  46. * ingenic_cgu_gate_set() - set the value of clock gate register bit
  47. * @cgu: reference to the CGU whose registers should be modified
  48. * @info: info struct describing the gate bit
  49. * @val: non-zero to gate a clock, otherwise zero
  50. *
  51. * Sets the given gate bit in order to gate or ungate a clock.
  52. *
  53. * The caller must hold cgu->lock.
  54. */
  55. static inline void
  56. ingenic_cgu_gate_set(struct ingenic_cgu *cgu,
  57. const struct ingenic_cgu_gate_info *info, bool val)
  58. {
  59. u32 clkgr = readl(cgu->base + info->reg);
  60. if (val ^ info->clear_to_gate)
  61. clkgr |= BIT(info->bit);
  62. else
  63. clkgr &= ~BIT(info->bit);
  64. writel(clkgr, cgu->base + info->reg);
  65. }
/*
 * PLL operations
 */

/*
 * ingenic_pll_recalc_rate() - compute the PLL output rate from registers
 *
 * Reads the PLL control register, decodes the multiplier (m), input
 * divider (n) and output divider (od) fields, and returns the resulting
 * rate:  parent_rate * m * rate_multiplier / (n * od).
 * If the PLL is bypassed, the parent rate is returned unchanged.
 */
static unsigned long
ingenic_pll_recalc_rate(struct clk_hw *hw, unsigned long parent_rate)
{
	struct ingenic_clk *ingenic_clk = to_ingenic_clk(hw);
	const struct ingenic_cgu_clk_info *clk_info = to_clk_info(ingenic_clk);
	struct ingenic_cgu *cgu = ingenic_clk->cgu;
	const struct ingenic_cgu_pll_info *pll_info;
	unsigned m, n, od, od_enc = 0;
	bool bypass;
	u32 ctl;

	BUG_ON(clk_info->type != CGU_CLK_PLL);
	pll_info = &clk_info->pll;

	ctl = readl(cgu->base + pll_info->reg);

	/* Register fields hold the value minus an SoC-specific offset. */
	m = (ctl >> pll_info->m_shift) & GENMASK(pll_info->m_bits - 1, 0);
	m += pll_info->m_offset;
	n = (ctl >> pll_info->n_shift) & GENMASK(pll_info->n_bits - 1, 0);
	n += pll_info->n_offset;

	/* od_bits == 0 means there is no output divider field to read. */
	if (pll_info->od_bits > 0) {
		od_enc = ctl >> pll_info->od_shift;
		od_enc &= GENMASK(pll_info->od_bits - 1, 0);
	}

	/* A bypassed PLL passes its input straight through. */
	if (pll_info->bypass_bit >= 0) {
		ctl = readl(cgu->base + pll_info->bypass_reg);

		bypass = !!(ctl & BIT(pll_info->bypass_bit));

		if (bypass)
			return parent_rate;
	}

	/* Map the encoded od field back to an index via the SoC's table. */
	for (od = 0; od < pll_info->od_max; od++)
		if (pll_info->od_encoding[od] == od_enc)
			break;

	/* if od_max = 0, od_bits should be 0 and od is fixed to 1. */
	if (pll_info->od_max == 0)
		BUG_ON(pll_info->od_bits != 0);
	else
		BUG_ON(od == pll_info->od_max);
	od++;

	return div_u64((u64)parent_rate * m * pll_info->rate_multiplier,
		       n * od);
}
  108. static void
  109. ingenic_pll_calc_m_n_od(const struct ingenic_cgu_pll_info *pll_info,
  110. unsigned long rate, unsigned long parent_rate,
  111. unsigned int *pm, unsigned int *pn, unsigned int *pod)
  112. {
  113. unsigned int m, n, od = 1;
  114. /*
  115. * The frequency after the input divider must be between 10 and 50 MHz.
  116. * The highest divider yields the best resolution.
  117. */
  118. n = parent_rate / (10 * MHZ);
  119. n = min_t(unsigned int, n, 1 << pll_info->n_bits);
  120. n = max_t(unsigned int, n, pll_info->n_offset);
  121. m = (rate / MHZ) * od * n / (parent_rate / MHZ);
  122. m = min_t(unsigned int, m, 1 << pll_info->m_bits);
  123. m = max_t(unsigned int, m, pll_info->m_offset);
  124. *pm = m;
  125. *pn = n;
  126. *pod = od;
  127. }
  128. static unsigned long
  129. ingenic_pll_calc(const struct ingenic_cgu_clk_info *clk_info,
  130. unsigned long rate, unsigned long parent_rate,
  131. unsigned int *pm, unsigned int *pn, unsigned int *pod)
  132. {
  133. const struct ingenic_cgu_pll_info *pll_info = &clk_info->pll;
  134. unsigned int m, n, od;
  135. if (pll_info->calc_m_n_od)
  136. (*pll_info->calc_m_n_od)(pll_info, rate, parent_rate, &m, &n, &od);
  137. else
  138. ingenic_pll_calc_m_n_od(pll_info, rate, parent_rate, &m, &n, &od);
  139. if (pm)
  140. *pm = m;
  141. if (pn)
  142. *pn = n;
  143. if (pod)
  144. *pod = od;
  145. return div_u64((u64)parent_rate * m * pll_info->rate_multiplier,
  146. n * od);
  147. }
/*
 * ingenic_pll_round_rate() - report the closest achievable PLL rate
 *
 * Only the resulting rate is of interest here, so the computed m/n/od
 * values are discarded (NULL out-pointers).
 */
static long
ingenic_pll_round_rate(struct clk_hw *hw, unsigned long req_rate,
		       unsigned long *prate)
{
	struct ingenic_clk *ingenic_clk = to_ingenic_clk(hw);
	const struct ingenic_cgu_clk_info *clk_info = to_clk_info(ingenic_clk);

	return ingenic_pll_calc(clk_info, req_rate, *prate, NULL, NULL, NULL);
}
/*
 * ingenic_pll_check_stable() - busy-wait until the PLL reports lock
 *
 * Polls the PLL control register until its stable bit is set, giving up
 * after 100ms. PLLs without a stable bit (stable_bit < 0) succeed
 * immediately. Returns 0 on success or the readl_poll_timeout error.
 */
static inline int ingenic_pll_check_stable(struct ingenic_cgu *cgu,
					   const struct ingenic_cgu_pll_info *pll_info)
{
	u32 ctl;

	if (pll_info->stable_bit < 0)
		return 0;

	return readl_poll_timeout(cgu->base + pll_info->reg, ctl,
				  ctl & BIT(pll_info->stable_bit),
				  0, 100 * USEC_PER_MSEC);
}
/*
 * ingenic_pll_set_rate() - program the PLL for the requested rate
 *
 * Computes m/n/od for req_rate (logging if only an approximation is
 * achievable), writes them into the PLL control register under
 * cgu->lock, runs the optional SoC hook, and - if the PLL is currently
 * enabled - waits for it to re-lock.
 */
static int
ingenic_pll_set_rate(struct clk_hw *hw, unsigned long req_rate,
		     unsigned long parent_rate)
{
	struct ingenic_clk *ingenic_clk = to_ingenic_clk(hw);
	struct ingenic_cgu *cgu = ingenic_clk->cgu;
	const struct ingenic_cgu_clk_info *clk_info = to_clk_info(ingenic_clk);
	const struct ingenic_cgu_pll_info *pll_info = &clk_info->pll;
	unsigned long rate, flags;
	unsigned int m, n, od;
	int ret = 0;
	u32 ctl;

	rate = ingenic_pll_calc(clk_info, req_rate, parent_rate,
				&m, &n, &od);
	if (rate != req_rate)
		pr_info("ingenic-cgu: request '%s' rate %luHz, actual %luHz\n",
			clk_info->name, req_rate, rate);

	spin_lock_irqsave(&cgu->lock, flags);
	ctl = readl(cgu->base + pll_info->reg);

	/* Fields store the value minus the SoC-specific offset. */
	ctl &= ~(GENMASK(pll_info->m_bits - 1, 0) << pll_info->m_shift);
	ctl |= (m - pll_info->m_offset) << pll_info->m_shift;

	ctl &= ~(GENMASK(pll_info->n_bits - 1, 0) << pll_info->n_shift);
	ctl |= (n - pll_info->n_offset) << pll_info->n_shift;

	/* The output divider is written via the SoC's encoding table. */
	if (pll_info->od_bits > 0) {
		ctl &= ~(GENMASK(pll_info->od_bits - 1, 0) << pll_info->od_shift);
		ctl |= pll_info->od_encoding[od - 1] << pll_info->od_shift;
	}

	writel(ctl, cgu->base + pll_info->reg);

	/* Let SoC-specific code react to the rate change, if it needs to. */
	if (pll_info->set_rate_hook)
		pll_info->set_rate_hook(pll_info, rate, parent_rate);

	/* If the PLL is enabled, verify that it's stable */
	if (pll_info->enable_bit >= 0 && (ctl & BIT(pll_info->enable_bit)))
		ret = ingenic_pll_check_stable(cgu, pll_info);

	spin_unlock_irqrestore(&cgu->lock, flags);

	return ret;
}
/*
 * ingenic_pll_enable() - enable a PLL and wait for it to lock
 *
 * Clears the bypass bit (when present) before setting the enable bit,
 * then polls for stability. PLLs without an enable bit are treated as
 * always running and succeed immediately.
 */
static int ingenic_pll_enable(struct clk_hw *hw)
{
	struct ingenic_clk *ingenic_clk = to_ingenic_clk(hw);
	struct ingenic_cgu *cgu = ingenic_clk->cgu;
	const struct ingenic_cgu_clk_info *clk_info = to_clk_info(ingenic_clk);
	const struct ingenic_cgu_pll_info *pll_info = &clk_info->pll;
	unsigned long flags;
	int ret;
	u32 ctl;

	if (pll_info->enable_bit < 0)
		return 0;

	spin_lock_irqsave(&cgu->lock, flags);

	/* Take the PLL out of bypass first so the output switches over. */
	if (pll_info->bypass_bit >= 0) {
		ctl = readl(cgu->base + pll_info->bypass_reg);

		ctl &= ~BIT(pll_info->bypass_bit);

		writel(ctl, cgu->base + pll_info->bypass_reg);
	}

	ctl = readl(cgu->base + pll_info->reg);

	ctl |= BIT(pll_info->enable_bit);

	writel(ctl, cgu->base + pll_info->reg);

	/* Wait (up to 100ms) for the PLL to lock. */
	ret = ingenic_pll_check_stable(cgu, pll_info);

	spin_unlock_irqrestore(&cgu->lock, flags);

	return ret;
}
  226. static void ingenic_pll_disable(struct clk_hw *hw)
  227. {
  228. struct ingenic_clk *ingenic_clk = to_ingenic_clk(hw);
  229. struct ingenic_cgu *cgu = ingenic_clk->cgu;
  230. const struct ingenic_cgu_clk_info *clk_info = to_clk_info(ingenic_clk);
  231. const struct ingenic_cgu_pll_info *pll_info = &clk_info->pll;
  232. unsigned long flags;
  233. u32 ctl;
  234. if (pll_info->enable_bit < 0)
  235. return;
  236. spin_lock_irqsave(&cgu->lock, flags);
  237. ctl = readl(cgu->base + pll_info->reg);
  238. ctl &= ~BIT(pll_info->enable_bit);
  239. writel(ctl, cgu->base + pll_info->reg);
  240. spin_unlock_irqrestore(&cgu->lock, flags);
  241. }
  242. static int ingenic_pll_is_enabled(struct clk_hw *hw)
  243. {
  244. struct ingenic_clk *ingenic_clk = to_ingenic_clk(hw);
  245. struct ingenic_cgu *cgu = ingenic_clk->cgu;
  246. const struct ingenic_cgu_clk_info *clk_info = to_clk_info(ingenic_clk);
  247. const struct ingenic_cgu_pll_info *pll_info = &clk_info->pll;
  248. u32 ctl;
  249. if (pll_info->enable_bit < 0)
  250. return true;
  251. ctl = readl(cgu->base + pll_info->reg);
  252. return !!(ctl & BIT(pll_info->enable_bit));
  253. }
/* clk_ops implementation for CGU PLL clocks */
static const struct clk_ops ingenic_pll_ops = {
	.recalc_rate = ingenic_pll_recalc_rate,
	.round_rate = ingenic_pll_round_rate,
	.set_rate = ingenic_pll_set_rate,

	.enable = ingenic_pll_enable,
	.disable = ingenic_pll_disable,
	.is_enabled = ingenic_pll_is_enabled,
};
  262. /*
  263. * Operations for all non-PLL clocks
  264. */
  265. static u8 ingenic_clk_get_parent(struct clk_hw *hw)
  266. {
  267. struct ingenic_clk *ingenic_clk = to_ingenic_clk(hw);
  268. const struct ingenic_cgu_clk_info *clk_info = to_clk_info(ingenic_clk);
  269. struct ingenic_cgu *cgu = ingenic_clk->cgu;
  270. u32 reg;
  271. u8 i, hw_idx, idx = 0;
  272. if (clk_info->type & CGU_CLK_MUX) {
  273. reg = readl(cgu->base + clk_info->mux.reg);
  274. hw_idx = (reg >> clk_info->mux.shift) &
  275. GENMASK(clk_info->mux.bits - 1, 0);
  276. /*
  277. * Convert the hardware index to the parent index by skipping
  278. * over any -1's in the parents array.
  279. */
  280. for (i = 0; i < hw_idx; i++) {
  281. if (clk_info->parents[i] != -1)
  282. idx++;
  283. }
  284. }
  285. return idx;
  286. }
/*
 * ingenic_clk_set_parent() - select a parent for a muxed clock
 *
 * Translates the framework parent index into the hardware mux encoding
 * (skipping unused -1 slots in the parents array) and writes it under
 * cgu->lock. For unmuxed clocks only parent 0 is valid.
 */
static int ingenic_clk_set_parent(struct clk_hw *hw, u8 idx)
{
	struct ingenic_clk *ingenic_clk = to_ingenic_clk(hw);
	const struct ingenic_cgu_clk_info *clk_info = to_clk_info(ingenic_clk);
	struct ingenic_cgu *cgu = ingenic_clk->cgu;
	unsigned long flags;
	u8 curr_idx, hw_idx, num_poss;
	u32 reg, mask;

	if (clk_info->type & CGU_CLK_MUX) {
		/*
		 * Convert the parent index to the hardware index by adding
		 * 1 for any -1 in the parents array preceding the given
		 * index. That is, we want the index of idx'th entry in
		 * clk_info->parents which does not equal -1.
		 */
		hw_idx = curr_idx = 0;
		num_poss = 1 << clk_info->mux.bits;
		for (; hw_idx < num_poss; hw_idx++) {
			if (clk_info->parents[hw_idx] == -1)
				continue;
			if (curr_idx == idx)
				break;
			curr_idx++;
		}

		/* idx should always be a valid parent */
		BUG_ON(curr_idx != idx);

		mask = GENMASK(clk_info->mux.bits - 1, 0);
		mask <<= clk_info->mux.shift;

		spin_lock_irqsave(&cgu->lock, flags);

		/* write the register */
		reg = readl(cgu->base + clk_info->mux.reg);
		reg &= ~mask;
		reg |= hw_idx << clk_info->mux.shift;
		writel(reg, cgu->base + clk_info->mux.reg);

		spin_unlock_irqrestore(&cgu->lock, flags);
		return 0;
	}

	/* Non-mux clocks have a single fixed parent: only index 0 is OK. */
	return idx ? -EINVAL : 0;
}
/*
 * ingenic_clk_recalc_rate() - compute the output rate of a non-PLL clock
 *
 * Applies the hardware divider (read from the divider register, or a
 * fixed divider for CGU_CLK_FIXDIV clocks) to the parent rate. When the
 * selected parent is in the divider's bypass mask, no divide happens.
 */
static unsigned long
ingenic_clk_recalc_rate(struct clk_hw *hw, unsigned long parent_rate)
{
	struct ingenic_clk *ingenic_clk = to_ingenic_clk(hw);
	const struct ingenic_cgu_clk_info *clk_info = to_clk_info(ingenic_clk);
	struct ingenic_cgu *cgu = ingenic_clk->cgu;
	unsigned long rate = parent_rate;
	u32 div_reg, div;
	u8 parent;

	if (clk_info->type & CGU_CLK_DIV) {
		parent = ingenic_clk_get_parent(hw);

		if (!(clk_info->div.bypass_mask & BIT(parent))) {
			div_reg = readl(cgu->base + clk_info->div.reg);
			div = (div_reg >> clk_info->div.shift) &
			      GENMASK(clk_info->div.bits - 1, 0);

			/* Decode the raw field: table lookup or linear. */
			if (clk_info->div.div_table)
				div = clk_info->div.div_table[div];
			else
				div = (div + 1) * clk_info->div.div;

			rate /= div;
		}
	} else if (clk_info->type & CGU_CLK_FIXDIV) {
		rate /= clk_info->fixdiv.div;
	}

	return rate;
}
  352. static unsigned int
  353. ingenic_clk_calc_hw_div(const struct ingenic_cgu_clk_info *clk_info,
  354. unsigned int div)
  355. {
  356. unsigned int i, best_i = 0, best = (unsigned int)-1;
  357. for (i = 0; i < (1 << clk_info->div.bits)
  358. && clk_info->div.div_table[i]; i++) {
  359. if (clk_info->div.div_table[i] >= div &&
  360. clk_info->div.div_table[i] < best) {
  361. best = clk_info->div.div_table[i];
  362. best_i = i;
  363. if (div == best)
  364. break;
  365. }
  366. }
  367. return best_i;
  368. }
  369. static unsigned
  370. ingenic_clk_calc_div(struct clk_hw *hw,
  371. const struct ingenic_cgu_clk_info *clk_info,
  372. unsigned long parent_rate, unsigned long req_rate)
  373. {
  374. unsigned int div, hw_div;
  375. u8 parent;
  376. parent = ingenic_clk_get_parent(hw);
  377. if (clk_info->div.bypass_mask & BIT(parent))
  378. return 1;
  379. /* calculate the divide */
  380. div = DIV_ROUND_UP(parent_rate, req_rate);
  381. if (clk_info->div.div_table) {
  382. hw_div = ingenic_clk_calc_hw_div(clk_info, div);
  383. return clk_info->div.div_table[hw_div];
  384. }
  385. /* Impose hardware constraints */
  386. div = clamp_t(unsigned int, div, clk_info->div.div,
  387. clk_info->div.div << clk_info->div.bits);
  388. /*
  389. * If the divider value itself must be divided before being written to
  390. * the divider register, we must ensure we don't have any bits set that
  391. * would be lost as a result of doing so.
  392. */
  393. div = DIV_ROUND_UP(div, clk_info->div.div);
  394. div *= clk_info->div.div;
  395. return div;
  396. }
  397. static int ingenic_clk_determine_rate(struct clk_hw *hw,
  398. struct clk_rate_request *req)
  399. {
  400. struct ingenic_clk *ingenic_clk = to_ingenic_clk(hw);
  401. const struct ingenic_cgu_clk_info *clk_info = to_clk_info(ingenic_clk);
  402. unsigned int div = 1;
  403. if (clk_info->type & CGU_CLK_DIV)
  404. div = ingenic_clk_calc_div(hw, clk_info, req->best_parent_rate,
  405. req->rate);
  406. else if (clk_info->type & CGU_CLK_FIXDIV)
  407. div = clk_info->fixdiv.div;
  408. else if (clk_hw_can_set_rate_parent(hw))
  409. req->best_parent_rate = req->rate;
  410. req->rate = DIV_ROUND_UP(req->best_parent_rate, div);
  411. return 0;
  412. }
/*
 * ingenic_clk_check_stable() - wait for a divider change to take effect
 *
 * Polls the divider register until its busy bit clears, giving up after
 * 100ms. Returns 0 on success or the readl_poll_timeout error.
 */
static inline int ingenic_clk_check_stable(struct ingenic_cgu *cgu,
					   const struct ingenic_cgu_clk_info *clk_info)
{
	u32 reg;

	return readl_poll_timeout(cgu->base + clk_info->div.reg, reg,
				  !(reg & BIT(clk_info->div.busy_bit)),
				  0, 100 * USEC_PER_MSEC);
}
/*
 * ingenic_clk_set_rate() - program the divider for the requested rate
 *
 * Only divider clocks (CGU_CLK_DIV) can change rate here; anything else
 * returns -EINVAL, as does a request the divider cannot hit exactly.
 * The register update (divider field, stop/change-enable bits) is done
 * under cgu->lock, then the busy bit is polled when one exists.
 */
static int
ingenic_clk_set_rate(struct clk_hw *hw, unsigned long req_rate,
		     unsigned long parent_rate)
{
	struct ingenic_clk *ingenic_clk = to_ingenic_clk(hw);
	const struct ingenic_cgu_clk_info *clk_info = to_clk_info(ingenic_clk);
	struct ingenic_cgu *cgu = ingenic_clk->cgu;
	unsigned long rate, flags;
	unsigned int hw_div, div;
	u32 reg, mask;
	int ret = 0;

	if (clk_info->type & CGU_CLK_DIV) {
		div = ingenic_clk_calc_div(hw, clk_info, parent_rate, req_rate);
		rate = DIV_ROUND_UP(parent_rate, div);

		/* Only an exact match is accepted. */
		if (rate != req_rate)
			return -EINVAL;

		/* Convert the divider to its hardware field encoding. */
		if (clk_info->div.div_table)
			hw_div = ingenic_clk_calc_hw_div(clk_info, div);
		else
			hw_div = ((div / clk_info->div.div) - 1);

		spin_lock_irqsave(&cgu->lock, flags);
		reg = readl(cgu->base + clk_info->div.reg);

		/* update the divide */
		mask = GENMASK(clk_info->div.bits - 1, 0);
		reg &= ~(mask << clk_info->div.shift);
		reg |= hw_div << clk_info->div.shift;

		/* clear the stop bit */
		if (clk_info->div.stop_bit != -1)
			reg &= ~BIT(clk_info->div.stop_bit);

		/* set the change enable bit */
		if (clk_info->div.ce_bit != -1)
			reg |= BIT(clk_info->div.ce_bit);

		/* update the hardware */
		writel(reg, cgu->base + clk_info->div.reg);

		/* wait for the change to take effect */
		if (clk_info->div.busy_bit != -1)
			ret = ingenic_clk_check_stable(cgu, clk_info);

		spin_unlock_irqrestore(&cgu->lock, flags);
		return ret;
	}

	return -EINVAL;
}
/*
 * ingenic_clk_enable() - ungate a clock if it has a gate bit
 *
 * Clocks without CGU_CLK_GATE have nothing to do and succeed. Any
 * post-ungate settle delay is performed outside the spinlock.
 */
static int ingenic_clk_enable(struct clk_hw *hw)
{
	struct ingenic_clk *ingenic_clk = to_ingenic_clk(hw);
	const struct ingenic_cgu_clk_info *clk_info = to_clk_info(ingenic_clk);
	struct ingenic_cgu *cgu = ingenic_clk->cgu;
	unsigned long flags;

	if (clk_info->type & CGU_CLK_GATE) {
		/* ungate the clock */
		spin_lock_irqsave(&cgu->lock, flags);
		ingenic_cgu_gate_set(cgu, &clk_info->gate, false);
		spin_unlock_irqrestore(&cgu->lock, flags);

		/* Some gates need time to propagate; delay after unlock. */
		if (clk_info->gate.delay_us)
			udelay(clk_info->gate.delay_us);
	}

	return 0;
}
  479. static void ingenic_clk_disable(struct clk_hw *hw)
  480. {
  481. struct ingenic_clk *ingenic_clk = to_ingenic_clk(hw);
  482. const struct ingenic_cgu_clk_info *clk_info = to_clk_info(ingenic_clk);
  483. struct ingenic_cgu *cgu = ingenic_clk->cgu;
  484. unsigned long flags;
  485. if (clk_info->type & CGU_CLK_GATE) {
  486. /* gate the clock */
  487. spin_lock_irqsave(&cgu->lock, flags);
  488. ingenic_cgu_gate_set(cgu, &clk_info->gate, true);
  489. spin_unlock_irqrestore(&cgu->lock, flags);
  490. }
  491. }
  492. static int ingenic_clk_is_enabled(struct clk_hw *hw)
  493. {
  494. struct ingenic_clk *ingenic_clk = to_ingenic_clk(hw);
  495. const struct ingenic_cgu_clk_info *clk_info = to_clk_info(ingenic_clk);
  496. struct ingenic_cgu *cgu = ingenic_clk->cgu;
  497. int enabled = 1;
  498. if (clk_info->type & CGU_CLK_GATE)
  499. enabled = !ingenic_cgu_gate_get(cgu, &clk_info->gate);
  500. return enabled;
  501. }
/* clk_ops implementation for all non-PLL CGU clocks */
static const struct clk_ops ingenic_clk_ops = {
	.get_parent = ingenic_clk_get_parent,
	.set_parent = ingenic_clk_set_parent,

	.recalc_rate = ingenic_clk_recalc_rate,
	.determine_rate = ingenic_clk_determine_rate,
	.set_rate = ingenic_clk_set_rate,

	.enable = ingenic_clk_enable,
	.disable = ingenic_clk_disable,
	.is_enabled = ingenic_clk_is_enabled,
};
  512. /*
  513. * Setup functions.
  514. */
  515. static int ingenic_register_clock(struct ingenic_cgu *cgu, unsigned idx)
  516. {
  517. const struct ingenic_cgu_clk_info *clk_info = &cgu->clock_info[idx];
  518. struct clk_init_data clk_init;
  519. struct ingenic_clk *ingenic_clk = NULL;
  520. struct clk *clk, *parent;
  521. const char *parent_names[4];
  522. unsigned caps, i, num_possible;
  523. int err = -EINVAL;
  524. BUILD_BUG_ON(ARRAY_SIZE(clk_info->parents) > ARRAY_SIZE(parent_names));
  525. if (clk_info->type == CGU_CLK_EXT) {
  526. clk = of_clk_get_by_name(cgu->np, clk_info->name);
  527. if (IS_ERR(clk)) {
  528. pr_err("%s: no external clock '%s' provided\n",
  529. __func__, clk_info->name);
  530. err = -ENODEV;
  531. goto out;
  532. }
  533. err = clk_register_clkdev(clk, clk_info->name, NULL);
  534. if (err) {
  535. clk_put(clk);
  536. goto out;
  537. }
  538. cgu->clocks.clks[idx] = clk;
  539. return 0;
  540. }
  541. if (!clk_info->type) {
  542. pr_err("%s: no clock type specified for '%s'\n", __func__,
  543. clk_info->name);
  544. goto out;
  545. }
  546. ingenic_clk = kzalloc(sizeof(*ingenic_clk), GFP_KERNEL);
  547. if (!ingenic_clk) {
  548. err = -ENOMEM;
  549. goto out;
  550. }
  551. ingenic_clk->hw.init = &clk_init;
  552. ingenic_clk->cgu = cgu;
  553. ingenic_clk->idx = idx;
  554. clk_init.name = clk_info->name;
  555. clk_init.flags = clk_info->flags;
  556. clk_init.parent_names = parent_names;
  557. caps = clk_info->type;
  558. if (caps & CGU_CLK_DIV) {
  559. caps &= ~CGU_CLK_DIV;
  560. } else if (!(caps & CGU_CLK_CUSTOM)) {
  561. /* pass rate changes to the parent clock */
  562. clk_init.flags |= CLK_SET_RATE_PARENT;
  563. }
  564. if (caps & (CGU_CLK_MUX | CGU_CLK_CUSTOM)) {
  565. clk_init.num_parents = 0;
  566. if (caps & CGU_CLK_MUX)
  567. num_possible = 1 << clk_info->mux.bits;
  568. else
  569. num_possible = ARRAY_SIZE(clk_info->parents);
  570. for (i = 0; i < num_possible; i++) {
  571. if (clk_info->parents[i] == -1)
  572. continue;
  573. parent = cgu->clocks.clks[clk_info->parents[i]];
  574. parent_names[clk_init.num_parents] =
  575. __clk_get_name(parent);
  576. clk_init.num_parents++;
  577. }
  578. BUG_ON(!clk_init.num_parents);
  579. BUG_ON(clk_init.num_parents > ARRAY_SIZE(parent_names));
  580. } else {
  581. BUG_ON(clk_info->parents[0] == -1);
  582. clk_init.num_parents = 1;
  583. parent = cgu->clocks.clks[clk_info->parents[0]];
  584. parent_names[0] = __clk_get_name(parent);
  585. }
  586. if (caps & CGU_CLK_CUSTOM) {
  587. clk_init.ops = clk_info->custom.clk_ops;
  588. caps &= ~CGU_CLK_CUSTOM;
  589. if (caps) {
  590. pr_err("%s: custom clock may not be combined with type 0x%x\n",
  591. __func__, caps);
  592. goto out;
  593. }
  594. } else if (caps & CGU_CLK_PLL) {
  595. clk_init.ops = &ingenic_pll_ops;
  596. caps &= ~CGU_CLK_PLL;
  597. if (caps) {
  598. pr_err("%s: PLL may not be combined with type 0x%x\n",
  599. __func__, caps);
  600. goto out;
  601. }
  602. } else {
  603. clk_init.ops = &ingenic_clk_ops;
  604. }
  605. /* nothing to do for gates or fixed dividers */
  606. caps &= ~(CGU_CLK_GATE | CGU_CLK_FIXDIV);
  607. if (caps & CGU_CLK_MUX) {
  608. if (!(caps & CGU_CLK_MUX_GLITCHFREE))
  609. clk_init.flags |= CLK_SET_PARENT_GATE;
  610. caps &= ~(CGU_CLK_MUX | CGU_CLK_MUX_GLITCHFREE);
  611. }
  612. if (caps) {
  613. pr_err("%s: unknown clock type 0x%x\n", __func__, caps);
  614. goto out;
  615. }
  616. clk = clk_register(NULL, &ingenic_clk->hw);
  617. if (IS_ERR(clk)) {
  618. pr_err("%s: failed to register clock '%s'\n", __func__,
  619. clk_info->name);
  620. err = PTR_ERR(clk);
  621. goto out;
  622. }
  623. err = clk_register_clkdev(clk, clk_info->name, NULL);
  624. if (err)
  625. goto out;
  626. cgu->clocks.clks[idx] = clk;
  627. out:
  628. if (err)
  629. kfree(ingenic_clk);
  630. return err;
  631. }
  632. struct ingenic_cgu *
  633. ingenic_cgu_new(const struct ingenic_cgu_clk_info *clock_info,
  634. unsigned num_clocks, struct device_node *np)
  635. {
  636. struct ingenic_cgu *cgu;
  637. cgu = kzalloc(sizeof(*cgu), GFP_KERNEL);
  638. if (!cgu)
  639. goto err_out;
  640. cgu->base = of_iomap(np, 0);
  641. if (!cgu->base) {
  642. pr_err("%s: failed to map CGU registers\n", __func__);
  643. goto err_out_free;
  644. }
  645. cgu->np = np;
  646. cgu->clock_info = clock_info;
  647. cgu->clocks.clk_num = num_clocks;
  648. spin_lock_init(&cgu->lock);
  649. return cgu;
  650. err_out_free:
  651. kfree(cgu);
  652. err_out:
  653. return NULL;
  654. }
/*
 * ingenic_cgu_register_clocks() - register all of a CGU's clocks
 *
 * Allocates the clk array, registers every clock in order (so parents
 * are available to later entries) and exposes them as an OF clock
 * provider. On failure, every clock registered so far is undone.
 * Returns 0 on success or a negative errno.
 */
int ingenic_cgu_register_clocks(struct ingenic_cgu *cgu)
{
	unsigned i;
	int err;

	cgu->clocks.clks = kcalloc(cgu->clocks.clk_num, sizeof(struct clk *),
				   GFP_KERNEL);
	if (!cgu->clocks.clks) {
		err = -ENOMEM;
		goto err_out;
	}

	for (i = 0; i < cgu->clocks.clk_num; i++) {
		err = ingenic_register_clock(cgu, i);
		if (err)
			goto err_out_unregister;
	}

	err = of_clk_add_provider(cgu->np, of_clk_src_onecell_get,
				  &cgu->clocks);
	if (err)
		goto err_out_unregister;

	return 0;

err_out_unregister:
	/* Undo what was registered so far; unfilled slots are NULL. */
	for (i = 0; i < cgu->clocks.clk_num; i++) {
		if (!cgu->clocks.clks[i])
			continue;

		/* External clocks were obtained, not registered, by us. */
		if (cgu->clock_info[i].type & CGU_CLK_EXT)
			clk_put(cgu->clocks.clks[i]);
		else
			clk_unregister(cgu->clocks.clks[i]);
	}
	kfree(cgu->clocks.clks);
err_out:
	return err;
}