rzv2h-cpg.c 20 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
3704705706707708709710711712713714715716717718719720721722723724725726727728729730731732733734735736737738739740741742743744745746747748749750751752753754755756757758759760761762763764765766767768769770771772773774775776777778779780781782783784785786787788789790791792793794795796797798799800801802803804805806807808809810811812813814815816817818819820821822823824825826827828829830831832833834835836837838839840841842843844845846847848849850851852853
  1. // SPDX-License-Identifier: GPL-2.0
  2. /*
  3. * Renesas RZ/V2H(P) Clock Pulse Generator
  4. *
  5. * Copyright (C) 2024 Renesas Electronics Corp.
  6. *
  7. * Based on rzg2l-cpg.c
  8. *
  9. * Copyright (C) 2015 Glider bvba
  10. * Copyright (C) 2013 Ideas On Board SPRL
  11. * Copyright (C) 2015 Renesas Electronics Corp.
  12. */
  13. #include <linux/bitfield.h>
  14. #include <linux/clk.h>
  15. #include <linux/clk-provider.h>
  16. #include <linux/delay.h>
  17. #include <linux/init.h>
  18. #include <linux/iopoll.h>
  19. #include <linux/mod_devicetable.h>
  20. #include <linux/module.h>
  21. #include <linux/of.h>
  22. #include <linux/platform_device.h>
  23. #include <linux/pm_clock.h>
  24. #include <linux/pm_domain.h>
  25. #include <linux/reset-controller.h>
  26. #include <dt-bindings/clock/renesas-cpg-mssr.h>
  27. #include "rzv2h-cpg.h"
  28. #ifdef DEBUG
  29. #define WARN_DEBUG(x) WARN_ON(x)
  30. #else
  31. #define WARN_DEBUG(x) do { } while (0)
  32. #endif
  33. #define GET_CLK_ON_OFFSET(x) (0x600 + ((x) * 4))
  34. #define GET_CLK_MON_OFFSET(x) (0x800 + ((x) * 4))
  35. #define GET_RST_OFFSET(x) (0x900 + ((x) * 4))
  36. #define GET_RST_MON_OFFSET(x) (0xA00 + ((x) * 4))
  37. #define KDIV(val) ((s16)FIELD_GET(GENMASK(31, 16), (val)))
  38. #define MDIV(val) FIELD_GET(GENMASK(15, 6), (val))
  39. #define PDIV(val) FIELD_GET(GENMASK(5, 0), (val))
  40. #define SDIV(val) FIELD_GET(GENMASK(2, 0), (val))
  41. #define DDIV_DIVCTL_WEN(shift) BIT((shift) + 16)
  42. #define GET_MOD_CLK_ID(base, index, bit) \
  43. ((base) + ((((index) * (16))) + (bit)))
  44. #define CPG_CLKSTATUS0 (0x700)
/**
 * struct rzv2h_cpg_priv - Clock Pulse Generator Private Data
 *
 * @dev: CPG device
 * @base: CPG register block base address
 * @rmw_lock: protects register accesses
 * @clks: Array containing all Core and Module Clocks
 * @num_core_clks: Number of Core Clocks in clks[]
 * @num_mod_clks: Number of Module Clocks in clks[]
 * @resets: Array of resets
 * @num_resets: Number of Module Resets in resets[]
 * @last_dt_core_clk: ID of the last Core Clock exported to DT
 * @rcdev: Reset controller entity
 */
struct rzv2h_cpg_priv {
	struct device *dev;
	void __iomem *base;
	spinlock_t rmw_lock;

	struct clk **clks;
	unsigned int num_core_clks;
	unsigned int num_mod_clks;
	struct rzv2h_reset *resets;
	unsigned int num_resets;
	unsigned int last_dt_core_clk;

	struct reset_controller_dev rcdev;
};

/* Recover the private data from the embedded reset controller. */
#define rcdev_to_priv(x)	container_of(x, struct rzv2h_cpg_priv, rcdev)
/**
 * struct pll_clk - PLL clock
 *
 * @priv: CPG private data
 * @base: CPG register block base address
 * @hw: handle between common and hardware-specific interfaces
 * @conf: packed PLL configuration, decoded via PLL_CLK1_OFFSET()/
 *        PLL_CLK2_OFFSET()/PLL_CLK_ACCESS()
 * @type: clock type identifier (core->type at registration)
 */
struct pll_clk {
	struct rzv2h_cpg_priv *priv;
	void __iomem *base;
	struct clk_hw hw;
	unsigned int conf;
	unsigned int type;
};

#define to_pll(_hw)	container_of(_hw, struct pll_clk, hw)
/**
 * struct mod_clock - Module clock
 *
 * @priv: CPG private data
 * @hw: handle between common and hardware-specific interfaces
 * @on_index: CLK_ON register index
 * @on_bit: ON/MON bit
 * @mon_index: monitor register index (negative when the clock has no
 *             monitor bit)
 * @mon_bit: monitor bit
 */
struct mod_clock {
	struct rzv2h_cpg_priv *priv;
	struct clk_hw hw;
	u8 on_index;
	u8 on_bit;
	s8 mon_index;
	u8 mon_bit;
};

#define to_mod_clock(_hw)	container_of(_hw, struct mod_clock, hw)
/**
 * struct ddiv_clk - DDIV clock
 *
 * @priv: CPG private data
 * @div: divider clk
 * @mon: monitor bit in CPG_CLKSTATUS0 register
 */
struct ddiv_clk {
	struct rzv2h_cpg_priv *priv;
	struct clk_divider div;
	u8 mon;
};

#define to_ddiv_clock(_div)	container_of(_div, struct ddiv_clk, div)
/* Compute the current PLL output rate from the CLK1/CLK2 registers. */
static unsigned long rzv2h_cpg_pll_clk_recalc_rate(struct clk_hw *hw,
						   unsigned long parent_rate)
{
	struct pll_clk *pll_clk = to_pll(hw);
	struct rzv2h_cpg_priv *priv = pll_clk->priv;
	unsigned int clk1, clk2;
	u64 rate;

	/* Nothing to report if the PLL registers are not accessible. */
	if (!PLL_CLK_ACCESS(pll_clk->conf))
		return 0;

	clk1 = readl(priv->base + PLL_CLK1_OFFSET(pll_clk->conf));
	clk2 = readl(priv->base + PLL_CLK2_OFFSET(pll_clk->conf));

	/*
	 * rate = parent_rate * (MDIV + KDIV / 65536) / (PDIV * 2^SDIV)
	 *
	 * KDIV is a signed 16-bit fraction, so the multiplier
	 * (MDIV << 16) + KDIV is a 16.16 fixed-point value; the final
	 * shift of 16 + SDIV folds the fixed-point scaling and the
	 * 2^SDIV post-divider into a single step.
	 */
	rate = mul_u64_u32_shr(parent_rate, (MDIV(clk1) << 16) + KDIV(clk1),
			       16 + SDIV(clk2));

	return DIV_ROUND_CLOSEST_ULL(rate, PDIV(clk1));
}
/* Only .recalc_rate is implemented: this driver does not reprogram the PLL. */
static const struct clk_ops rzv2h_cpg_pll_ops = {
	.recalc_rate = rzv2h_cpg_pll_clk_recalc_rate,
};
/*
 * Register one PLL core clock described by @core.
 *
 * Returns the registered struct clk, or an ERR_PTR() on failure
 * (including a propagated error if the parent clock failed to register).
 */
static struct clk * __init
rzv2h_cpg_pll_clk_register(const struct cpg_core_clk *core,
			   struct rzv2h_cpg_priv *priv,
			   const struct clk_ops *ops)
{
	void __iomem *base = priv->base;
	struct device *dev = priv->dev;
	struct clk_init_data init;
	const struct clk *parent;
	const char *parent_name;
	struct pll_clk *pll_clk;
	int ret;

	parent = priv->clks[core->parent];
	if (IS_ERR(parent))
		return ERR_CAST(parent);

	pll_clk = devm_kzalloc(dev, sizeof(*pll_clk), GFP_KERNEL);
	if (!pll_clk)
		return ERR_PTR(-ENOMEM);

	parent_name = __clk_get_name(parent);
	init.name = core->name;
	init.ops = ops;
	init.flags = 0;
	init.parent_names = &parent_name;
	init.num_parents = 1;

	/* init must be fully populated before devm_clk_hw_register(). */
	pll_clk->hw.init = &init;
	pll_clk->conf = core->cfg.conf;
	pll_clk->base = base;
	pll_clk->priv = priv;
	pll_clk->type = core->type;

	ret = devm_clk_hw_register(dev, &pll_clk->hw);
	if (ret)
		return ERR_PTR(ret);

	return pll_clk->hw.clk;
}
  164. static unsigned long rzv2h_ddiv_recalc_rate(struct clk_hw *hw,
  165. unsigned long parent_rate)
  166. {
  167. struct clk_divider *divider = to_clk_divider(hw);
  168. unsigned int val;
  169. val = readl(divider->reg) >> divider->shift;
  170. val &= clk_div_mask(divider->width);
  171. return divider_recalc_rate(hw, parent_rate, val, divider->table,
  172. divider->flags, divider->width);
  173. }
  174. static long rzv2h_ddiv_round_rate(struct clk_hw *hw, unsigned long rate,
  175. unsigned long *prate)
  176. {
  177. struct clk_divider *divider = to_clk_divider(hw);
  178. return divider_round_rate(hw, rate, prate, divider->table,
  179. divider->width, divider->flags);
  180. }
  181. static int rzv2h_ddiv_determine_rate(struct clk_hw *hw,
  182. struct clk_rate_request *req)
  183. {
  184. struct clk_divider *divider = to_clk_divider(hw);
  185. return divider_determine_rate(hw, req, divider->table, divider->width,
  186. divider->flags);
  187. }
  188. static inline int rzv2h_cpg_wait_ddiv_clk_update_done(void __iomem *base, u8 mon)
  189. {
  190. u32 bitmask = BIT(mon);
  191. u32 val;
  192. return readl_poll_timeout_atomic(base + CPG_CLKSTATUS0, val, !(val & bitmask), 10, 200);
  193. }
/*
 * Program a new divider value.
 *
 * The sequence is: wait for the divider to be idle, write the new value
 * together with its write-enable bit, then wait for the hardware to
 * latch it — all under the shared rmw_lock.
 */
static int rzv2h_ddiv_set_rate(struct clk_hw *hw, unsigned long rate,
			       unsigned long parent_rate)
{
	struct clk_divider *divider = to_clk_divider(hw);
	struct ddiv_clk *ddiv = to_ddiv_clock(divider);
	struct rzv2h_cpg_priv *priv = ddiv->priv;
	unsigned long flags = 0;
	int value;
	u32 val;
	int ret;

	/* Translate the requested rate into a raw divider value. */
	value = divider_get_val(rate, parent_rate, divider->table,
				divider->width, divider->flags);
	if (value < 0)
		return value;

	spin_lock_irqsave(divider->lock, flags);

	/* The divider must be idle before a new value may be programmed. */
	ret = rzv2h_cpg_wait_ddiv_clk_update_done(priv->base, ddiv->mon);
	if (ret)
		goto ddiv_timeout;

	/* Set the write-enable bit, then replace the divider field. */
	val = readl(divider->reg) | DDIV_DIVCTL_WEN(divider->shift);
	val &= ~(clk_div_mask(divider->width) << divider->shift);
	val |= (u32)value << divider->shift;
	writel(val, divider->reg);

	/* Wait until the hardware has latched the new divider value. */
	ret = rzv2h_cpg_wait_ddiv_clk_update_done(priv->base, ddiv->mon);
	if (ret)
		goto ddiv_timeout;

	spin_unlock_irqrestore(divider->lock, flags);

	return 0;

ddiv_timeout:
	spin_unlock_irqrestore(divider->lock, flags);
	return ret;
}
/* Full read/write divider ops, backed by the generic clk_divider helpers. */
static const struct clk_ops rzv2h_ddiv_clk_divider_ops = {
	.recalc_rate = rzv2h_ddiv_recalc_rate,
	.round_rate = rzv2h_ddiv_round_rate,
	.determine_rate = rzv2h_ddiv_determine_rate,
	.set_rate = rzv2h_ddiv_set_rate,
};
  231. static struct clk * __init
  232. rzv2h_cpg_ddiv_clk_register(const struct cpg_core_clk *core,
  233. struct rzv2h_cpg_priv *priv)
  234. {
  235. struct ddiv cfg_ddiv = core->cfg.ddiv;
  236. struct clk_init_data init = {};
  237. struct device *dev = priv->dev;
  238. u8 shift = cfg_ddiv.shift;
  239. u8 width = cfg_ddiv.width;
  240. const struct clk *parent;
  241. const char *parent_name;
  242. struct clk_divider *div;
  243. struct ddiv_clk *ddiv;
  244. int ret;
  245. parent = priv->clks[core->parent];
  246. if (IS_ERR(parent))
  247. return ERR_CAST(parent);
  248. parent_name = __clk_get_name(parent);
  249. if ((shift + width) > 16)
  250. return ERR_PTR(-EINVAL);
  251. ddiv = devm_kzalloc(priv->dev, sizeof(*ddiv), GFP_KERNEL);
  252. if (!ddiv)
  253. return ERR_PTR(-ENOMEM);
  254. init.name = core->name;
  255. init.ops = &rzv2h_ddiv_clk_divider_ops;
  256. init.parent_names = &parent_name;
  257. init.num_parents = 1;
  258. ddiv->priv = priv;
  259. ddiv->mon = cfg_ddiv.monbit;
  260. div = &ddiv->div;
  261. div->reg = priv->base + cfg_ddiv.offset;
  262. div->shift = shift;
  263. div->width = width;
  264. div->flags = core->flag;
  265. div->lock = &priv->rmw_lock;
  266. div->hw.init = &init;
  267. div->table = core->dtable;
  268. ret = devm_clk_hw_register(dev, &div->hw);
  269. if (ret)
  270. return ERR_PTR(ret);
  271. return div->hw.clk;
  272. }
  273. static struct clk
  274. *rzv2h_cpg_clk_src_twocell_get(struct of_phandle_args *clkspec,
  275. void *data)
  276. {
  277. unsigned int clkidx = clkspec->args[1];
  278. struct rzv2h_cpg_priv *priv = data;
  279. struct device *dev = priv->dev;
  280. const char *type;
  281. struct clk *clk;
  282. switch (clkspec->args[0]) {
  283. case CPG_CORE:
  284. type = "core";
  285. if (clkidx > priv->last_dt_core_clk) {
  286. dev_err(dev, "Invalid %s clock index %u\n", type, clkidx);
  287. return ERR_PTR(-EINVAL);
  288. }
  289. clk = priv->clks[clkidx];
  290. break;
  291. case CPG_MOD:
  292. type = "module";
  293. if (clkidx >= priv->num_mod_clks) {
  294. dev_err(dev, "Invalid %s clock index %u\n", type, clkidx);
  295. return ERR_PTR(-EINVAL);
  296. }
  297. clk = priv->clks[priv->num_core_clks + clkidx];
  298. break;
  299. default:
  300. dev_err(dev, "Invalid CPG clock type %u\n", clkspec->args[0]);
  301. return ERR_PTR(-EINVAL);
  302. }
  303. if (IS_ERR(clk))
  304. dev_err(dev, "Cannot get %s clock %u: %ld", type, clkidx,
  305. PTR_ERR(clk));
  306. else
  307. dev_dbg(dev, "clock (%u, %u) is %pC at %lu Hz\n",
  308. clkspec->args[0], clkspec->args[1], clk,
  309. clk_get_rate(clk));
  310. return clk;
  311. }
/*
 * Register one core clock described by @core and store it in
 * priv->clks[core->id]. On failure the slot keeps its previous value
 * and an error is logged.
 */
static void __init
rzv2h_cpg_register_core_clk(const struct cpg_core_clk *core,
			    struct rzv2h_cpg_priv *priv)
{
	/* Default error so an unhandled type falls through to 'fail'. */
	struct clk *clk = ERR_PTR(-EOPNOTSUPP), *parent;
	unsigned int id = core->id, div = core->div;
	struct device *dev = priv->dev;
	const char *parent_name;
	struct clk_hw *clk_hw;

	WARN_DEBUG(id >= priv->num_core_clks);
	/* The slot must still hold its -ENOENT placeholder (no duplicate IDs). */
	WARN_DEBUG(PTR_ERR(priv->clks[id]) != -ENOENT);

	switch (core->type) {
	case CLK_TYPE_IN:
		/* External input clock, provided via DT. */
		clk = of_clk_get_by_name(priv->dev->of_node, core->name);
		break;
	case CLK_TYPE_FF:
		/* Fixed-factor clock: rate = parent * mult / div. */
		WARN_DEBUG(core->parent >= priv->num_core_clks);
		parent = priv->clks[core->parent];
		if (IS_ERR(parent)) {
			clk = parent;
			goto fail;
		}

		parent_name = __clk_get_name(parent);
		clk_hw = devm_clk_hw_register_fixed_factor(dev, core->name,
							   parent_name, CLK_SET_RATE_PARENT,
							   core->mult, div);
		if (IS_ERR(clk_hw))
			clk = ERR_CAST(clk_hw);
		else
			clk = clk_hw->clk;
		break;
	case CLK_TYPE_PLL:
		clk = rzv2h_cpg_pll_clk_register(core, priv, &rzv2h_cpg_pll_ops);
		break;
	case CLK_TYPE_DDIV:
		clk = rzv2h_cpg_ddiv_clk_register(core, priv);
		break;
	default:
		goto fail;
	}

	if (IS_ERR_OR_NULL(clk))
		goto fail;

	dev_dbg(dev, "Core clock %pC at %lu Hz\n", clk, clk_get_rate(clk));
	priv->clks[id] = clk;
	return;

fail:
	dev_err(dev, "Failed to register core clock %s: %ld\n",
		core->name, PTR_ERR(clk));
}
/*
 * Switch a module clock on or off via its CLK_ON register; when enabling,
 * poll the matching CLK_MON bit (if the clock has one) until it reflects
 * the new state.
 *
 * Returns 0 on success or the readl_poll timeout error.
 */
static int rzv2h_mod_clock_endisable(struct clk_hw *hw, bool enable)
{
	struct mod_clock *clock = to_mod_clock(hw);
	unsigned int reg = GET_CLK_ON_OFFSET(clock->on_index);
	struct rzv2h_cpg_priv *priv = clock->priv;
	u32 bitmask = BIT(clock->on_bit);
	struct device *dev = priv->dev;
	u32 value;
	int error;

	dev_dbg(dev, "CLK_ON 0x%x/%pC %s\n", reg, hw->clk,
		enable ? "ON" : "OFF");

	/* Upper half-word is the write-enable mask for the lower half. */
	value = bitmask << 16;
	if (enable)
		value |= bitmask;

	writel(value, priv->base + reg);

	/* No monitor bit (mon_index < 0) or disabling: nothing to poll. */
	if (!enable || clock->mon_index < 0)
		return 0;

	reg = GET_CLK_MON_OFFSET(clock->mon_index);
	bitmask = BIT(clock->mon_bit);
	error = readl_poll_timeout_atomic(priv->base + reg, value,
					  value & bitmask, 0, 10);
	if (error)
		dev_err(dev, "Failed to enable CLK_ON %p\n",
			priv->base + reg);

	return error;
}
  387. static int rzv2h_mod_clock_enable(struct clk_hw *hw)
  388. {
  389. return rzv2h_mod_clock_endisable(hw, true);
  390. }
  391. static void rzv2h_mod_clock_disable(struct clk_hw *hw)
  392. {
  393. rzv2h_mod_clock_endisable(hw, false);
  394. }
  395. static int rzv2h_mod_clock_is_enabled(struct clk_hw *hw)
  396. {
  397. struct mod_clock *clock = to_mod_clock(hw);
  398. struct rzv2h_cpg_priv *priv = clock->priv;
  399. u32 bitmask;
  400. u32 offset;
  401. if (clock->mon_index >= 0) {
  402. offset = GET_CLK_MON_OFFSET(clock->mon_index);
  403. bitmask = BIT(clock->mon_bit);
  404. } else {
  405. offset = GET_CLK_ON_OFFSET(clock->on_index);
  406. bitmask = BIT(clock->on_bit);
  407. }
  408. return readl(priv->base + offset) & bitmask;
  409. }
/* Gate-style ops for module clocks: rate handling is left to the parent. */
static const struct clk_ops rzv2h_mod_clock_ops = {
	.enable = rzv2h_mod_clock_enable,
	.disable = rzv2h_mod_clock_disable,
	.is_enabled = rzv2h_mod_clock_is_enabled,
};
/*
 * Register one module (gate) clock described by @mod and store it in
 * priv->clks[] at the ID derived from its CLK_ON register index/bit.
 * On failure an error is logged and the slot keeps its previous value.
 */
static void __init
rzv2h_cpg_register_mod_clk(const struct rzv2h_mod_clk *mod,
			   struct rzv2h_cpg_priv *priv)
{
	struct mod_clock *clock = NULL;
	struct device *dev = priv->dev;
	struct clk_init_data init;
	struct clk *parent, *clk;
	const char *parent_name;
	unsigned int id;
	int ret;

	/* Module clock IDs start after the core clocks: base + index*16 + bit. */
	id = GET_MOD_CLK_ID(priv->num_core_clks, mod->on_index, mod->on_bit);
	WARN_DEBUG(id >= priv->num_core_clks + priv->num_mod_clks);
	WARN_DEBUG(mod->parent >= priv->num_core_clks + priv->num_mod_clks);
	/* The slot must still hold its -ENOENT placeholder (no duplicate IDs). */
	WARN_DEBUG(PTR_ERR(priv->clks[id]) != -ENOENT);

	parent = priv->clks[mod->parent];
	if (IS_ERR(parent)) {
		clk = parent;
		goto fail;
	}

	clock = devm_kzalloc(dev, sizeof(*clock), GFP_KERNEL);
	if (!clock) {
		clk = ERR_PTR(-ENOMEM);
		goto fail;
	}

	init.name = mod->name;
	init.ops = &rzv2h_mod_clock_ops;
	init.flags = CLK_SET_RATE_PARENT;
	/* Critical clocks must never be gated by the framework. */
	if (mod->critical)
		init.flags |= CLK_IS_CRITICAL;

	parent_name = __clk_get_name(parent);
	init.parent_names = &parent_name;
	init.num_parents = 1;

	clock->on_index = mod->on_index;
	clock->on_bit = mod->on_bit;
	clock->mon_index = mod->mon_index;
	clock->mon_bit = mod->mon_bit;
	clock->priv = priv;
	clock->hw.init = &init;

	ret = devm_clk_hw_register(dev, &clock->hw);
	if (ret) {
		clk = ERR_PTR(ret);
		goto fail;
	}

	priv->clks[id] = clock->hw.clk;

	return;

fail:
	dev_err(dev, "Failed to register module clock %s: %ld\n",
		mod->name, PTR_ERR(clk));
}
/*
 * reset_control_ops.assert: put the module into reset and poll the
 * RST_MON register until the monitor bit reads asserted.
 */
static int rzv2h_cpg_assert(struct reset_controller_dev *rcdev,
			    unsigned long id)
{
	struct rzv2h_cpg_priv *priv = rcdev_to_priv(rcdev);
	unsigned int reg = GET_RST_OFFSET(priv->resets[id].reset_index);
	u32 mask = BIT(priv->resets[id].reset_bit);
	u8 monbit = priv->resets[id].mon_bit;
	/* Write-enable bit only: reset bit itself stays 0 => assert. */
	u32 value = mask << 16;

	dev_dbg(rcdev->dev, "assert id:%ld offset:0x%x\n", id, reg);

	writel(value, priv->base + reg);

	reg = GET_RST_MON_OFFSET(priv->resets[id].mon_index);
	mask = BIT(monbit);

	return readl_poll_timeout_atomic(priv->base + reg, value,
					 value & mask, 10, 200);
}
/*
 * reset_control_ops.deassert: release the module from reset and poll the
 * RST_MON register until the monitor bit clears.
 */
static int rzv2h_cpg_deassert(struct reset_controller_dev *rcdev,
			      unsigned long id)
{
	struct rzv2h_cpg_priv *priv = rcdev_to_priv(rcdev);
	unsigned int reg = GET_RST_OFFSET(priv->resets[id].reset_index);
	u32 mask = BIT(priv->resets[id].reset_bit);
	u8 monbit = priv->resets[id].mon_bit;
	/* Write-enable bit plus the reset bit itself => deassert. */
	u32 value = (mask << 16) | mask;

	dev_dbg(rcdev->dev, "deassert id:%ld offset:0x%x\n", id, reg);

	writel(value, priv->base + reg);

	reg = GET_RST_MON_OFFSET(priv->resets[id].mon_index);
	mask = BIT(monbit);

	return readl_poll_timeout_atomic(priv->base + reg, value,
					 !(value & mask), 10, 200);
}
  495. static int rzv2h_cpg_reset(struct reset_controller_dev *rcdev,
  496. unsigned long id)
  497. {
  498. int ret;
  499. ret = rzv2h_cpg_assert(rcdev, id);
  500. if (ret)
  501. return ret;
  502. return rzv2h_cpg_deassert(rcdev, id);
  503. }
  504. static int rzv2h_cpg_status(struct reset_controller_dev *rcdev,
  505. unsigned long id)
  506. {
  507. struct rzv2h_cpg_priv *priv = rcdev_to_priv(rcdev);
  508. unsigned int reg = GET_RST_MON_OFFSET(priv->resets[id].mon_index);
  509. u8 monbit = priv->resets[id].mon_bit;
  510. return !!(readl(priv->base + reg) & BIT(monbit));
  511. }
/* Reset controller operations backed by the RST/RST_MON register banks. */
static const struct reset_control_ops rzv2h_cpg_reset_ops = {
	.reset = rzv2h_cpg_reset,
	.assert = rzv2h_cpg_assert,
	.deassert = rzv2h_cpg_deassert,
	.status = rzv2h_cpg_status,
};
  518. static int rzv2h_cpg_reset_xlate(struct reset_controller_dev *rcdev,
  519. const struct of_phandle_args *reset_spec)
  520. {
  521. struct rzv2h_cpg_priv *priv = rcdev_to_priv(rcdev);
  522. unsigned int id = reset_spec->args[0];
  523. u8 rst_index = id / 16;
  524. u8 rst_bit = id % 16;
  525. unsigned int i;
  526. for (i = 0; i < rcdev->nr_resets; i++) {
  527. if (rst_index == priv->resets[i].reset_index &&
  528. rst_bit == priv->resets[i].reset_bit)
  529. return i;
  530. }
  531. return -EINVAL;
  532. }
/* Populate and register the reset controller embedded in @priv. */
static int rzv2h_cpg_reset_controller_register(struct rzv2h_cpg_priv *priv)
{
	priv->rcdev.ops = &rzv2h_cpg_reset_ops;
	priv->rcdev.of_node = priv->dev->of_node;
	priv->rcdev.dev = priv->dev;
	/* One DT cell: the flat reset number, decoded by the xlate hook. */
	priv->rcdev.of_reset_n_cells = 1;
	priv->rcdev.of_xlate = rzv2h_cpg_reset_xlate;
	priv->rcdev.nr_resets = priv->num_resets;

	return devm_reset_controller_register(priv->dev, &priv->rcdev);
}
/**
 * struct rzv2h_cpg_pd - RZ/V2H power domain data structure
 * @priv: pointer to CPG private data structure
 * @genpd: generic PM domain
 */
struct rzv2h_cpg_pd {
	struct rzv2h_cpg_priv *priv;
	struct generic_pm_domain genpd;
};
/*
 * genpd attach_dev hook: collect every clock referenced by the consumer's
 * DT "clocks" property into its PM clock list, so runtime PM gates them.
 *
 * pm_clk_create() is deferred until the first clock is found; on error
 * the labels unwind in acquisition order (clk ref, then pm_clk state).
 */
static int rzv2h_cpg_attach_dev(struct generic_pm_domain *domain, struct device *dev)
{
	struct device_node *np = dev->of_node;
	struct of_phandle_args clkspec;
	bool once = true;
	struct clk *clk;
	int error;
	int i = 0;

	while (!of_parse_phandle_with_args(np, "clocks", "#clock-cells", i,
					   &clkspec)) {
		if (once) {
			/* Create the PM clock list lazily, on the first match. */
			once = false;
			error = pm_clk_create(dev);
			if (error) {
				of_node_put(clkspec.np);
				goto err;
			}
		}
		clk = of_clk_get_from_provider(&clkspec);
		of_node_put(clkspec.np);
		if (IS_ERR(clk)) {
			error = PTR_ERR(clk);
			goto fail_destroy;
		}

		error = pm_clk_add_clk(dev, clk);
		if (error) {
			dev_err(dev, "pm_clk_add_clk failed %d\n",
				error);
			goto fail_put;
		}
		i++;
	}

	return 0;

fail_put:
	clk_put(clk);

fail_destroy:
	pm_clk_destroy(dev);
err:
	return error;
}
  592. static void rzv2h_cpg_detach_dev(struct generic_pm_domain *unused, struct device *dev)
  593. {
  594. if (!pm_clk_no_clocks(dev))
  595. pm_clk_destroy(dev);
  596. }
/* devm action callback: undo pm_genpd_init() when the CPG device goes away. */
static void rzv2h_cpg_genpd_remove_simple(void *data)
{
	pm_genpd_remove(data);
}
/*
 * Create and register the single always-on, PM-clock-managed power domain
 * exported by the CPG node. All cleanup is devm-managed.
 */
static int __init rzv2h_cpg_add_pm_domains(struct rzv2h_cpg_priv *priv)
{
	struct device *dev = priv->dev;
	struct device_node *np = dev->of_node;
	struct rzv2h_cpg_pd *pd;
	int ret;

	pd = devm_kzalloc(dev, sizeof(*pd), GFP_KERNEL);
	if (!pd)
		return -ENOMEM;

	pd->genpd.name = np->name;
	pd->priv = priv;
	/* Domain is never powered off; devices get clock gating via pm_clk. */
	pd->genpd.flags |= GENPD_FLAG_ALWAYS_ON | GENPD_FLAG_PM_CLK | GENPD_FLAG_ACTIVE_WAKEUP;
	pd->genpd.attach_dev = rzv2h_cpg_attach_dev;
	pd->genpd.detach_dev = rzv2h_cpg_detach_dev;
	ret = pm_genpd_init(&pd->genpd, &pm_domain_always_on_gov, false);
	if (ret)
		return ret;

	ret = devm_add_action_or_reset(dev, rzv2h_cpg_genpd_remove_simple, &pd->genpd);
	if (ret)
		return ret;

	return of_genpd_add_provider_simple(np, &pd->genpd);
}
/* devm action callback: unregister the OF clock provider on removal. */
static void rzv2h_cpg_del_clk_provider(void *data)
{
	of_clk_del_provider(data);
}
/*
 * Probe: map the CPG registers, register all core and module clocks,
 * expose them as an OF clock provider, then add the PM domain and the
 * reset controller. All resources are devm-managed.
 */
static int __init rzv2h_cpg_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct device_node *np = dev->of_node;
	const struct rzv2h_cpg_info *info;
	struct rzv2h_cpg_priv *priv;
	unsigned int nclks, i;
	struct clk **clks;
	int error;

	info = of_device_get_match_data(dev);

	priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
	if (!priv)
		return -ENOMEM;

	spin_lock_init(&priv->rmw_lock);

	priv->dev = dev;

	priv->base = devm_platform_ioremap_resource(pdev, 0);
	if (IS_ERR(priv->base))
		return PTR_ERR(priv->base);

	/* clks[] holds core clocks first, then all module clocks. */
	nclks = info->num_total_core_clks + info->num_hw_mod_clks;
	clks = devm_kmalloc_array(dev, nclks, sizeof(*clks), GFP_KERNEL);
	if (!clks)
		return -ENOMEM;

	/* Copy the reset table so priv owns a writable instance. */
	priv->resets = devm_kmemdup(dev, info->resets, sizeof(*info->resets) *
				    info->num_resets, GFP_KERNEL);
	if (!priv->resets)
		return -ENOMEM;

	dev_set_drvdata(dev, priv);
	priv->clks = clks;
	priv->num_core_clks = info->num_total_core_clks;
	priv->num_mod_clks = info->num_hw_mod_clks;
	priv->last_dt_core_clk = info->last_dt_core_clk;
	priv->num_resets = info->num_resets;

	/* Pre-fill every slot so unregistered IDs resolve to -ENOENT. */
	for (i = 0; i < nclks; i++)
		clks[i] = ERR_PTR(-ENOENT);

	for (i = 0; i < info->num_core_clks; i++)
		rzv2h_cpg_register_core_clk(&info->core_clks[i], priv);

	for (i = 0; i < info->num_mod_clks; i++)
		rzv2h_cpg_register_mod_clk(&info->mod_clks[i], priv);

	error = of_clk_add_provider(np, rzv2h_cpg_clk_src_twocell_get, priv);
	if (error)
		return error;

	error = devm_add_action_or_reset(dev, rzv2h_cpg_del_clk_provider, np);
	if (error)
		return error;

	error = rzv2h_cpg_add_pm_domains(priv);
	if (error)
		return error;

	error = rzv2h_cpg_reset_controller_register(priv);
	if (error)
		return error;

	return 0;
}
/* DT match table; per-SoC entries are gated on their Kconfig symbols. */
static const struct of_device_id rzv2h_cpg_match[] = {
#ifdef CONFIG_CLK_R9A09G057
	{
		.compatible = "renesas,r9a09g057-cpg",
		.data = &r9a09g057_cpg_info,
	},
#endif
	{ /* sentinel */ }
};
/* No .probe here: the __init probe is passed to platform_driver_probe(). */
static struct platform_driver rzv2h_cpg_driver = {
	.driver		= {
		.name	= "rzv2h-cpg",
		.of_match_table = rzv2h_cpg_match,
	},
};
/*
 * Registered at subsys_initcall time so the clocks exist before consumer
 * drivers probe; platform_driver_probe() lets the __init probe be discarded.
 */
static int __init rzv2h_cpg_init(void)
{
	return platform_driver_probe(&rzv2h_cpg_driver, rzv2h_cpg_probe);
}

subsys_initcall(rzv2h_cpg_init);

/* NOTE(review): no MODULE_LICENSE — presumably builtin-only; confirm Kconfig. */
MODULE_DESCRIPTION("Renesas RZ/V2H CPG Driver");