rzg2l-cpg.c

// SPDX-License-Identifier: GPL-2.0
/*
 * RZ/G2L Clock Pulse Generator
 *
 * Copyright (C) 2021 Renesas Electronics Corp.
 *
 * Based on renesas-cpg-mssr.c
 *
 * Copyright (C) 2015 Glider bvba
 * Copyright (C) 2013 Ideas On Board SPRL
 * Copyright (C) 2015 Renesas Electronics Corp.
 */

#include <linux/bitfield.h>
#include <linux/clk.h>
#include <linux/clk-provider.h>
#include <linux/clk/renesas.h>
#include <linux/delay.h>
#include <linux/device.h>
#include <linux/init.h>
#include <linux/iopoll.h>
#include <linux/mod_devicetable.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/pm_clock.h>
#include <linux/pm_domain.h>
#include <linux/reset-controller.h>
#include <linux/slab.h>
#include <linux/units.h>

#include <dt-bindings/clock/renesas-cpg-mssr.h>

#include "rzg2l-cpg.h"

#ifdef DEBUG
#define WARN_DEBUG(x)	WARN_ON(x)
#else
#define WARN_DEBUG(x)	do { } while (0)
#endif

#define GET_SHIFT(val)		((val >> 12) & 0xff)
#define GET_WIDTH(val)		((val >> 8) & 0xf)

#define KDIV(val)		((s16)FIELD_GET(GENMASK(31, 16), val))
#define MDIV(val)		FIELD_GET(GENMASK(15, 6), val)
#define PDIV(val)		FIELD_GET(GENMASK(5, 0), val)
#define SDIV(val)		FIELD_GET(GENMASK(2, 0), val)

#define RZG3S_DIV_P		GENMASK(28, 26)
#define RZG3S_DIV_M		GENMASK(25, 22)
#define RZG3S_DIV_NI		GENMASK(21, 13)
#define RZG3S_DIV_NF		GENMASK(12, 1)

#define CLK_ON_R(reg)		(reg)
#define CLK_MON_R(reg)		(0x180 + (reg))
#define CLK_RST_R(reg)		(reg)
#define CLK_MRST_R(reg)		(0x180 + (reg))

#define GET_REG_OFFSET(val)		((val >> 20) & 0xfff)
#define GET_REG_SAMPLL_CLK1(val)	((val >> 22) & 0xfff)
#define GET_REG_SAMPLL_CLK2(val)	((val >> 12) & 0xfff)
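
/*
 * Packing note (inferred from the accessors above; the matching pack macros
 * live in rzg2l-cpg.h): a core clock's "conf" word carries the register
 * offset in bits [31:20], the bit-field shift in bits [19:12], and the field
 * width in bits [11:8].  SAM PLL clocks reuse the same word for two register
 * offsets instead: GET_REG_SAMPLL_CLK1() reads 12 bits starting at bit 22,
 * GET_REG_SAMPLL_CLK2() reads 12 bits starting at bit 12.
 */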

#define CPG_WEN_BIT		BIT(16)

#define MAX_VCLK_FREQ		(148500000)

/**
 * struct clk_hw_data - clock hardware data
 * @hw: clock hw
 * @conf: clock configuration (register offset, shift, width)
 * @sconf: clock status configuration (register offset, shift, width)
 * @priv: CPG private data structure
 */
struct clk_hw_data {
	struct clk_hw hw;
	u32 conf;
	u32 sconf;
	struct rzg2l_cpg_priv *priv;
};

#define to_clk_hw_data(_hw)	container_of(_hw, struct clk_hw_data, hw)

/**
 * struct sd_mux_hw_data - SD MUX clock hardware data
 * @hw_data: clock hw data
 * @mtable: clock mux table
 */
struct sd_mux_hw_data {
	struct clk_hw_data hw_data;
	const u32 *mtable;
};

#define to_sd_mux_hw_data(_hw)	container_of(_hw, struct sd_mux_hw_data, hw_data)

/**
 * struct div_hw_data - divider clock hardware data
 * @hw_data: clock hw data
 * @dtable: pointer to divider table
 * @invalid_rate: invalid rate for divider
 * @max_rate: maximum rate for divider
 * @width: divider width
 */
struct div_hw_data {
	struct clk_hw_data hw_data;
	const struct clk_div_table *dtable;
	unsigned long invalid_rate;
	unsigned long max_rate;
	u32 width;
};

#define to_div_hw_data(_hw)	container_of(_hw, struct div_hw_data, hw_data)

struct rzg2l_pll5_param {
	u32 pl5_fracin;
	u8 pl5_refdiv;
	u8 pl5_intin;
	u8 pl5_postdiv1;
	u8 pl5_postdiv2;
	u8 pl5_spread;
};

struct rzg2l_pll5_mux_dsi_div_param {
	u8 clksrc;
	u8 dsi_div_a;
	u8 dsi_div_b;
};

/**
 * struct rzg2l_cpg_priv - Clock Pulse Generator Private Data
 *
 * @rcdev: Reset controller entity
 * @dev: CPG device
 * @base: CPG register block base address
 * @rmw_lock: protects register accesses
 * @clks: Array containing all Core and Module Clocks
 * @num_core_clks: Number of Core Clocks in clks[]
 * @num_mod_clks: Number of Module Clocks in clks[]
 * @num_resets: Number of Module Resets in info->resets[]
 * @last_dt_core_clk: ID of the last Core Clock exported to DT
 * @info: Pointer to platform data
 * @mux_dsi_div_params: pll5 mux and dsi div parameters
 */
struct rzg2l_cpg_priv {
	struct reset_controller_dev rcdev;
	struct device *dev;
	void __iomem *base;
	spinlock_t rmw_lock;

	struct clk **clks;
	unsigned int num_core_clks;
	unsigned int num_mod_clks;
	unsigned int num_resets;
	unsigned int last_dt_core_clk;

	const struct rzg2l_cpg_info *info;

	struct rzg2l_pll5_mux_dsi_div_param mux_dsi_div_params;
};

static void rzg2l_cpg_del_clk_provider(void *data)
{
	of_clk_del_provider(data);
}

/* Must be called in atomic context. */
static int rzg2l_cpg_wait_clk_update_done(void __iomem *base, u32 conf)
{
	u32 bitmask = GENMASK(GET_WIDTH(conf) - 1, 0) << GET_SHIFT(conf);
	u32 off = GET_REG_OFFSET(conf);
	u32 val;

	return readl_poll_timeout_atomic(base + off, val, !(val & bitmask), 10, 200);
}

int rzg2l_cpg_sd_clk_mux_notifier(struct notifier_block *nb, unsigned long event,
				  void *data)
{
	struct clk_notifier_data *cnd = data;
	struct clk_hw *hw = __clk_get_hw(cnd->clk);
	struct clk_hw_data *clk_hw_data = to_clk_hw_data(hw);
	struct rzg2l_cpg_priv *priv = clk_hw_data->priv;
	u32 off = GET_REG_OFFSET(clk_hw_data->conf);
	u32 shift = GET_SHIFT(clk_hw_data->conf);
	const u32 clk_src_266 = 3;
	unsigned long flags;
	int ret;

	if (event != PRE_RATE_CHANGE || (cnd->new_rate / MEGA == 266))
		return NOTIFY_DONE;

	spin_lock_irqsave(&priv->rmw_lock, flags);

	/*
	 * As per the HW manual, we should not directly switch from 533 MHz to
	 * 400 MHz and vice versa. To change the setting from 2'b01 (533 MHz)
	 * to 2'b10 (400 MHz) or vice versa, switch to 2'b11 (266 MHz) first,
	 * and then switch to the target setting (2'b01 (533 MHz) or 2'b10
	 * (400 MHz)).
	 * Setting a value of '0' to the SEL_SDHI0_SET or SEL_SDHI1_SET clock
	 * switching register is prohibited.
	 * The clock mux has 3 input clocks (533 MHz, 400 MHz, and 266 MHz),
	 * and the index-to-value mapping is done by adding 1 to the index.
	 */
	writel((CPG_WEN_BIT | clk_src_266) << shift, priv->base + off);

	/* Wait for the update done. */
	ret = rzg2l_cpg_wait_clk_update_done(priv->base, clk_hw_data->sconf);

	spin_unlock_irqrestore(&priv->rmw_lock, flags);

	if (ret)
		dev_err(priv->dev, "failed to switch to safe clk source\n");

	return notifier_from_errno(ret);
}

int rzg3s_cpg_div_clk_notifier(struct notifier_block *nb, unsigned long event,
			       void *data)
{
	struct clk_notifier_data *cnd = data;
	struct clk_hw *hw = __clk_get_hw(cnd->clk);
	struct clk_hw_data *clk_hw_data = to_clk_hw_data(hw);
	struct div_hw_data *div_hw_data = to_div_hw_data(clk_hw_data);
	struct rzg2l_cpg_priv *priv = clk_hw_data->priv;
	u32 off = GET_REG_OFFSET(clk_hw_data->conf);
	u32 shift = GET_SHIFT(clk_hw_data->conf);
	unsigned long flags;
	int ret = 0;
	u32 val;

	if (event != PRE_RATE_CHANGE || !div_hw_data->invalid_rate ||
	    div_hw_data->invalid_rate % cnd->new_rate)
		return NOTIFY_DONE;

	spin_lock_irqsave(&priv->rmw_lock, flags);

	val = readl(priv->base + off);
	val >>= shift;
	val &= GENMASK(GET_WIDTH(clk_hw_data->conf) - 1, 0);

	/*
	 * The users of this notifier have different constraints:
	 * 1/ the SD divider cannot be 1 (val == 0) if the parent rate is 800 MHz
	 * 2/ the OCTA/SPI divider cannot be 1 (val == 0) if the parent rate is 400 MHz
	 * As SD has only one parent running at 800 MHz, and OCTA/SPI only one
	 * parent running at 400 MHz, the parent rate was already taken into
	 * account at the start of this function (by checking invalid_rate %
	 * new_rate). Now check the hardware divider and update it accordingly.
	 */
	if (!val) {
		writel((CPG_WEN_BIT | 1) << shift, priv->base + off);
		/* Wait for the update done. */
		ret = rzg2l_cpg_wait_clk_update_done(priv->base, clk_hw_data->sconf);
	}

	spin_unlock_irqrestore(&priv->rmw_lock, flags);

	if (ret)
		dev_err(priv->dev, "Failed to downgrade the div\n");

	return notifier_from_errno(ret);
}

static int rzg2l_register_notifier(struct clk_hw *hw, const struct cpg_core_clk *core,
				   struct rzg2l_cpg_priv *priv)
{
	struct notifier_block *nb;

	if (!core->notifier)
		return 0;

	nb = devm_kzalloc(priv->dev, sizeof(*nb), GFP_KERNEL);
	if (!nb)
		return -ENOMEM;

	nb->notifier_call = core->notifier;

	return clk_notifier_register(hw->clk, nb);
}

static unsigned long rzg3s_div_clk_recalc_rate(struct clk_hw *hw,
					       unsigned long parent_rate)
{
	struct clk_hw_data *clk_hw_data = to_clk_hw_data(hw);
	struct div_hw_data *div_hw_data = to_div_hw_data(clk_hw_data);
	struct rzg2l_cpg_priv *priv = clk_hw_data->priv;
	u32 val;

	val = readl(priv->base + GET_REG_OFFSET(clk_hw_data->conf));
	val >>= GET_SHIFT(clk_hw_data->conf);
	val &= GENMASK(GET_WIDTH(clk_hw_data->conf) - 1, 0);

	return divider_recalc_rate(hw, parent_rate, val, div_hw_data->dtable,
				   CLK_DIVIDER_ROUND_CLOSEST, div_hw_data->width);
}

static int rzg3s_div_clk_determine_rate(struct clk_hw *hw, struct clk_rate_request *req)
{
	struct clk_hw_data *clk_hw_data = to_clk_hw_data(hw);
	struct div_hw_data *div_hw_data = to_div_hw_data(clk_hw_data);

	if (div_hw_data->max_rate && req->rate > div_hw_data->max_rate)
		req->rate = div_hw_data->max_rate;

	return divider_determine_rate(hw, req, div_hw_data->dtable, div_hw_data->width,
				      CLK_DIVIDER_ROUND_CLOSEST);
}

static int rzg3s_div_clk_set_rate(struct clk_hw *hw, unsigned long rate,
				  unsigned long parent_rate)
{
	struct clk_hw_data *clk_hw_data = to_clk_hw_data(hw);
	struct div_hw_data *div_hw_data = to_div_hw_data(clk_hw_data);
	struct rzg2l_cpg_priv *priv = clk_hw_data->priv;
	u32 off = GET_REG_OFFSET(clk_hw_data->conf);
	u32 shift = GET_SHIFT(clk_hw_data->conf);
	unsigned long flags;
	u32 val;
	int ret;

	val = divider_get_val(rate, parent_rate, div_hw_data->dtable, div_hw_data->width,
			      CLK_DIVIDER_ROUND_CLOSEST);

	spin_lock_irqsave(&priv->rmw_lock, flags);
	writel((CPG_WEN_BIT | val) << shift, priv->base + off);
	/* Wait for the update done. */
	ret = rzg2l_cpg_wait_clk_update_done(priv->base, clk_hw_data->sconf);
	spin_unlock_irqrestore(&priv->rmw_lock, flags);

	return ret;
}

static const struct clk_ops rzg3s_div_clk_ops = {
	.recalc_rate = rzg3s_div_clk_recalc_rate,
	.determine_rate = rzg3s_div_clk_determine_rate,
	.set_rate = rzg3s_div_clk_set_rate,
};

static struct clk * __init
rzg3s_cpg_div_clk_register(const struct cpg_core_clk *core, struct rzg2l_cpg_priv *priv)
{
	struct div_hw_data *div_hw_data;
	struct clk_init_data init = {};
	const struct clk_div_table *clkt;
	struct clk_hw *clk_hw;
	const struct clk *parent;
	const char *parent_name;
	u32 max = 0;
	int ret;

	parent = priv->clks[core->parent];
	if (IS_ERR(parent))
		return ERR_CAST(parent);

	parent_name = __clk_get_name(parent);

	div_hw_data = devm_kzalloc(priv->dev, sizeof(*div_hw_data), GFP_KERNEL);
	if (!div_hw_data)
		return ERR_PTR(-ENOMEM);

	init.name = core->name;
	init.flags = core->flag;
	init.ops = &rzg3s_div_clk_ops;
	init.parent_names = &parent_name;
	init.num_parents = 1;

	/* Get the maximum divider to retrieve div width. */
	for (clkt = core->dtable; clkt->div; clkt++) {
		if (max < clkt->div)
			max = clkt->div;
	}

	div_hw_data->hw_data.priv = priv;
	div_hw_data->hw_data.conf = core->conf;
	div_hw_data->hw_data.sconf = core->sconf;
	div_hw_data->dtable = core->dtable;
	div_hw_data->invalid_rate = core->invalid_rate;
	div_hw_data->max_rate = core->max_rate;
	div_hw_data->width = fls(max) - 1;

	clk_hw = &div_hw_data->hw_data.hw;
	clk_hw->init = &init;

	ret = devm_clk_hw_register(priv->dev, clk_hw);
	if (ret)
		return ERR_PTR(ret);

	ret = rzg2l_register_notifier(clk_hw, core, priv);
	if (ret) {
		dev_err(priv->dev, "Failed to register notifier for %s\n",
			core->name);
		return ERR_PTR(ret);
	}

	return clk_hw->clk;
}

static struct clk * __init
rzg2l_cpg_div_clk_register(const struct cpg_core_clk *core,
			   struct rzg2l_cpg_priv *priv)
{
	void __iomem *base = priv->base;
	struct device *dev = priv->dev;
	const struct clk *parent;
	const char *parent_name;
	struct clk_hw *clk_hw;

	parent = priv->clks[core->parent];
	if (IS_ERR(parent))
		return ERR_CAST(parent);

	parent_name = __clk_get_name(parent);

	if (core->dtable)
		clk_hw = clk_hw_register_divider_table(dev, core->name,
						       parent_name, 0,
						       base + GET_REG_OFFSET(core->conf),
						       GET_SHIFT(core->conf),
						       GET_WIDTH(core->conf),
						       core->flag,
						       core->dtable,
						       &priv->rmw_lock);
	else
		clk_hw = clk_hw_register_divider(dev, core->name,
						 parent_name, 0,
						 base + GET_REG_OFFSET(core->conf),
						 GET_SHIFT(core->conf),
						 GET_WIDTH(core->conf),
						 core->flag, &priv->rmw_lock);

	if (IS_ERR(clk_hw))
		return ERR_CAST(clk_hw);

	return clk_hw->clk;
}

static struct clk * __init
rzg2l_cpg_mux_clk_register(const struct cpg_core_clk *core,
			   struct rzg2l_cpg_priv *priv)
{
	const struct clk_hw *clk_hw;

	clk_hw = devm_clk_hw_register_mux(priv->dev, core->name,
					  core->parent_names, core->num_parents,
					  core->flag,
					  priv->base + GET_REG_OFFSET(core->conf),
					  GET_SHIFT(core->conf),
					  GET_WIDTH(core->conf),
					  core->mux_flags, &priv->rmw_lock);
	if (IS_ERR(clk_hw))
		return ERR_CAST(clk_hw);

	return clk_hw->clk;
}

static int rzg2l_cpg_sd_clk_mux_set_parent(struct clk_hw *hw, u8 index)
{
	struct clk_hw_data *clk_hw_data = to_clk_hw_data(hw);
	struct sd_mux_hw_data *sd_mux_hw_data = to_sd_mux_hw_data(clk_hw_data);
	struct rzg2l_cpg_priv *priv = clk_hw_data->priv;
	u32 off = GET_REG_OFFSET(clk_hw_data->conf);
	u32 shift = GET_SHIFT(clk_hw_data->conf);
	unsigned long flags;
	u32 val;
	int ret;

	val = clk_mux_index_to_val(sd_mux_hw_data->mtable, CLK_MUX_ROUND_CLOSEST, index);

	spin_lock_irqsave(&priv->rmw_lock, flags);

	writel((CPG_WEN_BIT | val) << shift, priv->base + off);

	/* Wait for the update done. */
	ret = rzg2l_cpg_wait_clk_update_done(priv->base, clk_hw_data->sconf);

	spin_unlock_irqrestore(&priv->rmw_lock, flags);

	if (ret)
		dev_err(priv->dev, "Failed to switch parent\n");

	return ret;
}

static u8 rzg2l_cpg_sd_clk_mux_get_parent(struct clk_hw *hw)
{
	struct clk_hw_data *clk_hw_data = to_clk_hw_data(hw);
	struct sd_mux_hw_data *sd_mux_hw_data = to_sd_mux_hw_data(clk_hw_data);
	struct rzg2l_cpg_priv *priv = clk_hw_data->priv;
	u32 val;

	val = readl(priv->base + GET_REG_OFFSET(clk_hw_data->conf));
	val >>= GET_SHIFT(clk_hw_data->conf);
	val &= GENMASK(GET_WIDTH(clk_hw_data->conf) - 1, 0);

	return clk_mux_val_to_index(hw, sd_mux_hw_data->mtable, CLK_MUX_ROUND_CLOSEST, val);
}

static const struct clk_ops rzg2l_cpg_sd_clk_mux_ops = {
	.determine_rate = __clk_mux_determine_rate_closest,
	.set_parent = rzg2l_cpg_sd_clk_mux_set_parent,
	.get_parent = rzg2l_cpg_sd_clk_mux_get_parent,
};

static struct clk * __init
rzg2l_cpg_sd_mux_clk_register(const struct cpg_core_clk *core,
			      struct rzg2l_cpg_priv *priv)
{
	struct sd_mux_hw_data *sd_mux_hw_data;
	struct clk_init_data init;
	struct clk_hw *clk_hw;
	int ret;

	sd_mux_hw_data = devm_kzalloc(priv->dev, sizeof(*sd_mux_hw_data), GFP_KERNEL);
	if (!sd_mux_hw_data)
		return ERR_PTR(-ENOMEM);

	sd_mux_hw_data->hw_data.priv = priv;
	sd_mux_hw_data->hw_data.conf = core->conf;
	sd_mux_hw_data->hw_data.sconf = core->sconf;
	sd_mux_hw_data->mtable = core->mtable;

	init.name = core->name;
	init.ops = &rzg2l_cpg_sd_clk_mux_ops;
	init.flags = core->flag;
	init.num_parents = core->num_parents;
	init.parent_names = core->parent_names;

	clk_hw = &sd_mux_hw_data->hw_data.hw;
	clk_hw->init = &init;

	ret = devm_clk_hw_register(priv->dev, clk_hw);
	if (ret)
		return ERR_PTR(ret);

	ret = rzg2l_register_notifier(clk_hw, core, priv);
	if (ret) {
		dev_err(priv->dev, "Failed to register notifier for %s\n",
			core->name);
		return ERR_PTR(ret);
	}

	return clk_hw->clk;
}
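
/*
 * Derivation note (mirrors the arithmetic in the function below): with the
 * fixed settings chosen here (refdiv = 2, postdiv1 = postdiv2 = 1),
 *
 *   FOUTVCO     = EXTAL * (intin + fracin / 2^24) / refdiv
 *   FOUTPOSTDIV = FOUTVCO / (postdiv1 * postdiv2)
 *
 * so pl5_intin carries the integer MHz part of the requested rate and
 * pl5_fracin the 24-bit fixed-point fractional remainder.
 */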

static unsigned long
rzg2l_cpg_get_foutpostdiv_rate(struct rzg2l_pll5_param *params,
			       unsigned long rate)
{
	unsigned long foutpostdiv_rate, foutvco_rate;

	params->pl5_intin = rate / MEGA;
	params->pl5_fracin = div_u64(((u64)rate % MEGA) << 24, MEGA);
	params->pl5_refdiv = 2;
	params->pl5_postdiv1 = 1;
	params->pl5_postdiv2 = 1;
	params->pl5_spread = 0x16;

	foutvco_rate = div_u64(mul_u32_u32(EXTAL_FREQ_IN_MEGA_HZ * MEGA,
					   (params->pl5_intin << 24) + params->pl5_fracin),
			       params->pl5_refdiv) >> 24;
	foutpostdiv_rate = DIV_ROUND_CLOSEST_ULL(foutvco_rate,
						 params->pl5_postdiv1 * params->pl5_postdiv2);

	return foutpostdiv_rate;
}

struct dsi_div_hw_data {
	struct clk_hw hw;
	u32 conf;
	unsigned long rate;
	struct rzg2l_cpg_priv *priv;
};

#define to_dsi_div_hw_data(_hw)	container_of(_hw, struct dsi_div_hw_data, hw)

static unsigned long rzg2l_cpg_dsi_div_recalc_rate(struct clk_hw *hw,
						   unsigned long parent_rate)
{
	struct dsi_div_hw_data *dsi_div = to_dsi_div_hw_data(hw);
	unsigned long rate = dsi_div->rate;

	if (!rate)
		rate = parent_rate;

	return rate;
}

static unsigned long rzg2l_cpg_get_vclk_parent_rate(struct clk_hw *hw,
						    unsigned long rate)
{
	struct dsi_div_hw_data *dsi_div = to_dsi_div_hw_data(hw);
	struct rzg2l_cpg_priv *priv = dsi_div->priv;
	struct rzg2l_pll5_param params;
	unsigned long parent_rate;

	parent_rate = rzg2l_cpg_get_foutpostdiv_rate(&params, rate);

	if (priv->mux_dsi_div_params.clksrc)
		parent_rate /= 2;

	return parent_rate;
}

static int rzg2l_cpg_dsi_div_determine_rate(struct clk_hw *hw,
					    struct clk_rate_request *req)
{
	if (req->rate > MAX_VCLK_FREQ)
		req->rate = MAX_VCLK_FREQ;

	req->best_parent_rate = rzg2l_cpg_get_vclk_parent_rate(hw, req->rate);

	return 0;
}

static int rzg2l_cpg_dsi_div_set_rate(struct clk_hw *hw,
				      unsigned long rate,
				      unsigned long parent_rate)
{
	struct dsi_div_hw_data *dsi_div = to_dsi_div_hw_data(hw);
	struct rzg2l_cpg_priv *priv = dsi_div->priv;

	/*
	 * MUX -->DIV_DSI_{A,B} -->M3 -->VCLK
	 *
	 * Based on the dot clock, the DSI divider clock sets the divider value,
	 * calculates the pll parameters for generating FOUTPOSTDIV and the clk
	 * source for the MUX and propagates that info to the parents.
	 */

	if (!rate || rate > MAX_VCLK_FREQ)
		return -EINVAL;

	dsi_div->rate = rate;
	writel(CPG_PL5_SDIV_DIV_DSI_A_WEN | CPG_PL5_SDIV_DIV_DSI_B_WEN |
	       (priv->mux_dsi_div_params.dsi_div_a << 0) |
	       (priv->mux_dsi_div_params.dsi_div_b << 8),
	       priv->base + CPG_PL5_SDIV);

	return 0;
}

static const struct clk_ops rzg2l_cpg_dsi_div_ops = {
	.recalc_rate = rzg2l_cpg_dsi_div_recalc_rate,
	.determine_rate = rzg2l_cpg_dsi_div_determine_rate,
	.set_rate = rzg2l_cpg_dsi_div_set_rate,
};

static struct clk * __init
rzg2l_cpg_dsi_div_clk_register(const struct cpg_core_clk *core,
			       struct rzg2l_cpg_priv *priv)
{
	struct dsi_div_hw_data *clk_hw_data;
	const struct clk *parent;
	const char *parent_name;
	struct clk_init_data init;
	struct clk_hw *clk_hw;
	int ret;

	parent = priv->clks[core->parent];
	if (IS_ERR(parent))
		return ERR_CAST(parent);

	clk_hw_data = devm_kzalloc(priv->dev, sizeof(*clk_hw_data), GFP_KERNEL);
	if (!clk_hw_data)
		return ERR_PTR(-ENOMEM);

	clk_hw_data->priv = priv;

	parent_name = __clk_get_name(parent);
	init.name = core->name;
	init.ops = &rzg2l_cpg_dsi_div_ops;
	init.flags = CLK_SET_RATE_PARENT;
	init.parent_names = &parent_name;
	init.num_parents = 1;

	clk_hw = &clk_hw_data->hw;
	clk_hw->init = &init;

	ret = devm_clk_hw_register(priv->dev, clk_hw);
	if (ret)
		return ERR_PTR(ret);

	return clk_hw->clk;
}

struct pll5_mux_hw_data {
	struct clk_hw hw;
	u32 conf;
	unsigned long rate;
	struct rzg2l_cpg_priv *priv;
};

#define to_pll5_mux_hw_data(_hw)	container_of(_hw, struct pll5_mux_hw_data, hw)

static int rzg2l_cpg_pll5_4_clk_mux_determine_rate(struct clk_hw *hw,
						   struct clk_rate_request *req)
{
	struct clk_hw *parent;
	struct pll5_mux_hw_data *hwdata = to_pll5_mux_hw_data(hw);
	struct rzg2l_cpg_priv *priv = hwdata->priv;

	parent = clk_hw_get_parent_by_index(hw, priv->mux_dsi_div_params.clksrc);
	req->best_parent_hw = parent;
	req->best_parent_rate = req->rate;

	return 0;
}

static int rzg2l_cpg_pll5_4_clk_mux_set_parent(struct clk_hw *hw, u8 index)
{
	struct pll5_mux_hw_data *hwdata = to_pll5_mux_hw_data(hw);
	struct rzg2l_cpg_priv *priv = hwdata->priv;

	/*
	 * FOUTPOSTDIV--->|
	 *  |             | -->MUX -->DIV_DSIA_B -->M3 -->VCLK
	 *  |--FOUT1PH0-->|
	 *
	 * Based on the dot clock, the DSI divider clock calculates the parent
	 * rate and clk source for the MUX. It propagates that info to
	 * pll5_4_clk_mux which sets the clock source for DSI divider clock.
	 */

	writel(CPG_OTHERFUNC1_REG_RES0_ON_WEN | index,
	       priv->base + CPG_OTHERFUNC1_REG);

	return 0;
}

static u8 rzg2l_cpg_pll5_4_clk_mux_get_parent(struct clk_hw *hw)
{
	struct pll5_mux_hw_data *hwdata = to_pll5_mux_hw_data(hw);
	struct rzg2l_cpg_priv *priv = hwdata->priv;

	return readl(priv->base + GET_REG_OFFSET(hwdata->conf));
}

static const struct clk_ops rzg2l_cpg_pll5_4_clk_mux_ops = {
	.determine_rate = rzg2l_cpg_pll5_4_clk_mux_determine_rate,
	.set_parent = rzg2l_cpg_pll5_4_clk_mux_set_parent,
	.get_parent = rzg2l_cpg_pll5_4_clk_mux_get_parent,
};

static struct clk * __init
rzg2l_cpg_pll5_4_mux_clk_register(const struct cpg_core_clk *core,
				  struct rzg2l_cpg_priv *priv)
{
	struct pll5_mux_hw_data *clk_hw_data;
	struct clk_init_data init;
	struct clk_hw *clk_hw;
	int ret;

	clk_hw_data = devm_kzalloc(priv->dev, sizeof(*clk_hw_data), GFP_KERNEL);
	if (!clk_hw_data)
		return ERR_PTR(-ENOMEM);

	clk_hw_data->priv = priv;
	clk_hw_data->conf = core->conf;

	init.name = core->name;
	init.ops = &rzg2l_cpg_pll5_4_clk_mux_ops;
	init.flags = CLK_SET_RATE_PARENT;
	init.num_parents = core->num_parents;
	init.parent_names = core->parent_names;

	clk_hw = &clk_hw_data->hw;
	clk_hw->init = &init;

	ret = devm_clk_hw_register(priv->dev, clk_hw);
	if (ret)
		return ERR_PTR(ret);

	return clk_hw->clk;
}

struct sipll5 {
	struct clk_hw hw;
	u32 conf;
	unsigned long foutpostdiv_rate;
	struct rzg2l_cpg_priv *priv;
};

#define to_sipll5(_hw)	container_of(_hw, struct sipll5, hw)

static unsigned long rzg2l_cpg_get_vclk_rate(struct clk_hw *hw,
					     unsigned long rate)
{
	struct sipll5 *sipll5 = to_sipll5(hw);
	struct rzg2l_cpg_priv *priv = sipll5->priv;
	unsigned long vclk;

	vclk = rate / ((1 << priv->mux_dsi_div_params.dsi_div_a) *
		       (priv->mux_dsi_div_params.dsi_div_b + 1));

	if (priv->mux_dsi_div_params.clksrc)
		vclk /= 2;

	return vclk;
}

static unsigned long rzg2l_cpg_sipll5_recalc_rate(struct clk_hw *hw,
						  unsigned long parent_rate)
{
	struct sipll5 *sipll5 = to_sipll5(hw);
	unsigned long pll5_rate = sipll5->foutpostdiv_rate;

	if (!pll5_rate)
		pll5_rate = parent_rate;

	return pll5_rate;
}

static long rzg2l_cpg_sipll5_round_rate(struct clk_hw *hw,
					unsigned long rate,
					unsigned long *parent_rate)
{
	return rate;
}

static int rzg2l_cpg_sipll5_set_rate(struct clk_hw *hw,
				     unsigned long rate,
				     unsigned long parent_rate)
{
	struct sipll5 *sipll5 = to_sipll5(hw);
	struct rzg2l_cpg_priv *priv = sipll5->priv;
	struct rzg2l_pll5_param params;
	unsigned long vclk_rate;
	int ret;
	u32 val;

	/*
	 *  OSC --> PLL5 --> FOUTPOSTDIV-->|
	 *                   |             | -->MUX -->DIV_DSIA_B -->M3 -->VCLK
	 *                   |--FOUT1PH0-->|
	 *
	 * Based on the dot clock, the DSI divider clock calculates the parent
	 * rate and the pll5 parameters for generating FOUTPOSTDIV. It propagates
	 * that info to sipll5 which sets parameters for generating FOUTPOSTDIV.
	 *
	 *  OSC --> PLL5 --> FOUTPOSTDIV
	 */

	if (!rate)
		return -EINVAL;

	vclk_rate = rzg2l_cpg_get_vclk_rate(hw, rate);
	sipll5->foutpostdiv_rate =
		rzg2l_cpg_get_foutpostdiv_rate(&params, vclk_rate);

	/* Put PLL5 into standby mode */
	writel(CPG_SIPLL5_STBY_RESETB_WEN, priv->base + CPG_SIPLL5_STBY);
	ret = readl_poll_timeout(priv->base + CPG_SIPLL5_MON, val,
				 !(val & CPG_SIPLL5_MON_PLL5_LOCK), 100, 250000);
	if (ret) {
		dev_err(priv->dev, "failed to release pll5 lock");
		return ret;
	}

	/* Output clock setting 1 */
	writel((params.pl5_postdiv1 << 0) | (params.pl5_postdiv2 << 4) |
	       (params.pl5_refdiv << 8), priv->base + CPG_SIPLL5_CLK1);

	/* Output clock setting, SSCG modulation value setting 3 */
	writel((params.pl5_fracin << 8), priv->base + CPG_SIPLL5_CLK3);

	/* Output clock setting 4 */
	writel(CPG_SIPLL5_CLK4_RESV_LSB | (params.pl5_intin << 16),
	       priv->base + CPG_SIPLL5_CLK4);

	/* Output clock setting 5 */
	writel(params.pl5_spread, priv->base + CPG_SIPLL5_CLK5);

	/* PLL normal mode setting */
	writel(CPG_SIPLL5_STBY_DOWNSPREAD_WEN | CPG_SIPLL5_STBY_SSCG_EN_WEN |
	       CPG_SIPLL5_STBY_RESETB_WEN | CPG_SIPLL5_STBY_RESETB,
	       priv->base + CPG_SIPLL5_STBY);

	/* PLL normal mode transition, output clock stability check */
	ret = readl_poll_timeout(priv->base + CPG_SIPLL5_MON, val,
				 (val & CPG_SIPLL5_MON_PLL5_LOCK), 100, 250000);
	if (ret) {
		dev_err(priv->dev, "failed to lock pll5");
		return ret;
	}

	return 0;
}

static const struct clk_ops rzg2l_cpg_sipll5_ops = {
	.recalc_rate = rzg2l_cpg_sipll5_recalc_rate,
	.round_rate = rzg2l_cpg_sipll5_round_rate,
	.set_rate = rzg2l_cpg_sipll5_set_rate,
};

static struct clk * __init
rzg2l_cpg_sipll5_register(const struct cpg_core_clk *core,
			  struct rzg2l_cpg_priv *priv)
{
	const struct clk *parent;
	struct clk_init_data init;
	const char *parent_name;
	struct sipll5 *sipll5;
	struct clk_hw *clk_hw;
	int ret;

	parent = priv->clks[core->parent];
	if (IS_ERR(parent))
		return ERR_CAST(parent);

	sipll5 = devm_kzalloc(priv->dev, sizeof(*sipll5), GFP_KERNEL);
	if (!sipll5)
		return ERR_PTR(-ENOMEM);

	init.name = core->name;
	parent_name = __clk_get_name(parent);
	init.ops = &rzg2l_cpg_sipll5_ops;
	init.flags = 0;
	init.parent_names = &parent_name;
	init.num_parents = 1;

	sipll5->hw.init = &init;
	sipll5->conf = core->conf;
	sipll5->priv = priv;

	writel(CPG_SIPLL5_STBY_SSCG_EN_WEN | CPG_SIPLL5_STBY_RESETB_WEN |
	       CPG_SIPLL5_STBY_RESETB, priv->base + CPG_SIPLL5_STBY);

	clk_hw = &sipll5->hw;
	clk_hw->init = &init;

	ret = devm_clk_hw_register(priv->dev, clk_hw);
	if (ret)
		return ERR_PTR(ret);

	priv->mux_dsi_div_params.clksrc = 1; /* Use clk src 1 for DSI */
	priv->mux_dsi_div_params.dsi_div_a = 1; /* Divided by 2 */
	priv->mux_dsi_div_params.dsi_div_b = 2; /* Divided by 3 */

	return clk_hw->clk;
}

struct pll_clk {
	struct clk_hw hw;
	unsigned int conf;
	unsigned int type;
	void __iomem *base;
	struct rzg2l_cpg_priv *priv;
};

#define to_pll(_hw)	container_of(_hw, struct pll_clk, hw)
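
/*
 * SAM PLL output rate, as computed by rzg2l_cpg_pll_clk_recalc_rate()
 * below from the CLK1/CLK2 register fields:
 *
 *   Fout = Fin * (MDIV + KDIV / 65536) / (PDIV * 2^SDIV)
 *
 * KDIV is a signed 16-bit value, so the fractional term can also tune
 * the rate downwards.
 */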

static unsigned long rzg2l_cpg_pll_clk_recalc_rate(struct clk_hw *hw,
						   unsigned long parent_rate)
{
	struct pll_clk *pll_clk = to_pll(hw);
	struct rzg2l_cpg_priv *priv = pll_clk->priv;
	unsigned int val1, val2;
	u64 rate;

	if (pll_clk->type != CLK_TYPE_SAM_PLL)
		return parent_rate;

	val1 = readl(priv->base + GET_REG_SAMPLL_CLK1(pll_clk->conf));
	val2 = readl(priv->base + GET_REG_SAMPLL_CLK2(pll_clk->conf));

	rate = mul_u64_u32_shr(parent_rate, (MDIV(val1) << 16) + KDIV(val1),
			       16 + SDIV(val2));

	return DIV_ROUND_CLOSEST_ULL(rate, PDIV(val1));
}

static const struct clk_ops rzg2l_cpg_pll_ops = {
	.recalc_rate = rzg2l_cpg_pll_clk_recalc_rate,
};
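
/*
 * RZ/G3S PLL output rate, matching rzg3s_cpg_pll_clk_recalc_rate() below:
 *
 *   Fout = Fin * (NI + NF / 4096) / (M * P)
 *
 * where NI, NF, M and P come from the RZG3S_DIV_* fields of the CLK1
 * register (NI and M are stored minus one, and P is a power-of-two
 * exponent whose result is capped at 16).
 */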

static unsigned long rzg3s_cpg_pll_clk_recalc_rate(struct clk_hw *hw,
						   unsigned long parent_rate)
{
	struct pll_clk *pll_clk = to_pll(hw);
	struct rzg2l_cpg_priv *priv = pll_clk->priv;
	u32 nir, nfr, mr, pr, val;
	u64 rate;

	if (pll_clk->type != CLK_TYPE_G3S_PLL)
		return parent_rate;

	val = readl(priv->base + GET_REG_SAMPLL_CLK1(pll_clk->conf));

	pr = 1 << FIELD_GET(RZG3S_DIV_P, val);
	/* Hardware interprets values higher than 8 as p = 16. */
	if (pr > 8)
		pr = 16;

	mr = FIELD_GET(RZG3S_DIV_M, val) + 1;
	nir = FIELD_GET(RZG3S_DIV_NI, val) + 1;
	nfr = FIELD_GET(RZG3S_DIV_NF, val);

	rate = mul_u64_u32_shr(parent_rate, 4096 * nir + nfr, 12);

	return DIV_ROUND_CLOSEST_ULL(rate, (mr * pr));
}

static const struct clk_ops rzg3s_cpg_pll_ops = {
	.recalc_rate = rzg3s_cpg_pll_clk_recalc_rate,
};

static struct clk * __init
rzg2l_cpg_pll_clk_register(const struct cpg_core_clk *core,
			   struct rzg2l_cpg_priv *priv,
			   const struct clk_ops *ops)
{
	struct device *dev = priv->dev;
	const struct clk *parent;
	struct clk_init_data init;
	const char *parent_name;
	struct pll_clk *pll_clk;
	int ret;

	parent = priv->clks[core->parent];
	if (IS_ERR(parent))
		return ERR_CAST(parent);

	pll_clk = devm_kzalloc(dev, sizeof(*pll_clk), GFP_KERNEL);
	if (!pll_clk)
		return ERR_PTR(-ENOMEM);

	parent_name = __clk_get_name(parent);
	init.name = core->name;
	init.ops = ops;
	init.flags = 0;
	init.parent_names = &parent_name;
	init.num_parents = 1;

	pll_clk->hw.init = &init;
	pll_clk->conf = core->conf;
	pll_clk->base = priv->base;
	pll_clk->priv = priv;
	pll_clk->type = core->type;

	ret = devm_clk_hw_register(dev, &pll_clk->hw);
	if (ret)
		return ERR_PTR(ret);

	return pll_clk->hw.clk;
}

static struct clk
*rzg2l_cpg_clk_src_twocell_get(struct of_phandle_args *clkspec,
			       void *data)
{
	unsigned int clkidx = clkspec->args[1];
	struct rzg2l_cpg_priv *priv = data;
	struct device *dev = priv->dev;
	const char *type;
	struct clk *clk;

	switch (clkspec->args[0]) {
	case CPG_CORE:
		type = "core";
		if (clkidx > priv->last_dt_core_clk) {
			dev_err(dev, "Invalid %s clock index %u\n", type, clkidx);
			return ERR_PTR(-EINVAL);
		}
		clk = priv->clks[clkidx];
		break;

	case CPG_MOD:
		type = "module";
		if (clkidx >= priv->num_mod_clks) {
			dev_err(dev, "Invalid %s clock index %u\n", type,
				clkidx);
			return ERR_PTR(-EINVAL);
		}
		clk = priv->clks[priv->num_core_clks + clkidx];
		break;

	default:
		dev_err(dev, "Invalid CPG clock type %u\n", clkspec->args[0]);
		return ERR_PTR(-EINVAL);
	}

	if (IS_ERR(clk))
		dev_err(dev, "Cannot get %s clock %u: %ld", type, clkidx,
			PTR_ERR(clk));
	else
		dev_dbg(dev, "clock (%u, %u) is %pC at %lu Hz\n",
			clkspec->args[0], clkspec->args[1], clk,
			clk_get_rate(clk));
	return clk;
}
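
/*
 * Consumers reference these clocks with a two-cell specifier: the first cell
 * selects CPG_CORE or CPG_MOD, the second the clock index, as decoded by
 * rzg2l_cpg_clk_src_twocell_get() above.  A purely illustrative device tree
 * fragment (not part of this file; the node and macro names follow the
 * RZ/G2L "R9A07G044" binding headers):
 *
 *   scif0: serial@1004b800 {
 *           ...
 *           clocks = <&cpg CPG_MOD R9A07G044_SCIF0_CLK_PCK>;
 *           ...
 *   };
 */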

static void __init
rzg2l_cpg_register_core_clk(const struct cpg_core_clk *core,
			    const struct rzg2l_cpg_info *info,
			    struct rzg2l_cpg_priv *priv)
{
	struct clk *clk = ERR_PTR(-EOPNOTSUPP), *parent;
	struct device *dev = priv->dev;
	unsigned int id = core->id, div = core->div;
	const char *parent_name;
	struct clk_hw *clk_hw;

	WARN_DEBUG(id >= priv->num_core_clks);
	WARN_DEBUG(PTR_ERR(priv->clks[id]) != -ENOENT);

	if (!core->name) {
		/* Skip NULLified clock */
		return;
	}

	switch (core->type) {
	case CLK_TYPE_IN:
		clk = of_clk_get_by_name(priv->dev->of_node, core->name);
		break;
	case CLK_TYPE_FF:
		WARN_DEBUG(core->parent >= priv->num_core_clks);
		parent = priv->clks[core->parent];
		if (IS_ERR(parent)) {
			clk = parent;
			goto fail;
		}

		parent_name = __clk_get_name(parent);
		clk_hw = devm_clk_hw_register_fixed_factor(dev, core->name, parent_name,
							   CLK_SET_RATE_PARENT,
							   core->mult, div);
		if (IS_ERR(clk_hw))
			clk = ERR_CAST(clk_hw);
		else
			clk = clk_hw->clk;
		break;
	case CLK_TYPE_SAM_PLL:
		clk = rzg2l_cpg_pll_clk_register(core, priv, &rzg2l_cpg_pll_ops);
		break;
	case CLK_TYPE_G3S_PLL:
		clk = rzg2l_cpg_pll_clk_register(core, priv, &rzg3s_cpg_pll_ops);
		break;
	case CLK_TYPE_SIPLL5:
		clk = rzg2l_cpg_sipll5_register(core, priv);
		break;
	case CLK_TYPE_DIV:
		clk = rzg2l_cpg_div_clk_register(core, priv);
		break;
	case CLK_TYPE_G3S_DIV:
		clk = rzg3s_cpg_div_clk_register(core, priv);
		break;
	case CLK_TYPE_MUX:
		clk = rzg2l_cpg_mux_clk_register(core, priv);
		break;
	case CLK_TYPE_SD_MUX:
		clk = rzg2l_cpg_sd_mux_clk_register(core, priv);
		break;
	case CLK_TYPE_PLL5_4_MUX:
		clk = rzg2l_cpg_pll5_4_mux_clk_register(core, priv);
		break;
	case CLK_TYPE_DSI_DIV:
		clk = rzg2l_cpg_dsi_div_clk_register(core, priv);
		break;
	default:
		goto fail;
	}

	if (IS_ERR_OR_NULL(clk))
		goto fail;

	dev_dbg(dev, "Core clock %pC at %lu Hz\n", clk, clk_get_rate(clk));
	priv->clks[id] = clk;
	return;

fail:
	dev_err(dev, "Failed to register %s clock %s: %ld\n", "core",
		core->name, PTR_ERR(clk));
}

/**
 * struct mstp_clock - MSTP gating clock
 *
 * @hw: handle between common and hardware-specific interfaces
 * @off: register offset
 * @bit: ON/MON bit
 * @enabled: soft state of the clock, if it is coupled with another clock
 * @priv: CPG/MSTP private data
 * @sibling: pointer to the other coupled clock
 */
struct mstp_clock {
	struct clk_hw hw;
	u16 off;
	u8 bit;
	bool enabled;
	struct rzg2l_cpg_priv *priv;
	struct mstp_clock *sibling;
};

#define to_mod_clock(_hw)	container_of(_hw, struct mstp_clock, hw)
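
/*
 * CLK_ON registers pair each control bit in the lower 16 bits with a
 * write-enable bit in the upper 16 bits, so a single 32-bit write updates
 * one module without a read-modify-write cycle: as used by
 * rzg2l_mod_clock_endisable() below, writing (BIT(n) << 16) gates clock n
 * off, and writing (BIT(n) << 16) | BIT(n) turns it on.
 */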

static int rzg2l_mod_clock_endisable(struct clk_hw *hw, bool enable)
{
	struct mstp_clock *clock = to_mod_clock(hw);
	struct rzg2l_cpg_priv *priv = clock->priv;
	unsigned int reg = clock->off;
	struct device *dev = priv->dev;
	u32 bitmask = BIT(clock->bit);
	u32 value;
	int error;

	if (!clock->off) {
		dev_dbg(dev, "%pC does not support ON/OFF\n", hw->clk);
		return 0;
	}

	dev_dbg(dev, "CLK_ON 0x%x/%pC %s\n", CLK_ON_R(reg), hw->clk,
		enable ? "ON" : "OFF");

	value = bitmask << 16;
	if (enable)
		value |= bitmask;

	writel(value, priv->base + CLK_ON_R(reg));

	if (!enable)
		return 0;

	if (!priv->info->has_clk_mon_regs)
		return 0;

	error = readl_poll_timeout_atomic(priv->base + CLK_MON_R(reg), value,
					  value & bitmask, 0, 10);
	if (error)
		dev_err(dev, "Failed to enable CLK_ON %p\n",
			priv->base + CLK_ON_R(reg));

	return error;
}

static int rzg2l_mod_clock_enable(struct clk_hw *hw)
{
	struct mstp_clock *clock = to_mod_clock(hw);

	if (clock->sibling) {
		struct rzg2l_cpg_priv *priv = clock->priv;
		unsigned long flags;
		bool enabled;

		spin_lock_irqsave(&priv->rmw_lock, flags);
		enabled = clock->sibling->enabled;
		clock->enabled = true;
		spin_unlock_irqrestore(&priv->rmw_lock, flags);
		if (enabled)
			return 0;
	}

	return rzg2l_mod_clock_endisable(hw, true);
}

static void rzg2l_mod_clock_disable(struct clk_hw *hw)
{
	struct mstp_clock *clock = to_mod_clock(hw);

	if (clock->sibling) {
		struct rzg2l_cpg_priv *priv = clock->priv;
		unsigned long flags;
		bool enabled;

		spin_lock_irqsave(&priv->rmw_lock, flags);
		enabled = clock->sibling->enabled;
		clock->enabled = false;
		spin_unlock_irqrestore(&priv->rmw_lock, flags);
		if (enabled)
			return;
	}

	rzg2l_mod_clock_endisable(hw, false);
}

static int rzg2l_mod_clock_is_enabled(struct clk_hw *hw)
{
	struct mstp_clock *clock = to_mod_clock(hw);
	struct rzg2l_cpg_priv *priv = clock->priv;
	u32 bitmask = BIT(clock->bit);
	u32 value;

	if (!clock->off) {
		dev_dbg(priv->dev, "%pC does not support ON/OFF\n", hw->clk);
		return 1;
	}

	if (clock->sibling)
		return clock->enabled;

	if (priv->info->has_clk_mon_regs)
		value = readl(priv->base + CLK_MON_R(clock->off));
	else
		value = readl(priv->base + clock->off);

	return value & bitmask;
}

static const struct clk_ops rzg2l_mod_clock_ops = {
	.enable = rzg2l_mod_clock_enable,
	.disable = rzg2l_mod_clock_disable,
	.is_enabled = rzg2l_mod_clock_is_enabled,
};
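
/*
 * Coupled ("sibling") module clocks share a single ON/MON bit in hardware.
 * The per-clock 'enabled' soft state maintained above lets the pair keep
 * the shared bit set while either clock is still in use, and only gate it
 * once both have been disabled.
 */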

static struct mstp_clock
*rzg2l_mod_clock_get_sibling(struct mstp_clock *clock,
			     struct rzg2l_cpg_priv *priv)
{
	struct clk_hw *hw;
	unsigned int i;

	for (i = 0; i < priv->num_mod_clks; i++) {
		struct mstp_clock *clk;

		if (priv->clks[priv->num_core_clks + i] == ERR_PTR(-ENOENT))
			continue;

		hw = __clk_get_hw(priv->clks[priv->num_core_clks + i]);
		clk = to_mod_clock(hw);
		if (clock->off == clk->off && clock->bit == clk->bit)
			return clk;
	}

	return NULL;
}

static void __init
rzg2l_cpg_register_mod_clk(const struct rzg2l_mod_clk *mod,
			   const struct rzg2l_cpg_info *info,
			   struct rzg2l_cpg_priv *priv)
{
	struct mstp_clock *clock = NULL;
	struct device *dev = priv->dev;
	unsigned int id = mod->id;
	struct clk_init_data init;
	struct clk *parent, *clk;
	const char *parent_name;
	unsigned int i;
	int ret;

	WARN_DEBUG(id < priv->num_core_clks);
	WARN_DEBUG(id >= priv->num_core_clks + priv->num_mod_clks);
	WARN_DEBUG(mod->parent >= priv->num_core_clks + priv->num_mod_clks);
	WARN_DEBUG(PTR_ERR(priv->clks[id]) != -ENOENT);

	if (!mod->name) {
		/* Skip NULLified clock */
		return;
	}

	parent = priv->clks[mod->parent];
	if (IS_ERR(parent)) {
		clk = parent;
		goto fail;
	}

	clock = devm_kzalloc(dev, sizeof(*clock), GFP_KERNEL);
	if (!clock) {
		clk = ERR_PTR(-ENOMEM);
		goto fail;
	}

	init.name = mod->name;
	init.ops = &rzg2l_mod_clock_ops;
	init.flags = CLK_SET_RATE_PARENT;
	for (i = 0; i < info->num_crit_mod_clks; i++)
		if (id == info->crit_mod_clks[i]) {
			dev_dbg(dev, "CPG %s setting CLK_IS_CRITICAL\n",
				mod->name);
			init.flags |= CLK_IS_CRITICAL;
			break;
		}

	parent_name = __clk_get_name(parent);
	init.parent_names = &parent_name;
	init.num_parents = 1;

	clock->off = mod->off;
	clock->bit = mod->bit;
	clock->priv = priv;
	clock->hw.init = &init;

	ret = devm_clk_hw_register(dev, &clock->hw);
	if (ret) {
		clk = ERR_PTR(ret);
		goto fail;
	}

	clk = clock->hw.clk;
	dev_dbg(dev, "Module clock %pC at %lu Hz\n", clk, clk_get_rate(clk));
	priv->clks[id] = clk;

	if (mod->is_coupled) {
		struct mstp_clock *sibling;

		clock->enabled = rzg2l_mod_clock_is_enabled(&clock->hw);
		sibling = rzg2l_mod_clock_get_sibling(clock, priv);
		if (sibling) {
			clock->sibling = sibling;
			sibling->sibling = clock;
		}
	}

	return;

fail:
	dev_err(dev, "Failed to register %s clock %s: %ld\n", "module",
		mod->name, PTR_ERR(clk));
}

#define rcdev_to_priv(x)	container_of(x, struct rzg2l_cpg_priv, rcdev)
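
/*
 * CLK_RST registers follow the same upper-16-bit write-enable layout as
 * CLK_ON: the upper half selects which of the lower 16 reset bits the
 * write affects.  Asserting a reset writes (mask << 16) to drive the
 * reset line low; deasserting writes (mask << 16) | mask.
 */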
  1151. static int rzg2l_cpg_assert(struct reset_controller_dev *rcdev,
  1152. unsigned long id)
  1153. {
  1154. struct rzg2l_cpg_priv *priv = rcdev_to_priv(rcdev);
  1155. const struct rzg2l_cpg_info *info = priv->info;
  1156. unsigned int reg = info->resets[id].off;
  1157. u32 mask = BIT(info->resets[id].bit);
  1158. s8 monbit = info->resets[id].monbit;
  1159. u32 value = mask << 16;
  1160. dev_dbg(rcdev->dev, "assert id:%ld offset:0x%x\n", id, CLK_RST_R(reg));
  1161. writel(value, priv->base + CLK_RST_R(reg));
  1162. if (info->has_clk_mon_regs) {
  1163. reg = CLK_MRST_R(reg);
  1164. } else if (monbit >= 0) {
  1165. reg = CPG_RST_MON;
  1166. mask = BIT(monbit);
  1167. } else {
  1168. /* Wait for at least one cycle of the RCLK clock (@ ca. 32 kHz) */
  1169. udelay(35);
  1170. return 0;
  1171. }
  1172. return readl_poll_timeout_atomic(priv->base + reg, value,
  1173. value & mask, 10, 200);
  1174. }
  1175. static int rzg2l_cpg_deassert(struct reset_controller_dev *rcdev,
  1176. unsigned long id)
  1177. {
  1178. struct rzg2l_cpg_priv *priv = rcdev_to_priv(rcdev);
  1179. const struct rzg2l_cpg_info *info = priv->info;
  1180. unsigned int reg = info->resets[id].off;
  1181. u32 mask = BIT(info->resets[id].bit);
  1182. s8 monbit = info->resets[id].monbit;
  1183. u32 value = (mask << 16) | mask;
  1184. dev_dbg(rcdev->dev, "deassert id:%ld offset:0x%x\n", id,
  1185. CLK_RST_R(reg));
  1186. writel(value, priv->base + CLK_RST_R(reg));
  1187. if (info->has_clk_mon_regs) {
  1188. reg = CLK_MRST_R(reg);
  1189. } else if (monbit >= 0) {
  1190. reg = CPG_RST_MON;
  1191. mask = BIT(monbit);
  1192. } else {
  1193. /* Wait for at least one cycle of the RCLK clock (@ ca. 32 kHz) */
  1194. udelay(35);
  1195. return 0;
  1196. }
  1197. return readl_poll_timeout_atomic(priv->base + reg, value,
  1198. !(value & mask), 10, 200);
  1199. }
  1200. static int rzg2l_cpg_reset(struct reset_controller_dev *rcdev,
  1201. unsigned long id)
  1202. {
  1203. int ret;
  1204. ret = rzg2l_cpg_assert(rcdev, id);
  1205. if (ret)
  1206. return ret;
  1207. return rzg2l_cpg_deassert(rcdev, id);
  1208. }
  1209. static int rzg2l_cpg_status(struct reset_controller_dev *rcdev,
  1210. unsigned long id)
  1211. {
  1212. struct rzg2l_cpg_priv *priv = rcdev_to_priv(rcdev);
  1213. const struct rzg2l_cpg_info *info = priv->info;
  1214. s8 monbit = info->resets[id].monbit;
  1215. unsigned int reg;
  1216. u32 bitmask;
  1217. if (info->has_clk_mon_regs) {
  1218. reg = CLK_MRST_R(info->resets[id].off);
  1219. bitmask = BIT(info->resets[id].bit);
  1220. } else if (monbit >= 0) {
  1221. reg = CPG_RST_MON;
  1222. bitmask = BIT(monbit);
  1223. } else {
  1224. return -ENOTSUPP;
  1225. }
  1226. return !!(readl(priv->base + reg) & bitmask);
  1227. }
  1228. static const struct reset_control_ops rzg2l_cpg_reset_ops = {
  1229. .reset = rzg2l_cpg_reset,
  1230. .assert = rzg2l_cpg_assert,
  1231. .deassert = rzg2l_cpg_deassert,
  1232. .status = rzg2l_cpg_status,
  1233. };
  1234. static int rzg2l_cpg_reset_xlate(struct reset_controller_dev *rcdev,
  1235. const struct of_phandle_args *reset_spec)
  1236. {
  1237. struct rzg2l_cpg_priv *priv = rcdev_to_priv(rcdev);
  1238. const struct rzg2l_cpg_info *info = priv->info;
  1239. unsigned int id = reset_spec->args[0];
  1240. if (id >= rcdev->nr_resets || !info->resets[id].off) {
  1241. dev_err(rcdev->dev, "Invalid reset index %u\n", id);
  1242. return -EINVAL;
  1243. }
  1244. return id;
  1245. }
  1246. static int rzg2l_cpg_reset_controller_register(struct rzg2l_cpg_priv *priv)
  1247. {
  1248. priv->rcdev.ops = &rzg2l_cpg_reset_ops;
  1249. priv->rcdev.of_node = priv->dev->of_node;
  1250. priv->rcdev.dev = priv->dev;
  1251. priv->rcdev.of_reset_n_cells = 1;
  1252. priv->rcdev.of_xlate = rzg2l_cpg_reset_xlate;
  1253. priv->rcdev.nr_resets = priv->num_resets;
  1254. return devm_reset_controller_register(priv->dev, &priv->rcdev);
  1255. }
  1256. static bool rzg2l_cpg_is_pm_clk(struct rzg2l_cpg_priv *priv,
  1257. const struct of_phandle_args *clkspec)
  1258. {
  1259. const struct rzg2l_cpg_info *info = priv->info;
  1260. unsigned int id;
  1261. unsigned int i;
  1262. if (clkspec->args_count != 2)
  1263. return false;
  1264. if (clkspec->args[0] != CPG_MOD)
  1265. return false;
  1266. id = clkspec->args[1] + info->num_total_core_clks;
  1267. for (i = 0; i < info->num_no_pm_mod_clks; i++) {
  1268. if (info->no_pm_mod_clks[i] == id)
  1269. return false;
  1270. }
  1271. return true;
  1272. }
  1273. /**
  1274. * struct rzg2l_cpg_pm_domains - RZ/G2L PM domains data structure
  1275. * @onecell_data: cell data
  1276. * @domains: generic PM domains
  1277. */
  1278. struct rzg2l_cpg_pm_domains {
  1279. struct genpd_onecell_data onecell_data;
  1280. struct generic_pm_domain *domains[];
  1281. };
  1282. /**
  1283. * struct rzg2l_cpg_pd - RZ/G2L power domain data structure
  1284. * @genpd: generic PM domain
  1285. * @priv: pointer to CPG private data structure
  1286. * @conf: CPG PM domain configuration info
  1287. * @id: RZ/G2L power domain ID
  1288. */
  1289. struct rzg2l_cpg_pd {
  1290. struct generic_pm_domain genpd;
  1291. struct rzg2l_cpg_priv *priv;
  1292. struct rzg2l_cpg_pm_domain_conf conf;
  1293. u16 id;
  1294. };
  1295. static int rzg2l_cpg_attach_dev(struct generic_pm_domain *domain, struct device *dev)
  1296. {
  1297. struct rzg2l_cpg_pd *pd = container_of(domain, struct rzg2l_cpg_pd, genpd);
  1298. struct rzg2l_cpg_priv *priv = pd->priv;
  1299. struct device_node *np = dev->of_node;
  1300. struct of_phandle_args clkspec;
  1301. bool once = true;
  1302. struct clk *clk;
  1303. int error;
  1304. int i = 0;
  1305. while (!of_parse_phandle_with_args(np, "clocks", "#clock-cells", i,
  1306. &clkspec)) {
  1307. if (rzg2l_cpg_is_pm_clk(priv, &clkspec)) {
  1308. if (once) {
  1309. once = false;
  1310. error = pm_clk_create(dev);
  1311. if (error) {
  1312. of_node_put(clkspec.np);
  1313. goto err;
  1314. }
  1315. }
  1316. clk = of_clk_get_from_provider(&clkspec);
  1317. of_node_put(clkspec.np);
  1318. if (IS_ERR(clk)) {
  1319. error = PTR_ERR(clk);
  1320. goto fail_destroy;
  1321. }
  1322. error = pm_clk_add_clk(dev, clk);
  1323. if (error) {
  1324. dev_err(dev, "pm_clk_add_clk failed %d\n",
  1325. error);
  1326. goto fail_put;
  1327. }
  1328. } else {
  1329. of_node_put(clkspec.np);
  1330. }
  1331. i++;
  1332. }
  1333. return 0;
  1334. fail_put:
  1335. clk_put(clk);
  1336. fail_destroy:
  1337. pm_clk_destroy(dev);
  1338. err:
  1339. return error;
  1340. }
  1341. static void rzg2l_cpg_detach_dev(struct generic_pm_domain *unused, struct device *dev)
  1342. {
  1343. if (!pm_clk_no_clocks(dev))
  1344. pm_clk_destroy(dev);
  1345. }
  1346. static void rzg2l_cpg_genpd_remove(void *data)
  1347. {
  1348. struct genpd_onecell_data *celldata = data;
  1349. for (unsigned int i = 0; i < celldata->num_domains; i++)
  1350. pm_genpd_remove(celldata->domains[i]);
  1351. }
static void rzg2l_cpg_genpd_remove_simple(void *data)
{
	pm_genpd_remove(data);
}

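/*
 * MSTOP register layout, as implied by the two writes below: the upper 16
 * bits act as a write-enable mask for the corresponding lower 16 bits, so a
 * single 32-bit write can update selected MSTOP bits without a
 * read-modify-write. Power-on writes the mask only into the write-enable
 * half (clearing the MSTOP bits, i.e. letting the module run); power-off
 * writes it into both halves (setting the MSTOP bits, stopping the module).
 */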
static int rzg2l_cpg_power_on(struct generic_pm_domain *domain)
{
	struct rzg2l_cpg_pd *pd = container_of(domain, struct rzg2l_cpg_pd, genpd);
	struct rzg2l_cpg_reg_conf mstop = pd->conf.mstop;
	struct rzg2l_cpg_priv *priv = pd->priv;

	/* Clear MSTOP. */
	if (mstop.mask)
		writel(mstop.mask << 16, priv->base + mstop.off);

	return 0;
}

static int rzg2l_cpg_power_off(struct generic_pm_domain *domain)
{
	struct rzg2l_cpg_pd *pd = container_of(domain, struct rzg2l_cpg_pd, genpd);
	struct rzg2l_cpg_reg_conf mstop = pd->conf.mstop;
	struct rzg2l_cpg_priv *priv = pd->priv;

	/* Set MSTOP. */
	if (mstop.mask)
		writel(mstop.mask | (mstop.mask << 16), priv->base + mstop.off);

	return 0;
}

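/*
 * Always-on domains get the always-on governor and no power_on/power_off
 * callbacks; all other domains use the simple QoS governor and are
 * initialized in the powered-off state (the last pm_genpd_init() argument
 * is "is_off").
 */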
static int __init rzg2l_cpg_pd_setup(struct rzg2l_cpg_pd *pd, bool always_on)
{
	struct dev_power_governor *governor;

	pd->genpd.flags |= GENPD_FLAG_PM_CLK | GENPD_FLAG_ACTIVE_WAKEUP;
	pd->genpd.attach_dev = rzg2l_cpg_attach_dev;
	pd->genpd.detach_dev = rzg2l_cpg_detach_dev;
	if (always_on) {
		pd->genpd.flags |= GENPD_FLAG_ALWAYS_ON;
		governor = &pm_domain_always_on_gov;
	} else {
		pd->genpd.power_on = rzg2l_cpg_power_on;
		pd->genpd.power_off = rzg2l_cpg_power_off;
		governor = &simple_qos_governor;
	}

	return pm_genpd_init(&pd->genpd, governor, !always_on);
}

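/*
 * Legacy binding: older device trees use "#power-domain-cells = <0>" and a
 * single always-on clock domain covering the whole CPG. Consumers then
 * reference it without an index, e.g. (illustrative only):
 *
 *	power-domains = <&cpg>;
 */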
static int __init rzg2l_cpg_add_clk_domain(struct rzg2l_cpg_priv *priv)
{
	struct device *dev = priv->dev;
	struct device_node *np = dev->of_node;
	struct rzg2l_cpg_pd *pd;
	int ret;

	pd = devm_kzalloc(dev, sizeof(*pd), GFP_KERNEL);
	if (!pd)
		return -ENOMEM;

	pd->genpd.name = np->name;
	pd->priv = priv;
	ret = rzg2l_cpg_pd_setup(pd, true);
	if (ret)
		return ret;

	ret = devm_add_action_or_reset(dev, rzg2l_cpg_genpd_remove_simple, &pd->genpd);
	if (ret)
		return ret;

	return of_genpd_add_provider_simple(np, &pd->genpd);
}

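/*
 * Translate a one-cell power-domain specifier to a genpd by matching the
 * cell value against pd->id rather than using the array index, which keeps
 * the binding constants independent of the ordering of info->pm_domains[].
 * A consumer would look like the sketch below; the macro is an example from
 * the r9a08g045 dt-binding header, not defined in this file:
 *
 *	power-domains = <&cpg R9A08G045_PD_GIC>;
 */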
static struct generic_pm_domain *
rzg2l_cpg_pm_domain_xlate(const struct of_phandle_args *spec, void *data)
{
	struct generic_pm_domain *domain = ERR_PTR(-ENOENT);
	struct genpd_onecell_data *genpd = data;

	if (spec->args_count != 1)
		return ERR_PTR(-EINVAL);

	for (unsigned int i = 0; i < genpd->num_domains; i++) {
		struct rzg2l_cpg_pd *pd = container_of(genpd->domains[i], struct rzg2l_cpg_pd,
						       genpd);

		if (pd->id == spec->args[0]) {
			domain = &pd->genpd;
			break;
		}
	}

	return domain;
}

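/*
 * Register one genpd per entry in info->pm_domains[]. The first entry is
 * the parent (always-on) domain; every later entry is added as its
 * subdomain. Always-on domains are powered up explicitly here, since genpd
 * never invokes a power_on callback for them (none is installed).
 */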
static int __init rzg2l_cpg_add_pm_domains(struct rzg2l_cpg_priv *priv)
{
	const struct rzg2l_cpg_info *info = priv->info;
	struct device *dev = priv->dev;
	struct device_node *np = dev->of_node;
	struct rzg2l_cpg_pm_domains *domains;
	struct generic_pm_domain *parent;
	u32 ncells;
	int ret;

	ret = of_property_read_u32(np, "#power-domain-cells", &ncells);
	if (ret)
		return ret;

	/* For backward compatibility. */
	if (!ncells)
		return rzg2l_cpg_add_clk_domain(priv);

	domains = devm_kzalloc(dev, struct_size(domains, domains, info->num_pm_domains),
			       GFP_KERNEL);
	if (!domains)
		return -ENOMEM;

	domains->onecell_data.domains = domains->domains;
	domains->onecell_data.num_domains = info->num_pm_domains;
	domains->onecell_data.xlate = rzg2l_cpg_pm_domain_xlate;

	ret = devm_add_action_or_reset(dev, rzg2l_cpg_genpd_remove, &domains->onecell_data);
	if (ret)
		return ret;

	for (unsigned int i = 0; i < info->num_pm_domains; i++) {
		bool always_on = !!(info->pm_domains[i].flags & RZG2L_PD_F_ALWAYS_ON);
		struct rzg2l_cpg_pd *pd;

		pd = devm_kzalloc(dev, sizeof(*pd), GFP_KERNEL);
		if (!pd)
			return -ENOMEM;

		pd->genpd.name = info->pm_domains[i].name;
		pd->conf = info->pm_domains[i].conf;
		pd->id = info->pm_domains[i].id;
		pd->priv = priv;

		ret = rzg2l_cpg_pd_setup(pd, always_on);
		if (ret)
			return ret;

		if (always_on) {
			ret = rzg2l_cpg_power_on(&pd->genpd);
			if (ret)
				return ret;
		}

		domains->domains[i] = &pd->genpd;
		/* Parent should be on the very first entry of info->pm_domains[]. */
		if (!i) {
			parent = &pd->genpd;
			continue;
		}

		ret = pm_genpd_add_subdomain(parent, &pd->genpd);
		if (ret)
			return ret;
	}

	ret = of_genpd_add_provider_onecell(np, &domains->onecell_data);
	if (ret)
		return ret;

	return 0;
}

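/*
 * Probe order matters: all core and module clocks are registered before the
 * clk provider is exposed, the PM domains depend on those clocks, and the
 * reset controller is registered last. Allocations and provider
 * registrations are devm-managed (of_clk_add_provider() is paired with a
 * devm action), so any failure unwinds automatically.
 */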
static int __init rzg2l_cpg_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct device_node *np = dev->of_node;
	const struct rzg2l_cpg_info *info;
	struct rzg2l_cpg_priv *priv;
	unsigned int nclks, i;
	struct clk **clks;
	int error;

	info = of_device_get_match_data(dev);

	priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
	if (!priv)
		return -ENOMEM;

	priv->dev = dev;
	priv->info = info;
	spin_lock_init(&priv->rmw_lock);

	priv->base = devm_platform_ioremap_resource(pdev, 0);
	if (IS_ERR(priv->base))
		return PTR_ERR(priv->base);

	nclks = info->num_total_core_clks + info->num_hw_mod_clks;
	clks = devm_kmalloc_array(dev, nclks, sizeof(*clks), GFP_KERNEL);
	if (!clks)
		return -ENOMEM;

	dev_set_drvdata(dev, priv);
	priv->clks = clks;
	priv->num_core_clks = info->num_total_core_clks;
	priv->num_mod_clks = info->num_hw_mod_clks;
	priv->num_resets = info->num_resets;
	priv->last_dt_core_clk = info->last_dt_core_clk;

	for (i = 0; i < nclks; i++)
		clks[i] = ERR_PTR(-ENOENT);

	for (i = 0; i < info->num_core_clks; i++)
		rzg2l_cpg_register_core_clk(&info->core_clks[i], info, priv);

	for (i = 0; i < info->num_mod_clks; i++)
		rzg2l_cpg_register_mod_clk(&info->mod_clks[i], info, priv);

	error = of_clk_add_provider(np, rzg2l_cpg_clk_src_twocell_get, priv);
	if (error)
		return error;

	error = devm_add_action_or_reset(dev, rzg2l_cpg_del_clk_provider, np);
	if (error)
		return error;

	error = rzg2l_cpg_add_pm_domains(priv);
	if (error)
		return error;

	error = rzg2l_cpg_reset_controller_register(priv);
	if (error)
		return error;

	return 0;
}

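/*
 * Each SoC match entry is guarded by its Kconfig symbol, so only the clock
 * tables for SoCs enabled in the kernel configuration are built into the
 * image.
 */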
static const struct of_device_id rzg2l_cpg_match[] = {
#ifdef CONFIG_CLK_R9A07G043
	{
		.compatible = "renesas,r9a07g043-cpg",
		.data = &r9a07g043_cpg_info,
	},
#endif
#ifdef CONFIG_CLK_R9A07G044
	{
		.compatible = "renesas,r9a07g044-cpg",
		.data = &r9a07g044_cpg_info,
	},
#endif
#ifdef CONFIG_CLK_R9A07G054
	{
		.compatible = "renesas,r9a07g054-cpg",
		.data = &r9a07g054_cpg_info,
	},
#endif
#ifdef CONFIG_CLK_R9A08G045
	{
		.compatible = "renesas,r9a08g045-cpg",
		.data = &r9a08g045_cpg_info,
	},
#endif
#ifdef CONFIG_CLK_R9A09G011
	{
		.compatible = "renesas,r9a09g011-cpg",
		.data = &r9a09g011_cpg_info,
	},
#endif
	{ /* sentinel */ }
};

static struct platform_driver rzg2l_cpg_driver = {
	.driver		= {
		.name	= "rzg2l-cpg",
		.of_match_table = rzg2l_cpg_match,
	},
};

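/*
 * The probe routine lives in the __init section, so the driver is bound
 * with platform_driver_probe() (no deferred probing after init is
 * possible), and registration happens at subsys_initcall time so that
 * clocks, resets and power domains exist before dependent drivers probe.
 */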
static int __init rzg2l_cpg_init(void)
{
	return platform_driver_probe(&rzg2l_cpg_driver, rzg2l_cpg_probe);
}

subsys_initcall(rzg2l_cpg_init);

MODULE_DESCRIPTION("Renesas RZ/G2L CPG Driver");