clk-scu.c 22 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
3704705706707708709710711712713714715716717718719720721722723724725726727728729730731732733734735736737738739740741742743744745746747748749750751752753754755756757758759760761762763764765766767768769770771772773774775776777778779780781782783784785786787788789790791792793794795796797798799800801802803804805806807808809810811812813814815816817818819820821822823824825826827828829830831832833834835836837838839840841842843844845846847848849850851852853854855856857858859860861862863864865866867868869870871872873874875876877878879880881882883884885886887888889890891892893894895896897898899900901902903904905906907908909910911912913914915916917918919920921922923924925926927928929930931932
  1. // SPDX-License-Identifier: GPL-2.0+
  2. /*
  3. * Copyright 2018-2021 NXP
  4. * Dong Aisheng <aisheng.dong@nxp.com>
  5. */
  6. #include <dt-bindings/firmware/imx/rsrc.h>
  7. #include <linux/arm-smccc.h>
  8. #include <linux/bsearch.h>
  9. #include <linux/clk-provider.h>
  10. #include <linux/err.h>
  11. #include <linux/of.h>
  12. #include <linux/firmware/imx/svc/rm.h>
  13. #include <linux/platform_device.h>
  14. #include <linux/pm_domain.h>
  15. #include <linux/pm_runtime.h>
  16. #include <linux/slab.h>
  17. #include <xen/xen.h>
  18. #include "clk-scu.h"
/* ARM SiP service identifiers used to ask ATF to scale the CPU clocks */
#define IMX_SIP_CPUFREQ			0xC2000001
#define IMX_SIP_SET_CPUFREQ		0x00

/* IPC handle used for every SCU RPC call in this file */
static struct imx_sc_ipc *ccm_ipc_handle;
/* "fsl,scu-pd" provider node; used to attach clock devices to power domains */
static struct device_node *pd_np;
static struct platform_driver imx_clk_scu_driver;
/* optional sorted allow-list of clock resources; NULL means all are valid */
static const struct imx_clk_scu_rsrc_table *rsrc_table;

/*
 * struct imx_scu_clk_node - platform data describing one SCU clock device
 * @name: clock (and platform device) name
 * @rsrc: SCU resource ID of the clock
 * @clk_type: clock type within the resource
 * @parents: parent clock names, may be NULL
 * @num_parents: number of entries in @parents
 * @hw: registered clk_hw, filled in by imx_clk_scu_probe()
 * @node: link in the per-resource imx_scu_clks[] list
 */
struct imx_scu_clk_node {
	const char *name;
	u32 rsrc;
	u8 clk_type;
	const char * const *parents;
	int num_parents;

	struct clk_hw *hw;
	struct list_head node;
};

/* per-resource lists of registered clocks, indexed by SCU resource ID */
struct list_head imx_scu_clks[IMX_SC_R_LAST];
/*
 * struct clk_scu - Description of one SCU clock
 * @hw: the common clk_hw
 * @rsrc_id: resource ID of this SCU clock
 * @clk_type: type of this clock resource
 */
struct clk_scu {
	struct clk_hw hw;
	u16 rsrc_id;
	u8 clk_type;

	/* for state save&restore across system suspend/resume */
	struct clk_hw *parent;
	u8 parent_index;
	bool is_enabled;
	u32 rate;
};

/*
 * struct clk_gpr_scu - Description of one SCU GPR clock
 * @hw: the common clk_hw
 * @rsrc_id: resource ID of this SCU clock
 * @gpr_id: GPR ID index to control the divider
 * @flags: clock type selector (IMX_SCU_GPR_CLK_GATE/DIV/MUX)
 * @gate_invert: for gates, when true a 0 in the GPR means "enabled"
 */
struct clk_gpr_scu {
	struct clk_hw hw;
	u16 rsrc_id;
	u8 gpr_id;
	u8 flags;
	bool gate_invert;
};

/* Retrieve the struct clk_gpr_scu wrapping @_hw */
#define to_clk_gpr_scu(_hw) container_of(_hw, struct clk_gpr_scu, hw)
/*
 * struct imx_sc_msg_req_set_clock_rate - clock set rate protocol
 * @hdr: SCU protocol header
 * @rate: rate to set
 * @resource: clock resource to set rate
 * @clk: clk type of this resource
 *
 * This structure describes the SCU protocol of clock rate set
 */
struct imx_sc_msg_req_set_clock_rate {
	struct imx_sc_rpc_msg hdr;
	__le32 rate;
	__le16 resource;
	u8 clk;
} __packed __aligned(4);

/* request payload for the clock get-rate RPC */
struct req_get_clock_rate {
	__le16 resource;
	u8 clk;
} __packed __aligned(4);

/* response payload for the clock get-rate RPC */
struct resp_get_clock_rate {
	__le32 rate;
};

/*
 * struct imx_sc_msg_get_clock_rate - clock get rate protocol
 * @hdr: SCU protocol header
 * @req: get rate request protocol
 * @resp: get rate response protocol
 *
 * This structure describes the SCU protocol of clock rate get.
 * The response overlays the request in the union.
 */
struct imx_sc_msg_get_clock_rate {
	struct imx_sc_rpc_msg hdr;
	union {
		struct req_get_clock_rate req;
		struct resp_get_clock_rate resp;
	} data;
};

/*
 * struct imx_sc_msg_get_clock_parent - clock get parent protocol
 * @hdr: SCU protocol header
 * @req: get parent request protocol
 * @resp: get parent response protocol
 *
 * This structure describes the SCU protocol of clock get parent
 */
struct imx_sc_msg_get_clock_parent {
	struct imx_sc_rpc_msg hdr;
	union {
		struct req_get_clock_parent {
			__le16 resource;
			u8 clk;
		} __packed __aligned(4) req;
		struct resp_get_clock_parent {
			u8 parent;
		} resp;
	} data;
};

/*
 * struct imx_sc_msg_set_clock_parent - clock set parent protocol
 * @hdr: SCU protocol header
 * @resource: clock resource to set parent for
 * @clk: clk type of this resource
 * @parent: parent index to select
 *
 * This structure describes the SCU protocol of clock set parent
 */
struct imx_sc_msg_set_clock_parent {
	struct imx_sc_rpc_msg hdr;
	__le16 resource;
	u8 clk;
	u8 parent;
} __packed;

/*
 * struct imx_sc_msg_req_clock_enable - clock gate protocol
 * @hdr: SCU protocol header
 * @resource: clock resource to gate
 * @clk: clk type of this resource
 * @enable: whether gate off the clock
 * @autog: HW auto gate enable
 *
 * This structure describes the SCU protocol of clock gate
 */
struct imx_sc_msg_req_clock_enable {
	struct imx_sc_rpc_msg hdr;
	__le16 resource;
	u8 clk;
	u8 enable;
	u8 autog;
} __packed __aligned(4);
/* Retrieve the struct clk_scu wrapping @hw */
static inline struct clk_scu *to_clk_scu(struct clk_hw *hw)
{
	return container_of(hw, struct clk_scu, hw);
}
  156. static inline int imx_scu_clk_search_cmp(const void *rsrc, const void *rsrc_p)
  157. {
  158. return *(u32 *)rsrc - *(u32 *)rsrc_p;
  159. }
  160. static bool imx_scu_clk_is_valid(u32 rsrc_id)
  161. {
  162. void *p;
  163. if (!rsrc_table)
  164. return true;
  165. p = bsearch(&rsrc_id, rsrc_table->rsrc, rsrc_table->num,
  166. sizeof(rsrc_table->rsrc[0]), imx_scu_clk_search_cmp);
  167. return p != NULL;
  168. }
  169. int imx_clk_scu_init(struct device_node *np,
  170. const struct imx_clk_scu_rsrc_table *data)
  171. {
  172. u32 clk_cells;
  173. int ret, i;
  174. ret = imx_scu_get_handle(&ccm_ipc_handle);
  175. if (ret)
  176. return ret;
  177. of_property_read_u32(np, "#clock-cells", &clk_cells);
  178. if (clk_cells == 2) {
  179. for (i = 0; i < IMX_SC_R_LAST; i++)
  180. INIT_LIST_HEAD(&imx_scu_clks[i]);
  181. /* pd_np will be used to attach power domains later */
  182. pd_np = of_find_compatible_node(NULL, NULL, "fsl,scu-pd");
  183. if (!pd_np)
  184. return -EINVAL;
  185. rsrc_table = data;
  186. }
  187. return platform_driver_register(&imx_clk_scu_driver);
  188. }
/*
 * clk_scu_recalc_rate - Get clock rate for a SCU clock
 * @hw: clock to get rate for
 * @parent_rate: parent rate provided by common clock framework, not used
 *
 * Gets the current clock rate of a SCU clock. Returns the current
 * clock rate, or zero in failure.
 */
static unsigned long clk_scu_recalc_rate(struct clk_hw *hw,
					 unsigned long parent_rate)
{
	struct clk_scu *clk = to_clk_scu(hw);
	struct imx_sc_msg_get_clock_rate msg;
	struct imx_sc_rpc_msg *hdr = &msg.hdr;
	int ret;

	/* build the GET_CLOCK_RATE RPC request */
	hdr->ver = IMX_SC_RPC_VERSION;
	hdr->svc = IMX_SC_RPC_SVC_PM;
	hdr->func = IMX_SC_PM_FUNC_GET_CLOCK_RATE;
	hdr->size = 2;

	msg.data.req.resource = cpu_to_le16(clk->rsrc_id);
	msg.data.req.clk = clk->clk_type;

	ret = imx_scu_call_rpc(ccm_ipc_handle, &msg, true);
	if (ret) {
		pr_err("%s: failed to get clock rate %d\n",
		       clk_hw_get_name(hw), ret);
		return 0;
	}

	/* the response overlays the request inside the union */
	return le32_to_cpu(msg.data.resp.rate);
}
  218. /*
  219. * clk_scu_determine_rate - Returns the closest rate for a SCU clock
  220. * @hw: clock to round rate for
  221. * @req: clock rate request
  222. *
  223. * Returns 0 on success, a negative error on failure
  224. */
  225. static int clk_scu_determine_rate(struct clk_hw *hw,
  226. struct clk_rate_request *req)
  227. {
  228. /*
  229. * Assume we support all the requested rate and let the SCU firmware
  230. * to handle the left work
  231. */
  232. return 0;
  233. }
  234. /*
  235. * clk_scu_round_rate - Round clock rate for a SCU clock
  236. * @hw: clock to round rate for
  237. * @rate: rate to round
  238. * @parent_rate: parent rate provided by common clock framework, not used
  239. *
  240. * Returns the current clock rate, or zero in failure.
  241. */
  242. static long clk_scu_round_rate(struct clk_hw *hw, unsigned long rate,
  243. unsigned long *parent_rate)
  244. {
  245. /*
  246. * Assume we support all the requested rate and let the SCU firmware
  247. * to handle the left work
  248. */
  249. return rate;
  250. }
/*
 * clk_scu_atf_set_cpu_rate - Set rate for an A-core CPU clock via ATF
 * @hw: CPU clock to change rate for
 * @rate: target rate
 * @parent_rate: parent rate, not used
 *
 * Issues an SiP SMC call instead of a normal SCU RPC, since CPU
 * frequency scaling is handled by ARM-Trusted-Firmware.
 *
 * Returns 0, or -EINVAL for a non-CPU resource. The SMC result is
 * not checked.
 */
static int clk_scu_atf_set_cpu_rate(struct clk_hw *hw, unsigned long rate,
				    unsigned long parent_rate)
{
	struct clk_scu *clk = to_clk_scu(hw);
	struct arm_smccc_res res;
	unsigned long cluster_id;

	/* map the CPU resource to its cluster index */
	if (clk->rsrc_id == IMX_SC_R_A35 || clk->rsrc_id == IMX_SC_R_A53)
		cluster_id = 0;
	else if (clk->rsrc_id == IMX_SC_R_A72)
		cluster_id = 1;
	else
		return -EINVAL;

	/* CPU frequency scaling can ONLY be done by ARM-Trusted-Firmware */
	arm_smccc_smc(IMX_SIP_CPUFREQ, IMX_SIP_SET_CPUFREQ,
		      cluster_id, rate, 0, 0, 0, 0, &res);

	return 0;
}
/*
 * clk_scu_set_rate - Set rate for a SCU clock
 * @hw: clock to change rate for
 * @rate: target rate for the clock
 * @parent_rate: rate of the clock parent, not used for SCU clocks
 *
 * Sets a clock frequency for a SCU clock. Returns the SCU
 * protocol status.
 */
static int clk_scu_set_rate(struct clk_hw *hw, unsigned long rate,
			    unsigned long parent_rate)
{
	struct clk_scu *clk = to_clk_scu(hw);
	struct imx_sc_msg_req_set_clock_rate msg;
	struct imx_sc_rpc_msg *hdr = &msg.hdr;

	/* build the SET_CLOCK_RATE RPC request */
	hdr->ver = IMX_SC_RPC_VERSION;
	hdr->svc = IMX_SC_RPC_SVC_PM;
	hdr->func = IMX_SC_PM_FUNC_SET_CLOCK_RATE;
	hdr->size = 3;

	msg.rate = cpu_to_le32(rate);
	msg.resource = cpu_to_le16(clk->rsrc_id);
	msg.clk = clk->clk_type;

	return imx_scu_call_rpc(ccm_ipc_handle, &msg, true);
}
/*
 * clk_scu_get_parent - Get the parent index of a SCU clock
 * @hw: clock to query
 *
 * Asks the SCU firmware for the clock's current parent and caches the
 * index in clk->parent_index for restore on resume. Returns index 0
 * when the RPC fails, as this clk_ops callback cannot report errors.
 */
static u8 clk_scu_get_parent(struct clk_hw *hw)
{
	struct clk_scu *clk = to_clk_scu(hw);
	struct imx_sc_msg_get_clock_parent msg;
	struct imx_sc_rpc_msg *hdr = &msg.hdr;
	int ret;

	hdr->ver = IMX_SC_RPC_VERSION;
	hdr->svc = IMX_SC_RPC_SVC_PM;
	hdr->func = IMX_SC_PM_FUNC_GET_CLOCK_PARENT;
	hdr->size = 2;

	msg.data.req.resource = cpu_to_le16(clk->rsrc_id);
	msg.data.req.clk = clk->clk_type;

	ret = imx_scu_call_rpc(ccm_ipc_handle, &msg, true);
	if (ret) {
		pr_err("%s: failed to get clock parent %d\n",
		       clk_hw_get_name(hw), ret);
		return 0;
	}

	/* remember what the firmware reported so resume can re-apply it */
	clk->parent_index = msg.data.resp.parent;

	return msg.data.resp.parent;
}
/*
 * clk_scu_set_parent - Set the parent of a SCU clock
 * @hw: clock to reparent
 * @index: parent index to select
 *
 * Sends a SET_CLOCK_PARENT RPC and, on success, caches @index for
 * restore on resume. Returns the SCU protocol status.
 */
static int clk_scu_set_parent(struct clk_hw *hw, u8 index)
{
	struct clk_scu *clk = to_clk_scu(hw);
	struct imx_sc_msg_set_clock_parent msg;
	struct imx_sc_rpc_msg *hdr = &msg.hdr;
	int ret;

	hdr->ver = IMX_SC_RPC_VERSION;
	hdr->svc = IMX_SC_RPC_SVC_PM;
	hdr->func = IMX_SC_PM_FUNC_SET_CLOCK_PARENT;
	hdr->size = 2;

	msg.resource = cpu_to_le16(clk->rsrc_id);
	msg.clk = clk->clk_type;
	msg.parent = index;

	ret = imx_scu_call_rpc(ccm_ipc_handle, &msg, true);
	if (ret) {
		pr_err("%s: failed to set clock parent %d\n",
		       clk_hw_get_name(hw), ret);
		return ret;
	}

	/* cache for state restore on resume */
	clk->parent_index = index;

	return 0;
}
/*
 * sc_pm_clock_enable - gate or ungate one clock via SCU RPC
 * @ipc: IPC handle — NOTE(review): currently unused, the global
 *       ccm_ipc_handle is passed to the RPC instead; confirm intent
 * @resource: clock resource to gate
 * @clk: clk type of this resource
 * @enable: true to ungate, false to gate
 * @autog: HW auto gate enable
 *
 * Returns the SCU protocol status.
 */
static int sc_pm_clock_enable(struct imx_sc_ipc *ipc, u16 resource,
			      u8 clk, bool enable, bool autog)
{
	struct imx_sc_msg_req_clock_enable msg;
	struct imx_sc_rpc_msg *hdr = &msg.hdr;

	hdr->ver = IMX_SC_RPC_VERSION;
	hdr->svc = IMX_SC_RPC_SVC_PM;
	hdr->func = IMX_SC_PM_FUNC_CLOCK_ENABLE;
	hdr->size = 3;

	msg.resource = cpu_to_le16(resource);
	msg.clk = clk;
	msg.enable = enable;
	msg.autog = autog;

	return imx_scu_call_rpc(ccm_ipc_handle, &msg, true);
}
  350. /*
  351. * clk_scu_prepare - Enable a SCU clock
  352. * @hw: clock to enable
  353. *
  354. * Enable the clock at the DSC slice level
  355. */
  356. static int clk_scu_prepare(struct clk_hw *hw)
  357. {
  358. struct clk_scu *clk = to_clk_scu(hw);
  359. return sc_pm_clock_enable(ccm_ipc_handle, clk->rsrc_id,
  360. clk->clk_type, true, false);
  361. }
  362. /*
  363. * clk_scu_unprepare - Disable a SCU clock
  364. * @hw: clock to enable
  365. *
  366. * Disable the clock at the DSC slice level
  367. */
  368. static void clk_scu_unprepare(struct clk_hw *hw)
  369. {
  370. struct clk_scu *clk = to_clk_scu(hw);
  371. int ret;
  372. ret = sc_pm_clock_enable(ccm_ipc_handle, clk->rsrc_id,
  373. clk->clk_type, false, false);
  374. if (ret)
  375. pr_warn("%s: clk unprepare failed %d\n", clk_hw_get_name(hw),
  376. ret);
  377. }
/* full-featured SCU clock: rate, parent and gate control via SCU RPC */
static const struct clk_ops clk_scu_ops = {
	.recalc_rate = clk_scu_recalc_rate,
	.determine_rate = clk_scu_determine_rate,
	.set_rate = clk_scu_set_rate,
	.get_parent = clk_scu_get_parent,
	.set_parent = clk_scu_set_parent,
	.prepare = clk_scu_prepare,
	.unprepare = clk_scu_unprepare,
};

/* A-core CPU clocks: rate changes go through ATF instead of the SCU */
static const struct clk_ops clk_scu_cpu_ops = {
	.recalc_rate = clk_scu_recalc_rate,
	.round_rate = clk_scu_round_rate,
	.set_rate = clk_scu_atf_set_cpu_rate,
	.prepare = clk_scu_prepare,
	.unprepare = clk_scu_unprepare,
};

/* PI PLL: rate control only, no parent or gate operations */
static const struct clk_ops clk_scu_pi_ops = {
	.recalc_rate = clk_scu_recalc_rate,
	.round_rate = clk_scu_round_rate,
	.set_rate = clk_scu_set_rate,
};
  399. struct clk_hw *__imx_clk_scu(struct device *dev, const char *name,
  400. const char * const *parents, int num_parents,
  401. u32 rsrc_id, u8 clk_type)
  402. {
  403. struct clk_init_data init;
  404. struct clk_scu *clk;
  405. struct clk_hw *hw;
  406. int ret;
  407. clk = kzalloc(sizeof(*clk), GFP_KERNEL);
  408. if (!clk)
  409. return ERR_PTR(-ENOMEM);
  410. clk->rsrc_id = rsrc_id;
  411. clk->clk_type = clk_type;
  412. init.name = name;
  413. init.ops = &clk_scu_ops;
  414. if (rsrc_id == IMX_SC_R_A35 || rsrc_id == IMX_SC_R_A53 || rsrc_id == IMX_SC_R_A72)
  415. init.ops = &clk_scu_cpu_ops;
  416. else if (rsrc_id == IMX_SC_R_PI_0_PLL)
  417. init.ops = &clk_scu_pi_ops;
  418. else
  419. init.ops = &clk_scu_ops;
  420. init.parent_names = parents;
  421. init.num_parents = num_parents;
  422. /*
  423. * Note on MX8, the clocks are tightly coupled with power domain
  424. * that once the power domain is off, the clock status may be
  425. * lost. So we make it NOCACHE to let user to retrieve the real
  426. * clock status from HW instead of using the possible invalid
  427. * cached rate.
  428. */
  429. init.flags = CLK_GET_RATE_NOCACHE;
  430. clk->hw.init = &init;
  431. hw = &clk->hw;
  432. ret = clk_hw_register(dev, hw);
  433. if (ret) {
  434. kfree(clk);
  435. hw = ERR_PTR(ret);
  436. return hw;
  437. }
  438. if (dev)
  439. dev_set_drvdata(dev, clk);
  440. return hw;
  441. }
  442. struct clk_hw *imx_scu_of_clk_src_get(struct of_phandle_args *clkspec,
  443. void *data)
  444. {
  445. unsigned int rsrc = clkspec->args[0];
  446. unsigned int idx = clkspec->args[1];
  447. struct list_head *scu_clks = data;
  448. struct imx_scu_clk_node *clk;
  449. list_for_each_entry(clk, &scu_clks[rsrc], node) {
  450. if (clk->clk_type == idx)
  451. return clk->hw;
  452. }
  453. return ERR_PTR(-ENODEV);
  454. }
/*
 * imx_clk_scu_probe - bind one SCU clock platform device
 * @pdev: platform device carrying struct imx_scu_clk_node platdata
 *
 * For non-CPU clocks, runtime PM is enabled and the device is powered
 * up (resuming its power domain) before the clock is registered; CPU
 * clock resources (A35/A53/A72) skip runtime PM entirely.
 */
static int imx_clk_scu_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct imx_scu_clk_node *clk = dev_get_platdata(dev);
	struct clk_hw *hw;
	int ret;

	if (!((clk->rsrc == IMX_SC_R_A35) || (clk->rsrc == IMX_SC_R_A53) ||
	    (clk->rsrc == IMX_SC_R_A72))) {
		pm_runtime_set_suspended(dev);
		pm_runtime_set_autosuspend_delay(dev, 50);
		pm_runtime_use_autosuspend(&pdev->dev);
		pm_runtime_enable(dev);

		ret = pm_runtime_resume_and_get(dev);
		if (ret) {
			/* undo the genpd attach done in imx_clk_scu_attach_pd() */
			pm_genpd_remove_device(dev);
			pm_runtime_disable(dev);
			return ret;
		}
	}

	hw = __imx_clk_scu(dev, clk->name, clk->parents, clk->num_parents,
			   clk->rsrc, clk->clk_type);
	if (IS_ERR(hw)) {
		pm_runtime_disable(dev);
		return PTR_ERR(hw);
	}

	/* expose the clock through the per-resource lookup list */
	clk->hw = hw;
	list_add_tail(&clk->node, &imx_scu_clks[clk->rsrc]);

	if (!((clk->rsrc == IMX_SC_R_A35) || (clk->rsrc == IMX_SC_R_A53) ||
	    (clk->rsrc == IMX_SC_R_A72))) {
		pm_runtime_mark_last_busy(&pdev->dev);
		pm_runtime_put_autosuspend(&pdev->dev);
	}

	dev_dbg(dev, "register SCU clock rsrc:%d type:%d\n", clk->rsrc,
		clk->clk_type);

	return 0;
}
/*
 * imx_clk_scu_suspend - save SCU clock state before system sleep
 * @dev: clock device
 *
 * Records parent, rate and prepared state so imx_clk_scu_resume() can
 * reprogram them (SCU clock state can be lost when its power domain
 * goes down, per the NOCACHE note in __imx_clk_scu()). CPU clocks
 * (A35/A53/A72) are skipped.
 */
static int __maybe_unused imx_clk_scu_suspend(struct device *dev)
{
	struct clk_scu *clk = dev_get_drvdata(dev);
	u32 rsrc_id = clk->rsrc_id;

	if ((rsrc_id == IMX_SC_R_A35) || (rsrc_id == IMX_SC_R_A53) ||
	    (rsrc_id == IMX_SC_R_A72))
		return 0;

	clk->parent = clk_hw_get_parent(&clk->hw);

	/* DC SS needs to handle bypass clock using non-cached clock rate */
	if (clk->rsrc_id == IMX_SC_R_DC_0_VIDEO0 ||
	    clk->rsrc_id == IMX_SC_R_DC_0_VIDEO1 ||
	    clk->rsrc_id == IMX_SC_R_DC_1_VIDEO0 ||
	    clk->rsrc_id == IMX_SC_R_DC_1_VIDEO1)
		clk->rate = clk_scu_recalc_rate(&clk->hw, 0);
	else
		clk->rate = clk_hw_get_rate(&clk->hw);
	clk->is_enabled = clk_hw_is_prepared(&clk->hw);

	if (clk->parent)
		dev_dbg(dev, "save parent %s idx %u\n", clk_hw_get_name(clk->parent),
			clk->parent_index);

	if (clk->rate)
		dev_dbg(dev, "save rate %d\n", clk->rate);

	if (clk->is_enabled)
		dev_dbg(dev, "save enabled state\n");

	return 0;
}
/*
 * imx_clk_scu_resume - restore SCU clock state after system sleep
 * @dev: clock device
 *
 * Re-applies the parent, rate and prepared state saved by
 * imx_clk_scu_suspend(). CPU clocks (A35/A53/A72) are skipped, and
 * the PI PLL is deliberately not re-prepared here.
 */
static int __maybe_unused imx_clk_scu_resume(struct device *dev)
{
	struct clk_scu *clk = dev_get_drvdata(dev);
	u32 rsrc_id = clk->rsrc_id;
	int ret = 0;

	if ((rsrc_id == IMX_SC_R_A35) || (rsrc_id == IMX_SC_R_A53) ||
	    (rsrc_id == IMX_SC_R_A72))
		return 0;

	if (clk->parent) {
		ret = clk_scu_set_parent(&clk->hw, clk->parent_index);
		dev_dbg(dev, "restore parent %s idx %u %s\n",
			clk_hw_get_name(clk->parent),
			clk->parent_index, !ret ? "success" : "failed");
	}

	if (clk->rate) {
		ret = clk_scu_set_rate(&clk->hw, clk->rate, 0);
		dev_dbg(dev, "restore rate %d %s\n", clk->rate,
			!ret ? "success" : "failed");
	}

	if (clk->is_enabled && rsrc_id != IMX_SC_R_PI_0_PLL) {
		ret = clk_scu_prepare(&clk->hw);
		dev_dbg(dev, "restore enabled state %s\n",
			!ret ? "success" : "failed");
	}

	return ret;
}
/* clock state is saved/restored in the noirq phase of system sleep */
static const struct dev_pm_ops imx_clk_scu_pm_ops = {
	SET_NOIRQ_SYSTEM_SLEEP_PM_OPS(imx_clk_scu_suspend,
				      imx_clk_scu_resume)
};

static struct platform_driver imx_clk_scu_driver = {
	.driver = {
		.name = "imx-scu-clk",
		.suppress_bind_attrs = true,
		.pm = &imx_clk_scu_pm_ops,
	},
	.probe = imx_clk_scu_probe,
};
  555. static int imx_clk_scu_attach_pd(struct device *dev, u32 rsrc_id)
  556. {
  557. struct of_phandle_args genpdspec = {
  558. .np = pd_np,
  559. .args_count = 1,
  560. .args[0] = rsrc_id,
  561. };
  562. if (rsrc_id == IMX_SC_R_A35 || rsrc_id == IMX_SC_R_A53 ||
  563. rsrc_id == IMX_SC_R_A72)
  564. return 0;
  565. return of_genpd_add_device(&genpdspec, dev);
  566. }
  567. static bool imx_clk_is_resource_owned(u32 rsrc)
  568. {
  569. /*
  570. * A-core resources are special. SCFW reports they are not "owned" by
  571. * current partition but linux can still adjust them for cpufreq.
  572. */
  573. if (rsrc == IMX_SC_R_A53 || rsrc == IMX_SC_R_A72 || rsrc == IMX_SC_R_A35)
  574. return true;
  575. return imx_sc_rm_is_resource_owned(ccm_ipc_handle, rsrc);
  576. }
/*
 * imx_clk_scu_alloc_dev - create a platform device for one SCU clock
 * @name: clock name, also used as the platform device name
 * @parents: parent clock names, may be NULL
 * @num_parents: number of entries in @parents
 * @rsrc_id: SCU resource ID of the clock
 * @clk_type: clock type within the resource
 *
 * The clk_hw itself is registered later by imx_clk_scu_probe() when
 * the device binds to the "imx-scu-clk" driver.
 *
 * Return: NULL on success and also when the resource is not owned by
 * this partition; an ERR_PTR otherwise.
 */
struct clk_hw *imx_clk_scu_alloc_dev(const char *name,
				     const char * const *parents,
				     int num_parents, u32 rsrc_id, u8 clk_type)
{
	struct imx_scu_clk_node clk = {
		.name = name,
		.rsrc = rsrc_id,
		.clk_type = clk_type,
		.parents = parents,
		.num_parents = num_parents,
	};
	struct platform_device *pdev;
	int ret;

	if (!imx_scu_clk_is_valid(rsrc_id))
		return ERR_PTR(-EINVAL);

	if (!imx_clk_is_resource_owned(rsrc_id))
		return NULL;

	pdev = platform_device_alloc(name, PLATFORM_DEVID_NONE);
	if (!pdev) {
		pr_err("%s: failed to allocate scu clk dev rsrc %d type %d\n",
		       name, rsrc_id, clk_type);
		return ERR_PTR(-ENOMEM);
	}

	ret = platform_device_add_data(pdev, &clk, sizeof(clk));
	if (ret)
		goto put_device;

	/* force the device to bind to the imx-scu-clk driver */
	ret = driver_set_override(&pdev->dev, &pdev->driver_override,
				  "imx-scu-clk", strlen("imx-scu-clk"));
	if (ret)
		goto put_device;

	/* power-domain attach failure is non-fatal, just warn */
	ret = imx_clk_scu_attach_pd(&pdev->dev, rsrc_id);
	if (ret)
		pr_warn("%s: failed to attached the power domain %d\n",
			name, ret);

	ret = platform_device_add(pdev);
	if (ret)
		goto put_device;

	/* For API backwards compatibility, simply return NULL for success */
	return NULL;

put_device:
	platform_device_put(pdev);
	return ERR_PTR(ret);
}
/*
 * imx_clk_scu_unregister - unregister all SCU clocks
 *
 * Walks every per-resource list, unregisters each clk_hw and frees
 * the list node that tracked it.
 */
void imx_clk_scu_unregister(void)
{
	struct imx_scu_clk_node *clk, *n;
	int i;

	for (i = 0; i < IMX_SC_R_LAST; i++) {
		list_for_each_entry_safe(clk, n, &imx_scu_clks[i], node) {
			clk_hw_unregister(clk->hw);
			kfree(clk);
		}
	}
}
  631. static unsigned long clk_gpr_div_scu_recalc_rate(struct clk_hw *hw,
  632. unsigned long parent_rate)
  633. {
  634. struct clk_gpr_scu *clk = to_clk_gpr_scu(hw);
  635. unsigned long rate = 0;
  636. u32 val;
  637. int err;
  638. err = imx_sc_misc_get_control(ccm_ipc_handle, clk->rsrc_id,
  639. clk->gpr_id, &val);
  640. rate = val ? parent_rate / 2 : parent_rate;
  641. return err ? 0 : rate;
  642. }
  643. static long clk_gpr_div_scu_round_rate(struct clk_hw *hw, unsigned long rate,
  644. unsigned long *prate)
  645. {
  646. if (rate < *prate)
  647. rate = *prate / 2;
  648. else
  649. rate = *prate;
  650. return rate;
  651. }
  652. static int clk_gpr_div_scu_set_rate(struct clk_hw *hw, unsigned long rate,
  653. unsigned long parent_rate)
  654. {
  655. struct clk_gpr_scu *clk = to_clk_gpr_scu(hw);
  656. uint32_t val;
  657. int err;
  658. val = (rate < parent_rate) ? 1 : 0;
  659. err = imx_sc_misc_set_control(ccm_ipc_handle, clk->rsrc_id,
  660. clk->gpr_id, val);
  661. return err ? -EINVAL : 0;
  662. }
/* GPR divider clock: /1 or /2 selected by a single GPR control value */
static const struct clk_ops clk_gpr_div_scu_ops = {
	.recalc_rate = clk_gpr_div_scu_recalc_rate,
	.round_rate = clk_gpr_div_scu_round_rate,
	.set_rate = clk_gpr_div_scu_set_rate,
};
/* Read the mux selector from the GPR; reports index 0 on RPC failure */
static u8 clk_gpr_mux_scu_get_parent(struct clk_hw *hw)
{
	struct clk_gpr_scu *clk = to_clk_gpr_scu(hw);
	u32 val = 0;

	imx_sc_misc_get_control(ccm_ipc_handle, clk->rsrc_id,
				clk->gpr_id, &val);

	return (u8)val;
}

/* Program the mux selector GPR with the new parent index */
static int clk_gpr_mux_scu_set_parent(struct clk_hw *hw, u8 index)
{
	struct clk_gpr_scu *clk = to_clk_gpr_scu(hw);

	return imx_sc_misc_set_control(ccm_ipc_handle, clk->rsrc_id,
				       clk->gpr_id, index);
}

/* GPR mux clock: rate requests never trigger automatic reparenting */
static const struct clk_ops clk_gpr_mux_scu_ops = {
	.determine_rate = clk_hw_determine_rate_no_reparent,
	.get_parent = clk_gpr_mux_scu_get_parent,
	.set_parent = clk_gpr_mux_scu_set_parent,
};
/* Ungate by writing the (possibly inverted) enable value to the GPR */
static int clk_gpr_gate_scu_prepare(struct clk_hw *hw)
{
	struct clk_gpr_scu *clk = to_clk_gpr_scu(hw);

	return imx_sc_misc_set_control(ccm_ipc_handle, clk->rsrc_id,
				       clk->gpr_id, !clk->gate_invert);
}
  693. static void clk_gpr_gate_scu_unprepare(struct clk_hw *hw)
  694. {
  695. struct clk_gpr_scu *clk = to_clk_gpr_scu(hw);
  696. int ret;
  697. ret = imx_sc_misc_set_control(ccm_ipc_handle, clk->rsrc_id,
  698. clk->gpr_id, clk->gate_invert);
  699. if (ret)
  700. pr_err("%s: clk unprepare failed %d\n", clk_hw_get_name(hw),
  701. ret);
  702. }
  703. static int clk_gpr_gate_scu_is_prepared(struct clk_hw *hw)
  704. {
  705. struct clk_gpr_scu *clk = to_clk_gpr_scu(hw);
  706. int ret;
  707. u32 val;
  708. ret = imx_sc_misc_get_control(ccm_ipc_handle, clk->rsrc_id,
  709. clk->gpr_id, &val);
  710. if (ret)
  711. return ret;
  712. return clk->gate_invert ? !val : val;
  713. }
/* GPR gate clock: prepare/unprepare toggle one GPR control value */
static const struct clk_ops clk_gpr_gate_scu_ops = {
	.prepare = clk_gpr_gate_scu_prepare,
	.unprepare = clk_gpr_gate_scu_unprepare,
	.is_prepared = clk_gpr_gate_scu_is_prepared,
};
  719. struct clk_hw *__imx_clk_gpr_scu(const char *name, const char * const *parent_name,
  720. int num_parents, u32 rsrc_id, u8 gpr_id, u8 flags,
  721. bool invert)
  722. {
  723. struct imx_scu_clk_node *clk_node;
  724. struct clk_gpr_scu *clk;
  725. struct clk_hw *hw;
  726. struct clk_init_data init;
  727. int ret;
  728. if (rsrc_id >= IMX_SC_R_LAST || gpr_id >= IMX_SC_C_LAST)
  729. return ERR_PTR(-EINVAL);
  730. clk_node = kzalloc(sizeof(*clk_node), GFP_KERNEL);
  731. if (!clk_node)
  732. return ERR_PTR(-ENOMEM);
  733. if (!imx_scu_clk_is_valid(rsrc_id)) {
  734. kfree(clk_node);
  735. return ERR_PTR(-EINVAL);
  736. }
  737. if (!imx_clk_is_resource_owned(rsrc_id)) {
  738. kfree(clk_node);
  739. return NULL;
  740. }
  741. clk = kzalloc(sizeof(*clk), GFP_KERNEL);
  742. if (!clk) {
  743. kfree(clk_node);
  744. return ERR_PTR(-ENOMEM);
  745. }
  746. clk->rsrc_id = rsrc_id;
  747. clk->gpr_id = gpr_id;
  748. clk->flags = flags;
  749. clk->gate_invert = invert;
  750. if (flags & IMX_SCU_GPR_CLK_GATE)
  751. init.ops = &clk_gpr_gate_scu_ops;
  752. if (flags & IMX_SCU_GPR_CLK_DIV)
  753. init.ops = &clk_gpr_div_scu_ops;
  754. if (flags & IMX_SCU_GPR_CLK_MUX)
  755. init.ops = &clk_gpr_mux_scu_ops;
  756. init.flags = 0;
  757. init.name = name;
  758. init.parent_names = parent_name;
  759. init.num_parents = num_parents;
  760. clk->hw.init = &init;
  761. hw = &clk->hw;
  762. ret = clk_hw_register(NULL, hw);
  763. if (ret) {
  764. kfree(clk);
  765. kfree(clk_node);
  766. hw = ERR_PTR(ret);
  767. } else {
  768. clk_node->hw = hw;
  769. clk_node->clk_type = gpr_id;
  770. list_add_tail(&clk_node->node, &imx_scu_clks[rsrc_id]);
  771. }
  772. return hw;
  773. }