clk-uclass.c 17 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
3704705706707708709710711712713714715716717718719720721722723724725726727728729730731732733734735736737738739740741742743744745746747748749750751752753754755756757758759760761762763764765766767768769770771772773774775776777778779780781782783784785786787788789790791792793794795796797798799800801802803804805806807808809810811812813814815816817818819820821822823824825826827828829830831832833834835836837838839840
  1. // SPDX-License-Identifier: GPL-2.0+
  2. /*
  3. * Copyright (C) 2015 Google, Inc
  4. * Written by Simon Glass <sjg@chromium.org>
  5. * Copyright (c) 2016, NVIDIA CORPORATION.
  6. * Copyright (c) 2018, Theobroma Systems Design und Consulting GmbH
  7. */
  8. #define LOG_CATEGORY UCLASS_CLK
  9. #include <common.h>
  10. #include <clk.h>
  11. #include <clk-uclass.h>
  12. #include <dm.h>
  13. #include <dt-structs.h>
  14. #include <errno.h>
  15. #include <log.h>
  16. #include <malloc.h>
  17. #include <asm/global_data.h>
  18. #include <dm/device_compat.h>
  19. #include <dm/device-internal.h>
  20. #include <dm/devres.h>
  21. #include <dm/read.h>
  22. #include <linux/bug.h>
  23. #include <linux/clk-provider.h>
  24. #include <linux/err.h>
  25. static inline const struct clk_ops *clk_dev_ops(struct udevice *dev)
  26. {
  27. return (const struct clk_ops *)dev->driver->ops;
  28. }
  29. struct clk *dev_get_clk_ptr(struct udevice *dev)
  30. {
  31. return (struct clk *)dev_get_uclass_priv(dev);
  32. }
  33. #if CONFIG_IS_ENABLED(OF_PLATDATA)
  34. int clk_get_by_phandle(struct udevice *dev, const struct phandle_1_arg *cells,
  35. struct clk *clk)
  36. {
  37. int ret;
  38. ret = device_get_by_ofplat_idx(cells->idx, &clk->dev);
  39. if (ret)
  40. return ret;
  41. clk->id = cells->arg[0];
  42. return 0;
  43. }
  44. #endif
  45. #if CONFIG_IS_ENABLED(OF_REAL)
  46. static int clk_of_xlate_default(struct clk *clk,
  47. struct ofnode_phandle_args *args)
  48. {
  49. debug("%s(clk=%p)\n", __func__, clk);
  50. if (args->args_count > 1) {
  51. debug("Invalid args_count: %d\n", args->args_count);
  52. return -EINVAL;
  53. }
  54. if (args->args_count)
  55. clk->id = args->args[0];
  56. else
  57. clk->id = 0;
  58. clk->data = 0;
  59. return 0;
  60. }
/**
 * clk_get_by_index_tail() - turn a parsed clock phandle into a clk handle
 *
 * Shared tail for the clk_get_by_index*() lookups: given the result of a
 * preceding phandle parse (@ret and @args), find the provider device in
 * UCLASS_CLK, translate the phandle arguments into clk->id/clk->data and
 * request the clock from its driver.
 *
 * @ret: result of the phandle parse; if non-zero the lookup is aborted
 * @node: node holding the clock list (used for error reporting only)
 * @args: parsed phandle arguments naming the provider and the clock
 * @list_name: property name that was parsed (error reporting only)
 * @index: index within that property (error reporting only)
 * @clk: output handle; clk->dev is left NULL on failure
 * Return: 0 on success, negative error code otherwise
 */
static int clk_get_by_index_tail(int ret, ofnode node,
				 struct ofnode_phandle_args *args,
				 const char *list_name, int index,
				 struct clk *clk)
{
	struct udevice *dev_clk;
	const struct clk_ops *ops;

	assert(clk);
	clk->dev = NULL;
	if (ret)
		goto err;

	ret = uclass_get_device_by_ofnode(UCLASS_CLK, args->node, &dev_clk);
	if (ret) {
		debug("%s: uclass_get_device_by_of_offset failed: err=%d\n",
		      __func__, ret);
		return log_msg_ret("get", ret);
	}

	clk->dev = dev_clk;

	ops = clk_dev_ops(dev_clk);

	/* Let the provider decode the cells, or use the default decoding */
	if (ops->of_xlate)
		ret = ops->of_xlate(clk, args);
	else
		ret = clk_of_xlate_default(clk, args);
	if (ret) {
		debug("of_xlate() failed: %d\n", ret);
		return log_msg_ret("xlate", ret);
	}

	return clk_request(dev_clk, clk);
err:
	debug("%s: Node '%s', property '%s', failed to request CLK index %d: %d\n",
	      __func__, ofnode_get_name(node), list_name, index, ret);
	return log_msg_ret("prop", ret);
}
  94. static int clk_get_by_indexed_prop(struct udevice *dev, const char *prop_name,
  95. int index, struct clk *clk)
  96. {
  97. int ret;
  98. struct ofnode_phandle_args args;
  99. debug("%s(dev=%p, index=%d, clk=%p)\n", __func__, dev, index, clk);
  100. assert(clk);
  101. clk->dev = NULL;
  102. ret = dev_read_phandle_with_args(dev, prop_name, "#clock-cells", 0,
  103. index, &args);
  104. if (ret) {
  105. debug("%s: fdtdec_parse_phandle_with_args failed: err=%d\n",
  106. __func__, ret);
  107. return log_ret(ret);
  108. }
  109. return clk_get_by_index_tail(ret, dev_ofnode(dev), &args, "clocks",
  110. index, clk);
  111. }
  112. int clk_get_by_index(struct udevice *dev, int index, struct clk *clk)
  113. {
  114. return clk_get_by_index_nodev(dev_ofnode(dev), index, clk);
  115. }
  116. int clk_get_by_index_nodev(ofnode node, int index, struct clk *clk)
  117. {
  118. struct ofnode_phandle_args args;
  119. int ret;
  120. ret = ofnode_parse_phandle_with_args(node, "clocks", "#clock-cells", 0,
  121. index, &args);
  122. return clk_get_by_index_tail(ret, node, &args, "clocks",
  123. index, clk);
  124. }
  125. int clk_get_bulk(struct udevice *dev, struct clk_bulk *bulk)
  126. {
  127. int i, ret, err, count;
  128. bulk->count = 0;
  129. count = dev_count_phandle_with_args(dev, "clocks", "#clock-cells", 0);
  130. if (count < 1)
  131. return count;
  132. bulk->clks = devm_kcalloc(dev, count, sizeof(struct clk), GFP_KERNEL);
  133. if (!bulk->clks)
  134. return -ENOMEM;
  135. for (i = 0; i < count; i++) {
  136. ret = clk_get_by_index(dev, i, &bulk->clks[i]);
  137. if (ret < 0)
  138. goto bulk_get_err;
  139. ++bulk->count;
  140. }
  141. return 0;
  142. bulk_get_err:
  143. err = clk_release_all(bulk->clks, bulk->count);
  144. if (err)
  145. debug("%s: could release all clocks for %p\n",
  146. __func__, dev);
  147. return ret;
  148. }
  149. static struct clk *clk_set_default_get_by_id(struct clk *clk)
  150. {
  151. struct clk *c = clk;
  152. if (CONFIG_IS_ENABLED(CLK_CCF)) {
  153. int ret = clk_get_by_id(clk->id, &c);
  154. if (ret) {
  155. debug("%s(): could not get parent clock pointer, id %lu\n",
  156. __func__, clk->id);
  157. ERR_PTR(ret);
  158. }
  159. }
  160. return c;
  161. }
/**
 * clk_set_default_parents() - apply "assigned-clock-parents" from the DT
 *
 * For each entry in @dev's "assigned-clock-parents" property, reparent the
 * matching "assigned-clocks" entry to it using clk_set_parent().
 *
 * @dev: device whose node carries the assigned-clock properties
 * @stage: pre/post-probe stage; entries whose clock is provided by @dev
 *         itself are only processed in the matching stage (a provider
 *         cannot reparent itself before it is probed)
 * Return: 0 on success (including when the property is absent or a driver
 *         lacks reparenting support), negative error code otherwise
 */
static int clk_set_default_parents(struct udevice *dev,
				   enum clk_defaults_stage stage)
{
	struct clk clk, parent_clk, *c, *p;
	int index;
	int num_parents;
	int ret;

	num_parents = dev_count_phandle_with_args(dev, "assigned-clock-parents",
						  "#clock-cells", 0);
	if (num_parents < 0) {
		/* Property missing/unreadable: nothing to do, not an error */
		debug("%s: could not read assigned-clock-parents for %p\n",
		      __func__, dev);
		return 0;
	}

	for (index = 0; index < num_parents; index++) {
		ret = clk_get_by_indexed_prop(dev, "assigned-clock-parents",
					      index, &parent_clk);
		/* If -ENOENT, this is a no-op entry */
		if (ret == -ENOENT)
			continue;

		if (ret) {
			debug("%s: could not get parent clock %d for %s\n",
			      __func__, index, dev_read_name(dev));
			return ret;
		}

		p = clk_set_default_get_by_id(&parent_clk);
		if (IS_ERR(p))
			return PTR_ERR(p);

		ret = clk_get_by_indexed_prop(dev, "assigned-clocks",
					      index, &clk);
		/*
		 * If the clock provider is not ready yet, let it handle
		 * the re-programming later.
		 */
		if (ret == -EPROBE_DEFER) {
			ret = 0;
			continue;
		}

		if (ret) {
			debug("%s: could not get assigned clock %d for %s\n",
			      __func__, index, dev_read_name(dev));
			return ret;
		}

		/* This is clk provider device trying to reparent itself
		 * It cannot be done right now but need to wait after the
		 * device is probed
		 */
		if (stage == CLK_DEFAULTS_PRE && clk.dev == dev)
			continue;

		if (stage != CLK_DEFAULTS_PRE && clk.dev != dev)
			/* do not setup twice the parent clocks */
			continue;

		c = clk_set_default_get_by_id(&clk);
		if (IS_ERR(c))
			return PTR_ERR(c);

		ret = clk_set_parent(c, p);

		/*
		 * Not all drivers may support clock-reparenting (as of now).
		 * Ignore errors due to this.
		 */
		if (ret == -ENOSYS)
			continue;

		if (ret < 0) {
			debug("%s: failed to reparent clock %d for %s\n",
			      __func__, index, dev_read_name(dev));
			return ret;
		}
	}

	return 0;
}
  232. static int clk_set_default_rates(struct udevice *dev,
  233. enum clk_defaults_stage stage)
  234. {
  235. struct clk clk, *c;
  236. int index;
  237. int num_rates;
  238. int size;
  239. int ret = 0;
  240. u32 *rates = NULL;
  241. size = dev_read_size(dev, "assigned-clock-rates");
  242. if (size < 0)
  243. return 0;
  244. num_rates = size / sizeof(u32);
  245. rates = calloc(num_rates, sizeof(u32));
  246. if (!rates)
  247. return -ENOMEM;
  248. ret = dev_read_u32_array(dev, "assigned-clock-rates", rates, num_rates);
  249. if (ret)
  250. goto fail;
  251. for (index = 0; index < num_rates; index++) {
  252. /* If 0 is passed, this is a no-op */
  253. if (!rates[index])
  254. continue;
  255. ret = clk_get_by_indexed_prop(dev, "assigned-clocks",
  256. index, &clk);
  257. /*
  258. * If the clock provider is not ready yet, let it handle
  259. * the re-programming later.
  260. */
  261. if (ret == -EPROBE_DEFER) {
  262. ret = 0;
  263. continue;
  264. }
  265. if (ret) {
  266. dev_dbg(dev,
  267. "could not get assigned clock %d (err = %d)\n",
  268. index, ret);
  269. continue;
  270. }
  271. /* This is clk provider device trying to program itself
  272. * It cannot be done right now but need to wait after the
  273. * device is probed
  274. */
  275. if (stage == CLK_DEFAULTS_PRE && clk.dev == dev)
  276. continue;
  277. if (stage != CLK_DEFAULTS_PRE && clk.dev != dev)
  278. /* do not setup twice the parent clocks */
  279. continue;
  280. c = clk_set_default_get_by_id(&clk);
  281. if (IS_ERR(c))
  282. return PTR_ERR(c);
  283. ret = clk_set_rate(c, rates[index]);
  284. if (ret < 0) {
  285. dev_warn(dev,
  286. "failed to set rate on clock index %d (%ld) (error = %d)\n",
  287. index, clk.id, ret);
  288. break;
  289. }
  290. }
  291. fail:
  292. free(rates);
  293. return ret;
  294. }
  295. int clk_set_defaults(struct udevice *dev, enum clk_defaults_stage stage)
  296. {
  297. int ret;
  298. if (!dev_has_ofnode(dev))
  299. return 0;
  300. /*
  301. * To avoid setting defaults twice, don't set them before relocation.
  302. * However, still set them for SPL. And still set them if explicitly
  303. * asked.
  304. */
  305. if (!(IS_ENABLED(CONFIG_SPL_BUILD) || (gd->flags & GD_FLG_RELOC)))
  306. if (stage != CLK_DEFAULTS_POST_FORCE)
  307. return 0;
  308. debug("%s(%s)\n", __func__, dev_read_name(dev));
  309. ret = clk_set_default_parents(dev, stage);
  310. if (ret)
  311. return ret;
  312. ret = clk_set_default_rates(dev, stage);
  313. if (ret < 0)
  314. return ret;
  315. return 0;
  316. }
  317. int clk_get_by_name(struct udevice *dev, const char *name, struct clk *clk)
  318. {
  319. return clk_get_by_name_nodev(dev_ofnode(dev), name, clk);
  320. }
  321. #endif /* OF_REAL */
  322. int clk_get_by_name_nodev(ofnode node, const char *name, struct clk *clk)
  323. {
  324. int index = 0;
  325. debug("%s(node=%p, name=%s, clk=%p)\n", __func__,
  326. ofnode_get_name(node), name, clk);
  327. clk->dev = NULL;
  328. if (name) {
  329. index = ofnode_stringlist_search(node, "clock-names", name);
  330. if (index < 0) {
  331. debug("fdt_stringlist_search() failed: %d\n", index);
  332. return index;
  333. }
  334. }
  335. return clk_get_by_index_nodev(node, index, clk);
  336. }
  337. int clk_release_all(struct clk *clk, int count)
  338. {
  339. int i, ret;
  340. for (i = 0; i < count; i++) {
  341. debug("%s(clk[%d]=%p)\n", __func__, i, &clk[i]);
  342. /* check if clock has been previously requested */
  343. if (!clk[i].dev)
  344. continue;
  345. ret = clk_disable(&clk[i]);
  346. if (ret && ret != -ENOSYS)
  347. return ret;
  348. clk_free(&clk[i]);
  349. }
  350. return 0;
  351. }
  352. int clk_request(struct udevice *dev, struct clk *clk)
  353. {
  354. const struct clk_ops *ops;
  355. debug("%s(dev=%p, clk=%p)\n", __func__, dev, clk);
  356. if (!clk)
  357. return 0;
  358. ops = clk_dev_ops(dev);
  359. clk->dev = dev;
  360. if (!ops->request)
  361. return 0;
  362. return ops->request(clk);
  363. }
  364. void clk_free(struct clk *clk)
  365. {
  366. const struct clk_ops *ops;
  367. debug("%s(clk=%p)\n", __func__, clk);
  368. if (!clk_valid(clk))
  369. return;
  370. ops = clk_dev_ops(clk->dev);
  371. if (ops->rfree)
  372. ops->rfree(clk);
  373. return;
  374. }
  375. ulong clk_get_rate(struct clk *clk)
  376. {
  377. const struct clk_ops *ops;
  378. int ret;
  379. debug("%s(clk=%p)\n", __func__, clk);
  380. if (!clk_valid(clk))
  381. return 0;
  382. ops = clk_dev_ops(clk->dev);
  383. if (!ops->get_rate)
  384. return -ENOSYS;
  385. ret = ops->get_rate(clk);
  386. if (ret)
  387. return log_ret(ret);
  388. return 0;
  389. }
  390. struct clk *clk_get_parent(struct clk *clk)
  391. {
  392. struct udevice *pdev;
  393. struct clk *pclk;
  394. debug("%s(clk=%p)\n", __func__, clk);
  395. if (!clk_valid(clk))
  396. return NULL;
  397. pdev = dev_get_parent(clk->dev);
  398. if (!pdev)
  399. return ERR_PTR(-ENODEV);
  400. pclk = dev_get_clk_ptr(pdev);
  401. if (!pclk)
  402. return ERR_PTR(-ENODEV);
  403. return pclk;
  404. }
/**
 * clk_get_parent_rate() - get the rate of @clk's parent clock
 *
 * Looks up the parent with clk_get_parent() and returns its rate. The
 * rate is cached in pclk->rate and only re-read when it is still unset
 * or the parent carries the CLK_GET_RATE_NOCACHE flag.
 *
 * @clk: clock whose parent rate is wanted (an invalid clk returns 0)
 * Return: parent rate in Hz, -ENODEV if there is no parent clk, or
 *         -ENOSYS if the parent driver has no get_rate op
 */
ulong clk_get_parent_rate(struct clk *clk)
{
	const struct clk_ops *ops;
	struct clk *pclk;

	debug("%s(clk=%p)\n", __func__, clk);
	if (!clk_valid(clk))
		return 0;

	pclk = clk_get_parent(clk);
	if (IS_ERR(pclk))
		return -ENODEV;

	ops = clk_dev_ops(pclk->dev);
	if (!ops->get_rate)
		return -ENOSYS;

	/* Read the rate if not already cached, or if caching is disabled */
	if (!pclk->rate || pclk->flags & CLK_GET_RATE_NOCACHE)
		pclk->rate = clk_get_rate(pclk);

	return pclk->rate;
}
  423. ulong clk_round_rate(struct clk *clk, ulong rate)
  424. {
  425. const struct clk_ops *ops;
  426. debug("%s(clk=%p, rate=%lu)\n", __func__, clk, rate);
  427. if (!clk_valid(clk))
  428. return 0;
  429. ops = clk_dev_ops(clk->dev);
  430. if (!ops->round_rate)
  431. return -ENOSYS;
  432. return ops->round_rate(clk, rate);
  433. }
  434. static void clk_get_priv(struct clk *clk, struct clk **clkp)
  435. {
  436. *clkp = clk;
  437. /* get private clock struct associated to the provided clock */
  438. if (CONFIG_IS_ENABLED(CLK_CCF)) {
  439. /* Take id 0 as a non-valid clk, such as dummy */
  440. if (clk->id)
  441. clk_get_by_id(clk->id, clkp);
  442. }
  443. }
  444. /* clean cache, called with private clock struct */
  445. static void clk_clean_rate_cache(struct clk *clk)
  446. {
  447. struct udevice *child_dev;
  448. struct clk *clkp;
  449. if (!clk)
  450. return;
  451. clk->rate = 0;
  452. list_for_each_entry(child_dev, &clk->dev->child_head, sibling_node) {
  453. clkp = dev_get_clk_ptr(child_dev);
  454. clk_clean_rate_cache(clkp);
  455. }
  456. }
  457. ulong clk_set_rate(struct clk *clk, ulong rate)
  458. {
  459. const struct clk_ops *ops;
  460. struct clk *clkp;
  461. debug("%s(clk=%p, rate=%lu)\n", __func__, clk, rate);
  462. if (!clk_valid(clk))
  463. return 0;
  464. ops = clk_dev_ops(clk->dev);
  465. if (!ops->set_rate)
  466. return -ENOSYS;
  467. /* get private clock struct used for cache */
  468. clk_get_priv(clk, &clkp);
  469. /* Clean up cached rates for us and all child clocks */
  470. clk_clean_rate_cache(clkp);
  471. return ops->set_rate(clk, rate);
  472. }
  473. int clk_set_parent(struct clk *clk, struct clk *parent)
  474. {
  475. const struct clk_ops *ops;
  476. int ret;
  477. debug("%s(clk=%p, parent=%p)\n", __func__, clk, parent);
  478. if (!clk_valid(clk))
  479. return 0;
  480. ops = clk_dev_ops(clk->dev);
  481. if (!ops->set_parent)
  482. return -ENOSYS;
  483. ret = ops->set_parent(clk, parent);
  484. if (ret)
  485. return ret;
  486. if (CONFIG_IS_ENABLED(CLK_CCF))
  487. ret = device_reparent(clk->dev, parent->dev);
  488. return ret;
  489. }
/**
 * clk_enable() - enable a clock
 *
 * Without CLK_CCF this simply forwards to the driver's enable op. With
 * CLK_CCF the uclass keeps an enable refcount on the canonical clk
 * struct (looked up by id): an already-enabled clock just gets its count
 * bumped, and a first enable first enables the parent clock device (if
 * the provider's DM parent is also in UCLASS_CLK).
 *
 * @clk: clock to enable (an invalid clk is treated as success)
 * Return: 0 on success, -ENOSYS if non-CCF and the driver has no enable
 *         op, or a negative error code from the driver
 */
int clk_enable(struct clk *clk)
{
	const struct clk_ops *ops;
	struct clk *clkp = NULL;
	int ret;

	debug("%s(clk=%p)\n", __func__, clk);
	if (!clk_valid(clk))
		return 0;
	ops = clk_dev_ops(clk->dev);

	if (CONFIG_IS_ENABLED(CLK_CCF)) {
		/* Take id 0 as a non-valid clk, such as dummy */
		if (clk->id && !clk_get_by_id(clk->id, &clkp)) {
			/* Already enabled: just take another reference */
			if (clkp->enable_count) {
				clkp->enable_count++;
				return 0;
			}
			/* First enable: bring up the parent clock first */
			if (clkp->dev->parent &&
			    device_get_uclass_id(clkp->dev->parent) == UCLASS_CLK) {
				ret = clk_enable(dev_get_clk_ptr(clkp->dev->parent));
				if (ret) {
					printf("Enable %s failed\n",
					       clkp->dev->parent->name);
					return ret;
				}
			}
		}

		if (ops->enable) {
			ret = ops->enable(clk);
			if (ret) {
				printf("Enable %s failed\n", clk->dev->name);
				return ret;
			}
		}
		/* Count the reference only once the hardware enable worked */
		if (clkp)
			clkp->enable_count++;
	} else {
		if (!ops->enable)
			return -ENOSYS;
		return ops->enable(clk);
	}

	return 0;
}
  532. int clk_enable_bulk(struct clk_bulk *bulk)
  533. {
  534. int i, ret;
  535. for (i = 0; i < bulk->count; i++) {
  536. ret = clk_enable(&bulk->clks[i]);
  537. if (ret < 0 && ret != -ENOSYS)
  538. return ret;
  539. }
  540. return 0;
  541. }
/**
 * clk_disable() - disable a clock
 *
 * Without CLK_CCF this simply forwards to the driver's disable op. With
 * CLK_CCF the uclass drops one reference from the enable refcount on the
 * canonical clk struct and only disables the hardware when the count
 * reaches zero, then releases the reference held on the parent clock
 * device. Clocks flagged CLK_IS_CRITICAL are never disabled.
 *
 * @clk: clock to disable (an invalid clk is treated as success)
 * Return: 0 on success, -ENOSYS if non-CCF and the driver has no disable
 *         op, or a negative error code from the driver
 */
int clk_disable(struct clk *clk)
{
	const struct clk_ops *ops;
	struct clk *clkp = NULL;
	int ret;

	debug("%s(clk=%p)\n", __func__, clk);
	if (!clk_valid(clk))
		return 0;
	ops = clk_dev_ops(clk->dev);

	if (CONFIG_IS_ENABLED(CLK_CCF)) {
		if (clk->id && !clk_get_by_id(clk->id, &clkp)) {
			/* Critical clocks must stay running */
			if (clkp->flags & CLK_IS_CRITICAL)
				return 0;
			if (clkp->enable_count == 0) {
				printf("clk %s already disabled\n",
				       clkp->dev->name);
				return 0;
			}

			/* Still referenced elsewhere: keep it enabled */
			if (--clkp->enable_count > 0)
				return 0;
		}

		if (ops->disable) {
			ret = ops->disable(clk);
			if (ret)
				return ret;
		}

		/* Drop our reference on the parent clock, if any */
		if (clkp && clkp->dev->parent &&
		    device_get_uclass_id(clkp->dev->parent) == UCLASS_CLK) {
			ret = clk_disable(dev_get_clk_ptr(clkp->dev->parent));
			if (ret) {
				printf("Disable %s failed\n",
				       clkp->dev->parent->name);
				return ret;
			}
		}
	} else {
		if (!ops->disable)
			return -ENOSYS;

		return ops->disable(clk);
	}

	return 0;
}
  584. int clk_disable_bulk(struct clk_bulk *bulk)
  585. {
  586. int i, ret;
  587. for (i = 0; i < bulk->count; i++) {
  588. ret = clk_disable(&bulk->clks[i]);
  589. if (ret < 0 && ret != -ENOSYS)
  590. return ret;
  591. }
  592. return 0;
  593. }
/**
 * clk_get_by_id() - find the clk struct registered with a given id
 *
 * Walks every device in UCLASS_CLK and returns the uclass-private clk
 * whose ->id matches @id.
 *
 * @id: clock id to search for
 * @clkp: on success, set to the matching clk struct
 * Return: 0 if found, -ENOENT if no clock carries this id, or the error
 *         from uclass_get()
 */
int clk_get_by_id(ulong id, struct clk **clkp)
{
	struct udevice *dev;
	struct uclass *uc;
	int ret;

	ret = uclass_get(UCLASS_CLK, &uc);
	if (ret)
		return ret;

	uclass_foreach_dev(dev, uc) {
		struct clk *clk = dev_get_clk_ptr(dev);

		if (clk && clk->id == id) {
			*clkp = clk;
			return 0;
		}
	}

	return -ENOENT;
}
  611. bool clk_is_match(const struct clk *p, const struct clk *q)
  612. {
  613. /* trivial case: identical struct clk's or both NULL */
  614. if (p == q)
  615. return true;
  616. /* trivial case #2: on the clk pointer is NULL */
  617. if (!p || !q)
  618. return false;
  619. /* same device, id and data */
  620. if (p->dev == q->dev && p->id == q->id && p->data == q->data)
  621. return true;
  622. return false;
  623. }
/* devres destructor: free the managed clock when its device goes away */
static void devm_clk_release(struct udevice *dev, void *res)
{
	clk_free(res);
}
/* devres match callback: true when @res is the clk pointer in @data */
static int devm_clk_match(struct udevice *dev, void *res, void *data)
{
	return res == data;
}
  632. struct clk *devm_clk_get(struct udevice *dev, const char *id)
  633. {
  634. int rc;
  635. struct clk *clk;
  636. clk = devres_alloc(devm_clk_release, sizeof(struct clk), __GFP_ZERO);
  637. if (unlikely(!clk))
  638. return ERR_PTR(-ENOMEM);
  639. rc = clk_get_by_name(dev, id, clk);
  640. if (rc)
  641. return ERR_PTR(rc);
  642. devres_add(dev, clk);
  643. return clk;
  644. }
/**
 * devm_clk_put() - release a managed clock before the device is removed
 *
 * Finds the devres entry holding @clk (registered by devm_clk_get()) and
 * releases it, which invokes devm_clk_release() on the clock. A NULL
 * @clk is a no-op; a failed lookup only triggers WARN_ON().
 *
 * @dev: device the clock was obtained for
 * @clk: clock returned by devm_clk_get(), or NULL
 */
void devm_clk_put(struct udevice *dev, struct clk *clk)
{
	int rc;

	if (!clk)
		return;

	rc = devres_release(dev, devm_clk_release, devm_clk_match, clk);
	WARN_ON(rc);
}
  653. int clk_uclass_post_probe(struct udevice *dev)
  654. {
  655. /*
  656. * when a clock provider is probed. Call clk_set_defaults()
  657. * also after the device is probed. This takes care of cases
  658. * where the DT is used to setup default parents and rates
  659. * using assigned-clocks
  660. */
  661. clk_set_defaults(dev, CLK_DEFAULTS_POST);
  662. return 0;
  663. }
/* Uclass driver for clock providers; re-applies DT defaults post-probe */
UCLASS_DRIVER(clk) = {
	.id = UCLASS_CLK,
	.name = "clk",
	.post_probe = clk_uclass_post_probe,
};