clk-rpm.c

// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2016, Linaro Limited
 * Copyright (c) 2014, The Linux Foundation. All rights reserved.
 */

#include <linux/clk-provider.h>
#include <linux/err.h>
#include <linux/export.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/mfd/qcom_rpm.h>
#include <linux/of.h>
#include <linux/platform_device.h>

#include <dt-bindings/mfd/qcom-rpm.h>
#include <dt-bindings/clock/qcom,rpmcc.h>

#define QCOM_RPM_MISC_CLK_TYPE		0x306b6c63
#define QCOM_RPM_SCALING_ENABLE_ID	0x2
#define QCOM_RPM_XO_MODE_ON		0x2

static const struct clk_parent_data gcc_pxo[] = {
	{ .fw_name = "pxo", .name = "pxo_board" },
};

static const struct clk_parent_data gcc_cxo[] = {
	{ .fw_name = "cxo", .name = "cxo_board" },
};
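
/*
 * Most RPM clocks come in pairs: a "normal" clock that votes in both the
 * active and sleep RPM sets, and an "_a" active-only peer that votes for
 * zero in the sleep set. Both vote on the same RPM resource, so requests
 * from the two peers are aggregated (max of the enabled votes) before
 * being sent to the RPM.
 */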
#define DEFINE_CLK_RPM(_name, r_id) \
	static struct clk_rpm clk_rpm_##_name##_a_clk; \
	static struct clk_rpm clk_rpm_##_name##_clk = { \
		.rpm_clk_id = (r_id), \
		.peer = &clk_rpm_##_name##_a_clk, \
		.rate = INT_MAX, \
		.hw.init = &(struct clk_init_data){ \
			.ops = &clk_rpm_ops, \
			.name = #_name "_clk", \
			.parent_data = gcc_pxo, \
			.num_parents = ARRAY_SIZE(gcc_pxo), \
		}, \
	}; \
	static struct clk_rpm clk_rpm_##_name##_a_clk = { \
		.rpm_clk_id = (r_id), \
		.peer = &clk_rpm_##_name##_clk, \
		.active_only = true, \
		.rate = INT_MAX, \
		.hw.init = &(struct clk_init_data){ \
			.ops = &clk_rpm_ops, \
			.name = #_name "_a_clk", \
			.parent_data = gcc_pxo, \
			.num_parents = ARRAY_SIZE(gcc_pxo), \
		}, \
	}
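
/*
 * The XO buffer clocks all share the single QCOM_RPM_CXO_BUFFERS resource;
 * each buffer is switched on by writing QCOM_RPM_XO_MODE_ON at its bit
 * offset into the shared request word (see clk_rpm_xo_prepare()).
 */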
#define DEFINE_CLK_RPM_XO_BUFFER(_name, offset) \
	static struct clk_rpm clk_rpm_##_name##_clk = { \
		.rpm_clk_id = QCOM_RPM_CXO_BUFFERS, \
		.xo_offset = (offset), \
		.hw.init = &(struct clk_init_data){ \
			.ops = &clk_rpm_xo_ops, \
			.name = #_name "_clk", \
			.parent_data = gcc_cxo, \
			.num_parents = ARRAY_SIZE(gcc_cxo), \
		}, \
	}

#define DEFINE_CLK_RPM_FIXED(_name, r_id, r) \
	static struct clk_rpm clk_rpm_##_name##_clk = { \
		.rpm_clk_id = (r_id), \
		.rate = (r), \
		.hw.init = &(struct clk_init_data){ \
			.ops = &clk_rpm_fixed_ops, \
			.name = #_name "_clk", \
			.parent_data = gcc_pxo, \
			.num_parents = ARRAY_SIZE(gcc_pxo), \
		}, \
	}

#define to_clk_rpm(_hw) container_of(_hw, struct clk_rpm, hw)

struct rpm_cc;

struct clk_rpm {
	const int rpm_clk_id;
	const int xo_offset;
	const bool active_only;
	unsigned long rate;
	bool enabled;
	bool branch;
	struct clk_rpm *peer;
	struct clk_hw hw;
	struct qcom_rpm *rpm;
	struct rpm_cc *rpm_cc;
};

struct rpm_cc {
	struct clk_rpm **clks;
	size_t num_clks;
	u32 xo_buffer_value;
	struct mutex xo_lock;
};

struct rpm_clk_desc {
	struct clk_rpm **clks;
	size_t num_clks;
};
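
/* Serializes active/sleep-set voting between a clock and its peer. */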
static DEFINE_MUTEX(rpm_clk_lock);

static int clk_rpm_handoff(struct clk_rpm *r)
{
	int ret;
	u32 value = INT_MAX;

	/*
	 * The vendor tree simply reads the status for this
	 * RPM clock.
	 */
	if (r->rpm_clk_id == QCOM_RPM_PLL_4 ||
	    r->rpm_clk_id == QCOM_RPM_CXO_BUFFERS)
		return 0;

	ret = qcom_rpm_write(r->rpm, QCOM_RPM_ACTIVE_STATE,
			     r->rpm_clk_id, &value, 1);
	if (ret)
		return ret;
	ret = qcom_rpm_write(r->rpm, QCOM_RPM_SLEEP_STATE,
			     r->rpm_clk_id, &value, 1);
	if (ret)
		return ret;

	return 0;
}

static int clk_rpm_set_rate_active(struct clk_rpm *r, unsigned long rate)
{
	u32 value = DIV_ROUND_UP(rate, 1000); /* to kHz */

	return qcom_rpm_write(r->rpm, QCOM_RPM_ACTIVE_STATE,
			      r->rpm_clk_id, &value, 1);
}

static int clk_rpm_set_rate_sleep(struct clk_rpm *r, unsigned long rate)
{
	u32 value = DIV_ROUND_UP(rate, 1000); /* to kHz */

	return qcom_rpm_write(r->rpm, QCOM_RPM_SLEEP_STATE,
			      r->rpm_clk_id, &value, 1);
}

static void to_active_sleep(struct clk_rpm *r, unsigned long rate,
			    unsigned long *active, unsigned long *sleep)
{
	*active = rate;

	/*
	 * Active-only clocks don't care what the rate is during sleep. So,
	 * they vote for zero.
	 */
	if (r->active_only)
		*sleep = 0;
	else
		*sleep = *active;
}
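
/*
 * Preparing a clock sends the aggregated rate (max of this clock and its
 * enabled peer) to both the active and sleep sets; branch clocks only
 * send a 0/1 enable vote.
 */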
static int clk_rpm_prepare(struct clk_hw *hw)
{
	struct clk_rpm *r = to_clk_rpm(hw);
	struct clk_rpm *peer = r->peer;
	unsigned long this_rate = 0, this_sleep_rate = 0;
	unsigned long peer_rate = 0, peer_sleep_rate = 0;
	unsigned long active_rate, sleep_rate;
	int ret = 0;

	mutex_lock(&rpm_clk_lock);

	/* Don't send requests to the RPM if the rate has not been set. */
	if (!r->rate)
		goto out;

	to_active_sleep(r, r->rate, &this_rate, &this_sleep_rate);

	/* Take peer clock's rate into account only if it's enabled. */
	if (peer->enabled)
		to_active_sleep(peer, peer->rate,
				&peer_rate, &peer_sleep_rate);

	active_rate = max(this_rate, peer_rate);

	if (r->branch)
		active_rate = !!active_rate;

	ret = clk_rpm_set_rate_active(r, active_rate);
	if (ret)
		goto out;

	sleep_rate = max(this_sleep_rate, peer_sleep_rate);
	if (r->branch)
		sleep_rate = !!sleep_rate;

	ret = clk_rpm_set_rate_sleep(r, sleep_rate);
	if (ret)
		/* Undo the active set vote and restore it */
		ret = clk_rpm_set_rate_active(r, peer_rate);

out:
	if (!ret)
		r->enabled = true;

	mutex_unlock(&rpm_clk_lock);

	return ret;
}

static void clk_rpm_unprepare(struct clk_hw *hw)
{
	struct clk_rpm *r = to_clk_rpm(hw);
	struct clk_rpm *peer = r->peer;
	unsigned long peer_rate = 0, peer_sleep_rate = 0;
	unsigned long active_rate, sleep_rate;
	int ret;

	mutex_lock(&rpm_clk_lock);

	if (!r->rate)
		goto out;

	/* Take peer clock's rate into account only if it's enabled. */
	if (peer->enabled)
		to_active_sleep(peer, peer->rate, &peer_rate,
				&peer_sleep_rate);

	active_rate = r->branch ? !!peer_rate : peer_rate;
	ret = clk_rpm_set_rate_active(r, active_rate);
	if (ret)
		goto out;

	sleep_rate = r->branch ? !!peer_sleep_rate : peer_sleep_rate;
	ret = clk_rpm_set_rate_sleep(r, sleep_rate);
	if (ret)
		goto out;

	r->enabled = false;

out:
	mutex_unlock(&rpm_clk_lock);
}

static int clk_rpm_xo_prepare(struct clk_hw *hw)
{
	struct clk_rpm *r = to_clk_rpm(hw);
	struct rpm_cc *rcc = r->rpm_cc;
	int ret, clk_id = r->rpm_clk_id;
	u32 value;

	mutex_lock(&rcc->xo_lock);

	value = rcc->xo_buffer_value | (QCOM_RPM_XO_MODE_ON << r->xo_offset);
	ret = qcom_rpm_write(r->rpm, QCOM_RPM_ACTIVE_STATE, clk_id, &value, 1);
	if (!ret) {
		r->enabled = true;
		rcc->xo_buffer_value = value;
	}

	mutex_unlock(&rcc->xo_lock);

	return ret;
}

static void clk_rpm_xo_unprepare(struct clk_hw *hw)
{
	struct clk_rpm *r = to_clk_rpm(hw);
	struct rpm_cc *rcc = r->rpm_cc;
	int ret, clk_id = r->rpm_clk_id;
	u32 value;

	mutex_lock(&rcc->xo_lock);

	value = rcc->xo_buffer_value & ~(QCOM_RPM_XO_MODE_ON << r->xo_offset);
	ret = qcom_rpm_write(r->rpm, QCOM_RPM_ACTIVE_STATE, clk_id, &value, 1);
	if (!ret) {
		r->enabled = false;
		rcc->xo_buffer_value = value;
	}

	mutex_unlock(&rcc->xo_lock);
}

static int clk_rpm_fixed_prepare(struct clk_hw *hw)
{
	struct clk_rpm *r = to_clk_rpm(hw);
	u32 value = 1;
	int ret;

	ret = qcom_rpm_write(r->rpm, QCOM_RPM_ACTIVE_STATE,
			     r->rpm_clk_id, &value, 1);
	if (!ret)
		r->enabled = true;

	return ret;
}

static void clk_rpm_fixed_unprepare(struct clk_hw *hw)
{
	struct clk_rpm *r = to_clk_rpm(hw);
	u32 value = 0;
	int ret;

	ret = qcom_rpm_write(r->rpm, QCOM_RPM_ACTIVE_STATE,
			     r->rpm_clk_id, &value, 1);
	if (!ret)
		r->enabled = false;
}

static int clk_rpm_set_rate(struct clk_hw *hw,
			    unsigned long rate, unsigned long parent_rate)
{
	struct clk_rpm *r = to_clk_rpm(hw);
	struct clk_rpm *peer = r->peer;
	unsigned long active_rate, sleep_rate;
	unsigned long this_rate = 0, this_sleep_rate = 0;
	unsigned long peer_rate = 0, peer_sleep_rate = 0;
	int ret = 0;

	mutex_lock(&rpm_clk_lock);

	if (!r->enabled)
		goto out;

	to_active_sleep(r, rate, &this_rate, &this_sleep_rate);

	/* Take peer clock's rate into account only if it's enabled. */
	if (peer->enabled)
		to_active_sleep(peer, peer->rate,
				&peer_rate, &peer_sleep_rate);

	active_rate = max(this_rate, peer_rate);
	ret = clk_rpm_set_rate_active(r, active_rate);
	if (ret)
		goto out;

	sleep_rate = max(this_sleep_rate, peer_sleep_rate);
	ret = clk_rpm_set_rate_sleep(r, sleep_rate);
	if (ret)
		goto out;

	r->rate = rate;

out:
	mutex_unlock(&rpm_clk_lock);

	return ret;
}

static long clk_rpm_round_rate(struct clk_hw *hw, unsigned long rate,
			       unsigned long *parent_rate)
{
	/*
	 * RPM handles rate rounding and we don't have a way to
	 * know what the rate will be, so just return whatever
	 * rate is requested.
	 */
	return rate;
}

static unsigned long clk_rpm_recalc_rate(struct clk_hw *hw,
					 unsigned long parent_rate)
{
	struct clk_rpm *r = to_clk_rpm(hw);

	/*
	 * RPM handles rate rounding and we don't have a way to
	 * know what the rate will be, so just return whatever
	 * rate was set.
	 */
	return r->rate;
}

static const struct clk_ops clk_rpm_xo_ops = {
	.prepare = clk_rpm_xo_prepare,
	.unprepare = clk_rpm_xo_unprepare,
};

static const struct clk_ops clk_rpm_fixed_ops = {
	.prepare = clk_rpm_fixed_prepare,
	.unprepare = clk_rpm_fixed_unprepare,
	.round_rate = clk_rpm_round_rate,
	.recalc_rate = clk_rpm_recalc_rate,
};

static const struct clk_ops clk_rpm_ops = {
	.prepare = clk_rpm_prepare,
	.unprepare = clk_rpm_unprepare,
	.set_rate = clk_rpm_set_rate,
	.round_rate = clk_rpm_round_rate,
	.recalc_rate = clk_rpm_recalc_rate,
};
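
/* Clock definitions shared by the MSM8660/APQ8064/IPQ806x tables below. */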
DEFINE_CLK_RPM(afab, QCOM_RPM_APPS_FABRIC_CLK);
DEFINE_CLK_RPM(sfab, QCOM_RPM_SYS_FABRIC_CLK);
DEFINE_CLK_RPM(mmfab, QCOM_RPM_MM_FABRIC_CLK);
DEFINE_CLK_RPM(daytona, QCOM_RPM_DAYTONA_FABRIC_CLK);
DEFINE_CLK_RPM(sfpb, QCOM_RPM_SFPB_CLK);
DEFINE_CLK_RPM(cfpb, QCOM_RPM_CFPB_CLK);
DEFINE_CLK_RPM(mmfpb, QCOM_RPM_MMFPB_CLK);
DEFINE_CLK_RPM(smi, QCOM_RPM_SMI_CLK);
DEFINE_CLK_RPM(ebi1, QCOM_RPM_EBI1_CLK);
DEFINE_CLK_RPM(qdss, QCOM_RPM_QDSS_CLK);
DEFINE_CLK_RPM(nss_fabric_0, QCOM_RPM_NSS_FABRIC_0_CLK);
DEFINE_CLK_RPM(nss_fabric_1, QCOM_RPM_NSS_FABRIC_1_CLK);

DEFINE_CLK_RPM_FIXED(pll4, QCOM_RPM_PLL_4, 540672000);

DEFINE_CLK_RPM_XO_BUFFER(xo_d0, 0);
DEFINE_CLK_RPM_XO_BUFFER(xo_d1, 8);
DEFINE_CLK_RPM_XO_BUFFER(xo_a0, 16);
DEFINE_CLK_RPM_XO_BUFFER(xo_a1, 24);
DEFINE_CLK_RPM_XO_BUFFER(xo_a2, 28);

static struct clk_rpm *msm8660_clks[] = {
	[RPM_APPS_FABRIC_CLK] = &clk_rpm_afab_clk,
	[RPM_APPS_FABRIC_A_CLK] = &clk_rpm_afab_a_clk,
	[RPM_SYS_FABRIC_CLK] = &clk_rpm_sfab_clk,
	[RPM_SYS_FABRIC_A_CLK] = &clk_rpm_sfab_a_clk,
	[RPM_MM_FABRIC_CLK] = &clk_rpm_mmfab_clk,
	[RPM_MM_FABRIC_A_CLK] = &clk_rpm_mmfab_a_clk,
	[RPM_DAYTONA_FABRIC_CLK] = &clk_rpm_daytona_clk,
	[RPM_DAYTONA_FABRIC_A_CLK] = &clk_rpm_daytona_a_clk,
	[RPM_SFPB_CLK] = &clk_rpm_sfpb_clk,
	[RPM_SFPB_A_CLK] = &clk_rpm_sfpb_a_clk,
	[RPM_CFPB_CLK] = &clk_rpm_cfpb_clk,
	[RPM_CFPB_A_CLK] = &clk_rpm_cfpb_a_clk,
	[RPM_MMFPB_CLK] = &clk_rpm_mmfpb_clk,
	[RPM_MMFPB_A_CLK] = &clk_rpm_mmfpb_a_clk,
	[RPM_SMI_CLK] = &clk_rpm_smi_clk,
	[RPM_SMI_A_CLK] = &clk_rpm_smi_a_clk,
	[RPM_EBI1_CLK] = &clk_rpm_ebi1_clk,
	[RPM_EBI1_A_CLK] = &clk_rpm_ebi1_a_clk,
	[RPM_PLL4_CLK] = &clk_rpm_pll4_clk,
};

static const struct rpm_clk_desc rpm_clk_msm8660 = {
	.clks = msm8660_clks,
	.num_clks = ARRAY_SIZE(msm8660_clks),
};

static struct clk_rpm *apq8064_clks[] = {
	[RPM_APPS_FABRIC_CLK] = &clk_rpm_afab_clk,
	[RPM_APPS_FABRIC_A_CLK] = &clk_rpm_afab_a_clk,
	[RPM_CFPB_CLK] = &clk_rpm_cfpb_clk,
	[RPM_CFPB_A_CLK] = &clk_rpm_cfpb_a_clk,
	[RPM_DAYTONA_FABRIC_CLK] = &clk_rpm_daytona_clk,
	[RPM_DAYTONA_FABRIC_A_CLK] = &clk_rpm_daytona_a_clk,
	[RPM_EBI1_CLK] = &clk_rpm_ebi1_clk,
	[RPM_EBI1_A_CLK] = &clk_rpm_ebi1_a_clk,
	[RPM_MM_FABRIC_CLK] = &clk_rpm_mmfab_clk,
	[RPM_MM_FABRIC_A_CLK] = &clk_rpm_mmfab_a_clk,
	[RPM_MMFPB_CLK] = &clk_rpm_mmfpb_clk,
	[RPM_MMFPB_A_CLK] = &clk_rpm_mmfpb_a_clk,
	[RPM_SYS_FABRIC_CLK] = &clk_rpm_sfab_clk,
	[RPM_SYS_FABRIC_A_CLK] = &clk_rpm_sfab_a_clk,
	[RPM_SFPB_CLK] = &clk_rpm_sfpb_clk,
	[RPM_SFPB_A_CLK] = &clk_rpm_sfpb_a_clk,
	[RPM_QDSS_CLK] = &clk_rpm_qdss_clk,
	[RPM_QDSS_A_CLK] = &clk_rpm_qdss_a_clk,
	[RPM_XO_D0] = &clk_rpm_xo_d0_clk,
	[RPM_XO_D1] = &clk_rpm_xo_d1_clk,
	[RPM_XO_A0] = &clk_rpm_xo_a0_clk,
	[RPM_XO_A1] = &clk_rpm_xo_a1_clk,
	[RPM_XO_A2] = &clk_rpm_xo_a2_clk,
};

static const struct rpm_clk_desc rpm_clk_apq8064 = {
	.clks = apq8064_clks,
	.num_clks = ARRAY_SIZE(apq8064_clks),
};

static struct clk_rpm *ipq806x_clks[] = {
	[RPM_APPS_FABRIC_CLK] = &clk_rpm_afab_clk,
	[RPM_APPS_FABRIC_A_CLK] = &clk_rpm_afab_a_clk,
	[RPM_CFPB_CLK] = &clk_rpm_cfpb_clk,
	[RPM_CFPB_A_CLK] = &clk_rpm_cfpb_a_clk,
	[RPM_DAYTONA_FABRIC_CLK] = &clk_rpm_daytona_clk,
	[RPM_DAYTONA_FABRIC_A_CLK] = &clk_rpm_daytona_a_clk,
	[RPM_EBI1_CLK] = &clk_rpm_ebi1_clk,
	[RPM_EBI1_A_CLK] = &clk_rpm_ebi1_a_clk,
	[RPM_SYS_FABRIC_CLK] = &clk_rpm_sfab_clk,
	[RPM_SYS_FABRIC_A_CLK] = &clk_rpm_sfab_a_clk,
	[RPM_SFPB_CLK] = &clk_rpm_sfpb_clk,
	[RPM_SFPB_A_CLK] = &clk_rpm_sfpb_a_clk,
	[RPM_NSS_FABRIC_0_CLK] = &clk_rpm_nss_fabric_0_clk,
	[RPM_NSS_FABRIC_0_A_CLK] = &clk_rpm_nss_fabric_0_a_clk,
	[RPM_NSS_FABRIC_1_CLK] = &clk_rpm_nss_fabric_1_clk,
	[RPM_NSS_FABRIC_1_A_CLK] = &clk_rpm_nss_fabric_1_a_clk,
};

static const struct rpm_clk_desc rpm_clk_ipq806x = {
	.clks = ipq806x_clks,
	.num_clks = ARRAY_SIZE(ipq806x_clks),
};

static const struct of_device_id rpm_clk_match_table[] = {
	{ .compatible = "qcom,rpmcc-msm8660", .data = &rpm_clk_msm8660 },
	{ .compatible = "qcom,rpmcc-apq8060", .data = &rpm_clk_msm8660 },
	{ .compatible = "qcom,rpmcc-apq8064", .data = &rpm_clk_apq8064 },
	{ .compatible = "qcom,rpmcc-ipq806x", .data = &rpm_clk_ipq806x },
	{ }
};
MODULE_DEVICE_TABLE(of, rpm_clk_match_table);
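
/* of_clk provider callback: map a clock-cells index to its clk_hw. */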
static struct clk_hw *qcom_rpm_clk_hw_get(struct of_phandle_args *clkspec,
					  void *data)
{
	struct rpm_cc *rcc = data;
	unsigned int idx = clkspec->args[0];

	if (idx >= rcc->num_clks) {
		pr_err("%s: invalid index %u\n", __func__, idx);
		return ERR_PTR(-EINVAL);
	}

	return rcc->clks[idx] ? &rcc->clks[idx]->hw : ERR_PTR(-ENOENT);
}
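
/*
 * Probe: take the RPM handle from the parent MFD device, hand off each
 * clock (voting INT_MAX so it stays at max rate until a client takes
 * over), then register the clk_hws and expose them via an of_clk provider.
 */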
static int rpm_clk_probe(struct platform_device *pdev)
{
	struct rpm_cc *rcc;
	int ret;
	size_t num_clks, i;
	struct qcom_rpm *rpm;
	struct clk_rpm **rpm_clks;
	const struct rpm_clk_desc *desc;

	rpm = dev_get_drvdata(pdev->dev.parent);
	if (!rpm) {
		dev_err(&pdev->dev, "Unable to retrieve handle to RPM\n");
		return -ENODEV;
	}

	desc = of_device_get_match_data(&pdev->dev);
	if (!desc)
		return -EINVAL;

	rpm_clks = desc->clks;
	num_clks = desc->num_clks;

	rcc = devm_kzalloc(&pdev->dev, sizeof(*rcc), GFP_KERNEL);
	if (!rcc)
		return -ENOMEM;

	rcc->clks = rpm_clks;
	rcc->num_clks = num_clks;
	mutex_init(&rcc->xo_lock);

	for (i = 0; i < num_clks; i++) {
		if (!rpm_clks[i])
			continue;

		rpm_clks[i]->rpm = rpm;
		rpm_clks[i]->rpm_cc = rcc;

		ret = clk_rpm_handoff(rpm_clks[i]);
		if (ret)
			goto err;
	}

	for (i = 0; i < num_clks; i++) {
		if (!rpm_clks[i])
			continue;

		ret = devm_clk_hw_register(&pdev->dev, &rpm_clks[i]->hw);
		if (ret)
			goto err;
	}

	ret = devm_of_clk_add_hw_provider(&pdev->dev, qcom_rpm_clk_hw_get,
					  rcc);
	if (ret)
		goto err;

	return 0;
err:
	dev_err(&pdev->dev, "Error registering RPM Clock driver (%d)\n", ret);
	return ret;
}

static struct platform_driver rpm_clk_driver = {
	.driver = {
		.name = "qcom-clk-rpm",
		.of_match_table = rpm_clk_match_table,
	},
	.probe = rpm_clk_probe,
};

static int __init rpm_clk_init(void)
{
	return platform_driver_register(&rpm_clk_driver);
}
core_initcall(rpm_clk_init);

static void __exit rpm_clk_exit(void)
{
	platform_driver_unregister(&rpm_clk_driver);
}
module_exit(rpm_clk_exit);

MODULE_DESCRIPTION("Qualcomm RPM Clock Controller Driver");
MODULE_LICENSE("GPL v2");
MODULE_ALIAS("platform:qcom-clk-rpm");