clk-rcg2.c 41 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
37047057067077087097107117127137147157167177187197207217227237247257267277287297307317327337347357367377387397407417427437447457467477487497507517527537547557567577587597607617627637647657667677687697707717727737747757767777787797807817827837847857867877887897907917927937947957967977987998008018028038048058068078088098108118128138148158168178188198208218228238248258268278288298308318328338348358368378388398408418428438448458468478488498508518528538548558568578588598608618628638648658668678688698708718728738748758768778788798808818828838848858868878888898908918928938948958968978988999009019029039049059069079089099109119129139149159169179189199209219229239249259269279289299309319329339349359369379389399409419429439449459469479489499509519529539549559569579589599609619629639649659669679689699709719729739749759769779789799809819829839849859869879889899909919929939949959969979989991000100110021003100410051006100710081009101010111012101310141015101610171018101910201021102210231024102510261027102810291030103110321033103410351036103710381039104010411042104310441045104610471048104910501051105210531054105510561057105810591060106110621063106410651066106710681069107010711072107310741075107610771078107910801081108210831084108510861087108810891090109110921093109410951096109710981099110011011102110311041105110611071108110911101111111211131114111511161117111811191120112111221123112411251126112711281129113011311132113311341135113611371138113911401141114211431144114511461147114811491150115111521153115411551156115711581159116011611162116311641165116611671168116911701171117211731174117511761177117811791180118111821183118411851186118711881189119011911192119311941195119611971198119912001201120212031204120512061207120812091210121112121213121412151216121712181219122012211222122312241225122612271228122912301231123212331234123512361237123812391240124112421243124412451246124712481249125012511252125312541255125612571258125912601261126212631264126512661267126812691270127112721273127412751276127
71278127912801281128212831284128512861287128812891290129112921293129412951296129712981299130013011302130313041305130613071308130913101311131213131314131513161317131813191320132113221323132413251326132713281329133013311332133313341335133613371338133913401341134213431344134513461347134813491350135113521353135413551356135713581359136013611362136313641365136613671368136913701371137213731374137513761377137813791380138113821383138413851386138713881389139013911392139313941395139613971398139914001401140214031404140514061407140814091410141114121413141414151416141714181419142014211422142314241425142614271428142914301431143214331434143514361437143814391440144114421443144414451446144714481449145014511452145314541455145614571458145914601461146214631464146514661467146814691470147114721473147414751476147714781479148014811482148314841485148614871488148914901491149214931494149514961497149814991500150115021503150415051506150715081509151015111512151315141515151615171518151915201521152215231524152515261527152815291530153115321533153415351536153715381539154015411542154315441545154615471548154915501551155215531554155515561557155815591560156115621563156415651566156715681569157015711572157315741575157615771578157915801581158215831584158515861587158815891590159115921593159415951596159715981599160016011602160316041605160616071608160916101611161216131614161516161617161816191620162116221623162416251626162716281629163016311632163316341635163616371638163916401641164216431644164516461647164816491650165116521653165416551656165716581659166016611662166316641665166616671668166916701671167216731674167516761677167816791680
  1. // SPDX-License-Identifier: GPL-2.0
  2. /*
  3. * Copyright (c) 2013, 2018, The Linux Foundation. All rights reserved.
  4. */
  5. #include <linux/kernel.h>
  6. #include <linux/bitops.h>
  7. #include <linux/err.h>
  8. #include <linux/bug.h>
  9. #include <linux/export.h>
  10. #include <linux/clk-provider.h>
  11. #include <linux/delay.h>
  12. #include <linux/rational.h>
  13. #include <linux/regmap.h>
  14. #include <linux/math64.h>
  15. #include <linux/minmax.h>
  16. #include <linux/slab.h>
  17. #include <asm/div64.h>
  18. #include "clk-rcg.h"
  19. #include "common.h"
  20. #define CMD_REG 0x0
  21. #define CMD_UPDATE BIT(0)
  22. #define CMD_ROOT_EN BIT(1)
  23. #define CMD_DIRTY_CFG BIT(4)
  24. #define CMD_DIRTY_N BIT(5)
  25. #define CMD_DIRTY_M BIT(6)
  26. #define CMD_DIRTY_D BIT(7)
  27. #define CMD_ROOT_OFF BIT(31)
  28. #define CFG_REG 0x4
  29. #define CFG_SRC_DIV_SHIFT 0
  30. #define CFG_SRC_SEL_SHIFT 8
  31. #define CFG_SRC_SEL_MASK (0x7 << CFG_SRC_SEL_SHIFT)
  32. #define CFG_MODE_SHIFT 12
  33. #define CFG_MODE_MASK (0x3 << CFG_MODE_SHIFT)
  34. #define CFG_MODE_DUAL_EDGE (0x2 << CFG_MODE_SHIFT)
  35. #define CFG_HW_CLK_CTRL_MASK BIT(20)
  36. #define M_REG 0x8
  37. #define N_REG 0xc
  38. #define D_REG 0x10
  39. #define RCG_CFG_OFFSET(rcg) ((rcg)->cmd_rcgr + (rcg)->cfg_off + CFG_REG)
  40. #define RCG_M_OFFSET(rcg) ((rcg)->cmd_rcgr + (rcg)->cfg_off + M_REG)
  41. #define RCG_N_OFFSET(rcg) ((rcg)->cmd_rcgr + (rcg)->cfg_off + N_REG)
  42. #define RCG_D_OFFSET(rcg) ((rcg)->cmd_rcgr + (rcg)->cfg_off + D_REG)
  43. /* Dynamic Frequency Scaling */
  44. #define MAX_PERF_LEVEL 8
  45. #define SE_CMD_DFSR_OFFSET 0x14
  46. #define SE_CMD_DFS_EN BIT(0)
  47. #define SE_PERF_DFSR(level) (0x1c + 0x4 * (level))
  48. #define SE_PERF_M_DFSR(level) (0x5c + 0x4 * (level))
  49. #define SE_PERF_N_DFSR(level) (0x9c + 0x4 * (level))
/* Rounding policy used when searching a frequency table. */
enum freq_policy {
	FLOOR,	/* pick the highest entry <= the requested rate */
	CEIL,	/* pick the lowest entry >= the requested rate */
};
  54. static int clk_rcg2_is_enabled(struct clk_hw *hw)
  55. {
  56. struct clk_rcg2 *rcg = to_clk_rcg2(hw);
  57. u32 cmd;
  58. int ret;
  59. ret = regmap_read(rcg->clkr.regmap, rcg->cmd_rcgr + CMD_REG, &cmd);
  60. if (ret)
  61. return ret;
  62. return (cmd & CMD_ROOT_OFF) == 0;
  63. }
  64. static u8 __clk_rcg2_get_parent(struct clk_hw *hw, u32 cfg)
  65. {
  66. struct clk_rcg2 *rcg = to_clk_rcg2(hw);
  67. int num_parents = clk_hw_get_num_parents(hw);
  68. int i;
  69. cfg &= CFG_SRC_SEL_MASK;
  70. cfg >>= CFG_SRC_SEL_SHIFT;
  71. for (i = 0; i < num_parents; i++)
  72. if (cfg == rcg->parent_map[i].cfg)
  73. return i;
  74. pr_debug("%s: Clock %s has invalid parent, using default.\n",
  75. __func__, clk_hw_get_name(hw));
  76. return 0;
  77. }
  78. static u8 clk_rcg2_get_parent(struct clk_hw *hw)
  79. {
  80. struct clk_rcg2 *rcg = to_clk_rcg2(hw);
  81. u32 cfg;
  82. int ret;
  83. ret = regmap_read(rcg->clkr.regmap, RCG_CFG_OFFSET(rcg), &cfg);
  84. if (ret) {
  85. pr_debug("%s: Unable to read CFG register for %s\n",
  86. __func__, clk_hw_get_name(hw));
  87. return 0;
  88. }
  89. return __clk_rcg2_get_parent(hw, cfg);
  90. }
  91. static int update_config(struct clk_rcg2 *rcg)
  92. {
  93. int count, ret;
  94. u32 cmd;
  95. struct clk_hw *hw = &rcg->clkr.hw;
  96. const char *name = clk_hw_get_name(hw);
  97. ret = regmap_update_bits(rcg->clkr.regmap, rcg->cmd_rcgr + CMD_REG,
  98. CMD_UPDATE, CMD_UPDATE);
  99. if (ret)
  100. return ret;
  101. /* Wait for update to take effect */
  102. for (count = 500; count > 0; count--) {
  103. ret = regmap_read(rcg->clkr.regmap, rcg->cmd_rcgr + CMD_REG, &cmd);
  104. if (ret)
  105. return ret;
  106. if (!(cmd & CMD_UPDATE))
  107. return 0;
  108. udelay(1);
  109. }
  110. WARN(1, "%s: rcg didn't update its configuration.", name);
  111. return -EBUSY;
  112. }
  113. static int clk_rcg2_set_parent(struct clk_hw *hw, u8 index)
  114. {
  115. struct clk_rcg2 *rcg = to_clk_rcg2(hw);
  116. int ret;
  117. u32 cfg = rcg->parent_map[index].cfg << CFG_SRC_SEL_SHIFT;
  118. ret = regmap_update_bits(rcg->clkr.regmap, RCG_CFG_OFFSET(rcg),
  119. CFG_SRC_SEL_MASK, cfg);
  120. if (ret)
  121. return ret;
  122. return update_config(rcg);
  123. }
  124. /*
  125. * Calculate m/n:d rate
  126. *
  127. * parent_rate m
  128. * rate = ----------- x ---
  129. * hid_div n
  130. */
  131. static unsigned long
  132. calc_rate(unsigned long rate, u32 m, u32 n, u32 mode, u32 hid_div)
  133. {
  134. if (hid_div)
  135. rate = mult_frac(rate, 2, hid_div + 1);
  136. if (mode)
  137. rate = mult_frac(rate, m, n);
  138. return rate;
  139. }
/*
 * Recalculate the output rate from a CFG value plus the M/N/D registers.
 * M and N are only consulted when the RCG actually has an MND counter
 * (mnd_width != 0).
 */
static unsigned long
__clk_rcg2_recalc_rate(struct clk_hw *hw, unsigned long parent_rate, u32 cfg)
{
	struct clk_rcg2 *rcg = to_clk_rcg2(hw);
	u32 hid_div, m = 0, n = 0, mode = 0, mask;

	if (rcg->mnd_width) {
		mask = BIT(rcg->mnd_width) - 1;
		regmap_read(rcg->clkr.regmap, RCG_M_OFFSET(rcg), &m);
		m &= mask;
		regmap_read(rcg->clkr.regmap, RCG_N_OFFSET(rcg), &n);
		/* The N register stores ~(N - M); undo the inversion and add M. */
		n = ~n;
		n &= mask;
		n += m;
		mode = cfg & CFG_MODE_MASK;
		mode >>= CFG_MODE_SHIFT;
	}

	/* The HID divider field encodes 2*div - 1. */
	mask = BIT(rcg->hid_width) - 1;
	hid_div = cfg >> CFG_SRC_DIV_SHIFT;
	hid_div &= mask;

	return calc_rate(parent_rate, m, n, mode, hid_div);
}
  161. static unsigned long
  162. clk_rcg2_recalc_rate(struct clk_hw *hw, unsigned long parent_rate)
  163. {
  164. struct clk_rcg2 *rcg = to_clk_rcg2(hw);
  165. u32 cfg;
  166. regmap_read(rcg->clkr.regmap, RCG_CFG_OFFSET(rcg), &cfg);
  167. return __clk_rcg2_recalc_rate(hw, parent_rate, cfg);
  168. }
/*
 * Select a frequency table entry for the requested rate (rounded per
 * @policy) and fill @req with the parent and parent rate it implies.
 *
 * When CLK_SET_RATE_PARENT is set, back-calculate the parent rate from
 * the entry's pre-divider and M/N values; otherwise keep the parent's
 * current rate.
 */
static int _freq_tbl_determine_rate(struct clk_hw *hw, const struct freq_tbl *f,
				    struct clk_rate_request *req,
				    enum freq_policy policy)
{
	unsigned long clk_flags, rate = req->rate;
	struct clk_hw *p;
	struct clk_rcg2 *rcg = to_clk_rcg2(hw);
	int index;

	switch (policy) {
	case FLOOR:
		f = qcom_find_freq_floor(f, rate);
		break;
	case CEIL:
		f = qcom_find_freq(f, rate);
		break;
	default:
		return -EINVAL;
	}

	if (!f)
		return -EINVAL;

	index = qcom_find_src_index(hw, rcg->parent_map, f->src);
	if (index < 0)
		return index;

	clk_flags = clk_hw_get_flags(hw);
	p = clk_hw_get_parent_by_index(hw, index);
	if (!p)
		return -EINVAL;

	if (clk_flags & CLK_SET_RATE_PARENT) {
		rate = f->freq;
		if (f->pre_div) {
			/* pre_div field encodes 2*div - 1 */
			if (!rate)
				rate = req->rate;
			rate /= 2;
			rate *= f->pre_div + 1;
		}

		if (f->n) {
			/* MND entry: parent = rate * n / m */
			u64 tmp = rate;
			tmp = tmp * f->n;
			do_div(tmp, f->m);
			rate = tmp;
		}
	} else {
		rate = clk_hw_get_rate(p);
	}

	req->best_parent_hw = p;
	req->best_parent_rate = rate;
	req->rate = f->freq;

	return 0;
}
  218. static const struct freq_conf *
  219. __clk_rcg2_select_conf(struct clk_hw *hw, const struct freq_multi_tbl *f,
  220. unsigned long req_rate)
  221. {
  222. unsigned long rate_diff, best_rate_diff = ULONG_MAX;
  223. const struct freq_conf *conf, *best_conf = NULL;
  224. struct clk_rcg2 *rcg = to_clk_rcg2(hw);
  225. const char *name = clk_hw_get_name(hw);
  226. unsigned long parent_rate, rate;
  227. struct clk_hw *p;
  228. int index, i;
  229. /* Exit early if only one config is defined */
  230. if (f->num_confs == 1) {
  231. best_conf = f->confs;
  232. goto exit;
  233. }
  234. /* Search in each provided config the one that is near the wanted rate */
  235. for (i = 0, conf = f->confs; i < f->num_confs; i++, conf++) {
  236. index = qcom_find_src_index(hw, rcg->parent_map, conf->src);
  237. if (index < 0)
  238. continue;
  239. p = clk_hw_get_parent_by_index(hw, index);
  240. if (!p)
  241. continue;
  242. parent_rate = clk_hw_get_rate(p);
  243. rate = calc_rate(parent_rate, conf->n, conf->m, conf->n, conf->pre_div);
  244. if (rate == req_rate) {
  245. best_conf = conf;
  246. goto exit;
  247. }
  248. rate_diff = abs_diff(req_rate, rate);
  249. if (rate_diff < best_rate_diff) {
  250. best_rate_diff = rate_diff;
  251. best_conf = conf;
  252. }
  253. }
  254. /*
  255. * Very unlikely. Warn if we couldn't find a correct config
  256. * due to parent not found in every config.
  257. */
  258. if (unlikely(!best_conf)) {
  259. WARN(1, "%s: can't find a configuration for rate %lu\n",
  260. name, req_rate);
  261. return ERR_PTR(-EINVAL);
  262. }
  263. exit:
  264. return best_conf;
  265. }
/*
 * Multi-config variant of _freq_tbl_determine_rate(): look up the
 * freq_multi_tbl entry for the requested rate, select its best
 * configuration, and fill @req with the chosen parent and parent rate.
 */
static int _freq_tbl_fm_determine_rate(struct clk_hw *hw, const struct freq_multi_tbl *f,
				       struct clk_rate_request *req)
{
	unsigned long clk_flags, rate = req->rate;
	struct clk_rcg2 *rcg = to_clk_rcg2(hw);
	const struct freq_conf *conf;
	struct clk_hw *p;
	int index;

	f = qcom_find_freq_multi(f, rate);
	if (!f || !f->confs)
		return -EINVAL;

	conf = __clk_rcg2_select_conf(hw, f, rate);
	if (IS_ERR(conf))
		return PTR_ERR(conf);
	index = qcom_find_src_index(hw, rcg->parent_map, conf->src);
	if (index < 0)
		return index;

	clk_flags = clk_hw_get_flags(hw);
	p = clk_hw_get_parent_by_index(hw, index);
	if (!p)
		return -EINVAL;

	if (clk_flags & CLK_SET_RATE_PARENT) {
		rate = f->freq;
		if (conf->pre_div) {
			/* pre_div field encodes 2*div - 1 */
			if (!rate)
				rate = req->rate;
			rate /= 2;
			rate *= conf->pre_div + 1;
		}

		if (conf->n) {
			/* MND config: parent = rate * n / m */
			u64 tmp = rate;

			tmp = tmp * conf->n;
			do_div(tmp, conf->m);
			rate = tmp;
		}
	} else {
		rate = clk_hw_get_rate(p);
	}

	req->best_parent_hw = p;
	req->best_parent_rate = rate;
	req->rate = f->freq;

	return 0;
}
  309. static int clk_rcg2_determine_rate(struct clk_hw *hw,
  310. struct clk_rate_request *req)
  311. {
  312. struct clk_rcg2 *rcg = to_clk_rcg2(hw);
  313. return _freq_tbl_determine_rate(hw, rcg->freq_tbl, req, CEIL);
  314. }
  315. static int clk_rcg2_determine_floor_rate(struct clk_hw *hw,
  316. struct clk_rate_request *req)
  317. {
  318. struct clk_rcg2 *rcg = to_clk_rcg2(hw);
  319. return _freq_tbl_determine_rate(hw, rcg->freq_tbl, req, FLOOR);
  320. }
  321. static int clk_rcg2_fm_determine_rate(struct clk_hw *hw,
  322. struct clk_rate_request *req)
  323. {
  324. struct clk_rcg2 *rcg = to_clk_rcg2(hw);
  325. return _freq_tbl_fm_determine_rate(hw, rcg->freq_multi_tbl, req);
  326. }
/*
 * Program the M, N and D registers for frequency table entry @f and merge
 * the matching CFG bits (divider, source select, mode, HW clock control)
 * into *@_cfg. Writing CFG and latching the update are left to the caller.
 */
static int __clk_rcg2_configure(struct clk_rcg2 *rcg, const struct freq_tbl *f,
				u32 *_cfg)
{
	u32 cfg, mask, d_val, not2d_val, n_minus_m;
	struct clk_hw *hw = &rcg->clkr.hw;
	int ret, index = qcom_find_src_index(hw, rcg->parent_map, f->src);

	if (index < 0)
		return index;

	if (rcg->mnd_width && f->n) {
		mask = BIT(rcg->mnd_width) - 1;
		ret = regmap_update_bits(rcg->clkr.regmap,
					 RCG_M_OFFSET(rcg), mask, f->m);
		if (ret)
			return ret;

		/* The N register stores ~(N - M). */
		ret = regmap_update_bits(rcg->clkr.regmap,
					 RCG_N_OFFSET(rcg), mask, ~(f->n - f->m));
		if (ret)
			return ret;

		/* Calculate 2d value */
		d_val = f->n;

		n_minus_m = f->n - f->m;
		n_minus_m *= 2;

		/* Keep 2*D within the [M, 2*(N-M)] range the counter allows. */
		d_val = clamp_t(u32, d_val, f->m, n_minus_m);
		not2d_val = ~d_val & mask;

		/* The D register stores ~(2 * D). */
		ret = regmap_update_bits(rcg->clkr.regmap,
					 RCG_D_OFFSET(rcg), mask, not2d_val);
		if (ret)
			return ret;
	}

	mask = BIT(rcg->hid_width) - 1;
	mask |= CFG_SRC_SEL_MASK | CFG_MODE_MASK | CFG_HW_CLK_CTRL_MASK;
	cfg = f->pre_div << CFG_SRC_DIV_SHIFT;
	cfg |= rcg->parent_map[index].cfg << CFG_SRC_SEL_SHIFT;
	/* Dual-edge MND mode only when M != N (i.e. a real fraction). */
	if (rcg->mnd_width && f->n && (f->m != f->n))
		cfg |= CFG_MODE_DUAL_EDGE;
	if (rcg->hw_clk_ctrl)
		cfg |= CFG_HW_CLK_CTRL_MASK;

	*_cfg &= ~mask;
	*_cfg |= cfg;

	return 0;
}
  368. static int clk_rcg2_configure(struct clk_rcg2 *rcg, const struct freq_tbl *f)
  369. {
  370. u32 cfg;
  371. int ret;
  372. ret = regmap_read(rcg->clkr.regmap, RCG_CFG_OFFSET(rcg), &cfg);
  373. if (ret)
  374. return ret;
  375. ret = __clk_rcg2_configure(rcg, f, &cfg);
  376. if (ret)
  377. return ret;
  378. ret = regmap_write(rcg->clkr.regmap, RCG_CFG_OFFSET(rcg), cfg);
  379. if (ret)
  380. return ret;
  381. return update_config(rcg);
  382. }
  383. static int __clk_rcg2_set_rate(struct clk_hw *hw, unsigned long rate,
  384. enum freq_policy policy)
  385. {
  386. struct clk_rcg2 *rcg = to_clk_rcg2(hw);
  387. const struct freq_tbl *f;
  388. switch (policy) {
  389. case FLOOR:
  390. f = qcom_find_freq_floor(rcg->freq_tbl, rate);
  391. break;
  392. case CEIL:
  393. f = qcom_find_freq(rcg->freq_tbl, rate);
  394. break;
  395. default:
  396. return -EINVAL;
  397. }
  398. if (!f)
  399. return -EINVAL;
  400. return clk_rcg2_configure(rcg, f);
  401. }
  402. static int __clk_rcg2_fm_set_rate(struct clk_hw *hw, unsigned long rate)
  403. {
  404. struct clk_rcg2 *rcg = to_clk_rcg2(hw);
  405. const struct freq_multi_tbl *f;
  406. const struct freq_conf *conf;
  407. struct freq_tbl f_tbl = {};
  408. f = qcom_find_freq_multi(rcg->freq_multi_tbl, rate);
  409. if (!f || !f->confs)
  410. return -EINVAL;
  411. conf = __clk_rcg2_select_conf(hw, f, rate);
  412. if (IS_ERR(conf))
  413. return PTR_ERR(conf);
  414. f_tbl.freq = f->freq;
  415. f_tbl.src = conf->src;
  416. f_tbl.pre_div = conf->pre_div;
  417. f_tbl.m = conf->m;
  418. f_tbl.n = conf->n;
  419. return clk_rcg2_configure(rcg, &f_tbl);
  420. }
/* clk_ops.set_rate: program the CEIL-rounded frequency table entry. */
static int clk_rcg2_set_rate(struct clk_hw *hw, unsigned long rate,
			     unsigned long parent_rate)
{
	return __clk_rcg2_set_rate(hw, rate, CEIL);
}
/* clk_ops.set_rate: program the FLOOR-rounded frequency table entry. */
static int clk_rcg2_set_floor_rate(struct clk_hw *hw, unsigned long rate,
				   unsigned long parent_rate)
{
	return __clk_rcg2_set_rate(hw, rate, FLOOR);
}
/* clk_ops.set_rate for multi-config frequency tables. */
static int clk_rcg2_fm_set_rate(struct clk_hw *hw, unsigned long rate,
				unsigned long parent_rate)
{
	return __clk_rcg2_fm_set_rate(hw, rate);
}
/*
 * clk_ops.set_rate_and_parent: the table entry already encodes the source,
 * so this reduces to a plain CEIL set_rate.
 */
static int clk_rcg2_set_rate_and_parent(struct clk_hw *hw,
		unsigned long rate, unsigned long parent_rate, u8 index)
{
	return __clk_rcg2_set_rate(hw, rate, CEIL);
}
/*
 * clk_ops.set_rate_and_parent: the table entry already encodes the source,
 * so this reduces to a plain FLOOR set_rate.
 */
static int clk_rcg2_set_floor_rate_and_parent(struct clk_hw *hw,
		unsigned long rate, unsigned long parent_rate, u8 index)
{
	return __clk_rcg2_set_rate(hw, rate, FLOOR);
}
/*
 * clk_ops.set_rate_and_parent for multi-config tables; the selected
 * configuration carries the source, so the index is unused.
 */
static int clk_rcg2_fm_set_rate_and_parent(struct clk_hw *hw,
		unsigned long rate, unsigned long parent_rate, u8 index)
{
	return __clk_rcg2_fm_set_rate(hw, rate);
}
/*
 * clk_ops.get_duty_cycle: report the duty cycle as D/N decoded from the
 * MND counter registers; non-MND RCGs and an unprogrammed counter both
 * report 50 %.
 */
static int clk_rcg2_get_duty_cycle(struct clk_hw *hw, struct clk_duty *duty)
{
	struct clk_rcg2 *rcg = to_clk_rcg2(hw);
	u32 notn_m, n, m, d, not2d, mask;

	if (!rcg->mnd_width) {
		/* 50 % duty-cycle for Non-MND RCGs */
		duty->num = 1;
		duty->den = 2;
		return 0;
	}

	regmap_read(rcg->clkr.regmap, RCG_D_OFFSET(rcg), &not2d);
	regmap_read(rcg->clkr.regmap, RCG_M_OFFSET(rcg), &m);
	regmap_read(rcg->clkr.regmap, RCG_N_OFFSET(rcg), &notn_m);

	if (!not2d && !m && !notn_m) {
		/* 50 % duty-cycle always */
		duty->num = 1;
		duty->den = 2;
		return 0;
	}

	mask = BIT(rcg->mnd_width) - 1;

	/* The D register stores ~(2 * D); recover D. */
	d = ~(not2d) & mask;
	d = DIV_ROUND_CLOSEST(d, 2);

	/* The N register stores ~(N - M); recover N. */
	n = (~(notn_m) + m) & mask;

	duty->num = d;
	duty->den = n;

	return 0;
}
/*
 * clk_ops.set_duty_cycle: convert the requested num/den ratio into a 2*D
 * value for the MND counter and program the D register. Only valid for
 * MND RCGs whose divider is not in bypass mode.
 */
static int clk_rcg2_set_duty_cycle(struct clk_hw *hw, struct clk_duty *duty)
{
	struct clk_rcg2 *rcg = to_clk_rcg2(hw);
	u32 notn_m, n, m, d, not2d, mask, duty_per, cfg;
	int ret;

	/* Duty-cycle cannot be modified for non-MND RCGs */
	if (!rcg->mnd_width)
		return -EINVAL;

	mask = BIT(rcg->mnd_width) - 1;

	regmap_read(rcg->clkr.regmap, RCG_N_OFFSET(rcg), &notn_m);
	regmap_read(rcg->clkr.regmap, RCG_M_OFFSET(rcg), &m);
	regmap_read(rcg->clkr.regmap, RCG_CFG_OFFSET(rcg), &cfg);

	/* Duty-cycle cannot be modified if MND divider is in bypass mode. */
	if (!(cfg & CFG_MODE_MASK))
		return -EINVAL;

	/* The N register stores ~(N - M); recover N. */
	n = (~(notn_m) + m) & mask;

	duty_per = (duty->num * 100) / duty->den;

	/* Calculate 2d value */
	d = DIV_ROUND_CLOSEST(n * duty_per * 2, 100);

	/*
	 * Check bit widths of 2d. If D is too big reduce duty cycle.
	 * Also make sure it is never zero.
	 */
	d = clamp_val(d, 1, mask);

	/* Keep D within the range the M/N counter supports. */
	if ((d / 2) > (n - m))
		d = (n - m) * 2;
	else if ((d / 2) < (m / 2))
		d = m;

	not2d = ~d & mask;

	ret = regmap_update_bits(rcg->clkr.regmap, RCG_D_OFFSET(rcg), mask,
				 not2d);
	if (ret)
		return ret;

	return update_config(rcg);
}
/* Standard RCG2 ops: CEIL rounding against the frequency table. */
const struct clk_ops clk_rcg2_ops = {
	.is_enabled = clk_rcg2_is_enabled,
	.get_parent = clk_rcg2_get_parent,
	.set_parent = clk_rcg2_set_parent,
	.recalc_rate = clk_rcg2_recalc_rate,
	.determine_rate = clk_rcg2_determine_rate,
	.set_rate = clk_rcg2_set_rate,
	.set_rate_and_parent = clk_rcg2_set_rate_and_parent,
	.get_duty_cycle = clk_rcg2_get_duty_cycle,
	.set_duty_cycle = clk_rcg2_set_duty_cycle,
};
EXPORT_SYMBOL_GPL(clk_rcg2_ops);
/* RCG2 ops variant that rounds requested rates down (FLOOR). */
const struct clk_ops clk_rcg2_floor_ops = {
	.is_enabled = clk_rcg2_is_enabled,
	.get_parent = clk_rcg2_get_parent,
	.set_parent = clk_rcg2_set_parent,
	.recalc_rate = clk_rcg2_recalc_rate,
	.determine_rate = clk_rcg2_determine_floor_rate,
	.set_rate = clk_rcg2_set_floor_rate,
	.set_rate_and_parent = clk_rcg2_set_floor_rate_and_parent,
	.get_duty_cycle = clk_rcg2_get_duty_cycle,
	.set_duty_cycle = clk_rcg2_set_duty_cycle,
};
EXPORT_SYMBOL_GPL(clk_rcg2_floor_ops);
/* RCG2 ops variant for clocks described by a multi-config frequency table. */
const struct clk_ops clk_rcg2_fm_ops = {
	.is_enabled = clk_rcg2_is_enabled,
	.get_parent = clk_rcg2_get_parent,
	.set_parent = clk_rcg2_set_parent,
	.recalc_rate = clk_rcg2_recalc_rate,
	.determine_rate = clk_rcg2_fm_determine_rate,
	.set_rate = clk_rcg2_fm_set_rate,
	.set_rate_and_parent = clk_rcg2_fm_set_rate_and_parent,
	.get_duty_cycle = clk_rcg2_get_duty_cycle,
	.set_duty_cycle = clk_rcg2_set_duty_cycle,
};
EXPORT_SYMBOL_GPL(clk_rcg2_fm_ops);
/* Mux-only ops: no rate programming, parent chosen by closest rate. */
const struct clk_ops clk_rcg2_mux_closest_ops = {
	.determine_rate = __clk_mux_determine_rate_closest,
	.get_parent = clk_rcg2_get_parent,
	.set_parent = clk_rcg2_set_parent,
};
EXPORT_SYMBOL_GPL(clk_rcg2_mux_closest_ops);
/* num/den fraction used by the pixel-clock rate tables below. */
struct frac_entry {
	int num;	/* numerator */
	int den;	/* denominator */
};
/* Fractions of the 675 MHz source; zero-terminated. */
static const struct frac_entry frac_table_675m[] = {	/* link rate of 270M */
	{ 52, 295 },	/* 119 M */
	{ 11, 57 },	/* 130.25 M */
	{ 63, 307 },	/* 138.50 M */
	{ 11, 50 },	/* 148.50 M */
	{ 47, 206 },	/* 154 M */
	{ 31, 100 },	/* 205.25 M */
	{ 107, 269 },	/* 268.50 M */
	{ },
};
  569. static struct frac_entry frac_table_810m[] = { /* Link rate of 162M */
  570. { 31, 211 }, /* 119 M */
  571. { 32, 199 }, /* 130.25 M */
  572. { 63, 307 }, /* 138.50 M */
  573. { 11, 60 }, /* 148.50 M */
  574. { 50, 263 }, /* 154 M */
  575. { 31, 120 }, /* 205.25 M */
  576. { 119, 359 }, /* 268.50 M */
  577. { },
  578. };
/*
 * Program the eDP pixel clock: choose the M/N fraction (from the table
 * matching the parent's link-rate PLL output) whose implied source rate
 * is within 100 kHz of the actual parent rate; the currently programmed
 * pre-divider is kept.
 */
static int clk_edp_pixel_set_rate(struct clk_hw *hw, unsigned long rate,
				  unsigned long parent_rate)
{
	struct clk_rcg2 *rcg = to_clk_rcg2(hw);
	struct freq_tbl f = *rcg->freq_tbl;
	const struct frac_entry *frac;
	int delta = 100000;	/* allowed deviation in Hz */
	s64 src_rate = parent_rate;
	s64 request;
	u32 mask = BIT(rcg->hid_width) - 1;
	u32 hid_div;

	if (src_rate == 810000000)
		frac = frac_table_810m;
	else
		frac = frac_table_675m;

	for (; frac->num; frac++) {
		request = rate;
		request *= frac->den;
		request = div_s64(request, frac->num);
		if ((src_rate < (request - delta)) ||
		    (src_rate > (request + delta)))
			continue;

		/* Reuse the pre-divider already programmed in CFG. */
		regmap_read(rcg->clkr.regmap, rcg->cmd_rcgr + CFG_REG,
			    &hid_div);
		f.pre_div = hid_div;
		f.pre_div >>= CFG_SRC_DIV_SHIFT;
		f.pre_div &= mask;
		f.m = frac->num;
		f.n = frac->den;

		return clk_rcg2_configure(rcg, &f);
	}

	return -EINVAL;
}
static int clk_edp_pixel_set_rate_and_parent(struct clk_hw *hw,
		unsigned long rate, unsigned long parent_rate, u8 index)
{
	/* Parent index is set statically in frequency table */
	return clk_edp_pixel_set_rate(hw, rate, parent_rate);
}
  618. static int clk_edp_pixel_determine_rate(struct clk_hw *hw,
  619. struct clk_rate_request *req)
  620. {
  621. struct clk_rcg2 *rcg = to_clk_rcg2(hw);
  622. const struct freq_tbl *f = rcg->freq_tbl;
  623. const struct frac_entry *frac;
  624. int delta = 100000;
  625. s64 request;
  626. u32 mask = BIT(rcg->hid_width) - 1;
  627. u32 hid_div;
  628. int index = qcom_find_src_index(hw, rcg->parent_map, f->src);
  629. /* Force the correct parent */
  630. req->best_parent_hw = clk_hw_get_parent_by_index(hw, index);
  631. req->best_parent_rate = clk_hw_get_rate(req->best_parent_hw);
  632. if (req->best_parent_rate == 810000000)
  633. frac = frac_table_810m;
  634. else
  635. frac = frac_table_675m;
  636. for (; frac->num; frac++) {
  637. request = req->rate;
  638. request *= frac->den;
  639. request = div_s64(request, frac->num);
  640. if ((req->best_parent_rate < (request - delta)) ||
  641. (req->best_parent_rate > (request + delta)))
  642. continue;
  643. regmap_read(rcg->clkr.regmap, rcg->cmd_rcgr + CFG_REG,
  644. &hid_div);
  645. hid_div >>= CFG_SRC_DIV_SHIFT;
  646. hid_div &= mask;
  647. req->rate = calc_rate(req->best_parent_rate,
  648. frac->num, frac->den,
  649. !!frac->den, hid_div);
  650. return 0;
  651. }
  652. return -EINVAL;
  653. }
/* Ops for the eDP pixel clock (fractional M/N from the link-rate tables). */
const struct clk_ops clk_edp_pixel_ops = {
	.is_enabled = clk_rcg2_is_enabled,
	.get_parent = clk_rcg2_get_parent,
	.set_parent = clk_rcg2_set_parent,
	.recalc_rate = clk_rcg2_recalc_rate,
	.set_rate = clk_edp_pixel_set_rate,
	.set_rate_and_parent = clk_edp_pixel_set_rate_and_parent,
	.determine_rate = clk_edp_pixel_determine_rate,
};
EXPORT_SYMBOL_GPL(clk_edp_pixel_ops);
/*
 * Determine the byte clock rate: force the parent named by the frequency
 * table, round the parent as close as possible to the request, and derive
 * the HID divider (field encodes 2*div - 1).
 */
static int clk_byte_determine_rate(struct clk_hw *hw,
				   struct clk_rate_request *req)
{
	struct clk_rcg2 *rcg = to_clk_rcg2(hw);
	const struct freq_tbl *f = rcg->freq_tbl;
	int index = qcom_find_src_index(hw, rcg->parent_map, f->src);
	unsigned long parent_rate, div;
	u32 mask = BIT(rcg->hid_width) - 1;
	struct clk_hw *p;

	if (req->rate == 0)
		return -EINVAL;

	/*
	 * NOTE(review): index is not checked for < 0 — presumably the freq
	 * table always names a mappable source; confirm before relying on it.
	 */
	req->best_parent_hw = p = clk_hw_get_parent_by_index(hw, index);
	req->best_parent_rate = parent_rate = clk_hw_round_rate(p, req->rate);

	div = DIV_ROUND_UP((2 * parent_rate), req->rate) - 1;
	div = min_t(u32, div, mask);

	req->rate = calc_rate(parent_rate, 0, 0, 0, div);

	return 0;
}
  682. static int clk_byte_set_rate(struct clk_hw *hw, unsigned long rate,
  683. unsigned long parent_rate)
  684. {
  685. struct clk_rcg2 *rcg = to_clk_rcg2(hw);
  686. struct freq_tbl f = *rcg->freq_tbl;
  687. unsigned long div;
  688. u32 mask = BIT(rcg->hid_width) - 1;
  689. div = DIV_ROUND_UP((2 * parent_rate), rate) - 1;
  690. div = min_t(u32, div, mask);
  691. f.pre_div = div;
  692. return clk_rcg2_configure(rcg, &f);
  693. }
static int clk_byte_set_rate_and_parent(struct clk_hw *hw,
		unsigned long rate, unsigned long parent_rate, u8 index)
{
	/* Parent index is set statically in frequency table */
	return clk_byte_set_rate(hw, rate, parent_rate);
}
/* Ops for the DSI byte clock (HID divider only, parent fixed by table). */
const struct clk_ops clk_byte_ops = {
	.is_enabled = clk_rcg2_is_enabled,
	.get_parent = clk_rcg2_get_parent,
	.set_parent = clk_rcg2_set_parent,
	.recalc_rate = clk_rcg2_recalc_rate,
	.set_rate = clk_byte_set_rate,
	.set_rate_and_parent = clk_byte_set_rate_and_parent,
	.determine_rate = clk_byte_determine_rate,
};
EXPORT_SYMBOL_GPL(clk_byte_ops);
/*
 * Like clk_byte_determine_rate() but uses the parent already selected in
 * @req instead of forcing the one named by the frequency table.
 */
static int clk_byte2_determine_rate(struct clk_hw *hw,
				    struct clk_rate_request *req)
{
	struct clk_rcg2 *rcg = to_clk_rcg2(hw);
	unsigned long parent_rate, div;
	u32 mask = BIT(rcg->hid_width) - 1;
	struct clk_hw *p;
	unsigned long rate = req->rate;

	if (rate == 0)
		return -EINVAL;

	p = req->best_parent_hw;
	req->best_parent_rate = parent_rate = clk_hw_round_rate(p, rate);

	/* HID divider field encodes 2*div - 1; cap to its bit width. */
	div = DIV_ROUND_UP((2 * parent_rate), rate) - 1;
	div = min_t(u32, div, mask);

	req->rate = calc_rate(parent_rate, 0, 0, 0, div);

	return 0;
}
  727. static int clk_byte2_set_rate(struct clk_hw *hw, unsigned long rate,
  728. unsigned long parent_rate)
  729. {
  730. struct clk_rcg2 *rcg = to_clk_rcg2(hw);
  731. struct freq_tbl f = { 0 };
  732. unsigned long div;
  733. int i, num_parents = clk_hw_get_num_parents(hw);
  734. u32 mask = BIT(rcg->hid_width) - 1;
  735. u32 cfg;
  736. div = DIV_ROUND_UP((2 * parent_rate), rate) - 1;
  737. div = min_t(u32, div, mask);
  738. f.pre_div = div;
  739. regmap_read(rcg->clkr.regmap, rcg->cmd_rcgr + CFG_REG, &cfg);
  740. cfg &= CFG_SRC_SEL_MASK;
  741. cfg >>= CFG_SRC_SEL_SHIFT;
  742. for (i = 0; i < num_parents; i++) {
  743. if (cfg == rcg->parent_map[i].cfg) {
  744. f.src = rcg->parent_map[i].src;
  745. return clk_rcg2_configure(rcg, &f);
  746. }
  747. }
  748. return -EINVAL;
  749. }
static int clk_byte2_set_rate_and_parent(struct clk_hw *hw,
		unsigned long rate, unsigned long parent_rate, u8 index)
{
	/* Read the hardware to determine parent during set_rate */
	return clk_byte2_set_rate(hw, rate, parent_rate);
}
/* Ops for byte2 clocks: like clk_byte_ops but the parent comes from CFG. */
const struct clk_ops clk_byte2_ops = {
	.is_enabled = clk_rcg2_is_enabled,
	.get_parent = clk_rcg2_get_parent,
	.set_parent = clk_rcg2_set_parent,
	.recalc_rate = clk_rcg2_recalc_rate,
	.set_rate = clk_byte2_set_rate,
	.set_rate_and_parent = clk_byte2_set_rate_and_parent,
	.determine_rate = clk_byte2_determine_rate,
};
EXPORT_SYMBOL_GPL(clk_byte2_ops);
/* M/N fractions tried, in order, when deriving a pixel clock rate */
static const struct frac_entry frac_table_pixel[] = {
	{ 3, 8 },
	{ 2, 9 },
	{ 4, 9 },
	{ 1, 1 },
	{ 2, 3 },
	{ }	/* sentinel: num == 0 terminates the table */
};
  774. static int clk_pixel_determine_rate(struct clk_hw *hw,
  775. struct clk_rate_request *req)
  776. {
  777. unsigned long request, src_rate;
  778. int delta = 100000;
  779. const struct frac_entry *frac = frac_table_pixel;
  780. for (; frac->num; frac++) {
  781. request = (req->rate * frac->den) / frac->num;
  782. src_rate = clk_hw_round_rate(req->best_parent_hw, request);
  783. if ((src_rate < (request - delta)) ||
  784. (src_rate > (request + delta)))
  785. continue;
  786. req->best_parent_rate = src_rate;
  787. req->rate = (src_rate * frac->num) / frac->den;
  788. return 0;
  789. }
  790. return -EINVAL;
  791. }
/*
 * Program the pixel RCG: keep the source currently selected in hardware
 * (read back from CFG), keep the current pre-divider, and use the first
 * M/N fraction that matches @parent_rate within 100 kHz.
 */
static int clk_pixel_set_rate(struct clk_hw *hw, unsigned long rate,
		unsigned long parent_rate)
{
	struct clk_rcg2 *rcg = to_clk_rcg2(hw);
	struct freq_tbl f = { 0 };
	const struct frac_entry *frac = frac_table_pixel;
	unsigned long request;
	int delta = 100000;
	u32 mask = BIT(rcg->hid_width) - 1;
	u32 hid_div, cfg;
	int i, num_parents = clk_hw_get_num_parents(hw);

	/* Determine the current parent from the hardware, not the framework */
	regmap_read(rcg->clkr.regmap, rcg->cmd_rcgr + CFG_REG, &cfg);
	cfg &= CFG_SRC_SEL_MASK;
	cfg >>= CFG_SRC_SEL_SHIFT;

	for (i = 0; i < num_parents; i++)
		if (cfg == rcg->parent_map[i].cfg) {
			f.src = rcg->parent_map[i].src;
			break;
		}

	for (; frac->num; frac++) {
		request = (rate * frac->den) / frac->num;

		/* Only accept a fraction matching parent_rate within 100 kHz */
		if ((parent_rate < (request - delta)) ||
			(parent_rate > (request + delta)))
			continue;

		/* Preserve the pre-divider currently programmed in CFG */
		regmap_read(rcg->clkr.regmap, rcg->cmd_rcgr + CFG_REG,
				&hid_div);
		f.pre_div = hid_div;
		f.pre_div >>= CFG_SRC_DIV_SHIFT;
		f.pre_div &= mask;
		f.m = frac->num;
		f.n = frac->den;

		return clk_rcg2_configure(rcg, &f);
	}
	return -EINVAL;
}
  827. static int clk_pixel_set_rate_and_parent(struct clk_hw *hw, unsigned long rate,
  828. unsigned long parent_rate, u8 index)
  829. {
  830. return clk_pixel_set_rate(hw, rate, parent_rate);
  831. }
/* Ops for MDSS pixel clocks driven by the frac_table_pixel M/N table */
const struct clk_ops clk_pixel_ops = {
	.is_enabled = clk_rcg2_is_enabled,
	.get_parent = clk_rcg2_get_parent,
	.set_parent = clk_rcg2_set_parent,
	.recalc_rate = clk_rcg2_recalc_rate,
	.set_rate = clk_pixel_set_rate,
	.set_rate_and_parent = clk_pixel_set_rate_and_parent,
	.determine_rate = clk_pixel_determine_rate,
};
EXPORT_SYMBOL_GPL(clk_pixel_ops);
/*
 * Choose a parent for the GFX3D RCG. Rates are ping-ponged between the
 * two variable PLLs (p1/p2) so the PLL currently feeding the RCG is
 * never reprogrammed; the XO and the fixed-rate PLL (p0) are used
 * directly when they already match the request.
 */
static int clk_gfx3d_determine_rate(struct clk_hw *hw,
				    struct clk_rate_request *req)
{
	struct clk_rate_request parent_req = { .min_rate = 0, .max_rate = ULONG_MAX };
	struct clk_rcg2_gfx3d *cgfx = to_clk_rcg2_gfx3d(hw);
	struct clk_hw *xo, *p0, *p1, *p2;
	unsigned long p0_rate;
	u8 mux_div = cgfx->div;
	int ret;

	p0 = cgfx->hws[0];
	p1 = cgfx->hws[1];
	p2 = cgfx->hws[2];
	/*
	 * This function does ping-pong the RCG between PLLs: if we don't
	 * have at least one fixed PLL and two variable ones,
	 * then it's not going to work correctly.
	 */
	if (WARN_ON(!p0 || !p1 || !p2))
		return -EINVAL;

	/* The XO rate is taken as-is, without applying the divider */
	xo = clk_hw_get_parent_by_index(hw, 0);
	if (req->rate == clk_hw_get_rate(xo)) {
		req->best_parent_hw = xo;
		return 0;
	}

	/* div == 0 means no divider; treat it as divide-by-one */
	if (mux_div == 0)
		mux_div = 1;

	parent_req.rate = req->rate * mux_div;

	/* This has to be a fixed rate PLL */
	p0_rate = clk_hw_get_rate(p0);

	if (parent_req.rate == p0_rate) {
		req->rate = req->best_parent_rate = p0_rate;
		req->best_parent_hw = p0;
		return 0;
	}

	if (req->best_parent_hw == p0) {
		/* Are we going back to a previously used rate? */
		if (clk_hw_get_rate(p2) == parent_req.rate)
			req->best_parent_hw = p2;
		else
			req->best_parent_hw = p1;
	} else if (req->best_parent_hw == p2) {
		req->best_parent_hw = p1;
	} else {
		req->best_parent_hw = p2;
	}

	/* Intersect the chosen parent's rate range with the request's */
	clk_hw_get_rate_range(req->best_parent_hw,
			      &parent_req.min_rate, &parent_req.max_rate);

	if (req->min_rate > parent_req.min_rate)
		parent_req.min_rate = req->min_rate;

	if (req->max_rate < parent_req.max_rate)
		parent_req.max_rate = req->max_rate;

	ret = __clk_determine_rate(req->best_parent_hw, &parent_req);
	if (ret)
		return ret;

	req->rate = req->best_parent_rate = parent_req.rate;
	req->rate /= mux_div;

	return 0;
}
/*
 * Switch the GFX3D RCG to parent @index, program the fixed divider
 * (2 * div - 1 register encoding) when one is configured, then latch
 * the new configuration through the CMD update bit.
 */
static int clk_gfx3d_set_rate_and_parent(struct clk_hw *hw, unsigned long rate,
		unsigned long parent_rate, u8 index)
{
	struct clk_rcg2_gfx3d *cgfx = to_clk_rcg2_gfx3d(hw);
	struct clk_rcg2 *rcg = &cgfx->rcg;
	u32 cfg;
	int ret;

	cfg = rcg->parent_map[index].cfg << CFG_SRC_SEL_SHIFT;
	/* On some targets, the GFX3D RCG may need to divide PLL frequency */
	if (cgfx->div > 1)
		cfg |= ((2 * cgfx->div) - 1) << CFG_SRC_DIV_SHIFT;

	ret = regmap_write(rcg->clkr.regmap, rcg->cmd_rcgr + CFG_REG, cfg);
	if (ret)
		return ret;

	return update_config(rcg);
}
  916. static int clk_gfx3d_set_rate(struct clk_hw *hw, unsigned long rate,
  917. unsigned long parent_rate)
  918. {
  919. /*
  920. * We should never get here; clk_gfx3d_determine_rate() should always
  921. * make us use a different parent than what we're currently using, so
  922. * clk_gfx3d_set_rate_and_parent() should always be called.
  923. */
  924. return 0;
  925. }
/* Ops for the GFX3D RCG with PLL ping-pong parent selection */
const struct clk_ops clk_gfx3d_ops = {
	.is_enabled = clk_rcg2_is_enabled,
	.get_parent = clk_rcg2_get_parent,
	.set_parent = clk_rcg2_set_parent,
	.recalc_rate = clk_rcg2_recalc_rate,
	.set_rate = clk_gfx3d_set_rate,
	.set_rate_and_parent = clk_gfx3d_set_rate_and_parent,
	.determine_rate = clk_gfx3d_determine_rate,
};
EXPORT_SYMBOL_GPL(clk_gfx3d_ops);
/*
 * Set the CMD_ROOT_EN (force enable) bit and poll until the RCG reports
 * itself enabled; gives up after ~500us with -ETIMEDOUT.
 */
static int clk_rcg2_set_force_enable(struct clk_hw *hw)
{
	struct clk_rcg2 *rcg = to_clk_rcg2(hw);
	const char *name = clk_hw_get_name(hw);
	int ret, count;

	ret = regmap_update_bits(rcg->clkr.regmap, rcg->cmd_rcgr + CMD_REG,
				 CMD_ROOT_EN, CMD_ROOT_EN);
	if (ret)
		return ret;

	/* wait for RCG to turn ON */
	for (count = 500; count > 0; count--) {
		if (clk_rcg2_is_enabled(hw))
			return 0;

		udelay(1);
	}

	pr_err("%s: RCG did not turn on\n", name);
	return -ETIMEDOUT;
}
  954. static int clk_rcg2_clear_force_enable(struct clk_hw *hw)
  955. {
  956. struct clk_rcg2 *rcg = to_clk_rcg2(hw);
  957. return regmap_update_bits(rcg->clkr.regmap, rcg->cmd_rcgr + CMD_REG,
  958. CMD_ROOT_EN, 0);
  959. }
/*
 * Apply frequency table entry @f with the RCG force-enabled, so the
 * configuration update completes even with no downstream branch active,
 * then hand control back to the hardware feedback mechanism.
 */
static int
clk_rcg2_shared_force_enable_clear(struct clk_hw *hw, const struct freq_tbl *f)
{
	struct clk_rcg2 *rcg = to_clk_rcg2(hw);
	int ret;

	ret = clk_rcg2_set_force_enable(hw);
	if (ret)
		return ret;

	ret = clk_rcg2_configure(rcg, f);
	if (ret)
		return ret;

	return clk_rcg2_clear_force_enable(hw);
}
  973. static int __clk_rcg2_shared_set_rate(struct clk_hw *hw, unsigned long rate,
  974. unsigned long parent_rate,
  975. enum freq_policy policy)
  976. {
  977. struct clk_rcg2 *rcg = to_clk_rcg2(hw);
  978. const struct freq_tbl *f;
  979. switch (policy) {
  980. case FLOOR:
  981. f = qcom_find_freq_floor(rcg->freq_tbl, rate);
  982. break;
  983. case CEIL:
  984. f = qcom_find_freq(rcg->freq_tbl, rate);
  985. break;
  986. default:
  987. return -EINVAL;
  988. }
  989. /*
  990. * In case clock is disabled, update the M, N and D registers, cache
  991. * the CFG value in parked_cfg and don't hit the update bit of CMD
  992. * register.
  993. */
  994. if (!clk_hw_is_enabled(hw))
  995. return __clk_rcg2_configure(rcg, f, &rcg->parked_cfg);
  996. return clk_rcg2_shared_force_enable_clear(hw, f);
  997. }
  998. static int clk_rcg2_shared_set_rate(struct clk_hw *hw, unsigned long rate,
  999. unsigned long parent_rate)
  1000. {
  1001. return __clk_rcg2_shared_set_rate(hw, rate, parent_rate, CEIL);
  1002. }
  1003. static int clk_rcg2_shared_set_rate_and_parent(struct clk_hw *hw,
  1004. unsigned long rate, unsigned long parent_rate, u8 index)
  1005. {
  1006. return __clk_rcg2_shared_set_rate(hw, rate, parent_rate, CEIL);
  1007. }
  1008. static int clk_rcg2_shared_set_floor_rate(struct clk_hw *hw, unsigned long rate,
  1009. unsigned long parent_rate)
  1010. {
  1011. return __clk_rcg2_shared_set_rate(hw, rate, parent_rate, FLOOR);
  1012. }
  1013. static int clk_rcg2_shared_set_floor_rate_and_parent(struct clk_hw *hw,
  1014. unsigned long rate, unsigned long parent_rate, u8 index)
  1015. {
  1016. return __clk_rcg2_shared_set_rate(hw, rate, parent_rate, FLOOR);
  1017. }
/*
 * Enable a shared RCG: force-enable it, restore the configuration that
 * was cached while it was parked, latch it, then return control to the
 * hardware feedback mechanism.
 */
static int clk_rcg2_shared_enable(struct clk_hw *hw)
{
	struct clk_rcg2 *rcg = to_clk_rcg2(hw);
	int ret;

	/*
	 * Set the update bit because required configuration has already
	 * been written in clk_rcg2_shared_set_rate()
	 */
	ret = clk_rcg2_set_force_enable(hw);
	if (ret)
		return ret;

	/* Write back the stored configuration corresponding to current rate */
	ret = regmap_write(rcg->clkr.regmap, rcg->cmd_rcgr + CFG_REG, rcg->parked_cfg);
	if (ret)
		return ret;

	ret = update_config(rcg);
	if (ret)
		return ret;

	return clk_rcg2_clear_force_enable(hw);
}
/*
 * Disable a shared RCG by parking it on the always-on safe source. The
 * live configuration is snapshotted into parked_cfg first so that
 * clk_rcg2_shared_enable() can restore it.
 */
static void clk_rcg2_shared_disable(struct clk_hw *hw)
{
	struct clk_rcg2 *rcg = to_clk_rcg2(hw);

	/*
	 * Store current configuration as switching to safe source would clear
	 * the SRC and DIV of CFG register
	 */
	regmap_read(rcg->clkr.regmap, rcg->cmd_rcgr + CFG_REG, &rcg->parked_cfg);

	/*
	 * Park the RCG at a safe configuration - sourced off of safe source.
	 * Force enable and disable the RCG while configuring it to safeguard
	 * against any update signal coming from the downstream clock.
	 * The current parent is still prepared and enabled at this point, and
	 * the safe source is always on while application processor subsystem
	 * is online. Therefore, the RCG can safely switch its parent.
	 */
	clk_rcg2_set_force_enable(hw);

	regmap_write(rcg->clkr.regmap, rcg->cmd_rcgr + CFG_REG,
		     rcg->safe_src_index << CFG_SRC_SEL_SHIFT);

	update_config(rcg);

	clk_rcg2_clear_force_enable(hw);
}
  1060. static u8 clk_rcg2_shared_get_parent(struct clk_hw *hw)
  1061. {
  1062. struct clk_rcg2 *rcg = to_clk_rcg2(hw);
  1063. /* If the shared rcg is parked use the cached cfg instead */
  1064. if (!clk_hw_is_enabled(hw))
  1065. return __clk_rcg2_get_parent(hw, rcg->parked_cfg);
  1066. return clk_rcg2_get_parent(hw);
  1067. }
  1068. static int clk_rcg2_shared_set_parent(struct clk_hw *hw, u8 index)
  1069. {
  1070. struct clk_rcg2 *rcg = to_clk_rcg2(hw);
  1071. /* If the shared rcg is parked only update the cached cfg */
  1072. if (!clk_hw_is_enabled(hw)) {
  1073. rcg->parked_cfg &= ~CFG_SRC_SEL_MASK;
  1074. rcg->parked_cfg |= rcg->parent_map[index].cfg << CFG_SRC_SEL_SHIFT;
  1075. return 0;
  1076. }
  1077. return clk_rcg2_set_parent(hw, index);
  1078. }
  1079. static unsigned long
  1080. clk_rcg2_shared_recalc_rate(struct clk_hw *hw, unsigned long parent_rate)
  1081. {
  1082. struct clk_rcg2 *rcg = to_clk_rcg2(hw);
  1083. /* If the shared rcg is parked use the cached cfg instead */
  1084. if (!clk_hw_is_enabled(hw))
  1085. return __clk_rcg2_recalc_rate(hw, parent_rate, rcg->parked_cfg);
  1086. return clk_rcg2_recalc_rate(hw, parent_rate);
  1087. }
/* init op for shared RCGs: park the clk safely at registration time */
static int clk_rcg2_shared_init(struct clk_hw *hw)
{
	/*
	 * This does a few things:
	 *
	 *  1. Sets rcg->parked_cfg to reflect the value at probe so that the
	 *     proper parent is reported from clk_rcg2_shared_get_parent().
	 *
	 *  2. Clears the force enable bit of the RCG because we rely on child
	 *     clks (branches) to turn the RCG on/off with a hardware feedback
	 *     mechanism and only set the force enable bit in the RCG when we
	 *     want to make sure the clk stays on for parent switches or
	 *     parking.
	 *
	 *  3. Parks shared RCGs on the safe source at registration because we
	 *     can't be certain that the parent clk will stay on during boot,
	 *     especially if the parent is shared. If this RCG is enabled at
	 *     boot, and the parent is turned off, the RCG will get stuck on. A
	 *     GDSC can wedge if is turned on and the RCG is stuck on because
	 *     the GDSC's controller will hang waiting for the clk status to
	 *     toggle on when it never does.
	 *
	 * The safest option here is to "park" the RCG at init so that the clk
	 * can never get stuck on or off. This ensures the GDSC can't get
	 * wedged.
	 */
	clk_rcg2_shared_disable(hw);

	/* Nothing above can fail, so registration always proceeds */
	return 0;
}
/* Ops for shared RCGs that park on the safe source when disabled */
const struct clk_ops clk_rcg2_shared_ops = {
	.init = clk_rcg2_shared_init,
	.enable = clk_rcg2_shared_enable,
	.disable = clk_rcg2_shared_disable,
	.get_parent = clk_rcg2_shared_get_parent,
	.set_parent = clk_rcg2_shared_set_parent,
	.recalc_rate = clk_rcg2_shared_recalc_rate,
	.determine_rate = clk_rcg2_determine_rate,
	.set_rate = clk_rcg2_shared_set_rate,
	.set_rate_and_parent = clk_rcg2_shared_set_rate_and_parent,
};
EXPORT_SYMBOL_GPL(clk_rcg2_shared_ops);
/* Like clk_rcg2_shared_ops but with FLOOR rate rounding and no init park */
const struct clk_ops clk_rcg2_shared_floor_ops = {
	.enable = clk_rcg2_shared_enable,
	.disable = clk_rcg2_shared_disable,
	.get_parent = clk_rcg2_shared_get_parent,
	.set_parent = clk_rcg2_shared_set_parent,
	.recalc_rate = clk_rcg2_shared_recalc_rate,
	.determine_rate = clk_rcg2_determine_floor_rate,
	.set_rate = clk_rcg2_shared_set_floor_rate,
	.set_rate_and_parent = clk_rcg2_shared_set_floor_rate_and_parent,
};
EXPORT_SYMBOL_GPL(clk_rcg2_shared_floor_ops);
/*
 * init op used when the boot-time frequency must be left untouched:
 * only snapshot the live CFG into parked_cfg, don't park the RCG.
 */
static int clk_rcg2_shared_no_init_park(struct clk_hw *hw)
{
	struct clk_rcg2 *rcg = to_clk_rcg2(hw);

	/*
	 * Read the config register so that the parent is properly mapped at
	 * registration time.
	 */
	regmap_read(rcg->clkr.regmap, rcg->cmd_rcgr + CFG_REG, &rcg->parked_cfg);

	return 0;
}
/*
 * Like clk_rcg2_shared_ops but skip the init so that the clk frequency is left
 * unchanged at registration time.
 */
const struct clk_ops clk_rcg2_shared_no_init_park_ops = {
	.init = clk_rcg2_shared_no_init_park,
	.enable = clk_rcg2_shared_enable,
	.disable = clk_rcg2_shared_disable,
	.get_parent = clk_rcg2_shared_get_parent,
	.set_parent = clk_rcg2_shared_set_parent,
	.recalc_rate = clk_rcg2_shared_recalc_rate,
	.determine_rate = clk_rcg2_determine_rate,
	.set_rate = clk_rcg2_shared_set_rate,
	.set_rate_and_parent = clk_rcg2_shared_set_rate_and_parent,
};
EXPORT_SYMBOL_GPL(clk_rcg2_shared_no_init_park_ops);
/* Common APIs to be used for DFS based RCGR */

/*
 * Decode DFS perf level @l of @hw's RCG into freq_tbl entry @f by
 * reading the per-level DFSR (and, in MND mode, M/N) registers.
 */
static void clk_rcg2_dfs_populate_freq(struct clk_hw *hw, unsigned int l,
				       struct freq_tbl *f)
{
	struct clk_rcg2 *rcg = to_clk_rcg2(hw);
	struct clk_hw *p;
	unsigned long prate = 0;
	u32 val, mask, cfg, mode, src;
	int i, num_parents;

	regmap_read(rcg->clkr.regmap, rcg->cmd_rcgr + SE_PERF_DFSR(l), &cfg);

	/* A zero divider field means divide-by-one */
	mask = BIT(rcg->hid_width) - 1;
	f->pre_div = 1;
	if (cfg & mask)
		f->pre_div = cfg & mask;

	src = cfg & CFG_SRC_SEL_MASK;
	src >>= CFG_SRC_SEL_SHIFT;

	/* Resolve the hardware source select to a parent and its rate */
	num_parents = clk_hw_get_num_parents(hw);
	for (i = 0; i < num_parents; i++) {
		if (src == rcg->parent_map[i].cfg) {
			f->src = rcg->parent_map[i].src;
			p = clk_hw_get_parent_by_index(&rcg->clkr.hw, i);
			prate = clk_hw_get_rate(p);
		}
	}

	mode = cfg & CFG_MODE_MASK;
	mode >>= CFG_MODE_SHIFT;
	if (mode) {
		/* MND mode: the N register holds ~(N - M) */
		mask = BIT(rcg->mnd_width) - 1;
		regmap_read(rcg->clkr.regmap, rcg->cmd_rcgr + SE_PERF_M_DFSR(l),
			    &val);
		val &= mask;
		f->m = val;

		regmap_read(rcg->clkr.regmap, rcg->cmd_rcgr + SE_PERF_N_DFSR(l),
			    &val);
		val = ~val;
		val &= mask;
		val += f->m;
		f->n = val;
	}

	f->freq = calc_rate(prate, f->m, f->n, mode, f->pre_div);
}
  1207. static int clk_rcg2_dfs_populate_freq_table(struct clk_rcg2 *rcg)
  1208. {
  1209. struct freq_tbl *freq_tbl;
  1210. int i;
  1211. /* Allocate space for 1 extra since table is NULL terminated */
  1212. freq_tbl = kcalloc(MAX_PERF_LEVEL + 1, sizeof(*freq_tbl), GFP_KERNEL);
  1213. if (!freq_tbl)
  1214. return -ENOMEM;
  1215. rcg->freq_tbl = freq_tbl;
  1216. for (i = 0; i < MAX_PERF_LEVEL; i++)
  1217. clk_rcg2_dfs_populate_freq(&rcg->clkr.hw, i, freq_tbl + i);
  1218. return 0;
  1219. }
  1220. static int clk_rcg2_dfs_determine_rate(struct clk_hw *hw,
  1221. struct clk_rate_request *req)
  1222. {
  1223. struct clk_rcg2 *rcg = to_clk_rcg2(hw);
  1224. int ret;
  1225. if (!rcg->freq_tbl) {
  1226. ret = clk_rcg2_dfs_populate_freq_table(rcg);
  1227. if (ret) {
  1228. pr_err("Failed to update DFS tables for %s\n",
  1229. clk_hw_get_name(hw));
  1230. return ret;
  1231. }
  1232. }
  1233. return clk_rcg2_determine_rate(hw, req);
  1234. }
/*
 * Report the rate of a DFS-controlled RCG. The active perf level is read
 * from the DFSR status register; if the frequency table is populated the
 * cached rate for that level is returned, otherwise the level's DFSR
 * registers are decoded against @parent_rate.
 */
static unsigned long
clk_rcg2_dfs_recalc_rate(struct clk_hw *hw, unsigned long parent_rate)
{
	struct clk_rcg2 *rcg = to_clk_rcg2(hw);
	u32 level, mask, cfg, m = 0, n = 0, mode, pre_div;

	regmap_read(rcg->clkr.regmap,
		    rcg->cmd_rcgr + SE_CMD_DFSR_OFFSET, &level);
	/* Current perf level lives in bits [4:1] of the DFSR register */
	level &= GENMASK(4, 1);
	level >>= 1;

	if (rcg->freq_tbl)
		return rcg->freq_tbl[level].freq;

	/*
	 * Assume that parent_rate is actually the parent because
	 * we can't do any better at figuring it out when the table
	 * hasn't been populated yet. We only populate the table
	 * in determine_rate because we can't guarantee the parents
	 * will be registered with the framework until then.
	 */
	regmap_read(rcg->clkr.regmap, rcg->cmd_rcgr + SE_PERF_DFSR(level),
		    &cfg);

	/* A zero divider field means divide-by-one */
	mask = BIT(rcg->hid_width) - 1;
	pre_div = 1;
	if (cfg & mask)
		pre_div = cfg & mask;

	mode = cfg & CFG_MODE_MASK;
	mode >>= CFG_MODE_SHIFT;
	if (mode) {
		/* MND mode: the N register holds ~(N - M) */
		mask = BIT(rcg->mnd_width) - 1;
		regmap_read(rcg->clkr.regmap,
			    rcg->cmd_rcgr + SE_PERF_M_DFSR(level), &m);
		m &= mask;

		regmap_read(rcg->clkr.regmap,
			    rcg->cmd_rcgr + SE_PERF_N_DFSR(level), &n);
		n = ~n;
		n &= mask;
		n += m;
	}

	return calc_rate(parent_rate, m, n, mode, pre_div);
}
/* Ops installed on an RCG once hardware DFS is detected as enabled */
static const struct clk_ops clk_rcg2_dfs_ops = {
	.is_enabled = clk_rcg2_is_enabled,
	.get_parent = clk_rcg2_get_parent,
	.determine_rate = clk_rcg2_dfs_determine_rate,
	.recalc_rate = clk_rcg2_dfs_recalc_rate,
};
  1280. static int clk_rcg2_enable_dfs(const struct clk_rcg_dfs_data *data,
  1281. struct regmap *regmap)
  1282. {
  1283. struct clk_rcg2 *rcg = data->rcg;
  1284. struct clk_init_data *init = data->init;
  1285. u32 val;
  1286. int ret;
  1287. ret = regmap_read(regmap, rcg->cmd_rcgr + SE_CMD_DFSR_OFFSET, &val);
  1288. if (ret)
  1289. return -EINVAL;
  1290. if (!(val & SE_CMD_DFS_EN))
  1291. return 0;
  1292. /*
  1293. * Rate changes with consumer writing a register in
  1294. * their own I/O region
  1295. */
  1296. init->flags |= CLK_GET_RATE_NOCACHE;
  1297. init->ops = &clk_rcg2_dfs_ops;
  1298. rcg->freq_tbl = NULL;
  1299. return 0;
  1300. }
  1301. int qcom_cc_register_rcg_dfs(struct regmap *regmap,
  1302. const struct clk_rcg_dfs_data *rcgs, size_t len)
  1303. {
  1304. int i, ret;
  1305. for (i = 0; i < len; i++) {
  1306. ret = clk_rcg2_enable_dfs(&rcgs[i], regmap);
  1307. if (ret)
  1308. return ret;
  1309. }
  1310. return 0;
  1311. }
  1312. EXPORT_SYMBOL_GPL(qcom_cc_register_rcg_dfs);
/*
 * Program the DP pixel RCG for @rate: M/N come from the best rational
 * approximation of rate/parent_rate that fits the MND counter width;
 * the source and pre-divider currently in CFG are preserved.
 */
static int clk_rcg2_dp_set_rate(struct clk_hw *hw, unsigned long rate,
			unsigned long parent_rate)
{
	struct clk_rcg2 *rcg = to_clk_rcg2(hw);
	struct freq_tbl f = { 0 };
	u32 mask = BIT(rcg->hid_width) - 1;
	u32 hid_div, cfg;
	int i, num_parents = clk_hw_get_num_parents(hw);
	unsigned long num, den;

	/* parent/rate ~= den/num, so rate ~= parent * num / den */
	rational_best_approximation(parent_rate, rate,
			GENMASK(rcg->mnd_width - 1, 0),
			GENMASK(rcg->mnd_width - 1, 0), &den, &num);

	if (!num || !den)
		return -EINVAL;

	regmap_read(rcg->clkr.regmap, rcg->cmd_rcgr + CFG_REG, &cfg);
	hid_div = cfg;
	cfg &= CFG_SRC_SEL_MASK;
	cfg >>= CFG_SRC_SEL_SHIFT;

	/* Keep the source currently selected in hardware */
	for (i = 0; i < num_parents; i++) {
		if (cfg == rcg->parent_map[i].cfg) {
			f.src = rcg->parent_map[i].src;
			break;
		}
	}

	/* Keep the pre-divider currently programmed in hardware */
	f.pre_div = hid_div;
	f.pre_div >>= CFG_SRC_DIV_SHIFT;
	f.pre_div &= mask;

	/* num == den is a 1:1 ratio - leave the MND counter disabled */
	if (num != den) {
		f.m = num;
		f.n = den;
	} else {
		f.m = 0;
		f.n = 0;
	}

	return clk_rcg2_configure(rcg, &f);
}
  1349. static int clk_rcg2_dp_set_rate_and_parent(struct clk_hw *hw,
  1350. unsigned long rate, unsigned long parent_rate, u8 index)
  1351. {
  1352. return clk_rcg2_dp_set_rate(hw, rate, parent_rate);
  1353. }
/*
 * Round the requested rate to what the fixed DP PHY link parent can
 * generate through the M/N counter: rate = parent * num / den, with num
 * and den bounded by the MND field width.
 */
static int clk_rcg2_dp_determine_rate(struct clk_hw *hw,
				struct clk_rate_request *req)
{
	struct clk_rcg2 *rcg = to_clk_rcg2(hw);
	unsigned long num, den;
	u64 tmp;

	/* Parent rate is a fixed phy link rate */
	rational_best_approximation(req->best_parent_rate, req->rate,
			GENMASK(rcg->mnd_width - 1, 0),
			GENMASK(rcg->mnd_width - 1, 0), &den, &num);

	if (!num || !den)
		return -EINVAL;

	/* do_div() needs a u64 dividend; it divides tmp by den in place */
	tmp = req->best_parent_rate * num;
	do_div(tmp, den);
	req->rate = tmp;

	return 0;
}
/* Ops for DP pixel clocks fed by a fixed PHY link rate */
const struct clk_ops clk_dp_ops = {
	.is_enabled = clk_rcg2_is_enabled,
	.get_parent = clk_rcg2_get_parent,
	.set_parent = clk_rcg2_set_parent,
	.recalc_rate = clk_rcg2_recalc_rate,
	.set_rate = clk_rcg2_dp_set_rate,
	.set_rate_and_parent = clk_rcg2_dp_set_rate_and_parent,
	.determine_rate = clk_rcg2_dp_determine_rate,
};
EXPORT_SYMBOL_GPL(clk_dp_ops);