/* clk-stm32h7.c — STM32H7 Reset and Clock Control (RCC) driver */
  1. // SPDX-License-Identifier: GPL-2.0
  2. /*
  3. * Copyright (C) STMicroelectronics 2017
  4. * Author: Gabriel Fernandez <gabriel.fernandez@st.com> for STMicroelectronics.
  5. */
  6. #include <linux/clk.h>
  7. #include <linux/clk-provider.h>
  8. #include <linux/err.h>
  9. #include <linux/io.h>
  10. #include <linux/mfd/syscon.h>
  11. #include <linux/of.h>
  12. #include <linux/of_address.h>
  13. #include <linux/slab.h>
  14. #include <linux/spinlock.h>
  15. #include <linux/regmap.h>
  16. #include <dt-bindings/clock/stm32h7-clks.h>
/* Reset Clock Control Registers (offsets from the RCC base) */
#define RCC_CR 0x00
#define RCC_CFGR 0x10
/* Per-domain prescaler configuration */
#define RCC_D1CFGR 0x18
#define RCC_D2CFGR 0x1C
#define RCC_D3CFGR 0x20
/* PLL source selection / configuration / dividers / fractional parts */
#define RCC_PLLCKSELR 0x28
#define RCC_PLLCFGR 0x2C
#define RCC_PLL1DIVR 0x30
#define RCC_PLL1FRACR 0x34
#define RCC_PLL2DIVR 0x38
#define RCC_PLL2FRACR 0x3C
#define RCC_PLL3DIVR 0x40
#define RCC_PLL3FRACR 0x44
/* Per-domain kernel clock selection */
#define RCC_D1CCIPR 0x4C
#define RCC_D2CCIP1R 0x50
#define RCC_D2CCIP2R 0x54
#define RCC_D3CCIPR 0x58
#define RCC_BDCR 0x70
#define RCC_CSR 0x74
/* Peripheral clock enable registers (one enable bit per peripheral) */
#define RCC_AHB3ENR 0xD4
#define RCC_AHB1ENR 0xD8
#define RCC_AHB2ENR 0xDC
#define RCC_AHB4ENR 0xE0
#define RCC_APB3ENR 0xE4
#define RCC_APB1LENR 0xE8
#define RCC_APB1HENR 0xEC
#define RCC_APB2ENR 0xF0
#define RCC_APB4ENR 0xF4

/* Serializes read-modify-write accesses to the shared RCC registers */
static DEFINE_SPINLOCK(stm32rcc_lock);

/* Ioremapped RCC register base — presumably assigned at probe/init time
 * by code outside this chunk; TODO confirm.
 */
static void __iomem *base;

/* Registered clk_hw pointers, indexed by the dt-bindings clock IDs */
static struct clk_hw **hws;
/* System clock parent */
static const char * const sys_src[] = {
	"hsi_ck", "csi_ck", "hse_ck", "pll1_p" };

static const char * const tracein_src[] = {
	"hsi_ck", "csi_ck", "hse_ck", "pll1_r" };

static const char * const per_src[] = {
	"hsi_ker", "csi_ker", "hse_ck", "disabled" };

static const char * const pll_src[] = {
	"hsi_ck", "csi_ck", "hse_ck", "no clock" };

static const char * const sdmmc_src[] = { "pll1_q", "pll2_r" };

static const char * const dsi_src[] = { "ck_dsi_phy", "pll2_q" };

static const char * const qspi_src[] = {
	"hclk", "pll1_q", "pll2_r", "per_ck" };

static const char * const fmc_src[] = {
	"hclk", "pll1_q", "pll2_r", "per_ck" };

/* Kernel clock parent */
static const char * const swp_src[] = { "pclk1", "hsi_ker" };

static const char * const fdcan_src[] = { "hse_ck", "pll1_q", "pll2_q" };

static const char * const dfsdm1_src[] = { "pclk2", "sys_ck" };

static const char * const spdifrx_src[] = {
	"pll1_q", "pll2_r", "pll3_r", "hsi_ker" };

/* Slot 3 is NULL and the array is deliberately not fully const:
 * presumably filled in at probe time (external I2S clock input?) by
 * code outside this chunk — TODO confirm against the init code.
 */
static const char *spi_src1[5] = {
	"pll1_q", "pll2_p", "pll3_p", NULL, "per_ck" };

static const char * const spi_src2[] = {
	"pclk2", "pll2_q", "pll3_q", "hsi_ker", "csi_ker", "hse_ck" };

static const char * const spi_src3[] = {
	"pclk4", "pll2_q", "pll3_q", "hsi_ker", "csi_ker", "hse_ck" };

static const char * const lptim_src1[] = {
	"pclk1", "pll2_p", "pll3_r", "lse_ck", "lsi_ck", "per_ck" };

static const char * const lptim_src2[] = {
	"pclk4", "pll2_p", "pll3_r", "lse_ck", "lsi_ck", "per_ck" };

static const char * const cec_src[] = {"lse_ck", "lsi_ck", "csi_ker_div122" };

static const char * const usbotg_src[] = {"pll1_q", "pll3_q", "rc48_ck" };

/* i2c 1,2,3 src */
static const char * const i2c_src1[] = {
	"pclk1", "pll3_r", "hsi_ker", "csi_ker" };

static const char * const i2c_src2[] = {
	"pclk4", "pll3_r", "hsi_ker", "csi_ker" };

static const char * const rng_src[] = {
	"rc48_ck", "pll1_q", "lse_ck", "lsi_ck" };

/* usart 1,6 src */
static const char * const usart_src1[] = {
	"pclk2", "pll2_q", "pll3_q", "hsi_ker", "csi_ker", "lse_ck" };

/* usart 2,3,4,5,7,8 src */
static const char * const usart_src2[] = {
	"pclk1", "pll2_q", "pll3_q", "hsi_ker", "csi_ker", "lse_ck" };

/* Slot 3 is NULL — same probe-time fill-in as spi_src1; TODO confirm */
static const char *sai_src[5] = {
	"pll1_q", "pll2_p", "pll3_p", NULL, "per_ck" };

static const char * const adc_src[] = { "pll2_p", "pll3_r", "per_ck" };

/* lptim 2,3,4,5 src */
static const char * const lpuart1_src[] = {
	"pclk3", "pll2_q", "pll3_q", "csi_ker", "lse_ck" };

static const char * const hrtim_src[] = { "tim2_ker", "d1cpre" };

/* RTC clock parent */
static const char * const rtc_src[] = { "off", "lse_ck", "lsi_ck", "hse_1M" };

/* Micro-controller output clock parent */
static const char * const mco_src1[] = {
	"hsi_ck", "lse_ck", "hse_ck", "pll1_q", "rc48_ck" };

static const char * const mco_src2[] = {
	"sys_ck", "pll2_p", "hse_ck", "pll1_p", "csi_ck", "lsi_ck" };

/* LCD clock */
static const char * const ltdc_src[] = {"pll3_r"};
/* Gate clock with ready bit and backup domain management */
struct stm32_ready_gate {
	struct clk_gate gate;	/* standard gate; enable bit in gate.reg */
	u8 bit_rdy;		/* ready-flag bit polled after enable/disable */
};

#define to_ready_gate_clk(_rgate) container_of(_rgate, struct stm32_ready_gate,\
		gate)

/* Max polling iterations (100us apart) before giving up, ~1s worst case */
#define RGATE_TIMEOUT 10000
  119. static int ready_gate_clk_enable(struct clk_hw *hw)
  120. {
  121. struct clk_gate *gate = to_clk_gate(hw);
  122. struct stm32_ready_gate *rgate = to_ready_gate_clk(gate);
  123. int bit_status;
  124. unsigned int timeout = RGATE_TIMEOUT;
  125. if (clk_gate_ops.is_enabled(hw))
  126. return 0;
  127. clk_gate_ops.enable(hw);
  128. /* We can't use readl_poll_timeout() because we can blocked if
  129. * someone enables this clock before clocksource changes.
  130. * Only jiffies counter is available. Jiffies are incremented by
  131. * interruptions and enable op does not allow to be interrupted.
  132. */
  133. do {
  134. bit_status = !(readl(gate->reg) & BIT(rgate->bit_rdy));
  135. if (bit_status)
  136. udelay(100);
  137. } while (bit_status && --timeout);
  138. return bit_status;
  139. }
  140. static void ready_gate_clk_disable(struct clk_hw *hw)
  141. {
  142. struct clk_gate *gate = to_clk_gate(hw);
  143. struct stm32_ready_gate *rgate = to_ready_gate_clk(gate);
  144. int bit_status;
  145. unsigned int timeout = RGATE_TIMEOUT;
  146. if (!clk_gate_ops.is_enabled(hw))
  147. return;
  148. clk_gate_ops.disable(hw);
  149. do {
  150. bit_status = !!(readl(gate->reg) & BIT(rgate->bit_rdy));
  151. if (bit_status)
  152. udelay(100);
  153. } while (bit_status && --timeout);
  154. }
/* Gate ops that additionally poll the ready bit on enable/disable */
static const struct clk_ops ready_gate_clk_ops = {
	.enable = ready_gate_clk_enable,
	.disable = ready_gate_clk_disable,
	.is_enabled = clk_gate_is_enabled,
};
  160. static struct clk_hw *clk_register_ready_gate(struct device *dev,
  161. const char *name, const char *parent_name,
  162. void __iomem *reg, u8 bit_idx, u8 bit_rdy,
  163. unsigned long flags, spinlock_t *lock)
  164. {
  165. struct stm32_ready_gate *rgate;
  166. struct clk_init_data init = { NULL };
  167. struct clk_hw *hw;
  168. int ret;
  169. rgate = kzalloc(sizeof(*rgate), GFP_KERNEL);
  170. if (!rgate)
  171. return ERR_PTR(-ENOMEM);
  172. init.name = name;
  173. init.ops = &ready_gate_clk_ops;
  174. init.flags = flags;
  175. init.parent_names = &parent_name;
  176. init.num_parents = 1;
  177. rgate->bit_rdy = bit_rdy;
  178. rgate->gate.lock = lock;
  179. rgate->gate.reg = reg;
  180. rgate->gate.bit_idx = bit_idx;
  181. rgate->gate.hw.init = &init;
  182. hw = &rgate->gate.hw;
  183. ret = clk_hw_register(dev, hw);
  184. if (ret) {
  185. kfree(rgate);
  186. hw = ERR_PTR(ret);
  187. }
  188. return hw;
  189. }
/* Location of one gate bit in the RCC register file */
struct gate_cfg {
	u32 offset;	/* register offset from RCC base */
	u8 bit_idx;	/* enable bit within that register */
};

/* Location of one mux or divider field in the RCC register file */
struct muxdiv_cfg {
	u32 offset;	/* register offset from RCC base */
	u8 shift;	/* LSB of the field */
	u8 width;	/* field width in bits */
};

/* Per-clock description of a composite (mux + divider + gate) clock;
 * any of the three elements may be NULL when absent.
 */
struct composite_clk_cfg {
	struct gate_cfg *gate;
	struct muxdiv_cfg *mux;
	struct muxdiv_cfg *div;
	const char *name;
	const char * const *parent_name;
	int num_parents;
	u32 flags;
};

/* Ops/flags policy shared by a family of composite-clock elements */
struct composite_clk_gcfg_t {
	u8 flags;
	const struct clk_ops *ops;	/* NULL selects the generic ops */
};

/*
 * General config definition of a composite clock (only clock diviser for rate)
 */
struct composite_clk_gcfg {
	struct composite_clk_gcfg_t *mux;
	struct composite_clk_gcfg_t *div;
	struct composite_clk_gcfg_t *gate;
};

/* Designated-initializer helpers for composite_clk_gcfg members */
#define M_CFG_MUX(_mux_ops, _mux_flags)\
	.mux = &(struct composite_clk_gcfg_t) { _mux_flags, _mux_ops}

#define M_CFG_DIV(_rate_ops, _rate_flags)\
	.div = &(struct composite_clk_gcfg_t) {_rate_flags, _rate_ops}

#define M_CFG_GATE(_gate_ops, _gate_flags)\
	.gate = &(struct composite_clk_gcfg_t) { _gate_flags, _gate_ops}
  226. static struct clk_mux *_get_cmux(void __iomem *reg, u8 shift, u8 width,
  227. u32 flags, spinlock_t *lock)
  228. {
  229. struct clk_mux *mux;
  230. mux = kzalloc(sizeof(*mux), GFP_KERNEL);
  231. if (!mux)
  232. return ERR_PTR(-ENOMEM);
  233. mux->reg = reg;
  234. mux->shift = shift;
  235. mux->mask = (1 << width) - 1;
  236. mux->flags = flags;
  237. mux->lock = lock;
  238. return mux;
  239. }
  240. static struct clk_divider *_get_cdiv(void __iomem *reg, u8 shift, u8 width,
  241. u32 flags, spinlock_t *lock)
  242. {
  243. struct clk_divider *div;
  244. div = kzalloc(sizeof(*div), GFP_KERNEL);
  245. if (!div)
  246. return ERR_PTR(-ENOMEM);
  247. div->reg = reg;
  248. div->shift = shift;
  249. div->width = width;
  250. div->flags = flags;
  251. div->lock = lock;
  252. return div;
  253. }
  254. static struct clk_gate *_get_cgate(void __iomem *reg, u8 bit_idx, u32 flags,
  255. spinlock_t *lock)
  256. {
  257. struct clk_gate *gate;
  258. gate = kzalloc(sizeof(*gate), GFP_KERNEL);
  259. if (!gate)
  260. return ERR_PTR(-ENOMEM);
  261. gate->reg = reg;
  262. gate->bit_idx = bit_idx;
  263. gate->flags = flags;
  264. gate->lock = lock;
  265. return gate;
  266. }
/* Resolved building blocks, ready for clk_hw_register_composite() */
struct composite_cfg {
	struct clk_hw *mux_hw;
	struct clk_hw *div_hw;
	struct clk_hw *gate_hw;
	const struct clk_ops *mux_ops;
	const struct clk_ops *div_ops;
	const struct clk_ops *gate_ops;
};
/*
 * Build the mux/div/gate elements of a composite clock from a per-clock
 * register description (@cfg) and a family-wide ops/flags policy (@gcfg).
 * An element is created only when both the policy and the per-clock
 * config provide it; a NULL ops in the policy selects the generic ops.
 * Allocation failures leave the corresponding hw/ops pair NULL.
 */
static void get_cfg_composite_div(const struct composite_clk_gcfg *gcfg,
		const struct composite_clk_cfg *cfg,
		struct composite_cfg *composite, spinlock_t *lock)
{
	struct clk_mux *mux = NULL;
	struct clk_divider *div = NULL;
	struct clk_gate *gate = NULL;
	const struct clk_ops *mux_ops, *div_ops, *gate_ops;
	struct clk_hw *mux_hw;
	struct clk_hw *div_hw;
	struct clk_hw *gate_hw;

	mux_ops = div_ops = gate_ops = NULL;
	mux_hw = div_hw = gate_hw = NULL;

	if (gcfg->mux && cfg->mux) {
		mux = _get_cmux(base + cfg->mux->offset,
				cfg->mux->shift,
				cfg->mux->width,
				gcfg->mux->flags, lock);

		if (!IS_ERR(mux)) {
			mux_hw = &mux->hw;
			/* fall back to the generic mux ops when none given */
			mux_ops = gcfg->mux->ops ?
				gcfg->mux->ops : &clk_mux_ops;
		}
	}

	if (gcfg->div && cfg->div) {
		div = _get_cdiv(base + cfg->div->offset,
				cfg->div->shift,
				cfg->div->width,
				gcfg->div->flags, lock);

		if (!IS_ERR(div)) {
			div_hw = &div->hw;
			div_ops = gcfg->div->ops ?
				gcfg->div->ops : &clk_divider_ops;
		}
	}

	if (gcfg->gate && cfg->gate) {
		gate = _get_cgate(base + cfg->gate->offset,
				cfg->gate->bit_idx,
				gcfg->gate->flags, lock);

		if (!IS_ERR(gate)) {
			gate_hw = &gate->hw;
			gate_ops = gcfg->gate->ops ?
				gcfg->gate->ops : &clk_gate_ops;
		}
	}

	composite->mux_hw = mux_hw;
	composite->mux_ops = mux_ops;
	composite->div_hw = div_hw;
	composite->div_ops = div_ops;
	composite->gate_hw = gate_hw;
	composite->gate_ops = gate_ops;
}
/* Kernel Timer */
struct timer_ker {
	u8 dppre_shift;		/* shift of the APB prescaler field in RCC_D2CFGR */
	struct clk_hw hw;
	spinlock_t *lock;
};

#define to_timer_ker(_hw) container_of(_hw, struct timer_ker, hw)

/*
 * Timer kernel clocks run at a multiple of their APB clock that depends
 * on the TIMPRE bit (RCC_CFGR bit 15) and the 3-bit APB prescaler:
 * mul = 1 while the prescaler bypasses (raw value < 4), 4 when TIMPRE
 * is set and the prescaler divides by more than 2 (> 4), 2 otherwise.
 * NOTE(review): mapping presumably follows RM0433's timer clock table —
 * confirm the (TIMPRE=1, prescaler==4) case against the manual.
 */
static unsigned long timer_ker_recalc_rate(struct clk_hw *hw,
		unsigned long parent_rate)
{
	struct timer_ker *clk_elem = to_timer_ker(hw);
	u32 timpre;
	u32 dppre_shift = clk_elem->dppre_shift;
	u32 prescaler;
	u32 mul;

	timpre = (readl(base + RCC_CFGR) >> 15) & 0x01;

	prescaler = (readl(base + RCC_D2CFGR) >> dppre_shift) & 0x03;

	mul = 2;

	if (prescaler < 4)
		mul = 1;

	else if (timpre && prescaler > 4)
		mul = 4;

	return parent_rate * mul;
}

/* Read-only clock: rate derived from RCC prescaler settings */
static const struct clk_ops timer_ker_ops = {
	.recalc_rate = timer_ker_recalc_rate,
};
  354. static struct clk_hw *clk_register_stm32_timer_ker(struct device *dev,
  355. const char *name, const char *parent_name,
  356. unsigned long flags,
  357. u8 dppre_shift,
  358. spinlock_t *lock)
  359. {
  360. struct timer_ker *element;
  361. struct clk_init_data init;
  362. struct clk_hw *hw;
  363. int err;
  364. element = kzalloc(sizeof(*element), GFP_KERNEL);
  365. if (!element)
  366. return ERR_PTR(-ENOMEM);
  367. init.name = name;
  368. init.ops = &timer_ker_ops;
  369. init.flags = flags;
  370. init.parent_names = &parent_name;
  371. init.num_parents = 1;
  372. element->hw.init = &init;
  373. element->lock = lock;
  374. element->dppre_shift = dppre_shift;
  375. hw = &element->hw;
  376. err = clk_hw_register(dev, hw);
  377. if (err) {
  378. kfree(element);
  379. return ERR_PTR(err);
  380. }
  381. return hw;
  382. }
/* D1CPRE/HPRE encoding: raw values 0-7 bypass, 8-15 divide by
 * 2,4,8,16,64,128,256,512 (hardware encoding skips 32).
 */
static const struct clk_div_table d1cpre_div_table[] = {
	{ 0, 1 }, { 1, 1 }, { 2, 1 }, { 3, 1},
	{ 4, 1 }, { 5, 1 }, { 6, 1 }, { 7, 1},
	{ 8, 2 }, { 9, 4 }, { 10, 8 }, { 11, 16 },
	{ 12, 64 }, { 13, 128 }, { 14, 256 },
	{ 15, 512 },
	{ 0 },
};

/* APB prescaler encoding: 0-3 bypass, 4-7 divide by 2,4,8,16 */
static const struct clk_div_table ppre_div_table[] = {
	{ 0, 1 }, { 1, 1 }, { 2, 1 }, { 3, 1},
	{ 4, 2 }, { 5, 4 }, { 6, 8 }, { 7, 16 },
	{ 0 },
};
/*
 * Register the CPU/bus dividers of the three clock domains (D1/D2/D3)
 * plus the timer kernel clocks, filling the global hws[] table.
 * All dividers read their fields from the DxCFGR registers.
 */
static void register_core_and_bus_clocks(void)
{
	/* CORE AND BUS */
	hws[SYS_D1CPRE] = clk_hw_register_divider_table(NULL, "d1cpre",
			"sys_ck", CLK_IGNORE_UNUSED, base + RCC_D1CFGR, 8, 4, 0,
			d1cpre_div_table, &stm32rcc_lock);

	hws[HCLK] = clk_hw_register_divider_table(NULL, "hclk", "d1cpre",
			CLK_IGNORE_UNUSED, base + RCC_D1CFGR, 0, 4, 0,
			d1cpre_div_table, &stm32rcc_lock);

	/* D1 DOMAIN */
	/* * CPU Systick */
	hws[CPU_SYSTICK] = clk_hw_register_fixed_factor(NULL, "systick",
			"d1cpre", 0, 1, 8);

	/* * APB3 peripheral */
	hws[PCLK3] = clk_hw_register_divider_table(NULL, "pclk3", "hclk", 0,
			base + RCC_D1CFGR, 4, 3, 0,
			ppre_div_table, &stm32rcc_lock);

	/* D2 DOMAIN */
	/* * APB1 peripheral */
	hws[PCLK1] = clk_hw_register_divider_table(NULL, "pclk1", "hclk", 0,
			base + RCC_D2CFGR, 4, 3, 0,
			ppre_div_table, &stm32rcc_lock);

	/* Timers prescaler clocks */
	clk_register_stm32_timer_ker(NULL, "tim1_ker", "pclk1", 0,
			4, &stm32rcc_lock);

	/* * APB2 peripheral */
	hws[PCLK2] = clk_hw_register_divider_table(NULL, "pclk2", "hclk", 0,
			base + RCC_D2CFGR, 8, 3, 0, ppre_div_table,
			&stm32rcc_lock);

	clk_register_stm32_timer_ker(NULL, "tim2_ker", "pclk2", 0, 8,
			&stm32rcc_lock);

	/* D3 DOMAIN */
	/* * APB4 peripheral */
	hws[PCLK4] = clk_hw_register_divider_table(NULL, "pclk4", "hclk", 0,
			base + RCC_D3CFGR, 4, 3, 0,
			ppre_div_table, &stm32rcc_lock);
}
  433. /* MUX clock configuration */
  434. struct stm32_mux_clk {
  435. const char *name;
  436. const char * const *parents;
  437. u8 num_parents;
  438. u32 offset;
  439. u8 shift;
  440. u8 width;
  441. u32 flags;
  442. };
  443. #define M_MCLOCF(_name, _parents, _mux_offset, _mux_shift, _mux_width, _flags)\
  444. {\
  445. .name = _name,\
  446. .parents = _parents,\
  447. .num_parents = ARRAY_SIZE(_parents),\
  448. .offset = _mux_offset,\
  449. .shift = _mux_shift,\
  450. .width = _mux_width,\
  451. .flags = _flags,\
  452. }
  453. #define M_MCLOC(_name, _parents, _mux_offset, _mux_shift, _mux_width)\
  454. M_MCLOCF(_name, _parents, _mux_offset, _mux_shift, _mux_width, 0)\
  455. static const struct stm32_mux_clk stm32_mclk[] __initconst = {
  456. M_MCLOC("per_ck", per_src, RCC_D1CCIPR, 28, 3),
  457. M_MCLOC("pllsrc", pll_src, RCC_PLLCKSELR, 0, 3),
  458. M_MCLOC("sys_ck", sys_src, RCC_CFGR, 0, 3),
  459. M_MCLOC("tracein_ck", tracein_src, RCC_CFGR, 0, 3),
  460. };
/* Oscillary clock configuration */
struct stm32_osc_clk {
	const char *name;
	const char *parent;
	u32 gate_offset;	/* register holding the enable bit */
	u8 bit_idx;		/* oscillator enable bit */
	u8 bit_rdy;		/* oscillator ready flag, same register */
	u32 flags;
};

#define OSC_CLKF(_name, _parent, _gate_offset, _bit_idx, _bit_rdy, _flags)\
{\
	.name = _name,\
	.parent = _parent,\
	.gate_offset = _gate_offset,\
	.bit_idx = _bit_idx,\
	.bit_rdy = _bit_rdy,\
	.flags = _flags,\
}

#define OSC_CLK(_name, _parent, _gate_offset, _bit_idx, _bit_rdy)\
	OSC_CLKF(_name, _parent, _gate_offset, _bit_idx, _bit_rdy, 0)

/* On/ready bit pairs in RCC_CR / RCC_CSR for the internal oscillators */
static const struct stm32_osc_clk stm32_oclk[] __initconst = {
	OSC_CLKF("hsi_ck", "hsidiv", RCC_CR, 0, 2, CLK_IGNORE_UNUSED),
	OSC_CLKF("hsi_ker", "hsidiv", RCC_CR, 1, 2, CLK_IGNORE_UNUSED),
	OSC_CLKF("csi_ck", "clk-csi", RCC_CR, 7, 8, CLK_IGNORE_UNUSED),
	OSC_CLKF("csi_ker", "clk-csi", RCC_CR, 9, 8, CLK_IGNORE_UNUSED),
	OSC_CLKF("rc48_ck", "clk-rc48", RCC_CR, 12, 13, CLK_IGNORE_UNUSED),
	OSC_CLKF("lsi_ck", "clk-lsi", RCC_CSR, 0, 1, CLK_IGNORE_UNUSED),
};
/* PLL configuration */
struct st32h7_pll_cfg {
	u8 bit_idx;		/* PLLxON bit in RCC_CR (ready bit follows it) */
	u32 offset_divr;	/* RCC_PLLxDIVR register */
	u8 bit_frac_en;		/* fractional-enable bit in RCC_PLLCFGR */
	u32 offset_frac;	/* RCC_PLLxFRACR register */
	u8 divm;		/* shift of the DIVMx field in RCC_PLLCKSELR */
};

struct stm32_pll_data {
	const char *name;
	const char *parent_name;
	unsigned long flags;
	const struct st32h7_pll_cfg *cfg;
};

static const struct st32h7_pll_cfg stm32h7_pll1 = {
	.bit_idx = 24,
	.offset_divr = RCC_PLL1DIVR,
	.bit_frac_en = 0,
	.offset_frac = RCC_PLL1FRACR,
	.divm = 4,
};

static const struct st32h7_pll_cfg stm32h7_pll2 = {
	.bit_idx = 26,
	.offset_divr = RCC_PLL2DIVR,
	.bit_frac_en = 4,
	.offset_frac = RCC_PLL2FRACR,
	.divm = 12,
};

static const struct st32h7_pll_cfg stm32h7_pll3 = {
	.bit_idx = 28,
	.offset_divr = RCC_PLL3DIVR,
	.bit_frac_en = 8,
	.offset_frac = RCC_PLL3FRACR,
	.divm = 20,
};

/* The three VCOs, all fed from the shared "pllsrc" mux */
static const struct stm32_pll_data stm32_pll[] = {
	{ "vco1", "pllsrc", CLK_IGNORE_UNUSED, &stm32h7_pll1 },
	{ "vco2", "pllsrc", 0, &stm32h7_pll2 },
	{ "vco3", "pllsrc", 0, &stm32h7_pll3 },
};
/* Fractional divider: rate = parent * N / M plus a fractional part
 * (see pll_fd_recalc_rate()); field positions are set at registration.
 */
struct stm32_fractional_divider {
	void __iomem *mreg;	/* register holding the M (pre)divider */
	u8 mshift;
	u8 mwidth;
	void __iomem *nreg;	/* register holding the N multiplier */
	u8 nshift;
	u8 nwidth;
	void __iomem *freg_status;	/* fractional-enable register */
	u8 freg_bit;
	void __iomem *freg_value;	/* fractional value register */
	u8 fshift;
	u8 fwidth;
	u8 flags;
	struct clk_hw hw;
	spinlock_t *lock;
};

/* A PLL: embedded fractional divider + ready-gate, one clk_hw */
struct stm32_pll_obj {
	spinlock_t *lock;
	struct stm32_fractional_divider div;
	struct stm32_ready_gate rgate;
	struct clk_hw hw;
};

#define to_pll(_hw) container_of(_hw, struct stm32_pll_obj, hw)
  552. static int pll_is_enabled(struct clk_hw *hw)
  553. {
  554. struct stm32_pll_obj *clk_elem = to_pll(hw);
  555. struct clk_hw *_hw = &clk_elem->rgate.gate.hw;
  556. __clk_hw_set_clk(_hw, hw);
  557. return ready_gate_clk_ops.is_enabled(_hw);
  558. }
  559. static int pll_enable(struct clk_hw *hw)
  560. {
  561. struct stm32_pll_obj *clk_elem = to_pll(hw);
  562. struct clk_hw *_hw = &clk_elem->rgate.gate.hw;
  563. __clk_hw_set_clk(_hw, hw);
  564. return ready_gate_clk_ops.enable(_hw);
  565. }
  566. static void pll_disable(struct clk_hw *hw)
  567. {
  568. struct stm32_pll_obj *clk_elem = to_pll(hw);
  569. struct clk_hw *_hw = &clk_elem->rgate.gate.hw;
  570. __clk_hw_set_clk(_hw, hw);
  571. ready_gate_clk_ops.disable(_hw);
  572. }
  573. static int pll_frac_is_enabled(struct clk_hw *hw)
  574. {
  575. struct stm32_pll_obj *clk_elem = to_pll(hw);
  576. struct stm32_fractional_divider *fd = &clk_elem->div;
  577. return (readl(fd->freg_status) >> fd->freg_bit) & 0x01;
  578. }
  579. static unsigned long pll_read_frac(struct clk_hw *hw)
  580. {
  581. struct stm32_pll_obj *clk_elem = to_pll(hw);
  582. struct stm32_fractional_divider *fd = &clk_elem->div;
  583. return (readl(fd->freg_value) >> fd->fshift) &
  584. GENMASK(fd->fwidth - 1, 0);
  585. }
/*
 * VCO rate: parent * (N + 1) / M, plus parent * FRACN / (M * 8191)
 * when the fractional divider is enabled.
 * NOTE(review): 8191 is GENMASK(12,0) (the max FRACN); a 2^13 = 8192
 * denominator would be the usual fractional convention — confirm
 * against RM0433 before changing.
 */
static unsigned long pll_fd_recalc_rate(struct clk_hw *hw,
		unsigned long parent_rate)
{
	struct stm32_pll_obj *clk_elem = to_pll(hw);
	struct stm32_fractional_divider *fd = &clk_elem->div;
	unsigned long m, n;
	u32 val, mask;
	u64 rate, rate1 = 0;

	val = readl(fd->mreg);
	mask = GENMASK(fd->mwidth - 1, 0) << fd->mshift;
	m = (val & mask) >> fd->mshift;

	val = readl(fd->nreg);
	mask = GENMASK(fd->nwidth - 1, 0) << fd->nshift;
	n = ((val & mask) >> fd->nshift) + 1;

	/* a zero pre-divider would divide by zero: pass the rate through */
	if (!n || !m)
		return parent_rate;

	rate = (u64)parent_rate * n;
	do_div(rate, m);

	if (pll_frac_is_enabled(hw)) {
		val = pll_read_frac(hw);
		rate1 = (u64)parent_rate * (u64)val;
		do_div(rate1, (m * 8191));
	}

	return rate + rate1;
}

static const struct clk_ops pll_ops = {
	.enable = pll_enable,
	.disable = pll_disable,
	.is_enabled = pll_is_enabled,
	.recalc_rate = pll_fd_recalc_rate,
};
/*
 * Register one PLL VCO clock: wires up the embedded ready-gate (on/rdy
 * bits in RCC_CR) and the fractional divider (DIVM in RCC_PLLCKSELR,
 * N in the PLL's DIVR register, FRACN in its FRACR register).
 * Returns the clk_hw or an ERR_PTR; the allocation is freed on failure.
 */
static struct clk_hw *clk_register_stm32_pll(struct device *dev,
		const char *name,
		const char *parent,
		unsigned long flags,
		const struct st32h7_pll_cfg *cfg,
		spinlock_t *lock)
{
	struct stm32_pll_obj *pll;
	struct clk_init_data init = { NULL };
	struct clk_hw *hw;
	int ret;
	struct stm32_fractional_divider *div = NULL;
	struct stm32_ready_gate *rgate;

	pll = kzalloc(sizeof(*pll), GFP_KERNEL);
	if (!pll)
		return ERR_PTR(-ENOMEM);

	init.name = name;
	init.ops = &pll_ops;
	init.flags = flags;
	init.parent_names = &parent;
	init.num_parents = 1;
	pll->hw.init = &init;

	hw = &pll->hw;
	rgate = &pll->rgate;

	/* PLLxRDY sits immediately above PLLxON in RCC_CR */
	rgate->bit_rdy = cfg->bit_idx + 1;
	rgate->gate.lock = lock;
	rgate->gate.reg = base + RCC_CR;
	rgate->gate.bit_idx = cfg->bit_idx;

	div = &pll->div;
	div->flags = 0;
	div->mreg = base + RCC_PLLCKSELR;
	div->mshift = cfg->divm;	/* 6-bit DIVMx field */
	div->mwidth = 6;
	div->nreg = base + cfg->offset_divr;
	div->nshift = 0;		/* 9-bit DIVNx field */
	div->nwidth = 9;
	div->freg_status = base + RCC_PLLCFGR;
	div->freg_bit = cfg->bit_frac_en;
	div->freg_value = base + cfg->offset_frac;
	div->fshift = 3;		/* 13-bit FRACNx field */
	div->fwidth = 13;
	div->lock = lock;

	ret = clk_hw_register(dev, hw);
	if (ret) {
		kfree(pll);
		hw = ERR_PTR(ret);
	}

	return hw;
}
  666. /* ODF CLOCKS */
  667. static unsigned long odf_divider_recalc_rate(struct clk_hw *hw,
  668. unsigned long parent_rate)
  669. {
  670. return clk_divider_ops.recalc_rate(hw, parent_rate);
  671. }
  672. static int odf_divider_determine_rate(struct clk_hw *hw,
  673. struct clk_rate_request *req)
  674. {
  675. return clk_divider_ops.determine_rate(hw, req);
  676. }
  677. static int odf_divider_set_rate(struct clk_hw *hw, unsigned long rate,
  678. unsigned long parent_rate)
  679. {
  680. struct clk_hw *hwp;
  681. int pll_status;
  682. int ret;
  683. hwp = clk_hw_get_parent(hw);
  684. pll_status = pll_is_enabled(hwp);
  685. if (pll_status)
  686. pll_disable(hwp);
  687. ret = clk_divider_ops.set_rate(hw, rate, parent_rate);
  688. if (pll_status)
  689. pll_enable(hwp);
  690. return ret;
  691. }
  692. static const struct clk_ops odf_divider_ops = {
  693. .recalc_rate = odf_divider_recalc_rate,
  694. .determine_rate = odf_divider_determine_rate,
  695. .set_rate = odf_divider_set_rate,
  696. };
  697. static int odf_gate_enable(struct clk_hw *hw)
  698. {
  699. struct clk_hw *hwp;
  700. int pll_status;
  701. int ret;
  702. if (clk_gate_ops.is_enabled(hw))
  703. return 0;
  704. hwp = clk_hw_get_parent(hw);
  705. pll_status = pll_is_enabled(hwp);
  706. if (pll_status)
  707. pll_disable(hwp);
  708. ret = clk_gate_ops.enable(hw);
  709. if (pll_status)
  710. pll_enable(hwp);
  711. return ret;
  712. }
  713. static void odf_gate_disable(struct clk_hw *hw)
  714. {
  715. struct clk_hw *hwp;
  716. int pll_status;
  717. if (!clk_gate_ops.is_enabled(hw))
  718. return;
  719. hwp = clk_hw_get_parent(hw);
  720. pll_status = pll_is_enabled(hwp);
  721. if (pll_status)
  722. pll_disable(hwp);
  723. clk_gate_ops.disable(hw);
  724. if (pll_status)
  725. pll_enable(hwp);
  726. }
  727. static const struct clk_ops odf_gate_ops = {
  728. .enable = odf_gate_enable,
  729. .disable = odf_gate_disable,
  730. .is_enabled = clk_gate_is_enabled,
  731. };
  732. static struct composite_clk_gcfg odf_clk_gcfg = {
  733. M_CFG_DIV(&odf_divider_ops, 0),
  734. M_CFG_GATE(&odf_gate_ops, 0),
  735. };
  736. #define M_ODF_F(_name, _parent, _gate_offset, _bit_idx, _rate_offset,\
  737. _rate_shift, _rate_width, _flags)\
  738. {\
  739. .mux = NULL,\
  740. .div = &(struct muxdiv_cfg) {_rate_offset, _rate_shift, _rate_width},\
  741. .gate = &(struct gate_cfg) {_gate_offset, _bit_idx },\
  742. .name = _name,\
  743. .parent_name = &(const char *) {_parent},\
  744. .num_parents = 1,\
  745. .flags = _flags,\
  746. }
  747. #define M_ODF(_name, _parent, _gate_offset, _bit_idx, _rate_offset,\
  748. _rate_shift, _rate_width)\
  749. M_ODF_F(_name, _parent, _gate_offset, _bit_idx, _rate_offset,\
  750. _rate_shift, _rate_width, 0)\
  751. static const struct composite_clk_cfg stm32_odf[3][3] = {
  752. {
  753. M_ODF_F("pll1_p", "vco1", RCC_PLLCFGR, 16, RCC_PLL1DIVR, 9, 7,
  754. CLK_IGNORE_UNUSED),
  755. M_ODF_F("pll1_q", "vco1", RCC_PLLCFGR, 17, RCC_PLL1DIVR, 16, 7,
  756. CLK_IGNORE_UNUSED),
  757. M_ODF_F("pll1_r", "vco1", RCC_PLLCFGR, 18, RCC_PLL1DIVR, 24, 7,
  758. CLK_IGNORE_UNUSED),
  759. },
  760. {
  761. M_ODF("pll2_p", "vco2", RCC_PLLCFGR, 19, RCC_PLL2DIVR, 9, 7),
  762. M_ODF("pll2_q", "vco2", RCC_PLLCFGR, 20, RCC_PLL2DIVR, 16, 7),
  763. M_ODF("pll2_r", "vco2", RCC_PLLCFGR, 21, RCC_PLL2DIVR, 24, 7),
  764. },
  765. {
  766. M_ODF("pll3_p", "vco3", RCC_PLLCFGR, 22, RCC_PLL3DIVR, 9, 7),
  767. M_ODF("pll3_q", "vco3", RCC_PLLCFGR, 23, RCC_PLL3DIVR, 16, 7),
  768. M_ODF("pll3_r", "vco3", RCC_PLLCFGR, 24, RCC_PLL3DIVR, 24, 7),
  769. }
  770. };
/* PERIF CLOCKS */
/* Simple peripheral gate: one enable bit in an RCC xxxENR register */
struct pclk_t {
	u32 gate_offset;	/* register offset from RCC base */
	u8 bit_idx;		/* enable bit within that register */
	const char *name;
	const char *parent;
	u32 flags;
};

#define PER_CLKF(_gate_offset, _bit_idx, _name, _parent, _flags)\
{\
	.gate_offset = _gate_offset,\
	.bit_idx = _bit_idx,\
	.name = _name,\
	.parent = _parent,\
	.flags = _flags,\
}

#define PER_CLK(_gate_offset, _bit_idx, _name, _parent)\
	PER_CLKF(_gate_offset, _bit_idx, _name, _parent, 0)
/*
 * Peripheral (bus) clocks: plain enable gates in the RCC xxENR
 * registers, fed by the bus clock named in .parent.  Entries are
 * grouped by enable register.  The timers take a timer kernel clock
 * (tim1_ker/tim2_ker) as parent rather than the APB clock.
 */
static const struct pclk_t pclk[] = {
	/* AHB3 */
	PER_CLK(RCC_AHB3ENR, 31, "d1sram1", "hclk"),
	PER_CLK(RCC_AHB3ENR, 30, "itcm", "hclk"),
	PER_CLK(RCC_AHB3ENR, 29, "dtcm2", "hclk"),
	PER_CLK(RCC_AHB3ENR, 28, "dtcm1", "hclk"),
	PER_CLK(RCC_AHB3ENR, 8, "flitf", "hclk"),
	PER_CLK(RCC_AHB3ENR, 5, "jpgdec", "hclk"),
	PER_CLK(RCC_AHB3ENR, 4, "dma2d", "hclk"),
	PER_CLK(RCC_AHB3ENR, 0, "mdma", "hclk"),
	/* AHB1 */
	PER_CLK(RCC_AHB1ENR, 28, "usb2ulpi", "hclk"),
	PER_CLK(RCC_AHB1ENR, 26, "usb1ulpi", "hclk"),
	PER_CLK(RCC_AHB1ENR, 17, "eth1rx", "hclk"),
	PER_CLK(RCC_AHB1ENR, 16, "eth1tx", "hclk"),
	PER_CLK(RCC_AHB1ENR, 15, "eth1mac", "hclk"),
	PER_CLK(RCC_AHB1ENR, 14, "art", "hclk"),
	PER_CLK(RCC_AHB1ENR, 1, "dma2", "hclk"),
	PER_CLK(RCC_AHB1ENR, 0, "dma1", "hclk"),
	/* AHB2 */
	PER_CLK(RCC_AHB2ENR, 31, "d2sram3", "hclk"),
	PER_CLK(RCC_AHB2ENR, 30, "d2sram2", "hclk"),
	PER_CLK(RCC_AHB2ENR, 29, "d2sram1", "hclk"),
	PER_CLK(RCC_AHB2ENR, 5, "hash", "hclk"),
	PER_CLK(RCC_AHB2ENR, 4, "crypt", "hclk"),
	PER_CLK(RCC_AHB2ENR, 0, "camitf", "hclk"),
	/* AHB4 */
	PER_CLK(RCC_AHB4ENR, 28, "bkpram", "hclk"),
	PER_CLK(RCC_AHB4ENR, 25, "hsem", "hclk"),
	PER_CLK(RCC_AHB4ENR, 21, "bdma", "hclk"),
	PER_CLK(RCC_AHB4ENR, 19, "crc", "hclk"),
	PER_CLK(RCC_AHB4ENR, 10, "gpiok", "hclk"),
	PER_CLK(RCC_AHB4ENR, 9, "gpioj", "hclk"),
	PER_CLK(RCC_AHB4ENR, 8, "gpioi", "hclk"),
	PER_CLK(RCC_AHB4ENR, 7, "gpioh", "hclk"),
	PER_CLK(RCC_AHB4ENR, 6, "gpiog", "hclk"),
	PER_CLK(RCC_AHB4ENR, 5, "gpiof", "hclk"),
	PER_CLK(RCC_AHB4ENR, 4, "gpioe", "hclk"),
	PER_CLK(RCC_AHB4ENR, 3, "gpiod", "hclk"),
	PER_CLK(RCC_AHB4ENR, 2, "gpioc", "hclk"),
	PER_CLK(RCC_AHB4ENR, 1, "gpiob", "hclk"),
	PER_CLK(RCC_AHB4ENR, 0, "gpioa", "hclk"),
	/* APB3 */
	PER_CLK(RCC_APB3ENR, 6, "wwdg1", "pclk3"),
	/* APB1 (low) */
	PER_CLK(RCC_APB1LENR, 29, "dac12", "pclk1"),
	PER_CLK(RCC_APB1LENR, 11, "wwdg2", "pclk1"),
	PER_CLK(RCC_APB1LENR, 8, "tim14", "tim1_ker"),
	PER_CLK(RCC_APB1LENR, 7, "tim13", "tim1_ker"),
	PER_CLK(RCC_APB1LENR, 6, "tim12", "tim1_ker"),
	PER_CLK(RCC_APB1LENR, 5, "tim7", "tim1_ker"),
	PER_CLK(RCC_APB1LENR, 4, "tim6", "tim1_ker"),
	PER_CLK(RCC_APB1LENR, 3, "tim5", "tim1_ker"),
	PER_CLK(RCC_APB1LENR, 2, "tim4", "tim1_ker"),
	PER_CLK(RCC_APB1LENR, 1, "tim3", "tim1_ker"),
	PER_CLK(RCC_APB1LENR, 0, "tim2", "tim1_ker"),
	/* APB1 (high) */
	PER_CLK(RCC_APB1HENR, 5, "mdios", "pclk1"),
	PER_CLK(RCC_APB1HENR, 4, "opamp", "pclk1"),
	PER_CLK(RCC_APB1HENR, 1, "crs", "pclk1"),
	/* APB2 */
	PER_CLK(RCC_APB2ENR, 18, "tim17", "tim2_ker"),
	PER_CLK(RCC_APB2ENR, 17, "tim16", "tim2_ker"),
	PER_CLK(RCC_APB2ENR, 16, "tim15", "tim2_ker"),
	PER_CLK(RCC_APB2ENR, 1, "tim8", "tim2_ker"),
	PER_CLK(RCC_APB2ENR, 0, "tim1", "tim2_ker"),
	/* APB4 */
	PER_CLK(RCC_APB4ENR, 26, "tmpsens", "pclk4"),
	PER_CLK(RCC_APB4ENR, 16, "rtcapb", "pclk4"),
	PER_CLK(RCC_APB4ENR, 15, "vref", "pclk4"),
	PER_CLK(RCC_APB4ENR, 14, "comp12", "pclk4"),
	PER_CLK(RCC_APB4ENR, 1, "syscfg", "pclk4"),
};
  853. /* KERNEL CLOCKS */
/*
 * Build a kernel-clock config: enable gate at _gate_offset/_bit_idx
 * plus a parent mux field at _mux_offset[_mux_shift +: _mux_width].
 * _parent_name must be a real array: ARRAY_SIZE() is applied to it.
 */
#define KER_CLKF(_gate_offset, _bit_idx,\
		_mux_offset, _mux_shift, _mux_width,\
		_name, _parent_name,\
		_flags) \
{ \
	.gate = &(struct gate_cfg) {_gate_offset, _bit_idx},\
	.mux = &(struct muxdiv_cfg) {_mux_offset, _mux_shift, _mux_width },\
	.name = _name, \
	.parent_name = _parent_name, \
	.num_parents = ARRAY_SIZE(_parent_name),\
	.flags = _flags,\
}
  866. #define KER_CLK(_gate_offset, _bit_idx, _mux_offset, _mux_shift, _mux_width,\
  867. _name, _parent_name) \
  868. KER_CLKF(_gate_offset, _bit_idx, _mux_offset, _mux_shift, _mux_width,\
  869. _name, _parent_name, 0)\
/* Kernel clock with a gate only (fixed single parent, no mux). */
#define KER_CLKF_NOMUX(_gate_offset, _bit_idx,\
		_name, _parent_name,\
		_flags) \
{ \
	.gate = &(struct gate_cfg) {_gate_offset, _bit_idx},\
	.mux = NULL,\
	.name = _name, \
	.parent_name = _parent_name, \
	.num_parents = 1,\
	.flags = _flags,\
}
/*
 * Kernel clocks: an enable gate in an RCC xxENR register combined with
 * a parent mux in one of the D1/D2/D3 kernel clock selection registers.
 */
static const struct composite_clk_cfg kclk[] = {
	KER_CLK(RCC_AHB3ENR, 16, RCC_D1CCIPR, 16, 1, "sdmmc1", sdmmc_src),
	KER_CLKF(RCC_AHB3ENR, 14, RCC_D1CCIPR, 4, 2, "quadspi", qspi_src,
			CLK_IGNORE_UNUSED),
	KER_CLKF(RCC_AHB3ENR, 12, RCC_D1CCIPR, 0, 2, "fmc", fmc_src,
			CLK_IGNORE_UNUSED),
	/* usb1otg and usb2otg share the same mux field (D2CCIP2R USBSEL) */
	KER_CLK(RCC_AHB1ENR, 27, RCC_D2CCIP2R, 20, 2, "usb2otg", usbotg_src),
	KER_CLK(RCC_AHB1ENR, 25, RCC_D2CCIP2R, 20, 2, "usb1otg", usbotg_src),
	KER_CLK(RCC_AHB1ENR, 5, RCC_D3CCIPR, 16, 2, "adc12", adc_src),
	KER_CLK(RCC_AHB2ENR, 9, RCC_D1CCIPR, 16, 1, "sdmmc2", sdmmc_src),
	KER_CLK(RCC_AHB2ENR, 6, RCC_D2CCIP2R, 8, 2, "rng", rng_src),
	KER_CLK(RCC_AHB4ENR, 24, RCC_D3CCIPR, 16, 2, "adc3", adc_src),
	KER_CLKF(RCC_APB3ENR, 4, RCC_D1CCIPR, 8, 1, "dsi", dsi_src,
			CLK_SET_RATE_PARENT),
	KER_CLKF_NOMUX(RCC_APB3ENR, 3, "ltdc", ltdc_src, CLK_SET_RATE_PARENT),
	KER_CLK(RCC_APB1LENR, 31, RCC_D2CCIP2R, 0, 3, "usart8", usart_src2),
	KER_CLK(RCC_APB1LENR, 30, RCC_D2CCIP2R, 0, 3, "usart7", usart_src2),
	KER_CLK(RCC_APB1LENR, 27, RCC_D2CCIP2R, 22, 2, "hdmicec", cec_src),
	KER_CLK(RCC_APB1LENR, 23, RCC_D2CCIP2R, 12, 2, "i2c3", i2c_src1),
	KER_CLK(RCC_APB1LENR, 22, RCC_D2CCIP2R, 12, 2, "i2c2", i2c_src1),
	KER_CLK(RCC_APB1LENR, 21, RCC_D2CCIP2R, 12, 2, "i2c1", i2c_src1),
	KER_CLK(RCC_APB1LENR, 20, RCC_D2CCIP2R, 0, 3, "uart5", usart_src2),
	KER_CLK(RCC_APB1LENR, 19, RCC_D2CCIP2R, 0, 3, "uart4", usart_src2),
	KER_CLK(RCC_APB1LENR, 18, RCC_D2CCIP2R, 0, 3, "usart3", usart_src2),
	KER_CLK(RCC_APB1LENR, 17, RCC_D2CCIP2R, 0, 3, "usart2", usart_src2),
	KER_CLK(RCC_APB1LENR, 16, RCC_D2CCIP1R, 20, 2, "spdifrx", spdifrx_src),
	KER_CLK(RCC_APB1LENR, 15, RCC_D2CCIP1R, 16, 3, "spi3", spi_src1),
	KER_CLK(RCC_APB1LENR, 14, RCC_D2CCIP1R, 16, 3, "spi2", spi_src1),
	KER_CLK(RCC_APB1LENR, 9, RCC_D2CCIP2R, 28, 3, "lptim1", lptim_src1),
	KER_CLK(RCC_APB1HENR, 8, RCC_D2CCIP1R, 28, 2, "fdcan", fdcan_src),
	KER_CLK(RCC_APB1HENR, 2, RCC_D2CCIP1R, 31, 1, "swp", swp_src),
	KER_CLK(RCC_APB2ENR, 29, RCC_CFGR, 14, 1, "hrtim", hrtim_src),
	KER_CLK(RCC_APB2ENR, 28, RCC_D2CCIP1R, 24, 1, "dfsdm1", dfsdm1_src),
	KER_CLKF(RCC_APB2ENR, 24, RCC_D2CCIP1R, 6, 3, "sai3", sai_src,
			CLK_SET_RATE_PARENT | CLK_SET_RATE_NO_REPARENT),
	KER_CLKF(RCC_APB2ENR, 23, RCC_D2CCIP1R, 6, 3, "sai2", sai_src,
			CLK_SET_RATE_PARENT | CLK_SET_RATE_NO_REPARENT),
	KER_CLKF(RCC_APB2ENR, 22, RCC_D2CCIP1R, 0, 3, "sai1", sai_src,
			CLK_SET_RATE_PARENT | CLK_SET_RATE_NO_REPARENT),
	KER_CLK(RCC_APB2ENR, 20, RCC_D2CCIP1R, 16, 3, "spi5", spi_src2),
	KER_CLK(RCC_APB2ENR, 13, RCC_D2CCIP1R, 16, 3, "spi4", spi_src2),
	KER_CLK(RCC_APB2ENR, 12, RCC_D2CCIP1R, 16, 3, "spi1", spi_src1),
	KER_CLK(RCC_APB2ENR, 5, RCC_D2CCIP2R, 3, 3, "usart6", usart_src1),
	KER_CLK(RCC_APB2ENR, 4, RCC_D2CCIP2R, 3, 3, "usart1", usart_src1),
	/*
	 * sai4a/sai4b share enable bit 21 but have separate mux fields —
	 * NOTE(review): presumably a single SAI4 enable bit; confirm
	 * against the RCC_APB4ENR description in the reference manual.
	 */
	KER_CLK(RCC_APB4ENR, 21, RCC_D3CCIPR, 24, 3, "sai4b", sai_src),
	KER_CLK(RCC_APB4ENR, 21, RCC_D3CCIPR, 21, 3, "sai4a", sai_src),
	KER_CLK(RCC_APB4ENR, 12, RCC_D3CCIPR, 13, 3, "lptim5", lptim_src2),
	KER_CLK(RCC_APB4ENR, 11, RCC_D3CCIPR, 13, 3, "lptim4", lptim_src2),
	KER_CLK(RCC_APB4ENR, 10, RCC_D3CCIPR, 13, 3, "lptim3", lptim_src2),
	KER_CLK(RCC_APB4ENR, 9, RCC_D3CCIPR, 10, 3, "lptim2", lptim_src2),
	KER_CLK(RCC_APB4ENR, 7, RCC_D3CCIPR, 8, 2, "i2c4", i2c_src2),
	KER_CLK(RCC_APB4ENR, 5, RCC_D3CCIPR, 28, 3, "spi6", spi_src3),
	KER_CLK(RCC_APB4ENR, 3, RCC_D3CCIPR, 0, 3, "lpuart1", lpuart1_src),
};
/* Generator config for kernel clocks: generic mux + gate, no divider. */
static struct composite_clk_gcfg kernel_clk_cfg = {
	M_CFG_MUX(NULL, 0),
	M_CFG_GATE(NULL, 0),
};
/* RTC clock */

/*
 * RTC & LSE registers are protected against parasitic write access.
 * PWR_CR_DBP bit must be set to enable write access to RTC registers.
 */
/* STM32_PWR_CR: offset of the control register in the PWR syscon */
#define PWR_CR 0x00
/* STM32_PWR_CR bit field: disable backup domain write protection */
#define PWR_CR_DBP BIT(8)
/* Generator config for the RTC clock: generic mux + gate in RCC_BDCR. */
static struct composite_clk_gcfg rtc_clk_cfg = {
	M_CFG_MUX(NULL, 0),
	M_CFG_GATE(NULL, 0),
};
/* RTC clock: gate at RCC_BDCR bit 15, 2-bit source mux at bits 9:8. */
static const struct composite_clk_cfg rtc_clk =
	KER_CLK(RCC_BDCR, 15, RCC_BDCR, 8, 2, "rtc_ck", rtc_src);
/* Micro-controller output clock */

/* Generator config for MCO pins: mux + divider (value 0 means div-by-1). */
static struct composite_clk_gcfg mco_clk_cfg = {
	M_CFG_MUX(NULL, 0),
	M_CFG_DIV(NULL,	CLK_DIVIDER_ONE_BASED | CLK_DIVIDER_ALLOW_ZERO),
};
/*
 * Build one MCO clock config: source mux plus rate divider, no gate.
 * _parents must be a real array (ARRAY_SIZE is applied to it).
 */
#define M_MCO_F(_name, _parents, _mux_offset,  _mux_shift, _mux_width,\
		_rate_offset, _rate_shift, _rate_width,\
		_flags)\
{\
	.mux = &(struct muxdiv_cfg) {_mux_offset, _mux_shift, _mux_width },\
	.div = &(struct muxdiv_cfg) {_rate_offset, _rate_shift, _rate_width},\
	.gate = NULL,\
	.name = _name,\
	.parent_name = _parents,\
	.num_parents = ARRAY_SIZE(_parents),\
	.flags = _flags,\
}
/* The two MCO output pins; mux and divider fields both live in RCC_CFGR. */
static const struct composite_clk_cfg mco_clk[] = {
	M_MCO_F("mco1", mco_src1, RCC_CFGR, 22, 4, RCC_CFGR, 18, 4, 0),
	M_MCO_F("mco2", mco_src2, RCC_CFGR, 29, 3, RCC_CFGR, 25, 4, 0),
};
  975. static void __init stm32h7_rcc_init(struct device_node *np)
  976. {
  977. struct clk_hw_onecell_data *clk_data;
  978. struct composite_cfg c_cfg;
  979. int n;
  980. const char *hse_clk, *lse_clk, *i2s_clk;
  981. struct regmap *pdrm;
  982. clk_data = kzalloc(struct_size(clk_data, hws, STM32H7_MAX_CLKS),
  983. GFP_KERNEL);
  984. if (!clk_data)
  985. return;
  986. clk_data->num = STM32H7_MAX_CLKS;
  987. hws = clk_data->hws;
  988. for (n = 0; n < STM32H7_MAX_CLKS; n++)
  989. hws[n] = ERR_PTR(-ENOENT);
  990. /* get RCC base @ from DT */
  991. base = of_iomap(np, 0);
  992. if (!base) {
  993. pr_err("%pOFn: unable to map resource", np);
  994. goto err_free_clks;
  995. }
  996. pdrm = syscon_regmap_lookup_by_phandle(np, "st,syscfg");
  997. if (IS_ERR(pdrm))
  998. pr_warn("%s: Unable to get syscfg\n", __func__);
  999. else
  1000. /* In any case disable backup domain write protection
  1001. * and will never be enabled.
  1002. * Needed by LSE & RTC clocks.
  1003. */
  1004. regmap_update_bits(pdrm, PWR_CR, PWR_CR_DBP, PWR_CR_DBP);
  1005. /* Put parent names from DT */
  1006. hse_clk = of_clk_get_parent_name(np, 0);
  1007. lse_clk = of_clk_get_parent_name(np, 1);
  1008. i2s_clk = of_clk_get_parent_name(np, 2);
  1009. sai_src[3] = i2s_clk;
  1010. spi_src1[3] = i2s_clk;
  1011. /* Register Internal oscillators */
  1012. clk_hw_register_fixed_rate(NULL, "clk-hsi", NULL, 0, 64000000);
  1013. clk_hw_register_fixed_rate(NULL, "clk-csi", NULL, 0, 4000000);
  1014. clk_hw_register_fixed_rate(NULL, "clk-lsi", NULL, 0, 32000);
  1015. clk_hw_register_fixed_rate(NULL, "clk-rc48", NULL, 0, 48000);
  1016. /* This clock is coming from outside. Frequencies unknown */
  1017. hws[CK_DSI_PHY] = clk_hw_register_fixed_rate(NULL, "ck_dsi_phy", NULL,
  1018. 0, 0);
  1019. hws[HSI_DIV] = clk_hw_register_divider(NULL, "hsidiv", "clk-hsi", 0,
  1020. base + RCC_CR, 3, 2, CLK_DIVIDER_POWER_OF_TWO,
  1021. &stm32rcc_lock);
  1022. hws[HSE_1M] = clk_hw_register_divider(NULL, "hse_1M", "hse_ck", 0,
  1023. base + RCC_CFGR, 8, 6, CLK_DIVIDER_ONE_BASED |
  1024. CLK_DIVIDER_ALLOW_ZERO,
  1025. &stm32rcc_lock);
  1026. /* Mux system clocks */
  1027. for (n = 0; n < ARRAY_SIZE(stm32_mclk); n++)
  1028. hws[MCLK_BANK + n] = clk_hw_register_mux(NULL,
  1029. stm32_mclk[n].name,
  1030. stm32_mclk[n].parents,
  1031. stm32_mclk[n].num_parents,
  1032. stm32_mclk[n].flags,
  1033. stm32_mclk[n].offset + base,
  1034. stm32_mclk[n].shift,
  1035. stm32_mclk[n].width,
  1036. 0,
  1037. &stm32rcc_lock);
  1038. register_core_and_bus_clocks();
  1039. /* Oscillary clocks */
  1040. for (n = 0; n < ARRAY_SIZE(stm32_oclk); n++)
  1041. hws[OSC_BANK + n] = clk_register_ready_gate(NULL,
  1042. stm32_oclk[n].name,
  1043. stm32_oclk[n].parent,
  1044. stm32_oclk[n].gate_offset + base,
  1045. stm32_oclk[n].bit_idx,
  1046. stm32_oclk[n].bit_rdy,
  1047. stm32_oclk[n].flags,
  1048. &stm32rcc_lock);
  1049. hws[HSE_CK] = clk_register_ready_gate(NULL,
  1050. "hse_ck",
  1051. hse_clk,
  1052. RCC_CR + base,
  1053. 16, 17,
  1054. 0,
  1055. &stm32rcc_lock);
  1056. hws[LSE_CK] = clk_register_ready_gate(NULL,
  1057. "lse_ck",
  1058. lse_clk,
  1059. RCC_BDCR + base,
  1060. 0, 1,
  1061. 0,
  1062. &stm32rcc_lock);
  1063. hws[CSI_KER_DIV122 + n] = clk_hw_register_fixed_factor(NULL,
  1064. "csi_ker_div122", "csi_ker", 0, 1, 122);
  1065. /* PLLs */
  1066. for (n = 0; n < ARRAY_SIZE(stm32_pll); n++) {
  1067. int odf;
  1068. /* Register the VCO */
  1069. clk_register_stm32_pll(NULL, stm32_pll[n].name,
  1070. stm32_pll[n].parent_name, stm32_pll[n].flags,
  1071. stm32_pll[n].cfg,
  1072. &stm32rcc_lock);
  1073. /* Register the 3 output dividers */
  1074. for (odf = 0; odf < 3; odf++) {
  1075. int idx = n * 3 + odf;
  1076. get_cfg_composite_div(&odf_clk_gcfg, &stm32_odf[n][odf],
  1077. &c_cfg, &stm32rcc_lock);
  1078. hws[ODF_BANK + idx] = clk_hw_register_composite(NULL,
  1079. stm32_odf[n][odf].name,
  1080. stm32_odf[n][odf].parent_name,
  1081. stm32_odf[n][odf].num_parents,
  1082. c_cfg.mux_hw, c_cfg.mux_ops,
  1083. c_cfg.div_hw, c_cfg.div_ops,
  1084. c_cfg.gate_hw, c_cfg.gate_ops,
  1085. stm32_odf[n][odf].flags);
  1086. }
  1087. }
  1088. /* Peripheral clocks */
  1089. for (n = 0; n < ARRAY_SIZE(pclk); n++)
  1090. hws[PERIF_BANK + n] = clk_hw_register_gate(NULL, pclk[n].name,
  1091. pclk[n].parent,
  1092. pclk[n].flags, base + pclk[n].gate_offset,
  1093. pclk[n].bit_idx, pclk[n].flags, &stm32rcc_lock);
  1094. /* Kernel clocks */
  1095. for (n = 0; n < ARRAY_SIZE(kclk); n++) {
  1096. get_cfg_composite_div(&kernel_clk_cfg, &kclk[n], &c_cfg,
  1097. &stm32rcc_lock);
  1098. hws[KERN_BANK + n] = clk_hw_register_composite(NULL,
  1099. kclk[n].name,
  1100. kclk[n].parent_name,
  1101. kclk[n].num_parents,
  1102. c_cfg.mux_hw, c_cfg.mux_ops,
  1103. c_cfg.div_hw, c_cfg.div_ops,
  1104. c_cfg.gate_hw, c_cfg.gate_ops,
  1105. kclk[n].flags);
  1106. }
  1107. /* RTC clock (default state is off) */
  1108. clk_hw_register_fixed_rate(NULL, "off", NULL, 0, 0);
  1109. get_cfg_composite_div(&rtc_clk_cfg, &rtc_clk, &c_cfg, &stm32rcc_lock);
  1110. hws[RTC_CK] = clk_hw_register_composite(NULL,
  1111. rtc_clk.name,
  1112. rtc_clk.parent_name,
  1113. rtc_clk.num_parents,
  1114. c_cfg.mux_hw, c_cfg.mux_ops,
  1115. c_cfg.div_hw, c_cfg.div_ops,
  1116. c_cfg.gate_hw, c_cfg.gate_ops,
  1117. rtc_clk.flags);
  1118. /* Micro-controller clocks */
  1119. for (n = 0; n < ARRAY_SIZE(mco_clk); n++) {
  1120. get_cfg_composite_div(&mco_clk_cfg, &mco_clk[n], &c_cfg,
  1121. &stm32rcc_lock);
  1122. hws[MCO_BANK + n] = clk_hw_register_composite(NULL,
  1123. mco_clk[n].name,
  1124. mco_clk[n].parent_name,
  1125. mco_clk[n].num_parents,
  1126. c_cfg.mux_hw, c_cfg.mux_ops,
  1127. c_cfg.div_hw, c_cfg.div_ops,
  1128. c_cfg.gate_hw, c_cfg.gate_ops,
  1129. mco_clk[n].flags);
  1130. }
  1131. of_clk_add_hw_provider(np, of_clk_hw_onecell_get, clk_data);
  1132. return;
  1133. err_free_clks:
  1134. kfree(clk_data);
  1135. }
/* The RCC node is a clock and reset controller, and these
 * functionalities are supported by different drivers that
 * match the same compatible string.
 */
  1140. CLK_OF_DECLARE_DRIVER(stm32h7_rcc, "st,stm32h743-rcc", stm32h7_rcc_init);