/* clk-scmi.c */
  1. // SPDX-License-Identifier: GPL-2.0
  2. /*
  3. * System Control and Power Interface (SCMI) Protocol based clock driver
  4. *
  5. * Copyright (C) 2018-2024 ARM Ltd.
  6. */
  7. #include <linux/bits.h>
  8. #include <linux/clk-provider.h>
  9. #include <linux/device.h>
  10. #include <linux/err.h>
  11. #include <linux/of.h>
  12. #include <linux/module.h>
  13. #include <linux/scmi_protocol.h>
  14. #include <asm/div64.h>
/* Markers for the 'atomic' argument of the SCMI clock enable/disable calls. */
#define NOT_ATOMIC	false
#define ATOMIC		true

/*
 * Optional per-clock capabilities, used as bit positions when building the
 * feature key that selects a matching set of clk_ops in scmi_clk_ops_select().
 */
enum scmi_clk_feats {
	SCMI_CLK_ATOMIC_SUPPORTED,
	SCMI_CLK_STATE_CTRL_SUPPORTED,
	SCMI_CLK_RATE_CTRL_SUPPORTED,
	SCMI_CLK_PARENT_CTRL_SUPPORTED,
	SCMI_CLK_DUTY_CYCLE_SUPPORTED,
	SCMI_CLK_FEATS_COUNT
};

/* One clk_ops combination can exist for each subset of the features above. */
#define SCMI_MAX_CLK_OPS	BIT(SCMI_CLK_FEATS_COUNT)
/* Handle to the SCMI clock protocol operations, obtained once at probe. */
static const struct scmi_clk_proto_ops *scmi_proto_clk_ops;

/*
 * struct scmi_clk - Driver-private descriptor for one SCMI-provided clock
 * @id: SCMI clock identifier
 * @dev: Device used for devres and diagnostics
 * @hw: Handle registered with the common clock framework
 * @info: Clock description retrieved from the SCMI platform
 * @ph: SCMI protocol handle used for all clock protocol calls
 * @parent_data: Table mapping local parent indices to SCMI parent clock IDs
 */
struct scmi_clk {
	u32 id;
	struct device *dev;
	struct clk_hw hw;
	const struct scmi_clock_info *info;
	const struct scmi_protocol_handle *ph;
	struct clk_parent_data *parent_data;
};

#define to_scmi_clk(clk) container_of(clk, struct scmi_clk, hw)
  36. static unsigned long scmi_clk_recalc_rate(struct clk_hw *hw,
  37. unsigned long parent_rate)
  38. {
  39. int ret;
  40. u64 rate;
  41. struct scmi_clk *clk = to_scmi_clk(hw);
  42. ret = scmi_proto_clk_ops->rate_get(clk->ph, clk->id, &rate);
  43. if (ret)
  44. return 0;
  45. return rate;
  46. }
  47. static long scmi_clk_round_rate(struct clk_hw *hw, unsigned long rate,
  48. unsigned long *parent_rate)
  49. {
  50. u64 fmin, fmax, ftmp;
  51. struct scmi_clk *clk = to_scmi_clk(hw);
  52. /*
  53. * We can't figure out what rate it will be, so just return the
  54. * rate back to the caller. scmi_clk_recalc_rate() will be called
  55. * after the rate is set and we'll know what rate the clock is
  56. * running at then.
  57. */
  58. if (clk->info->rate_discrete)
  59. return rate;
  60. fmin = clk->info->range.min_rate;
  61. fmax = clk->info->range.max_rate;
  62. if (rate <= fmin)
  63. return fmin;
  64. else if (rate >= fmax)
  65. return fmax;
  66. ftmp = rate - fmin;
  67. ftmp += clk->info->range.step_size - 1; /* to round up */
  68. do_div(ftmp, clk->info->range.step_size);
  69. return ftmp * clk->info->range.step_size + fmin;
  70. }
  71. static int scmi_clk_set_rate(struct clk_hw *hw, unsigned long rate,
  72. unsigned long parent_rate)
  73. {
  74. struct scmi_clk *clk = to_scmi_clk(hw);
  75. return scmi_proto_clk_ops->rate_set(clk->ph, clk->id, rate);
  76. }
  77. static int scmi_clk_set_parent(struct clk_hw *hw, u8 parent_index)
  78. {
  79. struct scmi_clk *clk = to_scmi_clk(hw);
  80. return scmi_proto_clk_ops->parent_set(clk->ph, clk->id, parent_index);
  81. }
  82. static u8 scmi_clk_get_parent(struct clk_hw *hw)
  83. {
  84. struct scmi_clk *clk = to_scmi_clk(hw);
  85. u32 parent_id, p_idx;
  86. int ret;
  87. ret = scmi_proto_clk_ops->parent_get(clk->ph, clk->id, &parent_id);
  88. if (ret)
  89. return 0;
  90. for (p_idx = 0; p_idx < clk->info->num_parents; p_idx++) {
  91. if (clk->parent_data[p_idx].index == parent_id)
  92. break;
  93. }
  94. if (p_idx == clk->info->num_parents)
  95. return 0;
  96. return p_idx;
  97. }
  98. static int scmi_clk_determine_rate(struct clk_hw *hw, struct clk_rate_request *req)
  99. {
  100. /*
  101. * Suppose all the requested rates are supported, and let firmware
  102. * to handle the left work.
  103. */
  104. return 0;
  105. }
  106. static int scmi_clk_enable(struct clk_hw *hw)
  107. {
  108. struct scmi_clk *clk = to_scmi_clk(hw);
  109. return scmi_proto_clk_ops->enable(clk->ph, clk->id, NOT_ATOMIC);
  110. }
  111. static void scmi_clk_disable(struct clk_hw *hw)
  112. {
  113. struct scmi_clk *clk = to_scmi_clk(hw);
  114. scmi_proto_clk_ops->disable(clk->ph, clk->id, NOT_ATOMIC);
  115. }
  116. static int scmi_clk_atomic_enable(struct clk_hw *hw)
  117. {
  118. struct scmi_clk *clk = to_scmi_clk(hw);
  119. return scmi_proto_clk_ops->enable(clk->ph, clk->id, ATOMIC);
  120. }
  121. static void scmi_clk_atomic_disable(struct clk_hw *hw)
  122. {
  123. struct scmi_clk *clk = to_scmi_clk(hw);
  124. scmi_proto_clk_ops->disable(clk->ph, clk->id, ATOMIC);
  125. }
  126. static int __scmi_clk_is_enabled(struct clk_hw *hw, bool atomic)
  127. {
  128. int ret;
  129. bool enabled = false;
  130. struct scmi_clk *clk = to_scmi_clk(hw);
  131. ret = scmi_proto_clk_ops->state_get(clk->ph, clk->id, &enabled, atomic);
  132. if (ret)
  133. dev_warn(clk->dev,
  134. "Failed to get state for clock ID %d\n", clk->id);
  135. return !!enabled;
  136. }
  137. static int scmi_clk_atomic_is_enabled(struct clk_hw *hw)
  138. {
  139. return __scmi_clk_is_enabled(hw, ATOMIC);
  140. }
  141. static int scmi_clk_is_enabled(struct clk_hw *hw)
  142. {
  143. return __scmi_clk_is_enabled(hw, NOT_ATOMIC);
  144. }
  145. static int scmi_clk_get_duty_cycle(struct clk_hw *hw, struct clk_duty *duty)
  146. {
  147. int ret;
  148. u32 val;
  149. struct scmi_clk *clk = to_scmi_clk(hw);
  150. ret = scmi_proto_clk_ops->config_oem_get(clk->ph, clk->id,
  151. SCMI_CLOCK_CFG_DUTY_CYCLE,
  152. &val, NULL, false);
  153. if (!ret) {
  154. duty->num = val;
  155. duty->den = 100;
  156. } else {
  157. dev_warn(clk->dev,
  158. "Failed to get duty cycle for clock ID %d\n", clk->id);
  159. }
  160. return ret;
  161. }
  162. static int scmi_clk_set_duty_cycle(struct clk_hw *hw, struct clk_duty *duty)
  163. {
  164. int ret;
  165. u32 val;
  166. struct scmi_clk *clk = to_scmi_clk(hw);
  167. /* SCMI OEM Duty Cycle is expressed as a percentage */
  168. val = (duty->num * 100) / duty->den;
  169. ret = scmi_proto_clk_ops->config_oem_set(clk->ph, clk->id,
  170. SCMI_CLOCK_CFG_DUTY_CYCLE,
  171. val, false);
  172. if (ret)
  173. dev_warn(clk->dev,
  174. "Failed to set duty cycle(%u/%u) for clock ID %d\n",
  175. duty->num, duty->den, clk->id);
  176. return ret;
  177. }
/*
 * scmi_clk_ops_init() - Register one SCMI clock with the clock framework
 * @dev: The device used for devm registration
 * @sclk: The SCMI clock descriptor to register
 * @scmi_ops: The clk_ops set selected for this clock's capabilities
 *
 * Registers the clk_hw and then constrains its rate range to what the
 * SCMI platform advertised (discrete list or min/max range).
 *
 * Return: 0 on success, a negative errno otherwise.
 */
static int scmi_clk_ops_init(struct device *dev, struct scmi_clk *sclk,
			     const struct clk_ops *scmi_ops)
{
	int ret;
	unsigned long min_rate, max_rate;

	/*
	 * CLK_GET_RATE_NOCACHE makes the framework re-query the rate from
	 * the platform instead of trusting a cached value. The init data
	 * lives on the stack; it is only consumed during registration.
	 */
	struct clk_init_data init = {
		.flags = CLK_GET_RATE_NOCACHE,
		.num_parents = sclk->info->num_parents,
		.ops = scmi_ops,
		.name = sclk->info->name,
		.parent_data = sclk->parent_data,
	};

	sclk->hw.init = &init;
	ret = devm_clk_hw_register(dev, &sclk->hw);
	if (ret)
		return ret;

	if (sclk->info->rate_discrete) {
		int num_rates = sclk->info->list.num_rates;

		if (num_rates <= 0)
			return -EINVAL;

		/*
		 * NOTE(review): assumes the discrete rate list is sorted in
		 * ascending order (first = min, last = max) — confirm
		 * against the SCMI rate enumeration.
		 */
		min_rate = sclk->info->list.rates[0];
		max_rate = sclk->info->list.rates[num_rates - 1];
	} else {
		min_rate = sclk->info->range.min_rate;
		max_rate = sclk->info->range.max_rate;
	}

	clk_hw_set_rate_range(&sclk->hw, min_rate, max_rate);
	return ret;
}
  207. /**
  208. * scmi_clk_ops_alloc() - Alloc and configure clock operations
  209. * @dev: A device reference for devres
  210. * @feats_key: A bitmap representing the desired clk_ops capabilities
  211. *
  212. * Allocate and configure a proper set of clock operations depending on the
  213. * specifically required SCMI clock features.
  214. *
  215. * Return: A pointer to the allocated and configured clk_ops on success,
  216. * or NULL on allocation failure.
  217. */
  218. static const struct clk_ops *
  219. scmi_clk_ops_alloc(struct device *dev, unsigned long feats_key)
  220. {
  221. struct clk_ops *ops;
  222. ops = devm_kzalloc(dev, sizeof(*ops), GFP_KERNEL);
  223. if (!ops)
  224. return NULL;
  225. /*
  226. * We can provide enable/disable/is_enabled atomic callbacks only if the
  227. * underlying SCMI transport for an SCMI instance is configured to
  228. * handle SCMI commands in an atomic manner.
  229. *
  230. * When no SCMI atomic transport support is available we instead provide
  231. * only the prepare/unprepare API, as allowed by the clock framework
  232. * when atomic calls are not available.
  233. */
  234. if (feats_key & BIT(SCMI_CLK_STATE_CTRL_SUPPORTED)) {
  235. if (feats_key & BIT(SCMI_CLK_ATOMIC_SUPPORTED)) {
  236. ops->enable = scmi_clk_atomic_enable;
  237. ops->disable = scmi_clk_atomic_disable;
  238. } else {
  239. ops->prepare = scmi_clk_enable;
  240. ops->unprepare = scmi_clk_disable;
  241. }
  242. }
  243. if (feats_key & BIT(SCMI_CLK_ATOMIC_SUPPORTED))
  244. ops->is_enabled = scmi_clk_atomic_is_enabled;
  245. else
  246. ops->is_prepared = scmi_clk_is_enabled;
  247. /* Rate ops */
  248. ops->recalc_rate = scmi_clk_recalc_rate;
  249. ops->round_rate = scmi_clk_round_rate;
  250. ops->determine_rate = scmi_clk_determine_rate;
  251. if (feats_key & BIT(SCMI_CLK_RATE_CTRL_SUPPORTED))
  252. ops->set_rate = scmi_clk_set_rate;
  253. /* Parent ops */
  254. ops->get_parent = scmi_clk_get_parent;
  255. if (feats_key & BIT(SCMI_CLK_PARENT_CTRL_SUPPORTED))
  256. ops->set_parent = scmi_clk_set_parent;
  257. /* Duty cycle */
  258. if (feats_key & BIT(SCMI_CLK_DUTY_CYCLE_SUPPORTED)) {
  259. ops->get_duty_cycle = scmi_clk_get_duty_cycle;
  260. ops->set_duty_cycle = scmi_clk_set_duty_cycle;
  261. }
  262. return ops;
  263. }
  264. /**
  265. * scmi_clk_ops_select() - Select a proper set of clock operations
  266. * @sclk: A reference to an SCMI clock descriptor
  267. * @atomic_capable: A flag to indicate if atomic mode is supported by the
  268. * transport
  269. * @atomic_threshold_us: Platform atomic threshold value in microseconds:
  270. * clk_ops are atomic when clock enable latency is less
  271. * than this threshold
  272. * @clk_ops_db: A reference to the array used as a database to store all the
  273. * created clock operations combinations.
  274. * @db_size: Maximum number of entries held by @clk_ops_db
  275. *
  276. * After having built a bitmap descriptor to represent the set of features
  277. * needed by this SCMI clock, at first use it to lookup into the set of
  278. * previously allocated clk_ops to check if a suitable combination of clock
  279. * operations was already created; when no match is found allocate a brand new
  280. * set of clk_ops satisfying the required combination of features and save it
  281. * for future references.
  282. *
  283. * In this way only one set of clk_ops is ever created for each different
  284. * combination that is effectively needed by a driver instance.
  285. *
  286. * Return: A pointer to the allocated and configured clk_ops on success, or
  287. * NULL otherwise.
  288. */
  289. static const struct clk_ops *
  290. scmi_clk_ops_select(struct scmi_clk *sclk, bool atomic_capable,
  291. unsigned int atomic_threshold_us,
  292. const struct clk_ops **clk_ops_db, size_t db_size)
  293. {
  294. const struct scmi_clock_info *ci = sclk->info;
  295. unsigned int feats_key = 0;
  296. const struct clk_ops *ops;
  297. /*
  298. * Note that when transport is atomic but SCMI protocol did not
  299. * specify (or support) an enable_latency associated with a
  300. * clock, we default to use atomic operations mode.
  301. */
  302. if (atomic_capable && ci->enable_latency <= atomic_threshold_us)
  303. feats_key |= BIT(SCMI_CLK_ATOMIC_SUPPORTED);
  304. if (!ci->state_ctrl_forbidden)
  305. feats_key |= BIT(SCMI_CLK_STATE_CTRL_SUPPORTED);
  306. if (!ci->rate_ctrl_forbidden)
  307. feats_key |= BIT(SCMI_CLK_RATE_CTRL_SUPPORTED);
  308. if (!ci->parent_ctrl_forbidden)
  309. feats_key |= BIT(SCMI_CLK_PARENT_CTRL_SUPPORTED);
  310. if (ci->extended_config)
  311. feats_key |= BIT(SCMI_CLK_DUTY_CYCLE_SUPPORTED);
  312. if (WARN_ON(feats_key >= db_size))
  313. return NULL;
  314. /* Lookup previously allocated ops */
  315. ops = clk_ops_db[feats_key];
  316. if (ops)
  317. return ops;
  318. /* Did not find a pre-allocated clock_ops */
  319. ops = scmi_clk_ops_alloc(sclk->dev, feats_key);
  320. if (!ops)
  321. return NULL;
  322. /* Store new ops combinations */
  323. clk_ops_db[feats_key] = ops;
  324. return ops;
  325. }
/*
 * scmi_clocks_probe() - Discover and register all clocks of an SCMI instance
 * @sdev: The SCMI device bound to the clock protocol
 *
 * Queries the SCMI platform for the number of clocks, registers each valid
 * one with the common clock framework and finally installs a onecell clock
 * provider for devicetree consumers.
 *
 * Return: 0 on success, a negative errno otherwise.
 */
static int scmi_clocks_probe(struct scmi_device *sdev)
{
	int idx, count, err;
	unsigned int atomic_threshold_us;
	bool transport_is_atomic;
	struct clk_hw **hws;
	struct clk_hw_onecell_data *clk_data;
	struct device *dev = &sdev->dev;
	struct device_node *np = dev->of_node;
	const struct scmi_handle *handle = sdev->handle;
	struct scmi_protocol_handle *ph;
	const struct clk_ops *scmi_clk_ops_db[SCMI_MAX_CLK_OPS] = {};

	if (!handle)
		return -ENODEV;

	/* Acquire the clock protocol ops; devres releases them on unbind. */
	scmi_proto_clk_ops =
		handle->devm_protocol_get(sdev, SCMI_PROTOCOL_CLOCK, &ph);
	if (IS_ERR(scmi_proto_clk_ops))
		return PTR_ERR(scmi_proto_clk_ops);

	count = scmi_proto_clk_ops->count_get(ph);
	if (count < 0) {
		dev_err(dev, "%pOFn: invalid clock output count\n", np);
		return -EINVAL;
	}

	clk_data = devm_kzalloc(dev, struct_size(clk_data, hws, count),
				GFP_KERNEL);
	if (!clk_data)
		return -ENOMEM;
	clk_data->num = count;
	hws = clk_data->hws;

	transport_is_atomic = handle->is_transport_atomic(handle,
							  &atomic_threshold_us);

	for (idx = 0; idx < count; idx++) {
		struct scmi_clk *sclk;
		const struct clk_ops *scmi_ops;

		sclk = devm_kzalloc(dev, sizeof(*sclk), GFP_KERNEL);
		if (!sclk)
			return -ENOMEM;

		/* Skip (but do not fail on) clocks the platform cannot describe. */
		sclk->info = scmi_proto_clk_ops->info_get(ph, idx);
		if (!sclk->info) {
			dev_dbg(dev, "invalid clock info for idx %d\n", idx);
			devm_kfree(dev, sclk);
			continue;
		}

		sclk->id = idx;
		sclk->ph = ph;
		sclk->dev = dev;

		/*
		 * Note that the scmi_clk_ops_db is on the stack, not global,
		 * because it cannot be shared between mulitple probe-sequences
		 * to avoid sharing the devm_ allocated clk_ops between multiple
		 * SCMI clk driver instances.
		 */
		scmi_ops = scmi_clk_ops_select(sclk, transport_is_atomic,
					       atomic_threshold_us,
					       scmi_clk_ops_db,
					       ARRAY_SIZE(scmi_clk_ops_db));
		if (!scmi_ops)
			return -ENOMEM;

		/* Initialize clock parent data. */
		if (sclk->info->num_parents > 0) {
			sclk->parent_data = devm_kcalloc(dev, sclk->info->num_parents,
							 sizeof(*sclk->parent_data), GFP_KERNEL);
			if (!sclk->parent_data)
				return -ENOMEM;

			/*
			 * NOTE(review): hws[] is indexed with the parent's
			 * SCMI ID, which assumes parents were enumerated
			 * (and registered) at an earlier idx — confirm the
			 * platform guarantees this ordering.
			 */
			for (int i = 0; i < sclk->info->num_parents; i++) {
				sclk->parent_data[i].index = sclk->info->parents[i];
				sclk->parent_data[i].hw = hws[sclk->info->parents[i]];
			}
		}

		err = scmi_clk_ops_init(dev, sclk, scmi_ops);
		if (err) {
			/* Registration failure: release this clock's devres. */
			dev_err(dev, "failed to register clock %d\n", idx);
			devm_kfree(dev, sclk->parent_data);
			devm_kfree(dev, sclk);
			hws[idx] = NULL;
		} else {
			dev_dbg(dev, "Registered clock:%s%s\n",
				sclk->info->name,
				scmi_ops->enable ? " (atomic ops)" : "");
			hws[idx] = &sclk->hw;
		}
	}

	return devm_of_clk_add_hw_provider(dev, of_clk_hw_onecell_get,
					   clk_data);
}
/* Match table: bind to the SCMI core's clock protocol device. */
static const struct scmi_device_id scmi_id_table[] = {
	{ SCMI_PROTOCOL_CLOCK, "clocks" },
	{ },
};
MODULE_DEVICE_TABLE(scmi, scmi_id_table);

static struct scmi_driver scmi_clocks_driver = {
	.name = "scmi-clocks",
	.probe = scmi_clocks_probe,
	.id_table = scmi_id_table,
};
module_scmi_driver(scmi_clocks_driver);

MODULE_AUTHOR("Sudeep Holla <sudeep.holla@arm.com>");
MODULE_DESCRIPTION("ARM SCMI clock driver");
MODULE_LICENSE("GPL v2");