perf.c
  1. // SPDX-License-Identifier: GPL-2.0
  2. /*
  3. * System Control and Management Interface (SCMI) Performance Protocol
  4. *
  5. * Copyright (C) 2018 ARM Ltd.
  6. */
  7. #include <linux/of.h>
  8. #include <linux/platform_device.h>
  9. #include <linux/pm_opp.h>
  10. #include <linux/sort.h>
  11. #include "common.h"
/* Message IDs of the SCMI performance protocol commands used below. */
enum scmi_performance_protocol_cmd {
	PERF_DOMAIN_ATTRIBUTES = 0x3,
	PERF_DESCRIBE_LEVELS = 0x4,
	PERF_LIMITS_SET = 0x5,
	PERF_LIMITS_GET = 0x6,
	PERF_LEVEL_SET = 0x7,
	PERF_LEVEL_GET = 0x8,
	PERF_NOTIFY_LIMITS = 0x9,
	PERF_NOTIFY_LEVEL = 0xa,
};
/*
 * One operating performance point (OPP) cached from the platform:
 * an abstract performance level, its power cost and the latency of
 * a transition to it.
 */
struct scmi_opp {
	u32 perf;		/* abstract performance level value */
	u32 power;		/* power cost (units depend on reported power scale) */
	u32 trans_latency_us;	/* transition latency in microseconds */
};
/*
 * Response payload of PROTOCOL_ATTRIBUTES for the performance protocol.
 * Little-endian wire format - field order is ABI, do not reorder.
 */
struct scmi_msg_resp_perf_attributes {
	__le16 num_domains;
	__le16 flags;
/* Bit 0 of flags: power values are expressed in milliwatts */
#define POWER_SCALE_IN_MILLIWATT(x)	((x) & BIT(0))
	__le32 stats_addr_low;		/* statistics shared-memory region, low 32 bits */
	__le32 stats_addr_high;		/* statistics shared-memory region, high 32 bits */
	__le32 stats_size;
};
/*
 * Response payload of PERF_DOMAIN_ATTRIBUTES.
 * Little-endian wire format - field order is ABI, do not reorder.
 */
struct scmi_msg_resp_perf_domain_attributes {
	__le32 flags;
/* Capability bits extracted from flags */
#define SUPPORTS_SET_LIMITS(x)		((x) & BIT(31))
#define SUPPORTS_SET_PERF_LVL(x)	((x) & BIT(30))
#define SUPPORTS_PERF_LIMIT_NOTIFY(x)	((x) & BIT(29))
#define SUPPORTS_PERF_LEVEL_NOTIFY(x)	((x) & BIT(28))
	__le32 rate_limit_us;
	__le32 sustained_freq_khz;	/* frequency at the sustained perf level */
	__le32 sustained_perf_level;
	u8 name[SCMI_MAX_STR_SIZE];	/* NUL-padded ASCII domain name */
};
/* Request payload of PERF_DESCRIBE_LEVELS (little-endian wire format). */
struct scmi_msg_perf_describe_levels {
	__le32 domain;
	__le32 level_index;	/* number of levels to skip (for batched reads) */
};
/* Request payload of PERF_LIMITS_SET (little-endian wire format). */
struct scmi_perf_set_limits {
	__le32 domain;
	__le32 max_level;
	__le32 min_level;
};
/* Response payload of PERF_LIMITS_GET (little-endian wire format). */
struct scmi_perf_get_limits {
	__le32 max_level;
	__le32 min_level;
};
/* Request payload of PERF_LEVEL_SET (little-endian wire format). */
struct scmi_perf_set_level {
	__le32 domain;
	__le32 level;
};
/*
 * Request payload shared by PERF_NOTIFY_LIMITS and PERF_NOTIFY_LEVEL
 * (little-endian wire format). Currently unused in this file.
 */
struct scmi_perf_notify_level_or_limits {
	__le32 domain;
	__le32 notify_enable;
};
  67. struct scmi_msg_resp_perf_describe_levels {
  68. __le16 num_returned;
  69. __le16 num_remaining;
  70. struct {
  71. __le32 perf_val;
  72. __le32 power;
  73. __le16 transition_latency_us;
  74. __le16 reserved;
  75. } opp[0];
  76. };
/* Per-domain information cached at protocol-init time. */
struct perf_dom_info {
	bool set_limits;		/* PERF_LIMITS_SET supported */
	bool set_perf;			/* PERF_LEVEL_SET supported */
	bool perf_limit_notify;		/* limit-change notifications supported */
	bool perf_level_notify;		/* level-change notifications supported */
	u32 opp_count;			/* number of valid entries in opp[] */
	u32 sustained_freq_khz;
	u32 sustained_perf_level;
	u32 mult_factor;		/* perf level -> frequency (Hz) multiplier */
	char name[SCMI_MAX_STR_SIZE];
	struct scmi_opp opp[MAX_OPPS];	/* sorted ascending by perf after init */
};
/* Protocol-wide information, stored in handle->perf_priv. */
struct scmi_perf_info {
	int num_domains;
	bool power_scale_mw;		/* true if power values are in milliwatts */
	u64 stats_addr;			/* statistics shared-memory address */
	u32 stats_size;
	struct perf_dom_info *dom_info;	/* array of num_domains entries */
};
  96. static int scmi_perf_attributes_get(const struct scmi_handle *handle,
  97. struct scmi_perf_info *pi)
  98. {
  99. int ret;
  100. struct scmi_xfer *t;
  101. struct scmi_msg_resp_perf_attributes *attr;
  102. ret = scmi_xfer_get_init(handle, PROTOCOL_ATTRIBUTES,
  103. SCMI_PROTOCOL_PERF, 0, sizeof(*attr), &t);
  104. if (ret)
  105. return ret;
  106. attr = t->rx.buf;
  107. ret = scmi_do_xfer(handle, t);
  108. if (!ret) {
  109. u16 flags = le16_to_cpu(attr->flags);
  110. pi->num_domains = le16_to_cpu(attr->num_domains);
  111. pi->power_scale_mw = POWER_SCALE_IN_MILLIWATT(flags);
  112. pi->stats_addr = le32_to_cpu(attr->stats_addr_low) |
  113. (u64)le32_to_cpu(attr->stats_addr_high) << 32;
  114. pi->stats_size = le32_to_cpu(attr->stats_size);
  115. }
  116. scmi_xfer_put(handle, t);
  117. return ret;
  118. }
/*
 * Query PERF_DOMAIN_ATTRIBUTES for @domain and cache the decoded
 * capabilities, sustained operating point and name in @dom_info.
 *
 * Return: 0 on success, negative error code otherwise.
 */
static int
scmi_perf_domain_attributes_get(const struct scmi_handle *handle, u32 domain,
				struct perf_dom_info *dom_info)
{
	int ret;
	struct scmi_xfer *t;
	struct scmi_msg_resp_perf_domain_attributes *attr;

	ret = scmi_xfer_get_init(handle, PERF_DOMAIN_ATTRIBUTES,
				 SCMI_PROTOCOL_PERF, sizeof(domain),
				 sizeof(*attr), &t);
	if (ret)
		return ret;

	/* Request payload is just the 32-bit domain identifier. */
	*(__le32 *)t->tx.buf = cpu_to_le32(domain);
	attr = t->rx.buf;

	ret = scmi_do_xfer(handle, t);
	if (!ret) {
		u32 flags = le32_to_cpu(attr->flags);

		dom_info->set_limits = SUPPORTS_SET_LIMITS(flags);
		dom_info->set_perf = SUPPORTS_SET_PERF_LVL(flags);
		dom_info->perf_limit_notify = SUPPORTS_PERF_LIMIT_NOTIFY(flags);
		dom_info->perf_level_notify = SUPPORTS_PERF_LEVEL_NOTIFY(flags);
		dom_info->sustained_freq_khz =
					le32_to_cpu(attr->sustained_freq_khz);
		dom_info->sustained_perf_level =
					le32_to_cpu(attr->sustained_perf_level);
		/*
		 * mult_factor converts an abstract perf level to a frequency
		 * in Hz (freq = perf * mult_factor elsewhere in this file).
		 *
		 * NOTE(review): sustained_freq_khz * 1000 is a u32 multiply
		 * and wraps for frequencies above ~4.29 GHz - confirm the
		 * expected platform range before relying on this.
		 */
		if (!dom_info->sustained_freq_khz ||
		    !dom_info->sustained_perf_level)
			/* CPUFreq converts to kHz, hence default 1000 */
			dom_info->mult_factor = 1000;
		else
			dom_info->mult_factor =
					(dom_info->sustained_freq_khz * 1000) /
					dom_info->sustained_perf_level;
		strlcpy(dom_info->name, attr->name, SCMI_MAX_STR_SIZE);
	}

	scmi_xfer_put(handle, t);
	return ret;
}
  157. static int opp_cmp_func(const void *opp1, const void *opp2)
  158. {
  159. const struct scmi_opp *t1 = opp1, *t2 = opp2;
  160. return t1->perf - t2->perf;
  161. }
/*
 * Fetch all operating performance points of @domain via one or more
 * PERF_DESCRIBE_LEVELS exchanges (the firmware may return them in
 * batches) and cache them, sorted ascending by perf level, in
 * perf_dom->opp[] with perf_dom->opp_count set accordingly.
 *
 * Return: 0 on success, negative error code on a failed exchange.
 */
static int
scmi_perf_describe_levels_get(const struct scmi_handle *handle, u32 domain,
			      struct perf_dom_info *perf_dom)
{
	int ret, cnt;
	u32 tot_opp_cnt = 0;
	u16 num_returned, num_remaining;
	struct scmi_xfer *t;
	struct scmi_opp *opp;
	struct scmi_msg_perf_describe_levels *dom_info;
	struct scmi_msg_resp_perf_describe_levels *level_info;

	ret = scmi_xfer_get_init(handle, PERF_DESCRIBE_LEVELS,
				 SCMI_PROTOCOL_PERF, sizeof(*dom_info), 0, &t);
	if (ret)
		return ret;

	dom_info = t->tx.buf;
	level_info = t->rx.buf;

	do {
		/* The same xfer is reused for every batch. */
		dom_info->domain = cpu_to_le32(domain);
		/* Set the number of OPPs to be skipped/already read */
		dom_info->level_index = cpu_to_le32(tot_opp_cnt);

		ret = scmi_do_xfer(handle, t);
		if (ret)
			break;

		num_returned = le16_to_cpu(level_info->num_returned);
		num_remaining = le16_to_cpu(level_info->num_remaining);
		/* Cap at the size of the preallocated cache array. */
		if (tot_opp_cnt + num_returned > MAX_OPPS) {
			dev_err(handle->dev, "No. of OPPs exceeded MAX_OPPS");
			break;
		}

		/* Append this batch after the entries already cached. */
		opp = &perf_dom->opp[tot_opp_cnt];
		for (cnt = 0; cnt < num_returned; cnt++, opp++) {
			opp->perf = le32_to_cpu(level_info->opp[cnt].perf_val);
			opp->power = le32_to_cpu(level_info->opp[cnt].power);
			opp->trans_latency_us = le16_to_cpu
				(level_info->opp[cnt].transition_latency_us);

			dev_dbg(handle->dev, "Level %d Power %d Latency %dus\n",
				opp->perf, opp->power, opp->trans_latency_us);
		}

		tot_opp_cnt += num_returned;
		/*
		 * check for both returned and remaining to avoid infinite
		 * loop due to buggy firmware
		 */
	} while (num_returned && num_remaining);

	perf_dom->opp_count = tot_opp_cnt;
	scmi_xfer_put(handle, t);

	/* Keep the cache ordered by ascending perf level. */
	sort(perf_dom->opp, tot_opp_cnt, sizeof(*opp), opp_cmp_func, NULL);
	return ret;
}
  212. static int scmi_perf_limits_set(const struct scmi_handle *handle, u32 domain,
  213. u32 max_perf, u32 min_perf)
  214. {
  215. int ret;
  216. struct scmi_xfer *t;
  217. struct scmi_perf_set_limits *limits;
  218. ret = scmi_xfer_get_init(handle, PERF_LIMITS_SET, SCMI_PROTOCOL_PERF,
  219. sizeof(*limits), 0, &t);
  220. if (ret)
  221. return ret;
  222. limits = t->tx.buf;
  223. limits->domain = cpu_to_le32(domain);
  224. limits->max_level = cpu_to_le32(max_perf);
  225. limits->min_level = cpu_to_le32(min_perf);
  226. ret = scmi_do_xfer(handle, t);
  227. scmi_xfer_put(handle, t);
  228. return ret;
  229. }
  230. static int scmi_perf_limits_get(const struct scmi_handle *handle, u32 domain,
  231. u32 *max_perf, u32 *min_perf)
  232. {
  233. int ret;
  234. struct scmi_xfer *t;
  235. struct scmi_perf_get_limits *limits;
  236. ret = scmi_xfer_get_init(handle, PERF_LIMITS_GET, SCMI_PROTOCOL_PERF,
  237. sizeof(__le32), 0, &t);
  238. if (ret)
  239. return ret;
  240. *(__le32 *)t->tx.buf = cpu_to_le32(domain);
  241. ret = scmi_do_xfer(handle, t);
  242. if (!ret) {
  243. limits = t->rx.buf;
  244. *max_perf = le32_to_cpu(limits->max_level);
  245. *min_perf = le32_to_cpu(limits->min_level);
  246. }
  247. scmi_xfer_put(handle, t);
  248. return ret;
  249. }
  250. static int scmi_perf_level_set(const struct scmi_handle *handle, u32 domain,
  251. u32 level, bool poll)
  252. {
  253. int ret;
  254. struct scmi_xfer *t;
  255. struct scmi_perf_set_level *lvl;
  256. ret = scmi_xfer_get_init(handle, PERF_LEVEL_SET, SCMI_PROTOCOL_PERF,
  257. sizeof(*lvl), 0, &t);
  258. if (ret)
  259. return ret;
  260. t->hdr.poll_completion = poll;
  261. lvl = t->tx.buf;
  262. lvl->domain = cpu_to_le32(domain);
  263. lvl->level = cpu_to_le32(level);
  264. ret = scmi_do_xfer(handle, t);
  265. scmi_xfer_put(handle, t);
  266. return ret;
  267. }
  268. static int scmi_perf_level_get(const struct scmi_handle *handle, u32 domain,
  269. u32 *level, bool poll)
  270. {
  271. int ret;
  272. struct scmi_xfer *t;
  273. ret = scmi_xfer_get_init(handle, PERF_LEVEL_GET, SCMI_PROTOCOL_PERF,
  274. sizeof(u32), sizeof(u32), &t);
  275. if (ret)
  276. return ret;
  277. t->hdr.poll_completion = poll;
  278. *(__le32 *)t->tx.buf = cpu_to_le32(domain);
  279. ret = scmi_do_xfer(handle, t);
  280. if (!ret)
  281. *level = le32_to_cpu(*(__le32 *)t->rx.buf);
  282. scmi_xfer_put(handle, t);
  283. return ret;
  284. }
  285. /* Device specific ops */
  286. static int scmi_dev_domain_id(struct device *dev)
  287. {
  288. struct of_phandle_args clkspec;
  289. if (of_parse_phandle_with_args(dev->of_node, "clocks", "#clock-cells",
  290. 0, &clkspec))
  291. return -EINVAL;
  292. return clkspec.args[0];
  293. }
/*
 * Register all cached OPPs of @dev's perf domain with the OPP library.
 * On a mid-way failure the OPPs added so far are removed again, leaving
 * the device in its original state.
 *
 * Return: 0 on success, negative error code from dev_pm_opp_add()
 * otherwise.
 */
static int scmi_dvfs_device_opps_add(const struct scmi_handle *handle,
				     struct device *dev)
{
	int idx, ret, domain;
	unsigned long freq;
	struct scmi_opp *opp;
	struct perf_dom_info *dom;
	struct scmi_perf_info *pi = handle->perf_priv;

	domain = scmi_dev_domain_id(dev);
	if (domain < 0)
		return domain;

	dom = pi->dom_info + domain;

	for (opp = dom->opp, idx = 0; idx < dom->opp_count; idx++, opp++) {
		/* Convert the abstract perf level to a frequency in Hz. */
		freq = opp->perf * dom->mult_factor;

		ret = dev_pm_opp_add(dev, freq, 0);
		if (ret) {
			dev_warn(dev, "failed to add opp %luHz\n", freq);

			/* Roll back: walk the array backwards, removing
			 * every OPP registered before the failure. */
			while (idx-- > 0) {
				freq = (--opp)->perf * dom->mult_factor;
				dev_pm_opp_remove(dev, freq);
			}
			return ret;
		}
	}
	return 0;
}
  320. static int scmi_dvfs_transition_latency_get(const struct scmi_handle *handle,
  321. struct device *dev)
  322. {
  323. struct perf_dom_info *dom;
  324. struct scmi_perf_info *pi = handle->perf_priv;
  325. int domain = scmi_dev_domain_id(dev);
  326. if (domain < 0)
  327. return domain;
  328. dom = pi->dom_info + domain;
  329. /* uS to nS */
  330. return dom->opp[dom->opp_count - 1].trans_latency_us * 1000;
  331. }
  332. static int scmi_dvfs_freq_set(const struct scmi_handle *handle, u32 domain,
  333. unsigned long freq, bool poll)
  334. {
  335. struct scmi_perf_info *pi = handle->perf_priv;
  336. struct perf_dom_info *dom = pi->dom_info + domain;
  337. return scmi_perf_level_set(handle, domain, freq / dom->mult_factor,
  338. poll);
  339. }
  340. static int scmi_dvfs_freq_get(const struct scmi_handle *handle, u32 domain,
  341. unsigned long *freq, bool poll)
  342. {
  343. int ret;
  344. u32 level;
  345. struct scmi_perf_info *pi = handle->perf_priv;
  346. struct perf_dom_info *dom = pi->dom_info + domain;
  347. ret = scmi_perf_level_get(handle, domain, &level, poll);
  348. if (!ret)
  349. *freq = level * dom->mult_factor;
  350. return ret;
  351. }
/* Performance protocol operations exposed to users via handle->perf_ops. */
static struct scmi_perf_ops perf_ops = {
	.limits_set = scmi_perf_limits_set,
	.limits_get = scmi_perf_limits_get,
	.level_set = scmi_perf_level_set,
	.level_get = scmi_perf_level_get,
	.device_domain_id = scmi_dev_domain_id,
	.transition_latency_get = scmi_dvfs_transition_latency_get,
	.device_opps_add = scmi_dvfs_device_opps_add,
	.freq_set = scmi_dvfs_freq_set,
	.freq_get = scmi_dvfs_freq_get,
};
  363. static int scmi_perf_protocol_init(struct scmi_handle *handle)
  364. {
  365. int domain;
  366. u32 version;
  367. struct scmi_perf_info *pinfo;
  368. scmi_version_get(handle, SCMI_PROTOCOL_PERF, &version);
  369. dev_dbg(handle->dev, "Performance Version %d.%d\n",
  370. PROTOCOL_REV_MAJOR(version), PROTOCOL_REV_MINOR(version));
  371. pinfo = devm_kzalloc(handle->dev, sizeof(*pinfo), GFP_KERNEL);
  372. if (!pinfo)
  373. return -ENOMEM;
  374. scmi_perf_attributes_get(handle, pinfo);
  375. pinfo->dom_info = devm_kcalloc(handle->dev, pinfo->num_domains,
  376. sizeof(*pinfo->dom_info), GFP_KERNEL);
  377. if (!pinfo->dom_info)
  378. return -ENOMEM;
  379. for (domain = 0; domain < pinfo->num_domains; domain++) {
  380. struct perf_dom_info *dom = pinfo->dom_info + domain;
  381. scmi_perf_domain_attributes_get(handle, domain, dom);
  382. scmi_perf_describe_levels_get(handle, domain, dom);
  383. }
  384. handle->perf_ops = &perf_ops;
  385. handle->perf_priv = pinfo;
  386. return 0;
  387. }
  388. static int __init scmi_perf_init(void)
  389. {
  390. return scmi_protocol_register(SCMI_PROTOCOL_PERF,
  391. &scmi_perf_protocol_init);
  392. }
  393. subsys_initcall(scmi_perf_init);