perf.c 32 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
37047057067077087097107117127137147157167177187197207217227237247257267277287297307317327337347357367377387397407417427437447457467477487497507517527537547557567577587597607617627637647657667677687697707717727737747757767777787797807817827837847857867877887897907917927937947957967977987998008018028038048058068078088098108118128138148158168178188198208218228238248258268278288298308318328338348358368378388398408418428438448458468478488498508518528538548558568578588598608618628638648658668678688698708718728738748758768778788798808818828838848858868878888898908918928938948958968978988999009019029039049059069079089099109119129139149159169179189199209219229239249259269279289299309319329339349359369379389399409419429439449459469479489499509519529539549559569579589599609619629639649659669679689699709719729739749759769779789799809819829839849859869879889899909919929939949959969979989991000100110021003100410051006100710081009101010111012101310141015101610171018101910201021102210231024102510261027102810291030103110321033103410351036103710381039104010411042104310441045104610471048104910501051105210531054105510561057105810591060106110621063106410651066106710681069107010711072107310741075107610771078107910801081108210831084108510861087108810891090109110921093109410951096109710981099110011011102110311041105110611071108110911101111111211131114111511161117111811191120112111221123112411251126112711281129113011311132113311341135113611371138113911401141114211431144114511461147114811491150115111521153115411551156115711581159116011611162116311641165116611671168116911701171117211731174117511761177117811791180118111821183118411851186118711881189119011911192119311941195119611971198119912001201120212031204120512061207120812091210121112121213121412151216121712181219122012211222122312241225122612271228122912301231123212331234123512361237123812391240124112421243124412451246124712481249125012511252125312541255125612571258125912601261126212631264126512661267126812691270127112721273127412751276127
7127812791280128112821283128412851286128712881289129012911292129312941295129612971298129913001301130213031304130513061307130813091310131113121313131413151316131713181319132013211322132313241325132613271328132913301331133213331334133513361337
  1. // SPDX-License-Identifier: GPL-2.0
  2. /*
  3. * System Control and Management Interface (SCMI) Performance Protocol
  4. *
  5. * Copyright (C) 2018-2023 ARM Ltd.
  6. */
  7. #define pr_fmt(fmt) "SCMI Notifications PERF - " fmt
  8. #include <linux/bits.h>
  9. #include <linux/hashtable.h>
  10. #include <linux/io.h>
  11. #include <linux/log2.h>
  12. #include <linux/module.h>
  13. #include <linux/of.h>
  14. #include <linux/platform_device.h>
  15. #include <linux/pm_opp.h>
  16. #include <linux/scmi_protocol.h>
  17. #include <linux/sort.h>
  18. #include <linux/xarray.h>
  19. #include <trace/events/scmi.h>
  20. #include "protocols.h"
  21. #include "notify.h"
  22. /* Updated only after ALL the mandatory features for that version are merged */
  23. #define SCMI_PROTOCOL_SUPPORTED_VERSION 0x40000
  24. #define MAX_OPPS 32
/* Message IDs of the Performance protocol commands used by this driver */
enum scmi_performance_protocol_cmd {
	PERF_DOMAIN_ATTRIBUTES = 0x3,
	PERF_DESCRIBE_LEVELS = 0x4,
	PERF_LIMITS_SET = 0x5,
	PERF_LIMITS_GET = 0x6,
	PERF_LEVEL_SET = 0x7,
	PERF_LEVEL_GET = 0x8,
	PERF_NOTIFY_LIMITS = 0x9,
	PERF_NOTIFY_LEVEL = 0xa,
	PERF_DESCRIBE_FASTCHANNEL = 0xb,
	PERF_DOMAIN_NAME_GET = 0xc,
};
/* Indexes into a per-domain fc_info[] array of fastchannel descriptors */
enum {
	PERF_FC_LEVEL,
	PERF_FC_LIMIT,
	PERF_FC_MAX,
};
/*
 * One Operating Performance Point as reported by the platform.
 */
struct scmi_opp {
	u32 perf;		/* performance level value */
	u32 power;		/* power cost at this level */
	u32 trans_latency_us;	/* worst-case transition latency (uS) */
	u32 indicative_freq;	/* v4+ only: frequency hint for this level */
	u32 level_index;	/* v4+ only: index used in level-indexing mode */
	struct hlist_node hash;	/* linkage for the by-frequency hashtable */
};
/* Wire layout of the PROTOCOL_ATTRIBUTES response payload */
struct scmi_msg_resp_perf_attributes {
	__le16 num_domains;
	__le16 flags;
#define POWER_SCALE_IN_MILLIWATT(x)	((x) & BIT(0))
#define POWER_SCALE_IN_MICROWATT(x)	((x) & BIT(1))
	/* Optional statistics shared-memory region descriptor */
	__le32 stats_addr_low;
	__le32 stats_addr_high;
	__le32 stats_size;
};
/* Wire layout of the PERF_DOMAIN_ATTRIBUTES response payload */
struct scmi_msg_resp_perf_domain_attributes {
	__le32 flags;
#define SUPPORTS_SET_LIMITS(x)		((x) & BIT(31))
#define SUPPORTS_SET_PERF_LVL(x)	((x) & BIT(30))
#define SUPPORTS_PERF_LIMIT_NOTIFY(x)	((x) & BIT(29))
#define SUPPORTS_PERF_LEVEL_NOTIFY(x)	((x) & BIT(28))
#define SUPPORTS_PERF_FASTCHANNELS(x)	((x) & BIT(27))
#define SUPPORTS_EXTENDED_NAMES(x)	((x) & BIT(26))
#define SUPPORTS_LEVEL_INDEXING(x)	((x) & BIT(25))
	__le32 rate_limit_us;
	__le32 sustained_freq_khz;
	__le32 sustained_perf_level;
	u8 name[SCMI_SHORT_NAME_MAX_SIZE];
};
/* PERF_DESCRIBE_LEVELS request: domain plus index of first level wanted */
struct scmi_msg_perf_describe_levels {
	__le32 domain;
	__le32 level_index;
};
/* PERF_LIMITS_SET request payload */
struct scmi_perf_set_limits {
	__le32 domain;
	__le32 max_level;
	__le32 min_level;
};
/* PERF_LIMITS_GET response payload */
struct scmi_perf_get_limits {
	__le32 max_level;
	__le32 min_level;
};
/* PERF_LEVEL_SET request payload */
struct scmi_perf_set_level {
	__le32 domain;
	__le32 level;
};
/* Shared request payload for PERF_NOTIFY_LIMITS / PERF_NOTIFY_LEVEL */
struct scmi_perf_notify_level_or_limits {
	__le32 domain;
	__le32 notify_enable;	/* BIT(0) set to enable notifications */
};
/* Wire payload of a limits-changed notification */
struct scmi_perf_limits_notify_payld {
	__le32 agent_id;
	__le32 domain_id;
	__le32 range_max;
	__le32 range_min;
};
/* Wire payload of a level-changed notification */
struct scmi_perf_level_notify_payld {
	__le32 agent_id;
	__le32 domain_id;
	__le32 performance_level;
};
/* PERF_DESCRIBE_LEVELS response (pre-v4): a chunk of OPP descriptors */
struct scmi_msg_resp_perf_describe_levels {
	__le16 num_returned;
	__le16 num_remaining;
	struct {
		__le32 perf_val;
		__le32 power;
		__le16 transition_latency_us;
		__le16 reserved;
	} opp[];
};
/*
 * PERF_DESCRIBE_LEVELS response (v4+): as above but each entry carries
 * two extra words, indicative frequency and level index.
 */
struct scmi_msg_resp_perf_describe_levels_v4 {
	__le16 num_returned;
	__le16 num_remaining;
	struct {
		__le32 perf_val;
		__le32 power;
		__le16 transition_latency_us;
		__le16 reserved;
		__le32 indicative_freq;
		__le32 level_index;
	} opp[];
};
/*
 * Per-domain state assembled from PERF_DOMAIN_ATTRIBUTES and
 * PERF_DESCRIBE_LEVELS, plus lookup structures over the OPP table.
 */
struct perf_dom_info {
	u32 id;
	bool set_limits;		/* LIMITS_SET supported */
	bool perf_limit_notify;		/* limits notifications supported */
	bool perf_level_notify;		/* level notifications supported */
	bool perf_fastchannels;		/* fastchannels advertised */
	bool level_indexing_mode;	/* v4+ level-indexing in use */
	u32 opp_count;			/* valid entries in opp[] */
	u32 rate_limit_us;
	u32 sustained_freq_khz;
	u32 sustained_perf_level;
	/* Scale factor from perf level (or indicative freq) to Hz */
	unsigned long mult_factor;
	struct scmi_perf_domain_info info;
	struct scmi_opp opp[MAX_OPPS];
	struct scmi_fc_info *fc_info;	/* NULL until fastchannels are set up */
	struct xarray opps_by_idx;	/* level_index -> scmi_opp (v4+ only) */
	struct xarray opps_by_lvl;	/* perf level -> scmi_opp */
	/* indicative_freq -> scmi_opp (v4+ only) */
	DECLARE_HASHTABLE(opps_by_freq, ilog2(MAX_OPPS));
};
/*
 * Find the OPP in hashtable @__htp whose indicative_freq matches @__freq;
 * evaluates to the matching struct scmi_opp *, or NULL when none matches
 * (the iterator leaves the cursor NULL on exhaustion).
 */
#define LOOKUP_BY_FREQ(__htp, __freq)					\
({									\
	/* u32 cast is needed to pick right hash func */		\
	u32 f_ = (u32)(__freq);						\
	struct scmi_opp *_opp;						\
									\
	hash_for_each_possible((__htp), _opp, hash, f_)			\
		if (_opp->indicative_freq == f_)			\
			break;						\
	_opp;								\
})
/* Protocol-instance private data, stored via ph->set_priv()/get_priv() */
struct scmi_perf_info {
	u32 version;
	u16 num_domains;
	enum scmi_power_scale power_scale;
	u64 stats_addr;			/* statistics shmem region, if any */
	u32 stats_size;
	bool notify_lvl_cmd;		/* PERF_NOTIFY_LEVEL implemented */
	bool notify_lim_cmd;		/* PERF_NOTIFY_LIMITS implemented */
	struct perf_dom_info *dom_info;	/* array of num_domains entries */
};
/* Map notification event indexes onto the corresponding NOTIFY commands */
static enum scmi_performance_protocol_cmd evt_2_cmd[] = {
	PERF_NOTIFY_LIMITS,
	PERF_NOTIFY_LEVEL,
};
/*
 * Retrieve PROTOCOL_ATTRIBUTES: number of domains, power scale and the
 * optional statistics shared-memory region; afterwards probe whether the
 * two notification commands are actually implemented by the platform.
 */
static int scmi_perf_attributes_get(const struct scmi_protocol_handle *ph,
				    struct scmi_perf_info *pi)
{
	int ret;
	struct scmi_xfer *t;
	struct scmi_msg_resp_perf_attributes *attr;

	ret = ph->xops->xfer_get_init(ph, PROTOCOL_ATTRIBUTES, 0,
				      sizeof(*attr), &t);
	if (ret)
		return ret;

	attr = t->rx.buf;
	ret = ph->xops->do_xfer(ph, t);
	if (!ret) {
		u16 flags = le16_to_cpu(attr->flags);

		pi->num_domains = le16_to_cpu(attr->num_domains);

		if (POWER_SCALE_IN_MILLIWATT(flags))
			pi->power_scale = SCMI_POWER_MILLIWATTS;
		/* The microwatt scale bit is only meaningful from v3.x on */
		if (PROTOCOL_REV_MAJOR(pi->version) >= 0x3)
			if (POWER_SCALE_IN_MICROWATT(flags))
				pi->power_scale = SCMI_POWER_MICROWATTS;

		pi->stats_addr = le32_to_cpu(attr->stats_addr_low) |
				(u64)le32_to_cpu(attr->stats_addr_high) << 32;
		pi->stats_size = le32_to_cpu(attr->stats_size);
	}
	ph->xops->xfer_put(ph, t);

	if (!ret) {
		/* protocol_msg_check() returns 0 if the command exists */
		if (!ph->hops->protocol_msg_check(ph, PERF_NOTIFY_LEVEL, NULL))
			pi->notify_lvl_cmd = true;

		if (!ph->hops->protocol_msg_check(ph, PERF_NOTIFY_LIMITS, NULL))
			pi->notify_lim_cmd = true;
	}

	return ret;
}
  204. static void scmi_perf_xa_destroy(void *data)
  205. {
  206. int domain;
  207. struct scmi_perf_info *pinfo = data;
  208. for (domain = 0; domain < pinfo->num_domains; domain++) {
  209. xa_destroy(&((pinfo->dom_info + domain)->opps_by_idx));
  210. xa_destroy(&((pinfo->dom_info + domain)->opps_by_lvl));
  211. }
  212. }
/*
 * Retrieve PERF_DOMAIN_ATTRIBUTES for one domain: supported operations,
 * notification capabilities, sustained frequency/level (from which the
 * level-to-frequency multiplier is derived) and the domain name.  Also
 * initializes the OPP lookup containers used later by DESCRIBE_LEVELS.
 */
static int
scmi_perf_domain_attributes_get(const struct scmi_protocol_handle *ph,
				struct perf_dom_info *dom_info,
				bool notify_lim_cmd, bool notify_lvl_cmd,
				u32 version)
{
	int ret;
	u32 flags;
	struct scmi_xfer *t;
	struct scmi_msg_resp_perf_domain_attributes *attr;

	ret = ph->xops->xfer_get_init(ph, PERF_DOMAIN_ATTRIBUTES,
				      sizeof(dom_info->id), sizeof(*attr), &t);
	if (ret)
		return ret;

	put_unaligned_le32(dom_info->id, t->tx.buf);
	attr = t->rx.buf;

	ret = ph->xops->do_xfer(ph, t);
	if (!ret) {
		flags = le32_to_cpu(attr->flags);

		dom_info->set_limits = SUPPORTS_SET_LIMITS(flags);
		dom_info->info.set_perf = SUPPORTS_SET_PERF_LVL(flags);
		/* Honour the notify bits only if the commands exist at all */
		if (notify_lim_cmd)
			dom_info->perf_limit_notify =
				SUPPORTS_PERF_LIMIT_NOTIFY(flags);
		if (notify_lvl_cmd)
			dom_info->perf_level_notify =
				SUPPORTS_PERF_LEVEL_NOTIFY(flags);
		dom_info->perf_fastchannels = SUPPORTS_PERF_FASTCHANNELS(flags);
		/* Level indexing is defined only from protocol v4 onwards */
		if (PROTOCOL_REV_MAJOR(version) >= 0x4)
			dom_info->level_indexing_mode =
				SUPPORTS_LEVEL_INDEXING(flags);
		/* Rate limit is a 20-bit field */
		dom_info->rate_limit_us = le32_to_cpu(attr->rate_limit_us) &
						GENMASK(19, 0);
		dom_info->sustained_freq_khz =
					le32_to_cpu(attr->sustained_freq_khz);
		dom_info->sustained_perf_level =
					le32_to_cpu(attr->sustained_perf_level);
		/*
		 * sustained_freq_khz = mult_factor * sustained_perf_level
		 * mult_factor must be non zero positive integer(not fraction)
		 */
		if (!dom_info->sustained_freq_khz ||
		    !dom_info->sustained_perf_level ||
		    dom_info->level_indexing_mode) {
			/* CPUFreq converts to kHz, hence default 1000 */
			dom_info->mult_factor = 1000;
		} else {
			dom_info->mult_factor =
					(dom_info->sustained_freq_khz * 1000UL)
					/ dom_info->sustained_perf_level;
			if ((dom_info->sustained_freq_khz * 1000UL) %
			    dom_info->sustained_perf_level)
				dev_warn(ph->dev,
					 "multiplier for domain %d rounded\n",
					 dom_info->id);
		}
		if (!dom_info->mult_factor)
			dev_warn(ph->dev,
				 "Wrong sustained perf/frequency(domain %d)\n",
				 dom_info->id);

		strscpy(dom_info->info.name, attr->name,
			SCMI_SHORT_NAME_MAX_SIZE);
	}

	ph->xops->xfer_put(ph, t);

	/*
	 * If supported overwrite short name with the extended one;
	 * on error just carry on and use already provided short name.
	 */
	if (!ret && PROTOCOL_REV_MAJOR(version) >= 0x3 &&
	    SUPPORTS_EXTENDED_NAMES(flags))
		ph->hops->extended_name_get(ph, PERF_DOMAIN_NAME_GET,
					    dom_info->id, NULL,
					    dom_info->info.name,
					    SCMI_MAX_STR_SIZE);

	/* Set up the OPP lookup containers before levels are enumerated */
	xa_init(&dom_info->opps_by_lvl);
	if (dom_info->level_indexing_mode) {
		xa_init(&dom_info->opps_by_idx);
		hash_init(dom_info->opps_by_freq);
	}

	return ret;
}
  293. static int opp_cmp_func(const void *opp1, const void *opp2)
  294. {
  295. const struct scmi_opp *t1 = opp1, *t2 = opp2;
  296. return t1->perf - t2->perf;
  297. }
/* Private context threaded through the DESCRIBE_LEVELS iterator callbacks */
struct scmi_perf_ipriv {
	u32 version;
	struct perf_dom_info *perf_dom;
};
  302. static void iter_perf_levels_prepare_message(void *message,
  303. unsigned int desc_index,
  304. const void *priv)
  305. {
  306. struct scmi_msg_perf_describe_levels *msg = message;
  307. const struct scmi_perf_ipriv *p = priv;
  308. msg->domain = cpu_to_le32(p->perf_dom->id);
  309. /* Set the number of OPPs to be skipped/already read */
  310. msg->level_index = cpu_to_le32(desc_index);
  311. }
  312. static int iter_perf_levels_update_state(struct scmi_iterator_state *st,
  313. const void *response, void *priv)
  314. {
  315. const struct scmi_msg_resp_perf_describe_levels *r = response;
  316. st->num_returned = le16_to_cpu(r->num_returned);
  317. st->num_remaining = le16_to_cpu(r->num_remaining);
  318. return 0;
  319. }
/*
 * Parse one pre-v4 OPP entry and register it in the by-level xarray.
 * xa_insert() fails with -EBUSY when the firmware reports a duplicate
 * perf level; the caller treats that as a skippable duplicate.
 */
static inline int
process_response_opp(struct device *dev, struct perf_dom_info *dom,
		     struct scmi_opp *opp, unsigned int loop_idx,
		     const struct scmi_msg_resp_perf_describe_levels *r)
{
	int ret;

	opp->perf = le32_to_cpu(r->opp[loop_idx].perf_val);
	opp->power = le32_to_cpu(r->opp[loop_idx].power);
	opp->trans_latency_us =
		le16_to_cpu(r->opp[loop_idx].transition_latency_us);

	ret = xa_insert(&dom->opps_by_lvl, opp->perf, opp, GFP_KERNEL);
	if (ret) {
		dev_info(dev, FW_BUG "Failed to add opps_by_lvl at %d for %s - ret:%d\n",
			 opp->perf, dom->info.name, ret);
		return ret;
	}

	return 0;
}
/*
 * Parse one v4 OPP entry and register it in every lookup container:
 * by-level always, by-index and by-frequency only in level-indexing
 * mode.  On a by-index insertion failure the by-level entry is rolled
 * back so the containers stay consistent.
 */
static inline int
process_response_opp_v4(struct device *dev, struct perf_dom_info *dom,
			struct scmi_opp *opp, unsigned int loop_idx,
			const struct scmi_msg_resp_perf_describe_levels_v4 *r)
{
	int ret;

	opp->perf = le32_to_cpu(r->opp[loop_idx].perf_val);
	opp->power = le32_to_cpu(r->opp[loop_idx].power);
	opp->trans_latency_us =
		le16_to_cpu(r->opp[loop_idx].transition_latency_us);

	ret = xa_insert(&dom->opps_by_lvl, opp->perf, opp, GFP_KERNEL);
	if (ret) {
		dev_info(dev, FW_BUG "Failed to add opps_by_lvl at %d for %s - ret:%d\n",
			 opp->perf, dom->info.name, ret);
		return ret;
	}

	/* Note that PERF v4 reports always five 32-bit words */
	opp->indicative_freq = le32_to_cpu(r->opp[loop_idx].indicative_freq);
	if (dom->level_indexing_mode) {
		opp->level_index = le32_to_cpu(r->opp[loop_idx].level_index);

		ret = xa_insert(&dom->opps_by_idx, opp->level_index, opp,
				GFP_KERNEL);
		if (ret) {
			dev_warn(dev,
				 "Failed to add opps_by_idx at %d for %s - ret:%d\n",
				 opp->level_index, dom->info.name, ret);

			/* Cleanup by_lvl too */
			xa_erase(&dom->opps_by_lvl, opp->perf);

			return ret;
		}

		hash_add(dom->opps_by_freq, &opp->hash, opp->indicative_freq);
	}

	return 0;
}
/*
 * Consume one OPP from the current DESCRIBE_LEVELS chunk, dispatching to
 * the version-appropriate parser.  Duplicate entries reported by buggy
 * firmware (-EBUSY from the insert) are skipped without aborting the
 * whole enumeration; opp_count advances only for accepted entries.
 */
static int
iter_perf_levels_process_response(const struct scmi_protocol_handle *ph,
				  const void *response,
				  struct scmi_iterator_state *st, void *priv)
{
	int ret;
	struct scmi_opp *opp;
	struct scmi_perf_ipriv *p = priv;

	opp = &p->perf_dom->opp[p->perf_dom->opp_count];
	if (PROTOCOL_REV_MAJOR(p->version) <= 0x3)
		ret = process_response_opp(ph->dev, p->perf_dom, opp,
					   st->loop_idx, response);
	else
		ret = process_response_opp_v4(ph->dev, p->perf_dom, opp,
					      st->loop_idx, response);

	/* Skip BAD duplicates received from firmware */
	if (ret)
		return ret == -EBUSY ? 0 : ret;

	p->perf_dom->opp_count++;

	dev_dbg(ph->dev, "Level %d Power %d Latency %dus Ifreq %d Index %d\n",
		opp->perf, opp->power, opp->trans_latency_us,
		opp->indicative_freq, opp->level_index);

	return 0;
}
/*
 * Enumerate all OPPs of a domain via the multi-part DESCRIBE_LEVELS
 * command (capped at MAX_OPPS) and sort the resulting table by
 * ascending performance level.
 */
static int
scmi_perf_describe_levels_get(const struct scmi_protocol_handle *ph,
			      struct perf_dom_info *perf_dom, u32 version)
{
	int ret;
	void *iter;
	struct scmi_iterator_ops ops = {
		.prepare_message = iter_perf_levels_prepare_message,
		.update_state = iter_perf_levels_update_state,
		.process_response = iter_perf_levels_process_response,
	};
	struct scmi_perf_ipriv ppriv = {
		.version = version,
		.perf_dom = perf_dom,
	};

	iter = ph->hops->iter_response_init(ph, &ops, MAX_OPPS,
					    PERF_DESCRIBE_LEVELS,
					    sizeof(struct scmi_msg_perf_describe_levels),
					    &ppriv);
	if (IS_ERR(iter))
		return PTR_ERR(iter);

	ret = ph->hops->iter_response_run(iter);
	if (ret)
		return ret;

	if (perf_dom->opp_count)
		sort(perf_dom->opp, perf_dom->opp_count,
		     sizeof(struct scmi_opp), opp_cmp_func, NULL);

	return ret;
}
  425. static int scmi_perf_num_domains_get(const struct scmi_protocol_handle *ph)
  426. {
  427. struct scmi_perf_info *pi = ph->get_priv(ph);
  428. return pi->num_domains;
  429. }
  430. static inline struct perf_dom_info *
  431. scmi_perf_domain_lookup(const struct scmi_protocol_handle *ph, u32 domain)
  432. {
  433. struct scmi_perf_info *pi = ph->get_priv(ph);
  434. if (domain >= pi->num_domains)
  435. return ERR_PTR(-EINVAL);
  436. return pi->dom_info + domain;
  437. }
  438. static const struct scmi_perf_domain_info *
  439. scmi_perf_info_get(const struct scmi_protocol_handle *ph, u32 domain)
  440. {
  441. struct perf_dom_info *dom;
  442. dom = scmi_perf_domain_lookup(ph, domain);
  443. if (IS_ERR(dom))
  444. return ERR_PTR(-EINVAL);
  445. return &dom->info;
  446. }
/* Send a PERF_LIMITS_SET message for @domain over the normal transport */
static int scmi_perf_msg_limits_set(const struct scmi_protocol_handle *ph,
				    u32 domain, u32 max_perf, u32 min_perf)
{
	int ret;
	struct scmi_xfer *t;
	struct scmi_perf_set_limits *limits;

	ret = ph->xops->xfer_get_init(ph, PERF_LIMITS_SET,
				      sizeof(*limits), 0, &t);
	if (ret)
		return ret;

	limits = t->tx.buf;
	limits->domain = cpu_to_le32(domain);
	limits->max_level = cpu_to_le32(max_perf);
	limits->min_level = cpu_to_le32(min_perf);

	ret = ph->xops->do_xfer(ph, t);

	ph->xops->xfer_put(ph, t);
	return ret;
}
/*
 * Set limits via the fastchannel when one was initialized for this
 * domain (max word at set_addr, min word at set_addr + 4, then ring the
 * doorbell); otherwise fall back to the message-based path.
 */
static int __scmi_perf_limits_set(const struct scmi_protocol_handle *ph,
				  struct perf_dom_info *dom, u32 max_perf,
				  u32 min_perf)
{
	if (dom->fc_info && dom->fc_info[PERF_FC_LIMIT].set_addr) {
		struct scmi_fc_info *fci = &dom->fc_info[PERF_FC_LIMIT];

		trace_scmi_fc_call(SCMI_PROTOCOL_PERF, PERF_LIMITS_SET,
				   dom->id, min_perf, max_perf);
		iowrite32(max_perf, fci->set_addr);
		iowrite32(min_perf, fci->set_addr + 4);
		ph->hops->fastchannel_db_ring(fci->set_db);
		return 0;
	}

	return scmi_perf_msg_limits_set(ph, dom->id, max_perf, min_perf);
}
/*
 * Validate and apply performance limits on a domain.
 *
 * From protocol v3 onwards a request with both limits zero is rejected.
 * In level-indexing mode non-zero perf levels are translated to level
 * indexes before transmission; zero values are passed through as-is.
 */
static int scmi_perf_limits_set(const struct scmi_protocol_handle *ph,
				u32 domain, u32 max_perf, u32 min_perf)
{
	struct scmi_perf_info *pi = ph->get_priv(ph);
	struct perf_dom_info *dom;

	dom = scmi_perf_domain_lookup(ph, domain);
	if (IS_ERR(dom))
		return PTR_ERR(dom);

	if (!dom->set_limits)
		return -EOPNOTSUPP;

	if (PROTOCOL_REV_MAJOR(pi->version) >= 0x3 && !max_perf && !min_perf)
		return -EINVAL;

	if (dom->level_indexing_mode) {
		struct scmi_opp *opp;

		if (min_perf) {
			opp = xa_load(&dom->opps_by_lvl, min_perf);
			if (!opp)
				return -EIO;

			min_perf = opp->level_index;
		}

		if (max_perf) {
			opp = xa_load(&dom->opps_by_lvl, max_perf);
			if (!opp)
				return -EIO;

			max_perf = opp->level_index;
		}
	}

	return __scmi_perf_limits_set(ph, dom, max_perf, min_perf);
}
/* Query current min/max limits for @domain via PERF_LIMITS_GET */
static int scmi_perf_msg_limits_get(const struct scmi_protocol_handle *ph,
				    u32 domain, u32 *max_perf, u32 *min_perf)
{
	int ret;
	struct scmi_xfer *t;
	struct scmi_perf_get_limits *limits;

	ret = ph->xops->xfer_get_init(ph, PERF_LIMITS_GET,
				      sizeof(__le32), 0, &t);
	if (ret)
		return ret;

	put_unaligned_le32(domain, t->tx.buf);

	ret = ph->xops->do_xfer(ph, t);
	if (!ret) {
		limits = t->rx.buf;

		*max_perf = le32_to_cpu(limits->max_level);
		*min_perf = le32_to_cpu(limits->min_level);
	}

	ph->xops->xfer_put(ph, t);
	return ret;
}
/*
 * Read limits from the fastchannel when mapped (max word at get_addr,
 * min word at get_addr + 4); otherwise use the message-based path.
 */
static int __scmi_perf_limits_get(const struct scmi_protocol_handle *ph,
				  struct perf_dom_info *dom, u32 *max_perf,
				  u32 *min_perf)
{
	if (dom->fc_info && dom->fc_info[PERF_FC_LIMIT].get_addr) {
		struct scmi_fc_info *fci = &dom->fc_info[PERF_FC_LIMIT];

		*max_perf = ioread32(fci->get_addr);
		*min_perf = ioread32(fci->get_addr + 4);
		trace_scmi_fc_call(SCMI_PROTOCOL_PERF, PERF_LIMITS_GET,
				   dom->id, *min_perf, *max_perf);
		return 0;
	}

	return scmi_perf_msg_limits_get(ph, dom->id, max_perf, min_perf);
}
/*
 * Get the current limits of a domain.  In level-indexing mode the
 * platform reports level indexes, which are translated back to perf
 * level values before being returned to the caller.
 */
static int scmi_perf_limits_get(const struct scmi_protocol_handle *ph,
				u32 domain, u32 *max_perf, u32 *min_perf)
{
	int ret;
	struct perf_dom_info *dom;

	dom = scmi_perf_domain_lookup(ph, domain);
	if (IS_ERR(dom))
		return PTR_ERR(dom);

	ret = __scmi_perf_limits_get(ph, dom, max_perf, min_perf);
	if (ret)
		return ret;

	if (dom->level_indexing_mode) {
		struct scmi_opp *opp;

		opp = xa_load(&dom->opps_by_idx, *min_perf);
		if (!opp)
			return -EIO;

		*min_perf = opp->perf;

		opp = xa_load(&dom->opps_by_idx, *max_perf);
		if (!opp)
			return -EIO;

		*max_perf = opp->perf;
	}

	return 0;
}
/*
 * Send a PERF_LEVEL_SET message; @poll selects polled completion on the
 * transport (used for atomic/fast-switch contexts).
 */
static int scmi_perf_msg_level_set(const struct scmi_protocol_handle *ph,
				   u32 domain, u32 level, bool poll)
{
	int ret;
	struct scmi_xfer *t;
	struct scmi_perf_set_level *lvl;

	ret = ph->xops->xfer_get_init(ph, PERF_LEVEL_SET, sizeof(*lvl), 0, &t);
	if (ret)
		return ret;

	t->hdr.poll_completion = poll;
	lvl = t->tx.buf;
	lvl->domain = cpu_to_le32(domain);
	lvl->level = cpu_to_le32(level);

	ret = ph->xops->do_xfer(ph, t);

	ph->xops->xfer_put(ph, t);
	return ret;
}
/*
 * Set a performance level via the fastchannel when one is mapped
 * (single write plus doorbell), else via the message transport.
 */
static int __scmi_perf_level_set(const struct scmi_protocol_handle *ph,
				 struct perf_dom_info *dom, u32 level,
				 bool poll)
{
	if (dom->fc_info && dom->fc_info[PERF_FC_LEVEL].set_addr) {
		struct scmi_fc_info *fci = &dom->fc_info[PERF_FC_LEVEL];

		trace_scmi_fc_call(SCMI_PROTOCOL_PERF, PERF_LEVEL_SET,
				   dom->id, level, 0);
		iowrite32(level, fci->set_addr);
		ph->hops->fastchannel_db_ring(fci->set_db);
		return 0;
	}

	return scmi_perf_msg_level_set(ph, dom->id, level, poll);
}
/*
 * Request a performance level on a domain.  In level-indexing mode the
 * caller's perf level is translated to its level index first; unknown
 * levels yield -EIO.
 */
static int scmi_perf_level_set(const struct scmi_protocol_handle *ph,
			       u32 domain, u32 level, bool poll)
{
	struct perf_dom_info *dom;

	dom = scmi_perf_domain_lookup(ph, domain);
	if (IS_ERR(dom))
		return PTR_ERR(dom);

	if (!dom->info.set_perf)
		return -EOPNOTSUPP;

	if (dom->level_indexing_mode) {
		struct scmi_opp *opp;

		opp = xa_load(&dom->opps_by_lvl, level);
		if (!opp)
			return -EIO;

		level = opp->level_index;
	}

	return __scmi_perf_level_set(ph, dom, level, poll);
}
/* Query the current performance level of @domain via PERF_LEVEL_GET */
static int scmi_perf_msg_level_get(const struct scmi_protocol_handle *ph,
				   u32 domain, u32 *level, bool poll)
{
	int ret;
	struct scmi_xfer *t;

	ret = ph->xops->xfer_get_init(ph, PERF_LEVEL_GET,
				      sizeof(u32), sizeof(u32), &t);
	if (ret)
		return ret;

	t->hdr.poll_completion = poll;
	put_unaligned_le32(domain, t->tx.buf);

	ret = ph->xops->do_xfer(ph, t);
	if (!ret)
		*level = get_unaligned_le32(t->rx.buf);

	ph->xops->xfer_put(ph, t);
	return ret;
}
/*
 * Read the current level from the fastchannel when mapped, else via the
 * message transport.
 */
static int __scmi_perf_level_get(const struct scmi_protocol_handle *ph,
				 struct perf_dom_info *dom, u32 *level,
				 bool poll)
{
	if (dom->fc_info && dom->fc_info[PERF_FC_LEVEL].get_addr) {
		*level = ioread32(dom->fc_info[PERF_FC_LEVEL].get_addr);
		trace_scmi_fc_call(SCMI_PROTOCOL_PERF, PERF_LEVEL_GET,
				   dom->id, *level, 0);
		return 0;
	}

	return scmi_perf_msg_level_get(ph, dom->id, level, poll);
}
/*
 * Get the current performance level of a domain.  In level-indexing
 * mode the platform reports a level index, which is translated back to
 * the corresponding perf level value.
 */
static int scmi_perf_level_get(const struct scmi_protocol_handle *ph,
			       u32 domain, u32 *level, bool poll)
{
	int ret;
	struct perf_dom_info *dom;

	dom = scmi_perf_domain_lookup(ph, domain);
	if (IS_ERR(dom))
		return PTR_ERR(dom);

	ret = __scmi_perf_level_get(ph, dom, level, poll);
	if (ret)
		return ret;

	if (dom->level_indexing_mode) {
		struct scmi_opp *opp;

		opp = xa_load(&dom->opps_by_idx, *level);
		if (!opp)
			return -EIO;

		*level = opp->perf;
	}

	return 0;
}
/*
 * Enable or disable level/limits notifications for a domain.
 * @message_id selects PERF_NOTIFY_LIMITS or PERF_NOTIFY_LEVEL.
 */
static int scmi_perf_level_limits_notify(const struct scmi_protocol_handle *ph,
					 u32 domain, int message_id,
					 bool enable)
{
	int ret;
	struct scmi_xfer *t;
	struct scmi_perf_notify_level_or_limits *notify;

	ret = ph->xops->xfer_get_init(ph, message_id, sizeof(*notify), 0, &t);
	if (ret)
		return ret;

	notify = t->tx.buf;
	notify->domain = cpu_to_le32(domain);
	notify->notify_enable = enable ? cpu_to_le32(BIT(0)) : 0;

	ret = ph->xops->do_xfer(ph, t);

	ph->xops->xfer_put(ph, t);
	return ret;
}
/*
 * Discover and map the domain's fastchannels.  The GET channels are
 * always probed; the SET channels only when the corresponding operation
 * is supported by the domain.  dom->fc_info is published only at the
 * end, once every channel has been probed.  On allocation failure the
 * domain simply keeps using the message-based paths.
 */
static void scmi_perf_domain_init_fc(const struct scmi_protocol_handle *ph,
				     struct perf_dom_info *dom)
{
	struct scmi_fc_info *fc;

	fc = devm_kcalloc(ph->dev, PERF_FC_MAX, sizeof(*fc), GFP_KERNEL);
	if (!fc)
		return;

	ph->hops->fastchannel_init(ph, PERF_DESCRIBE_FASTCHANNEL,
				   PERF_LEVEL_GET, 4, dom->id,
				   &fc[PERF_FC_LEVEL].get_addr, NULL,
				   &fc[PERF_FC_LEVEL].rate_limit);

	ph->hops->fastchannel_init(ph, PERF_DESCRIBE_FASTCHANNEL,
				   PERF_LIMITS_GET, 8, dom->id,
				   &fc[PERF_FC_LIMIT].get_addr, NULL,
				   &fc[PERF_FC_LIMIT].rate_limit);

	if (dom->info.set_perf)
		ph->hops->fastchannel_init(ph, PERF_DESCRIBE_FASTCHANNEL,
					   PERF_LEVEL_SET, 4, dom->id,
					   &fc[PERF_FC_LEVEL].set_addr,
					   &fc[PERF_FC_LEVEL].set_db,
					   &fc[PERF_FC_LEVEL].rate_limit);

	if (dom->set_limits)
		ph->hops->fastchannel_init(ph, PERF_DESCRIBE_FASTCHANNEL,
					   PERF_LIMITS_SET, 8, dom->id,
					   &fc[PERF_FC_LIMIT].set_addr,
					   &fc[PERF_FC_LIMIT].set_db,
					   &fc[PERF_FC_LIMIT].rate_limit);

	dom->fc_info = fc;
}
/*
 * Register the domain's OPP table with the OPP core for @dev.  The
 * frequency is derived from the perf level (legacy) or from the
 * indicative frequency (level-indexing mode), scaled by mult_factor;
 * OPPs above the sustained frequency are flagged as turbo.  On any
 * failure every dynamically added OPP is removed again.
 */
static int scmi_dvfs_device_opps_add(const struct scmi_protocol_handle *ph,
				     struct device *dev, u32 domain)
{
	int idx, ret;
	unsigned long freq;
	struct dev_pm_opp_data data = {};
	struct perf_dom_info *dom;

	dom = scmi_perf_domain_lookup(ph, domain);
	if (IS_ERR(dom))
		return PTR_ERR(dom);

	for (idx = 0; idx < dom->opp_count; idx++) {
		if (!dom->level_indexing_mode)
			freq = dom->opp[idx].perf * dom->mult_factor;
		else
			freq = dom->opp[idx].indicative_freq * dom->mult_factor;

		/* All OPPs above the sustained frequency are treated as turbo */
		data.turbo = freq > dom->sustained_freq_khz * 1000UL;

		data.level = dom->opp[idx].perf;
		data.freq = freq;

		ret = dev_pm_opp_add_dynamic(dev, &data);
		if (ret) {
			dev_warn(dev, "[%d][%s]: Failed to add OPP[%d] %lu\n",
				 domain, dom->info.name, idx, freq);
			dev_pm_opp_remove_all_dynamic(dev);
			return ret;
		}

		dev_dbg(dev, "[%d][%s]:: Registered OPP[%d] %lu\n",
			domain, dom->info.name, idx, freq);
	}
	return 0;
}
  742. static int
  743. scmi_dvfs_transition_latency_get(const struct scmi_protocol_handle *ph,
  744. u32 domain)
  745. {
  746. struct perf_dom_info *dom;
  747. dom = scmi_perf_domain_lookup(ph, domain);
  748. if (IS_ERR(dom))
  749. return PTR_ERR(dom);
  750. /* uS to nS */
  751. return dom->opp[dom->opp_count - 1].trans_latency_us * 1000;
  752. }
  753. static int
  754. scmi_dvfs_rate_limit_get(const struct scmi_protocol_handle *ph,
  755. u32 domain, u32 *rate_limit)
  756. {
  757. struct perf_dom_info *dom;
  758. if (!rate_limit)
  759. return -EINVAL;
  760. dom = scmi_perf_domain_lookup(ph, domain);
  761. if (IS_ERR(dom))
  762. return PTR_ERR(dom);
  763. *rate_limit = dom->rate_limit_us;
  764. return 0;
  765. }
/*
 * Request a frequency on a domain: divide by mult_factor to recover a
 * perf level (legacy) or look up the OPP by indicative frequency and
 * use its level index (level-indexing mode).  Returns -EIO when the
 * frequency matches no known OPP.
 */
static int scmi_dvfs_freq_set(const struct scmi_protocol_handle *ph, u32 domain,
			      unsigned long freq, bool poll)
{
	unsigned int level;
	struct perf_dom_info *dom;

	dom = scmi_perf_domain_lookup(ph, domain);
	if (IS_ERR(dom))
		return PTR_ERR(dom);

	if (!dom->level_indexing_mode) {
		level = freq / dom->mult_factor;
	} else {
		struct scmi_opp *opp;

		opp = LOOKUP_BY_FREQ(dom->opps_by_freq,
				     freq / dom->mult_factor);
		if (!opp)
			return -EIO;

		level = opp->level_index;
	}

	return __scmi_perf_level_set(ph, dom, level, poll);
}
/*
 * Read back the current frequency of a domain: scale the reported perf
 * level by mult_factor (legacy), or map the reported level index to its
 * OPP and use that OPP's indicative frequency (level-indexing mode).
 */
static int scmi_dvfs_freq_get(const struct scmi_protocol_handle *ph, u32 domain,
			      unsigned long *freq, bool poll)
{
	int ret;
	u32 level;
	struct perf_dom_info *dom;

	dom = scmi_perf_domain_lookup(ph, domain);
	if (IS_ERR(dom))
		return PTR_ERR(dom);

	ret = __scmi_perf_level_get(ph, dom, &level, poll);
	if (ret)
		return ret;

	if (!dom->level_indexing_mode) {
		*freq = level * dom->mult_factor;
	} else {
		struct scmi_opp *opp;

		opp = xa_load(&dom->opps_by_idx, level);
		if (!opp)
			return -EIO;

		*freq = opp->indicative_freq * dom->mult_factor;
	}

	return ret;
}
  809. static int scmi_dvfs_est_power_get(const struct scmi_protocol_handle *ph,
  810. u32 domain, unsigned long *freq,
  811. unsigned long *power)
  812. {
  813. struct perf_dom_info *dom;
  814. unsigned long opp_freq;
  815. int idx, ret = -EINVAL;
  816. struct scmi_opp *opp;
  817. dom = scmi_perf_domain_lookup(ph, domain);
  818. if (IS_ERR(dom))
  819. return PTR_ERR(dom);
  820. for (opp = dom->opp, idx = 0; idx < dom->opp_count; idx++, opp++) {
  821. if (!dom->level_indexing_mode)
  822. opp_freq = opp->perf * dom->mult_factor;
  823. else
  824. opp_freq = opp->indicative_freq * dom->mult_factor;
  825. if (opp_freq < *freq)
  826. continue;
  827. *freq = opp_freq;
  828. *power = opp->power;
  829. ret = 0;
  830. break;
  831. }
  832. return ret;
  833. }
  834. static bool scmi_fast_switch_possible(const struct scmi_protocol_handle *ph,
  835. u32 domain)
  836. {
  837. struct perf_dom_info *dom;
  838. dom = scmi_perf_domain_lookup(ph, domain);
  839. if (IS_ERR(dom))
  840. return false;
  841. return dom->fc_info && dom->fc_info[PERF_FC_LEVEL].set_addr;
  842. }
  843. static int scmi_fast_switch_rate_limit(const struct scmi_protocol_handle *ph,
  844. u32 domain, u32 *rate_limit)
  845. {
  846. struct perf_dom_info *dom;
  847. if (!rate_limit)
  848. return -EINVAL;
  849. dom = scmi_perf_domain_lookup(ph, domain);
  850. if (IS_ERR(dom))
  851. return PTR_ERR(dom);
  852. if (!dom->fc_info)
  853. return -EINVAL;
  854. *rate_limit = dom->fc_info[PERF_FC_LEVEL].rate_limit;
  855. return 0;
  856. }
  857. static enum scmi_power_scale
  858. scmi_power_scale_get(const struct scmi_protocol_handle *ph)
  859. {
  860. struct scmi_perf_info *pi = ph->get_priv(ph);
  861. return pi->power_scale;
  862. }
/* Operations exported to SCMI performance protocol users (e.g. cpufreq). */
static const struct scmi_perf_proto_ops perf_proto_ops = {
	.num_domains_get = scmi_perf_num_domains_get,
	.info_get = scmi_perf_info_get,
	.limits_set = scmi_perf_limits_set,
	.limits_get = scmi_perf_limits_get,
	.level_set = scmi_perf_level_set,
	.level_get = scmi_perf_level_get,
	.transition_latency_get = scmi_dvfs_transition_latency_get,
	.rate_limit_get = scmi_dvfs_rate_limit_get,
	.device_opps_add = scmi_dvfs_device_opps_add,
	.freq_set = scmi_dvfs_freq_set,
	.freq_get = scmi_dvfs_freq_get,
	.est_power_get = scmi_dvfs_est_power_get,
	.fast_switch_possible = scmi_fast_switch_possible,
	.fast_switch_rate_limit = scmi_fast_switch_rate_limit,
	.power_scale_get = scmi_power_scale_get,
};
  880. static bool scmi_perf_notify_supported(const struct scmi_protocol_handle *ph,
  881. u8 evt_id, u32 src_id)
  882. {
  883. bool supported;
  884. struct perf_dom_info *dom;
  885. if (evt_id >= ARRAY_SIZE(evt_2_cmd))
  886. return false;
  887. dom = scmi_perf_domain_lookup(ph, src_id);
  888. if (IS_ERR(dom))
  889. return false;
  890. if (evt_id == SCMI_EVENT_PERFORMANCE_LIMITS_CHANGED)
  891. supported = dom->perf_limit_notify;
  892. else
  893. supported = dom->perf_level_notify;
  894. return supported;
  895. }
  896. static int scmi_perf_set_notify_enabled(const struct scmi_protocol_handle *ph,
  897. u8 evt_id, u32 src_id, bool enable)
  898. {
  899. int ret, cmd_id;
  900. if (evt_id >= ARRAY_SIZE(evt_2_cmd))
  901. return -EINVAL;
  902. cmd_id = evt_2_cmd[evt_id];
  903. ret = scmi_perf_level_limits_notify(ph, src_id, cmd_id, enable);
  904. if (ret)
  905. pr_debug("FAIL_ENABLED - evt[%X] dom[%d] - ret:%d\n",
  906. evt_id, src_id, ret);
  907. return ret;
  908. }
  909. static int
  910. scmi_perf_xlate_opp_to_freq(struct perf_dom_info *dom,
  911. unsigned int index, unsigned long *freq)
  912. {
  913. struct scmi_opp *opp;
  914. if (!dom || !freq)
  915. return -EINVAL;
  916. if (!dom->level_indexing_mode) {
  917. opp = xa_load(&dom->opps_by_lvl, index);
  918. if (!opp)
  919. return -ENODEV;
  920. *freq = opp->perf * dom->mult_factor;
  921. } else {
  922. opp = xa_load(&dom->opps_by_idx, index);
  923. if (!opp)
  924. return -ENODEV;
  925. *freq = opp->indicative_freq * dom->mult_factor;
  926. }
  927. return 0;
  928. }
/*
 * scmi_perf_fill_custom_report  - decode a notification payload into a report
 * @ph: SCMI protocol handle
 * @evt_id: SCMI performance event identifier
 * @timestamp: timestamp to stamp on the report
 * @payld: raw little-endian payload received from the platform
 * @payld_sz: size of @payld
 * @report: caller-provided report structure to fill in
 * @src_id: output, set to the reporting domain id
 *
 * Returns the filled report on success or NULL when the payload size is
 * wrong, the event id is unknown, or the reported domain does not exist.
 * Translated frequency fields are best-effort: the report is still
 * delivered even if the OPP-to-frequency translation fails.
 */
static void *scmi_perf_fill_custom_report(const struct scmi_protocol_handle *ph,
					  u8 evt_id, ktime_t timestamp,
					  const void *payld, size_t payld_sz,
					  void *report, u32 *src_id)
{
	int ret;
	void *rep = NULL;
	struct perf_dom_info *dom;

	switch (evt_id) {
	case SCMI_EVENT_PERFORMANCE_LIMITS_CHANGED:
	{
		const struct scmi_perf_limits_notify_payld *p = payld;
		struct scmi_perf_limits_report *r = report;
		unsigned long freq_min, freq_max;

		/* Reject truncated or oversized payloads outright */
		if (sizeof(*p) != payld_sz)
			break;

		r->timestamp = timestamp;
		r->agent_id = le32_to_cpu(p->agent_id);
		r->domain_id = le32_to_cpu(p->domain_id);
		r->range_max = le32_to_cpu(p->range_max);
		r->range_min = le32_to_cpu(p->range_min);
		/* Check if the reported domain exist at all */
		dom = scmi_perf_domain_lookup(ph, r->domain_id);
		if (IS_ERR(dom))
			break;
		/*
		 * Event will be reported from this point on...
		 * ...even if, later, xlated frequencies were not retrieved.
		 */
		*src_id = r->domain_id;
		rep = r;

		ret = scmi_perf_xlate_opp_to_freq(dom, r->range_max, &freq_max);
		if (ret)
			break;

		ret = scmi_perf_xlate_opp_to_freq(dom, r->range_min, &freq_min);
		if (ret)
			break;

		/* Report translated freqs ONLY if both available */
		r->range_max_freq = freq_max;
		r->range_min_freq = freq_min;

		break;
	}
	case SCMI_EVENT_PERFORMANCE_LEVEL_CHANGED:
	{
		const struct scmi_perf_level_notify_payld *p = payld;
		struct scmi_perf_level_report *r = report;
		unsigned long freq;

		/* Reject truncated or oversized payloads outright */
		if (sizeof(*p) != payld_sz)
			break;

		r->timestamp = timestamp;
		r->agent_id = le32_to_cpu(p->agent_id);
		r->domain_id = le32_to_cpu(p->domain_id);
		/* Report translated freqs ONLY if available */
		r->performance_level = le32_to_cpu(p->performance_level);
		/* Check if the reported domain exist at all */
		dom = scmi_perf_domain_lookup(ph, r->domain_id);
		if (IS_ERR(dom))
			break;
		/*
		 * Event will be reported from this point on...
		 * ...even if, later, xlated frequencies were not retrieved.
		 */
		*src_id = r->domain_id;
		rep = r;

		/* Report translated freqs ONLY if available */
		ret = scmi_perf_xlate_opp_to_freq(dom, r->performance_level,
						  &freq);
		if (ret)
			break;

		r->performance_level_freq = freq;

		break;
	}
	default:
		break;
	}

	return rep;
}
  1006. static int scmi_perf_get_num_sources(const struct scmi_protocol_handle *ph)
  1007. {
  1008. struct scmi_perf_info *pi = ph->get_priv(ph);
  1009. if (!pi)
  1010. return -EINVAL;
  1011. return pi->num_domains;
  1012. }
/* Events exposed by the performance protocol, with payload/report sizes. */
static const struct scmi_event perf_events[] = {
	{
		.id = SCMI_EVENT_PERFORMANCE_LIMITS_CHANGED,
		.max_payld_sz = sizeof(struct scmi_perf_limits_notify_payld),
		.max_report_sz = sizeof(struct scmi_perf_limits_report),
	},
	{
		.id = SCMI_EVENT_PERFORMANCE_LEVEL_CHANGED,
		.max_payld_sz = sizeof(struct scmi_perf_level_notify_payld),
		.max_report_sz = sizeof(struct scmi_perf_level_report),
	},
};
/* Callbacks used by the SCMI notification core for this protocol. */
static const struct scmi_event_ops perf_event_ops = {
	.is_notify_supported = scmi_perf_notify_supported,
	.get_num_sources = scmi_perf_get_num_sources,
	.set_notify_enabled = scmi_perf_set_notify_enabled,
	.fill_custom_report = scmi_perf_fill_custom_report,
};
/* Event descriptor bundle registered with the SCMI notification core. */
static const struct scmi_protocol_events perf_protocol_events = {
	.queue_sz = SCMI_PROTO_QUEUE_SZ,
	.ops = &perf_event_ops,
	.evts = perf_events,
	.num_events = ARRAY_SIZE(perf_events),
};
/*
 * scmi_perf_protocol_init  - initialize the SCMI performance protocol
 * @ph: SCMI protocol handle
 *
 * Queries the protocol version and attributes, allocates and populates
 * per-domain state (attributes, OPP levels, optional fastchannels),
 * registers a devm cleanup action for the per-domain xarrays, and finally
 * publishes the private info via set_priv. Returns 0 on success or a
 * negative error code.
 */
static int scmi_perf_protocol_init(const struct scmi_protocol_handle *ph)
{
	int domain, ret;
	u32 version;
	struct scmi_perf_info *pinfo;

	ret = ph->xops->version_get(ph, &version);
	if (ret)
		return ret;

	dev_dbg(ph->dev, "Performance Version %d.%d\n",
		PROTOCOL_REV_MAJOR(version), PROTOCOL_REV_MINOR(version));

	/* devm-managed: freed automatically with the owning device */
	pinfo = devm_kzalloc(ph->dev, sizeof(*pinfo), GFP_KERNEL);
	if (!pinfo)
		return -ENOMEM;

	pinfo->version = version;

	ret = scmi_perf_attributes_get(ph, pinfo);
	if (ret)
		return ret;

	pinfo->dom_info = devm_kcalloc(ph->dev, pinfo->num_domains,
				       sizeof(*pinfo->dom_info), GFP_KERNEL);
	if (!pinfo->dom_info)
		return -ENOMEM;

	for (domain = 0; domain < pinfo->num_domains; domain++) {
		struct perf_dom_info *dom = pinfo->dom_info + domain;

		dom->id = domain;
		/*
		 * NOTE(review): return values of the per-domain queries are
		 * intentionally not checked here — a domain that fails
		 * enumeration is left with zeroed state rather than failing
		 * the whole protocol init. Confirm this is the intent.
		 */
		scmi_perf_domain_attributes_get(ph, dom, pinfo->notify_lim_cmd,
						pinfo->notify_lvl_cmd, version);
		scmi_perf_describe_levels_get(ph, dom, version);

		if (dom->perf_fastchannels)
			scmi_perf_domain_init_fc(ph, dom);
	}

	/* Tear down the per-domain xarrays when the device goes away */
	ret = devm_add_action_or_reset(ph->dev, scmi_perf_xa_destroy, pinfo);
	if (ret)
		return ret;

	return ph->set_priv(ph, pinfo, version);
}
/* Protocol descriptor tying init, ops and events to SCMI_PROTOCOL_PERF. */
static const struct scmi_protocol scmi_perf = {
	.id = SCMI_PROTOCOL_PERF,
	.owner = THIS_MODULE,
	.instance_init = &scmi_perf_protocol_init,
	.ops = &perf_proto_ops,
	.events = &perf_protocol_events,
	.supported_version = SCMI_PROTOCOL_SUPPORTED_VERSION,
};
/* Generate the module register/unregister hooks for this protocol. */
DEFINE_SCMI_PROTOCOL_REGISTER_UNREGISTER(perf, scmi_perf)