of.c 42 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
37047057067077087097107117127137147157167177187197207217227237247257267277287297307317327337347357367377387397407417427437447457467477487497507517527537547557567577587597607617627637647657667677687697707717727737747757767777787797807817827837847857867877887897907917927937947957967977987998008018028038048058068078088098108118128138148158168178188198208218228238248258268278288298308318328338348358368378388398408418428438448458468478488498508518528538548558568578588598608618628638648658668678688698708718728738748758768778788798808818828838848858868878888898908918928938948958968978988999009019029039049059069079089099109119129139149159169179189199209219229239249259269279289299309319329339349359369379389399409419429439449459469479489499509519529539549559569579589599609619629639649659669679689699709719729739749759769779789799809819829839849859869879889899909919929939949959969979989991000100110021003100410051006100710081009101010111012101310141015101610171018101910201021102210231024102510261027102810291030103110321033103410351036103710381039104010411042104310441045104610471048104910501051105210531054105510561057105810591060106110621063106410651066106710681069107010711072107310741075107610771078107910801081108210831084108510861087108810891090109110921093109410951096109710981099110011011102110311041105110611071108110911101111111211131114111511161117111811191120112111221123112411251126112711281129113011311132113311341135113611371138113911401141114211431144114511461147114811491150115111521153115411551156115711581159116011611162116311641165116611671168116911701171117211731174117511761177117811791180118111821183118411851186118711881189119011911192119311941195119611971198119912001201120212031204120512061207120812091210121112121213121412151216121712181219122012211222122312241225122612271228122912301231123212331234123512361237123812391240124112421243124412451246124712481249125012511252125312541255125612571258125912601261126212631264126512661267126812691270127112721273127412751276127
7127812791280128112821283128412851286128712881289129012911292129312941295129612971298129913001301130213031304130513061307130813091310131113121313131413151316131713181319132013211322132313241325132613271328132913301331133213331334133513361337133813391340134113421343134413451346134713481349135013511352135313541355135613571358135913601361136213631364136513661367136813691370137113721373137413751376137713781379138013811382138313841385138613871388138913901391139213931394139513961397139813991400140114021403140414051406140714081409141014111412141314141415141614171418141914201421142214231424142514261427142814291430143114321433143414351436143714381439144014411442144314441445144614471448144914501451145214531454145514561457145814591460146114621463146414651466146714681469147014711472147314741475147614771478147914801481148214831484148514861487148814891490149114921493149414951496149714981499150015011502150315041505150615071508150915101511151215131514151515161517151815191520152115221523152415251526152715281529153015311532153315341535153615371538153915401541154215431544154515461547154815491550155115521553155415551556155715581559156015611562156315641565156615671568156915701571157215731574157515761577157815791580158115821583158415851586158715881589159015911592159315941595159615971598159916001601160216031604160516061607160816091610161116121613161416151616161716181619162016211622162316241625162616271628162916301631163216331634163516361637163816391640164116421643164416451646164716481649165016511652165316541655165616571658165916601661166216631664166516661667166816691670167116721673167416751676
  1. // SPDX-License-Identifier: GPL-2.0-only
  2. /*
  3. * Generic OPP OF helpers
  4. *
  5. * Copyright (C) 2009-2010 Texas Instruments Incorporated.
  6. * Nishanth Menon
  7. * Romit Dasgupta
  8. * Kevin Hilman
  9. */
  10. #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
  11. #include <linux/cpu.h>
  12. #include <linux/errno.h>
  13. #include <linux/device.h>
  14. #include <linux/of.h>
  15. #include <linux/pm_domain.h>
  16. #include <linux/slab.h>
  17. #include <linux/export.h>
  18. #include <linux/energy_model.h>
  19. #include "opp.h"
  20. /* OPP tables with uninitialized required OPPs, protected by opp_table_lock */
  21. static LIST_HEAD(lazy_opp_tables);
  22. /*
  23. * Returns opp descriptor node for a device node, caller must
  24. * do of_node_put().
  25. */
  26. static struct device_node *_opp_of_get_opp_desc_node(struct device_node *np,
  27. int index)
  28. {
  29. /* "operating-points-v2" can be an array for power domain providers */
  30. return of_parse_phandle(np, "operating-points-v2", index);
  31. }
/* Returns opp descriptor node for a device, caller must do of_node_put() */
struct device_node *dev_pm_opp_of_get_opp_desc_node(struct device *dev)
{
	/* Index 0: the first (or only) "operating-points-v2" phandle */
	return _opp_of_get_opp_desc_node(dev->of_node, 0);
}
EXPORT_SYMBOL_GPL(dev_pm_opp_of_get_opp_desc_node);
/*
 * Find an already-registered OPP table that manages @dev's OPP descriptor
 * node (at @index). Returns the table with an elevated kref (caller must
 * drop it with dev_pm_opp_put_opp_table()), or NULL when no table matches
 * or the match isn't marked "opp-shared".
 *
 * NOTE(review): opp_tables is walked here without taking opp_table_lock,
 * so presumably the caller holds it - confirm at the call sites.
 */
struct opp_table *_managed_opp(struct device *dev, int index)
{
	struct opp_table *opp_table, *managed_table = NULL;
	struct device_node *np;

	np = _opp_of_get_opp_desc_node(dev->of_node, index);
	if (!np)
		return NULL;

	list_for_each_entry(opp_table, &opp_tables, node) {
		if (opp_table->np == np) {
			/*
			 * Multiple devices can point to the same OPP table and
			 * so will have same node-pointer, np.
			 *
			 * But the OPPs will be considered as shared only if the
			 * OPP table contains a "opp-shared" property.
			 */
			if (opp_table->shared_opp == OPP_TABLE_ACCESS_SHARED) {
				_get_opp_table_kref(opp_table);
				managed_table = opp_table;
			}

			break;
		}
	}

	of_node_put(np);

	return managed_table;
}
  64. /* The caller must call dev_pm_opp_put() after the OPP is used */
  65. static struct dev_pm_opp *_find_opp_of_np(struct opp_table *opp_table,
  66. struct device_node *opp_np)
  67. {
  68. struct dev_pm_opp *opp;
  69. mutex_lock(&opp_table->lock);
  70. list_for_each_entry(opp, &opp_table->opp_list, node) {
  71. if (opp->np == opp_np) {
  72. dev_pm_opp_get(opp);
  73. mutex_unlock(&opp_table->lock);
  74. return opp;
  75. }
  76. }
  77. mutex_unlock(&opp_table->lock);
  78. return NULL;
  79. }
/*
 * Return the @index'th "required-opps" phandle target of @np. The returned
 * node carries a reference; the caller must drop it with of_node_put().
 */
static struct device_node *of_parse_required_opp(struct device_node *np,
						 int index)
{
	return of_parse_phandle(np, "required-opps", index);
}
  85. /* The caller must call dev_pm_opp_put_opp_table() after the table is used */
  86. static struct opp_table *_find_table_of_opp_np(struct device_node *opp_np)
  87. {
  88. struct opp_table *opp_table;
  89. struct device_node *opp_table_np;
  90. opp_table_np = of_get_parent(opp_np);
  91. if (!opp_table_np)
  92. goto err;
  93. /* It is safe to put the node now as all we need now is its address */
  94. of_node_put(opp_table_np);
  95. mutex_lock(&opp_table_lock);
  96. list_for_each_entry(opp_table, &opp_tables, node) {
  97. if (opp_table_np == opp_table->np) {
  98. _get_opp_table_kref(opp_table);
  99. mutex_unlock(&opp_table_lock);
  100. return opp_table;
  101. }
  102. }
  103. mutex_unlock(&opp_table_lock);
  104. err:
  105. return ERR_PTR(-ENODEV);
  106. }
/* Free resources previously acquired by _opp_table_alloc_required_tables() */
static void _opp_table_free_required_tables(struct opp_table *opp_table)
{
	struct opp_table **required_opp_tables = opp_table->required_opp_tables;
	int i;

	if (!required_opp_tables)
		return;

	/* Drop the reference held on each successfully resolved table */
	for (i = 0; i < opp_table->required_opp_count; i++) {
		if (IS_ERR_OR_NULL(required_opp_tables[i]))
			continue;

		dev_pm_opp_put_opp_table(required_opp_tables[i]);
	}

	/* required_devs shares this allocation (see the alloc side) */
	kfree(required_opp_tables);

	opp_table->required_opp_count = 0;
	opp_table->required_opp_tables = NULL;

	/*
	 * The table may still sit on lazy_opp_tables, which is protected by
	 * opp_table_lock. NOTE(review): presumably the lazy list_head is
	 * initialized at table allocation so list_del() is safe even when
	 * the table was never added - confirm.
	 */
	mutex_lock(&opp_table_lock);
	list_del(&opp_table->lazy);
	mutex_unlock(&opp_table_lock);
}
/*
 * Populate all devices and opp tables which are part of "required-opps" list.
 * Checking only the first OPP node should be enough.
 */
static void _opp_table_alloc_required_tables(struct opp_table *opp_table,
					     struct device *dev,
					     struct device_node *opp_np)
{
	struct opp_table **required_opp_tables;
	struct device_node *required_np, *np;
	bool lazy = false;
	int count, i, size;

	/* Traversing the first OPP node is all we need */
	np = of_get_next_available_child(opp_np, NULL);
	if (!np) {
		dev_warn(dev, "Empty OPP table\n");

		return;
	}

	count = of_count_phandle_with_args(np, "required-opps", NULL);
	if (count <= 0)
		goto put_np;

	/* One allocation backs both the table pointers and required_devs */
	size = sizeof(*required_opp_tables) + sizeof(*opp_table->required_devs);
	required_opp_tables = kcalloc(count, size, GFP_KERNEL);
	if (!required_opp_tables)
		goto put_np;

	opp_table->required_opp_tables = required_opp_tables;
	/* required_devs lives right after the count table pointers */
	opp_table->required_devs = (void *)(required_opp_tables + count);
	opp_table->required_opp_count = count;

	for (i = 0; i < count; i++) {
		required_np = of_parse_required_opp(np, i);
		if (!required_np)
			goto free_required_tables;

		required_opp_tables[i] = _find_table_of_opp_np(required_np);
		of_node_put(required_np);

		/* Table not registered yet: resolve it lazily later */
		if (IS_ERR(required_opp_tables[i]))
			lazy = true;
	}

	/* Let's do the linking later on */
	if (lazy) {
		/*
		 * The OPP table is not held while allocating the table, take it
		 * now to avoid corruption to the lazy_opp_tables list.
		 */
		mutex_lock(&opp_table_lock);
		list_add(&opp_table->lazy, &lazy_opp_tables);
		mutex_unlock(&opp_table_lock);
	}

	goto put_np;

free_required_tables:
	_opp_table_free_required_tables(opp_table);
put_np:
	of_node_put(np);
}
/*
 * Initialize the DT-derived fields of @opp_table for @dev: v1 binding
 * properties, genpd detection, the OPP descriptor node (reference kept in
 * opp_table->np until _of_clear_opp_table()), sharing mode and the
 * required-opps tables.
 */
void _of_init_opp_table(struct opp_table *opp_table, struct device *dev,
			int index)
{
	struct device_node *np, *opp_np;
	u32 val;

	/*
	 * Only required for backward compatibility with v1 bindings, but isn't
	 * harmful for other cases. And so we do it unconditionally.
	 */
	np = of_node_get(dev->of_node);
	if (!np)
		return;

	if (!of_property_read_u32(np, "clock-latency", &val))
		opp_table->clock_latency_ns_max = val;
	of_property_read_u32(np, "voltage-tolerance",
			     &opp_table->voltage_tolerance_v1);

	if (of_property_present(np, "#power-domain-cells"))
		opp_table->is_genpd = true;

	/* Get OPP table node */
	opp_np = _opp_of_get_opp_desc_node(np, index);
	of_node_put(np);

	if (!opp_np)
		return;

	if (of_property_read_bool(opp_np, "opp-shared"))
		opp_table->shared_opp = OPP_TABLE_ACCESS_SHARED;
	else
		opp_table->shared_opp = OPP_TABLE_ACCESS_EXCLUSIVE;

	/* The opp_np reference is transferred to opp_table->np here */
	opp_table->np = opp_np;

	_opp_table_alloc_required_tables(opp_table, dev, opp_np);
}
/* Undo _of_init_opp_table(): free required tables, drop the np reference */
void _of_clear_opp_table(struct opp_table *opp_table)
{
	_opp_table_free_required_tables(opp_table);
	of_node_put(opp_table->np);
}
  214. /*
  215. * Release all resources previously acquired with a call to
  216. * _of_opp_alloc_required_opps().
  217. */
  218. static void _of_opp_free_required_opps(struct opp_table *opp_table,
  219. struct dev_pm_opp *opp)
  220. {
  221. struct dev_pm_opp **required_opps = opp->required_opps;
  222. int i;
  223. if (!required_opps)
  224. return;
  225. for (i = 0; i < opp_table->required_opp_count; i++) {
  226. if (!required_opps[i])
  227. continue;
  228. /* Put the reference back */
  229. dev_pm_opp_put(required_opps[i]);
  230. }
  231. opp->required_opps = NULL;
  232. kfree(required_opps);
  233. }
/* Drop the required-OPP references and the OPP's DT node reference */
void _of_clear_opp(struct opp_table *opp_table, struct dev_pm_opp *opp)
{
	_of_opp_free_required_opps(opp_table, opp);
	of_node_put(opp->np);
}
/*
 * Resolve the @index'th "required-opps" phandle of @opp into a dev_pm_opp
 * from @required_table and store it (with a reference held, dropped later by
 * _of_opp_free_required_opps()) in opp->required_opps[index].
 *
 * Returns 0 on success, -ENODEV when the phandle or the target OPP can't be
 * found.
 */
static int _link_required_opps(struct dev_pm_opp *opp, struct opp_table *opp_table,
			       struct opp_table *required_table, int index)
{
	struct device_node *np;

	np = of_parse_required_opp(opp->np, index);
	if (unlikely(!np))
		return -ENODEV;

	opp->required_opps[index] = _find_opp_of_np(required_table, np);
	of_node_put(np);

	if (!opp->required_opps[index]) {
		pr_err("%s: Unable to find required OPP node: %pOF (%d)\n",
		       __func__, opp->np, index);
		return -ENODEV;
	}

	/*
	 * There are two genpd (as required-opp) cases that we need to handle,
	 * devices with a single genpd and ones with multiple genpds.
	 *
	 * The single genpd case requires special handling as we need to use the
	 * same `dev` structure (instead of a virtual one provided by genpd
	 * core) for setting the performance state.
	 *
	 * It doesn't make sense for a device's DT entry to have both
	 * "opp-level" and single "required-opps" entry pointing to a genpd's
	 * OPP, as that would make the OPP core call
	 * dev_pm_domain_set_performance_state() for two different values for
	 * the same device structure. Lets treat single genpd configuration as a
	 * case where the OPP's level is directly available without required-opp
	 * link in the DT.
	 *
	 * Just update the `level` with the right value, which
	 * dev_pm_opp_set_opp() will take care of in the normal path itself.
	 *
	 * There is another case though, where a genpd's OPP table has
	 * required-opps set to a parent genpd. The OPP core expects the user to
	 * set the respective required `struct device` pointer via
	 * dev_pm_opp_set_config().
	 */
	if (required_table->is_genpd && opp_table->required_opp_count == 1 &&
	    !opp_table->required_devs[0]) {
		/* Genpd core takes care of propagation to parent genpd */
		if (!opp_table->is_genpd) {
			/* WARN if DT carries both opp-level and the genpd link */
			if (!WARN_ON(opp->level != OPP_LEVEL_UNSET))
				opp->level = opp->required_opps[0]->level;
		}
	}

	return 0;
}
  287. /* Populate all required OPPs which are part of "required-opps" list */
  288. static int _of_opp_alloc_required_opps(struct opp_table *opp_table,
  289. struct dev_pm_opp *opp)
  290. {
  291. struct opp_table *required_table;
  292. int i, ret, count = opp_table->required_opp_count;
  293. if (!count)
  294. return 0;
  295. opp->required_opps = kcalloc(count, sizeof(*opp->required_opps), GFP_KERNEL);
  296. if (!opp->required_opps)
  297. return -ENOMEM;
  298. for (i = 0; i < count; i++) {
  299. required_table = opp_table->required_opp_tables[i];
  300. /* Required table not added yet, we will link later */
  301. if (IS_ERR_OR_NULL(required_table))
  302. continue;
  303. ret = _link_required_opps(opp, opp_table, required_table, i);
  304. if (ret)
  305. goto free_required_opps;
  306. }
  307. return 0;
  308. free_required_opps:
  309. _of_opp_free_required_opps(opp_table, opp);
  310. return ret;
  311. }
  312. /* Link required OPPs for an individual OPP */
  313. static int lazy_link_required_opps(struct opp_table *opp_table,
  314. struct opp_table *new_table, int index)
  315. {
  316. struct dev_pm_opp *opp;
  317. int ret;
  318. list_for_each_entry(opp, &opp_table->opp_list, node) {
  319. ret = _link_required_opps(opp, opp_table, new_table, index);
  320. if (ret)
  321. return ret;
  322. }
  323. return 0;
  324. }
/* Link required OPPs for all OPPs of the newly added OPP table */
static void lazy_link_required_opp_table(struct opp_table *new_table)
{
	struct opp_table *opp_table, *temp, **required_opp_tables;
	struct device_node *required_np, *opp_np, *required_table_np;
	struct dev_pm_opp *opp;
	int i, ret;

	/* Protects both lazy_opp_tables and the tables' lazy membership */
	mutex_lock(&opp_table_lock);

	list_for_each_entry_safe(opp_table, temp, &lazy_opp_tables, lazy) {
		bool lazy = false;

		/* opp_np can't be invalid here */
		opp_np = of_get_next_available_child(opp_table->np, NULL);

		for (i = 0; i < opp_table->required_opp_count; i++) {
			required_opp_tables = opp_table->required_opp_tables;

			/* Required opp-table is already parsed */
			if (!IS_ERR(required_opp_tables[i]))
				continue;

			/* required_np can't be invalid here */
			required_np = of_parse_required_opp(opp_np, i);
			required_table_np = of_get_parent(required_np);

			/*
			 * Only the node's address is compared below, so the
			 * references can be dropped right away.
			 */
			of_node_put(required_table_np);
			of_node_put(required_np);

			/*
			 * Newly added table isn't the required opp-table for
			 * opp_table.
			 */
			if (required_table_np != new_table->np) {
				lazy = true;
				continue;
			}

			/* Resolve the entry; hold a reference on new_table */
			required_opp_tables[i] = new_table;
			_get_opp_table_kref(new_table);

			/* Link OPPs now */
			ret = lazy_link_required_opps(opp_table, new_table, i);
			if (ret) {
				/* The OPPs will be marked unusable */
				lazy = false;
				break;
			}
		}

		of_node_put(opp_np);

		/* All required opp-tables found, remove from lazy list */
		if (!lazy) {
			list_del_init(&opp_table->lazy);

			list_for_each_entry(opp, &opp_table->opp_list, node)
				_required_opps_available(opp, opp_table->required_opp_count);
		}
	}

	mutex_unlock(&opp_table_lock);
}
/*
 * Check whether the (first) OPP of @dev's table carries bandwidth values.
 *
 * Returns 1 when the first OPP has a non-empty "opp-peak-kBps" property,
 * 0 when it doesn't (or when there is no v2 table at all), -ENODEV when
 * @dev has no of_node, -EINVAL when the table exists but has no OPP nodes.
 */
static int _bandwidth_supported(struct device *dev, struct opp_table *opp_table)
{
	struct device_node *np, *opp_np;
	struct property *prop;

	if (!opp_table) {
		np = of_node_get(dev->of_node);
		if (!np)
			return -ENODEV;

		opp_np = _opp_of_get_opp_desc_node(np, 0);
		of_node_put(np);
	} else {
		opp_np = of_node_get(opp_table->np);
	}

	/* Lets not fail in case we are parsing opp-v1 bindings */
	if (!opp_np)
		return 0;

	/* Checking only first OPP is sufficient */
	np = of_get_next_available_child(opp_np, NULL);
	of_node_put(opp_np);
	if (!np) {
		dev_err(dev, "OPP table empty\n");
		return -EINVAL;
	}

	prop = of_find_property(np, "opp-peak-kBps", NULL);
	of_node_put(np);
	if (!prop || !prop->length)
		return 0;

	return 1;
}
/*
 * Acquire the interconnect paths listed in @dev's "interconnects" property
 * when the OPP table uses bandwidth values. With a non-NULL @opp_table the
 * paths are stored in it (released elsewhere); with a NULL @opp_table the
 * paths are acquired only as a probe-time check and released again before
 * returning.
 */
int dev_pm_opp_of_find_icc_paths(struct device *dev,
				 struct opp_table *opp_table)
{
	struct device_node *np;
	int ret, i, count, num_paths;
	struct icc_path **paths;

	ret = _bandwidth_supported(dev, opp_table);
	if (ret == -EINVAL)
		return 0; /* Empty OPP table is a valid corner-case, let's not fail */
	else if (ret <= 0)
		return ret;

	ret = 0;

	np = of_node_get(dev->of_node);
	if (!np)
		return 0;

	count = of_count_phandle_with_args(np, "interconnects",
					   "#interconnect-cells");
	of_node_put(np);
	if (count < 0)
		return 0;

	/* two phandles when #interconnect-cells = <1> */
	if (count % 2) {
		dev_err(dev, "%s: Invalid interconnects values\n", __func__);
		return -EINVAL;
	}

	num_paths = count / 2;
	paths = kcalloc(num_paths, sizeof(*paths), GFP_KERNEL);
	if (!paths)
		return -ENOMEM;

	for (i = 0; i < num_paths; i++) {
		paths[i] = of_icc_get_by_index(dev, i);
		if (IS_ERR(paths[i])) {
			ret = dev_err_probe(dev, PTR_ERR(paths[i]), "%s: Unable to get path%d\n", __func__, i);
			goto err;
		}
	}

	if (opp_table) {
		opp_table->paths = paths;
		opp_table->path_count = num_paths;
		return 0;
	}

err:
	/*
	 * Reached both on of_icc_get_by_index() failure (ret < 0) and, with
	 * ret == 0, when there is no opp_table to hand the paths to. Either
	 * way, release everything acquired so far.
	 */
	while (i--)
		icc_put(paths[i]);

	kfree(paths);

	return ret;
}
EXPORT_SYMBOL_GPL(dev_pm_opp_of_find_icc_paths);
  452. static bool _opp_is_supported(struct device *dev, struct opp_table *opp_table,
  453. struct device_node *np)
  454. {
  455. unsigned int levels = opp_table->supported_hw_count;
  456. int count, versions, ret, i, j;
  457. u32 val;
  458. if (!opp_table->supported_hw) {
  459. /*
  460. * In the case that no supported_hw has been set by the
  461. * platform but there is an opp-supported-hw value set for
  462. * an OPP then the OPP should not be enabled as there is
  463. * no way to see if the hardware supports it.
  464. */
  465. if (of_property_present(np, "opp-supported-hw"))
  466. return false;
  467. else
  468. return true;
  469. }
  470. count = of_property_count_u32_elems(np, "opp-supported-hw");
  471. if (count <= 0 || count % levels) {
  472. dev_err(dev, "%s: Invalid opp-supported-hw property (%d)\n",
  473. __func__, count);
  474. return false;
  475. }
  476. versions = count / levels;
  477. /* All levels in at least one of the versions should match */
  478. for (i = 0; i < versions; i++) {
  479. bool supported = true;
  480. for (j = 0; j < levels; j++) {
  481. ret = of_property_read_u32_index(np, "opp-supported-hw",
  482. i * levels + j, &val);
  483. if (ret) {
  484. dev_warn(dev, "%s: failed to read opp-supported-hw property at index %d: %d\n",
  485. __func__, i * levels + j, ret);
  486. return false;
  487. }
  488. /* Check if the level is supported */
  489. if (!(val & opp_table->supported_hw[j])) {
  490. supported = false;
  491. break;
  492. }
  493. }
  494. if (supported)
  495. return true;
  496. }
  497. return false;
  498. }
  499. static u32 *_parse_named_prop(struct dev_pm_opp *opp, struct device *dev,
  500. struct opp_table *opp_table,
  501. const char *prop_type, bool *triplet)
  502. {
  503. struct property *prop = NULL;
  504. char name[NAME_MAX];
  505. int count, ret;
  506. u32 *out;
  507. /* Search for "opp-<prop_type>-<name>" */
  508. if (opp_table->prop_name) {
  509. snprintf(name, sizeof(name), "opp-%s-%s", prop_type,
  510. opp_table->prop_name);
  511. prop = of_find_property(opp->np, name, NULL);
  512. }
  513. if (!prop) {
  514. /* Search for "opp-<prop_type>" */
  515. snprintf(name, sizeof(name), "opp-%s", prop_type);
  516. prop = of_find_property(opp->np, name, NULL);
  517. if (!prop)
  518. return NULL;
  519. }
  520. count = of_property_count_u32_elems(opp->np, name);
  521. if (count < 0) {
  522. dev_err(dev, "%s: Invalid %s property (%d)\n", __func__, name,
  523. count);
  524. return ERR_PTR(count);
  525. }
  526. /*
  527. * Initialize regulator_count, if regulator information isn't provided
  528. * by the platform. Now that one of the properties is available, fix the
  529. * regulator_count to 1.
  530. */
  531. if (unlikely(opp_table->regulator_count == -1))
  532. opp_table->regulator_count = 1;
  533. if (count != opp_table->regulator_count &&
  534. (!triplet || count != opp_table->regulator_count * 3)) {
  535. dev_err(dev, "%s: Invalid number of elements in %s property (%u) with supplies (%d)\n",
  536. __func__, prop_type, count, opp_table->regulator_count);
  537. return ERR_PTR(-EINVAL);
  538. }
  539. out = kmalloc_array(count, sizeof(*out), GFP_KERNEL);
  540. if (!out)
  541. return ERR_PTR(-EINVAL);
  542. ret = of_property_read_u32_array(opp->np, name, out, count);
  543. if (ret) {
  544. dev_err(dev, "%s: error parsing %s: %d\n", __func__, name, ret);
  545. kfree(out);
  546. return ERR_PTR(-EINVAL);
  547. }
  548. if (triplet)
  549. *triplet = count != opp_table->regulator_count;
  550. return out;
  551. }
  552. static u32 *opp_parse_microvolt(struct dev_pm_opp *opp, struct device *dev,
  553. struct opp_table *opp_table, bool *triplet)
  554. {
  555. u32 *microvolt;
  556. microvolt = _parse_named_prop(opp, dev, opp_table, "microvolt", triplet);
  557. if (IS_ERR(microvolt))
  558. return microvolt;
  559. if (!microvolt) {
  560. /*
  561. * Missing property isn't a problem, but an invalid
  562. * entry is. This property isn't optional if regulator
  563. * information is provided. Check only for the first OPP, as
  564. * regulator_count may get initialized after that to a valid
  565. * value.
  566. */
  567. if (list_empty(&opp_table->opp_list) &&
  568. opp_table->regulator_count > 0) {
  569. dev_err(dev, "%s: opp-microvolt missing although OPP managing regulators\n",
  570. __func__);
  571. return ERR_PTR(-EINVAL);
  572. }
  573. }
  574. return microvolt;
  575. }
/*
 * Fill opp->supplies[] from the opp-microvolt/-microamp/-microwatt
 * properties. Returns 0 on success or a negative errno from parsing.
 */
static int opp_parse_supplies(struct dev_pm_opp *opp, struct device *dev,
			      struct opp_table *opp_table)
{
	u32 *microvolt, *microamp, *microwatt;
	int ret = 0, i, j;
	bool triplet;

	microvolt = opp_parse_microvolt(opp, dev, opp_table, &triplet);
	if (IS_ERR(microvolt))
		return PTR_ERR(microvolt);

	microamp = _parse_named_prop(opp, dev, opp_table, "microamp", NULL);
	if (IS_ERR(microamp)) {
		ret = PTR_ERR(microamp);
		goto free_microvolt;
	}

	microwatt = _parse_named_prop(opp, dev, opp_table, "microwatt", NULL);
	if (IS_ERR(microwatt)) {
		ret = PTR_ERR(microwatt);
		goto free_microamp;
	}

	/*
	 * Initialize regulator_count if it is uninitialized and no properties
	 * are found.
	 *
	 * Returning here without kfree() is fine: _parse_named_prop() sets
	 * regulator_count to 1 whenever a property is found, so reaching
	 * this point with -1 means all three arrays are NULL.
	 */
	if (unlikely(opp_table->regulator_count == -1)) {
		opp_table->regulator_count = 0;
		return 0;
	}

	/* j walks microvolt: one value per supply, or three when triplet */
	for (i = 0, j = 0; i < opp_table->regulator_count; i++) {
		if (microvolt) {
			opp->supplies[i].u_volt = microvolt[j++];

			if (triplet) {
				opp->supplies[i].u_volt_min = microvolt[j++];
				opp->supplies[i].u_volt_max = microvolt[j++];
			} else {
				opp->supplies[i].u_volt_min = opp->supplies[i].u_volt;
				opp->supplies[i].u_volt_max = opp->supplies[i].u_volt;
			}
		}

		if (microamp)
			opp->supplies[i].u_amp = microamp[i];

		if (microwatt)
			opp->supplies[i].u_watt = microwatt[i];
	}

	kfree(microwatt);
free_microamp:
	kfree(microamp);
free_microvolt:
	kfree(microvolt);

	return ret;
}
/**
 * dev_pm_opp_of_remove_table() - Free OPP table entries created from static DT
 * entries
 * @dev: device pointer used to lookup OPP table.
 *
 * Free OPPs created using static entries present in DT.
 */
void dev_pm_opp_of_remove_table(struct device *dev)
{
	/* The generic removal path handles DT-created OPPs too */
	dev_pm_opp_remove_table(dev);
}
EXPORT_SYMBOL_GPL(dev_pm_opp_of_remove_table);
/*
 * Parse the "opp-hz" property of @np into new_opp->rates[].
 *
 * Returns 0 on success, -ENODEV when the property is absent, -EINVAL on a
 * clk_count mismatch or parse failure, -ENOMEM on allocation failure.
 */
static int _read_rate(struct dev_pm_opp *new_opp, struct opp_table *opp_table,
		      struct device_node *np)
{
	struct property *prop;
	int i, count, ret;
	u64 *rates;

	prop = of_find_property(np, "opp-hz", NULL);
	if (!prop)
		return -ENODEV;

	/* One u64 rate expected per clock */
	count = prop->length / sizeof(u64);
	if (opp_table->clk_count != count) {
		pr_err("%s: Count mismatch between opp-hz and clk_count (%d %d)\n",
		       __func__, count, opp_table->clk_count);
		return -EINVAL;
	}

	rates = kmalloc_array(count, sizeof(*rates), GFP_KERNEL);
	if (!rates)
		return -ENOMEM;

	ret = of_property_read_u64_array(np, "opp-hz", rates, count);
	if (ret) {
		pr_err("%s: Error parsing opp-hz: %d\n", __func__, ret);
	} else {
		/*
		 * Rate is defined as an unsigned long in clk API, and so
		 * casting explicitly to its type. Must be fixed once rate is 64
		 * bit guaranteed in clk API.
		 */
		for (i = 0; i < count; i++) {
			new_opp->rates[i] = (unsigned long)rates[i];

			/* This will happen for frequencies > 4.29 GHz */
			WARN_ON(new_opp->rates[i] != rates[i]);
		}
	}

	kfree(rates);

	return ret;
}
/*
 * Parse the "opp-peak-kBps" (@peak true) or "opp-avg-kBps" (@peak false)
 * property of @np into new_opp->bandwidth[].
 *
 * Returns 0 on success, -ENODEV when the property is absent, -EINVAL on a
 * path_count mismatch or parse failure, -ENOMEM on allocation failure.
 */
static int _read_bw(struct dev_pm_opp *new_opp, struct opp_table *opp_table,
		    struct device_node *np, bool peak)
{
	const char *name = peak ? "opp-peak-kBps" : "opp-avg-kBps";
	struct property *prop;
	int i, count, ret;
	u32 *bw;

	prop = of_find_property(np, name, NULL);
	if (!prop)
		return -ENODEV;

	/* One u32 bandwidth value expected per interconnect path */
	count = prop->length / sizeof(u32);
	if (opp_table->path_count != count) {
		pr_err("%s: Mismatch between %s and paths (%d %d)\n",
		       __func__, name, count, opp_table->path_count);
		return -EINVAL;
	}

	bw = kmalloc_array(count, sizeof(*bw), GFP_KERNEL);
	if (!bw)
		return -ENOMEM;

	ret = of_property_read_u32_array(np, name, bw, count);
	if (ret) {
		pr_err("%s: Error parsing %s: %d\n", __func__, name, ret);
		goto out;
	}

	for (i = 0; i < count; i++) {
		if (peak)
			new_opp->bandwidth[i].peak = kBps_to_icc(bw[i]);
		else
			new_opp->bandwidth[i].avg = kBps_to_icc(bw[i]);
	}

out:
	kfree(bw);
	return ret;
}
  708. static int _read_opp_key(struct dev_pm_opp *new_opp,
  709. struct opp_table *opp_table, struct device_node *np)
  710. {
  711. bool found = false;
  712. int ret;
  713. ret = _read_rate(new_opp, opp_table, np);
  714. if (!ret)
  715. found = true;
  716. else if (ret != -ENODEV)
  717. return ret;
  718. /*
  719. * Bandwidth consists of peak and average (optional) values:
  720. * opp-peak-kBps = <path1_value path2_value>;
  721. * opp-avg-kBps = <path1_value path2_value>;
  722. */
  723. ret = _read_bw(new_opp, opp_table, np, true);
  724. if (!ret) {
  725. found = true;
  726. ret = _read_bw(new_opp, opp_table, np, false);
  727. }
  728. /* The properties were found but we failed to parse them */
  729. if (ret && ret != -ENODEV)
  730. return ret;
  731. if (!of_property_read_u32(np, "opp-level", &new_opp->level))
  732. found = true;
  733. if (found)
  734. return 0;
  735. return ret;
  736. }
/**
 * _opp_add_static_v2() - Allocate static OPPs (As per 'v2' DT bindings)
 * @opp_table:	OPP table
 * @dev:	device for which we do this operation
 * @np:		device node
 *
 * This function adds an opp definition to the opp table and returns status. The
 * opp can be controlled using dev_pm_opp_enable/disable functions and may be
 * removed by dev_pm_opp_remove.
 *
 * Return:
 * Valid OPP pointer:
 *		On success
 * NULL:
 *		Duplicate OPPs (both freq and volt are same) and opp->available
 *		OR if the OPP is not supported by hardware.
 * ERR_PTR(-EEXIST):
 *		Freq are same and volt are different OR
 *		Duplicate OPPs (both freq and volt are same) and !opp->available
 * ERR_PTR(-ENOMEM):
 *		Memory allocation failure
 * ERR_PTR(-EINVAL):
 *		Failed parsing the OPP node
 */
static struct dev_pm_opp *_opp_add_static_v2(struct opp_table *opp_table,
		struct device *dev, struct device_node *np)
{
	struct dev_pm_opp *new_opp;
	u32 val;
	int ret;

	new_opp = _opp_allocate(opp_table);
	if (!new_opp)
		return ERR_PTR(-ENOMEM);

	/* A node without rate, bandwidth or level is not a valid OPP. */
	ret = _read_opp_key(new_opp, opp_table, np);
	if (ret < 0) {
		dev_err(dev, "%s: opp key field not found\n", __func__);
		goto free_opp;
	}

	/* Check if the OPP supports hardware's hierarchy of versions or not */
	if (!_opp_is_supported(dev, opp_table, np)) {
		dev_dbg(dev, "OPP not supported by hardware: %s\n",
			of_node_full_name(np));
		/* ret is 0 here, so the tail returns NULL (not an error). */
		goto free_opp;
	}

	new_opp->turbo = of_property_read_bool(np, "turbo-mode");

	/* Hold a reference on the OPP's DT node for the OPP's lifetime. */
	new_opp->np = of_node_get(np);
	new_opp->dynamic = false;
	new_opp->available = true;

	ret = _of_opp_alloc_required_opps(opp_table, new_opp);
	if (ret)
		goto put_node;

	/* "clock-latency-ns" is optional; leave the default when absent. */
	if (!of_property_read_u32(np, "clock-latency-ns", &val))
		new_opp->clock_latency_ns = val;

	ret = opp_parse_supplies(new_opp, dev, opp_table);
	if (ret)
		goto free_required_opps;

	ret = _opp_add(dev, new_opp, opp_table);
	if (ret) {
		/* Don't return error for duplicate OPPs */
		if (ret == -EBUSY)
			ret = 0;
		goto free_required_opps;
	}

	/* OPP to select on device suspend */
	if (of_property_read_bool(np, "opp-suspend")) {
		if (opp_table->suspend_opp) {
			/* Pick the OPP with higher rate/bw/level as suspend OPP */
			if (_opp_compare_key(opp_table, new_opp, opp_table->suspend_opp) == 1) {
				opp_table->suspend_opp->suspend = false;
				new_opp->suspend = true;
				opp_table->suspend_opp = new_opp;
			}
		} else {
			new_opp->suspend = true;
			opp_table->suspend_opp = new_opp;
		}
	}

	if (new_opp->clock_latency_ns > opp_table->clock_latency_ns_max)
		opp_table->clock_latency_ns_max = new_opp->clock_latency_ns;

	pr_debug("%s: turbo:%d rate:%lu uv:%lu uvmin:%lu uvmax:%lu latency:%lu level:%u\n",
		 __func__, new_opp->turbo, new_opp->rates[0],
		 new_opp->supplies[0].u_volt, new_opp->supplies[0].u_volt_min,
		 new_opp->supplies[0].u_volt_max, new_opp->clock_latency_ns,
		 new_opp->level);

	/*
	 * Notify the changes in the availability of the operable
	 * frequency/voltage list.
	 */
	blocking_notifier_call_chain(&opp_table->head, OPP_EVENT_ADD, new_opp);
	return new_opp;

free_required_opps:
	_of_opp_free_required_opps(opp_table, new_opp);
put_node:
	of_node_put(np);
free_opp:
	_opp_free(new_opp);

	return ret ? ERR_PTR(ret) : NULL;
}
/* Initializes OPP tables based on new bindings */
static int _of_add_opp_table_v2(struct device *dev, struct opp_table *opp_table)
{
	struct device_node *np;
	int ret, count = 0;
	struct dev_pm_opp *opp;

	/* OPP table is already initialized for the device */
	mutex_lock(&opp_table->lock);
	if (opp_table->parsed_static_opps) {
		/* parsed_static_opps doubles as a user count for static OPPs. */
		opp_table->parsed_static_opps++;
		mutex_unlock(&opp_table->lock);
		return 0;
	}

	opp_table->parsed_static_opps = 1;
	mutex_unlock(&opp_table->lock);

	/* We have opp-table node now, iterate over it and add OPPs */
	for_each_available_child_of_node(opp_table->np, np) {
		opp = _opp_add_static_v2(opp_table, dev, np);
		if (IS_ERR(opp)) {
			ret = PTR_ERR(opp);
			dev_err(dev, "%s: Failed to add OPP, %d\n", __func__,
				ret);
			/* Drop the iterator's reference on early exit. */
			of_node_put(np);
			goto remove_static_opp;
		} else if (opp) {
			/* NULL means unsupported/duplicate OPP: not counted. */
			count++;
		}
	}

	/* There should be one or more OPPs defined */
	if (!count) {
		dev_err(dev, "%s: no supported OPPs", __func__);
		ret = -ENOENT;
		goto remove_static_opp;
	}

	lazy_link_required_opp_table(opp_table);

	return 0;

remove_static_opp:
	_opp_remove_all_static(opp_table);

	return ret;
}
  875. /* Initializes OPP tables based on old-deprecated bindings */
  876. static int _of_add_opp_table_v1(struct device *dev, struct opp_table *opp_table)
  877. {
  878. const struct property *prop;
  879. const __be32 *val;
  880. int nr, ret = 0;
  881. mutex_lock(&opp_table->lock);
  882. if (opp_table->parsed_static_opps) {
  883. opp_table->parsed_static_opps++;
  884. mutex_unlock(&opp_table->lock);
  885. return 0;
  886. }
  887. opp_table->parsed_static_opps = 1;
  888. mutex_unlock(&opp_table->lock);
  889. prop = of_find_property(dev->of_node, "operating-points", NULL);
  890. if (!prop) {
  891. ret = -ENODEV;
  892. goto remove_static_opp;
  893. }
  894. if (!prop->value) {
  895. ret = -ENODATA;
  896. goto remove_static_opp;
  897. }
  898. /*
  899. * Each OPP is a set of tuples consisting of frequency and
  900. * voltage like <freq-kHz vol-uV>.
  901. */
  902. nr = prop->length / sizeof(u32);
  903. if (nr % 2) {
  904. dev_err(dev, "%s: Invalid OPP table\n", __func__);
  905. ret = -EINVAL;
  906. goto remove_static_opp;
  907. }
  908. val = prop->value;
  909. while (nr) {
  910. unsigned long freq = be32_to_cpup(val++) * 1000;
  911. unsigned long volt = be32_to_cpup(val++);
  912. struct dev_pm_opp_data data = {
  913. .freq = freq,
  914. .u_volt = volt,
  915. };
  916. ret = _opp_add_v1(opp_table, dev, &data, false);
  917. if (ret) {
  918. dev_err(dev, "%s: Failed to add OPP %ld (%d)\n",
  919. __func__, data.freq, ret);
  920. goto remove_static_opp;
  921. }
  922. nr -= 2;
  923. }
  924. return 0;
  925. remove_static_opp:
  926. _opp_remove_all_static(opp_table);
  927. return ret;
  928. }
  929. static int _of_add_table_indexed(struct device *dev, int index)
  930. {
  931. struct opp_table *opp_table;
  932. int ret, count;
  933. if (index) {
  934. /*
  935. * If only one phandle is present, then the same OPP table
  936. * applies for all index requests.
  937. */
  938. count = of_count_phandle_with_args(dev->of_node,
  939. "operating-points-v2", NULL);
  940. if (count == 1)
  941. index = 0;
  942. }
  943. opp_table = _add_opp_table_indexed(dev, index, true);
  944. if (IS_ERR(opp_table))
  945. return PTR_ERR(opp_table);
  946. /*
  947. * OPPs have two version of bindings now. Also try the old (v1)
  948. * bindings for backward compatibility with older dtbs.
  949. */
  950. if (opp_table->np)
  951. ret = _of_add_opp_table_v2(dev, opp_table);
  952. else
  953. ret = _of_add_opp_table_v1(dev, opp_table);
  954. if (ret)
  955. dev_pm_opp_put_opp_table(opp_table);
  956. return ret;
  957. }
/* devm action callback: @data is the struct device the table was added for. */
static void devm_pm_opp_of_table_release(void *data)
{
	dev_pm_opp_of_remove_table(data);
}
  962. static int _devm_of_add_table_indexed(struct device *dev, int index)
  963. {
  964. int ret;
  965. ret = _of_add_table_indexed(dev, index);
  966. if (ret)
  967. return ret;
  968. return devm_add_action_or_reset(dev, devm_pm_opp_of_table_release, dev);
  969. }
/**
 * devm_pm_opp_of_add_table() - Initialize opp table from device tree
 * @dev:	device pointer used to lookup OPP table.
 *
 * Register the initial OPP table with the OPP library for given device.
 *
 * The opp_table structure will be freed after the device is destroyed.
 *
 * Return:
 * 0		On success OR
 *		Duplicate OPPs (both freq and volt are same) and opp->available
 * -EEXIST	Freq are same and volt are different OR
 *		Duplicate OPPs (both freq and volt are same) and !opp->available
 * -ENOMEM	Memory allocation failure
 * -ENODEV	when 'operating-points' property is not found or is invalid data
 *		in device node.
 * -ENODATA	when empty 'operating-points' property is found
 * -EINVAL	when invalid entries are found in opp-v2 table
 */
int devm_pm_opp_of_add_table(struct device *dev)
{
	/* Index 0: the first (or only) "operating-points-v2" phandle. */
	return _devm_of_add_table_indexed(dev, 0);
}
EXPORT_SYMBOL_GPL(devm_pm_opp_of_add_table);
/**
 * dev_pm_opp_of_add_table() - Initialize opp table from device tree
 * @dev:	device pointer used to lookup OPP table.
 *
 * Register the initial OPP table with the OPP library for given device.
 *
 * The caller is responsible for calling dev_pm_opp_of_remove_table() to undo
 * this; see devm_pm_opp_of_add_table() for the managed variant.
 *
 * Return:
 * 0		On success OR
 *		Duplicate OPPs (both freq and volt are same) and opp->available
 * -EEXIST	Freq are same and volt are different OR
 *		Duplicate OPPs (both freq and volt are same) and !opp->available
 * -ENOMEM	Memory allocation failure
 * -ENODEV	when 'operating-points' property is not found or is invalid data
 *		in device node.
 * -ENODATA	when empty 'operating-points' property is found
 * -EINVAL	when invalid entries are found in opp-v2 table
 */
int dev_pm_opp_of_add_table(struct device *dev)
{
	return _of_add_table_indexed(dev, 0);
}
EXPORT_SYMBOL_GPL(dev_pm_opp_of_add_table);
/**
 * dev_pm_opp_of_add_table_indexed() - Initialize indexed opp table from device tree
 * @dev:	device pointer used to lookup OPP table.
 * @index:	Index number.
 *
 * Register the initial OPP table with the OPP library for given device only
 * using the "operating-points-v2" property.
 *
 * Return: Refer to dev_pm_opp_of_add_table() for return values.
 */
int dev_pm_opp_of_add_table_indexed(struct device *dev, int index)
{
	return _of_add_table_indexed(dev, index);
}
EXPORT_SYMBOL_GPL(dev_pm_opp_of_add_table_indexed);
/**
 * devm_pm_opp_of_add_table_indexed() - Initialize indexed opp table from device tree
 * @dev:	device pointer used to lookup OPP table.
 * @index:	Index number.
 *
 * This is a resource-managed variant of dev_pm_opp_of_add_table_indexed().
 *
 * Return: Refer to dev_pm_opp_of_add_table() for return values.
 */
int devm_pm_opp_of_add_table_indexed(struct device *dev, int index)
{
	return _devm_of_add_table_indexed(dev, index);
}
EXPORT_SYMBOL_GPL(devm_pm_opp_of_add_table_indexed);
/* CPU device specific helpers */

/**
 * dev_pm_opp_of_cpumask_remove_table() - Removes OPP table for @cpumask
 * @cpumask:	cpumask for which OPP table needs to be removed
 *
 * This removes the OPP tables for CPUs present in the @cpumask.
 * This should be used only to remove static entries created from DT.
 */
void dev_pm_opp_of_cpumask_remove_table(const struct cpumask *cpumask)
{
	/*
	 * -1: no "last partially-added CPU" to stop at — presumably removes
	 * the tables of every CPU in @cpumask (contrast with the rollback in
	 * dev_pm_opp_of_cpumask_add_table(), which passes the failing cpu).
	 */
	_dev_pm_opp_cpumask_remove_table(cpumask, -1);
}
EXPORT_SYMBOL_GPL(dev_pm_opp_of_cpumask_remove_table);
/**
 * dev_pm_opp_of_cpumask_add_table() - Adds OPP table for @cpumask
 * @cpumask:	cpumask for which OPP table needs to be added.
 *
 * This adds the OPP tables for CPUs present in the @cpumask.
 *
 * Return: 0 on success, -ENODEV if a CPU device is missing or the cpumask is
 * empty, otherwise the error propagated from dev_pm_opp_of_add_table().
 */
int dev_pm_opp_of_cpumask_add_table(const struct cpumask *cpumask)
{
	struct device *cpu_dev;
	int cpu, ret;

	if (WARN_ON(cpumask_empty(cpumask)))
		return -ENODEV;

	for_each_cpu(cpu, cpumask) {
		cpu_dev = get_cpu_device(cpu);
		if (!cpu_dev) {
			pr_err("%s: failed to get cpu%d device\n", __func__,
			       cpu);
			ret = -ENODEV;
			goto remove_table;
		}

		ret = dev_pm_opp_of_add_table(cpu_dev);
		if (ret) {
			/*
			 * OPP may get registered dynamically, don't print error
			 * message here.
			 */
			pr_debug("%s: couldn't find opp table for cpu:%d, %d\n",
				 __func__, cpu, ret);

			goto remove_table;
		}
	}

	return 0;

remove_table:
	/* Free all other OPPs */
	/* Roll back the tables added before the failing @cpu. */
	_dev_pm_opp_cpumask_remove_table(cpumask, cpu);

	return ret;
}
EXPORT_SYMBOL_GPL(dev_pm_opp_of_cpumask_add_table);
  1094. /*
  1095. * Works only for OPP v2 bindings.
  1096. *
  1097. * Returns -ENOENT if operating-points-v2 bindings aren't supported.
  1098. */
  1099. /**
  1100. * dev_pm_opp_of_get_sharing_cpus() - Get cpumask of CPUs sharing OPPs with
  1101. * @cpu_dev using operating-points-v2
  1102. * bindings.
  1103. *
  1104. * @cpu_dev: CPU device for which we do this operation
  1105. * @cpumask: cpumask to update with information of sharing CPUs
  1106. *
  1107. * This updates the @cpumask with CPUs that are sharing OPPs with @cpu_dev.
  1108. *
  1109. * Returns -ENOENT if operating-points-v2 isn't present for @cpu_dev.
  1110. */
  1111. int dev_pm_opp_of_get_sharing_cpus(struct device *cpu_dev,
  1112. struct cpumask *cpumask)
  1113. {
  1114. struct device_node *np, *tmp_np, *cpu_np;
  1115. int cpu, ret = 0;
  1116. /* Get OPP descriptor node */
  1117. np = dev_pm_opp_of_get_opp_desc_node(cpu_dev);
  1118. if (!np) {
  1119. dev_dbg(cpu_dev, "%s: Couldn't find opp node.\n", __func__);
  1120. return -ENOENT;
  1121. }
  1122. cpumask_set_cpu(cpu_dev->id, cpumask);
  1123. /* OPPs are shared ? */
  1124. if (!of_property_read_bool(np, "opp-shared"))
  1125. goto put_cpu_node;
  1126. for_each_possible_cpu(cpu) {
  1127. if (cpu == cpu_dev->id)
  1128. continue;
  1129. cpu_np = of_cpu_device_node_get(cpu);
  1130. if (!cpu_np) {
  1131. dev_err(cpu_dev, "%s: failed to get cpu%d node\n",
  1132. __func__, cpu);
  1133. ret = -ENOENT;
  1134. goto put_cpu_node;
  1135. }
  1136. /* Get OPP descriptor node */
  1137. tmp_np = _opp_of_get_opp_desc_node(cpu_np, 0);
  1138. of_node_put(cpu_np);
  1139. if (!tmp_np) {
  1140. pr_err("%pOF: Couldn't find opp node\n", cpu_np);
  1141. ret = -ENOENT;
  1142. goto put_cpu_node;
  1143. }
  1144. /* CPUs are sharing opp node */
  1145. if (np == tmp_np)
  1146. cpumask_set_cpu(cpu, cpumask);
  1147. of_node_put(tmp_np);
  1148. }
  1149. put_cpu_node:
  1150. of_node_put(np);
  1151. return ret;
  1152. }
  1153. EXPORT_SYMBOL_GPL(dev_pm_opp_of_get_sharing_cpus);
  1154. /**
  1155. * of_get_required_opp_performance_state() - Search for required OPP and return its performance state.
  1156. * @np: Node that contains the "required-opps" property.
  1157. * @index: Index of the phandle to parse.
  1158. *
  1159. * Returns the performance state of the OPP pointed out by the "required-opps"
  1160. * property at @index in @np.
  1161. *
  1162. * Return: Zero or positive performance state on success, otherwise negative
  1163. * value on errors.
  1164. */
  1165. int of_get_required_opp_performance_state(struct device_node *np, int index)
  1166. {
  1167. struct dev_pm_opp *opp;
  1168. struct device_node *required_np;
  1169. struct opp_table *opp_table;
  1170. int pstate = -EINVAL;
  1171. required_np = of_parse_required_opp(np, index);
  1172. if (!required_np)
  1173. return -ENODEV;
  1174. opp_table = _find_table_of_opp_np(required_np);
  1175. if (IS_ERR(opp_table)) {
  1176. pr_err("%s: Failed to find required OPP table %pOF: %ld\n",
  1177. __func__, np, PTR_ERR(opp_table));
  1178. goto put_required_np;
  1179. }
  1180. /* The OPP tables must belong to a genpd */
  1181. if (unlikely(!opp_table->is_genpd)) {
  1182. pr_err("%s: Performance state is only valid for genpds.\n", __func__);
  1183. goto put_required_np;
  1184. }
  1185. opp = _find_opp_of_np(opp_table, required_np);
  1186. if (opp) {
  1187. if (opp->level == OPP_LEVEL_UNSET) {
  1188. pr_err("%s: OPP levels aren't available for %pOF\n",
  1189. __func__, np);
  1190. } else {
  1191. pstate = opp->level;
  1192. }
  1193. dev_pm_opp_put(opp);
  1194. }
  1195. dev_pm_opp_put_opp_table(opp_table);
  1196. put_required_np:
  1197. of_node_put(required_np);
  1198. return pstate;
  1199. }
  1200. EXPORT_SYMBOL_GPL(of_get_required_opp_performance_state);
/**
 * dev_pm_opp_of_has_required_opp - Find out if a required-opps exists.
 * @dev: The device to investigate.
 *
 * Returns true if the device's node has a "operating-points-v2" property and if
 * the corresponding node for the opp-table describes opp nodes that uses the
 * "required-opps" property.
 *
 * Return: True if a required-opps is present, else false.
 */
bool dev_pm_opp_of_has_required_opp(struct device *dev)
{
	struct device_node *opp_np, *np;
	int count;

	opp_np = _opp_of_get_opp_desc_node(dev->of_node, 0);
	if (!opp_np)
		return false;

	/* Only the first available OPP child node is inspected. */
	np = of_get_next_available_child(opp_np, NULL);
	of_node_put(opp_np);
	if (!np) {
		dev_warn(dev, "Empty OPP table\n");
		return false;
	}

	count = of_count_phandle_with_args(np, "required-opps", NULL);
	of_node_put(np);

	/* Negative count (error) also yields false. */
	return count > 0;
}
/**
 * dev_pm_opp_get_of_node() - Gets the DT node corresponding to an opp
 * @opp:	opp for which DT node has to be returned for
 *
 * Return: DT node corresponding to the opp, or NULL if @opp is invalid.
 *
 * The caller needs to put the node with of_node_put() after using it.
 */
struct device_node *dev_pm_opp_get_of_node(struct dev_pm_opp *opp)
{
	if (IS_ERR_OR_NULL(opp)) {
		pr_err("%s: Invalid parameters\n", __func__);
		return NULL;
	}

	/* Take a reference; the caller is responsible for of_node_put(). */
	return of_node_get(opp->np);
}
EXPORT_SYMBOL_GPL(dev_pm_opp_get_of_node);
  1245. /*
  1246. * Callback function provided to the Energy Model framework upon registration.
  1247. * It provides the power used by @dev at @kHz if it is the frequency of an
  1248. * existing OPP, or at the frequency of the first OPP above @kHz otherwise
  1249. * (see dev_pm_opp_find_freq_ceil()). This function updates @kHz to the ceiled
  1250. * frequency and @uW to the associated power.
  1251. *
  1252. * Returns 0 on success or a proper -EINVAL value in case of error.
  1253. */
  1254. static int __maybe_unused
  1255. _get_dt_power(struct device *dev, unsigned long *uW, unsigned long *kHz)
  1256. {
  1257. struct dev_pm_opp *opp;
  1258. unsigned long opp_freq, opp_power;
  1259. /* Find the right frequency and related OPP */
  1260. opp_freq = *kHz * 1000;
  1261. opp = dev_pm_opp_find_freq_ceil(dev, &opp_freq);
  1262. if (IS_ERR(opp))
  1263. return -EINVAL;
  1264. opp_power = dev_pm_opp_get_power(opp);
  1265. dev_pm_opp_put(opp);
  1266. if (!opp_power)
  1267. return -EINVAL;
  1268. *kHz = opp_freq / 1000;
  1269. *uW = opp_power;
  1270. return 0;
  1271. }
/**
 * dev_pm_opp_calc_power() - Calculate power value for device with EM
 * @dev		: Device for which an Energy Model has to be registered
 * @uW		: New power value that is calculated
 * @kHz		: Frequency for which the new power is calculated
 *
 * This computes the power estimated by @dev at @kHz if it is the frequency
 * of an existing OPP, or at the frequency of the first OPP above @kHz otherwise
 * (see dev_pm_opp_find_freq_ceil()). This function updates @kHz to the ceiled
 * frequency and @uW to the associated power. The power is estimated as
 * P = C * V^2 * f with C being the device's capacitance and V and f
 * respectively the voltage and frequency of the OPP.
 * It is also used as a callback function provided to the Energy Model
 * framework upon registration.
 *
 * Returns -EINVAL if the power calculation failed because of missing
 * parameters, 0 otherwise.
 */
int dev_pm_opp_calc_power(struct device *dev, unsigned long *uW,
			  unsigned long *kHz)
{
	struct dev_pm_opp *opp;
	struct device_node *np;
	unsigned long mV, Hz;
	u32 cap;
	u64 tmp;
	int ret;

	np = of_node_get(dev->of_node);
	if (!np)
		return -EINVAL;

	/* C in P = C * V^2 * f, from the "dynamic-power-coefficient" DT prop */
	ret = of_property_read_u32(np, "dynamic-power-coefficient", &cap);
	of_node_put(np);
	if (ret)
		return -EINVAL;

	Hz = *kHz * 1000;
	opp = dev_pm_opp_find_freq_ceil(dev, &Hz);
	if (IS_ERR(opp))
		return -EINVAL;

	mV = dev_pm_opp_get_voltage(opp) / 1000;
	dev_pm_opp_put(opp);
	if (!mV)
		return -EINVAL;

	/* 64-bit intermediate: cap * mV^2 * MHz can exceed 32 bits. */
	tmp = (u64)cap * mV * mV * (Hz / 1000000);
	/* Provide power in micro-Watts */
	do_div(tmp, 1000000);

	*uW = (unsigned long)tmp;
	*kHz = Hz / 1000;

	return 0;
}
EXPORT_SYMBOL_GPL(dev_pm_opp_calc_power);
  1322. static bool _of_has_opp_microwatt_property(struct device *dev)
  1323. {
  1324. unsigned long power, freq = 0;
  1325. struct dev_pm_opp *opp;
  1326. /* Check if at least one OPP has needed property */
  1327. opp = dev_pm_opp_find_freq_ceil(dev, &freq);
  1328. if (IS_ERR(opp))
  1329. return false;
  1330. power = dev_pm_opp_get_power(opp);
  1331. dev_pm_opp_put(opp);
  1332. if (!power)
  1333. return false;
  1334. return true;
  1335. }
/**
 * dev_pm_opp_of_register_em() - Attempt to register an Energy Model
 * @dev		: Device for which an Energy Model has to be registered
 * @cpus	: CPUs for which an Energy Model has to be registered. For
 *		other type of devices it should be set to NULL.
 *
 * This checks whether the "dynamic-power-coefficient" devicetree property has
 * been specified, and tries to register an Energy Model with it if it has.
 * Having this property means the voltages are known for OPPs and the EM
 * might be calculated.
 *
 * Return: 0 on success, negative error code on failure.
 */
int dev_pm_opp_of_register_em(struct device *dev, struct cpumask *cpus)
{
	struct em_data_callback em_cb;
	struct device_node *np;
	int ret, nr_opp;
	u32 cap;

	if (IS_ERR_OR_NULL(dev)) {
		ret = -EINVAL;
		goto failed;
	}

	nr_opp = dev_pm_opp_get_opp_count(dev);
	if (nr_opp <= 0) {
		ret = -EINVAL;
		goto failed;
	}

	/* First, try to find more precised Energy Model in DT */
	if (_of_has_opp_microwatt_property(dev)) {
		/* Per-OPP microwatt values exist: use them directly. */
		EM_SET_ACTIVE_POWER_CB(em_cb, _get_dt_power);
		goto register_em;
	}

	np = of_node_get(dev->of_node);
	if (!np) {
		ret = -EINVAL;
		goto failed;
	}

	/*
	 * Register an EM only if the 'dynamic-power-coefficient' property is
	 * set in devicetree. It is assumed the voltage values are known if that
	 * property is set since it is useless otherwise. If voltages are not
	 * known, just let the EM registration fail with an error to alert the
	 * user about the inconsistent configuration.
	 */
	ret = of_property_read_u32(np, "dynamic-power-coefficient", &cap);
	of_node_put(np);
	if (ret || !cap) {
		dev_dbg(dev, "Couldn't find proper 'dynamic-power-coefficient' in DT\n");
		ret = -EINVAL;
		goto failed;
	}

	/* Fall back to the P = C * V^2 * f estimate. */
	EM_SET_ACTIVE_POWER_CB(em_cb, dev_pm_opp_calc_power);

register_em:
	ret = em_dev_register_perf_domain(dev, nr_opp, &em_cb, cpus, true);
	if (ret)
		goto failed;

	return 0;

failed:
	dev_dbg(dev, "Couldn't register Energy Model %d\n", ret);
	return ret;
}
EXPORT_SYMBOL_GPL(dev_pm_opp_of_register_em);