core.c 27 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
370470570670770870971071171271371471571671771871972072172272372472572672772872973073173273373473573673773873974074174274374474574674774874975075175275375475575675775875976076176276376476576676776876977077177277377477577677777877978078178278378478578678778878979079179279379479579679779879980080180280380480580680780880981081181281381481581681781881982082182282382482582682782882983083183283383483583683783883984084184284384484584684784884985085185285385485585685785885986086186286386486586686786886987087187287387487587687787887988088188288388488588688788888989089189289389489589689789889990090190290390490590690790890991091191291391491591691791891992092192292392492592692792892993093193293393493593693793893994094194294394494594694794894995095195295395495595695795895996096196296396496596696796896997097197297397497597697797897998098198298398498598698798898999099199299399499599699799899910001001100210031004100510061007100810091010101110121013101410151016101710181019102010211022102310241025102610271028102910301031103210331034103510361037103810391040104110421043104410451046104710481049105010511052105310541055105610571058105910601061106210631064106510661067106810691070107110721073107410751076107710781079108010811082108310841085108610871088108910901091109210931094109510961097109810991100110111021103110411051106110711081109111011111112111311141115111611171118111911201121112211231124112511261127112811291130113111321133113411351136113711381139114011411142114311441145114611471148
  1. // SPDX-License-Identifier: GPL-2.0
  2. /*
  3. * Interconnect framework core driver
  4. *
  5. * Copyright (c) 2017-2019, Linaro Ltd.
  6. * Author: Georgi Djakov <georgi.djakov@linaro.org>
  7. */
  8. #include <linux/debugfs.h>
  9. #include <linux/device.h>
  10. #include <linux/idr.h>
  11. #include <linux/init.h>
  12. #include <linux/interconnect.h>
  13. #include <linux/interconnect-provider.h>
  14. #include <linux/list.h>
  15. #include <linux/mutex.h>
  16. #include <linux/slab.h>
  17. #include <linux/of.h>
  18. #include <linux/overflow.h>
  19. #include "internal.h"
  20. #define CREATE_TRACE_POINTS
  21. #include "trace.h"
  22. static DEFINE_IDR(icc_idr);
  23. static LIST_HEAD(icc_providers);
  24. static int providers_count;
  25. static bool synced_state;
  26. static DEFINE_MUTEX(icc_lock);
  27. static DEFINE_MUTEX(icc_bw_lock);
  28. static struct dentry *icc_debugfs_dir;
  29. static void icc_summary_show_one(struct seq_file *s, struct icc_node *n)
  30. {
  31. if (!n)
  32. return;
  33. seq_printf(s, "%-42s %12u %12u\n",
  34. n->name, n->avg_bw, n->peak_bw);
  35. }
  36. static int icc_summary_show(struct seq_file *s, void *data)
  37. {
  38. struct icc_provider *provider;
  39. seq_puts(s, " node tag avg peak\n");
  40. seq_puts(s, "--------------------------------------------------------------------\n");
  41. mutex_lock(&icc_lock);
  42. list_for_each_entry(provider, &icc_providers, provider_list) {
  43. struct icc_node *n;
  44. list_for_each_entry(n, &provider->nodes, node_list) {
  45. struct icc_req *r;
  46. icc_summary_show_one(s, n);
  47. hlist_for_each_entry(r, &n->req_list, req_node) {
  48. u32 avg_bw = 0, peak_bw = 0;
  49. if (!r->dev)
  50. continue;
  51. if (r->enabled) {
  52. avg_bw = r->avg_bw;
  53. peak_bw = r->peak_bw;
  54. }
  55. seq_printf(s, " %-27s %12u %12u %12u\n",
  56. dev_name(r->dev), r->tag, avg_bw, peak_bw);
  57. }
  58. }
  59. }
  60. mutex_unlock(&icc_lock);
  61. return 0;
  62. }
  63. DEFINE_SHOW_ATTRIBUTE(icc_summary);
  64. static void icc_graph_show_link(struct seq_file *s, int level,
  65. struct icc_node *n, struct icc_node *m)
  66. {
  67. seq_printf(s, "%s\"%d:%s\" -> \"%d:%s\"\n",
  68. level == 2 ? "\t\t" : "\t",
  69. n->id, n->name, m->id, m->name);
  70. }
  71. static void icc_graph_show_node(struct seq_file *s, struct icc_node *n)
  72. {
  73. seq_printf(s, "\t\t\"%d:%s\" [label=\"%d:%s",
  74. n->id, n->name, n->id, n->name);
  75. seq_printf(s, "\n\t\t\t|avg_bw=%ukBps", n->avg_bw);
  76. seq_printf(s, "\n\t\t\t|peak_bw=%ukBps", n->peak_bw);
  77. seq_puts(s, "\"]\n");
  78. }
  79. static int icc_graph_show(struct seq_file *s, void *data)
  80. {
  81. struct icc_provider *provider;
  82. struct icc_node *n;
  83. int cluster_index = 0;
  84. int i;
  85. seq_puts(s, "digraph {\n\trankdir = LR\n\tnode [shape = record]\n");
  86. mutex_lock(&icc_lock);
  87. /* draw providers as cluster subgraphs */
  88. cluster_index = 0;
  89. list_for_each_entry(provider, &icc_providers, provider_list) {
  90. seq_printf(s, "\tsubgraph cluster_%d {\n", ++cluster_index);
  91. if (provider->dev)
  92. seq_printf(s, "\t\tlabel = \"%s\"\n",
  93. dev_name(provider->dev));
  94. /* draw nodes */
  95. list_for_each_entry(n, &provider->nodes, node_list)
  96. icc_graph_show_node(s, n);
  97. /* draw internal links */
  98. list_for_each_entry(n, &provider->nodes, node_list)
  99. for (i = 0; i < n->num_links; ++i)
  100. if (n->provider == n->links[i]->provider)
  101. icc_graph_show_link(s, 2, n,
  102. n->links[i]);
  103. seq_puts(s, "\t}\n");
  104. }
  105. /* draw external links */
  106. list_for_each_entry(provider, &icc_providers, provider_list)
  107. list_for_each_entry(n, &provider->nodes, node_list)
  108. for (i = 0; i < n->num_links; ++i)
  109. if (n->provider != n->links[i]->provider)
  110. icc_graph_show_link(s, 1, n,
  111. n->links[i]);
  112. mutex_unlock(&icc_lock);
  113. seq_puts(s, "}");
  114. return 0;
  115. }
  116. DEFINE_SHOW_ATTRIBUTE(icc_graph);
/* Look up a node by its global id; returns NULL when not registered. */
static struct icc_node *node_find(const int id)
{
	return idr_find(&icc_idr, id);
}
  121. static struct icc_node *node_find_by_name(const char *name)
  122. {
  123. struct icc_provider *provider;
  124. struct icc_node *n;
  125. list_for_each_entry(provider, &icc_providers, provider_list) {
  126. list_for_each_entry(n, &provider->nodes, node_list) {
  127. if (!strcmp(n->name, name))
  128. return n;
  129. }
  130. }
  131. return NULL;
  132. }
/*
 * path_init() - allocate an icc_path and attach one request per hop
 * @dev: consumer device the requests belong to
 * @dst: destination node found by path_find()
 * @num_nodes: number of hops, including the source node
 *
 * Walks the path backwards from @dst via the ->reverse pointers that
 * path_find() filled in, so reqs[0] ends up being the source node and
 * reqs[num_nodes - 1] the destination. Each request is linked into its
 * node's req_list and starts out enabled with zero bandwidth.
 */
static struct icc_path *path_init(struct device *dev, struct icc_node *dst,
				  ssize_t num_nodes)
{
	struct icc_node *node = dst;
	struct icc_path *path;
	int i;

	path = kzalloc(struct_size(path, reqs, num_nodes), GFP_KERNEL);
	if (!path)
		return ERR_PTR(-ENOMEM);

	path->num_nodes = num_nodes;

	mutex_lock(&icc_bw_lock);

	for (i = num_nodes - 1; i >= 0; i--) {
		/* each node on the path pins its provider */
		node->provider->users++;
		hlist_add_head(&path->reqs[i].req_node, &node->req_list);
		path->reqs[i].node = node;
		path->reqs[i].dev = dev;
		path->reqs[i].enabled = true;
		/* reference to previous node was saved during path traversal */
		node = node->reverse;
	}

	mutex_unlock(&icc_bw_lock);

	return path;
}
/*
 * path_find() - breadth-first search from @src towards @dst
 *
 * The search advances one level of links per outer-loop iteration:
 * traverse_list holds the current frontier, edge_list collects the next
 * frontier, and visited_list accumulates everything already seen so the
 * ->is_traversed markers can be cleared again before returning. Each
 * discovered node records its predecessor in ->reverse, which path_init()
 * later uses to rebuild the path backwards from @dst.
 *
 * Returns an icc_path, ERR_PTR(-EPROBE_DEFER) when no path was found
 * (a provider may not have probed yet), or ERR_PTR(-ENOENT) on a NULL
 * link (peer node gone).
 */
static struct icc_path *path_find(struct device *dev, struct icc_node *src,
				  struct icc_node *dst)
{
	struct icc_path *path = ERR_PTR(-EPROBE_DEFER);
	struct icc_node *n, *node = NULL;
	struct list_head traverse_list;
	struct list_head edge_list;
	struct list_head visited_list;
	size_t i, depth = 1;
	bool found = false;

	INIT_LIST_HEAD(&traverse_list);
	INIT_LIST_HEAD(&edge_list);
	INIT_LIST_HEAD(&visited_list);

	list_add(&src->search_list, &traverse_list);
	src->reverse = NULL;

	do {
		list_for_each_entry_safe(node, n, &traverse_list, search_list) {
			if (node == dst) {
				found = true;
				/* move everything onto visited_list for cleanup */
				list_splice_init(&edge_list, &visited_list);
				list_splice_init(&traverse_list, &visited_list);
				break;
			}
			for (i = 0; i < node->num_links; i++) {
				struct icc_node *tmp = node->links[i];

				if (!tmp) {
					path = ERR_PTR(-ENOENT);
					goto out;
				}

				if (tmp->is_traversed)
					continue;

				tmp->is_traversed = true;
				tmp->reverse = node;
				list_add_tail(&tmp->search_list, &edge_list);
			}
		}

		if (found)
			break;

		/* retire the current frontier, advance to the next one */
		list_splice_init(&traverse_list, &visited_list);
		list_splice_init(&edge_list, &traverse_list);

		/* count the hops including the source */
		depth++;

	} while (!list_empty(&traverse_list));

out:

	/* reset the traversed state */
	list_for_each_entry_reverse(n, &visited_list, search_list)
		n->is_traversed = false;

	if (found)
		path = path_init(dev, dst, depth);

	return path;
}
/*
 * We want the path to honor all bandwidth requests, so the average and peak
 * bandwidth requirements from each consumer are aggregated at each node.
 * The aggregation is platform specific, so each platform can customize it by
 * implementing its own aggregate() function.
 */
static int aggregate_requests(struct icc_node *node)
{
	struct icc_provider *p = node->provider;
	struct icc_req *r;
	u32 avg_bw, peak_bw;

	/* start from scratch; the provider's aggregate() accumulates */
	node->avg_bw = 0;
	node->peak_bw = 0;

	if (p->pre_aggregate)
		p->pre_aggregate(node);

	hlist_for_each_entry(r, &node->req_list, req_node) {
		/* disabled requests contribute zero bandwidth */
		if (r->enabled) {
			avg_bw = r->avg_bw;
			peak_bw = r->peak_bw;
		} else {
			avg_bw = 0;
			peak_bw = 0;
		}
		p->aggregate(node, r->tag, avg_bw, peak_bw,
			     &node->avg_bw, &node->peak_bw);

		/* during boot use the initial bandwidth as a floor value */
		if (!synced_state) {
			node->avg_bw = max(node->avg_bw, node->init_avg);
			node->peak_bw = max(node->peak_bw, node->init_peak);
		}
	}

	return 0;
}
/*
 * apply_constraints() - program the aggregated bandwidth along a path
 *
 * Walks consecutive (prev, next) node pairs and calls the provider's
 * set() callback for each hop. A pair is skipped when the two nodes
 * belong to different providers and the provider does not support
 * inter-provider pairs (!p->inter_set).
 *
 * Note: starts with ret = -EINVAL, so a path where no set() was ever
 * called returns -EINVAL.
 */
static int apply_constraints(struct icc_path *path)
{
	struct icc_node *next, *prev = NULL;
	struct icc_provider *p;
	int ret = -EINVAL;
	int i;

	for (i = 0; i < path->num_nodes; i++) {
		next = path->reqs[i].node;
		p = next->provider;

		/* both endpoints should be valid master-slave pairs */
		if (!prev || (p != prev->provider && !p->inter_set)) {
			prev = next;
			continue;
		}

		/* set the constraints */
		ret = p->set(prev, next);
		if (ret)
			goto out;

		prev = next;
	}
out:
	return ret;
}
  263. int icc_std_aggregate(struct icc_node *node, u32 tag, u32 avg_bw,
  264. u32 peak_bw, u32 *agg_avg, u32 *agg_peak)
  265. {
  266. *agg_avg += avg_bw;
  267. *agg_peak = max(*agg_peak, peak_bw);
  268. return 0;
  269. }
  270. EXPORT_SYMBOL_GPL(icc_std_aggregate);
  271. /* of_icc_xlate_onecell() - Translate function using a single index.
  272. * @spec: OF phandle args to map into an interconnect node.
  273. * @data: private data (pointer to struct icc_onecell_data)
  274. *
  275. * This is a generic translate function that can be used to model simple
  276. * interconnect providers that have one device tree node and provide
  277. * multiple interconnect nodes. A single cell is used as an index into
  278. * an array of icc nodes specified in the icc_onecell_data struct when
  279. * registering the provider.
  280. */
  281. struct icc_node *of_icc_xlate_onecell(const struct of_phandle_args *spec,
  282. void *data)
  283. {
  284. struct icc_onecell_data *icc_data = data;
  285. unsigned int idx = spec->args[0];
  286. if (idx >= icc_data->num_nodes) {
  287. pr_err("%s: invalid index %u\n", __func__, idx);
  288. return ERR_PTR(-EINVAL);
  289. }
  290. return icc_data->nodes[idx];
  291. }
  292. EXPORT_SYMBOL_GPL(of_icc_xlate_onecell);
/**
 * of_icc_get_from_provider() - Look-up interconnect node
 * @spec: OF phandle args to use for look-up
 *
 * Looks for interconnect provider under the node specified by @spec and if
 * found, uses xlate function of the provider to map phandle args to node.
 *
 * Returns a valid pointer to struct icc_node_data on success or ERR_PTR()
 * on failure.
 */
struct icc_node_data *of_icc_get_from_provider(const struct of_phandle_args *spec)
{
	/* default: provider not registered (yet) -> let the caller defer */
	struct icc_node *node = ERR_PTR(-EPROBE_DEFER);
	struct icc_node_data *data = NULL;
	struct icc_provider *provider;

	if (!spec)
		return ERR_PTR(-EINVAL);

	mutex_lock(&icc_lock);
	list_for_each_entry(provider, &icc_providers, provider_list) {
		if (provider->dev->of_node == spec->np) {
			/* extended xlate returns node plus extra data (e.g. tag) */
			if (provider->xlate_extended) {
				data = provider->xlate_extended(spec, provider->data);
				if (!IS_ERR(data)) {
					node = data->node;
					break;
				}
			} else {
				node = provider->xlate(spec, provider->data);
				if (!IS_ERR(node))
					break;
			}
		}
	}
	mutex_unlock(&icc_lock);

	if (!node)
		return ERR_PTR(-EINVAL);

	if (IS_ERR(node))
		return ERR_CAST(node);

	/* plain xlate path: wrap the node in a freshly allocated data struct */
	if (!data) {
		data = kzalloc(sizeof(*data), GFP_KERNEL);
		if (!data)
			return ERR_PTR(-ENOMEM);

		data->node = node;
	}

	return data;
}
EXPORT_SYMBOL_GPL(of_icc_get_from_provider);
  340. static void devm_icc_release(struct device *dev, void *res)
  341. {
  342. icc_put(*(struct icc_path **)res);
  343. }
  344. struct icc_path *devm_of_icc_get(struct device *dev, const char *name)
  345. {
  346. struct icc_path **ptr, *path;
  347. ptr = devres_alloc(devm_icc_release, sizeof(*ptr), GFP_KERNEL);
  348. if (!ptr)
  349. return ERR_PTR(-ENOMEM);
  350. path = of_icc_get(dev, name);
  351. if (!IS_ERR(path)) {
  352. *ptr = path;
  353. devres_add(dev, ptr);
  354. } else {
  355. devres_free(ptr);
  356. }
  357. return path;
  358. }
  359. EXPORT_SYMBOL_GPL(devm_of_icc_get);
/**
 * of_icc_get_by_index() - get a path handle from a DT node based on index
 * @dev: device pointer for the consumer device
 * @idx: interconnect path index
 *
 * This function will search for a path between two endpoints and return an
 * icc_path handle on success. Use icc_put() to release constraints when they
 * are not needed anymore.
 * If the interconnect API is disabled, NULL is returned and the consumer
 * drivers will still build. Drivers are free to handle this specifically,
 * but they don't have to.
 *
 * Return: icc_path pointer on success or ERR_PTR() on error. NULL is returned
 * when the API is disabled or the "interconnects" DT property is missing.
 */
struct icc_path *of_icc_get_by_index(struct device *dev, int idx)
{
	struct icc_path *path;
	struct icc_node_data *src_data, *dst_data;
	struct device_node *np;
	struct of_phandle_args src_args, dst_args;
	int ret;

	if (!dev || !dev->of_node)
		return ERR_PTR(-ENODEV);

	np = dev->of_node;

	/*
	 * When the consumer DT node do not have "interconnects" property
	 * return a NULL path to skip setting constraints.
	 */
	if (!of_property_present(np, "interconnects"))
		return NULL;

	/*
	 * We use a combination of phandle and specifier for endpoint. For now
	 * lets support only global ids and extend this in the future if needed
	 * without breaking DT compatibility.
	 */
	/* entries come in (src, dst) pairs, hence idx * 2 and idx * 2 + 1 */
	ret = of_parse_phandle_with_args(np, "interconnects",
					 "#interconnect-cells", idx * 2,
					 &src_args);
	if (ret)
		return ERR_PTR(ret);

	of_node_put(src_args.np);

	ret = of_parse_phandle_with_args(np, "interconnects",
					 "#interconnect-cells", idx * 2 + 1,
					 &dst_args);
	if (ret)
		return ERR_PTR(ret);

	of_node_put(dst_args.np);

	src_data = of_icc_get_from_provider(&src_args);
	if (IS_ERR(src_data)) {
		dev_err_probe(dev, PTR_ERR(src_data), "error finding src node\n");
		return ERR_CAST(src_data);
	}

	dst_data = of_icc_get_from_provider(&dst_args);
	if (IS_ERR(dst_data)) {
		dev_err_probe(dev, PTR_ERR(dst_data), "error finding dst node\n");
		kfree(src_data);
		return ERR_CAST(dst_data);
	}

	mutex_lock(&icc_lock);
	path = path_find(dev, src_data->node, dst_data->node);
	mutex_unlock(&icc_lock);
	if (IS_ERR(path)) {
		dev_err(dev, "%s: invalid path=%ld\n", __func__, PTR_ERR(path));
		goto free_icc_data;
	}

	/* a tag is applied only when both endpoints agree on it */
	if (src_data->tag && src_data->tag == dst_data->tag)
		icc_set_tag(path, src_data->tag);

	path->name = kasprintf(GFP_KERNEL, "%s-%s",
			       src_data->node->name, dst_data->node->name);
	if (!path->name) {
		/*
		 * NOTE(review): path_init() already linked path->reqs into
		 * the nodes' req_lists; freeing the path here leaves those
		 * hlist entries dangling — confirm whether an icc_put()-style
		 * teardown is needed on this error path.
		 */
		kfree(path);
		path = ERR_PTR(-ENOMEM);
	}

free_icc_data:
	kfree(src_data);
	kfree(dst_data);
	return path;
}
EXPORT_SYMBOL_GPL(of_icc_get_by_index);
  440. /**
  441. * of_icc_get() - get a path handle from a DT node based on name
  442. * @dev: device pointer for the consumer device
  443. * @name: interconnect path name
  444. *
  445. * This function will search for a path between two endpoints and return an
  446. * icc_path handle on success. Use icc_put() to release constraints when they
  447. * are not needed anymore.
  448. * If the interconnect API is disabled, NULL is returned and the consumer
  449. * drivers will still build. Drivers are free to handle this specifically,
  450. * but they don't have to.
  451. *
  452. * Return: icc_path pointer on success or ERR_PTR() on error. NULL is returned
  453. * when the API is disabled or the "interconnects" DT property is missing.
  454. */
  455. struct icc_path *of_icc_get(struct device *dev, const char *name)
  456. {
  457. struct device_node *np;
  458. int idx = 0;
  459. if (!dev || !dev->of_node)
  460. return ERR_PTR(-ENODEV);
  461. np = dev->of_node;
  462. /*
  463. * When the consumer DT node do not have "interconnects" property
  464. * return a NULL path to skip setting constraints.
  465. */
  466. if (!of_property_present(np, "interconnects"))
  467. return NULL;
  468. /*
  469. * We use a combination of phandle and specifier for endpoint. For now
  470. * lets support only global ids and extend this in the future if needed
  471. * without breaking DT compatibility.
  472. */
  473. if (name) {
  474. idx = of_property_match_string(np, "interconnect-names", name);
  475. if (idx < 0)
  476. return ERR_PTR(idx);
  477. }
  478. return of_icc_get_by_index(dev, idx);
  479. }
  480. EXPORT_SYMBOL_GPL(of_icc_get);
  481. /**
  482. * icc_get() - get a path handle between two endpoints
  483. * @dev: device pointer for the consumer device
  484. * @src: source node name
  485. * @dst: destination node name
  486. *
  487. * This function will search for a path between two endpoints and return an
  488. * icc_path handle on success. Use icc_put() to release constraints when they
  489. * are not needed anymore.
  490. *
  491. * Return: icc_path pointer on success or ERR_PTR() on error. NULL is returned
  492. * when the API is disabled.
  493. */
  494. struct icc_path *icc_get(struct device *dev, const char *src, const char *dst)
  495. {
  496. struct icc_node *src_node, *dst_node;
  497. struct icc_path *path = ERR_PTR(-EPROBE_DEFER);
  498. mutex_lock(&icc_lock);
  499. src_node = node_find_by_name(src);
  500. if (!src_node) {
  501. dev_err(dev, "%s: invalid src=%s\n", __func__, src);
  502. goto out;
  503. }
  504. dst_node = node_find_by_name(dst);
  505. if (!dst_node) {
  506. dev_err(dev, "%s: invalid dst=%s\n", __func__, dst);
  507. goto out;
  508. }
  509. path = path_find(dev, src_node, dst_node);
  510. if (IS_ERR(path)) {
  511. dev_err(dev, "%s: invalid path=%ld\n", __func__, PTR_ERR(path));
  512. goto out;
  513. }
  514. path->name = kasprintf(GFP_KERNEL, "%s-%s", src_node->name, dst_node->name);
  515. if (!path->name) {
  516. kfree(path);
  517. path = ERR_PTR(-ENOMEM);
  518. }
  519. out:
  520. mutex_unlock(&icc_lock);
  521. return path;
  522. }
  523. /**
  524. * icc_set_tag() - set an optional tag on a path
  525. * @path: the path we want to tag
  526. * @tag: the tag value
  527. *
  528. * This function allows consumers to append a tag to the requests associated
  529. * with a path, so that a different aggregation could be done based on this tag.
  530. */
  531. void icc_set_tag(struct icc_path *path, u32 tag)
  532. {
  533. int i;
  534. if (!path)
  535. return;
  536. mutex_lock(&icc_lock);
  537. for (i = 0; i < path->num_nodes; i++)
  538. path->reqs[i].tag = tag;
  539. mutex_unlock(&icc_lock);
  540. }
  541. EXPORT_SYMBOL_GPL(icc_set_tag);
  542. /**
  543. * icc_get_name() - Get name of the icc path
  544. * @path: interconnect path
  545. *
  546. * This function is used by an interconnect consumer to get the name of the icc
  547. * path.
  548. *
  549. * Returns a valid pointer on success, or NULL otherwise.
  550. */
  551. const char *icc_get_name(struct icc_path *path)
  552. {
  553. if (!path)
  554. return NULL;
  555. return path->name;
  556. }
  557. EXPORT_SYMBOL_GPL(icc_get_name);
/**
 * icc_set_bw() - set bandwidth constraints on an interconnect path
 * @path: interconnect path
 * @avg_bw: average bandwidth in kilobytes per second
 * @peak_bw: peak bandwidth in kilobytes per second
 *
 * This function is used by an interconnect consumer to express its own needs
 * in terms of bandwidth for a previously requested path between two endpoints.
 * The requests are aggregated and each node is updated accordingly. The entire
 * path is locked by a mutex to ensure that the set() is completed.
 * The @path can be NULL when the "interconnects" DT properties is missing,
 * which will mean that no constraints will be set.
 *
 * Returns 0 on success, or an appropriate error code otherwise.
 */
int icc_set_bw(struct icc_path *path, u32 avg_bw, u32 peak_bw)
{
	struct icc_node *node;
	u32 old_avg, old_peak;
	size_t i;
	int ret;

	if (!path)
		return 0;

	if (WARN_ON(IS_ERR(path) || !path->num_nodes))
		return -EINVAL;

	mutex_lock(&icc_bw_lock);

	/* remember the previous request so it can be restored on failure */
	old_avg = path->reqs[0].avg_bw;
	old_peak = path->reqs[0].peak_bw;

	for (i = 0; i < path->num_nodes; i++) {
		node = path->reqs[i].node;

		/* update the consumer request for this path */
		path->reqs[i].avg_bw = avg_bw;
		path->reqs[i].peak_bw = peak_bw;

		/* aggregate requests for this node */
		aggregate_requests(node);

		trace_icc_set_bw(path, node, i, avg_bw, peak_bw);
	}

	ret = apply_constraints(path);
	if (ret) {
		pr_debug("interconnect: error applying constraints (%d)\n",
			 ret);

		/* roll back: restore the old request and re-aggregate */
		for (i = 0; i < path->num_nodes; i++) {
			node = path->reqs[i].node;
			path->reqs[i].avg_bw = old_avg;
			path->reqs[i].peak_bw = old_peak;
			aggregate_requests(node);
		}
		/* best effort; the original error code is still returned */
		apply_constraints(path);
	}

	mutex_unlock(&icc_bw_lock);

	trace_icc_set_bw_end(path, ret);

	return ret;
}
EXPORT_SYMBOL_GPL(icc_set_bw);
  612. static int __icc_enable(struct icc_path *path, bool enable)
  613. {
  614. int i;
  615. if (!path)
  616. return 0;
  617. if (WARN_ON(IS_ERR(path) || !path->num_nodes))
  618. return -EINVAL;
  619. mutex_lock(&icc_lock);
  620. for (i = 0; i < path->num_nodes; i++)
  621. path->reqs[i].enabled = enable;
  622. mutex_unlock(&icc_lock);
  623. return icc_set_bw(path, path->reqs[0].avg_bw,
  624. path->reqs[0].peak_bw);
  625. }
/* Enable all requests on @path and re-apply its bandwidth. */
int icc_enable(struct icc_path *path)
{
	return __icc_enable(path, true);
}
EXPORT_SYMBOL_GPL(icc_enable);
/* Disable all requests on @path; they then aggregate as zero bandwidth. */
int icc_disable(struct icc_path *path)
{
	return __icc_enable(path, false);
}
EXPORT_SYMBOL_GPL(icc_disable);
/**
 * icc_put() - release the reference to the icc_path
 * @path: interconnect path
 *
 * Use this function to release the constraints on a path when the path is
 * no longer needed. The constraints will be re-aggregated.
 */
void icc_put(struct icc_path *path)
{
	struct icc_node *node;
	size_t i;
	int ret;

	if (!path || WARN_ON(IS_ERR(path)))
		return;

	/* drop this consumer's bandwidth contribution first */
	ret = icc_set_bw(path, 0, 0);
	if (ret)
		pr_err("%s: error (%d)\n", __func__, ret);

	/* lock order: icc_lock before icc_bw_lock (same as icc_node_add) */
	mutex_lock(&icc_lock);
	mutex_lock(&icc_bw_lock);

	for (i = 0; i < path->num_nodes; i++) {
		node = path->reqs[i].node;
		hlist_del(&path->reqs[i].req_node);
		/* release the provider reference taken in path_init() */
		if (!WARN_ON(!node->provider->users))
			node->provider->users--;
	}

	mutex_unlock(&icc_bw_lock);
	mutex_unlock(&icc_lock);

	kfree_const(path->name);
	kfree(path);
}
EXPORT_SYMBOL_GPL(icc_put);
  667. static struct icc_node *icc_node_create_nolock(int id)
  668. {
  669. struct icc_node *node;
  670. /* check if node already exists */
  671. node = node_find(id);
  672. if (node)
  673. return node;
  674. node = kzalloc(sizeof(*node), GFP_KERNEL);
  675. if (!node)
  676. return ERR_PTR(-ENOMEM);
  677. id = idr_alloc(&icc_idr, node, id, id + 1, GFP_KERNEL);
  678. if (id < 0) {
  679. WARN(1, "%s: couldn't get idr\n", __func__);
  680. kfree(node);
  681. return ERR_PTR(id);
  682. }
  683. node->id = id;
  684. return node;
  685. }
  686. /**
  687. * icc_node_create() - create a node
  688. * @id: node id
  689. *
  690. * Return: icc_node pointer on success, or ERR_PTR() on error
  691. */
  692. struct icc_node *icc_node_create(int id)
  693. {
  694. struct icc_node *node;
  695. mutex_lock(&icc_lock);
  696. node = icc_node_create_nolock(id);
  697. mutex_unlock(&icc_lock);
  698. return node;
  699. }
  700. EXPORT_SYMBOL_GPL(icc_node_create);
/**
 * icc_node_destroy() - destroy a node
 * @id: node id
 */
void icc_node_destroy(int id)
{
	struct icc_node *node;

	mutex_lock(&icc_lock);

	node = node_find(id);
	if (node) {
		/* unpublish the node while still holding the lock */
		idr_remove(&icc_idr, node->id);
		/* destroying a node with outstanding requests is a bug */
		WARN_ON(!hlist_empty(&node->req_list));
	}

	mutex_unlock(&icc_lock);

	if (!node)
		return;

	/* free outside the lock; the node is no longer reachable */
	kfree(node->links);
	kfree(node);
}
EXPORT_SYMBOL_GPL(icc_node_destroy);
  721. /**
  722. * icc_link_create() - create a link between two nodes
  723. * @node: source node id
  724. * @dst_id: destination node id
  725. *
  726. * Create a link between two nodes. The nodes might belong to different
  727. * interconnect providers and the @dst_id node might not exist (if the
  728. * provider driver has not probed yet). So just create the @dst_id node
  729. * and when the actual provider driver is probed, the rest of the node
  730. * data is filled.
  731. *
  732. * Return: 0 on success, or an error code otherwise
  733. */
  734. int icc_link_create(struct icc_node *node, const int dst_id)
  735. {
  736. struct icc_node *dst;
  737. struct icc_node **new;
  738. int ret = 0;
  739. if (!node->provider)
  740. return -EINVAL;
  741. mutex_lock(&icc_lock);
  742. dst = node_find(dst_id);
  743. if (!dst) {
  744. dst = icc_node_create_nolock(dst_id);
  745. if (IS_ERR(dst)) {
  746. ret = PTR_ERR(dst);
  747. goto out;
  748. }
  749. }
  750. new = krealloc(node->links,
  751. (node->num_links + 1) * sizeof(*node->links),
  752. GFP_KERNEL);
  753. if (!new) {
  754. ret = -ENOMEM;
  755. goto out;
  756. }
  757. node->links = new;
  758. node->links[node->num_links++] = dst;
  759. out:
  760. mutex_unlock(&icc_lock);
  761. return ret;
  762. }
  763. EXPORT_SYMBOL_GPL(icc_link_create);
/**
 * icc_node_add() - add interconnect node to interconnect provider
 * @node: pointer to the interconnect node
 * @provider: pointer to the interconnect provider
 */
void icc_node_add(struct icc_node *node, struct icc_provider *provider)
{
	/* a node may belong to only one provider */
	if (WARN_ON(node->provider))
		return;

	mutex_lock(&icc_lock);
	mutex_lock(&icc_bw_lock);

	node->provider = provider;
	list_add_tail(&node->node_list, &provider->nodes);

	/* get the initial bandwidth values and sync them with hardware */
	if (provider->get_bw) {
		provider->get_bw(node, &node->init_avg, &node->init_peak);
	} else {
		/* no get_bw(): assume maximum until the first real request */
		node->init_avg = INT_MAX;
		node->init_peak = INT_MAX;
	}
	node->avg_bw = node->init_avg;
	node->peak_bw = node->init_peak;

	/* program the initial floor into the hardware right away */
	if (node->avg_bw || node->peak_bw) {
		if (provider->pre_aggregate)
			provider->pre_aggregate(node);

		if (provider->aggregate)
			provider->aggregate(node, 0, node->init_avg, node->init_peak,
					    &node->avg_bw, &node->peak_bw);
		if (provider->set)
			provider->set(node, node);
	}

	/* aggregated values are rebuilt from requests; start from zero */
	node->avg_bw = 0;
	node->peak_bw = 0;

	mutex_unlock(&icc_bw_lock);
	mutex_unlock(&icc_lock);
}
EXPORT_SYMBOL_GPL(icc_node_add);
/**
 * icc_node_del() - delete interconnect node from interconnect provider
 * @node: pointer to the interconnect node
 */
void icc_node_del(struct icc_node *node)
{
	mutex_lock(&icc_lock);

	/* only unlinks from the provider; the node itself is not freed */
	list_del(&node->node_list);

	mutex_unlock(&icc_lock);
}
EXPORT_SYMBOL_GPL(icc_node_del);
  812. /**
  813. * icc_nodes_remove() - remove all previously added nodes from provider
  814. * @provider: the interconnect provider we are removing nodes from
  815. *
  816. * Return: 0 on success, or an error code otherwise
  817. */
  818. int icc_nodes_remove(struct icc_provider *provider)
  819. {
  820. struct icc_node *n, *tmp;
  821. if (WARN_ON(IS_ERR_OR_NULL(provider)))
  822. return -EINVAL;
  823. list_for_each_entry_safe_reverse(n, tmp, &provider->nodes, node_list) {
  824. icc_node_del(n);
  825. icc_node_destroy(n->id);
  826. }
  827. return 0;
  828. }
  829. EXPORT_SYMBOL_GPL(icc_nodes_remove);
/**
 * icc_provider_init() - initialize a new interconnect provider
 * @provider: the interconnect provider to initialize
 *
 * Must be called before adding nodes to the provider.
 */
void icc_provider_init(struct icc_provider *provider)
{
	/* every provider must supply a set() callback */
	WARN_ON(!provider->set);

	INIT_LIST_HEAD(&provider->nodes);
}
EXPORT_SYMBOL_GPL(icc_provider_init);
/**
 * icc_provider_register() - register a new interconnect provider
 * @provider: the interconnect provider to register
 *
 * Return: 0 on success, or an error code otherwise
 */
int icc_provider_register(struct icc_provider *provider)
{
	/* a provider needs at least one translate callback */
	if (WARN_ON(!provider->xlate && !provider->xlate_extended))
		return -EINVAL;

	mutex_lock(&icc_lock);
	list_add_tail(&provider->provider_list, &icc_providers);
	mutex_unlock(&icc_lock);

	dev_dbg(provider->dev, "interconnect provider registered\n");

	return 0;
}
EXPORT_SYMBOL_GPL(icc_provider_register);
/**
 * icc_provider_deregister() - deregister an interconnect provider
 * @provider: the interconnect provider to deregister
 */
void icc_provider_deregister(struct icc_provider *provider)
{
	mutex_lock(&icc_lock);

	/* paths still holding references to this provider indicate a bug */
	WARN_ON(provider->users);

	list_del(&provider->provider_list);
	mutex_unlock(&icc_lock);
}
EXPORT_SYMBOL_GPL(icc_provider_deregister);
/*
 * DT nodes that advertise #interconnect-cells but must not be counted
 * as providers by of_count_icc_providers() below.
 */
static const struct of_device_id __maybe_unused ignore_list[] = {
	{ .compatible = "qcom,sc7180-ipa-virt" },
	{ .compatible = "qcom,sc8180x-ipa-virt" },
	{ .compatible = "qcom,sdx55-ipa-virt" },
	{ .compatible = "qcom,sm8150-ipa-virt" },
	{ .compatible = "qcom,sm8250-ipa-virt" },
	{}
};
  879. static int of_count_icc_providers(struct device_node *np)
  880. {
  881. struct device_node *child;
  882. int count = 0;
  883. for_each_available_child_of_node(np, child) {
  884. if (of_property_read_bool(child, "#interconnect-cells") &&
  885. likely(!of_match_node(ignore_list, child)))
  886. count++;
  887. count += of_count_icc_providers(child);
  888. }
  889. return count;
  890. }
/*
 * icc_sync_state() - sync_state callback shared by all providers
 *
 * Counts invocations; only once every provider counted at boot has
 * reached sync_state does it drop the boot-time bandwidth floors and
 * re-program each node from the real consumer requests.
 */
void icc_sync_state(struct device *dev)
{
	struct icc_provider *p;
	struct icc_node *n;
	/* number of providers that have reached sync_state so far */
	static int count;

	count++;

	if (count < providers_count)
		return;

	mutex_lock(&icc_lock);
	mutex_lock(&icc_bw_lock);
	/* from now on aggregate_requests() stops applying init floors */
	synced_state = true;
	list_for_each_entry(p, &icc_providers, provider_list) {
		dev_dbg(p->dev, "interconnect provider is in synced state\n");
		list_for_each_entry(n, &p->nodes, node_list) {
			if (n->init_avg || n->init_peak) {
				n->init_avg = 0;
				n->init_peak = 0;
				/* re-aggregate without the floor and push to hw */
				aggregate_requests(n);
				p->set(n, n);
			}
		}
	}
	mutex_unlock(&icc_bw_lock);
	mutex_unlock(&icc_lock);
}
EXPORT_SYMBOL_GPL(icc_sync_state);
/*
 * Framework init: prime lockdep, count the expected providers for
 * icc_sync_state(), and create the debugfs entries.
 */
static int __init icc_init(void)
{
	struct device_node *root;

	/* Teach lockdep about lock ordering wrt. shrinker: */
	fs_reclaim_acquire(GFP_KERNEL);
	might_lock(&icc_bw_lock);
	fs_reclaim_release(GFP_KERNEL);

	root = of_find_node_by_path("/");

	/* baseline for the sync_state countdown in icc_sync_state() */
	providers_count = of_count_icc_providers(root);
	of_node_put(root);

	icc_debugfs_dir = debugfs_create_dir("interconnect", NULL);
	debugfs_create_file("interconnect_summary", 0444,
			    icc_debugfs_dir, NULL, &icc_summary_fops);
	debugfs_create_file("interconnect_graph", 0444,
			    icc_debugfs_dir, NULL, &icc_graph_fops);

	icc_debugfs_client_init(icc_debugfs_dir);

	return 0;
}
device_initcall(icc_init);