svc.c

// SPDX-License-Identifier: GPL-2.0
/*
 * SVC Greybus driver.
 *
 * Copyright 2015 Google Inc.
 * Copyright 2015 Linaro Ltd.
 */

#include <linux/debugfs.h>
#include <linux/kstrtox.h>
#include <linux/workqueue.h>

#include <linux/greybus.h>
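
/*
 * Timeouts, in milliseconds, for SVC operations that are known to take longer
 * than the default operation timeout; they are passed to
 * gb_operation_sync_timeout() by the callers below.
 */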
#define SVC_INTF_EJECT_TIMEOUT		9000
#define SVC_INTF_ACTIVATE_TIMEOUT	6000
#define SVC_INTF_RESUME_TIMEOUT		3000
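
/*
 * Several incoming SVC requests are not handled directly in the request
 * handler; instead they are wrapped in a gb_svc_deferred_request, queued on
 * svc->wq and processed later by gb_svc_process_deferred_request().
 */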
struct gb_svc_deferred_request {
	struct work_struct work;
	struct gb_operation *operation;
};

static int gb_svc_queue_deferred_request(struct gb_operation *operation);
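
/*
 * sysfs attributes exposed by the SVC device; they are collected in
 * svc_groups[] further down and attached to the device in gb_svc_create().
 */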
static ssize_t endo_id_show(struct device *dev,
			    struct device_attribute *attr, char *buf)
{
	struct gb_svc *svc = to_gb_svc(dev);

	return sprintf(buf, "0x%04x\n", svc->endo_id);
}
static DEVICE_ATTR_RO(endo_id);

static ssize_t ap_intf_id_show(struct device *dev,
			       struct device_attribute *attr, char *buf)
{
	struct gb_svc *svc = to_gb_svc(dev);

	return sprintf(buf, "%u\n", svc->ap_intf_id);
}
static DEVICE_ATTR_RO(ap_intf_id);

/*
 * FIXME
 * This is a hack, we need to do this "right" and clean the interface up
 * properly, not just forcibly yank the thing out of the system and hope for the
 * best. But for now, people want their modules to come out without having to
 * throw the thing to the ground or get out a screwdriver.
 */
static ssize_t intf_eject_store(struct device *dev,
				struct device_attribute *attr, const char *buf,
				size_t len)
{
	struct gb_svc *svc = to_gb_svc(dev);
	unsigned short intf_id;
	int ret;

	ret = kstrtou16(buf, 10, &intf_id);
	if (ret < 0)
		return ret;

	dev_warn(dev, "Forcibly trying to eject interface %d\n", intf_id);

	ret = gb_svc_intf_eject(svc, intf_id);
	if (ret < 0)
		return ret;

	return len;
}
static DEVICE_ATTR_WO(intf_eject);

static ssize_t watchdog_show(struct device *dev, struct device_attribute *attr,
			     char *buf)
{
	struct gb_svc *svc = to_gb_svc(dev);

	return sprintf(buf, "%s\n",
		       gb_svc_watchdog_enabled(svc) ? "enabled" : "disabled");
}

static ssize_t watchdog_store(struct device *dev,
			      struct device_attribute *attr, const char *buf,
			      size_t len)
{
	struct gb_svc *svc = to_gb_svc(dev);
	int retval;
	bool user_request;

	retval = kstrtobool(buf, &user_request);
	if (retval)
		return retval;

	if (user_request)
		retval = gb_svc_watchdog_enable(svc);
	else
		retval = gb_svc_watchdog_disable(svc);
	if (retval)
		return retval;
	return len;
}
static DEVICE_ATTR_RW(watchdog);

static ssize_t watchdog_action_show(struct device *dev,
				    struct device_attribute *attr, char *buf)
{
	struct gb_svc *svc = to_gb_svc(dev);

	if (svc->action == GB_SVC_WATCHDOG_BITE_PANIC_KERNEL)
		return sprintf(buf, "panic\n");
	else if (svc->action == GB_SVC_WATCHDOG_BITE_RESET_UNIPRO)
		return sprintf(buf, "reset\n");

	return -EINVAL;
}

static ssize_t watchdog_action_store(struct device *dev,
				     struct device_attribute *attr,
				     const char *buf, size_t len)
{
	struct gb_svc *svc = to_gb_svc(dev);

	if (sysfs_streq(buf, "panic"))
		svc->action = GB_SVC_WATCHDOG_BITE_PANIC_KERNEL;
	else if (sysfs_streq(buf, "reset"))
		svc->action = GB_SVC_WATCHDOG_BITE_RESET_UNIPRO;
	else
		return -EINVAL;

	return len;
}
static DEVICE_ATTR_RW(watchdog_action);
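
/*
 * Power-monitor (pwrmon) helpers: ask the SVC how many power rails it has,
 * what their names are, and for voltage/current/power samples. These are
 * used by the debugfs files created in gb_svc_pwrmon_debugfs_init().
 */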
static int gb_svc_pwrmon_rail_count_get(struct gb_svc *svc, u8 *value)
{
	struct gb_svc_pwrmon_rail_count_get_response response;
	int ret;

	ret = gb_operation_sync(svc->connection,
				GB_SVC_TYPE_PWRMON_RAIL_COUNT_GET, NULL, 0,
				&response, sizeof(response));
	if (ret) {
		dev_err(&svc->dev, "failed to get rail count: %d\n", ret);
		return ret;
	}

	*value = response.rail_count;

	return 0;
}

static int gb_svc_pwrmon_rail_names_get(struct gb_svc *svc,
		struct gb_svc_pwrmon_rail_names_get_response *response,
		size_t bufsize)
{
	int ret;

	ret = gb_operation_sync(svc->connection,
				GB_SVC_TYPE_PWRMON_RAIL_NAMES_GET, NULL, 0,
				response, bufsize);
	if (ret) {
		dev_err(&svc->dev, "failed to get rail names: %d\n", ret);
		return ret;
	}

	if (response->status != GB_SVC_OP_SUCCESS) {
		dev_err(&svc->dev,
			"SVC error while getting rail names: %u\n",
			response->status);
		return -EREMOTEIO;
	}

	return 0;
}

static int gb_svc_pwrmon_sample_get(struct gb_svc *svc, u8 rail_id,
				    u8 measurement_type, u32 *value)
{
	struct gb_svc_pwrmon_sample_get_request request;
	struct gb_svc_pwrmon_sample_get_response response;
	int ret;

	request.rail_id = rail_id;
	request.measurement_type = measurement_type;

	ret = gb_operation_sync(svc->connection, GB_SVC_TYPE_PWRMON_SAMPLE_GET,
				&request, sizeof(request),
				&response, sizeof(response));
	if (ret) {
		dev_err(&svc->dev, "failed to get rail sample: %d\n", ret);
		return ret;
	}

	if (response.result) {
		dev_err(&svc->dev,
			"UniPro error while getting rail power sample (%d %d): %d\n",
			rail_id, measurement_type, response.result);
		switch (response.result) {
		case GB_SVC_PWRMON_GET_SAMPLE_INVAL:
			return -EINVAL;
		case GB_SVC_PWRMON_GET_SAMPLE_NOSUPP:
			return -ENOMSG;
		default:
			return -EREMOTEIO;
		}
	}

	*value = le32_to_cpu(response.measurement);

	return 0;
}

int gb_svc_pwrmon_intf_sample_get(struct gb_svc *svc, u8 intf_id,
				  u8 measurement_type, u32 *value)
{
	struct gb_svc_pwrmon_intf_sample_get_request request;
	struct gb_svc_pwrmon_intf_sample_get_response response;
	int ret;

	request.intf_id = intf_id;
	request.measurement_type = measurement_type;

	ret = gb_operation_sync(svc->connection,
				GB_SVC_TYPE_PWRMON_INTF_SAMPLE_GET,
				&request, sizeof(request),
				&response, sizeof(response));
	if (ret) {
		dev_err(&svc->dev, "failed to get intf sample: %d\n", ret);
		return ret;
	}

	if (response.result) {
		dev_err(&svc->dev,
			"UniPro error while getting intf power sample (%d %d): %d\n",
			intf_id, measurement_type, response.result);
		switch (response.result) {
		case GB_SVC_PWRMON_GET_SAMPLE_INVAL:
			return -EINVAL;
		case GB_SVC_PWRMON_GET_SAMPLE_NOSUPP:
			return -ENOMSG;
		default:
			return -EREMOTEIO;
		}
	}

	*value = le32_to_cpu(response.measurement);

	return 0;
}

static struct attribute *svc_attrs[] = {
	&dev_attr_endo_id.attr,
	&dev_attr_ap_intf_id.attr,
	&dev_attr_intf_eject.attr,
	&dev_attr_watchdog.attr,
	&dev_attr_watchdog_action.attr,
	NULL,
};
ATTRIBUTE_GROUPS(svc);
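
/*
 * Thin wrappers around synchronous SVC operations (AP -> SVC). Each one
 * builds the request, sends it over svc->connection and, where the operation
 * defines one, checks the status or result code in the response.
 */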
int gb_svc_intf_device_id(struct gb_svc *svc, u8 intf_id, u8 device_id)
{
	struct gb_svc_intf_device_id_request request;

	request.intf_id = intf_id;
	request.device_id = device_id;

	return gb_operation_sync(svc->connection, GB_SVC_TYPE_INTF_DEVICE_ID,
				 &request, sizeof(request), NULL, 0);
}

int gb_svc_intf_eject(struct gb_svc *svc, u8 intf_id)
{
	struct gb_svc_intf_eject_request request;
	int ret;

	request.intf_id = intf_id;

	/*
	 * The pulse width for module release in the SVC is long, so we need to
	 * increase the timeout so that the operation will not return too soon.
	 */
	ret = gb_operation_sync_timeout(svc->connection,
					GB_SVC_TYPE_INTF_EJECT, &request,
					sizeof(request), NULL, 0,
					SVC_INTF_EJECT_TIMEOUT);
	if (ret) {
		dev_err(&svc->dev, "failed to eject interface %u\n", intf_id);
		return ret;
	}

	return 0;
}
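
/*
 * The vsys, refclk and unipro helpers below all follow the same pattern:
 * select the enable or disable operation type, send the request and check
 * the result code returned in the response.
 */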
int gb_svc_intf_vsys_set(struct gb_svc *svc, u8 intf_id, bool enable)
{
	struct gb_svc_intf_vsys_request request;
	struct gb_svc_intf_vsys_response response;
	int type, ret;

	request.intf_id = intf_id;

	if (enable)
		type = GB_SVC_TYPE_INTF_VSYS_ENABLE;
	else
		type = GB_SVC_TYPE_INTF_VSYS_DISABLE;

	ret = gb_operation_sync(svc->connection, type,
				&request, sizeof(request),
				&response, sizeof(response));
	if (ret < 0)
		return ret;
	if (response.result_code != GB_SVC_INTF_VSYS_OK)
		return -EREMOTEIO;
	return 0;
}

int gb_svc_intf_refclk_set(struct gb_svc *svc, u8 intf_id, bool enable)
{
	struct gb_svc_intf_refclk_request request;
	struct gb_svc_intf_refclk_response response;
	int type, ret;

	request.intf_id = intf_id;

	if (enable)
		type = GB_SVC_TYPE_INTF_REFCLK_ENABLE;
	else
		type = GB_SVC_TYPE_INTF_REFCLK_DISABLE;

	ret = gb_operation_sync(svc->connection, type,
				&request, sizeof(request),
				&response, sizeof(response));
	if (ret < 0)
		return ret;
	if (response.result_code != GB_SVC_INTF_REFCLK_OK)
		return -EREMOTEIO;
	return 0;
}

int gb_svc_intf_unipro_set(struct gb_svc *svc, u8 intf_id, bool enable)
{
	struct gb_svc_intf_unipro_request request;
	struct gb_svc_intf_unipro_response response;
	int type, ret;

	request.intf_id = intf_id;

	if (enable)
		type = GB_SVC_TYPE_INTF_UNIPRO_ENABLE;
	else
		type = GB_SVC_TYPE_INTF_UNIPRO_DISABLE;

	ret = gb_operation_sync(svc->connection, type,
				&request, sizeof(request),
				&response, sizeof(response));
	if (ret < 0)
		return ret;
	if (response.result_code != GB_SVC_INTF_UNIPRO_OK)
		return -EREMOTEIO;
	return 0;
}

int gb_svc_intf_activate(struct gb_svc *svc, u8 intf_id, u8 *intf_type)
{
	struct gb_svc_intf_activate_request request;
	struct gb_svc_intf_activate_response response;
	int ret;

	request.intf_id = intf_id;

	ret = gb_operation_sync_timeout(svc->connection,
					GB_SVC_TYPE_INTF_ACTIVATE,
					&request, sizeof(request),
					&response, sizeof(response),
					SVC_INTF_ACTIVATE_TIMEOUT);
	if (ret < 0)
		return ret;
	if (response.status != GB_SVC_OP_SUCCESS) {
		dev_err(&svc->dev, "failed to activate interface %u: %u\n",
			intf_id, response.status);
		return -EREMOTEIO;
	}

	*intf_type = response.intf_type;

	return 0;
}

int gb_svc_intf_resume(struct gb_svc *svc, u8 intf_id)
{
	struct gb_svc_intf_resume_request request;
	struct gb_svc_intf_resume_response response;
	int ret;

	request.intf_id = intf_id;

	ret = gb_operation_sync_timeout(svc->connection,
					GB_SVC_TYPE_INTF_RESUME,
					&request, sizeof(request),
					&response, sizeof(response),
					SVC_INTF_RESUME_TIMEOUT);
	if (ret < 0) {
		dev_err(&svc->dev, "failed to send interface resume %u: %d\n",
			intf_id, ret);
		return ret;
	}

	if (response.status != GB_SVC_OP_SUCCESS) {
		dev_err(&svc->dev, "failed to resume interface %u: %u\n",
			intf_id, response.status);
		return -EREMOTEIO;
	}

	return 0;
}

int gb_svc_dme_peer_get(struct gb_svc *svc, u8 intf_id, u16 attr, u16 selector,
			u32 *value)
{
	struct gb_svc_dme_peer_get_request request;
	struct gb_svc_dme_peer_get_response response;
	u16 result;
	int ret;

	request.intf_id = intf_id;
	request.attr = cpu_to_le16(attr);
	request.selector = cpu_to_le16(selector);

	ret = gb_operation_sync(svc->connection, GB_SVC_TYPE_DME_PEER_GET,
				&request, sizeof(request),
				&response, sizeof(response));
	if (ret) {
		dev_err(&svc->dev, "failed to get DME attribute (%u 0x%04x %u): %d\n",
			intf_id, attr, selector, ret);
		return ret;
	}

	result = le16_to_cpu(response.result_code);
	if (result) {
		dev_err(&svc->dev, "UniPro error while getting DME attribute (%u 0x%04x %u): %u\n",
			intf_id, attr, selector, result);
		return -EREMOTEIO;
	}

	if (value)
		*value = le32_to_cpu(response.attr_value);

	return 0;
}

int gb_svc_dme_peer_set(struct gb_svc *svc, u8 intf_id, u16 attr, u16 selector,
			u32 value)
{
	struct gb_svc_dme_peer_set_request request;
	struct gb_svc_dme_peer_set_response response;
	u16 result;
	int ret;

	request.intf_id = intf_id;
	request.attr = cpu_to_le16(attr);
	request.selector = cpu_to_le16(selector);
	request.value = cpu_to_le32(value);

	ret = gb_operation_sync(svc->connection, GB_SVC_TYPE_DME_PEER_SET,
				&request, sizeof(request),
				&response, sizeof(response));
	if (ret) {
		dev_err(&svc->dev, "failed to set DME attribute (%u 0x%04x %u %u): %d\n",
			intf_id, attr, selector, value, ret);
		return ret;
	}

	result = le16_to_cpu(response.result_code);
	if (result) {
		dev_err(&svc->dev, "UniPro error while setting DME attribute (%u 0x%04x %u %u): %u\n",
			intf_id, attr, selector, value, result);
		return -EREMOTEIO;
	}

	return 0;
}
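
/*
 * Connection create/destroy set up and tear down a connection between two
 * CPorts (identified by interface id and CPort id); route create/destroy
 * manage the corresponding switch routes between two interfaces.
 */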
int gb_svc_connection_create(struct gb_svc *svc,
			     u8 intf1_id, u16 cport1_id,
			     u8 intf2_id, u16 cport2_id,
			     u8 cport_flags)
{
	struct gb_svc_conn_create_request request;

	request.intf1_id = intf1_id;
	request.cport1_id = cpu_to_le16(cport1_id);
	request.intf2_id = intf2_id;
	request.cport2_id = cpu_to_le16(cport2_id);
	request.tc = 0;		/* TC0 */
	request.flags = cport_flags;

	return gb_operation_sync(svc->connection, GB_SVC_TYPE_CONN_CREATE,
				 &request, sizeof(request), NULL, 0);
}

void gb_svc_connection_destroy(struct gb_svc *svc, u8 intf1_id, u16 cport1_id,
			       u8 intf2_id, u16 cport2_id)
{
	struct gb_svc_conn_destroy_request request;
	struct gb_connection *connection = svc->connection;
	int ret;

	request.intf1_id = intf1_id;
	request.cport1_id = cpu_to_le16(cport1_id);
	request.intf2_id = intf2_id;
	request.cport2_id = cpu_to_le16(cport2_id);

	ret = gb_operation_sync(connection, GB_SVC_TYPE_CONN_DESTROY,
				&request, sizeof(request), NULL, 0);
	if (ret) {
		dev_err(&svc->dev, "failed to destroy connection (%u:%u %u:%u): %d\n",
			intf1_id, cport1_id, intf2_id, cport2_id, ret);
	}
}

/* Creates bi-directional routes between the devices */
int gb_svc_route_create(struct gb_svc *svc, u8 intf1_id, u8 dev1_id,
			u8 intf2_id, u8 dev2_id)
{
	struct gb_svc_route_create_request request;

	request.intf1_id = intf1_id;
	request.dev1_id = dev1_id;
	request.intf2_id = intf2_id;
	request.dev2_id = dev2_id;

	return gb_operation_sync(svc->connection, GB_SVC_TYPE_ROUTE_CREATE,
				 &request, sizeof(request), NULL, 0);
}

/* Destroys bi-directional routes between the devices */
void gb_svc_route_destroy(struct gb_svc *svc, u8 intf1_id, u8 intf2_id)
{
	struct gb_svc_route_destroy_request request;
	int ret;

	request.intf1_id = intf1_id;
	request.intf2_id = intf2_id;

	ret = gb_operation_sync(svc->connection, GB_SVC_TYPE_ROUTE_DESTROY,
				&request, sizeof(request), NULL, 0);
	if (ret) {
		dev_err(&svc->dev, "failed to destroy route (%u %u): %d\n",
			intf1_id, intf2_id, ret);
	}
}
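
/*
 * gb_svc_intf_set_power_mode() reconfigures the UniPro link power mode for an
 * interface: HS/PWM series, gear and number of lanes per direction, amplitude
 * and de-emphasis settings, plus optional L2 timer data for both link ends.
 */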
int gb_svc_intf_set_power_mode(struct gb_svc *svc, u8 intf_id, u8 hs_series,
			       u8 tx_mode, u8 tx_gear, u8 tx_nlanes,
			       u8 tx_amplitude, u8 tx_hs_equalizer,
			       u8 rx_mode, u8 rx_gear, u8 rx_nlanes,
			       u8 flags, u32 quirks,
			       struct gb_svc_l2_timer_cfg *local,
			       struct gb_svc_l2_timer_cfg *remote)
{
	struct gb_svc_intf_set_pwrm_request request;
	struct gb_svc_intf_set_pwrm_response response;
	int ret;
	u16 result_code;

	memset(&request, 0, sizeof(request));

	request.intf_id = intf_id;
	request.hs_series = hs_series;
	request.tx_mode = tx_mode;
	request.tx_gear = tx_gear;
	request.tx_nlanes = tx_nlanes;
	request.tx_amplitude = tx_amplitude;
	request.tx_hs_equalizer = tx_hs_equalizer;
	request.rx_mode = rx_mode;
	request.rx_gear = rx_gear;
	request.rx_nlanes = rx_nlanes;
	request.flags = flags;
	request.quirks = cpu_to_le32(quirks);
	if (local)
		request.local_l2timerdata = *local;
	if (remote)
		request.remote_l2timerdata = *remote;

	ret = gb_operation_sync(svc->connection, GB_SVC_TYPE_INTF_SET_PWRM,
				&request, sizeof(request),
				&response, sizeof(response));
	if (ret < 0)
		return ret;

	result_code = response.result_code;
	if (result_code != GB_SVC_SETPWRM_PWR_LOCAL) {
		dev_err(&svc->dev, "set power mode = %d\n", result_code);
		return -EIO;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(gb_svc_intf_set_power_mode);

int gb_svc_intf_set_power_mode_hibernate(struct gb_svc *svc, u8 intf_id)
{
	struct gb_svc_intf_set_pwrm_request request;
	struct gb_svc_intf_set_pwrm_response response;
	int ret;
	u16 result_code;

	memset(&request, 0, sizeof(request));

	request.intf_id = intf_id;
	request.hs_series = GB_SVC_UNIPRO_HS_SERIES_A;
	request.tx_mode = GB_SVC_UNIPRO_HIBERNATE_MODE;
	request.rx_mode = GB_SVC_UNIPRO_HIBERNATE_MODE;

	ret = gb_operation_sync(svc->connection, GB_SVC_TYPE_INTF_SET_PWRM,
				&request, sizeof(request),
				&response, sizeof(response));
	if (ret < 0) {
		dev_err(&svc->dev,
			"failed to send set power mode operation to interface %u: %d\n",
			intf_id, ret);
		return ret;
	}

	result_code = response.result_code;
	if (result_code != GB_SVC_SETPWRM_PWR_OK) {
		dev_err(&svc->dev,
			"failed to hibernate the link for interface %u: %u\n",
			intf_id, result_code);
		return -EIO;
	}

	return 0;
}

int gb_svc_ping(struct gb_svc *svc)
{
	return gb_operation_sync_timeout(svc->connection, GB_SVC_TYPE_PING,
					 NULL, 0, NULL, 0,
					 GB_OPERATION_TIMEOUT_DEFAULT * 2);
}
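
/*
 * Handlers for incoming (SVC -> AP) requests start here. The protocol
 * version exchange is answered directly; most other events are queued as
 * deferred requests and handled from the workqueue.
 */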
static int gb_svc_version_request(struct gb_operation *op)
{
	struct gb_connection *connection = op->connection;
	struct gb_svc *svc = gb_connection_get_data(connection);
	struct gb_svc_version_request *request;
	struct gb_svc_version_response *response;

	if (op->request->payload_size < sizeof(*request)) {
		dev_err(&svc->dev, "short version request (%zu < %zu)\n",
			op->request->payload_size,
			sizeof(*request));
		return -EINVAL;
	}

	request = op->request->payload;

	if (request->major > GB_SVC_VERSION_MAJOR) {
		dev_warn(&svc->dev, "unsupported major version (%u > %u)\n",
			 request->major, GB_SVC_VERSION_MAJOR);
		return -ENOTSUPP;
	}

	svc->protocol_major = request->major;
	svc->protocol_minor = request->minor;

	if (!gb_operation_response_alloc(op, sizeof(*response), GFP_KERNEL))
		return -ENOMEM;

	response = op->response->payload;
	response->major = svc->protocol_major;
	response->minor = svc->protocol_minor;

	return 0;
}

static ssize_t pwr_debugfs_voltage_read(struct file *file, char __user *buf,
					size_t len, loff_t *offset)
{
	struct svc_debugfs_pwrmon_rail *pwrmon_rails =
		file_inode(file)->i_private;
	struct gb_svc *svc = pwrmon_rails->svc;
	int ret, desc;
	u32 value;
	char buff[16];

	ret = gb_svc_pwrmon_sample_get(svc, pwrmon_rails->id,
				       GB_SVC_PWRMON_TYPE_VOL, &value);
	if (ret) {
		dev_err(&svc->dev,
			"failed to get voltage sample %u: %d\n",
			pwrmon_rails->id, ret);
		return ret;
	}

	desc = scnprintf(buff, sizeof(buff), "%u\n", value);

	return simple_read_from_buffer(buf, len, offset, buff, desc);
}

static ssize_t pwr_debugfs_current_read(struct file *file, char __user *buf,
					size_t len, loff_t *offset)
{
	struct svc_debugfs_pwrmon_rail *pwrmon_rails =
		file_inode(file)->i_private;
	struct gb_svc *svc = pwrmon_rails->svc;
	int ret, desc;
	u32 value;
	char buff[16];

	ret = gb_svc_pwrmon_sample_get(svc, pwrmon_rails->id,
				       GB_SVC_PWRMON_TYPE_CURR, &value);
	if (ret) {
		dev_err(&svc->dev,
			"failed to get current sample %u: %d\n",
			pwrmon_rails->id, ret);
		return ret;
	}

	desc = scnprintf(buff, sizeof(buff), "%u\n", value);

	return simple_read_from_buffer(buf, len, offset, buff, desc);
}

static ssize_t pwr_debugfs_power_read(struct file *file, char __user *buf,
				      size_t len, loff_t *offset)
{
	struct svc_debugfs_pwrmon_rail *pwrmon_rails =
		file_inode(file)->i_private;
	struct gb_svc *svc = pwrmon_rails->svc;
	int ret, desc;
	u32 value;
	char buff[16];

	ret = gb_svc_pwrmon_sample_get(svc, pwrmon_rails->id,
				       GB_SVC_PWRMON_TYPE_PWR, &value);
	if (ret) {
		dev_err(&svc->dev, "failed to get power sample %u: %d\n",
			pwrmon_rails->id, ret);
		return ret;
	}

	desc = scnprintf(buff, sizeof(buff), "%u\n", value);

	return simple_read_from_buffer(buf, len, offset, buff, desc);
}

static const struct file_operations pwrmon_debugfs_voltage_fops = {
	.read = pwr_debugfs_voltage_read,
};

static const struct file_operations pwrmon_debugfs_current_fops = {
	.read = pwr_debugfs_current_read,
};

static const struct file_operations pwrmon_debugfs_power_fops = {
	.read = pwr_debugfs_power_read,
};
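
/*
 * Creates a per-rail debugfs hierarchy below the SVC debugfs directory:
 * pwrmon/<rail-name>/{voltage_now,current_now,power_now}.
 */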
static void gb_svc_pwrmon_debugfs_init(struct gb_svc *svc)
{
	int i;
	size_t bufsize;
	struct dentry *dent;
	struct gb_svc_pwrmon_rail_names_get_response *rail_names;
	u8 rail_count;

	dent = debugfs_create_dir("pwrmon", svc->debugfs_dentry);
	if (IS_ERR_OR_NULL(dent))
		return;

	if (gb_svc_pwrmon_rail_count_get(svc, &rail_count))
		goto err_pwrmon_debugfs;

	if (!rail_count || rail_count > GB_SVC_PWRMON_MAX_RAIL_COUNT)
		goto err_pwrmon_debugfs;

	bufsize = sizeof(*rail_names) +
		GB_SVC_PWRMON_RAIL_NAME_BUFSIZE * rail_count;

	rail_names = kzalloc(bufsize, GFP_KERNEL);
	if (!rail_names)
		goto err_pwrmon_debugfs;

	svc->pwrmon_rails = kcalloc(rail_count, sizeof(*svc->pwrmon_rails),
				    GFP_KERNEL);
	if (!svc->pwrmon_rails)
		goto err_pwrmon_debugfs_free;

	if (gb_svc_pwrmon_rail_names_get(svc, rail_names, bufsize))
		goto err_pwrmon_debugfs_free;

	for (i = 0; i < rail_count; i++) {
		struct dentry *dir;
		struct svc_debugfs_pwrmon_rail *rail = &svc->pwrmon_rails[i];
		char fname[GB_SVC_PWRMON_RAIL_NAME_BUFSIZE];

		snprintf(fname, sizeof(fname), "%s",
			 (char *)&rail_names->name[i]);

		rail->id = i;
		rail->svc = svc;

		dir = debugfs_create_dir(fname, dent);
		debugfs_create_file("voltage_now", 0444, dir, rail,
				    &pwrmon_debugfs_voltage_fops);
		debugfs_create_file("current_now", 0444, dir, rail,
				    &pwrmon_debugfs_current_fops);
		debugfs_create_file("power_now", 0444, dir, rail,
				    &pwrmon_debugfs_power_fops);
	}

	kfree(rail_names);
	return;

err_pwrmon_debugfs_free:
	kfree(rail_names);
	kfree(svc->pwrmon_rails);
	svc->pwrmon_rails = NULL;

err_pwrmon_debugfs:
	debugfs_remove(dent);
}

static void gb_svc_debugfs_init(struct gb_svc *svc)
{
	svc->debugfs_dentry = debugfs_create_dir(dev_name(&svc->dev),
						 gb_debugfs_get());
	gb_svc_pwrmon_debugfs_init(svc);
}

static void gb_svc_debugfs_exit(struct gb_svc *svc)
{
	debugfs_remove_recursive(svc->debugfs_dentry);
	kfree(svc->pwrmon_rails);
	svc->pwrmon_rails = NULL;
}
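
/*
 * The hello request carries the endo id and the AP's interface id; only once
 * it has arrived do we know enough to register the SVC device, create the
 * watchdog and set up debugfs.
 */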
static int gb_svc_hello(struct gb_operation *op)
{
	struct gb_connection *connection = op->connection;
	struct gb_svc *svc = gb_connection_get_data(connection);
	struct gb_svc_hello_request *hello_request;
	int ret;

	if (op->request->payload_size < sizeof(*hello_request)) {
		dev_warn(&svc->dev, "short hello request (%zu < %zu)\n",
			 op->request->payload_size,
			 sizeof(*hello_request));
		return -EINVAL;
	}

	hello_request = op->request->payload;
	svc->endo_id = le16_to_cpu(hello_request->endo_id);
	svc->ap_intf_id = hello_request->interface_id;

	ret = device_add(&svc->dev);
	if (ret) {
		dev_err(&svc->dev, "failed to register svc device: %d\n", ret);
		return ret;
	}

	ret = gb_svc_watchdog_create(svc);
	if (ret) {
		dev_err(&svc->dev, "failed to create watchdog: %d\n", ret);
		goto err_deregister_svc;
	}

	/*
	 * FIXME: This is a temporary hack to reconfigure the link at HELLO
	 * (which abuses the deferred request processing mechanism).
	 */
	ret = gb_svc_queue_deferred_request(op);
	if (ret)
		goto err_destroy_watchdog;

	gb_svc_debugfs_init(svc);

	return 0;

err_destroy_watchdog:
	gb_svc_watchdog_destroy(svc);
err_deregister_svc:
	device_del(&svc->dev);

	return ret;
}

static struct gb_interface *gb_svc_interface_lookup(struct gb_svc *svc,
						    u8 intf_id)
{
	struct gb_host_device *hd = svc->hd;
	struct gb_module *module;
	size_t num_interfaces;
	u8 module_id;

	list_for_each_entry(module, &hd->modules, hd_node) {
		module_id = module->module_id;
		num_interfaces = module->num_interfaces;

		if (intf_id >= module_id &&
		    intf_id < module_id + num_interfaces) {
			return module->interfaces[intf_id - module_id];
		}
	}

	return NULL;
}

static struct gb_module *gb_svc_module_lookup(struct gb_svc *svc, u8 module_id)
{
	struct gb_host_device *hd = svc->hd;
	struct gb_module *module;

	list_for_each_entry(module, &hd->modules, hd_node) {
		if (module->module_id == module_id)
			return module;
	}

	return NULL;
}

static void gb_svc_process_hello_deferred(struct gb_operation *operation)
{
	struct gb_connection *connection = operation->connection;
	struct gb_svc *svc = gb_connection_get_data(connection);
	int ret;

	/*
	 * XXX This is a hack/work-around to reconfigure the APBridgeA-Switch
	 * link to PWM G2, 1 Lane, Slow Auto, so that it has sufficient
	 * bandwidth for 3 audio streams plus boot-over-UniPro of a hot-plugged
	 * module.
	 *
	 * The code should be removed once SW-2217, Heuristic for UniPro
	 * Power Mode Changes is resolved.
	 */
	ret = gb_svc_intf_set_power_mode(svc, svc->ap_intf_id,
					 GB_SVC_UNIPRO_HS_SERIES_A,
					 GB_SVC_UNIPRO_SLOW_AUTO_MODE,
					 2, 1,
					 GB_SVC_SMALL_AMPLITUDE,
					 GB_SVC_NO_DE_EMPHASIS,
					 GB_SVC_UNIPRO_SLOW_AUTO_MODE,
					 2, 1,
					 0, 0,
					 NULL, NULL);
	if (ret)
		dev_warn(&svc->dev,
			 "power mode change failed on AP to switch link: %d\n",
			 ret);
}

static void gb_svc_process_module_inserted(struct gb_operation *operation)
{
	struct gb_svc_module_inserted_request *request;
	struct gb_connection *connection = operation->connection;
	struct gb_svc *svc = gb_connection_get_data(connection);
	struct gb_host_device *hd = svc->hd;
	struct gb_module *module;
	size_t num_interfaces;
	u8 module_id;
	u16 flags;
	int ret;

	/* The request message size has already been verified. */
	request = operation->request->payload;
	module_id = request->primary_intf_id;
	num_interfaces = request->intf_count;
	flags = le16_to_cpu(request->flags);

	dev_dbg(&svc->dev, "%s - id = %u, num_interfaces = %zu, flags = 0x%04x\n",
		__func__, module_id, num_interfaces, flags);

	if (flags & GB_SVC_MODULE_INSERTED_FLAG_NO_PRIMARY) {
		dev_warn(&svc->dev, "no primary interface detected on module %u\n",
			 module_id);
	}

	module = gb_svc_module_lookup(svc, module_id);
	if (module) {
		dev_warn(&svc->dev, "unexpected module-inserted event %u\n",
			 module_id);
		return;
	}

	module = gb_module_create(hd, module_id, num_interfaces);
	if (!module) {
		dev_err(&svc->dev, "failed to create module\n");
		return;
	}

	ret = gb_module_add(module);
	if (ret) {
		gb_module_put(module);
		return;
	}

	list_add(&module->hd_node, &hd->modules);
}

static void gb_svc_process_module_removed(struct gb_operation *operation)
{
	struct gb_svc_module_removed_request *request;
	struct gb_connection *connection = operation->connection;
	struct gb_svc *svc = gb_connection_get_data(connection);
	struct gb_module *module;
	u8 module_id;

	/* The request message size has already been verified. */
	request = operation->request->payload;
	module_id = request->primary_intf_id;

	dev_dbg(&svc->dev, "%s - id = %u\n", __func__, module_id);

	module = gb_svc_module_lookup(svc, module_id);
	if (!module) {
		dev_warn(&svc->dev, "unexpected module-removed event %u\n",
			 module_id);
		return;
	}

	module->disconnected = true;

	gb_module_del(module);
	list_del(&module->hd_node);
	gb_module_put(module);
}

static void gb_svc_process_intf_oops(struct gb_operation *operation)
{
	struct gb_svc_intf_oops_request *request;
	struct gb_connection *connection = operation->connection;
	struct gb_svc *svc = gb_connection_get_data(connection);
	struct gb_interface *intf;
	u8 intf_id;
	u8 reason;

	/* The request message size has already been verified. */
	request = operation->request->payload;
	intf_id = request->intf_id;
	reason = request->reason;

	intf = gb_svc_interface_lookup(svc, intf_id);
	if (!intf) {
		dev_warn(&svc->dev, "unexpected interface-oops event %u\n",
			 intf_id);
		return;
	}

	dev_info(&svc->dev, "Deactivating interface %u, interface oops reason = %u\n",
		 intf_id, reason);

	mutex_lock(&intf->mutex);
	intf->disconnected = true;
	gb_interface_disable(intf);
	gb_interface_deactivate(intf);
	mutex_unlock(&intf->mutex);
}

static void gb_svc_process_intf_mailbox_event(struct gb_operation *operation)
{
	struct gb_svc_intf_mailbox_event_request *request;
	struct gb_connection *connection = operation->connection;
	struct gb_svc *svc = gb_connection_get_data(connection);
	struct gb_interface *intf;
	u8 intf_id;
	u16 result_code;
	u32 mailbox;

	/* The request message size has already been verified. */
	request = operation->request->payload;
	intf_id = request->intf_id;
	result_code = le16_to_cpu(request->result_code);
	mailbox = le32_to_cpu(request->mailbox);

	dev_dbg(&svc->dev, "%s - id = %u, result = 0x%04x, mailbox = 0x%08x\n",
		__func__, intf_id, result_code, mailbox);

	intf = gb_svc_interface_lookup(svc, intf_id);
	if (!intf) {
		dev_warn(&svc->dev, "unexpected mailbox event %u\n", intf_id);
		return;
	}

	gb_interface_mailbox_event(intf, result_code, mailbox);
}
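
/*
 * Work function for deferred requests: dispatch on the original request type,
 * then drop the operation reference and free the wrapper that were taken and
 * allocated when the request was queued.
 */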
static void gb_svc_process_deferred_request(struct work_struct *work)
{
	struct gb_svc_deferred_request *dr;
	struct gb_operation *operation;
	struct gb_svc *svc;
	u8 type;

	dr = container_of(work, struct gb_svc_deferred_request, work);
	operation = dr->operation;
	svc = gb_connection_get_data(operation->connection);
	type = operation->request->header->type;

	switch (type) {
	case GB_SVC_TYPE_SVC_HELLO:
		gb_svc_process_hello_deferred(operation);
		break;
	case GB_SVC_TYPE_MODULE_INSERTED:
		gb_svc_process_module_inserted(operation);
		break;
	case GB_SVC_TYPE_MODULE_REMOVED:
		gb_svc_process_module_removed(operation);
		break;
	case GB_SVC_TYPE_INTF_MAILBOX_EVENT:
		gb_svc_process_intf_mailbox_event(operation);
		break;
	case GB_SVC_TYPE_INTF_OOPS:
		gb_svc_process_intf_oops(operation);
		break;
	default:
		dev_err(&svc->dev, "bad deferred request type: 0x%02x\n", type);
	}

	gb_operation_put(operation);
	kfree(dr);
}

static int gb_svc_queue_deferred_request(struct gb_operation *operation)
{
	struct gb_svc *svc = gb_connection_get_data(operation->connection);
	struct gb_svc_deferred_request *dr;

	dr = kmalloc(sizeof(*dr), GFP_KERNEL);
	if (!dr)
		return -ENOMEM;

	gb_operation_get(operation);

	dr->operation = operation;
	INIT_WORK(&dr->work, gb_svc_process_deferred_request);

	queue_work(svc->wq, &dr->work);

	return 0;
}

static int gb_svc_intf_reset_recv(struct gb_operation *op)
{
	struct gb_svc *svc = gb_connection_get_data(op->connection);
	struct gb_message *request = op->request;
	struct gb_svc_intf_reset_request *reset;

	if (request->payload_size < sizeof(*reset)) {
		dev_warn(&svc->dev, "short reset request received (%zu < %zu)\n",
			 request->payload_size, sizeof(*reset));
		return -EINVAL;
	}
	reset = request->payload;

	/* FIXME Reset the interface here */

	return 0;
}

static int gb_svc_module_inserted_recv(struct gb_operation *op)
{
	struct gb_svc *svc = gb_connection_get_data(op->connection);
	struct gb_svc_module_inserted_request *request;

	if (op->request->payload_size < sizeof(*request)) {
		dev_warn(&svc->dev, "short module-inserted request received (%zu < %zu)\n",
			 op->request->payload_size, sizeof(*request));
		return -EINVAL;
	}

	request = op->request->payload;

	dev_dbg(&svc->dev, "%s - id = %u\n", __func__,
		request->primary_intf_id);

	return gb_svc_queue_deferred_request(op);
}

static int gb_svc_module_removed_recv(struct gb_operation *op)
{
	struct gb_svc *svc = gb_connection_get_data(op->connection);
	struct gb_svc_module_removed_request *request;

	if (op->request->payload_size < sizeof(*request)) {
		dev_warn(&svc->dev, "short module-removed request received (%zu < %zu)\n",
			 op->request->payload_size, sizeof(*request));
		return -EINVAL;
	}

	request = op->request->payload;

	dev_dbg(&svc->dev, "%s - id = %u\n", __func__,
		request->primary_intf_id);

	return gb_svc_queue_deferred_request(op);
}

static int gb_svc_intf_oops_recv(struct gb_operation *op)
{
	struct gb_svc *svc = gb_connection_get_data(op->connection);
	struct gb_svc_intf_oops_request *request;

	if (op->request->payload_size < sizeof(*request)) {
		dev_warn(&svc->dev, "short intf-oops request received (%zu < %zu)\n",
			 op->request->payload_size, sizeof(*request));
		return -EINVAL;
	}

	return gb_svc_queue_deferred_request(op);
}

static int gb_svc_intf_mailbox_event_recv(struct gb_operation *op)
{
	struct gb_svc *svc = gb_connection_get_data(op->connection);
	struct gb_svc_intf_mailbox_event_request *request;

	if (op->request->payload_size < sizeof(*request)) {
		dev_warn(&svc->dev, "short mailbox request received (%zu < %zu)\n",
			 op->request->payload_size, sizeof(*request));
		return -EINVAL;
	}

	request = op->request->payload;

	dev_dbg(&svc->dev, "%s - id = %u\n", __func__, request->intf_id);

	return gb_svc_queue_deferred_request(op);
}

static int gb_svc_request_handler(struct gb_operation *op)
{
	struct gb_connection *connection = op->connection;
	struct gb_svc *svc = gb_connection_get_data(connection);
	u8 type = op->type;
	int ret = 0;

	/*
	 * SVC requests need to follow a specific order (at least initially)
	 * and the code below takes care of enforcing that. The expected order
	 * is:
	 * - PROTOCOL_VERSION
	 * - SVC_HELLO
	 * - Any other request, but not the two above.
	 *
	 * Incoming requests are guaranteed to be serialized, so we don't need
	 * to protect 'state' against races.
	 */
	switch (type) {
	case GB_SVC_TYPE_PROTOCOL_VERSION:
		if (svc->state != GB_SVC_STATE_RESET)
			ret = -EINVAL;
		break;
	case GB_SVC_TYPE_SVC_HELLO:
		if (svc->state != GB_SVC_STATE_PROTOCOL_VERSION)
			ret = -EINVAL;
		break;
	default:
		if (svc->state != GB_SVC_STATE_SVC_HELLO)
			ret = -EINVAL;
		break;
	}

	if (ret) {
		dev_warn(&svc->dev, "unexpected request 0x%02x received (state %u)\n",
			 type, svc->state);
		return ret;
	}

	switch (type) {
	case GB_SVC_TYPE_PROTOCOL_VERSION:
		ret = gb_svc_version_request(op);
		if (!ret)
			svc->state = GB_SVC_STATE_PROTOCOL_VERSION;
		return ret;
	case GB_SVC_TYPE_SVC_HELLO:
		ret = gb_svc_hello(op);
		if (!ret)
			svc->state = GB_SVC_STATE_SVC_HELLO;
		return ret;
	case GB_SVC_TYPE_INTF_RESET:
		return gb_svc_intf_reset_recv(op);
	case GB_SVC_TYPE_MODULE_INSERTED:
		return gb_svc_module_inserted_recv(op);
	case GB_SVC_TYPE_MODULE_REMOVED:
		return gb_svc_module_removed_recv(op);
	case GB_SVC_TYPE_INTF_MAILBOX_EVENT:
		return gb_svc_intf_mailbox_event_recv(op);
	case GB_SVC_TYPE_INTF_OOPS:
		return gb_svc_intf_oops_recv(op);
	default:
		dev_warn(&svc->dev, "unsupported request 0x%02x\n", type);
		return -EINVAL;
	}
}

static void gb_svc_release(struct device *dev)
{
	struct gb_svc *svc = to_gb_svc(dev);

	if (svc->connection)
		gb_connection_destroy(svc->connection);
	ida_destroy(&svc->device_id_map);
	destroy_workqueue(svc->wq);
	kfree(svc);
}

const struct device_type greybus_svc_type = {
	.name = "greybus_svc",
	.release = gb_svc_release,
};
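
/*
 * SVC lifecycle: gb_svc_create() allocates the structure, workqueue and
 * static SVC connection; gb_svc_add() enables the connection; gb_svc_del()
 * tears everything down again; gb_svc_put() drops the initial device
 * reference, and gb_svc_release() frees the structure once the last
 * reference is gone.
 */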
struct gb_svc *gb_svc_create(struct gb_host_device *hd)
{
	struct gb_svc *svc;

	svc = kzalloc(sizeof(*svc), GFP_KERNEL);
	if (!svc)
		return NULL;

	svc->wq = alloc_ordered_workqueue("%s:svc", 0, dev_name(&hd->dev));
	if (!svc->wq) {
		kfree(svc);
		return NULL;
	}

	svc->dev.parent = &hd->dev;
	svc->dev.bus = &greybus_bus_type;
	svc->dev.type = &greybus_svc_type;
	svc->dev.groups = svc_groups;
	svc->dev.dma_mask = svc->dev.parent->dma_mask;
	device_initialize(&svc->dev);

	dev_set_name(&svc->dev, "%d-svc", hd->bus_id);

	ida_init(&svc->device_id_map);
	svc->state = GB_SVC_STATE_RESET;
	svc->hd = hd;

	svc->connection = gb_connection_create_static(hd, GB_SVC_CPORT_ID,
						      gb_svc_request_handler);
	if (IS_ERR(svc->connection)) {
		dev_err(&svc->dev, "failed to create connection: %ld\n",
			PTR_ERR(svc->connection));
		goto err_put_device;
	}

	gb_connection_set_data(svc->connection, svc);

	return svc;

err_put_device:
	put_device(&svc->dev);
	return NULL;
}

int gb_svc_add(struct gb_svc *svc)
{
	int ret;

	/*
	 * The SVC protocol is currently driven by the SVC, so the SVC device
	 * is added from the connection request handler when enough
	 * information has been received.
	 */
	ret = gb_connection_enable(svc->connection);
	if (ret)
		return ret;

	return 0;
}

static void gb_svc_remove_modules(struct gb_svc *svc)
{
	struct gb_host_device *hd = svc->hd;
	struct gb_module *module, *tmp;

	list_for_each_entry_safe(module, tmp, &hd->modules, hd_node) {
		gb_module_del(module);
		list_del(&module->hd_node);
		gb_module_put(module);
	}
}

void gb_svc_del(struct gb_svc *svc)
{
	gb_connection_disable_rx(svc->connection);

	/*
	 * The SVC device may have been registered from the request handler.
	 */
	if (device_is_registered(&svc->dev)) {
		gb_svc_debugfs_exit(svc);
		gb_svc_watchdog_destroy(svc);
		device_del(&svc->dev);
	}

	flush_workqueue(svc->wq);

	gb_svc_remove_modules(svc);

	gb_connection_disable(svc->connection);
}

void gb_svc_put(struct gb_svc *svc)
{
	put_device(&svc->dev);
}