xdomain.c

// SPDX-License-Identifier: GPL-2.0
/*
 * Thunderbolt XDomain discovery protocol support
 *
 * Copyright (C) 2017, Intel Corporation
 * Authors: Michael Jamet <michael.jamet@intel.com>
 *          Mika Westerberg <mika.westerberg@linux.intel.com>
 */

#include <linux/device.h>
#include <linux/delay.h>
#include <linux/kmod.h>
#include <linux/module.h>
#include <linux/pm_runtime.h>
#include <linux/prandom.h>
#include <linux/string_helpers.h>
#include <linux/utsname.h>
#include <linux/uuid.h>
#include <linux/workqueue.h>

#include "tb.h"

#define XDOMAIN_SHORT_TIMEOUT		100	/* ms */
#define XDOMAIN_DEFAULT_TIMEOUT		1000	/* ms */
#define XDOMAIN_BONDING_TIMEOUT		10000	/* ms */
#define XDOMAIN_RETRIES			10
#define XDOMAIN_DEFAULT_MAX_HOPID	15

enum {
	XDOMAIN_STATE_INIT,
	XDOMAIN_STATE_UUID,
	XDOMAIN_STATE_LINK_STATUS,
	XDOMAIN_STATE_LINK_STATE_CHANGE,
	XDOMAIN_STATE_LINK_STATUS2,
	XDOMAIN_STATE_BONDING_UUID_LOW,
	XDOMAIN_STATE_BONDING_UUID_HIGH,
	XDOMAIN_STATE_PROPERTIES,
	XDOMAIN_STATE_ENUMERATED,
	XDOMAIN_STATE_ERROR,
};

static const char * const state_names[] = {
	[XDOMAIN_STATE_INIT] = "INIT",
	[XDOMAIN_STATE_UUID] = "UUID",
	[XDOMAIN_STATE_LINK_STATUS] = "LINK_STATUS",
	[XDOMAIN_STATE_LINK_STATE_CHANGE] = "LINK_STATE_CHANGE",
	[XDOMAIN_STATE_LINK_STATUS2] = "LINK_STATUS2",
	[XDOMAIN_STATE_BONDING_UUID_LOW] = "BONDING_UUID_LOW",
	[XDOMAIN_STATE_BONDING_UUID_HIGH] = "BONDING_UUID_HIGH",
	[XDOMAIN_STATE_PROPERTIES] = "PROPERTIES",
	[XDOMAIN_STATE_ENUMERATED] = "ENUMERATED",
	[XDOMAIN_STATE_ERROR] = "ERROR",
};
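
/*
 * Summary of the discovery state machine (derived from
 * tb_xdomain_state_work() below; UUID and PROPERTIES failures go to
 * ERROR, bonding failures skip ahead to PROPERTIES):
 *
 *	INIT -> UUID (if the remote UUID is still needed)
 *	     -> PROPERTIES (otherwise)
 *	UUID -> LINK_STATUS (if lane bonding is possible)
 *	     -> PROPERTIES (otherwise)
 *	LINK_STATUS -> LINK_STATE_CHANGE (we have the lower UUID)
 *		    -> BONDING_UUID_HIGH (we have the higher UUID)
 *	LINK_STATE_CHANGE -> LINK_STATUS2 -> BONDING_UUID_LOW
 *	BONDING_UUID_LOW / BONDING_UUID_HIGH -> PROPERTIES
 *	PROPERTIES -> ENUMERATED
 */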

struct xdomain_request_work {
	struct work_struct work;
	struct tb_xdp_header *pkg;
	struct tb *tb;
};

static bool tb_xdomain_enabled = true;
module_param_named(xdomain, tb_xdomain_enabled, bool, 0444);
MODULE_PARM_DESC(xdomain, "allow XDomain protocol (default: true)");
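
/*
 * Example (illustrative, not part of the original file): the parameter
 * above is read-only at runtime (0444), so XDomain support can only be
 * disabled at boot or module load time, e.g. with "thunderbolt.xdomain=0"
 * on the kernel command line or "modprobe thunderbolt xdomain=0".
 */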

/*
 * Serializes access to the properties and protocol handlers below. If
 * you need to take both this lock and the struct tb_xdomain lock, take
 * this one first.
 */
static DEFINE_MUTEX(xdomain_lock);

/* Properties exposed to the remote domains */
static struct tb_property_dir *xdomain_property_dir;
static u32 xdomain_property_block_gen;

/* Additional protocol handlers */
static LIST_HEAD(protocol_handlers);

/* UUID for XDomain discovery protocol: b638d70e-42ff-40bb-97c2-90e2c0b2ff07 */
static const uuid_t tb_xdp_uuid =
	UUID_INIT(0xb638d70e, 0x42ff, 0x40bb,
		  0x97, 0xc2, 0x90, 0xe2, 0xc0, 0xb2, 0xff, 0x07);

bool tb_is_xdomain_enabled(void)
{
	return tb_xdomain_enabled && tb_acpi_is_xdomain_allowed();
}

static bool tb_xdomain_match(const struct tb_cfg_request *req,
			     const struct ctl_pkg *pkg)
{
	switch (pkg->frame.eof) {
	case TB_CFG_PKG_ERROR:
		return true;

	case TB_CFG_PKG_XDOMAIN_RESP: {
		const struct tb_xdp_header *res_hdr = pkg->buffer;
		const struct tb_xdp_header *req_hdr = req->request;

		if (pkg->frame.size < req->response_size / 4)
			return false;

		/* Make sure route matches */
		if ((res_hdr->xd_hdr.route_hi & ~BIT(31)) !=
		     req_hdr->xd_hdr.route_hi)
			return false;
		if ((res_hdr->xd_hdr.route_lo) != req_hdr->xd_hdr.route_lo)
			return false;

		/* Check that the XDomain protocol matches */
		if (!uuid_equal(&res_hdr->uuid, &req_hdr->uuid))
			return false;

		return true;
	}

	default:
		return false;
	}
}

static bool tb_xdomain_copy(struct tb_cfg_request *req,
			    const struct ctl_pkg *pkg)
{
	memcpy(req->response, pkg->buffer, req->response_size);
	req->result.err = 0;
	return true;
}

static void response_ready(void *data)
{
	tb_cfg_request_put(data);
}

static int __tb_xdomain_response(struct tb_ctl *ctl, const void *response,
				 size_t size, enum tb_cfg_pkg_type type)
{
	struct tb_cfg_request *req;

	req = tb_cfg_request_alloc();
	if (!req)
		return -ENOMEM;

	req->match = tb_xdomain_match;
	req->copy = tb_xdomain_copy;
	req->request = response;
	req->request_size = size;
	req->request_type = type;

	return tb_cfg_request(ctl, req, response_ready, req);
}

/**
 * tb_xdomain_response() - Send an XDomain response message
 * @xd: XDomain to send the message
 * @response: Response to send
 * @size: Size of the response
 * @type: PDF type of the response
 *
 * This can be used to send an XDomain response message to the other
 * domain. No response for the message is expected.
 *
 * Return: %0 in case of success and negative errno in case of failure
 */
int tb_xdomain_response(struct tb_xdomain *xd, const void *response,
			size_t size, enum tb_cfg_pkg_type type)
{
	return __tb_xdomain_response(xd->tb->ctl, response, size, type);
}
EXPORT_SYMBOL_GPL(tb_xdomain_response);
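
/*
 * Example (illustrative sketch, not part of the original file): a
 * service driver answering a protocol message it received could send
 * the reply like this; struct my_proto_reply is hypothetical.
 *
 *	struct my_proto_reply reply = { .status = 0 };
 *
 *	ret = tb_xdomain_response(xd, &reply, sizeof(reply),
 *				  TB_CFG_PKG_XDOMAIN_RESP);
 */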

static int __tb_xdomain_request(struct tb_ctl *ctl, const void *request,
	size_t request_size, enum tb_cfg_pkg_type request_type, void *response,
	size_t response_size, enum tb_cfg_pkg_type response_type,
	unsigned int timeout_msec)
{
	struct tb_cfg_request *req;
	struct tb_cfg_result res;

	req = tb_cfg_request_alloc();
	if (!req)
		return -ENOMEM;

	req->match = tb_xdomain_match;
	req->copy = tb_xdomain_copy;
	req->request = request;
	req->request_size = request_size;
	req->request_type = request_type;
	req->response = response;
	req->response_size = response_size;
	req->response_type = response_type;

	res = tb_cfg_request_sync(ctl, req, timeout_msec);

	tb_cfg_request_put(req);

	return res.err == 1 ? -EIO : res.err;
}

/**
 * tb_xdomain_request() - Send an XDomain request
 * @xd: XDomain to send the request
 * @request: Request to send
 * @request_size: Size of the request in bytes
 * @request_type: PDF type of the request
 * @response: Response is copied here
 * @response_size: Expected size of the response in bytes
 * @response_type: Expected PDF type of the response
 * @timeout_msec: Timeout in milliseconds to wait for the response
 *
 * This function can be used to send XDomain control channel messages to
 * the other domain. The function waits until the response is received
 * or the timeout triggers, whichever comes first.
 *
 * Return: %0 in case of success and negative errno in case of failure
 */
int tb_xdomain_request(struct tb_xdomain *xd, const void *request,
		       size_t request_size, enum tb_cfg_pkg_type request_type,
		       void *response, size_t response_size,
		       enum tb_cfg_pkg_type response_type, unsigned int timeout_msec)
{
	return __tb_xdomain_request(xd->tb->ctl, request, request_size,
				    request_type, response, response_size,
				    response_type, timeout_msec);
}
EXPORT_SYMBOL_GPL(tb_xdomain_request);
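
/*
 * Example (illustrative sketch, not part of the original file): a
 * service driver sending a protocol request and waiting up to a second
 * for the reply; the my_proto_* structures and MY_PROTO_QUERY opcode
 * are hypothetical.
 *
 *	struct my_proto_request req = { .opcode = MY_PROTO_QUERY };
 *	struct my_proto_response res;
 *	int ret;
 *
 *	ret = tb_xdomain_request(xd, &req, sizeof(req),
 *				 TB_CFG_PKG_XDOMAIN_REQ, &res, sizeof(res),
 *				 TB_CFG_PKG_XDOMAIN_RESP, 1000);
 *	if (ret)
 *		return ret;
 */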

static inline void tb_xdp_fill_header(struct tb_xdp_header *hdr, u64 route,
	u8 sequence, enum tb_xdp_type type, size_t size)
{
	u32 length_sn;

	length_sn = (size - sizeof(hdr->xd_hdr)) / 4;
	length_sn |= (sequence << TB_XDOMAIN_SN_SHIFT) & TB_XDOMAIN_SN_MASK;

	hdr->xd_hdr.route_hi = upper_32_bits(route);
	hdr->xd_hdr.route_lo = lower_32_bits(route);
	hdr->xd_hdr.length_sn = length_sn;
	hdr->type = type;
	memcpy(&hdr->uuid, &tb_xdp_uuid, sizeof(tb_xdp_uuid));
}

static int tb_xdp_handle_error(const struct tb_xdp_error_response *res)
{
	if (res->hdr.type != ERROR_RESPONSE)
		return 0;

	switch (res->error) {
	case ERROR_UNKNOWN_PACKET:
	case ERROR_UNKNOWN_DOMAIN:
		return -EIO;
	case ERROR_NOT_SUPPORTED:
		return -EOPNOTSUPP;
	case ERROR_NOT_READY:
		return -EAGAIN;
	default:
		break;
	}

	return 0;
}

static int tb_xdp_uuid_request(struct tb_ctl *ctl, u64 route, int retry,
			       uuid_t *uuid, u64 *remote_route)
{
	struct tb_xdp_uuid_response res;
	struct tb_xdp_uuid req;
	int ret;

	memset(&req, 0, sizeof(req));
	tb_xdp_fill_header(&req.hdr, route, retry % 4, UUID_REQUEST,
			   sizeof(req));

	memset(&res, 0, sizeof(res));
	ret = __tb_xdomain_request(ctl, &req, sizeof(req),
				   TB_CFG_PKG_XDOMAIN_REQ, &res, sizeof(res),
				   TB_CFG_PKG_XDOMAIN_RESP,
				   XDOMAIN_DEFAULT_TIMEOUT);
	if (ret)
		return ret;

	ret = tb_xdp_handle_error(&res.err);
	if (ret)
		return ret;

	uuid_copy(uuid, &res.src_uuid);
	*remote_route = (u64)res.src_route_hi << 32 | res.src_route_lo;

	return 0;
}

static int tb_xdp_uuid_response(struct tb_ctl *ctl, u64 route, u8 sequence,
				const uuid_t *uuid)
{
	struct tb_xdp_uuid_response res;

	memset(&res, 0, sizeof(res));
	tb_xdp_fill_header(&res.hdr, route, sequence, UUID_RESPONSE,
			   sizeof(res));

	uuid_copy(&res.src_uuid, uuid);
	res.src_route_hi = upper_32_bits(route);
	res.src_route_lo = lower_32_bits(route);

	return __tb_xdomain_response(ctl, &res, sizeof(res),
				     TB_CFG_PKG_XDOMAIN_RESP);
}

static int tb_xdp_error_response(struct tb_ctl *ctl, u64 route, u8 sequence,
				 enum tb_xdp_error error)
{
	struct tb_xdp_error_response res;

	memset(&res, 0, sizeof(res));
	tb_xdp_fill_header(&res.hdr, route, sequence, ERROR_RESPONSE,
			   sizeof(res));

	res.error = error;

	return __tb_xdomain_response(ctl, &res, sizeof(res),
				     TB_CFG_PKG_XDOMAIN_RESP);
}

static int tb_xdp_properties_request(struct tb_ctl *ctl, u64 route,
	const uuid_t *src_uuid, const uuid_t *dst_uuid, int retry,
	u32 **block, u32 *generation)
{
	struct tb_xdp_properties_response *res;
	struct tb_xdp_properties req;
	u16 data_len, len;
	size_t total_size;
	u32 *data = NULL;
	int ret;

	total_size = sizeof(*res) + TB_XDP_PROPERTIES_MAX_DATA_LENGTH * 4;
	res = kzalloc(total_size, GFP_KERNEL);
	if (!res)
		return -ENOMEM;

	memset(&req, 0, sizeof(req));
	tb_xdp_fill_header(&req.hdr, route, retry % 4, PROPERTIES_REQUEST,
			   sizeof(req));
	memcpy(&req.src_uuid, src_uuid, sizeof(*src_uuid));
	memcpy(&req.dst_uuid, dst_uuid, sizeof(*dst_uuid));

	data_len = 0;

	do {
		ret = __tb_xdomain_request(ctl, &req, sizeof(req),
					   TB_CFG_PKG_XDOMAIN_REQ, res,
					   total_size, TB_CFG_PKG_XDOMAIN_RESP,
					   XDOMAIN_DEFAULT_TIMEOUT);
		if (ret)
			goto err;

		ret = tb_xdp_handle_error(&res->err);
		if (ret)
			goto err;

		/*
		 * Package length includes the whole payload without the
		 * XDomain header. Validate first that the package is at
		 * least size of the response structure.
		 */
		len = res->hdr.xd_hdr.length_sn & TB_XDOMAIN_LENGTH_MASK;
		if (len < sizeof(*res) / 4) {
			ret = -EINVAL;
			goto err;
		}

		len += sizeof(res->hdr.xd_hdr) / 4;
		len -= sizeof(*res) / 4;

		if (res->offset != req.offset) {
			ret = -EINVAL;
			goto err;
		}

		/*
		 * First time allocate block that has enough space for
		 * the whole properties block.
		 */
		if (!data) {
			data_len = res->data_length;
			if (data_len > TB_XDP_PROPERTIES_MAX_LENGTH) {
				ret = -E2BIG;
				goto err;
			}

			data = kcalloc(data_len, sizeof(u32), GFP_KERNEL);
			if (!data) {
				ret = -ENOMEM;
				goto err;
			}
		}

		memcpy(data + req.offset, res->data, len * 4);
		req.offset += len;
	} while (!data_len || req.offset < data_len);

	*block = data;
	*generation = res->generation;

	kfree(res);

	return data_len;

err:
	kfree(data);
	kfree(res);

	return ret;
}

static int tb_xdp_properties_response(struct tb *tb, struct tb_ctl *ctl,
	struct tb_xdomain *xd, u8 sequence, const struct tb_xdp_properties *req)
{
	struct tb_xdp_properties_response *res;
	size_t total_size;
	u16 len;
	int ret;

	/*
	 * Currently we expect all requests to be directed to us. The
	 * protocol supports forwarding, though, which we might add
	 * support for later on.
	 */
	if (!uuid_equal(xd->local_uuid, &req->dst_uuid)) {
		tb_xdp_error_response(ctl, xd->route, sequence,
				      ERROR_UNKNOWN_DOMAIN);
		return 0;
	}

	mutex_lock(&xd->lock);

	if (req->offset >= xd->local_property_block_len) {
		mutex_unlock(&xd->lock);
		return -EINVAL;
	}

	len = xd->local_property_block_len - req->offset;
	len = min_t(u16, len, TB_XDP_PROPERTIES_MAX_DATA_LENGTH);
	total_size = sizeof(*res) + len * 4;

	res = kzalloc(total_size, GFP_KERNEL);
	if (!res) {
		mutex_unlock(&xd->lock);
		return -ENOMEM;
	}

	tb_xdp_fill_header(&res->hdr, xd->route, sequence, PROPERTIES_RESPONSE,
			   total_size);
	res->generation = xd->local_property_block_gen;
	res->data_length = xd->local_property_block_len;
	res->offset = req->offset;
	uuid_copy(&res->src_uuid, xd->local_uuid);
	uuid_copy(&res->dst_uuid, &req->src_uuid);
	memcpy(res->data, &xd->local_property_block[req->offset], len * 4);

	mutex_unlock(&xd->lock);

	ret = __tb_xdomain_response(ctl, res, total_size,
				    TB_CFG_PKG_XDOMAIN_RESP);

	kfree(res);
	return ret;
}

static int tb_xdp_properties_changed_request(struct tb_ctl *ctl, u64 route,
					     int retry, const uuid_t *uuid)
{
	struct tb_xdp_properties_changed_response res;
	struct tb_xdp_properties_changed req;
	int ret;

	memset(&req, 0, sizeof(req));
	tb_xdp_fill_header(&req.hdr, route, retry % 4,
			   PROPERTIES_CHANGED_REQUEST, sizeof(req));
	uuid_copy(&req.src_uuid, uuid);

	memset(&res, 0, sizeof(res));
	ret = __tb_xdomain_request(ctl, &req, sizeof(req),
				   TB_CFG_PKG_XDOMAIN_REQ, &res, sizeof(res),
				   TB_CFG_PKG_XDOMAIN_RESP,
				   XDOMAIN_DEFAULT_TIMEOUT);
	if (ret)
		return ret;

	return tb_xdp_handle_error(&res.err);
}

static int
tb_xdp_properties_changed_response(struct tb_ctl *ctl, u64 route, u8 sequence)
{
	struct tb_xdp_properties_changed_response res;

	memset(&res, 0, sizeof(res));
	tb_xdp_fill_header(&res.hdr, route, sequence,
			   PROPERTIES_CHANGED_RESPONSE, sizeof(res));
	return __tb_xdomain_response(ctl, &res, sizeof(res),
				     TB_CFG_PKG_XDOMAIN_RESP);
}

static int tb_xdp_link_state_status_request(struct tb_ctl *ctl, u64 route,
					    u8 sequence, u8 *slw, u8 *tlw,
					    u8 *sls, u8 *tls)
{
	struct tb_xdp_link_state_status_response res;
	struct tb_xdp_link_state_status req;
	int ret;

	memset(&req, 0, sizeof(req));
	tb_xdp_fill_header(&req.hdr, route, sequence, LINK_STATE_STATUS_REQUEST,
			   sizeof(req));

	memset(&res, 0, sizeof(res));
	ret = __tb_xdomain_request(ctl, &req, sizeof(req), TB_CFG_PKG_XDOMAIN_REQ,
				   &res, sizeof(res), TB_CFG_PKG_XDOMAIN_RESP,
				   XDOMAIN_DEFAULT_TIMEOUT);
	if (ret)
		return ret;

	ret = tb_xdp_handle_error(&res.err);
	if (ret)
		return ret;

	if (res.status != 0)
		return -EREMOTEIO;

	*slw = res.slw;
	*tlw = res.tlw;
	*sls = res.sls;
	*tls = res.tls;

	return 0;
}

static int tb_xdp_link_state_status_response(struct tb *tb, struct tb_ctl *ctl,
					     struct tb_xdomain *xd, u8 sequence)
{
	struct tb_xdp_link_state_status_response res;
	struct tb_port *port = tb_xdomain_downstream_port(xd);
	u32 val[2];
	int ret;

	memset(&res, 0, sizeof(res));
	tb_xdp_fill_header(&res.hdr, xd->route, sequence,
			   LINK_STATE_STATUS_RESPONSE, sizeof(res));

	ret = tb_port_read(port, val, TB_CFG_PORT,
			   port->cap_phy + LANE_ADP_CS_0, ARRAY_SIZE(val));
	if (ret)
		return ret;

	res.slw = (val[0] & LANE_ADP_CS_0_SUPPORTED_WIDTH_MASK) >>
			LANE_ADP_CS_0_SUPPORTED_WIDTH_SHIFT;
	res.sls = (val[0] & LANE_ADP_CS_0_SUPPORTED_SPEED_MASK) >>
			LANE_ADP_CS_0_SUPPORTED_SPEED_SHIFT;
	res.tls = val[1] & LANE_ADP_CS_1_TARGET_SPEED_MASK;
	res.tlw = (val[1] & LANE_ADP_CS_1_TARGET_WIDTH_MASK) >>
			LANE_ADP_CS_1_TARGET_WIDTH_SHIFT;

	return __tb_xdomain_response(ctl, &res, sizeof(res),
				     TB_CFG_PKG_XDOMAIN_RESP);
}

static int tb_xdp_link_state_change_request(struct tb_ctl *ctl, u64 route,
					    u8 sequence, u8 tlw, u8 tls)
{
	struct tb_xdp_link_state_change_response res;
	struct tb_xdp_link_state_change req;
	int ret;

	memset(&req, 0, sizeof(req));
	tb_xdp_fill_header(&req.hdr, route, sequence, LINK_STATE_CHANGE_REQUEST,
			   sizeof(req));
	req.tlw = tlw;
	req.tls = tls;

	memset(&res, 0, sizeof(res));
	ret = __tb_xdomain_request(ctl, &req, sizeof(req), TB_CFG_PKG_XDOMAIN_REQ,
				   &res, sizeof(res), TB_CFG_PKG_XDOMAIN_RESP,
				   XDOMAIN_DEFAULT_TIMEOUT);
	if (ret)
		return ret;

	ret = tb_xdp_handle_error(&res.err);
	if (ret)
		return ret;

	return res.status != 0 ? -EREMOTEIO : 0;
}

static int tb_xdp_link_state_change_response(struct tb_ctl *ctl, u64 route,
					     u8 sequence, u32 status)
{
	struct tb_xdp_link_state_change_response res;

	memset(&res, 0, sizeof(res));
	tb_xdp_fill_header(&res.hdr, route, sequence, LINK_STATE_CHANGE_RESPONSE,
			   sizeof(res));

	res.status = status;

	return __tb_xdomain_response(ctl, &res, sizeof(res),
				     TB_CFG_PKG_XDOMAIN_RESP);
}

/**
 * tb_register_protocol_handler() - Register protocol handler
 * @handler: Handler to register
 *
 * This allows XDomain service drivers to hook into incoming XDomain
 * messages. After this function is called the service driver needs to
 * be able to handle calls to the callback whenever a package with the
 * registered protocol is received.
 */
int tb_register_protocol_handler(struct tb_protocol_handler *handler)
{
	if (!handler->uuid || !handler->callback)
		return -EINVAL;
	if (uuid_equal(handler->uuid, &tb_xdp_uuid))
		return -EINVAL;

	mutex_lock(&xdomain_lock);
	list_add_tail(&handler->list, &protocol_handlers);
	mutex_unlock(&xdomain_lock);

	return 0;
}
EXPORT_SYMBOL_GPL(tb_register_protocol_handler);
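
/*
 * Example (illustrative sketch, not part of the original file): a
 * driver hooking into packages of its own protocol; my_proto_uuid and
 * my_proto_callback are hypothetical.
 *
 *	static struct tb_protocol_handler my_handler = {
 *		.uuid = &my_proto_uuid,
 *		.callback = my_proto_callback,
 *	};
 *
 *	ret = tb_register_protocol_handler(&my_handler);
 */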

/**
 * tb_unregister_protocol_handler() - Unregister protocol handler
 * @handler: Handler to unregister
 *
 * Removes the previously registered protocol handler.
 */
void tb_unregister_protocol_handler(struct tb_protocol_handler *handler)
{
	mutex_lock(&xdomain_lock);
	list_del_init(&handler->list);
	mutex_unlock(&xdomain_lock);
}
EXPORT_SYMBOL_GPL(tb_unregister_protocol_handler);

static void update_property_block(struct tb_xdomain *xd)
{
	mutex_lock(&xdomain_lock);
	mutex_lock(&xd->lock);
	/*
	 * If the local property block is not up-to-date, rebuild it now
	 * based on the global property template.
	 */
	if (!xd->local_property_block ||
	    xd->local_property_block_gen < xdomain_property_block_gen) {
		struct tb_property_dir *dir;
		int ret, block_len;
		u32 *block;

		dir = tb_property_copy_dir(xdomain_property_dir);
		if (!dir) {
			dev_warn(&xd->dev, "failed to copy properties\n");
			goto out_unlock;
		}

		/* Fill in non-static properties now */
		tb_property_add_text(dir, "deviceid", utsname()->nodename);
		tb_property_add_immediate(dir, "maxhopid", xd->local_max_hopid);

		ret = tb_property_format_dir(dir, NULL, 0);
		if (ret < 0) {
			dev_warn(&xd->dev, "local property block creation failed\n");
			tb_property_free_dir(dir);
			goto out_unlock;
		}

		block_len = ret;
		block = kcalloc(block_len, sizeof(*block), GFP_KERNEL);
		if (!block) {
			tb_property_free_dir(dir);
			goto out_unlock;
		}

		ret = tb_property_format_dir(dir, block, block_len);
		if (ret) {
			dev_warn(&xd->dev, "property block generation failed\n");
			tb_property_free_dir(dir);
			kfree(block);
			goto out_unlock;
		}

		tb_property_free_dir(dir);
		/* Release the previous block */
		kfree(xd->local_property_block);
		/* Assign new one */
		xd->local_property_block = block;
		xd->local_property_block_len = block_len;
		xd->local_property_block_gen = xdomain_property_block_gen;
	}

out_unlock:
	mutex_unlock(&xd->lock);
	mutex_unlock(&xdomain_lock);
}

static void start_handshake(struct tb_xdomain *xd)
{
	xd->state = XDOMAIN_STATE_INIT;
	queue_delayed_work(xd->tb->wq, &xd->state_work,
			   msecs_to_jiffies(XDOMAIN_SHORT_TIMEOUT));
}

/* Can be called from state_work */
static void __stop_handshake(struct tb_xdomain *xd)
{
	cancel_delayed_work_sync(&xd->properties_changed_work);
	xd->properties_changed_retries = 0;
	xd->state_retries = 0;
}

static void stop_handshake(struct tb_xdomain *xd)
{
	cancel_delayed_work_sync(&xd->state_work);
	__stop_handshake(xd);
}

static void tb_xdp_handle_request(struct work_struct *work)
{
	struct xdomain_request_work *xw = container_of(work, typeof(*xw), work);
	const struct tb_xdp_header *pkg = xw->pkg;
	const struct tb_xdomain_header *xhdr = &pkg->xd_hdr;
	struct tb *tb = xw->tb;
	struct tb_ctl *ctl = tb->ctl;
	struct tb_xdomain *xd;
	const uuid_t *uuid;
	int ret = 0;
	u32 sequence;
	u64 route;

	route = ((u64)xhdr->route_hi << 32 | xhdr->route_lo) & ~BIT_ULL(63);
	sequence = xhdr->length_sn & TB_XDOMAIN_SN_MASK;
	sequence >>= TB_XDOMAIN_SN_SHIFT;

	mutex_lock(&tb->lock);
	if (tb->root_switch)
		uuid = tb->root_switch->uuid;
	else
		uuid = NULL;
	mutex_unlock(&tb->lock);

	if (!uuid) {
		tb_xdp_error_response(ctl, route, sequence, ERROR_NOT_READY);
		goto out;
	}

	xd = tb_xdomain_find_by_route_locked(tb, route);
	if (xd)
		update_property_block(xd);

	switch (pkg->type) {
	case PROPERTIES_REQUEST:
		tb_dbg(tb, "%llx: received XDomain properties request\n", route);
		if (xd) {
			ret = tb_xdp_properties_response(tb, ctl, xd, sequence,
				(const struct tb_xdp_properties *)pkg);
		}
		break;

	case PROPERTIES_CHANGED_REQUEST:
		tb_dbg(tb, "%llx: received XDomain properties changed request\n",
		       route);

		ret = tb_xdp_properties_changed_response(ctl, route, sequence);

		/*
		 * Since the properties have been changed, let's update
		 * the xdomain related to this connection as well in
		 * case there is a change in services it offers.
		 */
		if (xd && device_is_registered(&xd->dev))
			queue_delayed_work(tb->wq, &xd->state_work,
					   msecs_to_jiffies(XDOMAIN_SHORT_TIMEOUT));
		break;

	case UUID_REQUEST_OLD:
	case UUID_REQUEST:
		tb_dbg(tb, "%llx: received XDomain UUID request\n", route);
		ret = tb_xdp_uuid_response(ctl, route, sequence, uuid);
		/*
		 * If we've stopped the discovery with an error such as
		 * timing out, we will restart the handshake now that we
		 * received a UUID request from the remote host.
		 */
		if (!ret && xd && xd->state == XDOMAIN_STATE_ERROR) {
			dev_dbg(&xd->dev, "restarting handshake\n");
			start_handshake(xd);
		}
		break;

	case LINK_STATE_STATUS_REQUEST:
		tb_dbg(tb, "%llx: received XDomain link state status request\n",
		       route);

		if (xd) {
			ret = tb_xdp_link_state_status_response(tb, ctl, xd,
								sequence);
		} else {
			tb_xdp_error_response(ctl, route, sequence,
					      ERROR_NOT_READY);
		}
		break;

	case LINK_STATE_CHANGE_REQUEST:
		tb_dbg(tb, "%llx: received XDomain link state change request\n",
		       route);

		if (xd && xd->state == XDOMAIN_STATE_BONDING_UUID_HIGH) {
			const struct tb_xdp_link_state_change *lsc =
				(const struct tb_xdp_link_state_change *)pkg;

			ret = tb_xdp_link_state_change_response(ctl, route,
								sequence, 0);
			xd->target_link_width = lsc->tlw;
			queue_delayed_work(tb->wq, &xd->state_work,
					   msecs_to_jiffies(XDOMAIN_SHORT_TIMEOUT));
		} else {
			tb_xdp_error_response(ctl, route, sequence,
					      ERROR_NOT_READY);
		}
		break;

	default:
		tb_dbg(tb, "%llx: unknown XDomain request %#x\n", route, pkg->type);
		tb_xdp_error_response(ctl, route, sequence,
				      ERROR_NOT_SUPPORTED);
		break;
	}

	tb_xdomain_put(xd);

	if (ret) {
		tb_warn(tb, "failed to send XDomain response for %#x\n",
			pkg->type);
	}

out:
	kfree(xw->pkg);
	kfree(xw);

	tb_domain_put(tb);
}

static bool
tb_xdp_schedule_request(struct tb *tb, const struct tb_xdp_header *hdr,
			size_t size)
{
	struct xdomain_request_work *xw;

	xw = kmalloc(sizeof(*xw), GFP_KERNEL);
	if (!xw)
		return false;

	INIT_WORK(&xw->work, tb_xdp_handle_request);
	xw->pkg = kmemdup(hdr, size, GFP_KERNEL);
	if (!xw->pkg) {
		kfree(xw);
		return false;
	}
	xw->tb = tb_domain_get(tb);

	schedule_work(&xw->work);
	return true;
}

/**
 * tb_register_service_driver() - Register XDomain service driver
 * @drv: Driver to register
 *
 * Registers the new service driver @drv with the bus.
 */
int tb_register_service_driver(struct tb_service_driver *drv)
{
	drv->driver.bus = &tb_bus_type;
	return driver_register(&drv->driver);
}
EXPORT_SYMBOL_GPL(tb_register_service_driver);
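
/*
 * Example (illustrative sketch, not part of the original file): a
 * minimal service driver; my_probe()/my_remove() and the "network"
 * key/protocol id are hypothetical.
 *
 *	static const struct tb_service_id my_ids[] = {
 *		{ TB_SERVICE("network", 1) },
 *		{ },
 *	};
 *	MODULE_DEVICE_TABLE(tbsvc, my_ids);
 *
 *	static struct tb_service_driver my_driver = {
 *		.driver = {
 *			.name = "my-service",
 *		},
 *		.probe = my_probe,
 *		.remove = my_remove,
 *		.id_table = my_ids,
 *	};
 *
 *	ret = tb_register_service_driver(&my_driver);
 */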

/**
 * tb_unregister_service_driver() - Unregister XDomain service driver
 * @drv: Driver to unregister
 *
 * Unregisters XDomain service driver from the bus.
 */
void tb_unregister_service_driver(struct tb_service_driver *drv)
{
	driver_unregister(&drv->driver);
}
EXPORT_SYMBOL_GPL(tb_unregister_service_driver);

static ssize_t key_show(struct device *dev, struct device_attribute *attr,
			char *buf)
{
	struct tb_service *svc = container_of(dev, struct tb_service, dev);

	/*
	 * It should be null-terminated but anything else is pretty much
	 * allowed.
	 */
	return sysfs_emit(buf, "%*pE\n", (int)strlen(svc->key), svc->key);
}
static DEVICE_ATTR_RO(key);

static int get_modalias(const struct tb_service *svc, char *buf, size_t size)
{
	return snprintf(buf, size, "tbsvc:k%sp%08Xv%08Xr%08X", svc->key,
			svc->prtcid, svc->prtcvers, svc->prtcrevs);
}

static ssize_t modalias_show(struct device *dev, struct device_attribute *attr,
			     char *buf)
{
	struct tb_service *svc = container_of(dev, struct tb_service, dev);

	/* Full buffer size except newline and null termination */
	get_modalias(svc, buf, PAGE_SIZE - 2);
	return strlen(strcat(buf, "\n"));
}
static DEVICE_ATTR_RO(modalias);

static ssize_t prtcid_show(struct device *dev, struct device_attribute *attr,
			   char *buf)
{
	struct tb_service *svc = container_of(dev, struct tb_service, dev);

	return sysfs_emit(buf, "%u\n", svc->prtcid);
}
static DEVICE_ATTR_RO(prtcid);

static ssize_t prtcvers_show(struct device *dev, struct device_attribute *attr,
			     char *buf)
{
	struct tb_service *svc = container_of(dev, struct tb_service, dev);

	return sysfs_emit(buf, "%u\n", svc->prtcvers);
}
static DEVICE_ATTR_RO(prtcvers);

static ssize_t prtcrevs_show(struct device *dev, struct device_attribute *attr,
			     char *buf)
{
	struct tb_service *svc = container_of(dev, struct tb_service, dev);

	return sysfs_emit(buf, "%u\n", svc->prtcrevs);
}
static DEVICE_ATTR_RO(prtcrevs);

static ssize_t prtcstns_show(struct device *dev, struct device_attribute *attr,
			     char *buf)
{
	struct tb_service *svc = container_of(dev, struct tb_service, dev);

	return sysfs_emit(buf, "0x%08x\n", svc->prtcstns);
}
static DEVICE_ATTR_RO(prtcstns);

static struct attribute *tb_service_attrs[] = {
	&dev_attr_key.attr,
	&dev_attr_modalias.attr,
	&dev_attr_prtcid.attr,
	&dev_attr_prtcvers.attr,
	&dev_attr_prtcrevs.attr,
	&dev_attr_prtcstns.attr,
	NULL,
};

static const struct attribute_group tb_service_attr_group = {
	.attrs = tb_service_attrs,
};

static const struct attribute_group *tb_service_attr_groups[] = {
	&tb_service_attr_group,
	NULL,
};

static int tb_service_uevent(const struct device *dev, struct kobj_uevent_env *env)
{
	const struct tb_service *svc = container_of_const(dev, struct tb_service, dev);
	char modalias[64];

	get_modalias(svc, modalias, sizeof(modalias));
	return add_uevent_var(env, "MODALIAS=%s", modalias);
}

static void tb_service_release(struct device *dev)
{
	struct tb_service *svc = container_of(dev, struct tb_service, dev);
	struct tb_xdomain *xd = tb_service_parent(svc);

	tb_service_debugfs_remove(svc);
	ida_free(&xd->service_ids, svc->id);
	kfree(svc->key);
	kfree(svc);
}

const struct device_type tb_service_type = {
	.name = "thunderbolt_service",
	.groups = tb_service_attr_groups,
	.uevent = tb_service_uevent,
	.release = tb_service_release,
};
EXPORT_SYMBOL_GPL(tb_service_type);

static int remove_missing_service(struct device *dev, void *data)
{
	struct tb_xdomain *xd = data;
	struct tb_service *svc;

	svc = tb_to_service(dev);
	if (!svc)
		return 0;

	if (!tb_property_find(xd->remote_properties, svc->key,
			      TB_PROPERTY_TYPE_DIRECTORY))
		device_unregister(dev);

	return 0;
}

static int find_service(struct device *dev, void *data)
{
	const struct tb_property *p = data;
	struct tb_service *svc;

	svc = tb_to_service(dev);
	if (!svc)
		return 0;

	return !strcmp(svc->key, p->key);
}

static int populate_service(struct tb_service *svc,
			    struct tb_property *property)
{
	struct tb_property_dir *dir = property->value.dir;
	struct tb_property *p;

	/* Fill in standard properties */
	p = tb_property_find(dir, "prtcid", TB_PROPERTY_TYPE_VALUE);
	if (p)
		svc->prtcid = p->value.immediate;
	p = tb_property_find(dir, "prtcvers", TB_PROPERTY_TYPE_VALUE);
	if (p)
		svc->prtcvers = p->value.immediate;
	p = tb_property_find(dir, "prtcrevs", TB_PROPERTY_TYPE_VALUE);
	if (p)
		svc->prtcrevs = p->value.immediate;
	p = tb_property_find(dir, "prtcstns", TB_PROPERTY_TYPE_VALUE);
	if (p)
		svc->prtcstns = p->value.immediate;

	svc->key = kstrdup(property->key, GFP_KERNEL);
	if (!svc->key)
		return -ENOMEM;

	return 0;
}

static void enumerate_services(struct tb_xdomain *xd)
{
	struct tb_service *svc;
	struct tb_property *p;
	struct device *dev;
	int id;

	/*
	 * First remove all services that are not available anymore in
	 * the updated property block.
	 */
	device_for_each_child_reverse(&xd->dev, xd, remove_missing_service);

	/* Then re-enumerate properties creating new services as we go */
	tb_property_for_each(xd->remote_properties, p) {
		if (p->type != TB_PROPERTY_TYPE_DIRECTORY)
			continue;

		/* If the service exists already we are fine */
		dev = device_find_child(&xd->dev, p, find_service);
		if (dev) {
			put_device(dev);
			continue;
		}

		svc = kzalloc(sizeof(*svc), GFP_KERNEL);
		if (!svc)
			break;

		if (populate_service(svc, p)) {
			kfree(svc);
			break;
		}

		id = ida_alloc(&xd->service_ids, GFP_KERNEL);
		if (id < 0) {
			kfree(svc->key);
			kfree(svc);
			break;
		}
		svc->id = id;
		svc->dev.bus = &tb_bus_type;
		svc->dev.type = &tb_service_type;
		svc->dev.parent = &xd->dev;
		dev_set_name(&svc->dev, "%s.%d", dev_name(&xd->dev), svc->id);

		tb_service_debugfs_init(svc);

		if (device_register(&svc->dev)) {
			put_device(&svc->dev);
			break;
		}
	}
}

static int populate_properties(struct tb_xdomain *xd,
			       struct tb_property_dir *dir)
{
	const struct tb_property *p;

	/* Required properties */
	p = tb_property_find(dir, "deviceid", TB_PROPERTY_TYPE_VALUE);
	if (!p)
		return -EINVAL;
	xd->device = p->value.immediate;

	p = tb_property_find(dir, "vendorid", TB_PROPERTY_TYPE_VALUE);
	if (!p)
		return -EINVAL;
	xd->vendor = p->value.immediate;

	p = tb_property_find(dir, "maxhopid", TB_PROPERTY_TYPE_VALUE);
	/*
	 * USB4 inter-domain spec suggests using 15 as HopID if the
	 * other end does not announce it in a property. This is for
	 * TBT3 compatibility.
	 */
	xd->remote_max_hopid = p ? p->value.immediate : XDOMAIN_DEFAULT_MAX_HOPID;

	kfree(xd->device_name);
	xd->device_name = NULL;
	kfree(xd->vendor_name);
	xd->vendor_name = NULL;

	/* Optional properties */
	p = tb_property_find(dir, "deviceid", TB_PROPERTY_TYPE_TEXT);
	if (p)
		xd->device_name = kstrdup(p->value.text, GFP_KERNEL);
	p = tb_property_find(dir, "vendorid", TB_PROPERTY_TYPE_TEXT);
	if (p)
		xd->vendor_name = kstrdup(p->value.text, GFP_KERNEL);

	return 0;
}

static int tb_xdomain_update_link_attributes(struct tb_xdomain *xd)
{
	bool change = false;
	struct tb_port *port;
	int ret;

	port = tb_xdomain_downstream_port(xd);

	ret = tb_port_get_link_speed(port);
	if (ret < 0)
		return ret;

	if (xd->link_speed != ret)
		change = true;

	xd->link_speed = ret;

	ret = tb_port_get_link_width(port);
	if (ret < 0)
		return ret;

	if (xd->link_width != ret)
		change = true;

	xd->link_width = ret;

	if (change)
		kobject_uevent(&xd->dev.kobj, KOBJ_CHANGE);

	return 0;
}

static int tb_xdomain_get_uuid(struct tb_xdomain *xd)
{
	struct tb *tb = xd->tb;
	uuid_t uuid;
	u64 route;
	int ret;

	dev_dbg(&xd->dev, "requesting remote UUID\n");

	ret = tb_xdp_uuid_request(tb->ctl, xd->route, xd->state_retries, &uuid,
				  &route);
	if (ret < 0) {
		if (xd->state_retries-- > 0) {
			dev_dbg(&xd->dev, "failed to request UUID, retrying\n");
			return -EAGAIN;
		}
		dev_dbg(&xd->dev, "failed to read remote UUID\n");
		return ret;
	}

	dev_dbg(&xd->dev, "got remote UUID %pUb\n", &uuid);

	if (uuid_equal(&uuid, xd->local_uuid)) {
		if (route == xd->route)
			dev_dbg(&xd->dev, "loop back detected\n");
		else
			dev_dbg(&xd->dev, "intra-domain loop detected\n");

		/* Don't bond lanes automatically for loops */
		xd->bonding_possible = false;
	}

	/*
	 * If the UUID is different, there is another domain connected
	 * so mark this one unplugged and wait for the connection
	 * manager to replace it.
	 */
	if (xd->remote_uuid && !uuid_equal(&uuid, xd->remote_uuid)) {
		dev_dbg(&xd->dev, "remote UUID is different, unplugging\n");
		xd->is_unplugged = true;
		return -ENODEV;
	}

	/* First time fill in the missing UUID */
	if (!xd->remote_uuid) {
		xd->remote_uuid = kmemdup(&uuid, sizeof(uuid_t), GFP_KERNEL);
		if (!xd->remote_uuid)
			return -ENOMEM;
	}

	return 0;
}

static int tb_xdomain_get_link_status(struct tb_xdomain *xd)
{
	struct tb *tb = xd->tb;
	u8 slw, tlw, sls, tls;
	int ret;

	dev_dbg(&xd->dev, "sending link state status request to %pUb\n",
		xd->remote_uuid);

	ret = tb_xdp_link_state_status_request(tb->ctl, xd->route,
					       xd->state_retries, &slw, &tlw,
					       &sls, &tls);
	if (ret) {
		if (ret != -EOPNOTSUPP && xd->state_retries-- > 0) {
			dev_dbg(&xd->dev,
				"failed to request remote link status, retrying\n");
			return -EAGAIN;
		}
		dev_dbg(&xd->dev, "failed to receive remote link status\n");
		return ret;
	}

	dev_dbg(&xd->dev, "remote link supports width %#x speed %#x\n", slw, sls);

	if (slw < LANE_ADP_CS_0_SUPPORTED_WIDTH_DUAL) {
		dev_dbg(&xd->dev, "remote adapter is single lane only\n");
		return -EOPNOTSUPP;
	}

	return 0;
}

static int tb_xdomain_link_state_change(struct tb_xdomain *xd,
					unsigned int width)
{
	struct tb_port *port = tb_xdomain_downstream_port(xd);
	struct tb *tb = xd->tb;
	u8 tlw, tls;
	u32 val;
	int ret;

	if (width == 2)
		tlw = LANE_ADP_CS_1_TARGET_WIDTH_DUAL;
	else if (width == 1)
		tlw = LANE_ADP_CS_1_TARGET_WIDTH_SINGLE;
	else
		return -EINVAL;

	/* Use the current target speed */
	ret = tb_port_read(port, &val, TB_CFG_PORT, port->cap_phy + LANE_ADP_CS_1, 1);
	if (ret)
		return ret;
	tls = val & LANE_ADP_CS_1_TARGET_SPEED_MASK;

	dev_dbg(&xd->dev, "sending link state change request with width %#x speed %#x\n",
		tlw, tls);

	ret = tb_xdp_link_state_change_request(tb->ctl, xd->route,
					       xd->state_retries, tlw, tls);
	if (ret) {
		if (ret != -EOPNOTSUPP && xd->state_retries-- > 0) {
			dev_dbg(&xd->dev,
				"failed to change remote link state, retrying\n");
			return -EAGAIN;
		}
		dev_err(&xd->dev, "failed to request link state change, aborting\n");
		return ret;
	}

	dev_dbg(&xd->dev, "received link state change response\n");
	return 0;
}

static int tb_xdomain_bond_lanes_uuid_high(struct tb_xdomain *xd)
{
	unsigned int width, width_mask;
	struct tb_port *port;
	int ret;

	if (xd->target_link_width == LANE_ADP_CS_1_TARGET_WIDTH_SINGLE) {
		width = TB_LINK_WIDTH_SINGLE;
		width_mask = width;
	} else if (xd->target_link_width == LANE_ADP_CS_1_TARGET_WIDTH_DUAL) {
		width = TB_LINK_WIDTH_DUAL;
		width_mask = width | TB_LINK_WIDTH_ASYM_TX | TB_LINK_WIDTH_ASYM_RX;
	} else {
		if (xd->state_retries-- > 0) {
			dev_dbg(&xd->dev,
				"link state change request not received yet, retrying\n");
			return -EAGAIN;
		}
		dev_dbg(&xd->dev, "timeout waiting for link change request\n");
		return -ETIMEDOUT;
	}

	port = tb_xdomain_downstream_port(xd);

	/*
	 * We can't use tb_xdomain_lane_bonding_enable() here because it
	 * is the other side that initiates lane bonding. So here we
	 * just set the width on both lane adapters and wait for the
	 * link to transition to bonded.
	 */
	ret = tb_port_set_link_width(port->dual_link_port, width);
	if (ret) {
		tb_port_warn(port->dual_link_port,
			     "failed to set link width to %d\n", width);
		return ret;
	}

	ret = tb_port_set_link_width(port, width);
	if (ret) {
		tb_port_warn(port, "failed to set link width to %d\n", width);
		return ret;
	}

	ret = tb_port_wait_for_link_width(port, width_mask,
					  XDOMAIN_BONDING_TIMEOUT);
	if (ret) {
		dev_warn(&xd->dev, "error waiting for link width to become %d\n",
			 width_mask);
		return ret;
	}

	port->bonded = width > TB_LINK_WIDTH_SINGLE;
	port->dual_link_port->bonded = width > TB_LINK_WIDTH_SINGLE;

	tb_port_update_credits(port);
	tb_xdomain_update_link_attributes(xd);

	dev_dbg(&xd->dev, "lane bonding %s\n", str_enabled_disabled(width == 2));
	return 0;
}

static int tb_xdomain_get_properties(struct tb_xdomain *xd)
{
	struct tb_property_dir *dir;
	struct tb *tb = xd->tb;
	bool update = false;
	u32 *block = NULL;
	u32 gen = 0;
	int ret;

	dev_dbg(&xd->dev, "requesting remote properties\n");

	ret = tb_xdp_properties_request(tb->ctl, xd->route, xd->local_uuid,
					xd->remote_uuid, xd->state_retries,
					&block, &gen);
	if (ret < 0) {
		if (xd->state_retries-- > 0) {
			dev_dbg(&xd->dev,
				"failed to request remote properties, retrying\n");
			return -EAGAIN;
		}
		/* Give up now */
		dev_err(&xd->dev, "failed to read XDomain properties from %pUb\n",
			xd->remote_uuid);

		return ret;
	}

	mutex_lock(&xd->lock);

	/* Only accept newer generation properties */
	if (xd->remote_properties && gen <= xd->remote_property_block_gen) {
		ret = 0;
		goto err_free_block;
	}

	dir = tb_property_parse_dir(block, ret);
	if (!dir) {
		dev_err(&xd->dev, "failed to parse XDomain properties\n");
		ret = -ENOMEM;
		goto err_free_block;
	}

	ret = populate_properties(xd, dir);
	if (ret) {
		dev_err(&xd->dev, "missing XDomain properties in response\n");
		goto err_free_dir;
	}

	/* Release the existing one */
	if (xd->remote_properties) {
		tb_property_free_dir(xd->remote_properties);
		update = true;
	}

	xd->remote_properties = dir;
	xd->remote_property_block_gen = gen;

	tb_xdomain_update_link_attributes(xd);

	mutex_unlock(&xd->lock);

	kfree(block);

	/*
	 * Now the device should be ready enough so we can add it to the
	 * bus and let userspace know about it. If the device is already
	 * registered, we notify userspace that it has changed.
	 */
	if (!update) {
		/*
		 * Now disable lane 1 if bonding was not enabled. Do
		 * this only if bonding was possible at the beginning
		 * (that is we are the connection manager and there are
		 * two lanes).
		 */
		if (xd->bonding_possible) {
			struct tb_port *port;

			port = tb_xdomain_downstream_port(xd);
			if (!port->bonded)
				tb_port_disable(port->dual_link_port);
		}

		dev_dbg(&xd->dev, "current link speed %u.0 Gb/s\n",
			xd->link_speed);
		dev_dbg(&xd->dev, "current link width %s\n",
			tb_width_name(xd->link_width));

		if (device_add(&xd->dev)) {
			dev_err(&xd->dev, "failed to add XDomain device\n");
			return -ENODEV;
		}
		dev_info(&xd->dev, "new host found, vendor=%#x device=%#x\n",
			 xd->vendor, xd->device);
		if (xd->vendor_name && xd->device_name)
			dev_info(&xd->dev, "%s %s\n", xd->vendor_name,
				 xd->device_name);

		tb_xdomain_debugfs_init(xd);
	} else {
		kobject_uevent(&xd->dev.kobj, KOBJ_CHANGE);
	}

	enumerate_services(xd);
	return 0;

err_free_dir:
	tb_property_free_dir(dir);
err_free_block:
	kfree(block);
	mutex_unlock(&xd->lock);

	return ret;
}

static void tb_xdomain_queue_uuid(struct tb_xdomain *xd)
{
	xd->state = XDOMAIN_STATE_UUID;
	xd->state_retries = XDOMAIN_RETRIES;
	queue_delayed_work(xd->tb->wq, &xd->state_work,
			   msecs_to_jiffies(XDOMAIN_SHORT_TIMEOUT));
}

static void tb_xdomain_queue_link_status(struct tb_xdomain *xd)
{
	xd->state = XDOMAIN_STATE_LINK_STATUS;
	xd->state_retries = XDOMAIN_RETRIES;
	queue_delayed_work(xd->tb->wq, &xd->state_work,
			   msecs_to_jiffies(XDOMAIN_DEFAULT_TIMEOUT));
}

static void tb_xdomain_queue_link_status2(struct tb_xdomain *xd)
{
	xd->state = XDOMAIN_STATE_LINK_STATUS2;
	xd->state_retries = XDOMAIN_RETRIES;
	queue_delayed_work(xd->tb->wq, &xd->state_work,
			   msecs_to_jiffies(XDOMAIN_DEFAULT_TIMEOUT));
}

static void tb_xdomain_queue_bonding(struct tb_xdomain *xd)
{
	if (memcmp(xd->local_uuid, xd->remote_uuid, UUID_SIZE) > 0) {
		dev_dbg(&xd->dev, "we have higher UUID, other side bonds the lanes\n");
		xd->state = XDOMAIN_STATE_BONDING_UUID_HIGH;
	} else {
		dev_dbg(&xd->dev, "we have lower UUID, bonding lanes\n");
		xd->state = XDOMAIN_STATE_LINK_STATE_CHANGE;
	}

	xd->state_retries = XDOMAIN_RETRIES;
	queue_delayed_work(xd->tb->wq, &xd->state_work,
			   msecs_to_jiffies(XDOMAIN_DEFAULT_TIMEOUT));
}

static void tb_xdomain_queue_bonding_uuid_low(struct tb_xdomain *xd)
{
	xd->state = XDOMAIN_STATE_BONDING_UUID_LOW;
	xd->state_retries = XDOMAIN_RETRIES;
	queue_delayed_work(xd->tb->wq, &xd->state_work,
			   msecs_to_jiffies(XDOMAIN_DEFAULT_TIMEOUT));
}

static void tb_xdomain_queue_properties(struct tb_xdomain *xd)
{
	xd->state = XDOMAIN_STATE_PROPERTIES;
	xd->state_retries = XDOMAIN_RETRIES;
	queue_delayed_work(xd->tb->wq, &xd->state_work,
			   msecs_to_jiffies(XDOMAIN_DEFAULT_TIMEOUT));
}

static void tb_xdomain_queue_properties_changed(struct tb_xdomain *xd)
{
	xd->properties_changed_retries = XDOMAIN_RETRIES;
	queue_delayed_work(xd->tb->wq, &xd->properties_changed_work,
			   msecs_to_jiffies(XDOMAIN_SHORT_TIMEOUT));
}

static void tb_xdomain_failed(struct tb_xdomain *xd)
{
	xd->state = XDOMAIN_STATE_ERROR;
	queue_delayed_work(xd->tb->wq, &xd->state_work,
			   msecs_to_jiffies(XDOMAIN_DEFAULT_TIMEOUT));
}

static void tb_xdomain_state_work(struct work_struct *work)
{
	struct tb_xdomain *xd = container_of(work, typeof(*xd), state_work.work);
	int ret, state = xd->state;

	if (WARN_ON_ONCE(state < XDOMAIN_STATE_INIT ||
			 state > XDOMAIN_STATE_ERROR))
		return;

	dev_dbg(&xd->dev, "running state %s\n", state_names[state]);

	switch (state) {
	case XDOMAIN_STATE_INIT:
		if (xd->needs_uuid) {
			tb_xdomain_queue_uuid(xd);
		} else {
			tb_xdomain_queue_properties_changed(xd);
			tb_xdomain_queue_properties(xd);
		}
		break;

	case XDOMAIN_STATE_UUID:
		ret = tb_xdomain_get_uuid(xd);
		if (ret) {
			if (ret == -EAGAIN)
				goto retry_state;
			tb_xdomain_failed(xd);
		} else {
			tb_xdomain_queue_properties_changed(xd);
			if (xd->bonding_possible)
				tb_xdomain_queue_link_status(xd);
			else
				tb_xdomain_queue_properties(xd);
		}
		break;

	case XDOMAIN_STATE_LINK_STATUS:
		ret = tb_xdomain_get_link_status(xd);
		if (ret) {
			if (ret == -EAGAIN)
				goto retry_state;

			/*
			 * If any of the lane bonding states fail we skip
			 * bonding completely and try to continue from
			 * reading properties.
			 */
			tb_xdomain_queue_properties(xd);
		} else {
			tb_xdomain_queue_bonding(xd);
		}
		break;

	case XDOMAIN_STATE_LINK_STATE_CHANGE:
		ret = tb_xdomain_link_state_change(xd, 2);
		if (ret) {
			if (ret == -EAGAIN)
				goto retry_state;
			tb_xdomain_queue_properties(xd);
		} else {
			tb_xdomain_queue_link_status2(xd);
		}
		break;

	case XDOMAIN_STATE_LINK_STATUS2:
		ret = tb_xdomain_get_link_status(xd);
		if (ret) {
			if (ret == -EAGAIN)
				goto retry_state;
			tb_xdomain_queue_properties(xd);
		} else {
			tb_xdomain_queue_bonding_uuid_low(xd);
		}
		break;

	case XDOMAIN_STATE_BONDING_UUID_LOW:
		tb_xdomain_lane_bonding_enable(xd);
		tb_xdomain_queue_properties(xd);
		break;

	case XDOMAIN_STATE_BONDING_UUID_HIGH:
		if (tb_xdomain_bond_lanes_uuid_high(xd) == -EAGAIN)
			goto retry_state;
		tb_xdomain_queue_properties(xd);
		break;

	case XDOMAIN_STATE_PROPERTIES:
		ret = tb_xdomain_get_properties(xd);
		if (ret) {
			if (ret == -EAGAIN)
				goto retry_state;
			tb_xdomain_failed(xd);
		} else {
			xd->state = XDOMAIN_STATE_ENUMERATED;
		}
		break;

	case XDOMAIN_STATE_ENUMERATED:
		tb_xdomain_queue_properties(xd);
		break;

	case XDOMAIN_STATE_ERROR:
		dev_dbg(&xd->dev, "discovery failed, stopping handshake\n");
		__stop_handshake(xd);
		break;

	default:
		dev_warn(&xd->dev, "unexpected state %d\n", state);
		break;
	}

	return;

retry_state:
	queue_delayed_work(xd->tb->wq, &xd->state_work,
			   msecs_to_jiffies(XDOMAIN_DEFAULT_TIMEOUT));
}
static void tb_xdomain_properties_changed(struct work_struct *work)
{
	struct tb_xdomain *xd = container_of(work, typeof(*xd),
					     properties_changed_work.work);
	int ret;

	dev_dbg(&xd->dev, "sending properties changed notification\n");

	ret = tb_xdp_properties_changed_request(xd->tb->ctl, xd->route,
				xd->properties_changed_retries, xd->local_uuid);
	if (ret) {
		if (xd->properties_changed_retries-- > 0) {
			dev_dbg(&xd->dev,
				"failed to send properties changed notification, retrying\n");
			queue_delayed_work(xd->tb->wq,
					   &xd->properties_changed_work,
					   msecs_to_jiffies(XDOMAIN_DEFAULT_TIMEOUT));
			return;
		}
		/* Log the error only once the retries are exhausted */
		dev_err(&xd->dev, "failed to send properties changed notification\n");
		return;
	}

	xd->properties_changed_retries = XDOMAIN_RETRIES;
}
static ssize_t device_show(struct device *dev, struct device_attribute *attr,
			   char *buf)
{
	struct tb_xdomain *xd = container_of(dev, struct tb_xdomain, dev);

	return sysfs_emit(buf, "%#x\n", xd->device);
}
static DEVICE_ATTR_RO(device);

static ssize_t
device_name_show(struct device *dev, struct device_attribute *attr, char *buf)
{
	struct tb_xdomain *xd = container_of(dev, struct tb_xdomain, dev);
	int ret;

	if (mutex_lock_interruptible(&xd->lock))
		return -ERESTARTSYS;
	ret = sysfs_emit(buf, "%s\n", xd->device_name ?: "");
	mutex_unlock(&xd->lock);

	return ret;
}
static DEVICE_ATTR_RO(device_name);

static ssize_t maxhopid_show(struct device *dev, struct device_attribute *attr,
			     char *buf)
{
	struct tb_xdomain *xd = container_of(dev, struct tb_xdomain, dev);

	return sysfs_emit(buf, "%d\n", xd->remote_max_hopid);
}
static DEVICE_ATTR_RO(maxhopid);

static ssize_t vendor_show(struct device *dev, struct device_attribute *attr,
			   char *buf)
{
	struct tb_xdomain *xd = container_of(dev, struct tb_xdomain, dev);

	return sysfs_emit(buf, "%#x\n", xd->vendor);
}
static DEVICE_ATTR_RO(vendor);

static ssize_t
vendor_name_show(struct device *dev, struct device_attribute *attr, char *buf)
{
	struct tb_xdomain *xd = container_of(dev, struct tb_xdomain, dev);
	int ret;

	if (mutex_lock_interruptible(&xd->lock))
		return -ERESTARTSYS;
	ret = sysfs_emit(buf, "%s\n", xd->vendor_name ?: "");
	mutex_unlock(&xd->lock);

	return ret;
}
static DEVICE_ATTR_RO(vendor_name);

static ssize_t unique_id_show(struct device *dev, struct device_attribute *attr,
			      char *buf)
{
	struct tb_xdomain *xd = container_of(dev, struct tb_xdomain, dev);

	return sysfs_emit(buf, "%pUb\n", xd->remote_uuid);
}
static DEVICE_ATTR_RO(unique_id);

static ssize_t speed_show(struct device *dev, struct device_attribute *attr,
			  char *buf)
{
	struct tb_xdomain *xd = container_of(dev, struct tb_xdomain, dev);

	return sysfs_emit(buf, "%u.0 Gb/s\n", xd->link_speed);
}
static DEVICE_ATTR(rx_speed, 0444, speed_show, NULL);
static DEVICE_ATTR(tx_speed, 0444, speed_show, NULL);

static ssize_t rx_lanes_show(struct device *dev, struct device_attribute *attr,
			     char *buf)
{
	struct tb_xdomain *xd = container_of(dev, struct tb_xdomain, dev);
	unsigned int width;

	switch (xd->link_width) {
	case TB_LINK_WIDTH_SINGLE:
	case TB_LINK_WIDTH_ASYM_TX:
		width = 1;
		break;
	case TB_LINK_WIDTH_DUAL:
		width = 2;
		break;
	case TB_LINK_WIDTH_ASYM_RX:
		width = 3;
		break;
	default:
		WARN_ON_ONCE(1);
		return -EINVAL;
	}

	return sysfs_emit(buf, "%u\n", width);
}
static DEVICE_ATTR(rx_lanes, 0444, rx_lanes_show, NULL);

static ssize_t tx_lanes_show(struct device *dev, struct device_attribute *attr,
			     char *buf)
{
	struct tb_xdomain *xd = container_of(dev, struct tb_xdomain, dev);
	unsigned int width;

	switch (xd->link_width) {
	case TB_LINK_WIDTH_SINGLE:
	case TB_LINK_WIDTH_ASYM_RX:
		width = 1;
		break;
	case TB_LINK_WIDTH_DUAL:
		width = 2;
		break;
	case TB_LINK_WIDTH_ASYM_TX:
		width = 3;
		break;
	default:
		WARN_ON_ONCE(1);
		return -EINVAL;
	}

	return sysfs_emit(buf, "%u\n", width);
}
static DEVICE_ATTR(tx_lanes, 0444, tx_lanes_show, NULL);

static struct attribute *xdomain_attrs[] = {
	&dev_attr_device.attr,
	&dev_attr_device_name.attr,
	&dev_attr_maxhopid.attr,
	&dev_attr_rx_lanes.attr,
	&dev_attr_rx_speed.attr,
	&dev_attr_tx_lanes.attr,
	&dev_attr_tx_speed.attr,
	&dev_attr_unique_id.attr,
	&dev_attr_vendor.attr,
	&dev_attr_vendor_name.attr,
	NULL,
};

static const struct attribute_group xdomain_attr_group = {
	.attrs = xdomain_attrs,
};

static const struct attribute_group *xdomain_attr_groups[] = {
	&xdomain_attr_group,
	NULL,
};
static void tb_xdomain_release(struct device *dev)
{
	struct tb_xdomain *xd = container_of(dev, struct tb_xdomain, dev);

	put_device(xd->dev.parent);

	kfree(xd->local_property_block);
	tb_property_free_dir(xd->remote_properties);
	ida_destroy(&xd->out_hopids);
	ida_destroy(&xd->in_hopids);
	ida_destroy(&xd->service_ids);

	kfree(xd->local_uuid);
	kfree(xd->remote_uuid);
	kfree(xd->device_name);
	kfree(xd->vendor_name);
	kfree(xd);
}

static int __maybe_unused tb_xdomain_suspend(struct device *dev)
{
	stop_handshake(tb_to_xdomain(dev));
	return 0;
}

static int __maybe_unused tb_xdomain_resume(struct device *dev)
{
	start_handshake(tb_to_xdomain(dev));
	return 0;
}

static const struct dev_pm_ops tb_xdomain_pm_ops = {
	SET_SYSTEM_SLEEP_PM_OPS(tb_xdomain_suspend, tb_xdomain_resume)
};

const struct device_type tb_xdomain_type = {
	.name = "thunderbolt_xdomain",
	.release = tb_xdomain_release,
	.pm = &tb_xdomain_pm_ops,
};
EXPORT_SYMBOL_GPL(tb_xdomain_type);

static void tb_xdomain_link_init(struct tb_xdomain *xd, struct tb_port *down)
{
	if (!down->dual_link_port)
		return;

	/*
	 * Gen 4 links come up already as bonded so only update the port
	 * structures here.
	 */
	if (tb_port_get_link_generation(down) >= 4) {
		down->bonded = true;
		down->dual_link_port->bonded = true;
	} else {
		xd->bonding_possible = true;
	}
}

static void tb_xdomain_link_exit(struct tb_xdomain *xd)
{
	struct tb_port *down = tb_xdomain_downstream_port(xd);

	if (!down->dual_link_port)
		return;

	if (tb_port_get_link_generation(down) >= 4) {
		down->bonded = false;
		down->dual_link_port->bonded = false;
	} else if (xd->link_width > TB_LINK_WIDTH_SINGLE) {
		/*
		 * Just return the port structures back to the way they
		 * were and update credits. No need to update userspace
		 * because the XDomain is removed soon anyway.
		 */
		tb_port_lane_bonding_disable(down);
		tb_port_update_credits(down);
	} else {
		/*
		 * Re-enable the lane 1 adapter we disabled at the end
		 * of tb_xdomain_get_properties().
		 */
		tb_port_enable(down->dual_link_port);
	}
}
/**
 * tb_xdomain_alloc() - Allocate new XDomain object
 * @tb: Domain where the XDomain belongs
 * @parent: Parent device (the switch through which the connection to
 *	    the other domain is reached)
 * @route: Route string used to reach the other domain
 * @local_uuid: Our local domain UUID
 * @remote_uuid: UUID of the other domain (optional)
 *
 * Allocates new XDomain structure and returns pointer to that. The
 * object must be released by calling tb_xdomain_put().
 */
struct tb_xdomain *tb_xdomain_alloc(struct tb *tb, struct device *parent,
				    u64 route, const uuid_t *local_uuid,
				    const uuid_t *remote_uuid)
{
	struct tb_switch *parent_sw = tb_to_switch(parent);
	struct tb_xdomain *xd;
	struct tb_port *down;

	/* Make sure the downstream domain is accessible */
	down = tb_port_at(route, parent_sw);
	tb_port_unlock(down);

	xd = kzalloc(sizeof(*xd), GFP_KERNEL);
	if (!xd)
		return NULL;

	xd->tb = tb;
	xd->route = route;
	xd->local_max_hopid = down->config.max_in_hop_id;
	ida_init(&xd->service_ids);
	ida_init(&xd->in_hopids);
	ida_init(&xd->out_hopids);
	mutex_init(&xd->lock);
	INIT_DELAYED_WORK(&xd->state_work, tb_xdomain_state_work);
	INIT_DELAYED_WORK(&xd->properties_changed_work,
			  tb_xdomain_properties_changed);

	xd->local_uuid = kmemdup(local_uuid, sizeof(uuid_t), GFP_KERNEL);
	if (!xd->local_uuid)
		goto err_free;

	if (remote_uuid) {
		xd->remote_uuid = kmemdup(remote_uuid, sizeof(uuid_t),
					  GFP_KERNEL);
		if (!xd->remote_uuid)
			goto err_free_local_uuid;
	} else {
		xd->needs_uuid = true;

		tb_xdomain_link_init(xd, down);
	}

	device_initialize(&xd->dev);
	xd->dev.parent = get_device(parent);
	xd->dev.bus = &tb_bus_type;
	xd->dev.type = &tb_xdomain_type;
	xd->dev.groups = xdomain_attr_groups;
	dev_set_name(&xd->dev, "%u-%llx", tb->index, route);

	dev_dbg(&xd->dev, "local UUID %pUb\n", local_uuid);
	if (remote_uuid)
		dev_dbg(&xd->dev, "remote UUID %pUb\n", remote_uuid);

	/*
	 * This keeps the DMA powered on as long as we have active
	 * connection to another host.
	 */
	pm_runtime_set_active(&xd->dev);
	pm_runtime_get_noresume(&xd->dev);
	pm_runtime_enable(&xd->dev);

	return xd;

err_free_local_uuid:
	kfree(xd->local_uuid);
err_free:
	kfree(xd);

	return NULL;
}

/**
 * tb_xdomain_add() - Add XDomain to the bus
 * @xd: XDomain to add
 *
 * This function starts XDomain discovery protocol handshake and
 * eventually adds the XDomain to the bus. After calling this function
 * the caller needs to call tb_xdomain_remove() in order to remove and
 * release the object regardless of whether the handshake succeeded or
 * not.
 */
void tb_xdomain_add(struct tb_xdomain *xd)
{
	/* Start exchanging properties with the other host */
	start_handshake(xd);
}
static int unregister_service(struct device *dev, void *data)
{
	device_unregister(dev);
	return 0;
}

/**
 * tb_xdomain_remove() - Remove XDomain from the bus
 * @xd: XDomain to remove
 *
 * This will stop all ongoing configuration work and remove the XDomain
 * along with any services from the bus. When the last reference to @xd
 * is released the object will be released as well.
 */
void tb_xdomain_remove(struct tb_xdomain *xd)
{
	tb_xdomain_debugfs_remove(xd);

	stop_handshake(xd);

	device_for_each_child_reverse(&xd->dev, xd, unregister_service);

	tb_xdomain_link_exit(xd);

	/*
	 * Undo runtime PM here explicitly because it is possible that
	 * the XDomain was never added to the bus and thus device_del()
	 * is not called for it (device_del() would handle this otherwise).
	 */
	pm_runtime_disable(&xd->dev);
	pm_runtime_put_noidle(&xd->dev);
	pm_runtime_set_suspended(&xd->dev);

	if (!device_is_registered(&xd->dev)) {
		put_device(&xd->dev);
	} else {
		dev_info(&xd->dev, "host disconnected\n");
		device_unregister(&xd->dev);
	}
}
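
/*
 * Example (illustrative sketch, not part of the driver): how a
 * connection manager pairs tb_xdomain_alloc()/tb_xdomain_add() with
 * tb_xdomain_remove(). The sw, route and UUID variables here are
 * assumptions made up for the example.
 */
#if 0
	struct tb_xdomain *xd;

	xd = tb_xdomain_alloc(tb, &sw->dev, route, local_uuid, remote_uuid);
	if (xd)
		tb_xdomain_add(xd);	/* kicks off the discovery handshake */

	/* ... later, when the remote host is disconnected ... */
	tb_xdomain_remove(xd);
#endif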
/**
 * tb_xdomain_lane_bonding_enable() - Enable lane bonding on XDomain
 * @xd: XDomain connection
 *
 * Lane bonding is disabled by default for XDomains. This function tries
 * to enable bonding by first enabling the port and waiting for the CL0
 * state.
 *
 * Return: %0 in case of success and negative errno in case of error.
 */
int tb_xdomain_lane_bonding_enable(struct tb_xdomain *xd)
{
	unsigned int width_mask;
	struct tb_port *port;
	int ret;

	port = tb_xdomain_downstream_port(xd);
	if (!port->dual_link_port)
		return -ENODEV;

	ret = tb_port_enable(port->dual_link_port);
	if (ret)
		return ret;

	ret = tb_wait_for_port(port->dual_link_port, true);
	if (ret < 0)
		return ret;
	if (!ret)
		return -ENOTCONN;

	ret = tb_port_lane_bonding_enable(port);
	if (ret) {
		tb_port_warn(port, "failed to enable lane bonding\n");
		return ret;
	}

	/* Any of these widths means the lanes are bonded */
	width_mask = TB_LINK_WIDTH_DUAL | TB_LINK_WIDTH_ASYM_TX |
		     TB_LINK_WIDTH_ASYM_RX;

	ret = tb_port_wait_for_link_width(port, width_mask,
					  XDOMAIN_BONDING_TIMEOUT);
	if (ret) {
		tb_port_warn(port, "failed to enable lane bonding\n");
		return ret;
	}

	tb_port_update_credits(port);
	tb_xdomain_update_link_attributes(xd);

	dev_dbg(&xd->dev, "lane bonding enabled\n");
	return 0;
}
EXPORT_SYMBOL_GPL(tb_xdomain_lane_bonding_enable);
/**
 * tb_xdomain_lane_bonding_disable() - Disable lane bonding
 * @xd: XDomain connection
 *
 * Lane bonding is disabled by default for XDomains. If bonding has been
 * enabled, this function can be used to disable it.
 */
void tb_xdomain_lane_bonding_disable(struct tb_xdomain *xd)
{
	struct tb_port *port;

	port = tb_xdomain_downstream_port(xd);
	if (port->dual_link_port) {
		int ret;

		tb_port_lane_bonding_disable(port);
		ret = tb_port_wait_for_link_width(port, TB_LINK_WIDTH_SINGLE, 100);
		if (ret == -ETIMEDOUT)
			tb_port_warn(port, "timeout disabling lane bonding\n");
		tb_port_disable(port->dual_link_port);
		tb_port_update_credits(port);
		tb_xdomain_update_link_attributes(xd);

		dev_dbg(&xd->dev, "lane bonding disabled\n");
	}
}
EXPORT_SYMBOL_GPL(tb_xdomain_lane_bonding_disable);
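
/*
 * Example (illustrative sketch): a service driver bracketing a
 * bandwidth-hungry transfer with the bonding API above. Whether this is
 * worthwhile depends on the hardware; only the two calls themselves are
 * taken from this file.
 */
#if 0
	if (!tb_xdomain_lane_bonding_enable(xd))
		dev_dbg(&xd->dev, "transfer runs over bonded lanes\n");

	/* ... high-speed DMA traffic ... */

	tb_xdomain_lane_bonding_disable(xd);
#endif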
/**
 * tb_xdomain_alloc_in_hopid() - Allocate input HopID for tunneling
 * @xd: XDomain connection
 * @hopid: Preferred HopID or %-1 for next available
 *
 * Returns allocated HopID or negative errno. Specifically returns
 * %-ENOSPC if there are no more available HopIDs. Returned HopID is
 * guaranteed to be within range supported by the input lane adapter.
 * Call tb_xdomain_release_in_hopid() to release the allocated HopID.
 */
int tb_xdomain_alloc_in_hopid(struct tb_xdomain *xd, int hopid)
{
	if (hopid < 0)
		hopid = TB_PATH_MIN_HOPID;
	if (hopid < TB_PATH_MIN_HOPID || hopid > xd->local_max_hopid)
		return -EINVAL;

	return ida_alloc_range(&xd->in_hopids, hopid, xd->local_max_hopid,
			       GFP_KERNEL);
}
EXPORT_SYMBOL_GPL(tb_xdomain_alloc_in_hopid);

/**
 * tb_xdomain_alloc_out_hopid() - Allocate output HopID for tunneling
 * @xd: XDomain connection
 * @hopid: Preferred HopID or %-1 for next available
 *
 * Returns allocated HopID or negative errno. Specifically returns
 * %-ENOSPC if there are no more available HopIDs. Returned HopID is
 * guaranteed to be within range supported by the output lane adapter.
 * Call tb_xdomain_release_out_hopid() to release the allocated HopID.
 */
int tb_xdomain_alloc_out_hopid(struct tb_xdomain *xd, int hopid)
{
	if (hopid < 0)
		hopid = TB_PATH_MIN_HOPID;
	if (hopid < TB_PATH_MIN_HOPID || hopid > xd->remote_max_hopid)
		return -EINVAL;

	return ida_alloc_range(&xd->out_hopids, hopid, xd->remote_max_hopid,
			       GFP_KERNEL);
}
EXPORT_SYMBOL_GPL(tb_xdomain_alloc_out_hopid);

/**
 * tb_xdomain_release_in_hopid() - Release input HopID
 * @xd: XDomain connection
 * @hopid: HopID to release
 */
void tb_xdomain_release_in_hopid(struct tb_xdomain *xd, int hopid)
{
	ida_free(&xd->in_hopids, hopid);
}
EXPORT_SYMBOL_GPL(tb_xdomain_release_in_hopid);

/**
 * tb_xdomain_release_out_hopid() - Release output HopID
 * @xd: XDomain connection
 * @hopid: HopID to release
 */
void tb_xdomain_release_out_hopid(struct tb_xdomain *xd, int hopid)
{
	ida_free(&xd->out_hopids, hopid);
}
EXPORT_SYMBOL_GPL(tb_xdomain_release_out_hopid);
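
/*
 * Example (illustrative sketch): allocating HopIDs for both directions.
 * Passing %-1 picks the next available HopID; a non-negative value acts
 * as the lower bound of the IDA search. Error handling is shortened
 * here for brevity.
 */
#if 0
	int in_hopid, out_hopid;

	in_hopid = tb_xdomain_alloc_in_hopid(xd, -1);
	out_hopid = tb_xdomain_alloc_out_hopid(xd, -1);

	/* ... set up a DMA tunnel using the allocated HopIDs ... */

	tb_xdomain_release_out_hopid(xd, out_hopid);
	tb_xdomain_release_in_hopid(xd, in_hopid);
#endif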
/**
 * tb_xdomain_enable_paths() - Enable DMA paths for XDomain connection
 * @xd: XDomain connection
 * @transmit_path: HopID we are using to send out packets
 * @transmit_ring: DMA ring used to send out packets
 * @receive_path: HopID the other end is using to send packets to us
 * @receive_ring: DMA ring used to receive packets from @receive_path
 *
 * The function enables DMA paths accordingly so that after successful
 * return the caller can send and receive packets using high-speed DMA
 * path. If a transmit or receive path is not needed, pass %-1 for those
 * parameters.
 *
 * Return: %0 in case of success and negative errno in case of error
 */
int tb_xdomain_enable_paths(struct tb_xdomain *xd, int transmit_path,
			    int transmit_ring, int receive_path,
			    int receive_ring)
{
	return tb_domain_approve_xdomain_paths(xd->tb, xd, transmit_path,
					       transmit_ring, receive_path,
					       receive_ring);
}
EXPORT_SYMBOL_GPL(tb_xdomain_enable_paths);

/**
 * tb_xdomain_disable_paths() - Disable DMA paths for XDomain connection
 * @xd: XDomain connection
 * @transmit_path: HopID we are using to send out packets
 * @transmit_ring: DMA ring used to send out packets
 * @receive_path: HopID the other end is using to send packets to us
 * @receive_ring: DMA ring used to receive packets from @receive_path
 *
 * This does the opposite of tb_xdomain_enable_paths(). After call to
 * this the caller is not expected to use the rings anymore. Passing %-1
 * as path/ring parameter means don't care. Normally the callers should
 * pass the same values here as they do when paths are enabled.
 *
 * Return: %0 in case of success and negative errno in case of error
 */
int tb_xdomain_disable_paths(struct tb_xdomain *xd, int transmit_path,
			     int transmit_ring, int receive_path,
			     int receive_ring)
{
	return tb_domain_disconnect_xdomain_paths(xd->tb, xd, transmit_path,
						  transmit_ring, receive_path,
						  receive_ring);
}
EXPORT_SYMBOL_GPL(tb_xdomain_disable_paths);
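
/*
 * Example (illustrative sketch): enabling and later tearing down the
 * DMA paths once the HopIDs (above) and the NHI rings exist. tx_ring
 * and rx_ring are assumed to be struct tb_ring pointers the driver
 * allocated elsewhere; transmit_path/receive_path are the HopIDs
 * negotiated with the remote host.
 */
#if 0
	ret = tb_xdomain_enable_paths(xd, transmit_path, tx_ring->hop,
				      receive_path, rx_ring->hop);
	if (ret)
		return ret;

	/* ... exchange packets over the high-speed DMA paths ... */

	tb_xdomain_disable_paths(xd, transmit_path, tx_ring->hop,
				 receive_path, rx_ring->hop);
#endif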
struct tb_xdomain_lookup {
	const uuid_t *uuid;
	u8 link;
	u8 depth;
	u64 route;
};

static struct tb_xdomain *switch_find_xdomain(struct tb_switch *sw,
	const struct tb_xdomain_lookup *lookup)
{
	struct tb_port *port;

	tb_switch_for_each_port(sw, port) {
		struct tb_xdomain *xd;

		if (port->xdomain) {
			xd = port->xdomain;

			if (lookup->uuid) {
				if (xd->remote_uuid &&
				    uuid_equal(xd->remote_uuid, lookup->uuid))
					return xd;
			} else {
				if (lookup->link && lookup->link == xd->link &&
				    lookup->depth == xd->depth)
					return xd;
				if (lookup->route && lookup->route == xd->route)
					return xd;
			}
		} else if (tb_port_has_remote(port)) {
			xd = switch_find_xdomain(port->remote->sw, lookup);
			if (xd)
				return xd;
		}
	}

	return NULL;
}
/**
 * tb_xdomain_find_by_uuid() - Find an XDomain by UUID
 * @tb: Domain where the XDomain belongs to
 * @uuid: UUID to look for
 *
 * Finds XDomain by walking through the Thunderbolt topology below @tb.
 * The returned XDomain will have its reference count increased so the
 * caller needs to call tb_xdomain_put() when it is done with the
 * object.
 *
 * This will find all XDomains including the ones that are not yet added
 * to the bus (handshake is still in progress).
 *
 * The caller needs to hold @tb->lock.
 */
struct tb_xdomain *tb_xdomain_find_by_uuid(struct tb *tb, const uuid_t *uuid)
{
	struct tb_xdomain_lookup lookup;
	struct tb_xdomain *xd;

	memset(&lookup, 0, sizeof(lookup));
	lookup.uuid = uuid;

	xd = switch_find_xdomain(tb->root_switch, &lookup);
	return tb_xdomain_get(xd);
}
EXPORT_SYMBOL_GPL(tb_xdomain_find_by_uuid);

/**
 * tb_xdomain_find_by_link_depth() - Find an XDomain by link and depth
 * @tb: Domain where the XDomain belongs to
 * @link: Root switch link number
 * @depth: Depth in the link
 *
 * Finds XDomain by walking through the Thunderbolt topology below @tb.
 * The returned XDomain will have its reference count increased so the
 * caller needs to call tb_xdomain_put() when it is done with the
 * object.
 *
 * This will find all XDomains including the ones that are not yet added
 * to the bus (handshake is still in progress).
 *
 * The caller needs to hold @tb->lock.
 */
struct tb_xdomain *tb_xdomain_find_by_link_depth(struct tb *tb, u8 link,
						 u8 depth)
{
	struct tb_xdomain_lookup lookup;
	struct tb_xdomain *xd;

	memset(&lookup, 0, sizeof(lookup));
	lookup.link = link;
	lookup.depth = depth;

	xd = switch_find_xdomain(tb->root_switch, &lookup);
	return tb_xdomain_get(xd);
}

/**
 * tb_xdomain_find_by_route() - Find an XDomain by route string
 * @tb: Domain where the XDomain belongs to
 * @route: XDomain route string
 *
 * Finds XDomain by walking through the Thunderbolt topology below @tb.
 * The returned XDomain will have its reference count increased so the
 * caller needs to call tb_xdomain_put() when it is done with the
 * object.
 *
 * This will find all XDomains including the ones that are not yet added
 * to the bus (handshake is still in progress).
 *
 * The caller needs to hold @tb->lock.
 */
struct tb_xdomain *tb_xdomain_find_by_route(struct tb *tb, u64 route)
{
	struct tb_xdomain_lookup lookup;
	struct tb_xdomain *xd;

	memset(&lookup, 0, sizeof(lookup));
	lookup.route = route;

	xd = switch_find_xdomain(tb->root_switch, &lookup);
	return tb_xdomain_get(xd);
}
EXPORT_SYMBOL_GPL(tb_xdomain_find_by_route);
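
/*
 * Example (illustrative sketch): looking up an XDomain by route under
 * the domain lock and dropping the acquired reference when done, as the
 * kernel-doc above requires.
 */
#if 0
	struct tb_xdomain *xd;

	mutex_lock(&tb->lock);
	xd = tb_xdomain_find_by_route(tb, route);
	mutex_unlock(&tb->lock);

	if (xd) {
		/* ... use xd ... */
		tb_xdomain_put(xd);
	}
#endif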
bool tb_xdomain_handle_request(struct tb *tb, enum tb_cfg_pkg_type type,
			       const void *buf, size_t size)
{
	const struct tb_protocol_handler *handler, *tmp;
	const struct tb_xdp_header *hdr = buf;
	unsigned int length;
	int ret = 0;

	/* We expect the packet is at least size of the header */
	length = hdr->xd_hdr.length_sn & TB_XDOMAIN_LENGTH_MASK;
	if (length != size / 4 - sizeof(hdr->xd_hdr) / 4)
		return true;
	if (length < sizeof(*hdr) / 4 - sizeof(hdr->xd_hdr) / 4)
		return true;

	/*
	 * Handle XDomain discovery protocol packets directly here. For
	 * other protocols (based on their UUID) we call registered
	 * handlers in turn.
	 */
	if (uuid_equal(&hdr->uuid, &tb_xdp_uuid)) {
		if (type == TB_CFG_PKG_XDOMAIN_REQ)
			return tb_xdp_schedule_request(tb, hdr, size);
		return false;
	}

	mutex_lock(&xdomain_lock);
	list_for_each_entry_safe(handler, tmp, &protocol_handlers, list) {
		if (!uuid_equal(&hdr->uuid, handler->uuid))
			continue;

		mutex_unlock(&xdomain_lock);
		ret = handler->callback(buf, size, handler->data);
		mutex_lock(&xdomain_lock);

		if (ret)
			break;
	}
	mutex_unlock(&xdomain_lock);

	return ret > 0;
}
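
/*
 * Example (illustrative sketch): how a protocol driver plugs into the
 * dispatch above via tb_register_protocol_handler(). The UUID and the
 * callback body are assumptions made up for the example; as the loop
 * above shows, a handler returns a positive value once it has consumed
 * the packet.
 */
#if 0
static int my_proto_callback(const void *buf, size_t size, void *data)
{
	/* Decode the packet; returning 1 stops further dispatching */
	return 1;
}

static struct tb_protocol_handler my_proto_handler = {
	.uuid = &my_proto_uuid,
	.callback = my_proto_callback,
};

static int __init my_proto_init(void)
{
	return tb_register_protocol_handler(&my_proto_handler);
}
#endif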
static int update_xdomain(struct device *dev, void *data)
{
	struct tb_xdomain *xd;

	xd = tb_to_xdomain(dev);
	if (xd) {
		queue_delayed_work(xd->tb->wq, &xd->properties_changed_work,
				   msecs_to_jiffies(50));
	}

	return 0;
}

static void update_all_xdomains(void)
{
	bus_for_each_dev(&tb_bus_type, NULL, NULL, update_xdomain);
}

static bool remove_directory(const char *key, const struct tb_property_dir *dir)
{
	struct tb_property *p;

	p = tb_property_find(xdomain_property_dir, key,
			     TB_PROPERTY_TYPE_DIRECTORY);
	if (p && p->value.dir == dir) {
		tb_property_remove(p);
		return true;
	}

	return false;
}
/**
 * tb_register_property_dir() - Register property directory to the host
 * @key: Key (name) of the directory to add
 * @dir: Directory to add
 *
 * Service drivers can use this function to add a new property directory
 * to the host's available properties. The other connected hosts are
 * notified so they can re-read properties of this host if they are
 * interested.
 *
 * Return: %0 on success and negative errno on failure
 */
int tb_register_property_dir(const char *key, struct tb_property_dir *dir)
{
	int ret;

	if (WARN_ON(!xdomain_property_dir))
		return -EAGAIN;

	if (!key || strlen(key) > 8)
		return -EINVAL;

	mutex_lock(&xdomain_lock);
	if (tb_property_find(xdomain_property_dir, key,
			     TB_PROPERTY_TYPE_DIRECTORY)) {
		ret = -EEXIST;
		goto err_unlock;
	}

	ret = tb_property_add_dir(xdomain_property_dir, key, dir);
	if (ret)
		goto err_unlock;

	xdomain_property_block_gen++;

	mutex_unlock(&xdomain_lock);
	update_all_xdomains();
	return 0;

err_unlock:
	mutex_unlock(&xdomain_lock);
	return ret;
}
EXPORT_SYMBOL_GPL(tb_register_property_dir);
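
/*
 * Example (illustrative sketch): a service driver exporting its own
 * directory. The "mysvc" key and the property values are assumptions
 * made up for the example; keys are limited to 8 characters as checked
 * above.
 */
#if 0
	struct tb_property_dir *dir;
	int ret;

	dir = tb_property_create_dir(NULL);
	if (!dir)
		return -ENOMEM;

	tb_property_add_immediate(dir, "prtcid", 1);
	tb_property_add_immediate(dir, "prtcvers", 1);

	ret = tb_register_property_dir("mysvc", dir);
	if (ret)
		tb_property_free_dir(dir);
#endif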
/**
 * tb_unregister_property_dir() - Removes property directory from host
 * @key: Key (name) of the directory
 * @dir: Directory to remove
 *
 * This will remove the existing directory from this host and notify the
 * connected hosts about the change.
 */
void tb_unregister_property_dir(const char *key, struct tb_property_dir *dir)
{
	mutex_lock(&xdomain_lock);
	if (remove_directory(key, dir))
		xdomain_property_block_gen++;
	mutex_unlock(&xdomain_lock);

	update_all_xdomains();
}
EXPORT_SYMBOL_GPL(tb_unregister_property_dir);
int tb_xdomain_init(void)
{
	xdomain_property_dir = tb_property_create_dir(NULL);
	if (!xdomain_property_dir)
		return -ENOMEM;

	/*
	 * Initialize standard set of properties without any service
	 * directories. Those will be added by service drivers
	 * themselves when they are loaded.
	 *
	 * Rest of the properties are filled dynamically based on these
	 * when the P2P connection is made.
	 */
	tb_property_add_immediate(xdomain_property_dir, "vendorid",
				  PCI_VENDOR_ID_INTEL);
	tb_property_add_text(xdomain_property_dir, "vendorid", "Intel Corp.");
	tb_property_add_immediate(xdomain_property_dir, "deviceid", 0x1);
	tb_property_add_immediate(xdomain_property_dir, "devicerv", 0x80000100);

	xdomain_property_block_gen = get_random_u32();
	return 0;
}

void tb_xdomain_exit(void)
{
	tb_property_free_dir(xdomain_property_dir);
}