// SPDX-License-Identifier: GPL-2.0
/*
 * Thunderbolt driver - bus logic (NHI independent)
 *
 * Copyright (c) 2014 Andreas Noever <andreas.noever@gmail.com>
 * Copyright (C) 2019, Intel Corporation
 */

#include <linux/slab.h>
#include <linux/errno.h>
#include <linux/delay.h>
#include <linux/pm_runtime.h>
#include <linux/platform_data/x86/apple.h>

#include "tb.h"
#include "tb_regs.h"
#include "tunnel.h"

#define TB_TIMEOUT		100	/* ms */
#define TB_RELEASE_BW_TIMEOUT	10000	/* ms */

/*
 * Minimum bandwidth (in Mb/s) that is needed in the single transmitter/receiver
 * direction. This is 40G - 10% guard band bandwidth.
 */
#define TB_ASYM_MIN		(40000 * 90 / 100)

/*
 * Threshold bandwidth (in Mb/s) that is used to switch the links to
 * asymmetric and back. This is selected as 45G which means when the
 * request is higher than this, we switch the link to asymmetric, and
 * when it is less than this we switch it back. The 45G is selected so
 * that we still have 27G (of the total 72G) for bulk PCIe traffic when
 * switching back to symmetric.
 */
#define TB_ASYM_THRESHOLD	45000

#define MAX_GROUPS		7	/* max Group_ID is 7 */

static unsigned int asym_threshold = TB_ASYM_THRESHOLD;
module_param_named(asym_threshold, asym_threshold, uint, 0444);
MODULE_PARM_DESC(asym_threshold,
		"threshold (Mb/s) when to switch Gen 4 link symmetry. 0 disables. (default: "
		__MODULE_STRING(TB_ASYM_THRESHOLD) ")");

/**
 * struct tb_cm - Simple Thunderbolt connection manager
 * @tunnel_list: List of active tunnels
 * @dp_resources: List of available DP resources for DP tunneling
 * @hotplug_active: tb_handle_hotplug will stop progressing plug
 *		    events and exit if this is not set (it needs to
 *		    acquire the lock one more time). Used to drain wq
 *		    after cfg has been paused.
 * @remove_work: Work used to remove any unplugged routers after
 *		 runtime resume
 * @groups: Bandwidth groups used in this domain.
 */
struct tb_cm {
	struct list_head tunnel_list;
	struct list_head dp_resources;
	bool hotplug_active;
	struct delayed_work remove_work;
	struct tb_bandwidth_group groups[MAX_GROUPS];
};

static inline struct tb *tcm_to_tb(struct tb_cm *tcm)
{
	return ((void *)tcm - sizeof(struct tb));
}

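/*
 * Per-event work item describing a single plug or unplug notification.
 * It carries enough information (route, port and direction) for
 * tb_handle_hotplug() to process the event later in the domain
 * workqueue.
 */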
struct tb_hotplug_event {
	struct work_struct work;
	struct tb *tb;
	u64 route;
	u8 port;
	bool unplug;
};

static void tb_handle_hotplug(struct work_struct *work);

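/*
 * Allocates and queues a hotplug event to the domain workqueue. If the
 * allocation fails the event is silently dropped.
 */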
static void tb_queue_hotplug(struct tb *tb, u64 route, u8 port, bool unplug)
{
	struct tb_hotplug_event *ev;

	ev = kmalloc(sizeof(*ev), GFP_KERNEL);
	if (!ev)
		return;

	ev->tb = tb;
	ev->route = route;
	ev->port = port;
	ev->unplug = unplug;
	INIT_WORK(&ev->work, tb_handle_hotplug);
	queue_work(tb->wq, &ev->work);
}

/* enumeration & hot plug handling */

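/*
 * Walks over all DP IN adapters of @sw and adds the ones whose DP
 * resource is available for tunneling to the domain's DP resource list.
 */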
static void tb_add_dp_resources(struct tb_switch *sw)
{
	struct tb_cm *tcm = tb_priv(sw->tb);
	struct tb_port *port;

	tb_switch_for_each_port(sw, port) {
		if (!tb_port_is_dpin(port))
			continue;

		if (!tb_switch_query_dp_resource(sw, port))
			continue;

		/*
		 * If DP IN on a device router exists, position it at the
		 * beginning of the DP resources list, so that it is used
		 * before DP IN of the host router. This way external GPU(s)
		 * will be prioritized when pairing DP IN to a DP OUT.
		 */
		if (tb_route(sw))
			list_add(&port->list, &tcm->dp_resources);
		else
			list_add_tail(&port->list, &tcm->dp_resources);

		tb_port_dbg(port, "DP IN resource available\n");
	}
}

static void tb_remove_dp_resources(struct tb_switch *sw)
{
	struct tb_cm *tcm = tb_priv(sw->tb);
	struct tb_port *port, *tmp;

	/* Clear children resources first */
	tb_switch_for_each_port(sw, port) {
		if (tb_port_has_remote(port))
			tb_remove_dp_resources(port->remote->sw);
	}

	list_for_each_entry_safe(port, tmp, &tcm->dp_resources, list) {
		if (port->sw == sw) {
			tb_port_dbg(port, "DP OUT resource unavailable\n");
			list_del_init(&port->list);
		}
	}
}

static void tb_discover_dp_resource(struct tb *tb, struct tb_port *port)
{
	struct tb_cm *tcm = tb_priv(tb);
	struct tb_port *p;

	list_for_each_entry(p, &tcm->dp_resources, list) {
		if (p == port)
			return;
	}

	tb_port_dbg(port, "DP %s resource available discovered\n",
		    tb_port_is_dpin(port) ? "IN" : "OUT");
	list_add_tail(&port->list, &tcm->dp_resources);
}

static void tb_discover_dp_resources(struct tb *tb)
{
	struct tb_cm *tcm = tb_priv(tb);
	struct tb_tunnel *tunnel;

	list_for_each_entry(tunnel, &tcm->tunnel_list, list) {
		if (tb_tunnel_is_dp(tunnel))
			tb_discover_dp_resource(tb, tunnel->dst_port);
	}
}

/* Enables CL states up to host router */
static int tb_enable_clx(struct tb_switch *sw)
{
	struct tb_cm *tcm = tb_priv(sw->tb);
	unsigned int clx = TB_CL0S | TB_CL1;
	const struct tb_tunnel *tunnel;
	int ret;

	/*
	 * Currently only enable CLx for the first link. This is enough
	 * to allow the CPU to save energy at least on Intel hardware
	 * and makes it slightly simpler to implement. We may change
	 * this in the future to cover the whole topology if it turns
	 * out to be beneficial.
	 */
	while (sw && tb_switch_depth(sw) > 1)
		sw = tb_switch_parent(sw);

	if (!sw)
		return 0;

	if (tb_switch_depth(sw) != 1)
		return 0;

	/*
	 * If we are re-enabling then check if there is an active DMA
	 * tunnel and in that case bail out.
	 */
	list_for_each_entry(tunnel, &tcm->tunnel_list, list) {
		if (tb_tunnel_is_dma(tunnel)) {
			if (tb_tunnel_port_on_path(tunnel, tb_upstream_port(sw)))
				return 0;
		}
	}

	/*
	 * Initially try with CL2. If that's not supported by the
	 * topology try with CL0s and CL1 and then give up.
	 */
	ret = tb_switch_clx_enable(sw, clx | TB_CL2);
	if (ret == -EOPNOTSUPP)
		ret = tb_switch_clx_enable(sw, clx);

	return ret == -EOPNOTSUPP ? 0 : ret;
}

/**
 * tb_disable_clx() - Disable CL states up to host router
 * @sw: Router to start
 *
 * Disables CL states from @sw up to the host router. Returns true if
 * any CL states were disabled. This can be used to figure out whether
 * the link was set up by us or the boot firmware so we don't
 * accidentally enable them if they were not enabled during discovery.
 */
static bool tb_disable_clx(struct tb_switch *sw)
{
	bool disabled = false;

	do {
		int ret;

		ret = tb_switch_clx_disable(sw);
		if (ret > 0)
			disabled = true;
		else if (ret < 0)
			tb_sw_warn(sw, "failed to disable CL states\n");

		sw = tb_switch_parent(sw);
	} while (sw);

	return disabled;
}

static int tb_increase_switch_tmu_accuracy(struct device *dev, void *data)
{
	struct tb_switch *sw;

	sw = tb_to_switch(dev);
	if (!sw)
		return 0;

	if (tb_switch_tmu_is_configured(sw, TB_SWITCH_TMU_MODE_LOWRES)) {
		enum tb_switch_tmu_mode mode;
		int ret;

		if (tb_switch_clx_is_enabled(sw, TB_CL1))
			mode = TB_SWITCH_TMU_MODE_HIFI_UNI;
		else
			mode = TB_SWITCH_TMU_MODE_HIFI_BI;

		ret = tb_switch_tmu_configure(sw, mode);
		if (ret)
			return ret;

		return tb_switch_tmu_enable(sw);
	}

	return 0;
}

static void tb_increase_tmu_accuracy(struct tb_tunnel *tunnel)
{
	struct tb_switch *sw;

	if (!tunnel)
		return;

	/*
	 * Once first DP tunnel is established we change the TMU
	 * accuracy of first depth child routers (and the host router)
	 * to the highest. This is needed for the DP tunneling to work
	 * but also allows CL0s.
	 *
	 * If both routers are v2 then we don't need to do anything as
	 * they are using enhanced TMU mode that allows all CLx.
	 */
	sw = tunnel->tb->root_switch;
	device_for_each_child(&sw->dev, NULL, tb_increase_switch_tmu_accuracy);
}

static int tb_switch_tmu_hifi_uni_required(struct device *dev, void *not_used)
{
	struct tb_switch *sw = tb_to_switch(dev);

	if (sw && tb_switch_tmu_is_enabled(sw) &&
	    tb_switch_tmu_is_configured(sw, TB_SWITCH_TMU_MODE_HIFI_UNI))
		return 1;

	return device_for_each_child(dev, NULL,
				     tb_switch_tmu_hifi_uni_required);
}

static bool tb_tmu_hifi_uni_required(struct tb *tb)
{
	return device_for_each_child(&tb->dev, NULL,
				     tb_switch_tmu_hifi_uni_required) == 1;
}

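/*
 * Picks a suitable TMU mode for @sw based on the router generation and
 * the current CLx configuration, then makes sure the TMU is enabled in
 * that mode.
 */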
static int tb_enable_tmu(struct tb_switch *sw)
{
	int ret;

	/*
	 * If both routers at the end of the link are v2 we simply
	 * enable the enhanced uni-directional mode. That covers all
	 * the CL states. For v1 and before we need to use the normal
	 * rate to allow CL1 (when supported). Otherwise we keep the TMU
	 * running at the highest accuracy.
	 */
	ret = tb_switch_tmu_configure(sw,
			TB_SWITCH_TMU_MODE_MEDRES_ENHANCED_UNI);
	if (ret == -EOPNOTSUPP) {
		if (tb_switch_clx_is_enabled(sw, TB_CL1)) {
			/*
			 * Figure out uni-directional HiFi TMU requirements
			 * currently in the domain. If there are no
			 * uni-directional HiFi requirements we can put the TMU
			 * into LowRes mode.
			 *
			 * Deliberately skip bi-directional HiFi links
			 * as these work independently of other links
			 * (and they do not allow any CL states anyway).
			 */
			if (tb_tmu_hifi_uni_required(sw->tb))
				ret = tb_switch_tmu_configure(sw,
						TB_SWITCH_TMU_MODE_HIFI_UNI);
			else
				ret = tb_switch_tmu_configure(sw,
						TB_SWITCH_TMU_MODE_LOWRES);
		} else {
			ret = tb_switch_tmu_configure(sw, TB_SWITCH_TMU_MODE_HIFI_BI);
		}

		/* If not supported, fallback to bi-directional HiFi */
		if (ret == -EOPNOTSUPP)
			ret = tb_switch_tmu_configure(sw, TB_SWITCH_TMU_MODE_HIFI_BI);
	}
	if (ret)
		return ret;

	/* If it is already enabled in correct mode, don't touch it */
	if (tb_switch_tmu_is_enabled(sw))
		return 0;

	ret = tb_switch_tmu_disable(sw);
	if (ret)
		return ret;

	ret = tb_switch_tmu_post_time(sw);
	if (ret)
		return ret;

	return tb_switch_tmu_enable(sw);
}

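/*
 * Discovers tunnels set up by the boot firmware starting from @sw and
 * recursively from every router below it, adding the found tunnels to
 * @list.
 */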
static void tb_switch_discover_tunnels(struct tb_switch *sw,
				       struct list_head *list,
				       bool alloc_hopids)
{
	struct tb *tb = sw->tb;
	struct tb_port *port;

	tb_switch_for_each_port(sw, port) {
		struct tb_tunnel *tunnel = NULL;

		switch (port->config.type) {
		case TB_TYPE_DP_HDMI_IN:
			tunnel = tb_tunnel_discover_dp(tb, port, alloc_hopids);
			tb_increase_tmu_accuracy(tunnel);
			break;

		case TB_TYPE_PCIE_DOWN:
			tunnel = tb_tunnel_discover_pci(tb, port, alloc_hopids);
			break;

		case TB_TYPE_USB3_DOWN:
			tunnel = tb_tunnel_discover_usb3(tb, port, alloc_hopids);
			break;

		default:
			break;
		}

		if (tunnel)
			list_add_tail(&tunnel->list, list);
	}

	tb_switch_for_each_port(sw, port) {
		if (tb_port_has_remote(port)) {
			tb_switch_discover_tunnels(port->remote->sw, list,
						   alloc_hopids);
		}
	}
}

static int tb_port_configure_xdomain(struct tb_port *port, struct tb_xdomain *xd)
{
	if (tb_switch_is_usb4(port->sw))
		return usb4_port_configure_xdomain(port, xd);
	return tb_lc_configure_xdomain(port);
}

static void tb_port_unconfigure_xdomain(struct tb_port *port)
{
	if (tb_switch_is_usb4(port->sw))
		usb4_port_unconfigure_xdomain(port);
	else
		tb_lc_unconfigure_xdomain(port);
}

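/*
 * Checks whether @port leads to another Thunderbolt domain (host-to-host
 * connection) and, if XDomain is enabled and no XDomain exists for the
 * route yet, creates and registers one.
 */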
static void tb_scan_xdomain(struct tb_port *port)
{
	struct tb_switch *sw = port->sw;
	struct tb *tb = sw->tb;
	struct tb_xdomain *xd;
	u64 route;

	if (!tb_is_xdomain_enabled())
		return;

	route = tb_downstream_route(port);
	xd = tb_xdomain_find_by_route(tb, route);
	if (xd) {
		tb_xdomain_put(xd);
		return;
	}

	xd = tb_xdomain_alloc(tb, &sw->dev, route, tb->root_switch->uuid,
			      NULL);
	if (xd) {
		tb_port_at(route, sw)->xdomain = xd;
		tb_port_configure_xdomain(port, xd);
		tb_xdomain_add(xd);
	}
}

/**
 * tb_find_unused_port() - return the first inactive port on @sw
 * @sw: Switch to find the port on
 * @type: Port type to look for
 */
static struct tb_port *tb_find_unused_port(struct tb_switch *sw,
					   enum tb_port_type type)
{
	struct tb_port *port;

	tb_switch_for_each_port(sw, port) {
		if (tb_is_upstream_port(port))
			continue;
		if (port->config.type != type)
			continue;
		if (!port->cap_adap)
			continue;
		if (tb_port_is_enabled(port))
			continue;
		return port;
	}
	return NULL;
}

static struct tb_port *tb_find_usb3_down(struct tb_switch *sw,
					 const struct tb_port *port)
{
	struct tb_port *down;

	down = usb4_switch_map_usb3_down(sw, port);
	if (down && !tb_usb3_port_is_enabled(down))
		return down;
	return NULL;
}

static struct tb_tunnel *tb_find_tunnel(struct tb *tb, enum tb_tunnel_type type,
					struct tb_port *src_port,
					struct tb_port *dst_port)
{
	struct tb_cm *tcm = tb_priv(tb);
	struct tb_tunnel *tunnel;

	list_for_each_entry(tunnel, &tcm->tunnel_list, list) {
		if (tunnel->type == type &&
		    ((src_port && src_port == tunnel->src_port) ||
		     (dst_port && dst_port == tunnel->dst_port))) {
			return tunnel;
		}
	}

	return NULL;
}

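/*
 * Returns the "first hop" USB3 tunnel, i.e. the one that starts from the
 * host router downstream port leading towards the deeper of the two
 * routers on the path from @src_port to @dst_port, or NULL if there is
 * no such tunnel.
 */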
static struct tb_tunnel *tb_find_first_usb3_tunnel(struct tb *tb,
						   struct tb_port *src_port,
						   struct tb_port *dst_port)
{
	struct tb_port *port, *usb3_down;
	struct tb_switch *sw;

	/* Pick the router that is deepest in the topology */
	if (tb_port_path_direction_downstream(src_port, dst_port))
		sw = dst_port->sw;
	else
		sw = src_port->sw;

	/* Can't be the host router */
	if (sw == tb->root_switch)
		return NULL;

	/* Find the downstream USB4 port that leads to this router */
	port = tb_port_at(tb_route(sw), tb->root_switch);
	/* Find the corresponding host router USB3 downstream port */
	usb3_down = usb4_switch_map_usb3_down(tb->root_switch, port);
	if (!usb3_down)
		return NULL;

	return tb_find_tunnel(tb, TB_TUNNEL_USB3, usb3_down, NULL);
}

/**
 * tb_consumed_usb3_pcie_bandwidth() - Consumed USB3/PCIe bandwidth over a single link
 * @tb: Domain structure
 * @src_port: Source protocol adapter
 * @dst_port: Destination protocol adapter
 * @port: USB4 port for which the consumed bandwidth is calculated
 * @consumed_up: Consumed upstream bandwidth (Mb/s)
 * @consumed_down: Consumed downstream bandwidth (Mb/s)
 *
 * Calculates consumed USB3 and PCIe bandwidth at @port on the path
 * from @src_port to @dst_port. Does not take the USB3 tunnel starting
 * from @src_port and ending at @dst_port into account because that
 * bandwidth is already included as part of the "first hop" USB3
 * tunnel.
 */
static int tb_consumed_usb3_pcie_bandwidth(struct tb *tb,
					   struct tb_port *src_port,
					   struct tb_port *dst_port,
					   struct tb_port *port,
					   int *consumed_up,
					   int *consumed_down)
{
	int pci_consumed_up, pci_consumed_down;
	struct tb_tunnel *tunnel;

	*consumed_up = *consumed_down = 0;

	tunnel = tb_find_first_usb3_tunnel(tb, src_port, dst_port);
	if (tunnel && !tb_port_is_usb3_down(src_port) &&
	    !tb_port_is_usb3_up(dst_port)) {
		int ret;

		ret = tb_tunnel_consumed_bandwidth(tunnel, consumed_up,
						   consumed_down);
		if (ret)
			return ret;
	}

	/*
	 * If there is anything reserved for PCIe bulk traffic take it
	 * into account here too.
	 */
	if (tb_tunnel_reserved_pci(port, &pci_consumed_up, &pci_consumed_down)) {
		*consumed_up += pci_consumed_up;
		*consumed_down += pci_consumed_down;
	}

	return 0;
}

/**
 * tb_consumed_dp_bandwidth() - Consumed DP bandwidth over a single link
 * @tb: Domain structure
 * @src_port: Source protocol adapter
 * @dst_port: Destination protocol adapter
 * @port: USB4 port for which the consumed bandwidth is calculated
 * @consumed_up: Consumed upstream bandwidth (Mb/s)
 * @consumed_down: Consumed downstream bandwidth (Mb/s)
 *
 * Calculates consumed DP bandwidth at @port on the path from @src_port
 * to @dst_port. Does not take the tunnel starting from @src_port and
 * ending at @dst_port into account.
 *
 * If there is bandwidth reserved for any of the groups between
 * @src_port and @dst_port (but not yet used) that is also taken into
 * account in the returned consumed bandwidth.
 */
static int tb_consumed_dp_bandwidth(struct tb *tb,
				    struct tb_port *src_port,
				    struct tb_port *dst_port,
				    struct tb_port *port,
				    int *consumed_up,
				    int *consumed_down)
{
	int group_reserved[MAX_GROUPS] = {};
	struct tb_cm *tcm = tb_priv(tb);
	struct tb_tunnel *tunnel;
	bool downstream;
	int i, ret;

	*consumed_up = *consumed_down = 0;

	/*
	 * Find all DP tunnels that cross the port and reduce
	 * their consumed bandwidth from the available.
	 */
	list_for_each_entry(tunnel, &tcm->tunnel_list, list) {
		const struct tb_bandwidth_group *group;
		int dp_consumed_up, dp_consumed_down;

		if (tb_tunnel_is_invalid(tunnel))
			continue;

		if (!tb_tunnel_is_dp(tunnel))
			continue;

		if (!tb_tunnel_port_on_path(tunnel, port))
			continue;

		/*
		 * Calculate what is reserved for groups crossing the
		 * same ports only once (as that is reserved for all the
		 * tunnels in the group).
		 */
		group = tunnel->src_port->group;
		if (group && group->reserved && !group_reserved[group->index])
			group_reserved[group->index] = group->reserved;

		/*
		 * Ignore the DP tunnel between src_port and dst_port
		 * because it is the same tunnel and we may be
		 * re-calculating estimated bandwidth.
		 */
		if (tunnel->src_port == src_port &&
		    tunnel->dst_port == dst_port)
			continue;

		ret = tb_tunnel_consumed_bandwidth(tunnel, &dp_consumed_up,
						   &dp_consumed_down);
		if (ret)
			return ret;

		*consumed_up += dp_consumed_up;
		*consumed_down += dp_consumed_down;
	}

	downstream = tb_port_path_direction_downstream(src_port, dst_port);
	for (i = 0; i < ARRAY_SIZE(group_reserved); i++) {
		if (downstream)
			*consumed_down += group_reserved[i];
		else
			*consumed_up += group_reserved[i];
	}

	return 0;
}

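/*
 * Returns true if the lane adapter @port supports the asymmetric link
 * width that would be needed when the extra bandwidth flows in the
 * direction from @src_port towards @dst_port.
 */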
static bool tb_asym_supported(struct tb_port *src_port, struct tb_port *dst_port,
			      struct tb_port *port)
{
	bool downstream = tb_port_path_direction_downstream(src_port, dst_port);
	enum tb_link_width width;

	if (tb_is_upstream_port(port))
		width = downstream ? TB_LINK_WIDTH_ASYM_RX : TB_LINK_WIDTH_ASYM_TX;
	else
		width = downstream ? TB_LINK_WIDTH_ASYM_TX : TB_LINK_WIDTH_ASYM_RX;

	return tb_port_width_supported(port, width);
}

/**
 * tb_maximum_bandwidth() - Maximum bandwidth over a single link
 * @tb: Domain structure
 * @src_port: Source protocol adapter
 * @dst_port: Destination protocol adapter
 * @port: USB4 port for which the total bandwidth is calculated
 * @max_up: Maximum upstream bandwidth (Mb/s)
 * @max_down: Maximum downstream bandwidth (Mb/s)
 * @include_asym: Include bandwidth if the link is switched from
 *		  symmetric to asymmetric
 *
 * Returns maximum possible bandwidth in @max_up and @max_down over a
 * single link at @port. If @include_asym is set then includes the
 * additional bandwidth if the links are transitioned into asymmetric in
 * the direction from @src_port to @dst_port.
 */
static int tb_maximum_bandwidth(struct tb *tb, struct tb_port *src_port,
				struct tb_port *dst_port, struct tb_port *port,
				int *max_up, int *max_down, bool include_asym)
{
	bool downstream = tb_port_path_direction_downstream(src_port, dst_port);
	int link_speed, link_width, up_bw, down_bw;

	/*
	 * Can include asymmetric, only if it is actually supported by
	 * the lane adapter.
	 */
	if (!tb_asym_supported(src_port, dst_port, port))
		include_asym = false;

	if (tb_is_upstream_port(port)) {
		link_speed = port->sw->link_speed;
		/*
		 * sw->link_width is from upstream perspective so we use
		 * the opposite for downstream of the host router.
		 */
		if (port->sw->link_width == TB_LINK_WIDTH_ASYM_TX) {
			up_bw = link_speed * 3 * 1000;
			down_bw = link_speed * 1 * 1000;
		} else if (port->sw->link_width == TB_LINK_WIDTH_ASYM_RX) {
			up_bw = link_speed * 1 * 1000;
			down_bw = link_speed * 3 * 1000;
		} else if (include_asym) {
			/*
			 * The link is symmetric at the moment but we
			 * can switch it to asymmetric as needed. Report
			 * this bandwidth as available (even though it
			 * is not yet enabled).
			 */
			if (downstream) {
				up_bw = link_speed * 1 * 1000;
				down_bw = link_speed * 3 * 1000;
			} else {
				up_bw = link_speed * 3 * 1000;
				down_bw = link_speed * 1 * 1000;
			}
		} else {
			up_bw = link_speed * port->sw->link_width * 1000;
			down_bw = up_bw;
		}
	} else {
		link_speed = tb_port_get_link_speed(port);
		if (link_speed < 0)
			return link_speed;

		link_width = tb_port_get_link_width(port);
		if (link_width < 0)
			return link_width;

		if (link_width == TB_LINK_WIDTH_ASYM_TX) {
			up_bw = link_speed * 1 * 1000;
			down_bw = link_speed * 3 * 1000;
		} else if (link_width == TB_LINK_WIDTH_ASYM_RX) {
			up_bw = link_speed * 3 * 1000;
			down_bw = link_speed * 1 * 1000;
		} else if (include_asym) {
			/*
			 * The link is symmetric at the moment but we
			 * can switch it to asymmetric as needed. Report
			 * this bandwidth as available (even though it
			 * is not yet enabled).
			 */
			if (downstream) {
				up_bw = link_speed * 1 * 1000;
				down_bw = link_speed * 3 * 1000;
			} else {
				up_bw = link_speed * 3 * 1000;
				down_bw = link_speed * 1 * 1000;
			}
		} else {
			up_bw = link_speed * link_width * 1000;
			down_bw = up_bw;
		}
	}

	/* Leave 10% guard band */
	*max_up = up_bw - up_bw / 10;
	*max_down = down_bw - down_bw / 10;

	tb_port_dbg(port, "link maximum bandwidth %d/%d Mb/s\n", *max_up, *max_down);
	return 0;
}

/**
 * tb_available_bandwidth() - Available bandwidth for tunneling
 * @tb: Domain structure
 * @src_port: Source protocol adapter
 * @dst_port: Destination protocol adapter
 * @available_up: Available bandwidth upstream (Mb/s)
 * @available_down: Available bandwidth downstream (Mb/s)
 * @include_asym: Include bandwidth if the link is switched from
 *		  symmetric to asymmetric
 *
 * Calculates maximum available bandwidth for protocol tunneling between
 * @src_port and @dst_port at the moment. This is the minimum of the
 * maximum link bandwidth across all links, each reduced by the
 * bandwidth currently consumed on that link.
 *
 * If @include_asym is true then includes also bandwidth that can be
 * added when the links are transitioned into asymmetric (but does not
 * transition the links).
 */
static int tb_available_bandwidth(struct tb *tb, struct tb_port *src_port,
				  struct tb_port *dst_port, int *available_up,
				  int *available_down, bool include_asym)
{
	struct tb_port *port;
	int ret;

	/* Maximum possible bandwidth asymmetric Gen 4 link is 120 Gb/s */
	*available_up = *available_down = 120000;

	/* Find the minimum available bandwidth over all links */
	tb_for_each_port_on_path(src_port, dst_port, port) {
		int max_up, max_down, consumed_up, consumed_down;

		if (!tb_port_is_null(port))
			continue;

		ret = tb_maximum_bandwidth(tb, src_port, dst_port, port,
					   &max_up, &max_down, include_asym);
		if (ret)
			return ret;

		ret = tb_consumed_usb3_pcie_bandwidth(tb, src_port, dst_port,
						      port, &consumed_up,
						      &consumed_down);
		if (ret)
			return ret;
		max_up -= consumed_up;
		max_down -= consumed_down;

		ret = tb_consumed_dp_bandwidth(tb, src_port, dst_port, port,
					       &consumed_up, &consumed_down);
		if (ret)
			return ret;
		max_up -= consumed_up;
		max_down -= consumed_down;

		if (max_up < *available_up)
			*available_up = max_up;
		if (max_down < *available_down)
			*available_down = max_down;
	}

	if (*available_up < 0)
		*available_up = 0;
	if (*available_down < 0)
		*available_down = 0;

	return 0;
}

static int tb_release_unused_usb3_bandwidth(struct tb *tb,
					    struct tb_port *src_port,
					    struct tb_port *dst_port)
{
	struct tb_tunnel *tunnel;

	tunnel = tb_find_first_usb3_tunnel(tb, src_port, dst_port);
	return tunnel ? tb_tunnel_release_unused_bandwidth(tunnel) : 0;
}

static void tb_reclaim_usb3_bandwidth(struct tb *tb, struct tb_port *src_port,
				      struct tb_port *dst_port)
{
	int ret, available_up, available_down;
	struct tb_tunnel *tunnel;

	tunnel = tb_find_first_usb3_tunnel(tb, src_port, dst_port);
	if (!tunnel)
		return;

	tb_tunnel_dbg(tunnel, "reclaiming unused bandwidth\n");

	/*
	 * Calculate available bandwidth for the first hop USB3 tunnel.
	 * That determines the whole USB3 bandwidth for this branch.
	 */
	ret = tb_available_bandwidth(tb, tunnel->src_port, tunnel->dst_port,
				     &available_up, &available_down, false);
	if (ret) {
		tb_tunnel_warn(tunnel, "failed to calculate available bandwidth\n");
		return;
	}

	tb_tunnel_dbg(tunnel, "available bandwidth %d/%d Mb/s\n", available_up,
		      available_down);

	tb_tunnel_reclaim_available_bandwidth(tunnel, &available_up, &available_down);
}

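/*
 * Creates a USB3 tunnel between the USB3 downstream adapter of the
 * parent router and the USB3 upstream adapter of @sw, giving it as much
 * bandwidth as is currently available on the path.
 */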
static int tb_tunnel_usb3(struct tb *tb, struct tb_switch *sw)
{
	struct tb_switch *parent = tb_switch_parent(sw);
	int ret, available_up, available_down;
	struct tb_port *up, *down, *port;
	struct tb_cm *tcm = tb_priv(tb);
	struct tb_tunnel *tunnel;

	if (!tb_acpi_may_tunnel_usb3()) {
		tb_dbg(tb, "USB3 tunneling disabled, not creating tunnel\n");
		return 0;
	}

	up = tb_switch_find_port(sw, TB_TYPE_USB3_UP);
	if (!up)
		return 0;

	if (!sw->link_usb4)
		return 0;

	/*
	 * Look up available down port. Since we are chaining it should
	 * be found right above this switch.
	 */
	port = tb_switch_downstream_port(sw);
	down = tb_find_usb3_down(parent, port);
	if (!down)
		return 0;

	if (tb_route(parent)) {
		struct tb_port *parent_up;
		/*
		 * Check first that the parent switch has its upstream USB3
		 * port enabled. Otherwise the chain is not complete and
		 * there is no point setting up a new tunnel.
		 */
		parent_up = tb_switch_find_port(parent, TB_TYPE_USB3_UP);
		if (!parent_up || !tb_port_is_enabled(parent_up))
			return 0;

		/* Make all unused bandwidth available for the new tunnel */
		ret = tb_release_unused_usb3_bandwidth(tb, down, up);
		if (ret)
			return ret;
	}

	ret = tb_available_bandwidth(tb, down, up, &available_up, &available_down,
				     false);
	if (ret)
		goto err_reclaim;

	tb_port_dbg(up, "available bandwidth for new USB3 tunnel %d/%d Mb/s\n",
		    available_up, available_down);

	tunnel = tb_tunnel_alloc_usb3(tb, up, down, available_up,
				      available_down);
	if (!tunnel) {
		ret = -ENOMEM;
		goto err_reclaim;
	}

	if (tb_tunnel_activate(tunnel)) {
		tb_port_info(up,
			     "USB3 tunnel activation failed, aborting\n");
		ret = -EIO;
		goto err_free;
	}

	list_add_tail(&tunnel->list, &tcm->tunnel_list);
	if (tb_route(parent))
		tb_reclaim_usb3_bandwidth(tb, down, up);

	return 0;

err_free:
	tb_tunnel_free(tunnel);
err_reclaim:
	if (tb_route(parent))
		tb_reclaim_usb3_bandwidth(tb, down, up);

	return ret;
}

static int tb_create_usb3_tunnels(struct tb_switch *sw)
{
	struct tb_port *port;
	int ret;

	if (!tb_acpi_may_tunnel_usb3())
		return 0;

	if (tb_route(sw)) {
		ret = tb_tunnel_usb3(sw->tb, sw);
		if (ret)
			return ret;
	}

	tb_switch_for_each_port(sw, port) {
		if (!tb_port_has_remote(port))
			continue;
		ret = tb_create_usb3_tunnels(port->remote->sw);
		if (ret)
			return ret;
	}

	return 0;
}

/**
 * tb_configure_asym() - Transition links to asymmetric if needed
 * @tb: Domain structure
 * @src_port: Source adapter to start the transition
 * @dst_port: Destination adapter
 * @requested_up: Additional bandwidth (Mb/s) required upstream
 * @requested_down: Additional bandwidth (Mb/s) required downstream
 *
 * Transition links between @src_port and @dst_port into asymmetric, with
 * three lanes in the direction from @src_port towards @dst_port and one lane
 * in the opposite direction, if the bandwidth requirements
 * (requested + currently consumed) on that link exceed @asym_threshold.
 *
 * Must be called with available >= requested over all links.
 */
static int tb_configure_asym(struct tb *tb, struct tb_port *src_port,
			     struct tb_port *dst_port, int requested_up,
			     int requested_down)
{
	bool clx = false, clx_disabled = false, downstream;
	struct tb_switch *sw;
	struct tb_port *up;
	int ret = 0;

	if (!asym_threshold)
		return 0;

	downstream = tb_port_path_direction_downstream(src_port, dst_port);
	/* Pick up router deepest in the hierarchy */
	if (downstream)
		sw = dst_port->sw;
	else
		sw = src_port->sw;

	tb_for_each_upstream_port_on_path(src_port, dst_port, up) {
		struct tb_port *down = tb_switch_downstream_port(up->sw);
		enum tb_link_width width_up, width_down;
		int consumed_up, consumed_down;

		ret = tb_consumed_dp_bandwidth(tb, src_port, dst_port, up,
					       &consumed_up, &consumed_down);
		if (ret)
			break;

		if (downstream) {
			/*
			 * Downstream so make sure upstream is within the 36G
			 * (40G - guard band 10%), and the requested is above
			 * what the threshold is.
			 */
			if (consumed_up + requested_up >= TB_ASYM_MIN) {
				ret = -ENOBUFS;
				break;
			}
			/* Does consumed + requested exceed the threshold */
			if (consumed_down + requested_down < asym_threshold)
				continue;

			width_up = TB_LINK_WIDTH_ASYM_RX;
			width_down = TB_LINK_WIDTH_ASYM_TX;
		} else {
			/* Upstream, the opposite of above */
			if (consumed_down + requested_down >= TB_ASYM_MIN) {
				ret = -ENOBUFS;
				break;
			}
			if (consumed_up + requested_up < asym_threshold)
				continue;

			width_up = TB_LINK_WIDTH_ASYM_TX;
			width_down = TB_LINK_WIDTH_ASYM_RX;
		}

		if (up->sw->link_width == width_up)
			continue;

		if (!tb_port_width_supported(up, width_up) ||
		    !tb_port_width_supported(down, width_down))
			continue;

		/*
		 * Disable CL states before doing any transitions. We
		 * delayed it until now that we know there is a real
		 * transition taking place.
		 */
		if (!clx_disabled) {
			clx = tb_disable_clx(sw);
			clx_disabled = true;
		}

		tb_sw_dbg(up->sw, "configuring asymmetric link\n");

		/*
		 * Here requested + consumed > threshold so we need to
		 * transition the link into asymmetric now.
		 */
		ret = tb_switch_set_link_width(up->sw, width_up);
		if (ret) {
			tb_sw_warn(up->sw, "failed to set link width\n");
			break;
		}
	}

	/* Re-enable CL states if they were previously enabled */
	if (clx)
		tb_enable_clx(sw);

	return ret;
}

/**
 * tb_configure_sym() - Transition links to symmetric if possible
 * @tb: Domain structure
 * @src_port: Source adapter to start the transition
 * @dst_port: Destination adapter
 * @keep_asym: Keep asymmetric link if preferred
 *
 * Goes over each link from @src_port to @dst_port and tries to
 * transition the link to symmetric if the currently consumed bandwidth
 * allows. The router's asymmetric link preference is honored unless
 * @keep_asym is %false.
 */
static int tb_configure_sym(struct tb *tb, struct tb_port *src_port,
			    struct tb_port *dst_port, bool keep_asym)
{
	bool clx = false, clx_disabled = false, downstream;
	struct tb_switch *sw;
	struct tb_port *up;
	int ret = 0;

	if (!asym_threshold)
		return 0;

	downstream = tb_port_path_direction_downstream(src_port, dst_port);
	/* Pick up router deepest in the hierarchy */
	if (downstream)
		sw = dst_port->sw;
	else
		sw = src_port->sw;

	tb_for_each_upstream_port_on_path(src_port, dst_port, up) {
		int consumed_up, consumed_down;

		/* Already symmetric */
		if (up->sw->link_width <= TB_LINK_WIDTH_DUAL)
			continue;
		/* Unplugged, no need to switch */
		if (up->sw->is_unplugged)
			continue;

		ret = tb_consumed_dp_bandwidth(tb, src_port, dst_port, up,
					       &consumed_up, &consumed_down);
		if (ret)
			break;

		if (downstream) {
			/*
			 * Downstream so we want the consumed_down < threshold.
			 * Upstream traffic should be less than 36G (40G
			 * guard band 10%) as the link was configured asymmetric
			 * already.
			 */
			if (consumed_down >= asym_threshold)
				continue;
		} else {
			if (consumed_up >= asym_threshold)
				continue;
		}

		if (up->sw->link_width == TB_LINK_WIDTH_DUAL)
			continue;

		/*
		 * Here consumed < threshold so we can transition the
		 * link to symmetric.
		 *
		 * However, if the router prefers asymmetric link we
		 * honor that (unless @keep_asym is %false).
		 */
		if (keep_asym &&
		    up->sw->preferred_link_width > TB_LINK_WIDTH_DUAL) {
			tb_sw_dbg(up->sw, "keeping preferred asymmetric link\n");
			continue;
		}

		/* Disable CL states before doing any transitions */
		if (!clx_disabled) {
			clx = tb_disable_clx(sw);
			clx_disabled = true;
		}

		tb_sw_dbg(up->sw, "configuring symmetric link\n");

		ret = tb_switch_set_link_width(up->sw, TB_LINK_WIDTH_DUAL);
		if (ret) {
			tb_sw_warn(up->sw, "failed to set link width\n");
			break;
		}
	}

	/* Re-enable CL states if they were previously enabled */
	if (clx)
		tb_enable_clx(sw);

	return ret;
}

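/*
 * Hooks up the newly found router behind @down and @up: sets the remote
 * pointers of both lane adapters, bonds the lanes when possible and
 * marks the link configured. For Gen 4 devices connected deeper in the
 * topology the links towards the host may be transitioned back to
 * symmetric.
 */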
static void tb_configure_link(struct tb_port *down, struct tb_port *up,
			      struct tb_switch *sw)
{
	struct tb *tb = sw->tb;

	/* Link the routers using both links if available */
	down->remote = up;
	up->remote = down;
	if (down->dual_link_port && up->dual_link_port) {
		down->dual_link_port->remote = up->dual_link_port;
		up->dual_link_port->remote = down->dual_link_port;
	}

	/*
	 * Enable lane bonding if the link is currently two single lane
	 * links.
	 */
	if (sw->link_width < TB_LINK_WIDTH_DUAL)
		tb_switch_set_link_width(sw, TB_LINK_WIDTH_DUAL);

	/*
	 * When a device router that comes up with a symmetric link is
	 * connected deeper in the hierarchy, we transition the links
	 * above it into symmetric if bandwidth allows.
	 */
	if (tb_switch_depth(sw) > 1 &&
	    tb_port_get_link_generation(up) >= 4 &&
	    up->sw->link_width == TB_LINK_WIDTH_DUAL) {
		struct tb_port *host_port;

		host_port = tb_port_at(tb_route(sw), tb->root_switch);
		tb_configure_sym(tb, host_port, up, false);
	}

	/* Set the link configured */
	tb_switch_configure_link(sw);
}

static void tb_scan_port(struct tb_port *port);

/*
 * tb_scan_switch() - scan for and initialize downstream switches
 */
static void tb_scan_switch(struct tb_switch *sw)
{
	struct tb_port *port;

	pm_runtime_get_sync(&sw->dev);

	tb_switch_for_each_port(sw, port)
		tb_scan_port(port);

	pm_runtime_mark_last_busy(&sw->dev);
	pm_runtime_put_autosuspend(&sw->dev);
}

/*
 * tb_scan_port() - check for and initialize switches below port
 */
static void tb_scan_port(struct tb_port *port)
{
	struct tb_cm *tcm = tb_priv(port->sw->tb);
	struct tb_port *upstream_port;
	bool discovery = false;
	struct tb_switch *sw;

	if (tb_is_upstream_port(port))
		return;

	if (tb_port_is_dpout(port) && tb_dp_port_hpd_is_active(port) == 1 &&
	    !tb_dp_port_is_enabled(port)) {
		tb_port_dbg(port, "DP adapter HPD set, queuing hotplug\n");
		tb_queue_hotplug(port->sw->tb, tb_route(port->sw), port->port,
				 false);
		return;
	}

	if (port->config.type != TB_TYPE_PORT)
		return;
	if (port->dual_link_port && port->link_nr)
		return; /*
			 * Downstream switch is reachable through two ports.
			 * Only scan on the primary port (link_nr == 0).
			 */

	if (port->usb4)
		pm_runtime_get_sync(&port->usb4->dev);

	if (tb_wait_for_port(port, false) <= 0)
		goto out_rpm_put;
	if (port->remote) {
		tb_port_dbg(port, "port already has a remote\n");
		goto out_rpm_put;
	}

	tb_retimer_scan(port, true);

	sw = tb_switch_alloc(port->sw->tb, &port->sw->dev,
			     tb_downstream_route(port));
	if (IS_ERR(sw)) {
		/*
		 * If there is an error accessing the connected switch
		 * it may be connected to another domain. Also we allow
		 * the other domain to be connected to a max depth switch.
		 */
		if (PTR_ERR(sw) == -EIO || PTR_ERR(sw) == -EADDRNOTAVAIL)
			tb_scan_xdomain(port);
		goto out_rpm_put;
	}

	if (tb_switch_configure(sw)) {
		tb_switch_put(sw);
		goto out_rpm_put;
	}

	/*
	 * If there was previously another domain connected remove it
	 * first.
	 */
	if (port->xdomain) {
		tb_xdomain_remove(port->xdomain);
		tb_port_unconfigure_xdomain(port);
		port->xdomain = NULL;
	}

	/*
	 * Do not send uevents until we have discovered all existing
	 * tunnels and know which switches were authorized already by
	 * the boot firmware.
	 */
	if (!tcm->hotplug_active) {
		dev_set_uevent_suppress(&sw->dev, true);
		discovery = true;
	}

	/*
	 * At the moment runtime PM is supported only for Thunderbolt 2
	 * and beyond (devices with LC).
	 */
	sw->rpm = sw->generation > 1;

	if (tb_switch_add(sw)) {
		tb_switch_put(sw);
		goto out_rpm_put;
	}

	upstream_port = tb_upstream_port(sw);
	tb_configure_link(port, upstream_port, sw);

	/*
	 * CL0s and CL1 are enabled and supported together.
	 * Silently ignore CLx enabling in case CLx is not supported.
	 */
	if (discovery)
		tb_sw_dbg(sw, "discovery, not touching CL states\n");
	else if (tb_enable_clx(sw))
		tb_sw_warn(sw, "failed to enable CL states\n");

	if (tb_enable_tmu(sw))
		tb_sw_warn(sw, "failed to enable TMU\n");

	/*
	 * Configuration valid needs to be set after the TMU has been
	 * enabled for the upstream port of the router so we do it here.
	 */
	tb_switch_configuration_valid(sw);

	/* Scan upstream retimers */
	tb_retimer_scan(upstream_port, true);

	/*
	 * Create USB 3.x tunnels only when the switch is plugged to the
	 * domain. This is because we scan the domain also during discovery
	 * and want to discover existing USB 3.x tunnels before we create
	 * any new ones.
	 */
	if (tcm->hotplug_active && tb_tunnel_usb3(sw->tb, sw))
		tb_sw_warn(sw, "USB3 tunnel creation failed\n");

	tb_add_dp_resources(sw);
	tb_scan_switch(sw);

out_rpm_put:
	if (port->usb4) {
		pm_runtime_mark_last_busy(&port->usb4->dev);
		pm_runtime_put_autosuspend(&port->usb4->dev);
	}
}

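/*
 * Re-calculates the bandwidth estimation for every DP tunnel in @group
 * that uses the bandwidth allocation mode and reports the new estimate
 * (plus any bandwidth still reserved for the group) to the DP IN
 * adapter.
 */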
static void
tb_recalc_estimated_bandwidth_for_group(struct tb_bandwidth_group *group)
{
	struct tb_tunnel *first_tunnel;
	struct tb *tb = group->tb;
	struct tb_port *in;
	int ret;

	tb_dbg(tb, "re-calculating bandwidth estimation for group %u\n",
	       group->index);

	first_tunnel = NULL;
	list_for_each_entry(in, &group->ports, group_list) {
		int estimated_bw, estimated_up, estimated_down;
		struct tb_tunnel *tunnel;
		struct tb_port *out;

		if (!usb4_dp_port_bandwidth_mode_enabled(in))
			continue;

		tunnel = tb_find_tunnel(tb, TB_TUNNEL_DP, in, NULL);
		if (WARN_ON(!tunnel))
			break;

		if (!first_tunnel) {
			/*
			 * Since USB3 bandwidth is shared by all DP
			 * tunnels under the host router USB4 port, even
			 * if they do not begin from the host router, we
			 * can release USB3 bandwidth just once and not
			 * for each tunnel separately.
			 */
			first_tunnel = tunnel;
			ret = tb_release_unused_usb3_bandwidth(tb,
				first_tunnel->src_port, first_tunnel->dst_port);
			if (ret) {
				tb_tunnel_warn(tunnel,
					"failed to release unused bandwidth\n");
				break;
			}
		}

		out = tunnel->dst_port;
		ret = tb_available_bandwidth(tb, in, out, &estimated_up,
					     &estimated_down, true);
		if (ret) {
			tb_tunnel_warn(tunnel,
				"failed to re-calculate estimated bandwidth\n");
			break;
		}

		/*
		 * Estimated bandwidth includes:
		 *  - already allocated bandwidth for the DP tunnel
		 *  - available bandwidth along the path
		 *  - bandwidth allocated for USB 3.x but not used.
		 */
		if (tb_tunnel_direction_downstream(tunnel))
			estimated_bw = estimated_down;
		else
			estimated_bw = estimated_up;

		/*
		 * If there is reserved bandwidth for the group that is
		 * not yet released we report that too.
		 */
		tb_tunnel_dbg(tunnel,
			      "re-calculated estimated bandwidth %u (+ %u reserved) = %u Mb/s\n",
			      estimated_bw, group->reserved,
			      estimated_bw + group->reserved);

		if (usb4_dp_port_set_estimated_bandwidth(in,
				estimated_bw + group->reserved))
			tb_tunnel_warn(tunnel,
				       "failed to update estimated bandwidth\n");
	}

	if (first_tunnel)
		tb_reclaim_usb3_bandwidth(tb, first_tunnel->src_port,
					  first_tunnel->dst_port);

	tb_dbg(tb, "bandwidth estimation for group %u done\n", group->index);
}

  1250. static void tb_recalc_estimated_bandwidth(struct tb *tb)
  1251. {
  1252. struct tb_cm *tcm = tb_priv(tb);
  1253. int i;
  1254. tb_dbg(tb, "bandwidth consumption changed, re-calculating estimated bandwidth\n");
  1255. for (i = 0; i < ARRAY_SIZE(tcm->groups); i++) {
  1256. struct tb_bandwidth_group *group = &tcm->groups[i];
  1257. if (!list_empty(&group->ports))
  1258. tb_recalc_estimated_bandwidth_for_group(group);
  1259. }
  1260. tb_dbg(tb, "bandwidth re-calculation done\n");
  1261. }
  1262. static bool __release_group_bandwidth(struct tb_bandwidth_group *group)
  1263. {
  1264. if (group->reserved) {
  1265. tb_dbg(group->tb, "group %d released total %d Mb/s\n", group->index,
  1266. group->reserved);
  1267. group->reserved = 0;
  1268. return true;
  1269. }
  1270. return false;
  1271. }
  1272. static void __configure_group_sym(struct tb_bandwidth_group *group)
  1273. {
  1274. struct tb_tunnel *tunnel;
  1275. struct tb_port *in;
  1276. if (list_empty(&group->ports))
  1277. return;
/*
 * All the tunnels in the group go through the same USB4 links
 * so we find the first one here and pass the IN and OUT
 * adapters to tb_configure_sym() which now transitions the
 * links back to symmetric if bandwidth requirement < asym_threshold.
 *
 * We do this here to avoid unnecessary transitions (for example
 * if the graphics released bandwidth for another tunnel in the
 * same group).
 */
  1288. in = list_first_entry(&group->ports, struct tb_port, group_list);
  1289. tunnel = tb_find_tunnel(group->tb, TB_TUNNEL_DP, in, NULL);
  1290. if (tunnel)
  1291. tb_configure_sym(group->tb, in, tunnel->dst_port, true);
  1292. }
  1293. static void tb_bandwidth_group_release_work(struct work_struct *work)
  1294. {
  1295. struct tb_bandwidth_group *group =
  1296. container_of(work, typeof(*group), release_work.work);
  1297. struct tb *tb = group->tb;
  1298. mutex_lock(&tb->lock);
  1299. if (__release_group_bandwidth(group))
  1300. tb_recalc_estimated_bandwidth(tb);
  1301. __configure_group_sym(group);
  1302. mutex_unlock(&tb->lock);
  1303. }
  1304. static void tb_init_bandwidth_groups(struct tb_cm *tcm)
  1305. {
  1306. int i;
  1307. for (i = 0; i < ARRAY_SIZE(tcm->groups); i++) {
  1308. struct tb_bandwidth_group *group = &tcm->groups[i];
  1309. group->tb = tcm_to_tb(tcm);
  1310. group->index = i + 1;
  1311. INIT_LIST_HEAD(&group->ports);
  1312. INIT_DELAYED_WORK(&group->release_work,
  1313. tb_bandwidth_group_release_work);
  1314. }
  1315. }
  1316. static void tb_bandwidth_group_attach_port(struct tb_bandwidth_group *group,
  1317. struct tb_port *in)
  1318. {
  1319. if (!group || WARN_ON(in->group))
  1320. return;
  1321. in->group = group;
  1322. list_add_tail(&in->group_list, &group->ports);
  1323. tb_port_dbg(in, "attached to bandwidth group %d\n", group->index);
  1324. }
  1325. static struct tb_bandwidth_group *tb_find_free_bandwidth_group(struct tb_cm *tcm)
  1326. {
  1327. int i;
  1328. for (i = 0; i < ARRAY_SIZE(tcm->groups); i++) {
  1329. struct tb_bandwidth_group *group = &tcm->groups[i];
  1330. if (list_empty(&group->ports))
  1331. return group;
  1332. }
  1333. return NULL;
  1334. }
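/*
 * tb_attach_bandwidth_group() - Attach DP IN adapter to a bandwidth group
 *
 * DP tunnels that traverse the same USB4 links share the same group.
 * If an existing DP tunnel runs between the same routers, the IN
 * adapter joins its group, otherwise the first free group is used.
 * Returns the group or %NULL if none is available.
 */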
  1335. static struct tb_bandwidth_group *
  1336. tb_attach_bandwidth_group(struct tb_cm *tcm, struct tb_port *in,
  1337. struct tb_port *out)
  1338. {
  1339. struct tb_bandwidth_group *group;
  1340. struct tb_tunnel *tunnel;
/*
 * Find all DP tunnels that go through all the same USB4 links
 * as this one. Because we always set up tunnels the same way we
 * can just check the routers at both ends of the tunnels and if
 * they are the same we have a match.
 */
  1347. list_for_each_entry(tunnel, &tcm->tunnel_list, list) {
  1348. if (!tb_tunnel_is_dp(tunnel))
  1349. continue;
  1350. if (tunnel->src_port->sw == in->sw &&
  1351. tunnel->dst_port->sw == out->sw) {
  1352. group = tunnel->src_port->group;
  1353. if (group) {
  1354. tb_bandwidth_group_attach_port(group, in);
  1355. return group;
  1356. }
  1357. }
  1358. }
  1359. /* Pick up next available group then */
  1360. group = tb_find_free_bandwidth_group(tcm);
  1361. if (group)
  1362. tb_bandwidth_group_attach_port(group, in);
  1363. else
  1364. tb_port_warn(in, "no available bandwidth groups\n");
  1365. return group;
  1366. }
  1367. static void tb_discover_bandwidth_group(struct tb_cm *tcm, struct tb_port *in,
  1368. struct tb_port *out)
  1369. {
  1370. if (usb4_dp_port_bandwidth_mode_enabled(in)) {
  1371. int index, i;
  1372. index = usb4_dp_port_group_id(in);
  1373. for (i = 0; i < ARRAY_SIZE(tcm->groups); i++) {
  1374. if (tcm->groups[i].index == index) {
  1375. tb_bandwidth_group_attach_port(&tcm->groups[i], in);
  1376. return;
  1377. }
  1378. }
  1379. }
  1380. tb_attach_bandwidth_group(tcm, in, out);
  1381. }
  1382. static void tb_detach_bandwidth_group(struct tb_port *in)
  1383. {
  1384. struct tb_bandwidth_group *group = in->group;
  1385. if (group) {
  1386. in->group = NULL;
  1387. list_del_init(&in->group_list);
  1388. tb_port_dbg(in, "detached from bandwidth group %d\n", group->index);
  1389. /* No more tunnels so release the reserved bandwidth if any */
  1390. if (list_empty(&group->ports)) {
  1391. cancel_delayed_work(&group->release_work);
  1392. __release_group_bandwidth(group);
  1393. }
  1394. }
  1395. }
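/*
 * tb_discover_tunnels() - Discover tunnels created by the boot firmware
 *
 * For discovered PCIe tunnels, marks the routers from the tunnel's
 * destination up to (but not including) its source as set up by the
 * boot firmware. For DP tunnels, takes runtime PM references on both
 * ends to keep the domain from powering down and attaches the DP IN
 * adapter to a bandwidth group.
 */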
  1396. static void tb_discover_tunnels(struct tb *tb)
  1397. {
  1398. struct tb_cm *tcm = tb_priv(tb);
  1399. struct tb_tunnel *tunnel;
  1400. tb_switch_discover_tunnels(tb->root_switch, &tcm->tunnel_list, true);
  1401. list_for_each_entry(tunnel, &tcm->tunnel_list, list) {
  1402. if (tb_tunnel_is_pci(tunnel)) {
  1403. struct tb_switch *parent = tunnel->dst_port->sw;
  1404. while (parent != tunnel->src_port->sw) {
  1405. parent->boot = true;
  1406. parent = tb_switch_parent(parent);
  1407. }
  1408. } else if (tb_tunnel_is_dp(tunnel)) {
  1409. struct tb_port *in = tunnel->src_port;
  1410. struct tb_port *out = tunnel->dst_port;
  1411. /* Keep the domain from powering down */
  1412. pm_runtime_get_sync(&in->sw->dev);
  1413. pm_runtime_get_sync(&out->sw->dev);
  1414. tb_discover_bandwidth_group(tcm, in, out);
  1415. }
  1416. }
  1417. }
  1418. static void tb_deactivate_and_free_tunnel(struct tb_tunnel *tunnel)
  1419. {
  1420. struct tb_port *src_port, *dst_port;
  1421. struct tb *tb;
  1422. if (!tunnel)
  1423. return;
  1424. tb_tunnel_deactivate(tunnel);
  1425. list_del(&tunnel->list);
  1426. tb = tunnel->tb;
  1427. src_port = tunnel->src_port;
  1428. dst_port = tunnel->dst_port;
  1429. switch (tunnel->type) {
  1430. case TB_TUNNEL_DP:
  1431. tb_detach_bandwidth_group(src_port);
  1432. /*
  1433. * In case of DP tunnel make sure the DP IN resource is
  1434. * deallocated properly.
  1435. */
  1436. tb_switch_dealloc_dp_resource(src_port->sw, src_port);
  1437. /*
  1438. * If bandwidth on a link is < asym_threshold
  1439. * transition the link to symmetric.
  1440. */
  1441. tb_configure_sym(tb, src_port, dst_port, true);
  1442. /* Now we can allow the domain to runtime suspend again */
  1443. pm_runtime_mark_last_busy(&dst_port->sw->dev);
  1444. pm_runtime_put_autosuspend(&dst_port->sw->dev);
  1445. pm_runtime_mark_last_busy(&src_port->sw->dev);
  1446. pm_runtime_put_autosuspend(&src_port->sw->dev);
  1447. fallthrough;
  1448. case TB_TUNNEL_USB3:
  1449. tb_reclaim_usb3_bandwidth(tb, src_port, dst_port);
  1450. break;
  1451. default:
  1452. /*
  1453. * PCIe and DMA tunnels do not consume guaranteed
  1454. * bandwidth.
  1455. */
  1456. break;
  1457. }
  1458. tb_tunnel_free(tunnel);
  1459. }
  1460. /*
  1461. * tb_free_invalid_tunnels() - destroy tunnels of devices that have gone away
  1462. */
  1463. static void tb_free_invalid_tunnels(struct tb *tb)
  1464. {
  1465. struct tb_cm *tcm = tb_priv(tb);
  1466. struct tb_tunnel *tunnel;
  1467. struct tb_tunnel *n;
  1468. list_for_each_entry_safe(tunnel, n, &tcm->tunnel_list, list) {
  1469. if (tb_tunnel_is_invalid(tunnel))
  1470. tb_deactivate_and_free_tunnel(tunnel);
  1471. }
  1472. }
  1473. /*
  1474. * tb_free_unplugged_children() - traverse hierarchy and free unplugged switches
  1475. */
  1476. static void tb_free_unplugged_children(struct tb_switch *sw)
  1477. {
  1478. struct tb_port *port;
  1479. tb_switch_for_each_port(sw, port) {
  1480. if (!tb_port_has_remote(port))
  1481. continue;
  1482. if (port->remote->sw->is_unplugged) {
  1483. tb_retimer_remove_all(port);
  1484. tb_remove_dp_resources(port->remote->sw);
  1485. tb_switch_unconfigure_link(port->remote->sw);
  1486. tb_switch_set_link_width(port->remote->sw,
  1487. TB_LINK_WIDTH_SINGLE);
  1488. tb_switch_remove(port->remote->sw);
  1489. port->remote = NULL;
  1490. if (port->dual_link_port)
  1491. port->dual_link_port->remote = NULL;
  1492. } else {
  1493. tb_free_unplugged_children(port->remote->sw);
  1494. }
  1495. }
  1496. }
  1497. static struct tb_port *tb_find_pcie_down(struct tb_switch *sw,
  1498. const struct tb_port *port)
  1499. {
  1500. struct tb_port *down = NULL;
  1501. /*
  1502. * To keep plugging devices consistently in the same PCIe
  1503. * hierarchy, do mapping here for switch downstream PCIe ports.
  1504. */
  1505. if (tb_switch_is_usb4(sw)) {
  1506. down = usb4_switch_map_pcie_down(sw, port);
  1507. } else if (!tb_route(sw)) {
  1508. int phy_port = tb_phy_port_from_link(port->port);
  1509. int index;
  1510. /*
  1511. * Hard-coded Thunderbolt port to PCIe down port mapping
  1512. * per controller.
  1513. */
  1514. if (tb_switch_is_cactus_ridge(sw) ||
  1515. tb_switch_is_alpine_ridge(sw))
  1516. index = !phy_port ? 6 : 7;
  1517. else if (tb_switch_is_falcon_ridge(sw))
  1518. index = !phy_port ? 6 : 8;
  1519. else if (tb_switch_is_titan_ridge(sw))
  1520. index = !phy_port ? 8 : 9;
  1521. else
  1522. goto out;
  1523. /* Validate the hard-coding */
  1524. if (WARN_ON(index > sw->config.max_port_number))
  1525. goto out;
  1526. down = &sw->ports[index];
  1527. }
  1528. if (down) {
  1529. if (WARN_ON(!tb_port_is_pcie_down(down)))
  1530. goto out;
  1531. if (tb_pci_port_is_enabled(down))
  1532. goto out;
  1533. return down;
  1534. }
  1535. out:
  1536. return tb_find_unused_port(sw, TB_TYPE_PCIE_DOWN);
  1537. }
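/*
 * tb_find_dp_out() - Find a usable DP OUT adapter for @in
 *
 * Picks the first DP OUT adapter from the DP resource list that is not
 * enabled, sits on a different router than @in and, when @in is behind
 * a host router downstream port, is reachable through that same
 * downstream port.
 */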
  1538. static struct tb_port *tb_find_dp_out(struct tb *tb, struct tb_port *in)
  1539. {
  1540. struct tb_port *host_port, *port;
  1541. struct tb_cm *tcm = tb_priv(tb);
  1542. host_port = tb_route(in->sw) ?
  1543. tb_port_at(tb_route(in->sw), tb->root_switch) : NULL;
  1544. list_for_each_entry(port, &tcm->dp_resources, list) {
  1545. if (!tb_port_is_dpout(port))
  1546. continue;
  1547. if (tb_port_is_enabled(port)) {
  1548. tb_port_dbg(port, "DP OUT in use\n");
  1549. continue;
  1550. }
/* The DP IN and DP OUT adapters need to be on different routers */
  1552. if (in->sw == port->sw) {
  1553. tb_port_dbg(port, "skipping DP OUT on same router\n");
  1554. continue;
  1555. }
  1556. tb_port_dbg(port, "DP OUT available\n");
  1557. /*
  1558. * Keep the DP tunnel under the topology starting from
  1559. * the same host router downstream port.
  1560. */
  1561. if (host_port && tb_route(port->sw)) {
  1562. struct tb_port *p;
  1563. p = tb_port_at(tb_route(port->sw), tb->root_switch);
  1564. if (p != host_port)
  1565. continue;
  1566. }
  1567. return port;
  1568. }
  1569. return NULL;
  1570. }
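/*
 * tb_tunnel_one_dp() - Establish a DP tunnel between @in and @out
 *
 * Runtime resumes both ends, allocates the DP IN resource, attaches
 * @in to a bandwidth group and releases unused USB3 bandwidth before
 * allocating and activating the tunnel. On success the links are
 * transitioned to asymmetric if needed and the estimated bandwidth is
 * updated. Returns %true if the tunnel was established.
 */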
  1571. static bool tb_tunnel_one_dp(struct tb *tb, struct tb_port *in,
  1572. struct tb_port *out)
  1573. {
  1574. int available_up, available_down, ret, link_nr;
  1575. struct tb_cm *tcm = tb_priv(tb);
  1576. int consumed_up, consumed_down;
  1577. struct tb_tunnel *tunnel;
  1578. /*
  1579. * This is only applicable to links that are not bonded (so
  1580. * when Thunderbolt 1 hardware is involved somewhere in the
  1581. * topology). For these try to share the DP bandwidth between
  1582. * the two lanes.
  1583. */
  1584. link_nr = 1;
  1585. list_for_each_entry(tunnel, &tcm->tunnel_list, list) {
  1586. if (tb_tunnel_is_dp(tunnel)) {
  1587. link_nr = 0;
  1588. break;
  1589. }
  1590. }
/*
 * The DP stream needs the domain to be active so runtime resume
 * both ends of the tunnel.
 *
 * This should also bring the routers in the middle up and keeps
 * the domain from runtime suspending while the DP tunnel is
 * active.
 */
  1599. pm_runtime_get_sync(&in->sw->dev);
  1600. pm_runtime_get_sync(&out->sw->dev);
  1601. if (tb_switch_alloc_dp_resource(in->sw, in)) {
  1602. tb_port_dbg(in, "no resource available for DP IN, not tunneling\n");
  1603. goto err_rpm_put;
  1604. }
  1605. if (!tb_attach_bandwidth_group(tcm, in, out))
  1606. goto err_dealloc_dp;
  1607. /* Make all unused USB3 bandwidth available for the new DP tunnel */
  1608. ret = tb_release_unused_usb3_bandwidth(tb, in, out);
  1609. if (ret) {
  1610. tb_warn(tb, "failed to release unused bandwidth\n");
  1611. goto err_detach_group;
  1612. }
  1613. ret = tb_available_bandwidth(tb, in, out, &available_up, &available_down,
  1614. true);
  1615. if (ret)
  1616. goto err_reclaim_usb;
  1617. tb_dbg(tb, "available bandwidth for new DP tunnel %u/%u Mb/s\n",
  1618. available_up, available_down);
  1619. tunnel = tb_tunnel_alloc_dp(tb, in, out, link_nr, available_up,
  1620. available_down);
  1621. if (!tunnel) {
  1622. tb_port_dbg(out, "could not allocate DP tunnel\n");
  1623. goto err_reclaim_usb;
  1624. }
  1625. if (tb_tunnel_activate(tunnel)) {
  1626. tb_port_info(out, "DP tunnel activation failed, aborting\n");
  1627. goto err_free;
  1628. }
/* If reading the tunnel's consumed bandwidth fails, tear it down */
  1630. ret = tb_tunnel_consumed_bandwidth(tunnel, &consumed_up, &consumed_down);
  1631. if (ret)
  1632. goto err_deactivate;
  1633. list_add_tail(&tunnel->list, &tcm->tunnel_list);
  1634. tb_reclaim_usb3_bandwidth(tb, in, out);
  1635. /*
  1636. * Transition the links to asymmetric if the consumption exceeds
  1637. * the threshold.
  1638. */
  1639. tb_configure_asym(tb, in, out, consumed_up, consumed_down);
  1640. /* Update the domain with the new bandwidth estimation */
  1641. tb_recalc_estimated_bandwidth(tb);
/*
 * Now that a DP tunnel exists, switch the TMU mode of the host
 * router's first-level children to HiFi so that CL0s still works.
 */
  1646. tb_increase_tmu_accuracy(tunnel);
  1647. return true;
  1648. err_deactivate:
  1649. tb_tunnel_deactivate(tunnel);
  1650. err_free:
  1651. tb_tunnel_free(tunnel);
  1652. err_reclaim_usb:
  1653. tb_reclaim_usb3_bandwidth(tb, in, out);
  1654. err_detach_group:
  1655. tb_detach_bandwidth_group(in);
  1656. err_dealloc_dp:
  1657. tb_switch_dealloc_dp_resource(in->sw, in);
  1658. err_rpm_put:
  1659. pm_runtime_mark_last_busy(&out->sw->dev);
  1660. pm_runtime_put_autosuspend(&out->sw->dev);
  1661. pm_runtime_mark_last_busy(&in->sw->dev);
  1662. pm_runtime_put_autosuspend(&in->sw->dev);
  1663. return false;
  1664. }
  1665. static void tb_tunnel_dp(struct tb *tb)
  1666. {
  1667. struct tb_cm *tcm = tb_priv(tb);
  1668. struct tb_port *port, *in, *out;
  1669. if (!tb_acpi_may_tunnel_dp()) {
  1670. tb_dbg(tb, "DP tunneling disabled, not creating tunnel\n");
  1671. return;
  1672. }
  1673. /*
  1674. * Find pair of inactive DP IN and DP OUT adapters and then
  1675. * establish a DP tunnel between them.
  1676. */
  1677. tb_dbg(tb, "looking for DP IN <-> DP OUT pairs:\n");
  1678. in = NULL;
  1679. out = NULL;
  1680. list_for_each_entry(port, &tcm->dp_resources, list) {
  1681. if (!tb_port_is_dpin(port))
  1682. continue;
  1683. if (tb_port_is_enabled(port)) {
  1684. tb_port_dbg(port, "DP IN in use\n");
  1685. continue;
  1686. }
  1687. in = port;
  1688. tb_port_dbg(in, "DP IN available\n");
  1689. out = tb_find_dp_out(tb, port);
  1690. if (out)
  1691. tb_tunnel_one_dp(tb, in, out);
  1692. else
  1693. tb_port_dbg(in, "no suitable DP OUT adapter available, not tunneling\n");
  1694. }
  1695. if (!in)
  1696. tb_dbg(tb, "no suitable DP IN adapter available, not tunneling\n");
  1697. }
  1698. static void tb_enter_redrive(struct tb_port *port)
  1699. {
  1700. struct tb_switch *sw = port->sw;
  1701. if (!(sw->quirks & QUIRK_KEEP_POWER_IN_DP_REDRIVE))
  1702. return;
  1703. /*
  1704. * If we get hot-unplug for the DP IN port of the host router
  1705. * and the DP resource is not available anymore it means there
  1706. * is a monitor connected directly to the Type-C port and we are
  1707. * in "redrive" mode. For this to work we cannot enter RTD3 so
  1708. * we bump up the runtime PM reference count here.
  1709. */
  1710. if (!tb_port_is_dpin(port))
  1711. return;
  1712. if (tb_route(sw))
  1713. return;
  1714. if (!tb_switch_query_dp_resource(sw, port)) {
  1715. port->redrive = true;
  1716. pm_runtime_get(&sw->dev);
  1717. tb_port_dbg(port, "enter redrive mode, keeping powered\n");
  1718. }
  1719. }
  1720. static void tb_exit_redrive(struct tb_port *port)
  1721. {
  1722. struct tb_switch *sw = port->sw;
  1723. if (!(sw->quirks & QUIRK_KEEP_POWER_IN_DP_REDRIVE))
  1724. return;
  1725. if (!tb_port_is_dpin(port))
  1726. return;
  1727. if (tb_route(sw))
  1728. return;
  1729. if (port->redrive && tb_switch_query_dp_resource(sw, port)) {
  1730. port->redrive = false;
  1731. pm_runtime_put(&sw->dev);
  1732. tb_port_dbg(port, "exit redrive mode\n");
  1733. }
  1734. }
  1735. static void tb_switch_enter_redrive(struct tb_switch *sw)
  1736. {
  1737. struct tb_port *port;
  1738. tb_switch_for_each_port(sw, port)
  1739. tb_enter_redrive(port);
  1740. }
  1741. /*
  1742. * Called during system and runtime suspend to forcefully exit redrive
  1743. * mode without querying whether the resource is available.
  1744. */
  1745. static void tb_switch_exit_redrive(struct tb_switch *sw)
  1746. {
  1747. struct tb_port *port;
  1748. if (!(sw->quirks & QUIRK_KEEP_POWER_IN_DP_REDRIVE))
  1749. return;
  1750. tb_switch_for_each_port(sw, port) {
  1751. if (!tb_port_is_dpin(port))
  1752. continue;
  1753. if (port->redrive) {
  1754. port->redrive = false;
  1755. pm_runtime_put(&sw->dev);
  1756. tb_port_dbg(port, "exit redrive mode\n");
  1757. }
  1758. }
  1759. }
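/*
 * tb_dp_resource_unavailable() - Handle DP resource becoming unavailable
 *
 * Tears down the DP tunnel using the adapter (or enters redrive mode if
 * there was no tunnel), removes the adapter from the DP resource list
 * and then tries to establish a new DP tunnel with the remaining
 * resources.
 */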
  1760. static void tb_dp_resource_unavailable(struct tb *tb, struct tb_port *port)
  1761. {
  1762. struct tb_port *in, *out;
  1763. struct tb_tunnel *tunnel;
  1764. if (tb_port_is_dpin(port)) {
  1765. tb_port_dbg(port, "DP IN resource unavailable\n");
  1766. in = port;
  1767. out = NULL;
  1768. } else {
  1769. tb_port_dbg(port, "DP OUT resource unavailable\n");
  1770. in = NULL;
  1771. out = port;
  1772. }
  1773. tunnel = tb_find_tunnel(tb, TB_TUNNEL_DP, in, out);
  1774. if (tunnel)
  1775. tb_deactivate_and_free_tunnel(tunnel);
  1776. else
  1777. tb_enter_redrive(port);
  1778. list_del_init(&port->list);
/*
 * See if there is another DP OUT port that can be used to
 * create another tunnel.
 */
  1783. tb_recalc_estimated_bandwidth(tb);
  1784. tb_tunnel_dp(tb);
  1785. }
  1786. static void tb_dp_resource_available(struct tb *tb, struct tb_port *port)
  1787. {
  1788. struct tb_cm *tcm = tb_priv(tb);
  1789. struct tb_port *p;
  1790. if (tb_port_is_enabled(port))
  1791. return;
  1792. list_for_each_entry(p, &tcm->dp_resources, list) {
  1793. if (p == port)
  1794. return;
  1795. }
  1796. tb_port_dbg(port, "DP %s resource available after hotplug\n",
  1797. tb_port_is_dpin(port) ? "IN" : "OUT");
  1798. list_add_tail(&port->list, &tcm->dp_resources);
  1799. tb_exit_redrive(port);
  1800. /* Look for suitable DP IN <-> DP OUT pairs now */
  1801. tb_tunnel_dp(tb);
  1802. }
  1803. static void tb_disconnect_and_release_dp(struct tb *tb)
  1804. {
  1805. struct tb_cm *tcm = tb_priv(tb);
  1806. struct tb_tunnel *tunnel, *n;
  1807. /*
  1808. * Tear down all DP tunnels and release their resources. They
  1809. * will be re-established after resume based on plug events.
  1810. */
  1811. list_for_each_entry_safe_reverse(tunnel, n, &tcm->tunnel_list, list) {
  1812. if (tb_tunnel_is_dp(tunnel))
  1813. tb_deactivate_and_free_tunnel(tunnel);
  1814. }
  1815. while (!list_empty(&tcm->dp_resources)) {
  1816. struct tb_port *port;
  1817. port = list_first_entry(&tcm->dp_resources,
  1818. struct tb_port, list);
  1819. list_del_init(&port->list);
  1820. }
  1821. }
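/*
 * tb_disconnect_pci() - Tear down the PCIe tunnel to @sw
 *
 * Implements the disapprove_switch operation: disconnects the xHCI and
 * deactivates and frees the PCIe tunnel going to the router's PCIe
 * upstream adapter.
 */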
  1822. static int tb_disconnect_pci(struct tb *tb, struct tb_switch *sw)
  1823. {
  1824. struct tb_tunnel *tunnel;
  1825. struct tb_port *up;
  1826. up = tb_switch_find_port(sw, TB_TYPE_PCIE_UP);
  1827. if (WARN_ON(!up))
  1828. return -ENODEV;
  1829. tunnel = tb_find_tunnel(tb, TB_TUNNEL_PCI, NULL, up);
  1830. if (WARN_ON(!tunnel))
  1831. return -ENODEV;
  1832. tb_switch_xhci_disconnect(sw);
  1833. tb_tunnel_deactivate(tunnel);
  1834. list_del(&tunnel->list);
  1835. tb_tunnel_free(tunnel);
  1836. return 0;
  1837. }
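/*
 * tb_tunnel_pci() - Establish a PCIe tunnel to @sw
 *
 * Implements the approve_switch operation: finds the PCIe upstream
 * adapter of @sw and a matching downstream adapter on the parent
 * router, then allocates and activates the tunnel. Also enables PCIe
 * L1 (needed for CL0s on Titan Ridge) and connects the xHCI.
 */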
  1838. static int tb_tunnel_pci(struct tb *tb, struct tb_switch *sw)
  1839. {
  1840. struct tb_port *up, *down, *port;
  1841. struct tb_cm *tcm = tb_priv(tb);
  1842. struct tb_tunnel *tunnel;
  1843. up = tb_switch_find_port(sw, TB_TYPE_PCIE_UP);
  1844. if (!up)
  1845. return 0;
/*
 * Look up an available downstream PCIe port. Since we are
 * chaining, it should be found right above this switch.
 */
  1850. port = tb_switch_downstream_port(sw);
  1851. down = tb_find_pcie_down(tb_switch_parent(sw), port);
  1852. if (!down)
  1853. return 0;
  1854. tunnel = tb_tunnel_alloc_pci(tb, up, down);
  1855. if (!tunnel)
  1856. return -ENOMEM;
  1857. if (tb_tunnel_activate(tunnel)) {
  1858. tb_port_info(up,
  1859. "PCIe tunnel activation failed, aborting\n");
  1860. tb_tunnel_free(tunnel);
  1861. return -EIO;
  1862. }
  1863. /*
  1864. * PCIe L1 is needed to enable CL0s for Titan Ridge so enable it
  1865. * here.
  1866. */
  1867. if (tb_switch_pcie_l1_enable(sw))
  1868. tb_sw_warn(sw, "failed to enable PCIe L1 for Titan Ridge\n");
  1869. if (tb_switch_xhci_connect(sw))
  1870. tb_sw_warn(sw, "failed to connect xHCI\n");
  1871. list_add_tail(&tunnel->list, &tcm->tunnel_list);
  1872. return 0;
  1873. }
  1874. static int tb_approve_xdomain_paths(struct tb *tb, struct tb_xdomain *xd,
  1875. int transmit_path, int transmit_ring,
  1876. int receive_path, int receive_ring)
  1877. {
  1878. struct tb_cm *tcm = tb_priv(tb);
  1879. struct tb_port *nhi_port, *dst_port;
  1880. struct tb_tunnel *tunnel;
  1881. struct tb_switch *sw;
  1882. int ret;
  1883. sw = tb_to_switch(xd->dev.parent);
  1884. dst_port = tb_port_at(xd->route, sw);
  1885. nhi_port = tb_switch_find_port(tb->root_switch, TB_TYPE_NHI);
  1886. mutex_lock(&tb->lock);
  1887. /*
  1888. * When tunneling DMA paths the link should not enter CL states
  1889. * so disable them now.
  1890. */
  1891. tb_disable_clx(sw);
  1892. tunnel = tb_tunnel_alloc_dma(tb, nhi_port, dst_port, transmit_path,
  1893. transmit_ring, receive_path, receive_ring);
  1894. if (!tunnel) {
  1895. ret = -ENOMEM;
  1896. goto err_clx;
  1897. }
  1898. if (tb_tunnel_activate(tunnel)) {
  1899. tb_port_info(nhi_port,
  1900. "DMA tunnel activation failed, aborting\n");
  1901. ret = -EIO;
  1902. goto err_free;
  1903. }
  1904. list_add_tail(&tunnel->list, &tcm->tunnel_list);
  1905. mutex_unlock(&tb->lock);
  1906. return 0;
  1907. err_free:
  1908. tb_tunnel_free(tunnel);
  1909. err_clx:
  1910. tb_enable_clx(sw);
  1911. mutex_unlock(&tb->lock);
  1912. return ret;
  1913. }
  1914. static void __tb_disconnect_xdomain_paths(struct tb *tb, struct tb_xdomain *xd,
  1915. int transmit_path, int transmit_ring,
  1916. int receive_path, int receive_ring)
  1917. {
  1918. struct tb_cm *tcm = tb_priv(tb);
  1919. struct tb_port *nhi_port, *dst_port;
  1920. struct tb_tunnel *tunnel, *n;
  1921. struct tb_switch *sw;
  1922. sw = tb_to_switch(xd->dev.parent);
  1923. dst_port = tb_port_at(xd->route, sw);
  1924. nhi_port = tb_switch_find_port(tb->root_switch, TB_TYPE_NHI);
  1925. list_for_each_entry_safe(tunnel, n, &tcm->tunnel_list, list) {
  1926. if (!tb_tunnel_is_dma(tunnel))
  1927. continue;
  1928. if (tunnel->src_port != nhi_port || tunnel->dst_port != dst_port)
  1929. continue;
  1930. if (tb_tunnel_match_dma(tunnel, transmit_path, transmit_ring,
  1931. receive_path, receive_ring))
  1932. tb_deactivate_and_free_tunnel(tunnel);
  1933. }
  1934. /*
  1935. * Try to re-enable CL states now, it is OK if this fails
  1936. * because we may still have another DMA tunnel active through
  1937. * the same host router USB4 downstream port.
  1938. */
  1939. tb_enable_clx(sw);
  1940. }
  1941. static int tb_disconnect_xdomain_paths(struct tb *tb, struct tb_xdomain *xd,
  1942. int transmit_path, int transmit_ring,
  1943. int receive_path, int receive_ring)
  1944. {
  1945. if (!xd->is_unplugged) {
  1946. mutex_lock(&tb->lock);
  1947. __tb_disconnect_xdomain_paths(tb, xd, transmit_path,
  1948. transmit_ring, receive_path,
  1949. receive_ring);
  1950. mutex_unlock(&tb->lock);
  1951. }
  1952. return 0;
  1953. }
  1954. /* hotplug handling */
  1955. /*
  1956. * tb_handle_hotplug() - handle hotplug event
  1957. *
  1958. * Executes on tb->wq.
  1959. */
  1960. static void tb_handle_hotplug(struct work_struct *work)
  1961. {
  1962. struct tb_hotplug_event *ev = container_of(work, typeof(*ev), work);
  1963. struct tb *tb = ev->tb;
  1964. struct tb_cm *tcm = tb_priv(tb);
  1965. struct tb_switch *sw;
  1966. struct tb_port *port;
  1967. /* Bring the domain back from sleep if it was suspended */
  1968. pm_runtime_get_sync(&tb->dev);
  1969. mutex_lock(&tb->lock);
  1970. if (!tcm->hotplug_active)
  1971. goto out; /* during init, suspend or shutdown */
  1972. sw = tb_switch_find_by_route(tb, ev->route);
  1973. if (!sw) {
  1974. tb_warn(tb,
  1975. "hotplug event from non existent switch %llx:%x (unplug: %d)\n",
  1976. ev->route, ev->port, ev->unplug);
  1977. goto out;
  1978. }
  1979. if (ev->port > sw->config.max_port_number) {
  1980. tb_warn(tb,
  1981. "hotplug event from non existent port %llx:%x (unplug: %d)\n",
  1982. ev->route, ev->port, ev->unplug);
  1983. goto put_sw;
  1984. }
  1985. port = &sw->ports[ev->port];
  1986. if (tb_is_upstream_port(port)) {
  1987. tb_dbg(tb, "hotplug event for upstream port %llx:%x (unplug: %d)\n",
  1988. ev->route, ev->port, ev->unplug);
  1989. goto put_sw;
  1990. }
  1991. pm_runtime_get_sync(&sw->dev);
  1992. if (ev->unplug) {
  1993. tb_retimer_remove_all(port);
  1994. if (tb_port_has_remote(port)) {
  1995. tb_port_dbg(port, "switch unplugged\n");
  1996. tb_sw_set_unplugged(port->remote->sw);
  1997. tb_free_invalid_tunnels(tb);
  1998. tb_remove_dp_resources(port->remote->sw);
  1999. tb_switch_tmu_disable(port->remote->sw);
  2000. tb_switch_unconfigure_link(port->remote->sw);
  2001. tb_switch_set_link_width(port->remote->sw,
  2002. TB_LINK_WIDTH_SINGLE);
  2003. tb_switch_remove(port->remote->sw);
  2004. port->remote = NULL;
  2005. if (port->dual_link_port)
  2006. port->dual_link_port->remote = NULL;
  2007. /* Maybe we can create another DP tunnel */
  2008. tb_recalc_estimated_bandwidth(tb);
  2009. tb_tunnel_dp(tb);
  2010. } else if (port->xdomain) {
  2011. struct tb_xdomain *xd = tb_xdomain_get(port->xdomain);
  2012. tb_port_dbg(port, "xdomain unplugged\n");
  2013. /*
  2014. * Service drivers are unbound during
  2015. * tb_xdomain_remove() so setting XDomain as
  2016. * unplugged here prevents deadlock if they call
  2017. * tb_xdomain_disable_paths(). We will tear down
  2018. * all the tunnels below.
  2019. */
  2020. xd->is_unplugged = true;
  2021. tb_xdomain_remove(xd);
  2022. port->xdomain = NULL;
  2023. __tb_disconnect_xdomain_paths(tb, xd, -1, -1, -1, -1);
  2024. tb_xdomain_put(xd);
  2025. tb_port_unconfigure_xdomain(port);
  2026. } else if (tb_port_is_dpout(port) || tb_port_is_dpin(port)) {
  2027. tb_dp_resource_unavailable(tb, port);
  2028. } else if (!port->port) {
  2029. tb_sw_dbg(sw, "xHCI disconnect request\n");
  2030. tb_switch_xhci_disconnect(sw);
  2031. } else {
  2032. tb_port_dbg(port,
  2033. "got unplug event for disconnected port, ignoring\n");
  2034. }
  2035. } else if (port->remote) {
  2036. tb_port_dbg(port, "got plug event for connected port, ignoring\n");
  2037. } else if (!port->port && sw->authorized) {
  2038. tb_sw_dbg(sw, "xHCI connect request\n");
  2039. tb_switch_xhci_connect(sw);
  2040. } else {
  2041. if (tb_port_is_null(port)) {
  2042. tb_port_dbg(port, "hotplug: scanning\n");
  2043. tb_scan_port(port);
  2044. if (!port->remote)
  2045. tb_port_dbg(port, "hotplug: no switch found\n");
  2046. } else if (tb_port_is_dpout(port) || tb_port_is_dpin(port)) {
  2047. tb_dp_resource_available(tb, port);
  2048. }
  2049. }
  2050. pm_runtime_mark_last_busy(&sw->dev);
  2051. pm_runtime_put_autosuspend(&sw->dev);
  2052. put_sw:
  2053. tb_switch_put(sw);
  2054. out:
  2055. mutex_unlock(&tb->lock);
  2056. pm_runtime_mark_last_busy(&tb->dev);
  2057. pm_runtime_put_autosuspend(&tb->dev);
  2058. kfree(ev);
  2059. }
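/*
 * tb_alloc_dp_bandwidth() - Handle a DP IN bandwidth allocation request
 *
 * Adjusts the bandwidth allocated to @tunnel towards the requested
 * values. Requests that shrink the allocation keep the released
 * bandwidth reserved for the group for a while; requests that grow it
 * are satisfied from the available bandwidth (plus the group reserve)
 * after unused USB3 bandwidth has been released. On failure the
 * current allocation is written back so that the DPTX request fails.
 */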
  2060. static int tb_alloc_dp_bandwidth(struct tb_tunnel *tunnel, int *requested_up,
  2061. int *requested_down)
  2062. {
  2063. int allocated_up, allocated_down, available_up, available_down, ret;
  2064. int requested_up_corrected, requested_down_corrected, granularity;
  2065. int max_up, max_down, max_up_rounded, max_down_rounded;
  2066. struct tb_bandwidth_group *group;
  2067. struct tb *tb = tunnel->tb;
  2068. struct tb_port *in, *out;
  2069. bool downstream;
  2070. ret = tb_tunnel_allocated_bandwidth(tunnel, &allocated_up, &allocated_down);
  2071. if (ret)
  2072. return ret;
  2073. in = tunnel->src_port;
  2074. out = tunnel->dst_port;
  2075. tb_tunnel_dbg(tunnel, "bandwidth allocated currently %d/%d Mb/s\n",
  2076. allocated_up, allocated_down);
  2077. /*
  2078. * If we get rounded up request from graphics side, say HBR2 x 4
  2079. * that is 17500 instead of 17280 (this is because of the
  2080. * granularity), we allow it too. Here the graphics has already
  2081. * negotiated with the DPRX the maximum possible rates (which is
  2082. * 17280 in this case).
  2083. *
  2084. * Since the link cannot go higher than 17280 we use that in our
  2085. * calculations but the DP IN adapter Allocated BW write must be
  2086. * the same value (17500) otherwise the adapter will mark it as
  2087. * failed for graphics.
  2088. */
  2089. ret = tb_tunnel_maximum_bandwidth(tunnel, &max_up, &max_down);
  2090. if (ret)
  2091. goto fail;
  2092. ret = usb4_dp_port_granularity(in);
  2093. if (ret < 0)
  2094. goto fail;
  2095. granularity = ret;
  2096. max_up_rounded = roundup(max_up, granularity);
  2097. max_down_rounded = roundup(max_down, granularity);
/*
 * This "fixes" the request down to the maximum supported
 * rate * lanes if the request is at the rounded-up maximum.
 */
  2102. requested_up_corrected = *requested_up;
  2103. if (requested_up_corrected == max_up_rounded)
  2104. requested_up_corrected = max_up;
  2105. else if (requested_up_corrected < 0)
  2106. requested_up_corrected = 0;
  2107. requested_down_corrected = *requested_down;
  2108. if (requested_down_corrected == max_down_rounded)
  2109. requested_down_corrected = max_down;
  2110. else if (requested_down_corrected < 0)
  2111. requested_down_corrected = 0;
  2112. tb_tunnel_dbg(tunnel, "corrected bandwidth request %d/%d Mb/s\n",
  2113. requested_up_corrected, requested_down_corrected);
  2114. if ((*requested_up >= 0 && requested_up_corrected > max_up_rounded) ||
  2115. (*requested_down >= 0 && requested_down_corrected > max_down_rounded)) {
  2116. tb_tunnel_dbg(tunnel,
  2117. "bandwidth request too high (%d/%d Mb/s > %d/%d Mb/s)\n",
  2118. requested_up_corrected, requested_down_corrected,
  2119. max_up_rounded, max_down_rounded);
  2120. ret = -ENOBUFS;
  2121. goto fail;
  2122. }
  2123. downstream = tb_tunnel_direction_downstream(tunnel);
  2124. group = in->group;
  2125. if ((*requested_up >= 0 && requested_up_corrected <= allocated_up) ||
  2126. (*requested_down >= 0 && requested_down_corrected <= allocated_down)) {
  2127. if (tunnel->bw_mode) {
  2128. int reserved;
/*
 * If the requested bandwidth is less than or equal to
 * what is currently allocated to that tunnel we simply
 * change the reservation of the tunnel and add the
 * released bandwidth to the group for the next 10s.
 * After that it is released for others to use.
 */
  2137. if (downstream)
  2138. reserved = allocated_down - *requested_down;
  2139. else
  2140. reserved = allocated_up - *requested_up;
  2141. if (reserved > 0) {
  2142. group->reserved += reserved;
  2143. tb_dbg(tb, "group %d reserved %d total %d Mb/s\n",
  2144. group->index, reserved, group->reserved);
/*
 * If the release was not already pending, schedule
 * it now. If it was pending, postpone it for another
 * 10s (unless it is already running, in which case
 * the 10s have expired and we should give the
 * reserved bandwidth back to others).
 */
  2153. mod_delayed_work(system_wq, &group->release_work,
  2154. msecs_to_jiffies(TB_RELEASE_BW_TIMEOUT));
  2155. }
  2156. }
  2157. return tb_tunnel_alloc_bandwidth(tunnel, requested_up,
  2158. requested_down);
  2159. }
  2160. /*
  2161. * More bandwidth is requested. Release all the potential
  2162. * bandwidth from USB3 first.
  2163. */
  2164. ret = tb_release_unused_usb3_bandwidth(tb, in, out);
  2165. if (ret)
  2166. goto fail;
  2167. /*
  2168. * Then go over all tunnels that cross the same USB4 ports (they
  2169. * are also in the same group but we use the same function here
  2170. * that we use with the normal bandwidth allocation).
  2171. */
  2172. ret = tb_available_bandwidth(tb, in, out, &available_up, &available_down,
  2173. true);
  2174. if (ret)
  2175. goto reclaim;
  2176. tb_tunnel_dbg(tunnel, "bandwidth available for allocation %d/%d (+ %u reserved) Mb/s\n",
  2177. available_up, available_down, group->reserved);
  2178. if ((*requested_up >= 0 &&
  2179. available_up + group->reserved >= requested_up_corrected) ||
  2180. (*requested_down >= 0 &&
  2181. available_down + group->reserved >= requested_down_corrected)) {
  2182. int released = 0;
  2183. /*
  2184. * If bandwidth on a link is >= asym_threshold
  2185. * transition the link to asymmetric.
  2186. */
  2187. ret = tb_configure_asym(tb, in, out, *requested_up,
  2188. *requested_down);
  2189. if (ret) {
  2190. tb_configure_sym(tb, in, out, true);
  2191. goto fail;
  2192. }
  2193. ret = tb_tunnel_alloc_bandwidth(tunnel, requested_up,
  2194. requested_down);
  2195. if (ret) {
  2196. tb_tunnel_warn(tunnel, "failed to allocate bandwidth\n");
  2197. tb_configure_sym(tb, in, out, true);
  2198. }
  2199. if (downstream) {
  2200. if (*requested_down > available_down)
  2201. released = *requested_down - available_down;
  2202. } else {
  2203. if (*requested_up > available_up)
  2204. released = *requested_up - available_up;
  2205. }
  2206. if (released) {
  2207. group->reserved -= released;
  2208. tb_dbg(tb, "group %d released %d total %d Mb/s\n",
  2209. group->index, released, group->reserved);
  2210. }
  2211. } else {
  2212. ret = -ENOBUFS;
  2213. }
  2214. reclaim:
  2215. tb_reclaim_usb3_bandwidth(tb, in, out);
  2216. fail:
  2217. if (ret && ret != -ENODEV) {
/*
 * Write back the same allocated value (so no change); this
 * makes the DPTX request fail on the graphics side.
 */
  2222. tb_tunnel_dbg(tunnel,
  2223. "failing the request by rewriting allocated %d/%d Mb/s\n",
  2224. allocated_up, allocated_down);
  2225. tb_tunnel_alloc_bandwidth(tunnel, &allocated_up, &allocated_down);
  2226. }
  2227. return ret;
  2228. }
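/*
 * tb_handle_dp_bandwidth_request() - Handle bandwidth request work item
 *
 * Executes on tb->wq. Looks up the DP IN adapter and its tunnel, reads
 * the bandwidth requested by the DPTX and passes it to
 * tb_alloc_dp_bandwidth(). Updates the estimated bandwidth for other
 * clients when the allocation changes.
 */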
  2229. static void tb_handle_dp_bandwidth_request(struct work_struct *work)
  2230. {
  2231. struct tb_hotplug_event *ev = container_of(work, typeof(*ev), work);
  2232. int requested_bw, requested_up, requested_down, ret;
  2233. struct tb_tunnel *tunnel;
  2234. struct tb *tb = ev->tb;
  2235. struct tb_cm *tcm = tb_priv(tb);
  2236. struct tb_switch *sw;
  2237. struct tb_port *in;
  2238. pm_runtime_get_sync(&tb->dev);
  2239. mutex_lock(&tb->lock);
  2240. if (!tcm->hotplug_active)
  2241. goto unlock;
  2242. sw = tb_switch_find_by_route(tb, ev->route);
  2243. if (!sw) {
  2244. tb_warn(tb, "bandwidth request from non-existent router %llx\n",
  2245. ev->route);
  2246. goto unlock;
  2247. }
  2248. in = &sw->ports[ev->port];
  2249. if (!tb_port_is_dpin(in)) {
  2250. tb_port_warn(in, "bandwidth request to non-DP IN adapter\n");
  2251. goto put_sw;
  2252. }
  2253. tb_port_dbg(in, "handling bandwidth allocation request\n");
  2254. tunnel = tb_find_tunnel(tb, TB_TUNNEL_DP, in, NULL);
  2255. if (!tunnel) {
  2256. tb_port_warn(in, "failed to find tunnel\n");
  2257. goto put_sw;
  2258. }
  2259. if (!usb4_dp_port_bandwidth_mode_enabled(in)) {
  2260. if (tunnel->bw_mode) {
  2261. /*
  2262. * Reset the tunnel back to use the legacy
  2263. * allocation.
  2264. */
  2265. tunnel->bw_mode = false;
  2266. tb_port_dbg(in, "DPTX disabled bandwidth allocation mode\n");
  2267. } else {
  2268. tb_port_warn(in, "bandwidth allocation mode not enabled\n");
  2269. }
  2270. goto put_sw;
  2271. }
  2272. ret = usb4_dp_port_requested_bandwidth(in);
  2273. if (ret < 0) {
  2274. if (ret == -ENODATA) {
/*
 * There is no request active so this means the
 * bandwidth allocation mode was enabled from the
 * graphics side. At this point we know that the
 * graphics driver has read the DPRX capabilities
 * so we can offer a better bandwidth estimation.
 */
  2282. tb_port_dbg(in, "DPTX enabled bandwidth allocation mode, updating estimated bandwidth\n");
  2283. tb_recalc_estimated_bandwidth(tb);
  2284. } else {
  2285. tb_port_warn(in, "failed to read requested bandwidth\n");
  2286. }
  2287. goto put_sw;
  2288. }
  2289. requested_bw = ret;
  2290. tb_port_dbg(in, "requested bandwidth %d Mb/s\n", requested_bw);
  2291. if (tb_tunnel_direction_downstream(tunnel)) {
  2292. requested_up = -1;
  2293. requested_down = requested_bw;
  2294. } else {
  2295. requested_up = requested_bw;
  2296. requested_down = -1;
  2297. }
  2298. ret = tb_alloc_dp_bandwidth(tunnel, &requested_up, &requested_down);
  2299. if (ret) {
  2300. if (ret == -ENOBUFS)
  2301. tb_tunnel_warn(tunnel,
  2302. "not enough bandwidth available\n");
  2303. else
  2304. tb_tunnel_warn(tunnel,
  2305. "failed to change bandwidth allocation\n");
  2306. } else {
  2307. tb_tunnel_dbg(tunnel,
  2308. "bandwidth allocation changed to %d/%d Mb/s\n",
  2309. requested_up, requested_down);
  2310. /* Update other clients about the allocation change */
  2311. tb_recalc_estimated_bandwidth(tb);
  2312. }
  2313. put_sw:
  2314. tb_switch_put(sw);
  2315. unlock:
  2316. mutex_unlock(&tb->lock);
  2317. pm_runtime_mark_last_busy(&tb->dev);
  2318. pm_runtime_put_autosuspend(&tb->dev);
  2319. kfree(ev);
  2320. }
  2321. static void tb_queue_dp_bandwidth_request(struct tb *tb, u64 route, u8 port)
  2322. {
  2323. struct tb_hotplug_event *ev;
  2324. ev = kmalloc(sizeof(*ev), GFP_KERNEL);
  2325. if (!ev)
  2326. return;
  2327. ev->tb = tb;
  2328. ev->route = route;
  2329. ev->port = port;
  2330. INIT_WORK(&ev->work, tb_handle_dp_bandwidth_request);
  2331. queue_work(tb->wq, &ev->work);
  2332. }
  2333. static void tb_handle_notification(struct tb *tb, u64 route,
  2334. const struct cfg_error_pkg *error)
  2335. {
  2336. switch (error->error) {
  2337. case TB_CFG_ERROR_PCIE_WAKE:
  2338. case TB_CFG_ERROR_DP_CON_CHANGE:
  2339. case TB_CFG_ERROR_DPTX_DISCOVERY:
  2340. if (tb_cfg_ack_notification(tb->ctl, route, error))
  2341. tb_warn(tb, "could not ack notification on %llx\n",
  2342. route);
  2343. break;
  2344. case TB_CFG_ERROR_DP_BW:
  2345. if (tb_cfg_ack_notification(tb->ctl, route, error))
  2346. tb_warn(tb, "could not ack notification on %llx\n",
  2347. route);
  2348. tb_queue_dp_bandwidth_request(tb, route, error->port);
  2349. break;
  2350. default:
  2351. /* Ignore for now */
  2352. break;
  2353. }
  2354. }
/*
 * tb_handle_event() - callback function for the control channel
 *
 * Delegates hotplug events to tb_handle_hotplug().
 */
  2360. static void tb_handle_event(struct tb *tb, enum tb_cfg_pkg_type type,
  2361. const void *buf, size_t size)
  2362. {
  2363. const struct cfg_event_pkg *pkg = buf;
  2364. u64 route = tb_cfg_get_route(&pkg->header);
  2365. switch (type) {
  2366. case TB_CFG_PKG_ERROR:
  2367. tb_handle_notification(tb, route, (const struct cfg_error_pkg *)buf);
  2368. return;
  2369. case TB_CFG_PKG_EVENT:
  2370. break;
  2371. default:
  2372. tb_warn(tb, "unexpected event %#x, ignoring\n", type);
  2373. return;
  2374. }
  2375. if (tb_cfg_ack_plug(tb->ctl, route, pkg->port, pkg->unplug)) {
  2376. tb_warn(tb, "could not ack plug event on %llx:%x\n", route,
  2377. pkg->port);
  2378. }
  2379. tb_queue_hotplug(tb, route, pkg->port, pkg->unplug);
  2380. }
  2381. static void tb_stop(struct tb *tb)
  2382. {
  2383. struct tb_cm *tcm = tb_priv(tb);
  2384. struct tb_tunnel *tunnel;
  2385. struct tb_tunnel *n;
  2386. cancel_delayed_work(&tcm->remove_work);
  2387. /* tunnels are only present after everything has been initialized */
  2388. list_for_each_entry_safe(tunnel, n, &tcm->tunnel_list, list) {
  2389. /*
  2390. * DMA tunnels require the driver to be functional so we
  2391. * tear them down. Other protocol tunnels can be left
  2392. * intact.
  2393. */
  2394. if (tb_tunnel_is_dma(tunnel))
  2395. tb_tunnel_deactivate(tunnel);
  2396. tb_tunnel_free(tunnel);
  2397. }
  2398. tb_switch_remove(tb->root_switch);
  2399. tcm->hotplug_active = false; /* signal tb_handle_hotplug to quit */
  2400. }
  2401. static void tb_deinit(struct tb *tb)
  2402. {
  2403. struct tb_cm *tcm = tb_priv(tb);
  2404. int i;
  2405. /* Cancel all the release bandwidth workers */
  2406. for (i = 0; i < ARRAY_SIZE(tcm->groups); i++)
  2407. cancel_delayed_work_sync(&tcm->groups[i].release_work);
  2408. }
  2409. static int tb_scan_finalize_switch(struct device *dev, void *data)
  2410. {
  2411. if (tb_is_switch(dev)) {
  2412. struct tb_switch *sw = tb_to_switch(dev);
  2413. /*
  2414. * If we found that the switch was already setup by the
  2415. * boot firmware, mark it as authorized now before we
  2416. * send uevent to userspace.
  2417. */
  2418. if (sw->boot)
  2419. sw->authorized = 1;
  2420. dev_set_uevent_suppress(dev, false);
  2421. kobject_uevent(&dev->kobj, KOBJ_ADD);
  2422. device_for_each_child(dev, NULL, tb_scan_finalize_switch);
  2423. }
  2424. return 0;
  2425. }
  2426. static int tb_start(struct tb *tb, bool reset)
  2427. {
  2428. struct tb_cm *tcm = tb_priv(tb);
  2429. bool discover = true;
  2430. int ret;
  2431. tb->root_switch = tb_switch_alloc(tb, &tb->dev, 0);
  2432. if (IS_ERR(tb->root_switch))
  2433. return PTR_ERR(tb->root_switch);
  2434. /*
  2435. * ICM firmware upgrade needs running firmware and in native
  2436. * mode that is not available so disable firmware upgrade of the
  2437. * root switch.
  2438. *
  2439. * However, USB4 routers support NVM firmware upgrade if they
  2440. * implement the necessary router operations.
  2441. */
  2442. tb->root_switch->no_nvm_upgrade = !tb_switch_is_usb4(tb->root_switch);
  2443. /* All USB4 routers support runtime PM */
  2444. tb->root_switch->rpm = tb_switch_is_usb4(tb->root_switch);
  2445. ret = tb_switch_configure(tb->root_switch);
  2446. if (ret) {
  2447. tb_switch_put(tb->root_switch);
  2448. return ret;
  2449. }
  2450. /* Announce the switch to the world */
  2451. ret = tb_switch_add(tb->root_switch);
  2452. if (ret) {
  2453. tb_switch_put(tb->root_switch);
  2454. return ret;
  2455. }
  2456. /*
  2457. * To support highest CLx state, we set host router's TMU to
  2458. * Normal mode.
  2459. */
  2460. tb_switch_tmu_configure(tb->root_switch, TB_SWITCH_TMU_MODE_LOWRES);
  2461. /* Enable TMU if it is off */
  2462. tb_switch_tmu_enable(tb->root_switch);
  2463. /*
  2464. * Boot firmware might have created tunnels of its own. Since we
  2465. * cannot be sure they are usable for us, tear them down and
  2466. * reset the ports to handle it as new hotplug for USB4 v1
  2467. * routers (for USB4 v2 and beyond we already do host reset).
  2468. */
  2469. if (reset && tb_switch_is_usb4(tb->root_switch)) {
  2470. discover = false;
  2471. if (usb4_switch_version(tb->root_switch) == 1)
  2472. tb_switch_reset(tb->root_switch);
  2473. }
  2474. if (discover) {
  2475. /* Full scan to discover devices added before the driver was loaded. */
  2476. tb_scan_switch(tb->root_switch);
  2477. /* Find out tunnels created by the boot firmware */
  2478. tb_discover_tunnels(tb);
  2479. /* Add DP resources from the DP tunnels created by the boot firmware */
  2480. tb_discover_dp_resources(tb);
  2481. }
  2482. /*
  2483. * If the boot firmware did not create USB 3.x tunnels create them
  2484. * now for the whole topology.
  2485. */
  2486. tb_create_usb3_tunnels(tb->root_switch);
  2487. /* Add DP IN resources for the root switch */
  2488. tb_add_dp_resources(tb->root_switch);
  2489. tb_switch_enter_redrive(tb->root_switch);
  2490. /* Make the discovered switches available to the userspace */
  2491. device_for_each_child(&tb->root_switch->dev, NULL,
  2492. tb_scan_finalize_switch);
  2493. /* Allow tb_handle_hotplug to progress events */
  2494. tcm->hotplug_active = true;
  2495. return 0;
  2496. }
  2497. static int tb_suspend_noirq(struct tb *tb)
  2498. {
  2499. struct tb_cm *tcm = tb_priv(tb);
  2500. tb_dbg(tb, "suspending...\n");
  2501. tb_disconnect_and_release_dp(tb);
  2502. tb_switch_exit_redrive(tb->root_switch);
  2503. tb_switch_suspend(tb->root_switch, false);
  2504. tcm->hotplug_active = false; /* signal tb_handle_hotplug to quit */
  2505. tb_dbg(tb, "suspend finished\n");
  2506. return 0;
  2507. }
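/*
 * tb_restore_children() - Restore child routers after resume
 *
 * Re-enables CL states and the TMU, marks the configuration valid and
 * then restores link width and link configuration for each connected
 * child router (or re-configures the XDomain connection) recursively.
 */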
  2508. static void tb_restore_children(struct tb_switch *sw)
  2509. {
  2510. struct tb_port *port;
  2511. /* No need to restore if the router is already unplugged */
  2512. if (sw->is_unplugged)
  2513. return;
  2514. if (tb_enable_clx(sw))
  2515. tb_sw_warn(sw, "failed to re-enable CL states\n");
  2516. if (tb_enable_tmu(sw))
  2517. tb_sw_warn(sw, "failed to restore TMU configuration\n");
  2518. tb_switch_configuration_valid(sw);
  2519. tb_switch_for_each_port(sw, port) {
  2520. if (!tb_port_has_remote(port) && !port->xdomain)
  2521. continue;
  2522. if (port->remote) {
  2523. tb_switch_set_link_width(port->remote->sw,
  2524. port->remote->sw->link_width);
  2525. tb_switch_configure_link(port->remote->sw);
  2526. tb_restore_children(port->remote->sw);
  2527. } else if (port->xdomain) {
  2528. tb_port_configure_xdomain(port, port->xdomain);
  2529. }
  2530. }
  2531. }
  2532. static int tb_resume_noirq(struct tb *tb)
  2533. {
  2534. struct tb_cm *tcm = tb_priv(tb);
  2535. struct tb_tunnel *tunnel, *n;
  2536. unsigned int usb3_delay = 0;
  2537. LIST_HEAD(tunnels);
  2538. tb_dbg(tb, "resuming...\n");
  2539. /*
  2540. * For non-USB4 hosts (Apple systems) remove any PCIe devices
  2541. * the firmware might have setup.
  2542. */
  2543. if (!tb_switch_is_usb4(tb->root_switch))
  2544. tb_switch_reset(tb->root_switch);
  2545. tb_switch_resume(tb->root_switch, false);
  2546. tb_free_invalid_tunnels(tb);
  2547. tb_free_unplugged_children(tb->root_switch);
  2548. tb_restore_children(tb->root_switch);
  2549. /*
  2550. * If we get here from suspend to disk the boot firmware or the
  2551. * restore kernel might have created tunnels of its own. Since
  2552. * we cannot be sure they are usable for us we find and tear
  2553. * them down.
  2554. */
  2555. tb_switch_discover_tunnels(tb->root_switch, &tunnels, false);
  2556. list_for_each_entry_safe_reverse(tunnel, n, &tunnels, list) {
  2557. if (tb_tunnel_is_usb3(tunnel))
  2558. usb3_delay = 500;
  2559. tb_tunnel_deactivate(tunnel);
  2560. tb_tunnel_free(tunnel);
  2561. }
  2562. /* Re-create our tunnels now */
  2563. list_for_each_entry_safe(tunnel, n, &tcm->tunnel_list, list) {
  2564. /* USB3 requires delay before it can be re-activated */
  2565. if (tb_tunnel_is_usb3(tunnel)) {
  2566. msleep(usb3_delay);
  2567. /* Only need to do it once */
  2568. usb3_delay = 0;
  2569. }
  2570. tb_tunnel_restart(tunnel);
  2571. }
  2572. if (!list_empty(&tcm->tunnel_list)) {
/*
 * The PCIe links need some time to come up.
 * 100 ms has proven to be enough in practice.
 */
  2577. tb_dbg(tb, "tunnels restarted, sleeping for 100ms\n");
  2578. msleep(100);
  2579. }
  2580. tb_switch_enter_redrive(tb->root_switch);
  2581. /* Allow tb_handle_hotplug to progress events */
  2582. tcm->hotplug_active = true;
  2583. tb_dbg(tb, "resume finished\n");
  2584. return 0;
  2585. }
  2586. static int tb_free_unplugged_xdomains(struct tb_switch *sw)
  2587. {
  2588. struct tb_port *port;
  2589. int ret = 0;
  2590. tb_switch_for_each_port(sw, port) {
  2591. if (tb_is_upstream_port(port))
  2592. continue;
  2593. if (port->xdomain && port->xdomain->is_unplugged) {
  2594. tb_retimer_remove_all(port);
  2595. tb_xdomain_remove(port->xdomain);
  2596. tb_port_unconfigure_xdomain(port);
  2597. port->xdomain = NULL;
  2598. ret++;
  2599. } else if (port->remote) {
  2600. ret += tb_free_unplugged_xdomains(port->remote->sw);
  2601. }
  2602. }
  2603. return ret;
  2604. }
  2605. static int tb_freeze_noirq(struct tb *tb)
  2606. {
  2607. struct tb_cm *tcm = tb_priv(tb);
  2608. tcm->hotplug_active = false;
  2609. return 0;
  2610. }
  2611. static int tb_thaw_noirq(struct tb *tb)
  2612. {
  2613. struct tb_cm *tcm = tb_priv(tb);
  2614. tcm->hotplug_active = true;
  2615. return 0;
  2616. }
  2617. static void tb_complete(struct tb *tb)
  2618. {
  2619. /*
  2620. * Release any unplugged XDomains and if there is a case where
  2621. * another domain is swapped in place of unplugged XDomain we
  2622. * need to run another rescan.
  2623. */
  2624. mutex_lock(&tb->lock);
  2625. if (tb_free_unplugged_xdomains(tb->root_switch))
  2626. tb_scan_switch(tb->root_switch);
  2627. mutex_unlock(&tb->lock);
  2628. }
  2629. static int tb_runtime_suspend(struct tb *tb)
  2630. {
  2631. struct tb_cm *tcm = tb_priv(tb);
  2632. mutex_lock(&tb->lock);
  2633. /*
  2634. * The below call only releases DP resources to allow exiting and
  2635. * re-entering redrive mode.
  2636. */
  2637. tb_disconnect_and_release_dp(tb);
  2638. tb_switch_exit_redrive(tb->root_switch);
  2639. tb_switch_suspend(tb->root_switch, true);
  2640. tcm->hotplug_active = false;
  2641. mutex_unlock(&tb->lock);
  2642. return 0;
  2643. }
  2644. static void tb_remove_work(struct work_struct *work)
  2645. {
  2646. struct tb_cm *tcm = container_of(work, struct tb_cm, remove_work.work);
  2647. struct tb *tb = tcm_to_tb(tcm);
  2648. mutex_lock(&tb->lock);
  2649. if (tb->root_switch) {
  2650. tb_free_unplugged_children(tb->root_switch);
  2651. tb_free_unplugged_xdomains(tb->root_switch);
  2652. }
  2653. mutex_unlock(&tb->lock);
  2654. }
  2655. static int tb_runtime_resume(struct tb *tb)
  2656. {
  2657. struct tb_cm *tcm = tb_priv(tb);
  2658. struct tb_tunnel *tunnel, *n;
  2659. mutex_lock(&tb->lock);
  2660. tb_switch_resume(tb->root_switch, true);
  2661. tb_free_invalid_tunnels(tb);
  2662. tb_restore_children(tb->root_switch);
  2663. list_for_each_entry_safe(tunnel, n, &tcm->tunnel_list, list)
  2664. tb_tunnel_restart(tunnel);
  2665. tb_switch_enter_redrive(tb->root_switch);
  2666. tcm->hotplug_active = true;
  2667. mutex_unlock(&tb->lock);
  2668. /*
  2669. * Schedule cleanup of any unplugged devices. Run this in a
  2670. * separate thread to avoid possible deadlock if the device
  2671. * removal runtime resumes the unplugged device.
  2672. */
  2673. queue_delayed_work(tb->wq, &tcm->remove_work, msecs_to_jiffies(50));
  2674. return 0;
  2675. }
  2676. static const struct tb_cm_ops tb_cm_ops = {
  2677. .start = tb_start,
  2678. .stop = tb_stop,
  2679. .deinit = tb_deinit,
  2680. .suspend_noirq = tb_suspend_noirq,
  2681. .resume_noirq = tb_resume_noirq,
  2682. .freeze_noirq = tb_freeze_noirq,
  2683. .thaw_noirq = tb_thaw_noirq,
  2684. .complete = tb_complete,
  2685. .runtime_suspend = tb_runtime_suspend,
  2686. .runtime_resume = tb_runtime_resume,
  2687. .handle_event = tb_handle_event,
  2688. .disapprove_switch = tb_disconnect_pci,
  2689. .approve_switch = tb_tunnel_pci,
  2690. .approve_xdomain_paths = tb_approve_xdomain_paths,
  2691. .disconnect_xdomain_paths = tb_disconnect_xdomain_paths,
  2692. };
  2693. /*
  2694. * During suspend the Thunderbolt controller is reset and all PCIe
  2695. * tunnels are lost. The NHI driver will try to reestablish all tunnels
  2696. * during resume. This adds device links between the tunneled PCIe
  2697. * downstream ports and the NHI so that the device core will make sure
  2698. * NHI is resumed first before the rest.
  2699. */
  2700. static bool tb_apple_add_links(struct tb_nhi *nhi)
  2701. {
  2702. struct pci_dev *upstream, *pdev;
  2703. bool ret;
  2704. if (!x86_apple_machine)
  2705. return false;
  2706. switch (nhi->pdev->device) {
  2707. case PCI_DEVICE_ID_INTEL_LIGHT_RIDGE:
  2708. case PCI_DEVICE_ID_INTEL_CACTUS_RIDGE_4C:
  2709. case PCI_DEVICE_ID_INTEL_FALCON_RIDGE_2C_NHI:
  2710. case PCI_DEVICE_ID_INTEL_FALCON_RIDGE_4C_NHI:
  2711. break;
  2712. default:
  2713. return false;
  2714. }
  2715. upstream = pci_upstream_bridge(nhi->pdev);
  2716. while (upstream) {
  2717. if (!pci_is_pcie(upstream))
  2718. return false;
  2719. if (pci_pcie_type(upstream) == PCI_EXP_TYPE_UPSTREAM)
  2720. break;
  2721. upstream = pci_upstream_bridge(upstream);
  2722. }
  2723. if (!upstream)
  2724. return false;
/*
 * For each hotplug downstream port, add a device link back to
 * the NHI so that PCIe tunnels can be re-established after
 * sleep.
 */
  2730. ret = false;
  2731. for_each_pci_bridge(pdev, upstream->subordinate) {
  2732. const struct device_link *link;
  2733. if (!pci_is_pcie(pdev))
  2734. continue;
  2735. if (pci_pcie_type(pdev) != PCI_EXP_TYPE_DOWNSTREAM ||
  2736. !pdev->is_hotplug_bridge)
  2737. continue;
  2738. link = device_link_add(&pdev->dev, &nhi->pdev->dev,
  2739. DL_FLAG_AUTOREMOVE_SUPPLIER |
  2740. DL_FLAG_PM_RUNTIME);
  2741. if (link) {
  2742. dev_dbg(&nhi->pdev->dev, "created link from %s\n",
  2743. dev_name(&pdev->dev));
  2744. ret = true;
  2745. } else {
  2746. dev_warn(&nhi->pdev->dev, "device link creation from %s failed\n",
  2747. dev_name(&pdev->dev));
  2748. }
  2749. }
  2750. return ret;
  2751. }
  2752. struct tb *tb_probe(struct tb_nhi *nhi)
  2753. {
  2754. struct tb_cm *tcm;
  2755. struct tb *tb;
  2756. tb = tb_domain_alloc(nhi, TB_TIMEOUT, sizeof(*tcm));
  2757. if (!tb)
  2758. return NULL;
  2759. if (tb_acpi_may_tunnel_pcie())
  2760. tb->security_level = TB_SECURITY_USER;
  2761. else
  2762. tb->security_level = TB_SECURITY_NOPCIE;
  2763. tb->cm_ops = &tb_cm_ops;
  2764. tcm = tb_priv(tb);
  2765. INIT_LIST_HEAD(&tcm->tunnel_list);
  2766. INIT_LIST_HEAD(&tcm->dp_resources);
  2767. INIT_DELAYED_WORK(&tcm->remove_work, tb_remove_work);
  2768. tb_init_bandwidth_groups(tcm);
  2769. tb_dbg(tb, "using software connection manager\n");
  2770. /*
  2771. * Device links are needed to make sure we establish tunnels
  2772. * before the PCIe/USB stack is resumed so complain here if we
  2773. * found them missing.
  2774. */
  2775. if (!tb_apple_add_links(nhi) && !tb_acpi_add_links(nhi))
  2776. tb_warn(tb, "device links to tunneled native ports are missing!\n");
  2777. return tb;
  2778. }