tunnel.c 61 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270370470570670770870971071171271371471571671771871972072172272372472572672772872973073173273373473573673773873974074174274374474574674774874975075175275375475575675775875976076176276376476576676776876977077177277377477577677777877978078178278378478578678778878979079179279379479579679779879980080180280380480580680780880981081181281381481581681781881982082182282382482582682782882983083183283383483583683783883984084184284384484584684784884985085185285385485585685785885986086186286386486586686786886987087187287387487587687787887988088188288388488588688788888989089189289389489589689789889990090190290390490590690790890991091191291391491591691791891992092192292392492592692792892993093193293393493593693793893994094194294394494594694794894995095195295395495595695795895996096196296396496596696796896997097197297397497597697797897998098198298398498598698798898999099199299399499599699799899910001001100210031004100510061007100810091010101110121013101410151016101710181019102010211022102310241025102610271028102910301031103210331034103510361037103810391040104110421043104410451046104710481049105010511052105310541055105610571058105910601061106210631064106510661067106810691070107110721073107410751076107710781079108010811082108310841085108610871088108910901091109210931094109510961097109810991100110111021103110411051106110711081109111011111112111311141115111611171118111911201121112211231124112511261127112811291130113111321133113411351136113711381139114011411142114311441145114611471148114911501151115211531154115511561157115811591160116111621163116411651166116711681169117011711172117311741175117611771178117911801181118211831184118511861187118811891190119111921193119411951196119711981199120012011202120312041205120612071208120912101211121212131214121512161217121812191220122112221223122412251226122712281229123012311232123312341235123612371238123912401241124212431244124512461247124812491250125112521253125412551256125712581259126012611262126312641265126612671268126912701271127212731274127512761277127812791280128112821283128412851286128712881289129012911292129312941295129612971298129913001301130213031304130513061307130813091310131113121313131413151316131713181319132013211322132313241325132613271328132913301331133213331334133513361337133813391340134113421343134413451346134713481349135013511352135313541355135613571358135913601361136213631364136513661367136813691370137113721373137413751376137713781379138013811382138313841385138613871388138913901391139213931394139513961397139813991400140114021403140414051406140714081409141014111412141314141415141614171418141914201421142214231424142514261427142814291430143114321433143414351436143714381439144014411442144314441445144614471448144914501451145214531454145514561457145814591460146114621463146414651466146714681469147014711472147314741475147614771478147914801481148214831484148514861487148814891490149114921493149414951496149714981499150015011502150315041505150615071508150915101511151215131514151515161517151815191520152115221523152415251526152715281529153015311532153315341535153615371538153915401541154215431544154515461547154815491550155115521553155415551556155715581559156015611562156315641565156615671568156915701571157215731574157515761577157815791580158115821583158415851586158715881589159015911592159315941595159615971598159916001601160216031604160516061607160816091610161116121613161416151616161716181619162016211622162316241625162616271628162916301631163216331634163516361637163816391640164116421643164416451646164716481649165016511652165316541655165616571658165916601661166216631664166516661667166816691670167116721673167416751676167716781679168016811682168316841685168616871688168916901691169216931694169516961697169816991700170117021703170417051706170717081709171017111712171317141715171617171718171917201721172217231724172517261727172817291730173117321733173417351736173717381739174017411742174317441745174617471748174917501751175217531754175517561757175817591760176117621763176417651766176717681769177017711772177317741775177617771778177917801781178217831784178517861787178817891790179117921793179417951796179717981799180018011802180318041805180618071808180918101811181218131814181518161817181818191820182118221823182418251826182718281829183018311832183318341835183618371838183918401841184218431844184518461847184818491850185118521853185418551856185718581859186018611862186318641865186618671868186918701871187218731874187518761877187818791880188118821883188418851886188718881889189018911892189318941895189618971898189919001901190219031904190519061907190819091910191119121913191419151916191719181919192019211922192319241925192619271928192919301931193219331934193519361937193819391940194119421943194419451946194719481949195019511952195319541955195619571958195919601961196219631964196519661967196819691970197119721973197419751976197719781979198019811982198319841985198619871988198919901991199219931994199519961997199819992000200120022003200420052006200720082009201020112012201320142015201620172018201920202021202220232024202520262027202820292030203120322033203420352036203720382039204020412042204320442045204620472048204920502051205220532054205520562057205820592060206120622063206420652066206720682069207020712072207320742075207620772078207920802081208220832084208520862087208820892090209120922093209420952096209720982099210021012102210321042105210621072108210921102111211221132114211521162117211821192120212121222123212421252126212721282129213021312132213321342135213621372138213921402141214221432144214521462147214821492150215121522153215421552156215721582159216021612162216321642165216621672168216921702171217221732174217521762177217821792180218121822183218421852186218721882189219021912192219321942195219621972198219922002201220222032204220522062207220822092210221122122213221422152216221722182219222022212222222322242225222622272228222922302231223222332234223522362237223822392240224122422243224422452246224722482249225022512252225322542255225622572258225922602261226222632264226522662267226822692270227122722273227422752276227722782279228022812282228322842285228622872288228922902291229222932294229522962297229822992300230123022303230423052306230723082309231023112312231323142315231623172318231923202321232223232324232523262327232823292330233123322333233423352336233723382339234023412342234323442345234623472348234923502351235223532354235523562357235823592360236123622363236423652366236723682369237023712372237323742375237623772378237923802381238223832384238523862387238823892390239123922393239423952396239723982399240024012402240324042405240624072408240924102411241224132414241524162417241824192420242124222423242424252426242724282429243024312432243324342435243624372438243924402441244224432444244524462447244824492450245124522453
  1. // SPDX-License-Identifier: GPL-2.0
  2. /*
  3. * Thunderbolt driver - Tunneling support
  4. *
  5. * Copyright (c) 2014 Andreas Noever <andreas.noever@gmail.com>
  6. * Copyright (C) 2019, Intel Corporation
  7. */
  8. #include <linux/delay.h>
  9. #include <linux/slab.h>
  10. #include <linux/list.h>
  11. #include <linux/ktime.h>
  12. #include <linux/string_helpers.h>
  13. #include "tunnel.h"
  14. #include "tb.h"
  15. /* PCIe adapters use always HopID of 8 for both directions */
  16. #define TB_PCI_HOPID 8
  17. #define TB_PCI_PATH_DOWN 0
  18. #define TB_PCI_PATH_UP 1
  19. #define TB_PCI_PRIORITY 3
  20. #define TB_PCI_WEIGHT 1
  21. /* USB3 adapters use always HopID of 8 for both directions */
  22. #define TB_USB3_HOPID 8
  23. #define TB_USB3_PATH_DOWN 0
  24. #define TB_USB3_PATH_UP 1
  25. #define TB_USB3_PRIORITY 3
  26. #define TB_USB3_WEIGHT 2
  27. /* DP adapters use HopID 8 for AUX and 9 for Video */
  28. #define TB_DP_AUX_TX_HOPID 8
  29. #define TB_DP_AUX_RX_HOPID 8
  30. #define TB_DP_VIDEO_HOPID 9
  31. #define TB_DP_VIDEO_PATH_OUT 0
  32. #define TB_DP_AUX_PATH_OUT 1
  33. #define TB_DP_AUX_PATH_IN 2
  34. #define TB_DP_VIDEO_PRIORITY 1
  35. #define TB_DP_VIDEO_WEIGHT 1
  36. #define TB_DP_AUX_PRIORITY 2
  37. #define TB_DP_AUX_WEIGHT 1
  38. /* Minimum number of credits needed for PCIe path */
  39. #define TB_MIN_PCIE_CREDITS 6U
  40. /*
  41. * Number of credits we try to allocate for each DMA path if not limited
  42. * by the host router baMaxHI.
  43. */
  44. #define TB_DMA_CREDITS 14
  45. /* Minimum number of credits for DMA path */
  46. #define TB_MIN_DMA_CREDITS 1
  47. #define TB_DMA_PRIORITY 5
  48. #define TB_DMA_WEIGHT 1
  49. /*
  50. * Reserve additional bandwidth for USB 3.x and PCIe bulk traffic
  51. * according to USB4 v2 Connection Manager guide. This ends up reserving
  52. * 1500 Mb/s for PCIe and 3000 Mb/s for USB 3.x taking weights into
  53. * account.
  54. */
  55. #define USB4_V2_PCI_MIN_BANDWIDTH (1500 * TB_PCI_WEIGHT)
  56. #define USB4_V2_USB3_MIN_BANDWIDTH (1500 * TB_USB3_WEIGHT)
  57. static unsigned int dma_credits = TB_DMA_CREDITS;
  58. module_param(dma_credits, uint, 0444);
  59. MODULE_PARM_DESC(dma_credits, "specify custom credits for DMA tunnels (default: "
  60. __MODULE_STRING(TB_DMA_CREDITS) ")");
  61. static bool bw_alloc_mode = true;
  62. module_param(bw_alloc_mode, bool, 0444);
  63. MODULE_PARM_DESC(bw_alloc_mode,
  64. "enable bandwidth allocation mode if supported (default: true)");
  65. static const char * const tb_tunnel_names[] = { "PCI", "DP", "DMA", "USB3" };
  66. static inline unsigned int tb_usable_credits(const struct tb_port *port)
  67. {
  68. return port->total_credits - port->ctl_credits;
  69. }
  70. /**
  71. * tb_available_credits() - Available credits for PCIe and DMA
  72. * @port: Lane adapter to check
  73. * @max_dp_streams: If non-%NULL stores maximum number of simultaneous DP
  74. * streams possible through this lane adapter
  75. */
  76. static unsigned int tb_available_credits(const struct tb_port *port,
  77. size_t *max_dp_streams)
  78. {
  79. const struct tb_switch *sw = port->sw;
  80. int credits, usb3, pcie, spare;
  81. size_t ndp;
  82. usb3 = tb_acpi_may_tunnel_usb3() ? sw->max_usb3_credits : 0;
  83. pcie = tb_acpi_may_tunnel_pcie() ? sw->max_pcie_credits : 0;
  84. if (tb_acpi_is_xdomain_allowed()) {
  85. spare = min_not_zero(sw->max_dma_credits, dma_credits);
  86. /* Add some credits for potential second DMA tunnel */
  87. spare += TB_MIN_DMA_CREDITS;
  88. } else {
  89. spare = 0;
  90. }
  91. credits = tb_usable_credits(port);
  92. if (tb_acpi_may_tunnel_dp()) {
  93. /*
  94. * Maximum number of DP streams possible through the
  95. * lane adapter.
  96. */
  97. if (sw->min_dp_aux_credits + sw->min_dp_main_credits)
  98. ndp = (credits - (usb3 + pcie + spare)) /
  99. (sw->min_dp_aux_credits + sw->min_dp_main_credits);
  100. else
  101. ndp = 0;
  102. } else {
  103. ndp = 0;
  104. }
  105. credits -= ndp * (sw->min_dp_aux_credits + sw->min_dp_main_credits);
  106. credits -= usb3;
  107. if (max_dp_streams)
  108. *max_dp_streams = ndp;
  109. return credits > 0 ? credits : 0;
  110. }
  111. static void tb_init_pm_support(struct tb_path_hop *hop)
  112. {
  113. struct tb_port *out_port = hop->out_port;
  114. struct tb_port *in_port = hop->in_port;
  115. if (tb_port_is_null(in_port) && tb_port_is_null(out_port) &&
  116. usb4_switch_version(in_port->sw) >= 2)
  117. hop->pm_support = true;
  118. }
  119. static struct tb_tunnel *tb_tunnel_alloc(struct tb *tb, size_t npaths,
  120. enum tb_tunnel_type type)
  121. {
  122. struct tb_tunnel *tunnel;
  123. tunnel = kzalloc(sizeof(*tunnel), GFP_KERNEL);
  124. if (!tunnel)
  125. return NULL;
  126. tunnel->paths = kcalloc(npaths, sizeof(tunnel->paths[0]), GFP_KERNEL);
  127. if (!tunnel->paths) {
  128. tb_tunnel_free(tunnel);
  129. return NULL;
  130. }
  131. INIT_LIST_HEAD(&tunnel->list);
  132. tunnel->tb = tb;
  133. tunnel->npaths = npaths;
  134. tunnel->type = type;
  135. return tunnel;
  136. }
  137. static int tb_pci_set_ext_encapsulation(struct tb_tunnel *tunnel, bool enable)
  138. {
  139. struct tb_port *port = tb_upstream_port(tunnel->dst_port->sw);
  140. int ret;
  141. /* Only supported of both routers are at least USB4 v2 */
  142. if ((usb4_switch_version(tunnel->src_port->sw) < 2) ||
  143. (usb4_switch_version(tunnel->dst_port->sw) < 2))
  144. return 0;
  145. if (enable && tb_port_get_link_generation(port) < 4)
  146. return 0;
  147. ret = usb4_pci_port_set_ext_encapsulation(tunnel->src_port, enable);
  148. if (ret)
  149. return ret;
  150. /*
  151. * Downstream router could be unplugged so disable of encapsulation
  152. * in upstream router is still possible.
  153. */
  154. ret = usb4_pci_port_set_ext_encapsulation(tunnel->dst_port, enable);
  155. if (ret) {
  156. if (enable)
  157. return ret;
  158. if (ret != -ENODEV)
  159. return ret;
  160. }
  161. tb_tunnel_dbg(tunnel, "extended encapsulation %s\n",
  162. str_enabled_disabled(enable));
  163. return 0;
  164. }
  165. static int tb_pci_activate(struct tb_tunnel *tunnel, bool activate)
  166. {
  167. int res;
  168. if (activate) {
  169. res = tb_pci_set_ext_encapsulation(tunnel, activate);
  170. if (res)
  171. return res;
  172. }
  173. if (activate)
  174. res = tb_pci_port_enable(tunnel->dst_port, activate);
  175. else
  176. res = tb_pci_port_enable(tunnel->src_port, activate);
  177. if (res)
  178. return res;
  179. if (activate) {
  180. res = tb_pci_port_enable(tunnel->src_port, activate);
  181. if (res)
  182. return res;
  183. } else {
  184. /* Downstream router could be unplugged */
  185. tb_pci_port_enable(tunnel->dst_port, activate);
  186. }
  187. return activate ? 0 : tb_pci_set_ext_encapsulation(tunnel, activate);
  188. }
  189. static int tb_pci_init_credits(struct tb_path_hop *hop)
  190. {
  191. struct tb_port *port = hop->in_port;
  192. struct tb_switch *sw = port->sw;
  193. unsigned int credits;
  194. if (tb_port_use_credit_allocation(port)) {
  195. unsigned int available;
  196. available = tb_available_credits(port, NULL);
  197. credits = min(sw->max_pcie_credits, available);
  198. if (credits < TB_MIN_PCIE_CREDITS)
  199. return -ENOSPC;
  200. credits = max(TB_MIN_PCIE_CREDITS, credits);
  201. } else {
  202. if (tb_port_is_null(port))
  203. credits = port->bonded ? 32 : 16;
  204. else
  205. credits = 7;
  206. }
  207. hop->initial_credits = credits;
  208. return 0;
  209. }
  210. static int tb_pci_init_path(struct tb_path *path)
  211. {
  212. struct tb_path_hop *hop;
  213. path->egress_fc_enable = TB_PATH_SOURCE | TB_PATH_INTERNAL;
  214. path->egress_shared_buffer = TB_PATH_NONE;
  215. path->ingress_fc_enable = TB_PATH_ALL;
  216. path->ingress_shared_buffer = TB_PATH_NONE;
  217. path->priority = TB_PCI_PRIORITY;
  218. path->weight = TB_PCI_WEIGHT;
  219. path->drop_packages = 0;
  220. tb_path_for_each_hop(path, hop) {
  221. int ret;
  222. ret = tb_pci_init_credits(hop);
  223. if (ret)
  224. return ret;
  225. }
  226. return 0;
  227. }
  228. /**
  229. * tb_tunnel_discover_pci() - Discover existing PCIe tunnels
  230. * @tb: Pointer to the domain structure
  231. * @down: PCIe downstream adapter
  232. * @alloc_hopid: Allocate HopIDs from visited ports
  233. *
  234. * If @down adapter is active, follows the tunnel to the PCIe upstream
  235. * adapter and back. Returns the discovered tunnel or %NULL if there was
  236. * no tunnel.
  237. */
  238. struct tb_tunnel *tb_tunnel_discover_pci(struct tb *tb, struct tb_port *down,
  239. bool alloc_hopid)
  240. {
  241. struct tb_tunnel *tunnel;
  242. struct tb_path *path;
  243. if (!tb_pci_port_is_enabled(down))
  244. return NULL;
  245. tunnel = tb_tunnel_alloc(tb, 2, TB_TUNNEL_PCI);
  246. if (!tunnel)
  247. return NULL;
  248. tunnel->activate = tb_pci_activate;
  249. tunnel->src_port = down;
  250. /*
  251. * Discover both paths even if they are not complete. We will
  252. * clean them up by calling tb_tunnel_deactivate() below in that
  253. * case.
  254. */
  255. path = tb_path_discover(down, TB_PCI_HOPID, NULL, -1,
  256. &tunnel->dst_port, "PCIe Up", alloc_hopid);
  257. if (!path) {
  258. /* Just disable the downstream port */
  259. tb_pci_port_enable(down, false);
  260. goto err_free;
  261. }
  262. tunnel->paths[TB_PCI_PATH_UP] = path;
  263. if (tb_pci_init_path(tunnel->paths[TB_PCI_PATH_UP]))
  264. goto err_free;
  265. path = tb_path_discover(tunnel->dst_port, -1, down, TB_PCI_HOPID, NULL,
  266. "PCIe Down", alloc_hopid);
  267. if (!path)
  268. goto err_deactivate;
  269. tunnel->paths[TB_PCI_PATH_DOWN] = path;
  270. if (tb_pci_init_path(tunnel->paths[TB_PCI_PATH_DOWN]))
  271. goto err_deactivate;
  272. /* Validate that the tunnel is complete */
  273. if (!tb_port_is_pcie_up(tunnel->dst_port)) {
  274. tb_port_warn(tunnel->dst_port,
  275. "path does not end on a PCIe adapter, cleaning up\n");
  276. goto err_deactivate;
  277. }
  278. if (down != tunnel->src_port) {
  279. tb_tunnel_warn(tunnel, "path is not complete, cleaning up\n");
  280. goto err_deactivate;
  281. }
  282. if (!tb_pci_port_is_enabled(tunnel->dst_port)) {
  283. tb_tunnel_warn(tunnel,
  284. "tunnel is not fully activated, cleaning up\n");
  285. goto err_deactivate;
  286. }
  287. tb_tunnel_dbg(tunnel, "discovered\n");
  288. return tunnel;
  289. err_deactivate:
  290. tb_tunnel_deactivate(tunnel);
  291. err_free:
  292. tb_tunnel_free(tunnel);
  293. return NULL;
  294. }
  295. /**
  296. * tb_tunnel_alloc_pci() - allocate a pci tunnel
  297. * @tb: Pointer to the domain structure
  298. * @up: PCIe upstream adapter port
  299. * @down: PCIe downstream adapter port
  300. *
  301. * Allocate a PCI tunnel. The ports must be of type TB_TYPE_PCIE_UP and
  302. * TB_TYPE_PCIE_DOWN.
  303. *
  304. * Return: Returns a tb_tunnel on success or NULL on failure.
  305. */
  306. struct tb_tunnel *tb_tunnel_alloc_pci(struct tb *tb, struct tb_port *up,
  307. struct tb_port *down)
  308. {
  309. struct tb_tunnel *tunnel;
  310. struct tb_path *path;
  311. tunnel = tb_tunnel_alloc(tb, 2, TB_TUNNEL_PCI);
  312. if (!tunnel)
  313. return NULL;
  314. tunnel->activate = tb_pci_activate;
  315. tunnel->src_port = down;
  316. tunnel->dst_port = up;
  317. path = tb_path_alloc(tb, down, TB_PCI_HOPID, up, TB_PCI_HOPID, 0,
  318. "PCIe Down");
  319. if (!path)
  320. goto err_free;
  321. tunnel->paths[TB_PCI_PATH_DOWN] = path;
  322. if (tb_pci_init_path(path))
  323. goto err_free;
  324. path = tb_path_alloc(tb, up, TB_PCI_HOPID, down, TB_PCI_HOPID, 0,
  325. "PCIe Up");
  326. if (!path)
  327. goto err_free;
  328. tunnel->paths[TB_PCI_PATH_UP] = path;
  329. if (tb_pci_init_path(path))
  330. goto err_free;
  331. return tunnel;
  332. err_free:
  333. tb_tunnel_free(tunnel);
  334. return NULL;
  335. }
  336. /**
  337. * tb_tunnel_reserved_pci() - Amount of bandwidth to reserve for PCIe
  338. * @port: Lane 0 adapter
  339. * @reserved_up: Upstream bandwidth in Mb/s to reserve
  340. * @reserved_down: Downstream bandwidth in Mb/s to reserve
  341. *
  342. * Can be called to any connected lane 0 adapter to find out how much
  343. * bandwidth needs to be left in reserve for possible PCIe bulk traffic.
  344. * Returns true if there is something to be reserved and writes the
  345. * amount to @reserved_down/@reserved_up. Otherwise returns false and
  346. * does not touch the parameters.
  347. */
  348. bool tb_tunnel_reserved_pci(struct tb_port *port, int *reserved_up,
  349. int *reserved_down)
  350. {
  351. if (WARN_ON_ONCE(!port->remote))
  352. return false;
  353. if (!tb_acpi_may_tunnel_pcie())
  354. return false;
  355. if (tb_port_get_link_generation(port) < 4)
  356. return false;
  357. /* Must have PCIe adapters */
  358. if (tb_is_upstream_port(port)) {
  359. if (!tb_switch_find_port(port->sw, TB_TYPE_PCIE_UP))
  360. return false;
  361. if (!tb_switch_find_port(port->remote->sw, TB_TYPE_PCIE_DOWN))
  362. return false;
  363. } else {
  364. if (!tb_switch_find_port(port->sw, TB_TYPE_PCIE_DOWN))
  365. return false;
  366. if (!tb_switch_find_port(port->remote->sw, TB_TYPE_PCIE_UP))
  367. return false;
  368. }
  369. *reserved_up = USB4_V2_PCI_MIN_BANDWIDTH;
  370. *reserved_down = USB4_V2_PCI_MIN_BANDWIDTH;
  371. tb_port_dbg(port, "reserving %u/%u Mb/s for PCIe\n", *reserved_up,
  372. *reserved_down);
  373. return true;
  374. }
  375. static bool tb_dp_is_usb4(const struct tb_switch *sw)
  376. {
  377. /* Titan Ridge DP adapters need the same treatment as USB4 */
  378. return tb_switch_is_usb4(sw) || tb_switch_is_titan_ridge(sw);
  379. }
  380. static int tb_dp_cm_handshake(struct tb_port *in, struct tb_port *out,
  381. int timeout_msec)
  382. {
  383. ktime_t timeout = ktime_add_ms(ktime_get(), timeout_msec);
  384. u32 val;
  385. int ret;
  386. /* Both ends need to support this */
  387. if (!tb_dp_is_usb4(in->sw) || !tb_dp_is_usb4(out->sw))
  388. return 0;
  389. ret = tb_port_read(out, &val, TB_CFG_PORT,
  390. out->cap_adap + DP_STATUS_CTRL, 1);
  391. if (ret)
  392. return ret;
  393. val |= DP_STATUS_CTRL_UF | DP_STATUS_CTRL_CMHS;
  394. ret = tb_port_write(out, &val, TB_CFG_PORT,
  395. out->cap_adap + DP_STATUS_CTRL, 1);
  396. if (ret)
  397. return ret;
  398. do {
  399. ret = tb_port_read(out, &val, TB_CFG_PORT,
  400. out->cap_adap + DP_STATUS_CTRL, 1);
  401. if (ret)
  402. return ret;
  403. if (!(val & DP_STATUS_CTRL_CMHS))
  404. return 0;
  405. usleep_range(100, 150);
  406. } while (ktime_before(ktime_get(), timeout));
  407. return -ETIMEDOUT;
  408. }
  409. /*
  410. * Returns maximum possible rate from capability supporting only DP 2.0
  411. * and below. Used when DP BW allocation mode is not enabled.
  412. */
  413. static inline u32 tb_dp_cap_get_rate(u32 val)
  414. {
  415. u32 rate = (val & DP_COMMON_CAP_RATE_MASK) >> DP_COMMON_CAP_RATE_SHIFT;
  416. switch (rate) {
  417. case DP_COMMON_CAP_RATE_RBR:
  418. return 1620;
  419. case DP_COMMON_CAP_RATE_HBR:
  420. return 2700;
  421. case DP_COMMON_CAP_RATE_HBR2:
  422. return 5400;
  423. case DP_COMMON_CAP_RATE_HBR3:
  424. return 8100;
  425. default:
  426. return 0;
  427. }
  428. }
  429. /*
  430. * Returns maximum possible rate from capability supporting DP 2.1
  431. * UHBR20, 13.5 and 10 rates as well. Use only when DP BW allocation
  432. * mode is enabled.
  433. */
  434. static inline u32 tb_dp_cap_get_rate_ext(u32 val)
  435. {
  436. if (val & DP_COMMON_CAP_UHBR20)
  437. return 20000;
  438. else if (val & DP_COMMON_CAP_UHBR13_5)
  439. return 13500;
  440. else if (val & DP_COMMON_CAP_UHBR10)
  441. return 10000;
  442. return tb_dp_cap_get_rate(val);
  443. }
  444. static inline bool tb_dp_is_uhbr_rate(unsigned int rate)
  445. {
  446. return rate >= 10000;
  447. }
  448. static inline u32 tb_dp_cap_set_rate(u32 val, u32 rate)
  449. {
  450. val &= ~DP_COMMON_CAP_RATE_MASK;
  451. switch (rate) {
  452. default:
  453. WARN(1, "invalid rate %u passed, defaulting to 1620 MB/s\n", rate);
  454. fallthrough;
  455. case 1620:
  456. val |= DP_COMMON_CAP_RATE_RBR << DP_COMMON_CAP_RATE_SHIFT;
  457. break;
  458. case 2700:
  459. val |= DP_COMMON_CAP_RATE_HBR << DP_COMMON_CAP_RATE_SHIFT;
  460. break;
  461. case 5400:
  462. val |= DP_COMMON_CAP_RATE_HBR2 << DP_COMMON_CAP_RATE_SHIFT;
  463. break;
  464. case 8100:
  465. val |= DP_COMMON_CAP_RATE_HBR3 << DP_COMMON_CAP_RATE_SHIFT;
  466. break;
  467. }
  468. return val;
  469. }
  470. static inline u32 tb_dp_cap_get_lanes(u32 val)
  471. {
  472. u32 lanes = (val & DP_COMMON_CAP_LANES_MASK) >> DP_COMMON_CAP_LANES_SHIFT;
  473. switch (lanes) {
  474. case DP_COMMON_CAP_1_LANE:
  475. return 1;
  476. case DP_COMMON_CAP_2_LANES:
  477. return 2;
  478. case DP_COMMON_CAP_4_LANES:
  479. return 4;
  480. default:
  481. return 0;
  482. }
  483. }
  484. static inline u32 tb_dp_cap_set_lanes(u32 val, u32 lanes)
  485. {
  486. val &= ~DP_COMMON_CAP_LANES_MASK;
  487. switch (lanes) {
  488. default:
  489. WARN(1, "invalid number of lanes %u passed, defaulting to 1\n",
  490. lanes);
  491. fallthrough;
  492. case 1:
  493. val |= DP_COMMON_CAP_1_LANE << DP_COMMON_CAP_LANES_SHIFT;
  494. break;
  495. case 2:
  496. val |= DP_COMMON_CAP_2_LANES << DP_COMMON_CAP_LANES_SHIFT;
  497. break;
  498. case 4:
  499. val |= DP_COMMON_CAP_4_LANES << DP_COMMON_CAP_LANES_SHIFT;
  500. break;
  501. }
  502. return val;
  503. }
  504. static unsigned int tb_dp_bandwidth(unsigned int rate, unsigned int lanes)
  505. {
  506. /* Tunneling removes the DP 8b/10b 128/132b encoding */
  507. if (tb_dp_is_uhbr_rate(rate))
  508. return rate * lanes * 128 / 132;
  509. return rate * lanes * 8 / 10;
  510. }
  511. static int tb_dp_reduce_bandwidth(int max_bw, u32 in_rate, u32 in_lanes,
  512. u32 out_rate, u32 out_lanes, u32 *new_rate,
  513. u32 *new_lanes)
  514. {
  515. static const u32 dp_bw[][2] = {
  516. /* Mb/s, lanes */
  517. { 8100, 4 }, /* 25920 Mb/s */
  518. { 5400, 4 }, /* 17280 Mb/s */
  519. { 8100, 2 }, /* 12960 Mb/s */
  520. { 2700, 4 }, /* 8640 Mb/s */
  521. { 5400, 2 }, /* 8640 Mb/s */
  522. { 8100, 1 }, /* 6480 Mb/s */
  523. { 1620, 4 }, /* 5184 Mb/s */
  524. { 5400, 1 }, /* 4320 Mb/s */
  525. { 2700, 2 }, /* 4320 Mb/s */
  526. { 1620, 2 }, /* 2592 Mb/s */
  527. { 2700, 1 }, /* 2160 Mb/s */
  528. { 1620, 1 }, /* 1296 Mb/s */
  529. };
  530. unsigned int i;
  531. /*
  532. * Find a combination that can fit into max_bw and does not
  533. * exceed the maximum rate and lanes supported by the DP OUT and
  534. * DP IN adapters.
  535. */
  536. for (i = 0; i < ARRAY_SIZE(dp_bw); i++) {
  537. if (dp_bw[i][0] > out_rate || dp_bw[i][1] > out_lanes)
  538. continue;
  539. if (dp_bw[i][0] > in_rate || dp_bw[i][1] > in_lanes)
  540. continue;
  541. if (tb_dp_bandwidth(dp_bw[i][0], dp_bw[i][1]) <= max_bw) {
  542. *new_rate = dp_bw[i][0];
  543. *new_lanes = dp_bw[i][1];
  544. return 0;
  545. }
  546. }
  547. return -ENOSR;
  548. }
  549. static int tb_dp_xchg_caps(struct tb_tunnel *tunnel)
  550. {
  551. u32 out_dp_cap, out_rate, out_lanes, in_dp_cap, in_rate, in_lanes, bw;
  552. struct tb_port *out = tunnel->dst_port;
  553. struct tb_port *in = tunnel->src_port;
  554. int ret, max_bw;
  555. /*
  556. * Copy DP_LOCAL_CAP register to DP_REMOTE_CAP register for
  557. * newer generation hardware.
  558. */
  559. if (in->sw->generation < 2 || out->sw->generation < 2)
  560. return 0;
  561. /*
  562. * Perform connection manager handshake between IN and OUT ports
  563. * before capabilities exchange can take place.
  564. */
  565. ret = tb_dp_cm_handshake(in, out, 3000);
  566. if (ret)
  567. return ret;
  568. /* Read both DP_LOCAL_CAP registers */
  569. ret = tb_port_read(in, &in_dp_cap, TB_CFG_PORT,
  570. in->cap_adap + DP_LOCAL_CAP, 1);
  571. if (ret)
  572. return ret;
  573. ret = tb_port_read(out, &out_dp_cap, TB_CFG_PORT,
  574. out->cap_adap + DP_LOCAL_CAP, 1);
  575. if (ret)
  576. return ret;
  577. /* Write IN local caps to OUT remote caps */
  578. ret = tb_port_write(out, &in_dp_cap, TB_CFG_PORT,
  579. out->cap_adap + DP_REMOTE_CAP, 1);
  580. if (ret)
  581. return ret;
  582. in_rate = tb_dp_cap_get_rate(in_dp_cap);
  583. in_lanes = tb_dp_cap_get_lanes(in_dp_cap);
  584. tb_tunnel_dbg(tunnel,
  585. "DP IN maximum supported bandwidth %u Mb/s x%u = %u Mb/s\n",
  586. in_rate, in_lanes, tb_dp_bandwidth(in_rate, in_lanes));
  587. /*
  588. * If the tunnel bandwidth is limited (max_bw is set) then see
  589. * if we need to reduce bandwidth to fit there.
  590. */
  591. out_rate = tb_dp_cap_get_rate(out_dp_cap);
  592. out_lanes = tb_dp_cap_get_lanes(out_dp_cap);
  593. bw = tb_dp_bandwidth(out_rate, out_lanes);
  594. tb_tunnel_dbg(tunnel,
  595. "DP OUT maximum supported bandwidth %u Mb/s x%u = %u Mb/s\n",
  596. out_rate, out_lanes, bw);
  597. if (tb_tunnel_direction_downstream(tunnel))
  598. max_bw = tunnel->max_down;
  599. else
  600. max_bw = tunnel->max_up;
  601. if (max_bw && bw > max_bw) {
  602. u32 new_rate, new_lanes, new_bw;
  603. ret = tb_dp_reduce_bandwidth(max_bw, in_rate, in_lanes,
  604. out_rate, out_lanes, &new_rate,
  605. &new_lanes);
  606. if (ret) {
  607. tb_tunnel_info(tunnel, "not enough bandwidth\n");
  608. return ret;
  609. }
  610. new_bw = tb_dp_bandwidth(new_rate, new_lanes);
  611. tb_tunnel_dbg(tunnel,
  612. "bandwidth reduced to %u Mb/s x%u = %u Mb/s\n",
  613. new_rate, new_lanes, new_bw);
  614. /*
  615. * Set new rate and number of lanes before writing it to
  616. * the IN port remote caps.
  617. */
  618. out_dp_cap = tb_dp_cap_set_rate(out_dp_cap, new_rate);
  619. out_dp_cap = tb_dp_cap_set_lanes(out_dp_cap, new_lanes);
  620. }
  621. /*
  622. * Titan Ridge does not disable AUX timers when it gets
  623. * SET_CONFIG with SET_LTTPR_MODE set. This causes problems with
  624. * DP tunneling.
  625. */
  626. if (tb_route(out->sw) && tb_switch_is_titan_ridge(out->sw)) {
  627. out_dp_cap |= DP_COMMON_CAP_LTTPR_NS;
  628. tb_tunnel_dbg(tunnel, "disabling LTTPR\n");
  629. }
  630. return tb_port_write(in, &out_dp_cap, TB_CFG_PORT,
  631. in->cap_adap + DP_REMOTE_CAP, 1);
  632. }
  633. static int tb_dp_bandwidth_alloc_mode_enable(struct tb_tunnel *tunnel)
  634. {
  635. int ret, estimated_bw, granularity, tmp;
  636. struct tb_port *out = tunnel->dst_port;
  637. struct tb_port *in = tunnel->src_port;
  638. u32 out_dp_cap, out_rate, out_lanes;
  639. u32 in_dp_cap, in_rate, in_lanes;
  640. u32 rate, lanes;
  641. if (!bw_alloc_mode)
  642. return 0;
  643. ret = usb4_dp_port_set_cm_bandwidth_mode_supported(in, true);
  644. if (ret)
  645. return ret;
  646. ret = usb4_dp_port_set_group_id(in, in->group->index);
  647. if (ret)
  648. return ret;
  649. /*
  650. * Get the non-reduced rate and lanes based on the lowest
  651. * capability of both adapters.
  652. */
  653. ret = tb_port_read(in, &in_dp_cap, TB_CFG_PORT,
  654. in->cap_adap + DP_LOCAL_CAP, 1);
  655. if (ret)
  656. return ret;
  657. ret = tb_port_read(out, &out_dp_cap, TB_CFG_PORT,
  658. out->cap_adap + DP_LOCAL_CAP, 1);
  659. if (ret)
  660. return ret;
  661. in_rate = tb_dp_cap_get_rate(in_dp_cap);
  662. in_lanes = tb_dp_cap_get_lanes(in_dp_cap);
  663. out_rate = tb_dp_cap_get_rate(out_dp_cap);
  664. out_lanes = tb_dp_cap_get_lanes(out_dp_cap);
  665. rate = min(in_rate, out_rate);
  666. lanes = min(in_lanes, out_lanes);
  667. tmp = tb_dp_bandwidth(rate, lanes);
  668. tb_tunnel_dbg(tunnel, "non-reduced bandwidth %u Mb/s x%u = %u Mb/s\n",
  669. rate, lanes, tmp);
  670. ret = usb4_dp_port_set_nrd(in, rate, lanes);
  671. if (ret)
  672. return ret;
  673. /*
  674. * Pick up granularity that supports maximum possible bandwidth.
  675. * For that we use the UHBR rates too.
  676. */
  677. in_rate = tb_dp_cap_get_rate_ext(in_dp_cap);
  678. out_rate = tb_dp_cap_get_rate_ext(out_dp_cap);
  679. rate = min(in_rate, out_rate);
  680. tmp = tb_dp_bandwidth(rate, lanes);
  681. tb_tunnel_dbg(tunnel,
  682. "maximum bandwidth through allocation mode %u Mb/s x%u = %u Mb/s\n",
  683. rate, lanes, tmp);
  684. for (granularity = 250; tmp / granularity > 255 && granularity <= 1000;
  685. granularity *= 2)
  686. ;
  687. tb_tunnel_dbg(tunnel, "granularity %d Mb/s\n", granularity);
  688. /*
  689. * Returns -EINVAL if granularity above is outside of the
  690. * accepted ranges.
  691. */
  692. ret = usb4_dp_port_set_granularity(in, granularity);
  693. if (ret)
  694. return ret;
  695. /*
  696. * Bandwidth estimation is pretty much what we have in
  697. * max_up/down fields. For discovery we just read what the
  698. * estimation was set to.
  699. */
  700. if (tb_tunnel_direction_downstream(tunnel))
  701. estimated_bw = tunnel->max_down;
  702. else
  703. estimated_bw = tunnel->max_up;
  704. tb_tunnel_dbg(tunnel, "estimated bandwidth %d Mb/s\n", estimated_bw);
  705. ret = usb4_dp_port_set_estimated_bandwidth(in, estimated_bw);
  706. if (ret)
  707. return ret;
  708. /* Initial allocation should be 0 according the spec */
  709. ret = usb4_dp_port_allocate_bandwidth(in, 0);
  710. if (ret)
  711. return ret;
  712. tb_tunnel_dbg(tunnel, "bandwidth allocation mode enabled\n");
  713. return 0;
  714. }
  715. static int tb_dp_init(struct tb_tunnel *tunnel)
  716. {
  717. struct tb_port *in = tunnel->src_port;
  718. struct tb_switch *sw = in->sw;
  719. struct tb *tb = in->sw->tb;
  720. int ret;
  721. ret = tb_dp_xchg_caps(tunnel);
  722. if (ret)
  723. return ret;
  724. if (!tb_switch_is_usb4(sw))
  725. return 0;
  726. if (!usb4_dp_port_bandwidth_mode_supported(in))
  727. return 0;
  728. tb_tunnel_dbg(tunnel, "bandwidth allocation mode supported\n");
  729. ret = usb4_dp_port_set_cm_id(in, tb->index);
  730. if (ret)
  731. return ret;
  732. return tb_dp_bandwidth_alloc_mode_enable(tunnel);
  733. }
  734. static void tb_dp_deinit(struct tb_tunnel *tunnel)
  735. {
  736. struct tb_port *in = tunnel->src_port;
  737. if (!usb4_dp_port_bandwidth_mode_supported(in))
  738. return;
  739. if (usb4_dp_port_bandwidth_mode_enabled(in)) {
  740. usb4_dp_port_set_cm_bandwidth_mode_supported(in, false);
  741. tb_tunnel_dbg(tunnel, "bandwidth allocation mode disabled\n");
  742. }
  743. }
  744. static int tb_dp_activate(struct tb_tunnel *tunnel, bool active)
  745. {
  746. int ret;
  747. if (active) {
  748. struct tb_path **paths;
  749. int last;
  750. paths = tunnel->paths;
  751. last = paths[TB_DP_VIDEO_PATH_OUT]->path_length - 1;
  752. tb_dp_port_set_hops(tunnel->src_port,
  753. paths[TB_DP_VIDEO_PATH_OUT]->hops[0].in_hop_index,
  754. paths[TB_DP_AUX_PATH_OUT]->hops[0].in_hop_index,
  755. paths[TB_DP_AUX_PATH_IN]->hops[last].next_hop_index);
  756. tb_dp_port_set_hops(tunnel->dst_port,
  757. paths[TB_DP_VIDEO_PATH_OUT]->hops[last].next_hop_index,
  758. paths[TB_DP_AUX_PATH_IN]->hops[0].in_hop_index,
  759. paths[TB_DP_AUX_PATH_OUT]->hops[last].next_hop_index);
  760. } else {
  761. tb_dp_port_hpd_clear(tunnel->src_port);
  762. tb_dp_port_set_hops(tunnel->src_port, 0, 0, 0);
  763. if (tb_port_is_dpout(tunnel->dst_port))
  764. tb_dp_port_set_hops(tunnel->dst_port, 0, 0, 0);
  765. }
  766. ret = tb_dp_port_enable(tunnel->src_port, active);
  767. if (ret)
  768. return ret;
  769. if (tb_port_is_dpout(tunnel->dst_port))
  770. return tb_dp_port_enable(tunnel->dst_port, active);
  771. return 0;
  772. }
  773. /**
  774. * tb_dp_bandwidth_mode_maximum_bandwidth() - Maximum possible bandwidth
  775. * @tunnel: DP tunnel to check
  776. * @max_bw_rounded: Maximum bandwidth in Mb/s rounded up to the next granularity
  777. *
  778. * Returns maximum possible bandwidth for this tunnel in Mb/s.
  779. */
  780. static int tb_dp_bandwidth_mode_maximum_bandwidth(struct tb_tunnel *tunnel,
  781. int *max_bw_rounded)
  782. {
  783. struct tb_port *in = tunnel->src_port;
  784. int ret, rate, lanes, max_bw;
  785. u32 cap;
  786. /*
  787. * DP IN adapter DP_LOCAL_CAP gets updated to the lowest AUX
  788. * read parameter values so this so we can use this to determine
  789. * the maximum possible bandwidth over this link.
  790. *
  791. * See USB4 v2 spec 1.0 10.4.4.5.
  792. */
  793. ret = tb_port_read(in, &cap, TB_CFG_PORT,
  794. in->cap_adap + DP_LOCAL_CAP, 1);
  795. if (ret)
  796. return ret;
  797. rate = tb_dp_cap_get_rate_ext(cap);
  798. lanes = tb_dp_cap_get_lanes(cap);
  799. max_bw = tb_dp_bandwidth(rate, lanes);
  800. if (max_bw_rounded) {
  801. ret = usb4_dp_port_granularity(in);
  802. if (ret < 0)
  803. return ret;
  804. *max_bw_rounded = roundup(max_bw, ret);
  805. }
  806. return max_bw;
  807. }
  808. static int tb_dp_bandwidth_mode_consumed_bandwidth(struct tb_tunnel *tunnel,
  809. int *consumed_up,
  810. int *consumed_down)
  811. {
  812. struct tb_port *in = tunnel->src_port;
  813. int ret, allocated_bw, max_bw_rounded;
  814. if (!usb4_dp_port_bandwidth_mode_enabled(in))
  815. return -EOPNOTSUPP;
  816. if (!tunnel->bw_mode)
  817. return -EOPNOTSUPP;
  818. /* Read what was allocated previously if any */
  819. ret = usb4_dp_port_allocated_bandwidth(in);
  820. if (ret < 0)
  821. return ret;
  822. allocated_bw = ret;
  823. ret = tb_dp_bandwidth_mode_maximum_bandwidth(tunnel, &max_bw_rounded);
  824. if (ret < 0)
  825. return ret;
  826. if (allocated_bw == max_bw_rounded)
  827. allocated_bw = ret;
  828. if (tb_tunnel_direction_downstream(tunnel)) {
  829. *consumed_up = 0;
  830. *consumed_down = allocated_bw;
  831. } else {
  832. *consumed_up = allocated_bw;
  833. *consumed_down = 0;
  834. }
  835. return 0;
  836. }
  837. static int tb_dp_allocated_bandwidth(struct tb_tunnel *tunnel, int *allocated_up,
  838. int *allocated_down)
  839. {
  840. struct tb_port *in = tunnel->src_port;
  841. /*
  842. * If we have already set the allocated bandwidth then use that.
  843. * Otherwise we read it from the DPRX.
  844. */
  845. if (usb4_dp_port_bandwidth_mode_enabled(in) && tunnel->bw_mode) {
  846. int ret, allocated_bw, max_bw_rounded;
  847. ret = usb4_dp_port_allocated_bandwidth(in);
  848. if (ret < 0)
  849. return ret;
  850. allocated_bw = ret;
  851. ret = tb_dp_bandwidth_mode_maximum_bandwidth(tunnel,
  852. &max_bw_rounded);
  853. if (ret < 0)
  854. return ret;
  855. if (allocated_bw == max_bw_rounded)
  856. allocated_bw = ret;
  857. if (tb_tunnel_direction_downstream(tunnel)) {
  858. *allocated_up = 0;
  859. *allocated_down = allocated_bw;
  860. } else {
  861. *allocated_up = allocated_bw;
  862. *allocated_down = 0;
  863. }
  864. return 0;
  865. }
  866. return tunnel->consumed_bandwidth(tunnel, allocated_up,
  867. allocated_down);
  868. }
  869. static int tb_dp_alloc_bandwidth(struct tb_tunnel *tunnel, int *alloc_up,
  870. int *alloc_down)
  871. {
  872. struct tb_port *in = tunnel->src_port;
  873. int max_bw_rounded, ret, tmp;
  874. if (!usb4_dp_port_bandwidth_mode_enabled(in))
  875. return -EOPNOTSUPP;
  876. ret = tb_dp_bandwidth_mode_maximum_bandwidth(tunnel, &max_bw_rounded);
  877. if (ret < 0)
  878. return ret;
  879. if (tb_tunnel_direction_downstream(tunnel)) {
  880. tmp = min(*alloc_down, max_bw_rounded);
  881. ret = usb4_dp_port_allocate_bandwidth(in, tmp);
  882. if (ret)
  883. return ret;
  884. *alloc_down = tmp;
  885. *alloc_up = 0;
  886. } else {
  887. tmp = min(*alloc_up, max_bw_rounded);
  888. ret = usb4_dp_port_allocate_bandwidth(in, tmp);
  889. if (ret)
  890. return ret;
  891. *alloc_down = 0;
  892. *alloc_up = tmp;
  893. }
  894. /* Now we can use BW mode registers to figure out the bandwidth */
  895. /* TODO: need to handle discovery too */
  896. tunnel->bw_mode = true;
  897. return 0;
  898. }
  899. static int tb_dp_wait_dprx(struct tb_tunnel *tunnel, int timeout_msec)
  900. {
  901. ktime_t timeout = ktime_add_ms(ktime_get(), timeout_msec);
  902. struct tb_port *in = tunnel->src_port;
  903. /*
  904. * Wait for DPRX done. Normally it should be already set for
  905. * active tunnel.
  906. */
  907. do {
  908. u32 val;
  909. int ret;
  910. ret = tb_port_read(in, &val, TB_CFG_PORT,
  911. in->cap_adap + DP_COMMON_CAP, 1);
  912. if (ret)
  913. return ret;
  914. if (val & DP_COMMON_CAP_DPRX_DONE) {
  915. tb_tunnel_dbg(tunnel, "DPRX read done\n");
  916. return 0;
  917. }
  918. usleep_range(100, 150);
  919. } while (ktime_before(ktime_get(), timeout));
  920. tb_tunnel_dbg(tunnel, "DPRX read timeout\n");
  921. return -ETIMEDOUT;
  922. }
  923. /* Read cap from tunnel DP IN */
  924. static int tb_dp_read_cap(struct tb_tunnel *tunnel, unsigned int cap, u32 *rate,
  925. u32 *lanes)
  926. {
  927. struct tb_port *in = tunnel->src_port;
  928. u32 val;
  929. int ret;
  930. switch (cap) {
  931. case DP_LOCAL_CAP:
  932. case DP_REMOTE_CAP:
  933. case DP_COMMON_CAP:
  934. break;
  935. default:
  936. tb_tunnel_WARN(tunnel, "invalid capability index %#x\n", cap);
  937. return -EINVAL;
  938. }
  939. /*
  940. * Read from the copied remote cap so that we take into account
  941. * if capabilities were reduced during exchange.
  942. */
  943. ret = tb_port_read(in, &val, TB_CFG_PORT, in->cap_adap + cap, 1);
  944. if (ret)
  945. return ret;
  946. *rate = tb_dp_cap_get_rate(val);
  947. *lanes = tb_dp_cap_get_lanes(val);
  948. return 0;
  949. }
  950. static int tb_dp_maximum_bandwidth(struct tb_tunnel *tunnel, int *max_up,
  951. int *max_down)
  952. {
  953. int ret;
  954. if (!usb4_dp_port_bandwidth_mode_enabled(tunnel->src_port))
  955. return -EOPNOTSUPP;
  956. ret = tb_dp_bandwidth_mode_maximum_bandwidth(tunnel, NULL);
  957. if (ret < 0)
  958. return ret;
  959. if (tb_tunnel_direction_downstream(tunnel)) {
  960. *max_up = 0;
  961. *max_down = ret;
  962. } else {
  963. *max_up = ret;
  964. *max_down = 0;
  965. }
  966. return 0;
  967. }
  968. static int tb_dp_consumed_bandwidth(struct tb_tunnel *tunnel, int *consumed_up,
  969. int *consumed_down)
  970. {
  971. const struct tb_switch *sw = tunnel->src_port->sw;
  972. u32 rate = 0, lanes = 0;
  973. int ret;
  974. if (tb_dp_is_usb4(sw)) {
  975. /*
  976. * On USB4 routers check if the bandwidth allocation
  977. * mode is enabled first and then read the bandwidth
  978. * through those registers.
  979. */
  980. ret = tb_dp_bandwidth_mode_consumed_bandwidth(tunnel, consumed_up,
  981. consumed_down);
  982. if (ret < 0) {
  983. if (ret != -EOPNOTSUPP)
  984. return ret;
  985. } else if (!ret) {
  986. return 0;
  987. }
  988. /*
  989. * Then see if the DPRX negotiation is ready and if yes
  990. * return that bandwidth (it may be smaller than the
  991. * reduced one). According to VESA spec, the DPRX
  992. * negotiation shall compete in 5 seconds after tunnel
  993. * established. We give it 100ms extra just in case.
  994. */
  995. ret = tb_dp_wait_dprx(tunnel, 5100);
  996. if (ret)
  997. return ret;
  998. ret = tb_dp_read_cap(tunnel, DP_COMMON_CAP, &rate, &lanes);
  999. if (ret)
  1000. return ret;
  1001. } else if (sw->generation >= 2) {
  1002. ret = tb_dp_read_cap(tunnel, DP_REMOTE_CAP, &rate, &lanes);
  1003. if (ret)
  1004. return ret;
  1005. } else {
  1006. /* No bandwidth management for legacy devices */
  1007. *consumed_up = 0;
  1008. *consumed_down = 0;
  1009. return 0;
  1010. }
  1011. if (tb_tunnel_direction_downstream(tunnel)) {
  1012. *consumed_up = 0;
  1013. *consumed_down = tb_dp_bandwidth(rate, lanes);
  1014. } else {
  1015. *consumed_up = tb_dp_bandwidth(rate, lanes);
  1016. *consumed_down = 0;
  1017. }
  1018. return 0;
  1019. }
  1020. static void tb_dp_init_aux_credits(struct tb_path_hop *hop)
  1021. {
  1022. struct tb_port *port = hop->in_port;
  1023. struct tb_switch *sw = port->sw;
  1024. if (tb_port_use_credit_allocation(port))
  1025. hop->initial_credits = sw->min_dp_aux_credits;
  1026. else
  1027. hop->initial_credits = 1;
  1028. }
  1029. static void tb_dp_init_aux_path(struct tb_path *path, bool pm_support)
  1030. {
  1031. struct tb_path_hop *hop;
  1032. path->egress_fc_enable = TB_PATH_SOURCE | TB_PATH_INTERNAL;
  1033. path->egress_shared_buffer = TB_PATH_NONE;
  1034. path->ingress_fc_enable = TB_PATH_ALL;
  1035. path->ingress_shared_buffer = TB_PATH_NONE;
  1036. path->priority = TB_DP_AUX_PRIORITY;
  1037. path->weight = TB_DP_AUX_WEIGHT;
  1038. tb_path_for_each_hop(path, hop) {
  1039. tb_dp_init_aux_credits(hop);
  1040. if (pm_support)
  1041. tb_init_pm_support(hop);
  1042. }
  1043. }
  1044. static int tb_dp_init_video_credits(struct tb_path_hop *hop)
  1045. {
  1046. struct tb_port *port = hop->in_port;
  1047. struct tb_switch *sw = port->sw;
  1048. if (tb_port_use_credit_allocation(port)) {
  1049. unsigned int nfc_credits;
  1050. size_t max_dp_streams;
  1051. tb_available_credits(port, &max_dp_streams);
  1052. /*
  1053. * Read the number of currently allocated NFC credits
  1054. * from the lane adapter. Since we only use them for DP
  1055. * tunneling we can use that to figure out how many DP
  1056. * tunnels already go through the lane adapter.
  1057. */
  1058. nfc_credits = port->config.nfc_credits &
  1059. ADP_CS_4_NFC_BUFFERS_MASK;
  1060. if (nfc_credits / sw->min_dp_main_credits > max_dp_streams)
  1061. return -ENOSPC;
  1062. hop->nfc_credits = sw->min_dp_main_credits;
  1063. } else {
  1064. hop->nfc_credits = min(port->total_credits - 2, 12U);
  1065. }
  1066. return 0;
  1067. }
  1068. static int tb_dp_init_video_path(struct tb_path *path, bool pm_support)
  1069. {
  1070. struct tb_path_hop *hop;
  1071. path->egress_fc_enable = TB_PATH_NONE;
  1072. path->egress_shared_buffer = TB_PATH_NONE;
  1073. path->ingress_fc_enable = TB_PATH_NONE;
  1074. path->ingress_shared_buffer = TB_PATH_NONE;
  1075. path->priority = TB_DP_VIDEO_PRIORITY;
  1076. path->weight = TB_DP_VIDEO_WEIGHT;
  1077. tb_path_for_each_hop(path, hop) {
  1078. int ret;
  1079. ret = tb_dp_init_video_credits(hop);
  1080. if (ret)
  1081. return ret;
  1082. if (pm_support)
  1083. tb_init_pm_support(hop);
  1084. }
  1085. return 0;
  1086. }
  1087. static void tb_dp_dump(struct tb_tunnel *tunnel)
  1088. {
  1089. struct tb_port *in, *out;
  1090. u32 dp_cap, rate, lanes;
  1091. in = tunnel->src_port;
  1092. out = tunnel->dst_port;
  1093. if (tb_port_read(in, &dp_cap, TB_CFG_PORT,
  1094. in->cap_adap + DP_LOCAL_CAP, 1))
  1095. return;
  1096. rate = tb_dp_cap_get_rate(dp_cap);
  1097. lanes = tb_dp_cap_get_lanes(dp_cap);
  1098. tb_tunnel_dbg(tunnel,
  1099. "DP IN maximum supported bandwidth %u Mb/s x%u = %u Mb/s\n",
  1100. rate, lanes, tb_dp_bandwidth(rate, lanes));
  1101. if (tb_port_read(out, &dp_cap, TB_CFG_PORT,
  1102. out->cap_adap + DP_LOCAL_CAP, 1))
  1103. return;
  1104. rate = tb_dp_cap_get_rate(dp_cap);
  1105. lanes = tb_dp_cap_get_lanes(dp_cap);
  1106. tb_tunnel_dbg(tunnel,
  1107. "DP OUT maximum supported bandwidth %u Mb/s x%u = %u Mb/s\n",
  1108. rate, lanes, tb_dp_bandwidth(rate, lanes));
  1109. if (tb_port_read(in, &dp_cap, TB_CFG_PORT,
  1110. in->cap_adap + DP_REMOTE_CAP, 1))
  1111. return;
  1112. rate = tb_dp_cap_get_rate(dp_cap);
  1113. lanes = tb_dp_cap_get_lanes(dp_cap);
  1114. tb_tunnel_dbg(tunnel, "reduced bandwidth %u Mb/s x%u = %u Mb/s\n",
  1115. rate, lanes, tb_dp_bandwidth(rate, lanes));
  1116. }
  1117. /**
  1118. * tb_tunnel_discover_dp() - Discover existing Display Port tunnels
  1119. * @tb: Pointer to the domain structure
  1120. * @in: DP in adapter
  1121. * @alloc_hopid: Allocate HopIDs from visited ports
  1122. *
  1123. * If @in adapter is active, follows the tunnel to the DP out adapter
  1124. * and back. Returns the discovered tunnel or %NULL if there was no
  1125. * tunnel.
  1126. *
  1127. * Return: DP tunnel or %NULL if no tunnel found.
  1128. */
  1129. struct tb_tunnel *tb_tunnel_discover_dp(struct tb *tb, struct tb_port *in,
  1130. bool alloc_hopid)
  1131. {
  1132. struct tb_tunnel *tunnel;
  1133. struct tb_port *port;
  1134. struct tb_path *path;
  1135. if (!tb_dp_port_is_enabled(in))
  1136. return NULL;
  1137. tunnel = tb_tunnel_alloc(tb, 3, TB_TUNNEL_DP);
  1138. if (!tunnel)
  1139. return NULL;
  1140. tunnel->init = tb_dp_init;
  1141. tunnel->deinit = tb_dp_deinit;
  1142. tunnel->activate = tb_dp_activate;
  1143. tunnel->maximum_bandwidth = tb_dp_maximum_bandwidth;
  1144. tunnel->allocated_bandwidth = tb_dp_allocated_bandwidth;
  1145. tunnel->alloc_bandwidth = tb_dp_alloc_bandwidth;
  1146. tunnel->consumed_bandwidth = tb_dp_consumed_bandwidth;
  1147. tunnel->src_port = in;
  1148. path = tb_path_discover(in, TB_DP_VIDEO_HOPID, NULL, -1,
  1149. &tunnel->dst_port, "Video", alloc_hopid);
  1150. if (!path) {
  1151. /* Just disable the DP IN port */
  1152. tb_dp_port_enable(in, false);
  1153. goto err_free;
  1154. }
  1155. tunnel->paths[TB_DP_VIDEO_PATH_OUT] = path;
  1156. if (tb_dp_init_video_path(tunnel->paths[TB_DP_VIDEO_PATH_OUT], false))
  1157. goto err_free;
  1158. path = tb_path_discover(in, TB_DP_AUX_TX_HOPID, NULL, -1, NULL, "AUX TX",
  1159. alloc_hopid);
  1160. if (!path)
  1161. goto err_deactivate;
  1162. tunnel->paths[TB_DP_AUX_PATH_OUT] = path;
  1163. tb_dp_init_aux_path(tunnel->paths[TB_DP_AUX_PATH_OUT], false);
  1164. path = tb_path_discover(tunnel->dst_port, -1, in, TB_DP_AUX_RX_HOPID,
  1165. &port, "AUX RX", alloc_hopid);
  1166. if (!path)
  1167. goto err_deactivate;
  1168. tunnel->paths[TB_DP_AUX_PATH_IN] = path;
  1169. tb_dp_init_aux_path(tunnel->paths[TB_DP_AUX_PATH_IN], false);
  1170. /* Validate that the tunnel is complete */
  1171. if (!tb_port_is_dpout(tunnel->dst_port)) {
  1172. tb_port_warn(in, "path does not end on a DP adapter, cleaning up\n");
  1173. goto err_deactivate;
  1174. }
  1175. if (!tb_dp_port_is_enabled(tunnel->dst_port))
  1176. goto err_deactivate;
  1177. if (!tb_dp_port_hpd_is_active(tunnel->dst_port))
  1178. goto err_deactivate;
  1179. if (port != tunnel->src_port) {
  1180. tb_tunnel_warn(tunnel, "path is not complete, cleaning up\n");
  1181. goto err_deactivate;
  1182. }
  1183. tb_dp_dump(tunnel);
  1184. tb_tunnel_dbg(tunnel, "discovered\n");
  1185. return tunnel;
  1186. err_deactivate:
  1187. tb_tunnel_deactivate(tunnel);
  1188. err_free:
  1189. tb_tunnel_free(tunnel);
  1190. return NULL;
  1191. }
  1192. /**
  1193. * tb_tunnel_alloc_dp() - allocate a Display Port tunnel
  1194. * @tb: Pointer to the domain structure
  1195. * @in: DP in adapter port
  1196. * @out: DP out adapter port
  1197. * @link_nr: Preferred lane adapter when the link is not bonded
  1198. * @max_up: Maximum available upstream bandwidth for the DP tunnel.
  1199. * %0 if no available bandwidth.
  1200. * @max_down: Maximum available downstream bandwidth for the DP tunnel.
  1201. * %0 if no available bandwidth.
  1202. *
  1203. * Allocates a tunnel between @in and @out that is capable of tunneling
  1204. * Display Port traffic.
  1205. *
  1206. * Return: Returns a tb_tunnel on success or NULL on failure.
  1207. */
  1208. struct tb_tunnel *tb_tunnel_alloc_dp(struct tb *tb, struct tb_port *in,
  1209. struct tb_port *out, int link_nr,
  1210. int max_up, int max_down)
  1211. {
  1212. struct tb_tunnel *tunnel;
  1213. struct tb_path **paths;
  1214. struct tb_path *path;
  1215. bool pm_support;
  1216. if (WARN_ON(!in->cap_adap || !out->cap_adap))
  1217. return NULL;
  1218. tunnel = tb_tunnel_alloc(tb, 3, TB_TUNNEL_DP);
  1219. if (!tunnel)
  1220. return NULL;
  1221. tunnel->init = tb_dp_init;
  1222. tunnel->deinit = tb_dp_deinit;
  1223. tunnel->activate = tb_dp_activate;
  1224. tunnel->maximum_bandwidth = tb_dp_maximum_bandwidth;
  1225. tunnel->allocated_bandwidth = tb_dp_allocated_bandwidth;
  1226. tunnel->alloc_bandwidth = tb_dp_alloc_bandwidth;
  1227. tunnel->consumed_bandwidth = tb_dp_consumed_bandwidth;
  1228. tunnel->src_port = in;
  1229. tunnel->dst_port = out;
  1230. tunnel->max_up = max_up;
  1231. tunnel->max_down = max_down;
  1232. paths = tunnel->paths;
  1233. pm_support = usb4_switch_version(in->sw) >= 2;
  1234. path = tb_path_alloc(tb, in, TB_DP_VIDEO_HOPID, out, TB_DP_VIDEO_HOPID,
  1235. link_nr, "Video");
  1236. if (!path)
  1237. goto err_free;
  1238. tb_dp_init_video_path(path, pm_support);
  1239. paths[TB_DP_VIDEO_PATH_OUT] = path;
  1240. path = tb_path_alloc(tb, in, TB_DP_AUX_TX_HOPID, out,
  1241. TB_DP_AUX_TX_HOPID, link_nr, "AUX TX");
  1242. if (!path)
  1243. goto err_free;
  1244. tb_dp_init_aux_path(path, pm_support);
  1245. paths[TB_DP_AUX_PATH_OUT] = path;
  1246. path = tb_path_alloc(tb, out, TB_DP_AUX_RX_HOPID, in,
  1247. TB_DP_AUX_RX_HOPID, link_nr, "AUX RX");
  1248. if (!path)
  1249. goto err_free;
  1250. tb_dp_init_aux_path(path, pm_support);
  1251. paths[TB_DP_AUX_PATH_IN] = path;
  1252. return tunnel;
  1253. err_free:
  1254. tb_tunnel_free(tunnel);
  1255. return NULL;
  1256. }
  1257. static unsigned int tb_dma_available_credits(const struct tb_port *port)
  1258. {
  1259. const struct tb_switch *sw = port->sw;
  1260. int credits;
  1261. credits = tb_available_credits(port, NULL);
  1262. if (tb_acpi_may_tunnel_pcie())
  1263. credits -= sw->max_pcie_credits;
  1264. credits -= port->dma_credits;
  1265. return credits > 0 ? credits : 0;
  1266. }
  1267. static int tb_dma_reserve_credits(struct tb_path_hop *hop, unsigned int credits)
  1268. {
  1269. struct tb_port *port = hop->in_port;
  1270. if (tb_port_use_credit_allocation(port)) {
  1271. unsigned int available = tb_dma_available_credits(port);
  1272. /*
  1273. * Need to have at least TB_MIN_DMA_CREDITS, otherwise
  1274. * DMA path cannot be established.
  1275. */
  1276. if (available < TB_MIN_DMA_CREDITS)
  1277. return -ENOSPC;
  1278. while (credits > available)
  1279. credits--;
  1280. tb_port_dbg(port, "reserving %u credits for DMA path\n",
  1281. credits);
  1282. port->dma_credits += credits;
  1283. } else {
  1284. if (tb_port_is_null(port))
  1285. credits = port->bonded ? 14 : 6;
  1286. else
  1287. credits = min(port->total_credits, credits);
  1288. }
  1289. hop->initial_credits = credits;
  1290. return 0;
  1291. }
  1292. /* Path from lane adapter to NHI */
  1293. static int tb_dma_init_rx_path(struct tb_path *path, unsigned int credits)
  1294. {
  1295. struct tb_path_hop *hop;
  1296. unsigned int i, tmp;
  1297. path->egress_fc_enable = TB_PATH_SOURCE | TB_PATH_INTERNAL;
  1298. path->ingress_fc_enable = TB_PATH_ALL;
  1299. path->egress_shared_buffer = TB_PATH_NONE;
  1300. path->ingress_shared_buffer = TB_PATH_NONE;
  1301. path->priority = TB_DMA_PRIORITY;
  1302. path->weight = TB_DMA_WEIGHT;
  1303. path->clear_fc = true;
  1304. /*
  1305. * First lane adapter is the one connected to the remote host.
  1306. * We don't tunnel other traffic over this link so can use all
  1307. * the credits (except the ones reserved for control traffic).
  1308. */
  1309. hop = &path->hops[0];
  1310. tmp = min(tb_usable_credits(hop->in_port), credits);
  1311. hop->initial_credits = tmp;
  1312. hop->in_port->dma_credits += tmp;
  1313. for (i = 1; i < path->path_length; i++) {
  1314. int ret;
  1315. ret = tb_dma_reserve_credits(&path->hops[i], credits);
  1316. if (ret)
  1317. return ret;
  1318. }
  1319. return 0;
  1320. }
  1321. /* Path from NHI to lane adapter */
  1322. static int tb_dma_init_tx_path(struct tb_path *path, unsigned int credits)
  1323. {
  1324. struct tb_path_hop *hop;
  1325. path->egress_fc_enable = TB_PATH_ALL;
  1326. path->ingress_fc_enable = TB_PATH_ALL;
  1327. path->egress_shared_buffer = TB_PATH_NONE;
  1328. path->ingress_shared_buffer = TB_PATH_NONE;
  1329. path->priority = TB_DMA_PRIORITY;
  1330. path->weight = TB_DMA_WEIGHT;
  1331. path->clear_fc = true;
  1332. tb_path_for_each_hop(path, hop) {
  1333. int ret;
  1334. ret = tb_dma_reserve_credits(hop, credits);
  1335. if (ret)
  1336. return ret;
  1337. }
  1338. return 0;
  1339. }
  1340. static void tb_dma_release_credits(struct tb_path_hop *hop)
  1341. {
  1342. struct tb_port *port = hop->in_port;
  1343. if (tb_port_use_credit_allocation(port)) {
  1344. port->dma_credits -= hop->initial_credits;
  1345. tb_port_dbg(port, "released %u DMA path credits\n",
  1346. hop->initial_credits);
  1347. }
  1348. }
  1349. static void tb_dma_deinit_path(struct tb_path *path)
  1350. {
  1351. struct tb_path_hop *hop;
  1352. tb_path_for_each_hop(path, hop)
  1353. tb_dma_release_credits(hop);
  1354. }
  1355. static void tb_dma_deinit(struct tb_tunnel *tunnel)
  1356. {
  1357. int i;
  1358. for (i = 0; i < tunnel->npaths; i++) {
  1359. if (!tunnel->paths[i])
  1360. continue;
  1361. tb_dma_deinit_path(tunnel->paths[i]);
  1362. }
  1363. }
  1364. /**
  1365. * tb_tunnel_alloc_dma() - allocate a DMA tunnel
  1366. * @tb: Pointer to the domain structure
  1367. * @nhi: Host controller port
  1368. * @dst: Destination null port which the other domain is connected to
  1369. * @transmit_path: HopID used for transmitting packets
  1370. * @transmit_ring: NHI ring number used to send packets towards the
  1371. * other domain. Set to %-1 if TX path is not needed.
  1372. * @receive_path: HopID used for receiving packets
  1373. * @receive_ring: NHI ring number used to receive packets from the
  1374. * other domain. Set to %-1 if RX path is not needed.
  1375. *
  1376. * Return: Returns a tb_tunnel on success or NULL on failure.
  1377. */
  1378. struct tb_tunnel *tb_tunnel_alloc_dma(struct tb *tb, struct tb_port *nhi,
  1379. struct tb_port *dst, int transmit_path,
  1380. int transmit_ring, int receive_path,
  1381. int receive_ring)
  1382. {
  1383. struct tb_tunnel *tunnel;
  1384. size_t npaths = 0, i = 0;
  1385. struct tb_path *path;
  1386. int credits;
  1387. /* Ring 0 is reserved for control channel */
  1388. if (WARN_ON(!receive_ring || !transmit_ring))
  1389. return NULL;
  1390. if (receive_ring > 0)
  1391. npaths++;
  1392. if (transmit_ring > 0)
  1393. npaths++;
  1394. if (WARN_ON(!npaths))
  1395. return NULL;
  1396. tunnel = tb_tunnel_alloc(tb, npaths, TB_TUNNEL_DMA);
  1397. if (!tunnel)
  1398. return NULL;
  1399. tunnel->src_port = nhi;
  1400. tunnel->dst_port = dst;
  1401. tunnel->deinit = tb_dma_deinit;
  1402. credits = min_not_zero(dma_credits, nhi->sw->max_dma_credits);
  1403. if (receive_ring > 0) {
  1404. path = tb_path_alloc(tb, dst, receive_path, nhi, receive_ring, 0,
  1405. "DMA RX");
  1406. if (!path)
  1407. goto err_free;
  1408. tunnel->paths[i++] = path;
  1409. if (tb_dma_init_rx_path(path, credits)) {
  1410. tb_tunnel_dbg(tunnel, "not enough buffers for RX path\n");
  1411. goto err_free;
  1412. }
  1413. }
  1414. if (transmit_ring > 0) {
  1415. path = tb_path_alloc(tb, nhi, transmit_ring, dst, transmit_path, 0,
  1416. "DMA TX");
  1417. if (!path)
  1418. goto err_free;
  1419. tunnel->paths[i++] = path;
  1420. if (tb_dma_init_tx_path(path, credits)) {
  1421. tb_tunnel_dbg(tunnel, "not enough buffers for TX path\n");
  1422. goto err_free;
  1423. }
  1424. }
  1425. return tunnel;
  1426. err_free:
  1427. tb_tunnel_free(tunnel);
  1428. return NULL;
  1429. }
  1430. /**
  1431. * tb_tunnel_match_dma() - Match DMA tunnel
  1432. * @tunnel: Tunnel to match
  1433. * @transmit_path: HopID used for transmitting packets. Pass %-1 to ignore.
  1434. * @transmit_ring: NHI ring number used to send packets towards the
  1435. * other domain. Pass %-1 to ignore.
  1436. * @receive_path: HopID used for receiving packets. Pass %-1 to ignore.
  1437. * @receive_ring: NHI ring number used to receive packets from the
  1438. * other domain. Pass %-1 to ignore.
  1439. *
  1440. * This function can be used to match specific DMA tunnel, if there are
  1441. * multiple DMA tunnels going through the same XDomain connection.
  1442. * Returns true if there is match and false otherwise.
  1443. */
  1444. bool tb_tunnel_match_dma(const struct tb_tunnel *tunnel, int transmit_path,
  1445. int transmit_ring, int receive_path, int receive_ring)
  1446. {
  1447. const struct tb_path *tx_path = NULL, *rx_path = NULL;
  1448. int i;
  1449. if (!receive_ring || !transmit_ring)
  1450. return false;
  1451. for (i = 0; i < tunnel->npaths; i++) {
  1452. const struct tb_path *path = tunnel->paths[i];
  1453. if (!path)
  1454. continue;
  1455. if (tb_port_is_nhi(path->hops[0].in_port))
  1456. tx_path = path;
  1457. else if (tb_port_is_nhi(path->hops[path->path_length - 1].out_port))
  1458. rx_path = path;
  1459. }
  1460. if (transmit_ring > 0 || transmit_path > 0) {
  1461. if (!tx_path)
  1462. return false;
  1463. if (transmit_ring > 0 &&
  1464. (tx_path->hops[0].in_hop_index != transmit_ring))
  1465. return false;
  1466. if (transmit_path > 0 &&
  1467. (tx_path->hops[tx_path->path_length - 1].next_hop_index != transmit_path))
  1468. return false;
  1469. }
  1470. if (receive_ring > 0 || receive_path > 0) {
  1471. if (!rx_path)
  1472. return false;
  1473. if (receive_path > 0 &&
  1474. (rx_path->hops[0].in_hop_index != receive_path))
  1475. return false;
  1476. if (receive_ring > 0 &&
  1477. (rx_path->hops[rx_path->path_length - 1].next_hop_index != receive_ring))
  1478. return false;
  1479. }
  1480. return true;
  1481. }
  1482. static int tb_usb3_max_link_rate(struct tb_port *up, struct tb_port *down)
  1483. {
  1484. int ret, up_max_rate, down_max_rate;
  1485. ret = usb4_usb3_port_max_link_rate(up);
  1486. if (ret < 0)
  1487. return ret;
  1488. up_max_rate = ret;
  1489. ret = usb4_usb3_port_max_link_rate(down);
  1490. if (ret < 0)
  1491. return ret;
  1492. down_max_rate = ret;
  1493. return min(up_max_rate, down_max_rate);
  1494. }
  1495. static int tb_usb3_init(struct tb_tunnel *tunnel)
  1496. {
  1497. tb_tunnel_dbg(tunnel, "allocating initial bandwidth %d/%d Mb/s\n",
  1498. tunnel->allocated_up, tunnel->allocated_down);
  1499. return usb4_usb3_port_allocate_bandwidth(tunnel->src_port,
  1500. &tunnel->allocated_up,
  1501. &tunnel->allocated_down);
  1502. }
  1503. static int tb_usb3_activate(struct tb_tunnel *tunnel, bool activate)
  1504. {
  1505. int res;
  1506. res = tb_usb3_port_enable(tunnel->src_port, activate);
  1507. if (res)
  1508. return res;
  1509. if (tb_port_is_usb3_up(tunnel->dst_port))
  1510. return tb_usb3_port_enable(tunnel->dst_port, activate);
  1511. return 0;
  1512. }
  1513. static int tb_usb3_consumed_bandwidth(struct tb_tunnel *tunnel,
  1514. int *consumed_up, int *consumed_down)
  1515. {
  1516. struct tb_port *port = tb_upstream_port(tunnel->dst_port->sw);
  1517. int pcie_weight = tb_acpi_may_tunnel_pcie() ? TB_PCI_WEIGHT : 0;
  1518. /*
  1519. * PCIe tunneling, if enabled, affects the USB3 bandwidth so
  1520. * take that it into account here.
  1521. */
  1522. *consumed_up = tunnel->allocated_up *
  1523. (TB_USB3_WEIGHT + pcie_weight) / TB_USB3_WEIGHT;
  1524. *consumed_down = tunnel->allocated_down *
  1525. (TB_USB3_WEIGHT + pcie_weight) / TB_USB3_WEIGHT;
  1526. if (tb_port_get_link_generation(port) >= 4) {
  1527. *consumed_up = max(*consumed_up, USB4_V2_USB3_MIN_BANDWIDTH);
  1528. *consumed_down = max(*consumed_down, USB4_V2_USB3_MIN_BANDWIDTH);
  1529. }
  1530. return 0;
  1531. }
  1532. static int tb_usb3_release_unused_bandwidth(struct tb_tunnel *tunnel)
  1533. {
  1534. int ret;
  1535. ret = usb4_usb3_port_release_bandwidth(tunnel->src_port,
  1536. &tunnel->allocated_up,
  1537. &tunnel->allocated_down);
  1538. if (ret)
  1539. return ret;
  1540. tb_tunnel_dbg(tunnel, "decreased bandwidth allocation to %d/%d Mb/s\n",
  1541. tunnel->allocated_up, tunnel->allocated_down);
  1542. return 0;
  1543. }
  1544. static void tb_usb3_reclaim_available_bandwidth(struct tb_tunnel *tunnel,
  1545. int *available_up,
  1546. int *available_down)
  1547. {
  1548. int ret, max_rate, allocate_up, allocate_down;
  1549. ret = tb_usb3_max_link_rate(tunnel->dst_port, tunnel->src_port);
  1550. if (ret < 0) {
  1551. tb_tunnel_warn(tunnel, "failed to read maximum link rate\n");
  1552. return;
  1553. }
  1554. /*
  1555. * 90% of the max rate can be allocated for isochronous
  1556. * transfers.
  1557. */
  1558. max_rate = ret * 90 / 100;
  1559. /* No need to reclaim if already at maximum */
  1560. if (tunnel->allocated_up >= max_rate &&
  1561. tunnel->allocated_down >= max_rate)
  1562. return;
  1563. /* Don't go lower than what is already allocated */
  1564. allocate_up = min(max_rate, *available_up);
  1565. if (allocate_up < tunnel->allocated_up)
  1566. allocate_up = tunnel->allocated_up;
  1567. allocate_down = min(max_rate, *available_down);
  1568. if (allocate_down < tunnel->allocated_down)
  1569. allocate_down = tunnel->allocated_down;
  1570. /* If no changes no need to do more */
  1571. if (allocate_up == tunnel->allocated_up &&
  1572. allocate_down == tunnel->allocated_down)
  1573. return;
  1574. ret = usb4_usb3_port_allocate_bandwidth(tunnel->src_port, &allocate_up,
  1575. &allocate_down);
  1576. if (ret) {
  1577. tb_tunnel_info(tunnel, "failed to allocate bandwidth\n");
  1578. return;
  1579. }
  1580. tunnel->allocated_up = allocate_up;
  1581. *available_up -= tunnel->allocated_up;
  1582. tunnel->allocated_down = allocate_down;
  1583. *available_down -= tunnel->allocated_down;
  1584. tb_tunnel_dbg(tunnel, "increased bandwidth allocation to %d/%d Mb/s\n",
  1585. tunnel->allocated_up, tunnel->allocated_down);
  1586. }
  1587. static void tb_usb3_init_credits(struct tb_path_hop *hop)
  1588. {
  1589. struct tb_port *port = hop->in_port;
  1590. struct tb_switch *sw = port->sw;
  1591. unsigned int credits;
  1592. if (tb_port_use_credit_allocation(port)) {
  1593. credits = sw->max_usb3_credits;
  1594. } else {
  1595. if (tb_port_is_null(port))
  1596. credits = port->bonded ? 32 : 16;
  1597. else
  1598. credits = 7;
  1599. }
  1600. hop->initial_credits = credits;
  1601. }
  1602. static void tb_usb3_init_path(struct tb_path *path)
  1603. {
  1604. struct tb_path_hop *hop;
  1605. path->egress_fc_enable = TB_PATH_SOURCE | TB_PATH_INTERNAL;
  1606. path->egress_shared_buffer = TB_PATH_NONE;
  1607. path->ingress_fc_enable = TB_PATH_ALL;
  1608. path->ingress_shared_buffer = TB_PATH_NONE;
  1609. path->priority = TB_USB3_PRIORITY;
  1610. path->weight = TB_USB3_WEIGHT;
  1611. path->drop_packages = 0;
  1612. tb_path_for_each_hop(path, hop)
  1613. tb_usb3_init_credits(hop);
  1614. }
  1615. /**
  1616. * tb_tunnel_discover_usb3() - Discover existing USB3 tunnels
  1617. * @tb: Pointer to the domain structure
  1618. * @down: USB3 downstream adapter
  1619. * @alloc_hopid: Allocate HopIDs from visited ports
  1620. *
  1621. * If @down adapter is active, follows the tunnel to the USB3 upstream
  1622. * adapter and back. Returns the discovered tunnel or %NULL if there was
  1623. * no tunnel.
  1624. */
  1625. struct tb_tunnel *tb_tunnel_discover_usb3(struct tb *tb, struct tb_port *down,
  1626. bool alloc_hopid)
  1627. {
  1628. struct tb_tunnel *tunnel;
  1629. struct tb_path *path;
  1630. if (!tb_usb3_port_is_enabled(down))
  1631. return NULL;
  1632. tunnel = tb_tunnel_alloc(tb, 2, TB_TUNNEL_USB3);
  1633. if (!tunnel)
  1634. return NULL;
  1635. tunnel->activate = tb_usb3_activate;
  1636. tunnel->src_port = down;
  1637. /*
  1638. * Discover both paths even if they are not complete. We will
  1639. * clean them up by calling tb_tunnel_deactivate() below in that
  1640. * case.
  1641. */
  1642. path = tb_path_discover(down, TB_USB3_HOPID, NULL, -1,
  1643. &tunnel->dst_port, "USB3 Down", alloc_hopid);
  1644. if (!path) {
  1645. /* Just disable the downstream port */
  1646. tb_usb3_port_enable(down, false);
  1647. goto err_free;
  1648. }
  1649. tunnel->paths[TB_USB3_PATH_DOWN] = path;
  1650. tb_usb3_init_path(tunnel->paths[TB_USB3_PATH_DOWN]);
  1651. path = tb_path_discover(tunnel->dst_port, -1, down, TB_USB3_HOPID, NULL,
  1652. "USB3 Up", alloc_hopid);
  1653. if (!path)
  1654. goto err_deactivate;
  1655. tunnel->paths[TB_USB3_PATH_UP] = path;
  1656. tb_usb3_init_path(tunnel->paths[TB_USB3_PATH_UP]);
  1657. /* Validate that the tunnel is complete */
  1658. if (!tb_port_is_usb3_up(tunnel->dst_port)) {
  1659. tb_port_warn(tunnel->dst_port,
  1660. "path does not end on an USB3 adapter, cleaning up\n");
  1661. goto err_deactivate;
  1662. }
  1663. if (down != tunnel->src_port) {
  1664. tb_tunnel_warn(tunnel, "path is not complete, cleaning up\n");
  1665. goto err_deactivate;
  1666. }
  1667. if (!tb_usb3_port_is_enabled(tunnel->dst_port)) {
  1668. tb_tunnel_warn(tunnel,
  1669. "tunnel is not fully activated, cleaning up\n");
  1670. goto err_deactivate;
  1671. }
  1672. if (!tb_route(down->sw)) {
  1673. int ret;
  1674. /*
  1675. * Read the initial bandwidth allocation for the first
  1676. * hop tunnel.
  1677. */
  1678. ret = usb4_usb3_port_allocated_bandwidth(down,
  1679. &tunnel->allocated_up, &tunnel->allocated_down);
  1680. if (ret)
  1681. goto err_deactivate;
  1682. tb_tunnel_dbg(tunnel, "currently allocated bandwidth %d/%d Mb/s\n",
  1683. tunnel->allocated_up, tunnel->allocated_down);
  1684. tunnel->init = tb_usb3_init;
  1685. tunnel->consumed_bandwidth = tb_usb3_consumed_bandwidth;
  1686. tunnel->release_unused_bandwidth =
  1687. tb_usb3_release_unused_bandwidth;
  1688. tunnel->reclaim_available_bandwidth =
  1689. tb_usb3_reclaim_available_bandwidth;
  1690. }
  1691. tb_tunnel_dbg(tunnel, "discovered\n");
  1692. return tunnel;
  1693. err_deactivate:
  1694. tb_tunnel_deactivate(tunnel);
  1695. err_free:
  1696. tb_tunnel_free(tunnel);
  1697. return NULL;
  1698. }
  1699. /**
  1700. * tb_tunnel_alloc_usb3() - allocate a USB3 tunnel
  1701. * @tb: Pointer to the domain structure
  1702. * @up: USB3 upstream adapter port
  1703. * @down: USB3 downstream adapter port
  1704. * @max_up: Maximum available upstream bandwidth for the USB3 tunnel.
  1705. * %0 if no available bandwidth.
  1706. * @max_down: Maximum available downstream bandwidth for the USB3 tunnel.
  1707. * %0 if no available bandwidth.
  1708. *
  1709. * Allocate an USB3 tunnel. The ports must be of type @TB_TYPE_USB3_UP and
  1710. * @TB_TYPE_USB3_DOWN.
  1711. *
  1712. * Return: Returns a tb_tunnel on success or %NULL on failure.
  1713. */
  1714. struct tb_tunnel *tb_tunnel_alloc_usb3(struct tb *tb, struct tb_port *up,
  1715. struct tb_port *down, int max_up,
  1716. int max_down)
  1717. {
  1718. struct tb_tunnel *tunnel;
  1719. struct tb_path *path;
  1720. int max_rate = 0;
  1721. if (!tb_route(down->sw) && (max_up > 0 || max_down > 0)) {
  1722. /*
  1723. * For USB3 isochronous transfers, we allow bandwidth which is
  1724. * not higher than 90% of maximum supported bandwidth by USB3
  1725. * adapters.
  1726. */
  1727. max_rate = tb_usb3_max_link_rate(down, up);
  1728. if (max_rate < 0)
  1729. return NULL;
  1730. max_rate = max_rate * 90 / 100;
  1731. tb_port_dbg(up, "maximum required bandwidth for USB3 tunnel %d Mb/s\n",
  1732. max_rate);
  1733. }
  1734. tunnel = tb_tunnel_alloc(tb, 2, TB_TUNNEL_USB3);
  1735. if (!tunnel)
  1736. return NULL;
  1737. tunnel->activate = tb_usb3_activate;
  1738. tunnel->src_port = down;
  1739. tunnel->dst_port = up;
  1740. tunnel->max_up = max_up;
  1741. tunnel->max_down = max_down;
  1742. path = tb_path_alloc(tb, down, TB_USB3_HOPID, up, TB_USB3_HOPID, 0,
  1743. "USB3 Down");
  1744. if (!path) {
  1745. tb_tunnel_free(tunnel);
  1746. return NULL;
  1747. }
  1748. tb_usb3_init_path(path);
  1749. tunnel->paths[TB_USB3_PATH_DOWN] = path;
  1750. path = tb_path_alloc(tb, up, TB_USB3_HOPID, down, TB_USB3_HOPID, 0,
  1751. "USB3 Up");
  1752. if (!path) {
  1753. tb_tunnel_free(tunnel);
  1754. return NULL;
  1755. }
  1756. tb_usb3_init_path(path);
  1757. tunnel->paths[TB_USB3_PATH_UP] = path;
  1758. if (!tb_route(down->sw)) {
  1759. tunnel->allocated_up = min(max_rate, max_up);
  1760. tunnel->allocated_down = min(max_rate, max_down);
  1761. tunnel->init = tb_usb3_init;
  1762. tunnel->consumed_bandwidth = tb_usb3_consumed_bandwidth;
  1763. tunnel->release_unused_bandwidth =
  1764. tb_usb3_release_unused_bandwidth;
  1765. tunnel->reclaim_available_bandwidth =
  1766. tb_usb3_reclaim_available_bandwidth;
  1767. }
  1768. return tunnel;
  1769. }
  1770. /**
  1771. * tb_tunnel_free() - free a tunnel
  1772. * @tunnel: Tunnel to be freed
  1773. *
  1774. * Frees a tunnel. The tunnel does not need to be deactivated.
  1775. */
  1776. void tb_tunnel_free(struct tb_tunnel *tunnel)
  1777. {
  1778. int i;
  1779. if (!tunnel)
  1780. return;
  1781. if (tunnel->deinit)
  1782. tunnel->deinit(tunnel);
  1783. for (i = 0; i < tunnel->npaths; i++) {
  1784. if (tunnel->paths[i])
  1785. tb_path_free(tunnel->paths[i]);
  1786. }
  1787. kfree(tunnel->paths);
  1788. kfree(tunnel);
  1789. }
  1790. /**
  1791. * tb_tunnel_is_invalid - check whether an activated path is still valid
  1792. * @tunnel: Tunnel to check
  1793. */
  1794. bool tb_tunnel_is_invalid(struct tb_tunnel *tunnel)
  1795. {
  1796. int i;
  1797. for (i = 0; i < tunnel->npaths; i++) {
  1798. WARN_ON(!tunnel->paths[i]->activated);
  1799. if (tb_path_is_invalid(tunnel->paths[i]))
  1800. return true;
  1801. }
  1802. return false;
  1803. }
  1804. /**
  1805. * tb_tunnel_restart() - activate a tunnel after a hardware reset
  1806. * @tunnel: Tunnel to restart
  1807. *
  1808. * Return: 0 on success and negative errno in case if failure
  1809. */
  1810. int tb_tunnel_restart(struct tb_tunnel *tunnel)
  1811. {
  1812. int res, i;
  1813. tb_tunnel_dbg(tunnel, "activating\n");
  1814. /*
  1815. * Make sure all paths are properly disabled before enabling
  1816. * them again.
  1817. */
  1818. for (i = 0; i < tunnel->npaths; i++) {
  1819. if (tunnel->paths[i]->activated) {
  1820. tb_path_deactivate(tunnel->paths[i]);
  1821. tunnel->paths[i]->activated = false;
  1822. }
  1823. }
  1824. if (tunnel->init) {
  1825. res = tunnel->init(tunnel);
  1826. if (res)
  1827. return res;
  1828. }
  1829. for (i = 0; i < tunnel->npaths; i++) {
  1830. res = tb_path_activate(tunnel->paths[i]);
  1831. if (res)
  1832. goto err;
  1833. }
  1834. if (tunnel->activate) {
  1835. res = tunnel->activate(tunnel, true);
  1836. if (res)
  1837. goto err;
  1838. }
  1839. return 0;
  1840. err:
  1841. tb_tunnel_warn(tunnel, "activation failed\n");
  1842. tb_tunnel_deactivate(tunnel);
  1843. return res;
  1844. }
  1845. /**
  1846. * tb_tunnel_activate() - activate a tunnel
  1847. * @tunnel: Tunnel to activate
  1848. *
  1849. * Return: Returns 0 on success or an error code on failure.
  1850. */
  1851. int tb_tunnel_activate(struct tb_tunnel *tunnel)
  1852. {
  1853. int i;
  1854. for (i = 0; i < tunnel->npaths; i++) {
  1855. if (tunnel->paths[i]->activated) {
  1856. tb_tunnel_WARN(tunnel,
  1857. "trying to activate an already activated tunnel\n");
  1858. return -EINVAL;
  1859. }
  1860. }
  1861. return tb_tunnel_restart(tunnel);
  1862. }
  1863. /**
  1864. * tb_tunnel_deactivate() - deactivate a tunnel
  1865. * @tunnel: Tunnel to deactivate
  1866. */
  1867. void tb_tunnel_deactivate(struct tb_tunnel *tunnel)
  1868. {
  1869. int i;
  1870. tb_tunnel_dbg(tunnel, "deactivating\n");
  1871. if (tunnel->activate)
  1872. tunnel->activate(tunnel, false);
  1873. for (i = 0; i < tunnel->npaths; i++) {
  1874. if (tunnel->paths[i] && tunnel->paths[i]->activated)
  1875. tb_path_deactivate(tunnel->paths[i]);
  1876. }
  1877. }
  1878. /**
  1879. * tb_tunnel_port_on_path() - Does the tunnel go through port
  1880. * @tunnel: Tunnel to check
  1881. * @port: Port to check
  1882. *
  1883. * Returns true if @tunnel goes through @port (direction does not matter),
  1884. * false otherwise.
  1885. */
  1886. bool tb_tunnel_port_on_path(const struct tb_tunnel *tunnel,
  1887. const struct tb_port *port)
  1888. {
  1889. int i;
  1890. for (i = 0; i < tunnel->npaths; i++) {
  1891. if (!tunnel->paths[i])
  1892. continue;
  1893. if (tb_path_port_on_path(tunnel->paths[i], port))
  1894. return true;
  1895. }
  1896. return false;
  1897. }
  1898. static bool tb_tunnel_is_active(const struct tb_tunnel *tunnel)
  1899. {
  1900. int i;
  1901. for (i = 0; i < tunnel->npaths; i++) {
  1902. if (!tunnel->paths[i])
  1903. return false;
  1904. if (!tunnel->paths[i]->activated)
  1905. return false;
  1906. }
  1907. return true;
  1908. }
  1909. /**
  1910. * tb_tunnel_maximum_bandwidth() - Return maximum possible bandwidth
  1911. * @tunnel: Tunnel to check
  1912. * @max_up: Maximum upstream bandwidth in Mb/s
  1913. * @max_down: Maximum downstream bandwidth in Mb/s
  1914. *
  1915. * Returns maximum possible bandwidth this tunnel can go if not limited
  1916. * by other bandwidth clients. If the tunnel does not support this
  1917. * returns %-EOPNOTSUPP.
  1918. */
  1919. int tb_tunnel_maximum_bandwidth(struct tb_tunnel *tunnel, int *max_up,
  1920. int *max_down)
  1921. {
  1922. if (!tb_tunnel_is_active(tunnel))
  1923. return -EINVAL;
  1924. if (tunnel->maximum_bandwidth)
  1925. return tunnel->maximum_bandwidth(tunnel, max_up, max_down);
  1926. return -EOPNOTSUPP;
  1927. }
  1928. /**
  1929. * tb_tunnel_allocated_bandwidth() - Return bandwidth allocated for the tunnel
  1930. * @tunnel: Tunnel to check
  1931. * @allocated_up: Currently allocated upstream bandwidth in Mb/s is stored here
  1932. * @allocated_down: Currently allocated downstream bandwidth in Mb/s is
  1933. * stored here
  1934. *
  1935. * Returns the bandwidth allocated for the tunnel. This may be higher
  1936. * than what the tunnel actually consumes.
  1937. */
  1938. int tb_tunnel_allocated_bandwidth(struct tb_tunnel *tunnel, int *allocated_up,
  1939. int *allocated_down)
  1940. {
  1941. if (!tb_tunnel_is_active(tunnel))
  1942. return -EINVAL;
  1943. if (tunnel->allocated_bandwidth)
  1944. return tunnel->allocated_bandwidth(tunnel, allocated_up,
  1945. allocated_down);
  1946. return -EOPNOTSUPP;
  1947. }
  1948. /**
  1949. * tb_tunnel_alloc_bandwidth() - Change tunnel bandwidth allocation
  1950. * @tunnel: Tunnel whose bandwidth allocation to change
  1951. * @alloc_up: New upstream bandwidth in Mb/s
  1952. * @alloc_down: New downstream bandwidth in Mb/s
  1953. *
  1954. * Tries to change tunnel bandwidth allocation. If succeeds returns %0
  1955. * and updates @alloc_up and @alloc_down to that was actually allocated
  1956. * (it may not be the same as passed originally). Returns negative errno
  1957. * in case of failure.
  1958. */
  1959. int tb_tunnel_alloc_bandwidth(struct tb_tunnel *tunnel, int *alloc_up,
  1960. int *alloc_down)
  1961. {
  1962. if (!tb_tunnel_is_active(tunnel))
  1963. return -EINVAL;
  1964. if (tunnel->alloc_bandwidth)
  1965. return tunnel->alloc_bandwidth(tunnel, alloc_up, alloc_down);
  1966. return -EOPNOTSUPP;
  1967. }
  1968. /**
  1969. * tb_tunnel_consumed_bandwidth() - Return bandwidth consumed by the tunnel
  1970. * @tunnel: Tunnel to check
  1971. * @consumed_up: Consumed bandwidth in Mb/s from @dst_port to @src_port.
  1972. * Can be %NULL.
  1973. * @consumed_down: Consumed bandwidth in Mb/s from @src_port to @dst_port.
  1974. * Can be %NULL.
  1975. *
  1976. * Stores the amount of isochronous bandwidth @tunnel consumes in
  1977. * @consumed_up and @consumed_down. In case of success returns %0,
  1978. * negative errno otherwise.
  1979. */
  1980. int tb_tunnel_consumed_bandwidth(struct tb_tunnel *tunnel, int *consumed_up,
  1981. int *consumed_down)
  1982. {
  1983. int up_bw = 0, down_bw = 0;
  1984. if (!tb_tunnel_is_active(tunnel))
  1985. goto out;
  1986. if (tunnel->consumed_bandwidth) {
  1987. int ret;
  1988. ret = tunnel->consumed_bandwidth(tunnel, &up_bw, &down_bw);
  1989. if (ret)
  1990. return ret;
  1991. tb_tunnel_dbg(tunnel, "consumed bandwidth %d/%d Mb/s\n", up_bw,
  1992. down_bw);
  1993. }
  1994. out:
  1995. if (consumed_up)
  1996. *consumed_up = up_bw;
  1997. if (consumed_down)
  1998. *consumed_down = down_bw;
  1999. return 0;
  2000. }
  2001. /**
  2002. * tb_tunnel_release_unused_bandwidth() - Release unused bandwidth
  2003. * @tunnel: Tunnel whose unused bandwidth to release
  2004. *
  2005. * If tunnel supports dynamic bandwidth management (USB3 tunnels at the
  2006. * moment) this function makes it to release all the unused bandwidth.
  2007. *
  2008. * Returns %0 in case of success and negative errno otherwise.
  2009. */
  2010. int tb_tunnel_release_unused_bandwidth(struct tb_tunnel *tunnel)
  2011. {
  2012. if (!tb_tunnel_is_active(tunnel))
  2013. return 0;
  2014. if (tunnel->release_unused_bandwidth) {
  2015. int ret;
  2016. ret = tunnel->release_unused_bandwidth(tunnel);
  2017. if (ret)
  2018. return ret;
  2019. }
  2020. return 0;
  2021. }
  2022. /**
  2023. * tb_tunnel_reclaim_available_bandwidth() - Reclaim available bandwidth
  2024. * @tunnel: Tunnel reclaiming available bandwidth
  2025. * @available_up: Available upstream bandwidth (in Mb/s)
  2026. * @available_down: Available downstream bandwidth (in Mb/s)
  2027. *
  2028. * Reclaims bandwidth from @available_up and @available_down and updates
  2029. * the variables accordingly (e.g decreases both according to what was
  2030. * reclaimed by the tunnel). If nothing was reclaimed the values are
  2031. * kept as is.
  2032. */
  2033. void tb_tunnel_reclaim_available_bandwidth(struct tb_tunnel *tunnel,
  2034. int *available_up,
  2035. int *available_down)
  2036. {
  2037. if (!tb_tunnel_is_active(tunnel))
  2038. return;
  2039. if (tunnel->reclaim_available_bandwidth)
  2040. tunnel->reclaim_available_bandwidth(tunnel, available_up,
  2041. available_down);
  2042. }
  2043. const char *tb_tunnel_type_name(const struct tb_tunnel *tunnel)
  2044. {
  2045. return tb_tunnel_names[tunnel->type];
  2046. }