usb4.c

  1. // SPDX-License-Identifier: GPL-2.0
  2. /*
  3. * USB4 specific functionality
  4. *
  5. * Copyright (C) 2019, Intel Corporation
  6. * Authors: Mika Westerberg <mika.westerberg@linux.intel.com>
  7. * Rajmohan Mani <rajmohan.mani@intel.com>
  8. */
  9. #include <linux/delay.h>
  10. #include <linux/ktime.h>
  11. #include <linux/units.h>
  12. #include "sb_regs.h"
  13. #include "tb.h"
  14. #define USB4_DATA_RETRIES 3
  15. #define USB4_DATA_DWORDS 16
  16. #define USB4_NVM_READ_OFFSET_MASK GENMASK(23, 2)
  17. #define USB4_NVM_READ_OFFSET_SHIFT 2
  18. #define USB4_NVM_READ_LENGTH_MASK GENMASK(27, 24)
  19. #define USB4_NVM_READ_LENGTH_SHIFT 24
  20. #define USB4_NVM_SET_OFFSET_MASK USB4_NVM_READ_OFFSET_MASK
  21. #define USB4_NVM_SET_OFFSET_SHIFT USB4_NVM_READ_OFFSET_SHIFT
  22. #define USB4_DROM_ADDRESS_MASK GENMASK(14, 2)
  23. #define USB4_DROM_ADDRESS_SHIFT 2
  24. #define USB4_DROM_SIZE_MASK GENMASK(19, 15)
  25. #define USB4_DROM_SIZE_SHIFT 15
  26. #define USB4_NVM_SECTOR_SIZE_MASK GENMASK(23, 0)
  27. #define USB4_BA_LENGTH_MASK GENMASK(7, 0)
  28. #define USB4_BA_INDEX_MASK GENMASK(15, 0)
  29. enum usb4_ba_index {
  30. USB4_BA_MAX_USB3 = 0x1,
  31. USB4_BA_MIN_DP_AUX = 0x2,
  32. USB4_BA_MIN_DP_MAIN = 0x3,
  33. USB4_BA_MAX_PCIE = 0x4,
  34. USB4_BA_MAX_HI = 0x5,
  35. };
  36. #define USB4_BA_VALUE_MASK GENMASK(31, 16)
  37. #define USB4_BA_VALUE_SHIFT 16
  38. /* Delays in us used with usb4_port_wait_for_bit() */
  39. #define USB4_PORT_DELAY 50
  40. #define USB4_PORT_SB_DELAY 1000
  41. static int usb4_native_switch_op(struct tb_switch *sw, u16 opcode,
  42. u32 *metadata, u8 *status,
  43. const void *tx_data, size_t tx_dwords,
  44. void *rx_data, size_t rx_dwords)
  45. {
  46. u32 val;
  47. int ret;
  48. if (metadata) {
  49. ret = tb_sw_write(sw, metadata, TB_CFG_SWITCH, ROUTER_CS_25, 1);
  50. if (ret)
  51. return ret;
  52. }
  53. if (tx_dwords) {
  54. ret = tb_sw_write(sw, tx_data, TB_CFG_SWITCH, ROUTER_CS_9,
  55. tx_dwords);
  56. if (ret)
  57. return ret;
  58. }
  59. val = opcode | ROUTER_CS_26_OV;
  60. ret = tb_sw_write(sw, &val, TB_CFG_SWITCH, ROUTER_CS_26, 1);
  61. if (ret)
  62. return ret;
  63. ret = tb_switch_wait_for_bit(sw, ROUTER_CS_26, ROUTER_CS_26_OV, 0, 500);
  64. if (ret)
  65. return ret;
  66. ret = tb_sw_read(sw, &val, TB_CFG_SWITCH, ROUTER_CS_26, 1);
  67. if (ret)
  68. return ret;
  69. if (val & ROUTER_CS_26_ONS)
  70. return -EOPNOTSUPP;
  71. if (status)
  72. *status = (val & ROUTER_CS_26_STATUS_MASK) >>
  73. ROUTER_CS_26_STATUS_SHIFT;
  74. if (metadata) {
  75. ret = tb_sw_read(sw, metadata, TB_CFG_SWITCH, ROUTER_CS_25, 1);
  76. if (ret)
  77. return ret;
  78. }
  79. if (rx_dwords) {
  80. ret = tb_sw_read(sw, rx_data, TB_CFG_SWITCH, ROUTER_CS_9,
  81. rx_dwords);
  82. if (ret)
  83. return ret;
  84. }
  85. return 0;
  86. }
  87. static int __usb4_switch_op(struct tb_switch *sw, u16 opcode, u32 *metadata,
  88. u8 *status, const void *tx_data, size_t tx_dwords,
  89. void *rx_data, size_t rx_dwords)
  90. {
  91. const struct tb_cm_ops *cm_ops = sw->tb->cm_ops;
  92. if (tx_dwords > USB4_DATA_DWORDS || rx_dwords > USB4_DATA_DWORDS)
  93. return -EINVAL;
  94. /*
  95. * If the connection manager implementation provides USB4 router
  96. * operation proxy callback, call it here instead of running the
  97. * operation natively.
  98. */
  99. if (cm_ops->usb4_switch_op) {
  100. int ret;
  101. ret = cm_ops->usb4_switch_op(sw, opcode, metadata, status,
  102. tx_data, tx_dwords, rx_data,
  103. rx_dwords);
  104. if (ret != -EOPNOTSUPP)
  105. return ret;
  106. /*
  107. * If the proxy was not supported then run the native
  108. * router operation instead.
  109. */
  110. }
  111. return usb4_native_switch_op(sw, opcode, metadata, status, tx_data,
  112. tx_dwords, rx_data, rx_dwords);
  113. }
  114. static inline int usb4_switch_op(struct tb_switch *sw, u16 opcode,
  115. u32 *metadata, u8 *status)
  116. {
  117. return __usb4_switch_op(sw, opcode, metadata, status, NULL, 0, NULL, 0);
  118. }
  119. static inline int usb4_switch_op_data(struct tb_switch *sw, u16 opcode,
  120. u32 *metadata, u8 *status,
  121. const void *tx_data, size_t tx_dwords,
  122. void *rx_data, size_t rx_dwords)
  123. {
  124. return __usb4_switch_op(sw, opcode, metadata, status, tx_data,
  125. tx_dwords, rx_data, rx_dwords);
  126. }
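/*
 * A minimal sketch of how a router operation is typically issued through
 * the helpers above: metadata is exchanged via ROUTER_CS_25 and the
 * status byte comes back via ROUTER_CS_26. The opcode below is only an
 * example; any USB4_SWITCH_OP_* value is used the same way. A non-zero
 * return means the transport (or CM proxy) failed, while a non-zero
 * @status means the router completed the operation with an error.
 *
 *	u32 metadata = 0;
 *	u8 status = 0;
 *	int ret;
 *
 *	ret = usb4_switch_op(sw, USB4_SWITCH_OP_NVM_SECTOR_SIZE,
 *			     &metadata, &status);
 *	if (ret)
 *		return ret;
 *	if (status)
 *		return -EIO;
 */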
  127. /**
  128. * usb4_switch_check_wakes() - Check for wakes and notify PM core about them
  129. * @sw: Router whose wakes to check
  130. *
131. * Checks for wakes that occurred during suspend and notifies the PM core about them.
  132. */
  133. void usb4_switch_check_wakes(struct tb_switch *sw)
  134. {
  135. bool wakeup_usb4 = false;
  136. struct usb4_port *usb4;
  137. struct tb_port *port;
  138. bool wakeup = false;
  139. u32 val;
  140. if (tb_route(sw)) {
  141. if (tb_sw_read(sw, &val, TB_CFG_SWITCH, ROUTER_CS_6, 1))
  142. return;
  143. tb_sw_dbg(sw, "PCIe wake: %s, USB3 wake: %s\n",
  144. (val & ROUTER_CS_6_WOPS) ? "yes" : "no",
  145. (val & ROUTER_CS_6_WOUS) ? "yes" : "no");
  146. wakeup = val & (ROUTER_CS_6_WOPS | ROUTER_CS_6_WOUS);
  147. }
  148. /*
149. * Check all downstream ports for USB4 wake,
  150. * connection wake and disconnection wake.
  151. */
  152. tb_switch_for_each_port(sw, port) {
  153. if (!port->cap_usb4)
  154. continue;
  155. if (tb_port_read(port, &val, TB_CFG_PORT,
  156. port->cap_usb4 + PORT_CS_18, 1))
  157. break;
  158. tb_port_dbg(port, "USB4 wake: %s, connection wake: %s, disconnection wake: %s\n",
  159. (val & PORT_CS_18_WOU4S) ? "yes" : "no",
  160. (val & PORT_CS_18_WOCS) ? "yes" : "no",
  161. (val & PORT_CS_18_WODS) ? "yes" : "no");
  162. wakeup_usb4 = val & (PORT_CS_18_WOU4S | PORT_CS_18_WOCS |
  163. PORT_CS_18_WODS);
  164. usb4 = port->usb4;
  165. if (device_may_wakeup(&usb4->dev) && wakeup_usb4)
  166. pm_wakeup_event(&usb4->dev, 0);
  167. wakeup |= wakeup_usb4;
  168. }
  169. if (wakeup)
  170. pm_wakeup_event(&sw->dev, 0);
  171. }
  172. static bool link_is_usb4(struct tb_port *port)
  173. {
  174. u32 val;
  175. if (!port->cap_usb4)
  176. return false;
  177. if (tb_port_read(port, &val, TB_CFG_PORT,
  178. port->cap_usb4 + PORT_CS_18, 1))
  179. return false;
  180. return !(val & PORT_CS_18_TCM);
  181. }
  182. /**
  183. * usb4_switch_setup() - Additional setup for USB4 device
  184. * @sw: USB4 router to setup
  185. *
  186. * USB4 routers need additional settings in order to enable all the
  187. * tunneling. This function enables USB and PCIe tunneling if it can be
188. * enabled (e.g. the parent switch also supports them). If USB tunneling
189. * is not available for some reason (for example, there is a Thunderbolt 3
190. * switch upstream) then the internal xHCI controller is enabled
  191. * instead.
  192. *
  193. * This does not set the configuration valid bit of the router. To do
  194. * that call usb4_switch_configuration_valid().
  195. */
  196. int usb4_switch_setup(struct tb_switch *sw)
  197. {
  198. struct tb_switch *parent = tb_switch_parent(sw);
  199. struct tb_port *down;
  200. bool tbt3, xhci;
  201. u32 val = 0;
  202. int ret;
  203. if (!tb_route(sw))
  204. return 0;
  205. ret = tb_sw_read(sw, &val, TB_CFG_SWITCH, ROUTER_CS_6, 1);
  206. if (ret)
  207. return ret;
  208. down = tb_switch_downstream_port(sw);
  209. sw->link_usb4 = link_is_usb4(down);
  210. tb_sw_dbg(sw, "link: %s\n", sw->link_usb4 ? "USB4" : "TBT");
  211. xhci = val & ROUTER_CS_6_HCI;
  212. tbt3 = !(val & ROUTER_CS_6_TNS);
  213. tb_sw_dbg(sw, "TBT3 support: %s, xHCI: %s\n",
  214. tbt3 ? "yes" : "no", xhci ? "yes" : "no");
  215. ret = tb_sw_read(sw, &val, TB_CFG_SWITCH, ROUTER_CS_5, 1);
  216. if (ret)
  217. return ret;
  218. if (tb_acpi_may_tunnel_usb3() && sw->link_usb4 &&
  219. tb_switch_find_port(parent, TB_TYPE_USB3_DOWN)) {
  220. val |= ROUTER_CS_5_UTO;
  221. xhci = false;
  222. }
  223. /*
  224. * Only enable PCIe tunneling if the parent router supports it
  225. * and it is not disabled.
  226. */
  227. if (tb_acpi_may_tunnel_pcie() &&
  228. tb_switch_find_port(parent, TB_TYPE_PCIE_DOWN)) {
  229. val |= ROUTER_CS_5_PTO;
  230. /*
  231. * xHCI can be enabled if PCIe tunneling is supported
232. * and the parent does not have any USB3 downstream
  233. * adapters (so we cannot do USB 3.x tunneling).
  234. */
  235. if (xhci)
  236. val |= ROUTER_CS_5_HCO;
  237. }
  238. /* TBT3 supported by the CM */
  239. val &= ~ROUTER_CS_5_CNS;
  240. return tb_sw_write(sw, &val, TB_CFG_SWITCH, ROUTER_CS_5, 1);
  241. }
  242. /**
  243. * usb4_switch_configuration_valid() - Set tunneling configuration to be valid
  244. * @sw: USB4 router
  245. *
  246. * Sets configuration valid bit for the router. Must be called before
  247. * any tunnels can be set through the router and after
248. * usb4_switch_setup() has been called. Can be called for both host and
249. * device routers (does nothing for host routers).
250. *
251. * Returns %0 on success and negative errno otherwise.
  252. */
  253. int usb4_switch_configuration_valid(struct tb_switch *sw)
  254. {
  255. u32 val;
  256. int ret;
  257. if (!tb_route(sw))
  258. return 0;
  259. ret = tb_sw_read(sw, &val, TB_CFG_SWITCH, ROUTER_CS_5, 1);
  260. if (ret)
  261. return ret;
  262. val |= ROUTER_CS_5_CV;
  263. ret = tb_sw_write(sw, &val, TB_CFG_SWITCH, ROUTER_CS_5, 1);
  264. if (ret)
  265. return ret;
  266. return tb_switch_wait_for_bit(sw, ROUTER_CS_6, ROUTER_CS_6_CR,
  267. ROUTER_CS_6_CR, 50);
  268. }
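/*
 * A minimal sketch of the ordering described in the kernel-doc above:
 * usb4_switch_configuration_valid() is called after usb4_switch_setup()
 * and before any tunnels are set up through @sw.
 *
 *	ret = usb4_switch_setup(sw);
 *	if (ret)
 *		return ret;
 *	return usb4_switch_configuration_valid(sw);
 */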
  269. /**
  270. * usb4_switch_read_uid() - Read UID from USB4 router
  271. * @sw: USB4 router
  272. * @uid: UID is stored here
  273. *
  274. * Reads 64-bit UID from USB4 router config space.
  275. */
  276. int usb4_switch_read_uid(struct tb_switch *sw, u64 *uid)
  277. {
  278. return tb_sw_read(sw, uid, TB_CFG_SWITCH, ROUTER_CS_7, 2);
  279. }
  280. static int usb4_switch_drom_read_block(void *data,
  281. unsigned int dwaddress, void *buf,
  282. size_t dwords)
  283. {
  284. struct tb_switch *sw = data;
  285. u8 status = 0;
  286. u32 metadata;
  287. int ret;
  288. metadata = (dwords << USB4_DROM_SIZE_SHIFT) & USB4_DROM_SIZE_MASK;
  289. metadata |= (dwaddress << USB4_DROM_ADDRESS_SHIFT) &
  290. USB4_DROM_ADDRESS_MASK;
  291. ret = usb4_switch_op_data(sw, USB4_SWITCH_OP_DROM_READ, &metadata,
  292. &status, NULL, 0, buf, dwords);
  293. if (ret)
  294. return ret;
  295. return status ? -EIO : 0;
  296. }
  297. /**
  298. * usb4_switch_drom_read() - Read arbitrary bytes from USB4 router DROM
  299. * @sw: USB4 router
  300. * @address: Byte address inside DROM to start reading
  301. * @buf: Buffer where the DROM content is stored
  302. * @size: Number of bytes to read from DROM
  303. *
  304. * Uses USB4 router operations to read router DROM. For devices this
  305. * should always work but for hosts it may return %-EOPNOTSUPP in which
  306. * case the host router does not have DROM.
  307. */
  308. int usb4_switch_drom_read(struct tb_switch *sw, unsigned int address, void *buf,
  309. size_t size)
  310. {
  311. return tb_nvm_read_data(address, buf, size, USB4_DATA_RETRIES,
  312. usb4_switch_drom_read_block, sw);
  313. }
  314. /**
  315. * usb4_switch_lane_bonding_possible() - Are conditions met for lane bonding
  316. * @sw: USB4 router
  317. *
  318. * Checks whether conditions are met so that lane bonding can be
  319. * established with the upstream router. Call only for device routers.
  320. */
  321. bool usb4_switch_lane_bonding_possible(struct tb_switch *sw)
  322. {
  323. struct tb_port *up;
  324. int ret;
  325. u32 val;
  326. up = tb_upstream_port(sw);
  327. ret = tb_port_read(up, &val, TB_CFG_PORT, up->cap_usb4 + PORT_CS_18, 1);
  328. if (ret)
  329. return false;
  330. return !!(val & PORT_CS_18_BE);
  331. }
  332. /**
333. * usb4_switch_set_wake() - Enable/disable wake
  334. * @sw: USB4 router
  335. * @flags: Wakeup flags (%0 to disable)
  336. *
  337. * Enables/disables router to wake up from sleep.
  338. */
  339. int usb4_switch_set_wake(struct tb_switch *sw, unsigned int flags)
  340. {
  341. struct usb4_port *usb4;
  342. struct tb_port *port;
  343. u64 route = tb_route(sw);
  344. u32 val;
  345. int ret;
  346. /*
  347. * Enable wakes coming from all USB4 downstream ports (from
  348. * child routers). For device routers do this also for the
  349. * upstream USB4 port.
  350. */
  351. tb_switch_for_each_port(sw, port) {
  352. if (!tb_port_is_null(port))
  353. continue;
  354. if (!route && tb_is_upstream_port(port))
  355. continue;
  356. if (!port->cap_usb4)
  357. continue;
  358. ret = tb_port_read(port, &val, TB_CFG_PORT,
  359. port->cap_usb4 + PORT_CS_19, 1);
  360. if (ret)
  361. return ret;
  362. val &= ~(PORT_CS_19_WOC | PORT_CS_19_WOD | PORT_CS_19_WOU4);
  363. if (tb_is_upstream_port(port)) {
  364. val |= PORT_CS_19_WOU4;
  365. } else {
  366. bool configured = val & PORT_CS_19_PC;
  367. usb4 = port->usb4;
  368. if (((flags & TB_WAKE_ON_CONNECT) |
  369. device_may_wakeup(&usb4->dev)) && !configured)
  370. val |= PORT_CS_19_WOC;
  371. if (((flags & TB_WAKE_ON_DISCONNECT) |
  372. device_may_wakeup(&usb4->dev)) && configured)
  373. val |= PORT_CS_19_WOD;
  374. if ((flags & TB_WAKE_ON_USB4) && configured)
  375. val |= PORT_CS_19_WOU4;
  376. }
  377. ret = tb_port_write(port, &val, TB_CFG_PORT,
  378. port->cap_usb4 + PORT_CS_19, 1);
  379. if (ret)
  380. return ret;
  381. }
  382. /*
  383. * Enable wakes from PCIe, USB 3.x and DP on this router. Only
  384. * needed for device routers.
  385. */
  386. if (route) {
  387. ret = tb_sw_read(sw, &val, TB_CFG_SWITCH, ROUTER_CS_5, 1);
  388. if (ret)
  389. return ret;
  390. val &= ~(ROUTER_CS_5_WOP | ROUTER_CS_5_WOU | ROUTER_CS_5_WOD);
  391. if (flags & TB_WAKE_ON_USB3)
  392. val |= ROUTER_CS_5_WOU;
  393. if (flags & TB_WAKE_ON_PCIE)
  394. val |= ROUTER_CS_5_WOP;
  395. if (flags & TB_WAKE_ON_DP)
  396. val |= ROUTER_CS_5_WOD;
  397. ret = tb_sw_write(sw, &val, TB_CFG_SWITCH, ROUTER_CS_5, 1);
  398. if (ret)
  399. return ret;
  400. }
  401. return 0;
  402. }
  403. /**
  404. * usb4_switch_set_sleep() - Prepare the router to enter sleep
  405. * @sw: USB4 router
  406. *
  407. * Sets sleep bit for the router. Returns when the router sleep ready
  408. * bit has been asserted.
  409. */
  410. int usb4_switch_set_sleep(struct tb_switch *sw)
  411. {
  412. int ret;
  413. u32 val;
  414. /* Set sleep bit and wait for sleep ready to be asserted */
  415. ret = tb_sw_read(sw, &val, TB_CFG_SWITCH, ROUTER_CS_5, 1);
  416. if (ret)
  417. return ret;
  418. val |= ROUTER_CS_5_SLP;
  419. ret = tb_sw_write(sw, &val, TB_CFG_SWITCH, ROUTER_CS_5, 1);
  420. if (ret)
  421. return ret;
  422. return tb_switch_wait_for_bit(sw, ROUTER_CS_6, ROUTER_CS_6_SLPR,
  423. ROUTER_CS_6_SLPR, 500);
  424. }
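/*
 * A minimal sketch of a suspend-time sequence using the two helpers
 * above, assuming wakes from USB4 and PCIe are wanted; the flag
 * combination is only an example.
 *
 *	ret = usb4_switch_set_wake(sw, TB_WAKE_ON_USB4 | TB_WAKE_ON_PCIE);
 *	if (ret)
 *		return ret;
 *	return usb4_switch_set_sleep(sw);
 */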
  425. /**
  426. * usb4_switch_nvm_sector_size() - Return router NVM sector size
  427. * @sw: USB4 router
  428. *
  429. * If the router supports NVM operations this function returns the NVM
  430. * sector size in bytes. If NVM operations are not supported returns
  431. * %-EOPNOTSUPP.
  432. */
  433. int usb4_switch_nvm_sector_size(struct tb_switch *sw)
  434. {
  435. u32 metadata;
  436. u8 status;
  437. int ret;
  438. ret = usb4_switch_op(sw, USB4_SWITCH_OP_NVM_SECTOR_SIZE, &metadata,
  439. &status);
  440. if (ret)
  441. return ret;
  442. if (status)
  443. return status == 0x2 ? -EOPNOTSUPP : -EIO;
  444. return metadata & USB4_NVM_SECTOR_SIZE_MASK;
  445. }
  446. static int usb4_switch_nvm_read_block(void *data,
  447. unsigned int dwaddress, void *buf, size_t dwords)
  448. {
  449. struct tb_switch *sw = data;
  450. u8 status = 0;
  451. u32 metadata;
  452. int ret;
  453. metadata = (dwords << USB4_NVM_READ_LENGTH_SHIFT) &
  454. USB4_NVM_READ_LENGTH_MASK;
  455. metadata |= (dwaddress << USB4_NVM_READ_OFFSET_SHIFT) &
  456. USB4_NVM_READ_OFFSET_MASK;
  457. ret = usb4_switch_op_data(sw, USB4_SWITCH_OP_NVM_READ, &metadata,
  458. &status, NULL, 0, buf, dwords);
  459. if (ret)
  460. return ret;
  461. return status ? -EIO : 0;
  462. }
  463. /**
  464. * usb4_switch_nvm_read() - Read arbitrary bytes from router NVM
  465. * @sw: USB4 router
  466. * @address: Starting address in bytes
  467. * @buf: Read data is placed here
  468. * @size: How many bytes to read
  469. *
  470. * Reads NVM contents of the router. If NVM is not supported returns
  471. * %-EOPNOTSUPP.
  472. */
  473. int usb4_switch_nvm_read(struct tb_switch *sw, unsigned int address, void *buf,
  474. size_t size)
  475. {
  476. return tb_nvm_read_data(address, buf, size, USB4_DATA_RETRIES,
  477. usb4_switch_nvm_read_block, sw);
  478. }
  479. /**
  480. * usb4_switch_nvm_set_offset() - Set NVM write offset
  481. * @sw: USB4 router
  482. * @address: Start offset
  483. *
  484. * Explicitly sets NVM write offset. Normally when writing to NVM this
  485. * is done automatically by usb4_switch_nvm_write().
  486. *
487. * Returns %0 on success and negative errno if there was a failure.
  488. */
  489. int usb4_switch_nvm_set_offset(struct tb_switch *sw, unsigned int address)
  490. {
  491. u32 metadata, dwaddress;
  492. u8 status = 0;
  493. int ret;
  494. dwaddress = address / 4;
  495. metadata = (dwaddress << USB4_NVM_SET_OFFSET_SHIFT) &
  496. USB4_NVM_SET_OFFSET_MASK;
  497. ret = usb4_switch_op(sw, USB4_SWITCH_OP_NVM_SET_OFFSET, &metadata,
  498. &status);
  499. if (ret)
  500. return ret;
  501. return status ? -EIO : 0;
  502. }
  503. static int usb4_switch_nvm_write_next_block(void *data, unsigned int dwaddress,
  504. const void *buf, size_t dwords)
  505. {
  506. struct tb_switch *sw = data;
  507. u8 status;
  508. int ret;
  509. ret = usb4_switch_op_data(sw, USB4_SWITCH_OP_NVM_WRITE, NULL, &status,
  510. buf, dwords, NULL, 0);
  511. if (ret)
  512. return ret;
  513. return status ? -EIO : 0;
  514. }
  515. /**
  516. * usb4_switch_nvm_write() - Write to the router NVM
  517. * @sw: USB4 router
  518. * @address: Start address where to write in bytes
  519. * @buf: Pointer to the data to write
  520. * @size: Size of @buf in bytes
  521. *
  522. * Writes @buf to the router NVM using USB4 router operations. If NVM
  523. * write is not supported returns %-EOPNOTSUPP.
  524. */
  525. int usb4_switch_nvm_write(struct tb_switch *sw, unsigned int address,
  526. const void *buf, size_t size)
  527. {
  528. int ret;
  529. ret = usb4_switch_nvm_set_offset(sw, address);
  530. if (ret)
  531. return ret;
  532. return tb_nvm_write_data(address, buf, size, USB4_DATA_RETRIES,
  533. usb4_switch_nvm_write_next_block, sw);
  534. }
  535. /**
  536. * usb4_switch_nvm_authenticate() - Authenticate new NVM
  537. * @sw: USB4 router
  538. *
  539. * After the new NVM has been written via usb4_switch_nvm_write(), this
  540. * function triggers NVM authentication process. The router gets power
  541. * cycled and if the authentication is successful the new NVM starts
  542. * running. In case of failure returns negative errno.
  543. *
  544. * The caller should call usb4_switch_nvm_authenticate_status() to read
  545. * the status of the authentication after power cycle. It should be the
  546. * first router operation to avoid the status being lost.
  547. */
  548. int usb4_switch_nvm_authenticate(struct tb_switch *sw)
  549. {
  550. int ret;
  551. ret = usb4_switch_op(sw, USB4_SWITCH_OP_NVM_AUTH, NULL, NULL);
  552. switch (ret) {
  553. /*
  554. * The router is power cycled once NVM_AUTH is started so it is
  555. * expected to get any of the following errors back.
  556. */
  557. case -EACCES:
  558. case -ENOTCONN:
  559. case -ETIMEDOUT:
  560. return 0;
  561. default:
  562. return ret;
  563. }
  564. }
  565. /**
  566. * usb4_switch_nvm_authenticate_status() - Read status of last NVM authenticate
  567. * @sw: USB4 router
  568. * @status: Status code of the operation
  569. *
  570. * The function checks if there is status available from the last NVM
  571. * authenticate router operation. If there is status then %0 is returned
  572. * and the status code is placed in @status. Returns negative errno in case
  573. * of failure.
  574. *
  575. * Must be called before any other router operation.
  576. */
  577. int usb4_switch_nvm_authenticate_status(struct tb_switch *sw, u32 *status)
  578. {
  579. const struct tb_cm_ops *cm_ops = sw->tb->cm_ops;
  580. u16 opcode;
  581. u32 val;
  582. int ret;
  583. if (cm_ops->usb4_switch_nvm_authenticate_status) {
  584. ret = cm_ops->usb4_switch_nvm_authenticate_status(sw, status);
  585. if (ret != -EOPNOTSUPP)
  586. return ret;
  587. }
  588. ret = tb_sw_read(sw, &val, TB_CFG_SWITCH, ROUTER_CS_26, 1);
  589. if (ret)
  590. return ret;
  591. /* Check that the opcode is correct */
  592. opcode = val & ROUTER_CS_26_OPCODE_MASK;
  593. if (opcode == USB4_SWITCH_OP_NVM_AUTH) {
  594. if (val & ROUTER_CS_26_OV)
  595. return -EBUSY;
  596. if (val & ROUTER_CS_26_ONS)
  597. return -EOPNOTSUPP;
  598. *status = (val & ROUTER_CS_26_STATUS_MASK) >>
  599. ROUTER_CS_26_STATUS_SHIFT;
  600. } else {
  601. *status = 0;
  602. }
  603. return 0;
  604. }
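/*
 * A minimal sketch of the NVM upgrade flow described above, assuming
 * @image and @image_size hold a valid NVM image and @auth_status is a
 * u32 provided by the caller.
 *
 *	ret = usb4_switch_nvm_write(sw, 0, image, image_size);
 *	if (ret)
 *		return ret;
 *	ret = usb4_switch_nvm_authenticate(sw);
 *	if (ret)
 *		return ret;
 *
 *	The router now power cycles. Once it is reachable again, and before
 *	any other router operation is issued:
 *
 *	ret = usb4_switch_nvm_authenticate_status(sw, &auth_status);
 *	if (ret)
 *		return ret;
 *	if (auth_status)
 *		... authentication failed, the old NVM is still active ...
 */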
  605. /**
  606. * usb4_switch_credits_init() - Read buffer allocation parameters
  607. * @sw: USB4 router
  608. *
  609. * Reads @sw buffer allocation parameters and initializes @sw buffer
610. * allocation fields accordingly. Specifically @sw->credit_allocation
  611. * is set to %true if these parameters can be used in tunneling.
  612. *
  613. * Returns %0 on success and negative errno otherwise.
  614. */
  615. int usb4_switch_credits_init(struct tb_switch *sw)
  616. {
  617. int max_usb3, min_dp_aux, min_dp_main, max_pcie, max_dma;
  618. int ret, length, i, nports;
  619. const struct tb_port *port;
  620. u32 data[USB4_DATA_DWORDS];
  621. u32 metadata = 0;
  622. u8 status = 0;
  623. memset(data, 0, sizeof(data));
  624. ret = usb4_switch_op_data(sw, USB4_SWITCH_OP_BUFFER_ALLOC, &metadata,
  625. &status, NULL, 0, data, ARRAY_SIZE(data));
  626. if (ret)
  627. return ret;
  628. if (status)
  629. return -EIO;
  630. length = metadata & USB4_BA_LENGTH_MASK;
  631. if (WARN_ON(length > ARRAY_SIZE(data)))
  632. return -EMSGSIZE;
  633. max_usb3 = -1;
  634. min_dp_aux = -1;
  635. min_dp_main = -1;
  636. max_pcie = -1;
  637. max_dma = -1;
  638. tb_sw_dbg(sw, "credit allocation parameters:\n");
  639. for (i = 0; i < length; i++) {
  640. u16 index, value;
  641. index = data[i] & USB4_BA_INDEX_MASK;
  642. value = (data[i] & USB4_BA_VALUE_MASK) >> USB4_BA_VALUE_SHIFT;
  643. switch (index) {
  644. case USB4_BA_MAX_USB3:
  645. tb_sw_dbg(sw, " USB3: %u\n", value);
  646. max_usb3 = value;
  647. break;
  648. case USB4_BA_MIN_DP_AUX:
  649. tb_sw_dbg(sw, " DP AUX: %u\n", value);
  650. min_dp_aux = value;
  651. break;
  652. case USB4_BA_MIN_DP_MAIN:
  653. tb_sw_dbg(sw, " DP main: %u\n", value);
  654. min_dp_main = value;
  655. break;
  656. case USB4_BA_MAX_PCIE:
  657. tb_sw_dbg(sw, " PCIe: %u\n", value);
  658. max_pcie = value;
  659. break;
  660. case USB4_BA_MAX_HI:
  661. tb_sw_dbg(sw, " DMA: %u\n", value);
  662. max_dma = value;
  663. break;
  664. default:
  665. tb_sw_dbg(sw, " unknown credit allocation index %#x, skipping\n",
  666. index);
  667. break;
  668. }
  669. }
  670. /*
  671. * Validate the buffer allocation preferences. If we find
  672. * issues, log a warning and fall back using the hard-coded
  673. * values.
  674. */
  675. /* Host router must report baMaxHI */
  676. if (!tb_route(sw) && max_dma < 0) {
  677. tb_sw_warn(sw, "host router is missing baMaxHI\n");
  678. goto err_invalid;
  679. }
  680. nports = 0;
  681. tb_switch_for_each_port(sw, port) {
  682. if (tb_port_is_null(port))
  683. nports++;
  684. }
  685. /* Must have DP buffer allocation (multiple USB4 ports) */
  686. if (nports > 2 && (min_dp_aux < 0 || min_dp_main < 0)) {
  687. tb_sw_warn(sw, "multiple USB4 ports require baMinDPaux/baMinDPmain\n");
  688. goto err_invalid;
  689. }
  690. tb_switch_for_each_port(sw, port) {
  691. if (tb_port_is_dpout(port) && min_dp_main < 0) {
  692. tb_sw_warn(sw, "missing baMinDPmain");
  693. goto err_invalid;
  694. }
  695. if ((tb_port_is_dpin(port) || tb_port_is_dpout(port)) &&
  696. min_dp_aux < 0) {
  697. tb_sw_warn(sw, "missing baMinDPaux");
  698. goto err_invalid;
  699. }
  700. if ((tb_port_is_usb3_down(port) || tb_port_is_usb3_up(port)) &&
  701. max_usb3 < 0) {
  702. tb_sw_warn(sw, "missing baMaxUSB3");
  703. goto err_invalid;
  704. }
  705. if ((tb_port_is_pcie_down(port) || tb_port_is_pcie_up(port)) &&
  706. max_pcie < 0) {
  707. tb_sw_warn(sw, "missing baMaxPCIe");
  708. goto err_invalid;
  709. }
  710. }
  711. /*
  712. * Buffer allocation passed the validation so we can use it in
  713. * path creation.
  714. */
  715. sw->credit_allocation = true;
  716. if (max_usb3 > 0)
  717. sw->max_usb3_credits = max_usb3;
  718. if (min_dp_aux > 0)
  719. sw->min_dp_aux_credits = min_dp_aux;
  720. if (min_dp_main > 0)
  721. sw->min_dp_main_credits = min_dp_main;
  722. if (max_pcie > 0)
  723. sw->max_pcie_credits = max_pcie;
  724. if (max_dma > 0)
  725. sw->max_dma_credits = max_dma;
  726. return 0;
  727. err_invalid:
  728. return -EINVAL;
  729. }
  730. /**
  731. * usb4_switch_query_dp_resource() - Query availability of DP IN resource
  732. * @sw: USB4 router
  733. * @in: DP IN adapter
  734. *
  735. * For DP tunneling this function can be used to query availability of
  736. * DP IN resource. Returns true if the resource is available for DP
  737. * tunneling, false otherwise.
  738. */
  739. bool usb4_switch_query_dp_resource(struct tb_switch *sw, struct tb_port *in)
  740. {
  741. u32 metadata = in->port;
  742. u8 status;
  743. int ret;
  744. ret = usb4_switch_op(sw, USB4_SWITCH_OP_QUERY_DP_RESOURCE, &metadata,
  745. &status);
  746. /*
  747. * If DP resource allocation is not supported assume it is
  748. * always available.
  749. */
  750. if (ret == -EOPNOTSUPP)
  751. return true;
  752. if (ret)
  753. return false;
  754. return !status;
  755. }
  756. /**
  757. * usb4_switch_alloc_dp_resource() - Allocate DP IN resource
  758. * @sw: USB4 router
  759. * @in: DP IN adapter
  760. *
  761. * Allocates DP IN resource for DP tunneling using USB4 router
  762. * operations. If the resource was allocated returns %0. Otherwise
  763. * returns negative errno, in particular %-EBUSY if the resource is
  764. * already allocated.
  765. */
  766. int usb4_switch_alloc_dp_resource(struct tb_switch *sw, struct tb_port *in)
  767. {
  768. u32 metadata = in->port;
  769. u8 status;
  770. int ret;
  771. ret = usb4_switch_op(sw, USB4_SWITCH_OP_ALLOC_DP_RESOURCE, &metadata,
  772. &status);
  773. if (ret == -EOPNOTSUPP)
  774. return 0;
  775. if (ret)
  776. return ret;
  777. return status ? -EBUSY : 0;
  778. }
  779. /**
  780. * usb4_switch_dealloc_dp_resource() - Releases allocated DP IN resource
  781. * @sw: USB4 router
  782. * @in: DP IN adapter
  783. *
  784. * Releases the previously allocated DP IN resource.
  785. */
  786. int usb4_switch_dealloc_dp_resource(struct tb_switch *sw, struct tb_port *in)
  787. {
  788. u32 metadata = in->port;
  789. u8 status;
  790. int ret;
  791. ret = usb4_switch_op(sw, USB4_SWITCH_OP_DEALLOC_DP_RESOURCE, &metadata,
  792. &status);
  793. if (ret == -EOPNOTSUPP)
  794. return 0;
  795. if (ret)
  796. return ret;
  797. return status ? -EIO : 0;
  798. }
  799. static int usb4_port_idx(const struct tb_switch *sw, const struct tb_port *port)
  800. {
  801. struct tb_port *p;
  802. int usb4_idx = 0;
  803. /* Assume port is primary */
  804. tb_switch_for_each_port(sw, p) {
  805. if (!tb_port_is_null(p))
  806. continue;
  807. if (tb_is_upstream_port(p))
  808. continue;
  809. if (!p->link_nr) {
  810. if (p == port)
  811. break;
  812. usb4_idx++;
  813. }
  814. }
  815. return usb4_idx;
  816. }
  817. /**
  818. * usb4_switch_map_pcie_down() - Map USB4 port to a PCIe downstream adapter
  819. * @sw: USB4 router
  820. * @port: USB4 port
  821. *
  822. * USB4 routers have direct mapping between USB4 ports and PCIe
  823. * downstream adapters where the PCIe topology is extended. This
  824. * function returns the corresponding downstream PCIe adapter or %NULL
  825. * if no such mapping was possible.
  826. */
  827. struct tb_port *usb4_switch_map_pcie_down(struct tb_switch *sw,
  828. const struct tb_port *port)
  829. {
  830. int usb4_idx = usb4_port_idx(sw, port);
  831. struct tb_port *p;
  832. int pcie_idx = 0;
  833. /* Find PCIe down port matching usb4_port */
  834. tb_switch_for_each_port(sw, p) {
  835. if (!tb_port_is_pcie_down(p))
  836. continue;
  837. if (pcie_idx == usb4_idx)
  838. return p;
  839. pcie_idx++;
  840. }
  841. return NULL;
  842. }
  843. /**
  844. * usb4_switch_map_usb3_down() - Map USB4 port to a USB3 downstream adapter
  845. * @sw: USB4 router
  846. * @port: USB4 port
  847. *
  848. * USB4 routers have direct mapping between USB4 ports and USB 3.x
  849. * downstream adapters where the USB 3.x topology is extended. This
  850. * function returns the corresponding downstream USB 3.x adapter or
  851. * %NULL if no such mapping was possible.
  852. */
  853. struct tb_port *usb4_switch_map_usb3_down(struct tb_switch *sw,
  854. const struct tb_port *port)
  855. {
  856. int usb4_idx = usb4_port_idx(sw, port);
  857. struct tb_port *p;
  858. int usb_idx = 0;
  859. /* Find USB3 down port matching usb4_port */
  860. tb_switch_for_each_port(sw, p) {
  861. if (!tb_port_is_usb3_down(p))
  862. continue;
  863. if (usb_idx == usb4_idx)
  864. return p;
  865. usb_idx++;
  866. }
  867. return NULL;
  868. }
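/*
 * Both mapping helpers above use the same ordinal scheme:
 * usb4_port_idx() counts non-upstream lane 0 adapters in port order, so
 * the Nth such USB4 port maps to the Nth PCIe or USB 3.x downstream
 * adapter of the router. A minimal sketch, assuming @port is a lane 0
 * USB4 port of @sw:
 *
 *	struct tb_port *pcie_down;
 *
 *	pcie_down = usb4_switch_map_pcie_down(sw, port);
 *	if (!pcie_down)
 *		... no PCIe downstream adapter behind this USB4 port ...
 */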
  869. /**
  870. * usb4_switch_add_ports() - Add USB4 ports for this router
  871. * @sw: USB4 router
  872. *
873. * For a USB4 router, finds all USB4 ports and registers devices for each.
874. * Can be called for any router.
875. *
876. * Returns %0 in case of success and negative errno in case of failure.
  877. */
  878. int usb4_switch_add_ports(struct tb_switch *sw)
  879. {
  880. struct tb_port *port;
  881. if (tb_switch_is_icm(sw) || !tb_switch_is_usb4(sw))
  882. return 0;
  883. tb_switch_for_each_port(sw, port) {
  884. struct usb4_port *usb4;
  885. if (!tb_port_is_null(port))
  886. continue;
  887. if (!port->cap_usb4)
  888. continue;
  889. usb4 = usb4_port_device_add(port);
  890. if (IS_ERR(usb4)) {
  891. usb4_switch_remove_ports(sw);
  892. return PTR_ERR(usb4);
  893. }
  894. port->usb4 = usb4;
  895. }
  896. return 0;
  897. }
  898. /**
  899. * usb4_switch_remove_ports() - Removes USB4 ports from this router
  900. * @sw: USB4 router
  901. *
  902. * Unregisters previously registered USB4 ports.
  903. */
  904. void usb4_switch_remove_ports(struct tb_switch *sw)
  905. {
  906. struct tb_port *port;
  907. tb_switch_for_each_port(sw, port) {
  908. if (port->usb4) {
  909. usb4_port_device_remove(port->usb4);
  910. port->usb4 = NULL;
  911. }
  912. }
  913. }
  914. /**
  915. * usb4_port_unlock() - Unlock USB4 downstream port
  916. * @port: USB4 port to unlock
  917. *
  918. * Unlocks USB4 downstream port so that the connection manager can
  919. * access the router below this port.
  920. */
  921. int usb4_port_unlock(struct tb_port *port)
  922. {
  923. int ret;
  924. u32 val;
  925. ret = tb_port_read(port, &val, TB_CFG_PORT, ADP_CS_4, 1);
  926. if (ret)
  927. return ret;
  928. val &= ~ADP_CS_4_LCK;
  929. return tb_port_write(port, &val, TB_CFG_PORT, ADP_CS_4, 1);
  930. }
  931. /**
  932. * usb4_port_hotplug_enable() - Enables hotplug for a port
  933. * @port: USB4 port to operate on
  934. *
  935. * Enables hot plug events on a given port. This is only intended
  936. * to be used on lane, DP-IN, and DP-OUT adapters.
  937. */
  938. int usb4_port_hotplug_enable(struct tb_port *port)
  939. {
  940. int ret;
  941. u32 val;
  942. ret = tb_port_read(port, &val, TB_CFG_PORT, ADP_CS_5, 1);
  943. if (ret)
  944. return ret;
  945. val &= ~ADP_CS_5_DHP;
  946. return tb_port_write(port, &val, TB_CFG_PORT, ADP_CS_5, 1);
  947. }
  948. /**
  949. * usb4_port_reset() - Issue downstream port reset
  950. * @port: USB4 port to reset
  951. *
  952. * Issues downstream port reset to @port.
  953. */
  954. int usb4_port_reset(struct tb_port *port)
  955. {
  956. int ret;
  957. u32 val;
  958. if (!port->cap_usb4)
  959. return -EINVAL;
  960. ret = tb_port_read(port, &val, TB_CFG_PORT,
  961. port->cap_usb4 + PORT_CS_19, 1);
  962. if (ret)
  963. return ret;
  964. val |= PORT_CS_19_DPR;
  965. ret = tb_port_write(port, &val, TB_CFG_PORT,
  966. port->cap_usb4 + PORT_CS_19, 1);
  967. if (ret)
  968. return ret;
  969. fsleep(10000);
  970. ret = tb_port_read(port, &val, TB_CFG_PORT,
  971. port->cap_usb4 + PORT_CS_19, 1);
  972. if (ret)
  973. return ret;
  974. val &= ~PORT_CS_19_DPR;
  975. return tb_port_write(port, &val, TB_CFG_PORT,
  976. port->cap_usb4 + PORT_CS_19, 1);
  977. }
  978. static int usb4_port_set_configured(struct tb_port *port, bool configured)
  979. {
  980. int ret;
  981. u32 val;
  982. if (!port->cap_usb4)
  983. return -EINVAL;
  984. ret = tb_port_read(port, &val, TB_CFG_PORT,
  985. port->cap_usb4 + PORT_CS_19, 1);
  986. if (ret)
  987. return ret;
  988. if (configured)
  989. val |= PORT_CS_19_PC;
  990. else
  991. val &= ~PORT_CS_19_PC;
  992. return tb_port_write(port, &val, TB_CFG_PORT,
  993. port->cap_usb4 + PORT_CS_19, 1);
  994. }
  995. /**
  996. * usb4_port_configure() - Set USB4 port configured
997. * @port: USB4 port
  998. *
  999. * Sets the USB4 link to be configured for power management purposes.
  1000. */
  1001. int usb4_port_configure(struct tb_port *port)
  1002. {
  1003. return usb4_port_set_configured(port, true);
  1004. }
  1005. /**
  1006. * usb4_port_unconfigure() - Set USB4 port unconfigured
1007. * @port: USB4 port
  1008. *
  1009. * Sets the USB4 link to be unconfigured for power management purposes.
  1010. */
  1011. void usb4_port_unconfigure(struct tb_port *port)
  1012. {
  1013. usb4_port_set_configured(port, false);
  1014. }
  1015. static int usb4_set_xdomain_configured(struct tb_port *port, bool configured)
  1016. {
  1017. int ret;
  1018. u32 val;
  1019. if (!port->cap_usb4)
  1020. return -EINVAL;
  1021. ret = tb_port_read(port, &val, TB_CFG_PORT,
  1022. port->cap_usb4 + PORT_CS_19, 1);
  1023. if (ret)
  1024. return ret;
  1025. if (configured)
  1026. val |= PORT_CS_19_PID;
  1027. else
  1028. val &= ~PORT_CS_19_PID;
  1029. return tb_port_write(port, &val, TB_CFG_PORT,
  1030. port->cap_usb4 + PORT_CS_19, 1);
  1031. }
  1032. /**
  1033. * usb4_port_configure_xdomain() - Configure port for XDomain
  1034. * @port: USB4 port connected to another host
  1035. * @xd: XDomain that is connected to the port
  1036. *
  1037. * Marks the USB4 port as being connected to another host and updates
1038. * the link type. Returns %0 on success and negative errno on failure.
  1039. */
  1040. int usb4_port_configure_xdomain(struct tb_port *port, struct tb_xdomain *xd)
  1041. {
  1042. xd->link_usb4 = link_is_usb4(port);
  1043. return usb4_set_xdomain_configured(port, true);
  1044. }
  1045. /**
  1046. * usb4_port_unconfigure_xdomain() - Unconfigure port for XDomain
  1047. * @port: USB4 port that was connected to another host
  1048. *
  1049. * Clears USB4 port from being marked as XDomain.
  1050. */
  1051. void usb4_port_unconfigure_xdomain(struct tb_port *port)
  1052. {
  1053. usb4_set_xdomain_configured(port, false);
  1054. }
  1055. static int usb4_port_wait_for_bit(struct tb_port *port, u32 offset, u32 bit,
  1056. u32 value, int timeout_msec, unsigned long delay_usec)
  1057. {
  1058. ktime_t timeout = ktime_add_ms(ktime_get(), timeout_msec);
  1059. do {
  1060. u32 val;
  1061. int ret;
  1062. ret = tb_port_read(port, &val, TB_CFG_PORT, offset, 1);
  1063. if (ret)
  1064. return ret;
  1065. if ((val & bit) == value)
  1066. return 0;
  1067. fsleep(delay_usec);
  1068. } while (ktime_before(ktime_get(), timeout));
  1069. return -ETIMEDOUT;
  1070. }
  1071. static int usb4_port_read_data(struct tb_port *port, void *data, size_t dwords)
  1072. {
  1073. if (dwords > USB4_DATA_DWORDS)
  1074. return -EINVAL;
  1075. return tb_port_read(port, data, TB_CFG_PORT, port->cap_usb4 + PORT_CS_2,
  1076. dwords);
  1077. }
  1078. static int usb4_port_write_data(struct tb_port *port, const void *data,
  1079. size_t dwords)
  1080. {
  1081. if (dwords > USB4_DATA_DWORDS)
  1082. return -EINVAL;
  1083. return tb_port_write(port, data, TB_CFG_PORT, port->cap_usb4 + PORT_CS_2,
  1084. dwords);
  1085. }
  1086. /**
  1087. * usb4_port_sb_read() - Read from sideband register
  1088. * @port: USB4 port to read
  1089. * @target: Sideband target
1090. * @index: Retimer index if target is %USB4_SB_TARGET_RETIMER
  1091. * @reg: Sideband register index
  1092. * @buf: Buffer where the sideband data is copied
  1093. * @size: Size of @buf
  1094. *
  1095. * Reads data from sideband register @reg and copies it into @buf.
  1096. * Returns %0 in case of success and negative errno in case of failure.
  1097. */
  1098. int usb4_port_sb_read(struct tb_port *port, enum usb4_sb_target target, u8 index,
  1099. u8 reg, void *buf, u8 size)
  1100. {
  1101. size_t dwords = DIV_ROUND_UP(size, 4);
  1102. int ret;
  1103. u32 val;
  1104. if (!port->cap_usb4)
  1105. return -EINVAL;
  1106. val = reg;
  1107. val |= size << PORT_CS_1_LENGTH_SHIFT;
  1108. val |= (target << PORT_CS_1_TARGET_SHIFT) & PORT_CS_1_TARGET_MASK;
  1109. if (target == USB4_SB_TARGET_RETIMER)
  1110. val |= (index << PORT_CS_1_RETIMER_INDEX_SHIFT);
  1111. val |= PORT_CS_1_PND;
  1112. ret = tb_port_write(port, &val, TB_CFG_PORT,
  1113. port->cap_usb4 + PORT_CS_1, 1);
  1114. if (ret)
  1115. return ret;
  1116. ret = usb4_port_wait_for_bit(port, port->cap_usb4 + PORT_CS_1,
  1117. PORT_CS_1_PND, 0, 500, USB4_PORT_SB_DELAY);
  1118. if (ret)
  1119. return ret;
  1120. ret = tb_port_read(port, &val, TB_CFG_PORT,
  1121. port->cap_usb4 + PORT_CS_1, 1);
  1122. if (ret)
  1123. return ret;
  1124. if (val & PORT_CS_1_NR)
  1125. return -ENODEV;
  1126. if (val & PORT_CS_1_RC)
  1127. return -EIO;
  1128. return buf ? usb4_port_read_data(port, buf, dwords) : 0;
  1129. }
  1130. /**
  1131. * usb4_port_sb_write() - Write to sideband register
  1132. * @port: USB4 port to write
  1133. * @target: Sideband target
1134. * @index: Retimer index if target is %USB4_SB_TARGET_RETIMER
  1135. * @reg: Sideband register index
  1136. * @buf: Data to write
  1137. * @size: Size of @buf
  1138. *
  1139. * Writes @buf to sideband register @reg. Returns %0 in case of success
  1140. * and negative errno in case of failure.
  1141. */
  1142. int usb4_port_sb_write(struct tb_port *port, enum usb4_sb_target target,
  1143. u8 index, u8 reg, const void *buf, u8 size)
  1144. {
  1145. size_t dwords = DIV_ROUND_UP(size, 4);
  1146. int ret;
  1147. u32 val;
  1148. if (!port->cap_usb4)
  1149. return -EINVAL;
  1150. if (buf) {
  1151. ret = usb4_port_write_data(port, buf, dwords);
  1152. if (ret)
  1153. return ret;
  1154. }
  1155. val = reg;
  1156. val |= size << PORT_CS_1_LENGTH_SHIFT;
  1157. val |= PORT_CS_1_WNR_WRITE;
  1158. val |= (target << PORT_CS_1_TARGET_SHIFT) & PORT_CS_1_TARGET_MASK;
  1159. if (target == USB4_SB_TARGET_RETIMER)
  1160. val |= (index << PORT_CS_1_RETIMER_INDEX_SHIFT);
  1161. val |= PORT_CS_1_PND;
  1162. ret = tb_port_write(port, &val, TB_CFG_PORT,
  1163. port->cap_usb4 + PORT_CS_1, 1);
  1164. if (ret)
  1165. return ret;
  1166. ret = usb4_port_wait_for_bit(port, port->cap_usb4 + PORT_CS_1,
  1167. PORT_CS_1_PND, 0, 500, USB4_PORT_SB_DELAY);
  1168. if (ret)
  1169. return ret;
  1170. ret = tb_port_read(port, &val, TB_CFG_PORT,
  1171. port->cap_usb4 + PORT_CS_1, 1);
  1172. if (ret)
  1173. return ret;
  1174. if (val & PORT_CS_1_NR)
  1175. return -ENODEV;
  1176. if (val & PORT_CS_1_RC)
  1177. return -EIO;
  1178. return 0;
  1179. }
  1180. static int usb4_port_sb_opcode_err_to_errno(u32 val)
  1181. {
  1182. switch (val) {
  1183. case 0:
  1184. return 0;
  1185. case USB4_SB_OPCODE_ERR:
  1186. return -EAGAIN;
  1187. case USB4_SB_OPCODE_ONS:
  1188. return -EOPNOTSUPP;
  1189. default:
  1190. return -EIO;
  1191. }
  1192. }
  1193. static int usb4_port_sb_op(struct tb_port *port, enum usb4_sb_target target,
  1194. u8 index, enum usb4_sb_opcode opcode, int timeout_msec)
  1195. {
  1196. ktime_t timeout;
  1197. u32 val;
  1198. int ret;
  1199. val = opcode;
  1200. ret = usb4_port_sb_write(port, target, index, USB4_SB_OPCODE, &val,
  1201. sizeof(val));
  1202. if (ret)
  1203. return ret;
  1204. timeout = ktime_add_ms(ktime_get(), timeout_msec);
  1205. do {
  1206. /* Check results */
  1207. ret = usb4_port_sb_read(port, target, index, USB4_SB_OPCODE,
  1208. &val, sizeof(val));
  1209. if (ret)
  1210. return ret;
  1211. if (val != opcode)
  1212. return usb4_port_sb_opcode_err_to_errno(val);
  1213. fsleep(USB4_PORT_SB_DELAY);
  1214. } while (ktime_before(ktime_get(), timeout));
  1215. return -ETIMEDOUT;
  1216. }
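/*
 * A minimal sketch of a complete sideband transaction built from the
 * helpers above: write USB4_SB_METADATA, trigger an opcode with
 * usb4_port_sb_op() and read the result back from USB4_SB_DATA. The
 * opcode, metadata value and 500 ms timeout are examples only.
 *
 *	u32 metadata = 0, results[2];
 *	int ret;
 *
 *	ret = usb4_port_sb_write(port, target, index, USB4_SB_METADATA,
 *				 &metadata, sizeof(metadata));
 *	if (ret)
 *		return ret;
 *	ret = usb4_port_sb_op(port, target, index, opcode, 500);
 *	if (ret)
 *		return ret;
 *	return usb4_port_sb_read(port, target, index, USB4_SB_DATA,
 *				 results, sizeof(results));
 */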
  1217. static int usb4_port_set_router_offline(struct tb_port *port, bool offline)
  1218. {
  1219. u32 val = !offline;
  1220. int ret;
  1221. ret = usb4_port_sb_write(port, USB4_SB_TARGET_ROUTER, 0,
  1222. USB4_SB_METADATA, &val, sizeof(val));
  1223. if (ret)
  1224. return ret;
  1225. val = USB4_SB_OPCODE_ROUTER_OFFLINE;
  1226. return usb4_port_sb_write(port, USB4_SB_TARGET_ROUTER, 0,
  1227. USB4_SB_OPCODE, &val, sizeof(val));
  1228. }
  1229. /**
  1230. * usb4_port_router_offline() - Put the USB4 port to offline mode
  1231. * @port: USB4 port
  1232. *
  1233. * This function puts the USB4 port into offline mode. In this mode the
1234. * port does not react to hotplug events anymore. This needs to be
1235. * called before retimer access is done when the USB4 link is not up.
  1236. *
  1237. * Returns %0 in case of success and negative errno if there was an
  1238. * error.
  1239. */
  1240. int usb4_port_router_offline(struct tb_port *port)
  1241. {
  1242. return usb4_port_set_router_offline(port, true);
  1243. }
  1244. /**
  1245. * usb4_port_router_online() - Put the USB4 port back to online
  1246. * @port: USB4 port
  1247. *
  1248. * Makes the USB4 port functional again.
  1249. */
  1250. int usb4_port_router_online(struct tb_port *port)
  1251. {
  1252. return usb4_port_set_router_offline(port, false);
  1253. }
  1254. /**
  1255. * usb4_port_enumerate_retimers() - Send RT broadcast transaction
  1256. * @port: USB4 port
  1257. *
1258. * This forces the USB4 port to send a broadcast RT transaction which
1259. * makes the retimers on the link assign indices to themselves. Returns
  1260. * %0 in case of success and negative errno if there was an error.
  1261. */
  1262. int usb4_port_enumerate_retimers(struct tb_port *port)
  1263. {
  1264. u32 val;
  1265. val = USB4_SB_OPCODE_ENUMERATE_RETIMERS;
  1266. return usb4_port_sb_write(port, USB4_SB_TARGET_ROUTER, 0,
  1267. USB4_SB_OPCODE, &val, sizeof(val));
  1268. }
  1269. /**
  1270. * usb4_port_clx_supported() - Check if CLx is supported by the link
  1271. * @port: Port to check for CLx support for
  1272. *
  1273. * PORT_CS_18_CPS bit reflects if the link supports CLx including
  1274. * active cables (if connected on the link).
  1275. */
  1276. bool usb4_port_clx_supported(struct tb_port *port)
  1277. {
  1278. int ret;
  1279. u32 val;
  1280. ret = tb_port_read(port, &val, TB_CFG_PORT,
  1281. port->cap_usb4 + PORT_CS_18, 1);
  1282. if (ret)
  1283. return false;
  1284. return !!(val & PORT_CS_18_CPS);
  1285. }
  1286. /**
  1287. * usb4_port_asym_supported() - If the port supports asymmetric link
  1288. * @port: USB4 port
  1289. *
1290. * Checks if the port and the cable support asymmetric link and returns
  1291. * %true in that case.
  1292. */
  1293. bool usb4_port_asym_supported(struct tb_port *port)
  1294. {
  1295. u32 val;
  1296. if (!port->cap_usb4)
  1297. return false;
  1298. if (tb_port_read(port, &val, TB_CFG_PORT, port->cap_usb4 + PORT_CS_18, 1))
  1299. return false;
  1300. return !!(val & PORT_CS_18_CSA);
  1301. }
  1302. /**
  1303. * usb4_port_asym_set_link_width() - Set link width to asymmetric or symmetric
  1304. * @port: USB4 port
  1305. * @width: Asymmetric width to configure
  1306. *
  1307. * Sets USB4 port link width to @width. Can be called for widths where
1308. * usb4_port_asym_width_supported() returned %true.
  1309. */
  1310. int usb4_port_asym_set_link_width(struct tb_port *port, enum tb_link_width width)
  1311. {
  1312. u32 val;
  1313. int ret;
  1314. if (!port->cap_phy)
  1315. return -EINVAL;
  1316. ret = tb_port_read(port, &val, TB_CFG_PORT,
  1317. port->cap_phy + LANE_ADP_CS_1, 1);
  1318. if (ret)
  1319. return ret;
  1320. val &= ~LANE_ADP_CS_1_TARGET_WIDTH_ASYM_MASK;
  1321. switch (width) {
  1322. case TB_LINK_WIDTH_DUAL:
  1323. val |= FIELD_PREP(LANE_ADP_CS_1_TARGET_WIDTH_ASYM_MASK,
  1324. LANE_ADP_CS_1_TARGET_WIDTH_ASYM_DUAL);
  1325. break;
  1326. case TB_LINK_WIDTH_ASYM_TX:
  1327. val |= FIELD_PREP(LANE_ADP_CS_1_TARGET_WIDTH_ASYM_MASK,
  1328. LANE_ADP_CS_1_TARGET_WIDTH_ASYM_TX);
  1329. break;
  1330. case TB_LINK_WIDTH_ASYM_RX:
  1331. val |= FIELD_PREP(LANE_ADP_CS_1_TARGET_WIDTH_ASYM_MASK,
  1332. LANE_ADP_CS_1_TARGET_WIDTH_ASYM_RX);
  1333. break;
  1334. default:
  1335. return -EINVAL;
  1336. }
  1337. return tb_port_write(port, &val, TB_CFG_PORT,
  1338. port->cap_phy + LANE_ADP_CS_1, 1);
  1339. }

/**
 * usb4_port_asym_start() - Start symmetry change and wait for completion
 * @port: USB4 port
 *
 * Start symmetry change of the link to asymmetric or symmetric
 * (according to what was previously set in tb_port_set_link_width()).
 * Wait for completion of the change.
 *
 * Returns %0 in case of success, %-ETIMEDOUT in case of timeout or
 * a negative errno in case of a failure.
 */
int usb4_port_asym_start(struct tb_port *port)
{
        int ret;
        u32 val;

        ret = tb_port_read(port, &val, TB_CFG_PORT,
                           port->cap_usb4 + PORT_CS_19, 1);
        if (ret)
                return ret;

        val &= ~PORT_CS_19_START_ASYM;
        val |= FIELD_PREP(PORT_CS_19_START_ASYM, 1);

        ret = tb_port_write(port, &val, TB_CFG_PORT,
                            port->cap_usb4 + PORT_CS_19, 1);
        if (ret)
                return ret;

        /*
         * Wait for PORT_CS_19_START_ASYM to be 0. This means the USB4
         * port started the symmetry transition.
         */
        ret = usb4_port_wait_for_bit(port, port->cap_usb4 + PORT_CS_19,
                                     PORT_CS_19_START_ASYM, 0, 1000,
                                     USB4_PORT_DELAY);
        if (ret)
                return ret;

        /* Then wait for the transition to be completed */
        return usb4_port_wait_for_bit(port, port->cap_usb4 + PORT_CS_18,
                                      PORT_CS_18_TIP, 0, 5000, USB4_PORT_DELAY);
}
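
/*
 * Illustrative sketch (not compiled): a connection manager that wants to
 * move a link to asymmetric TX would typically program the target width
 * and then trigger the transition from the USB4 port. The error handling
 * below is an assumption about how a caller might react; the real
 * callers live in tb.c.
 *
 *      int ret;
 *
 *      ret = usb4_port_asym_set_link_width(port, TB_LINK_WIDTH_ASYM_TX);
 *      if (!ret)
 *              ret = usb4_port_asym_start(port);
 *      if (ret)
 *              tb_port_warn(port, "failed to switch link to asymmetric\n");
 */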

/**
 * usb4_port_margining_caps() - Read USB4 port margining capabilities
 * @port: USB4 port
 * @target: Sideband target
 * @index: Retimer index if target is %USB4_SB_TARGET_RETIMER
 * @caps: Array with at least two elements to hold the results
 *
 * Reads the USB4 port lane margining capabilities into @caps.
 */
int usb4_port_margining_caps(struct tb_port *port, enum usb4_sb_target target,
                             u8 index, u32 *caps)
{
        int ret;

        ret = usb4_port_sb_op(port, target, index,
                              USB4_SB_OPCODE_READ_LANE_MARGINING_CAP, 500);
        if (ret)
                return ret;

        return usb4_port_sb_read(port, target, index, USB4_SB_DATA, caps,
                                 sizeof(*caps) * 2);
}

/**
 * usb4_port_hw_margin() - Run hardware lane margining on port
 * @port: USB4 port
 * @target: Sideband target
 * @index: Retimer index if target is %USB4_SB_TARGET_RETIMER
 * @params: Parameters for USB4 hardware margining
 * @results: Array with at least two elements to hold the results
 *
 * Runs hardware lane margining on USB4 port and returns the result in
 * @results.
 */
int usb4_port_hw_margin(struct tb_port *port, enum usb4_sb_target target,
                        u8 index, const struct usb4_port_margining_params *params,
                        u32 *results)
{
        u32 val;
        int ret;

        if (WARN_ON_ONCE(!params))
                return -EINVAL;

        val = params->lanes;
        if (params->time)
                val |= USB4_MARGIN_HW_TIME;
        if (params->right_high)
                val |= USB4_MARGIN_HW_RH;
        if (params->ber_level)
                val |= FIELD_PREP(USB4_MARGIN_HW_BER_MASK, params->ber_level);
        if (params->optional_voltage_offset_range)
                val |= USB4_MARGIN_HW_OPT_VOLTAGE;

        ret = usb4_port_sb_write(port, target, index, USB4_SB_METADATA, &val,
                                 sizeof(val));
        if (ret)
                return ret;

        ret = usb4_port_sb_op(port, target, index,
                              USB4_SB_OPCODE_RUN_HW_LANE_MARGINING, 2500);
        if (ret)
                return ret;

        return usb4_port_sb_read(port, target, index, USB4_SB_DATA, results,
                                 sizeof(*results) * 2);
}
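
/*
 * Illustrative sketch (not compiled): the debugfs margining code is the
 * real consumer of these helpers, but the basic sequence for running a
 * hardware margining test against the router end of the port looks
 * roughly like this. The parameter values are placeholders.
 *
 *      struct usb4_port_margining_params params = {
 *              .lanes = 0,             // lane selector, placeholder
 *              .time = false,          // voltage margining
 *              .ber_level = 3,         // placeholder BER level
 *      };
 *      u32 caps[2], results[2];
 *
 *      if (!usb4_port_margining_caps(port, USB4_SB_TARGET_ROUTER, 0, caps) &&
 *          !usb4_port_hw_margin(port, USB4_SB_TARGET_ROUTER, 0, &params,
 *                               results))
 *              tb_port_dbg(port, "margining results %#x %#x\n",
 *                          results[0], results[1]);
 */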

/**
 * usb4_port_sw_margin() - Run software lane margining on port
 * @port: USB4 port
 * @target: Sideband target
 * @index: Retimer index if target is %USB4_SB_TARGET_RETIMER
 * @params: Parameters for USB4 software margining
 * @results: Data word for the operation completion data
 *
 * Runs software lane margining on USB4 port. Read back the error
 * counters by calling usb4_port_sw_margin_errors(). Returns %0 in
 * success and negative errno otherwise.
 */
int usb4_port_sw_margin(struct tb_port *port, enum usb4_sb_target target,
                        u8 index, const struct usb4_port_margining_params *params,
                        u32 *results)
{
        u32 val;
        int ret;

        if (WARN_ON_ONCE(!params))
                return -EINVAL;

        val = params->lanes;
        if (params->time)
                val |= USB4_MARGIN_SW_TIME;
        if (params->optional_voltage_offset_range)
                val |= USB4_MARGIN_SW_OPT_VOLTAGE;
        if (params->right_high)
                val |= USB4_MARGIN_SW_RH;
        val |= FIELD_PREP(USB4_MARGIN_SW_COUNTER_MASK, params->error_counter);
        val |= FIELD_PREP(USB4_MARGIN_SW_VT_MASK, params->voltage_time_offset);

        ret = usb4_port_sb_write(port, target, index, USB4_SB_METADATA, &val,
                                 sizeof(val));
        if (ret)
                return ret;

        ret = usb4_port_sb_op(port, target, index,
                              USB4_SB_OPCODE_RUN_SW_LANE_MARGINING, 2500);
        if (ret)
                return ret;

        return usb4_port_sb_read(port, target, index, USB4_SB_DATA, results,
                                 sizeof(*results));
}

/**
 * usb4_port_sw_margin_errors() - Read the software margining error counters
 * @port: USB4 port
 * @target: Sideband target
 * @index: Retimer index if target is %USB4_SB_TARGET_RETIMER
 * @errors: Error metadata is copied here.
 *
 * This reads back the software margining error counters from the port.
 * Returns %0 in success and negative errno otherwise.
 */
int usb4_port_sw_margin_errors(struct tb_port *port, enum usb4_sb_target target,
                               u8 index, u32 *errors)
{
        int ret;

        ret = usb4_port_sb_op(port, target, index,
                              USB4_SB_OPCODE_READ_SW_MARGIN_ERR, 150);
        if (ret)
                return ret;

        return usb4_port_sb_read(port, target, index, USB4_SB_METADATA, errors,
                                 sizeof(*errors));
}

static inline int usb4_port_retimer_op(struct tb_port *port, u8 index,
                                       enum usb4_sb_opcode opcode,
                                       int timeout_msec)
{
        return usb4_port_sb_op(port, USB4_SB_TARGET_RETIMER, index, opcode,
                               timeout_msec);
}

/**
 * usb4_port_retimer_set_inbound_sbtx() - Enable sideband channel transactions
 * @port: USB4 port
 * @index: Retimer index
 *
 * Enables sideband channel transactions on SBTX. Can be used when USB4
 * link does not go up, for example if there is no device connected.
 */
int usb4_port_retimer_set_inbound_sbtx(struct tb_port *port, u8 index)
{
        int ret;

        ret = usb4_port_retimer_op(port, index, USB4_SB_OPCODE_SET_INBOUND_SBTX,
                                   500);
        if (ret != -ENODEV)
                return ret;

        /*
         * Per the USB4 retimer spec, the retimer is not required to
         * send an RT (Retimer Transaction) response for the first
         * SET_INBOUND_SBTX command.
         */
        return usb4_port_retimer_op(port, index, USB4_SB_OPCODE_SET_INBOUND_SBTX,
                                    500);
}

/**
 * usb4_port_retimer_unset_inbound_sbtx() - Disable sideband channel transactions
 * @port: USB4 port
 * @index: Retimer index
 *
 * Disables sideband channel transactions on SBTX. The reverse of
 * usb4_port_retimer_set_inbound_sbtx().
 */
int usb4_port_retimer_unset_inbound_sbtx(struct tb_port *port, u8 index)
{
        return usb4_port_retimer_op(port, index,
                                    USB4_SB_OPCODE_UNSET_INBOUND_SBTX, 500);
}

/**
 * usb4_port_retimer_is_last() - Is the retimer last on-board retimer
 * @port: USB4 port
 * @index: Retimer index
 *
 * If the retimer at @index is the last one (connected directly to the
 * Type-C port) this function returns %1. If it is not, returns %0. If
 * the retimer is not present returns %-ENODEV. Otherwise returns
 * negative errno.
 */
int usb4_port_retimer_is_last(struct tb_port *port, u8 index)
{
        u32 metadata;
        int ret;

        ret = usb4_port_retimer_op(port, index, USB4_SB_OPCODE_QUERY_LAST_RETIMER,
                                   500);
        if (ret)
                return ret;

        ret = usb4_port_sb_read(port, USB4_SB_TARGET_RETIMER, index,
                                USB4_SB_METADATA, &metadata, sizeof(metadata));
        return ret ? ret : metadata & 1;
}

/**
 * usb4_port_retimer_is_cable() - Is the retimer cable retimer
 * @port: USB4 port
 * @index: Retimer index
 *
 * If the retimer at @index is a cable retimer this function returns %1,
 * and %0 if it is an on-board retimer. In case a retimer is not present
 * at @index returns %-ENODEV. Otherwise returns negative errno.
 */
int usb4_port_retimer_is_cable(struct tb_port *port, u8 index)
{
        u32 metadata;
        int ret;

        ret = usb4_port_retimer_op(port, index, USB4_SB_OPCODE_QUERY_CABLE_RETIMER,
                                   500);
        if (ret)
                return ret;

        ret = usb4_port_sb_read(port, USB4_SB_TARGET_RETIMER, index,
                                USB4_SB_METADATA, &metadata, sizeof(metadata));
        return ret ? ret : metadata & 1;
}

/**
 * usb4_port_retimer_nvm_sector_size() - Read retimer NVM sector size
 * @port: USB4 port
 * @index: Retimer index
 *
 * Reads NVM sector size (in bytes) of a retimer at @index. This
 * operation can be used to determine whether the retimer supports NVM
 * upgrade for example. Returns sector size in bytes or negative errno
 * in case of error. Specifically returns %-ENODEV if there is no
 * retimer at @index.
 */
int usb4_port_retimer_nvm_sector_size(struct tb_port *port, u8 index)
{
        u32 metadata;
        int ret;

        ret = usb4_port_retimer_op(port, index, USB4_SB_OPCODE_GET_NVM_SECTOR_SIZE,
                                   500);
        if (ret)
                return ret;

        ret = usb4_port_sb_read(port, USB4_SB_TARGET_RETIMER, index,
                                USB4_SB_METADATA, &metadata, sizeof(metadata));
        return ret ? ret : metadata & USB4_NVM_SECTOR_SIZE_MASK;
}

/**
 * usb4_port_retimer_nvm_set_offset() - Set NVM write offset
 * @port: USB4 port
 * @index: Retimer index
 * @address: Start offset
 *
 * Explicitly sets NVM write offset. Normally when writing to NVM this is
 * done automatically by usb4_port_retimer_nvm_write().
 *
 * Returns %0 in success and negative errno if there was a failure.
 */
int usb4_port_retimer_nvm_set_offset(struct tb_port *port, u8 index,
                                     unsigned int address)
{
        u32 metadata, dwaddress;
        int ret;

        dwaddress = address / 4;
        metadata = (dwaddress << USB4_NVM_SET_OFFSET_SHIFT) &
                   USB4_NVM_SET_OFFSET_MASK;

        ret = usb4_port_sb_write(port, USB4_SB_TARGET_RETIMER, index,
                                 USB4_SB_METADATA, &metadata, sizeof(metadata));
        if (ret)
                return ret;

        return usb4_port_retimer_op(port, index, USB4_SB_OPCODE_NVM_SET_OFFSET,
                                    500);
}

struct retimer_info {
        struct tb_port *port;
        u8 index;
};

static int usb4_port_retimer_nvm_write_next_block(void *data,
        unsigned int dwaddress, const void *buf, size_t dwords)
{
        const struct retimer_info *info = data;
        struct tb_port *port = info->port;
        u8 index = info->index;
        int ret;

        ret = usb4_port_sb_write(port, USB4_SB_TARGET_RETIMER, index,
                                 USB4_SB_DATA, buf, dwords * 4);
        if (ret)
                return ret;

        return usb4_port_retimer_op(port, index,
                                    USB4_SB_OPCODE_NVM_BLOCK_WRITE, 1000);
}

/**
 * usb4_port_retimer_nvm_write() - Write to retimer NVM
 * @port: USB4 port
 * @index: Retimer index
 * @address: Byte address where to start the write
 * @buf: Data to write
 * @size: Size in bytes how much to write
 *
 * Writes @size bytes from @buf to the retimer NVM. Used for NVM
 * upgrade. Returns %0 if the data was written successfully and negative
 * errno in case of failure. Specifically returns %-ENODEV if there is
 * no retimer at @index.
 */
int usb4_port_retimer_nvm_write(struct tb_port *port, u8 index, unsigned int address,
                                const void *buf, size_t size)
{
        struct retimer_info info = { .port = port, .index = index };
        int ret;

        ret = usb4_port_retimer_nvm_set_offset(port, index, address);
        if (ret)
                return ret;

        return tb_nvm_write_data(address, buf, size, USB4_DATA_RETRIES,
                                 usb4_port_retimer_nvm_write_next_block, &info);
}

/**
 * usb4_port_retimer_nvm_authenticate() - Start retimer NVM upgrade
 * @port: USB4 port
 * @index: Retimer index
 *
 * After the new NVM image has been written via usb4_port_retimer_nvm_write()
 * this function can be used to trigger the NVM upgrade process. If
 * successful the retimer restarts with the new NVM and may not have the
 * index set so one needs to call usb4_port_enumerate_retimers() to
 * force index to be assigned.
 */
int usb4_port_retimer_nvm_authenticate(struct tb_port *port, u8 index)
{
        u32 val;

        /*
         * We need to use the raw operation here because once the
         * authentication completes the retimer index is not set anymore
         * so we do not get back the status now.
         */
        val = USB4_SB_OPCODE_NVM_AUTH_WRITE;
        return usb4_port_sb_write(port, USB4_SB_TARGET_RETIMER, index,
                                  USB4_SB_OPCODE, &val, sizeof(val));
}

/**
 * usb4_port_retimer_nvm_authenticate_status() - Read status of NVM upgrade
 * @port: USB4 port
 * @index: Retimer index
 * @status: Raw status code read from metadata
 *
 * This can be called after usb4_port_retimer_nvm_authenticate() and
 * usb4_port_enumerate_retimers() to fetch status of the NVM upgrade.
 *
 * Returns %0 if the authentication status was successfully read. The
 * completion metadata (the result) is then stored into @status. If
 * reading the status fails, returns negative errno.
 */
int usb4_port_retimer_nvm_authenticate_status(struct tb_port *port, u8 index,
                                              u32 *status)
{
        u32 metadata, val;
        int ret;

        ret = usb4_port_sb_read(port, USB4_SB_TARGET_RETIMER, index,
                                USB4_SB_OPCODE, &val, sizeof(val));
        if (ret)
                return ret;

        ret = usb4_port_sb_opcode_err_to_errno(val);
        switch (ret) {
        case 0:
                *status = 0;
                return 0;

        case -EAGAIN:
                ret = usb4_port_sb_read(port, USB4_SB_TARGET_RETIMER, index,
                                        USB4_SB_METADATA, &metadata,
                                        sizeof(metadata));
                if (ret)
                        return ret;

                *status = metadata & USB4_SB_METADATA_NVM_AUTH_WRITE_MASK;
                return 0;

        default:
                return ret;
        }
}
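
/*
 * Illustrative sketch (not compiled): retimer.c drives the actual NVM
 * upgrade, but at this level the sequence is roughly: enable inbound
 * SBTX, write the image, start authentication, re-enumerate the retimers
 * and then poll the status. The names image and image_size are
 * placeholders.
 *
 *      u32 status;
 *      int ret;
 *
 *      ret = usb4_port_retimer_set_inbound_sbtx(port, index);
 *      if (!ret)
 *              ret = usb4_port_retimer_nvm_write(port, index, 0, image,
 *                                                image_size);
 *      if (!ret)
 *              ret = usb4_port_retimer_nvm_authenticate(port, index);
 *      if (!ret) {
 *              usb4_port_enumerate_retimers(port);
 *              ret = usb4_port_retimer_nvm_authenticate_status(port, index,
 *                                                              &status);
 *      }
 *      usb4_port_retimer_unset_inbound_sbtx(port, index);
 */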

static int usb4_port_retimer_nvm_read_block(void *data, unsigned int dwaddress,
                                            void *buf, size_t dwords)
{
        const struct retimer_info *info = data;
        struct tb_port *port = info->port;
        u8 index = info->index;
        u32 metadata;
        int ret;

        metadata = dwaddress << USB4_NVM_READ_OFFSET_SHIFT;
        if (dwords < USB4_DATA_DWORDS)
                metadata |= dwords << USB4_NVM_READ_LENGTH_SHIFT;

        ret = usb4_port_sb_write(port, USB4_SB_TARGET_RETIMER, index,
                                 USB4_SB_METADATA, &metadata, sizeof(metadata));
        if (ret)
                return ret;

        ret = usb4_port_retimer_op(port, index, USB4_SB_OPCODE_NVM_READ, 500);
        if (ret)
                return ret;

        return usb4_port_sb_read(port, USB4_SB_TARGET_RETIMER, index,
                                 USB4_SB_DATA, buf, dwords * 4);
}

/**
 * usb4_port_retimer_nvm_read() - Read contents of retimer NVM
 * @port: USB4 port
 * @index: Retimer index
 * @address: NVM address (in bytes) to start reading
 * @buf: Data read from NVM is stored here
 * @size: Number of bytes to read
 *
 * Reads retimer NVM and copies the contents to @buf. Returns %0 if the
 * read was successful and negative errno in case of failure.
 * Specifically returns %-ENODEV if there is no retimer at @index.
 */
int usb4_port_retimer_nvm_read(struct tb_port *port, u8 index,
                               unsigned int address, void *buf, size_t size)
{
        struct retimer_info info = { .port = port, .index = index };

        return tb_nvm_read_data(address, buf, size, USB4_DATA_RETRIES,
                                usb4_port_retimer_nvm_read_block, &info);
}

static inline unsigned int
usb4_usb3_port_max_bandwidth(const struct tb_port *port, unsigned int bw)
{
        /* Take the possible bandwidth limitation into account */
        if (port->max_bw)
                return min(bw, port->max_bw);
        return bw;
}

/**
 * usb4_usb3_port_max_link_rate() - Maximum supported USB3 link rate
 * @port: USB3 adapter port
 *
 * Returns the maximum supported link rate of a USB3 adapter in Mb/s.
 * Negative errno in case of error.
 */
int usb4_usb3_port_max_link_rate(struct tb_port *port)
{
        int ret, lr;
        u32 val;

        if (!tb_port_is_usb3_down(port) && !tb_port_is_usb3_up(port))
                return -EINVAL;

        ret = tb_port_read(port, &val, TB_CFG_PORT,
                           port->cap_adap + ADP_USB3_CS_4, 1);
        if (ret)
                return ret;

        lr = (val & ADP_USB3_CS_4_MSLR_MASK) >> ADP_USB3_CS_4_MSLR_SHIFT;
        ret = lr == ADP_USB3_CS_4_MSLR_20G ? 20000 : 10000;

        return usb4_usb3_port_max_bandwidth(port, ret);
}

static int usb4_usb3_port_cm_request(struct tb_port *port, bool request)
{
        int ret;
        u32 val;

        if (!tb_port_is_usb3_down(port))
                return -EINVAL;
        if (tb_route(port->sw))
                return -EINVAL;

        ret = tb_port_read(port, &val, TB_CFG_PORT,
                           port->cap_adap + ADP_USB3_CS_2, 1);
        if (ret)
                return ret;

        if (request)
                val |= ADP_USB3_CS_2_CMR;
        else
                val &= ~ADP_USB3_CS_2_CMR;

        ret = tb_port_write(port, &val, TB_CFG_PORT,
                            port->cap_adap + ADP_USB3_CS_2, 1);
        if (ret)
                return ret;

        /*
         * We can use val here directly as the CMR bit is in the same place
         * as HCA. Just mask out others.
         */
        val &= ADP_USB3_CS_2_CMR;
        return usb4_port_wait_for_bit(port, port->cap_adap + ADP_USB3_CS_1,
                                      ADP_USB3_CS_1_HCA, val, 1500,
                                      USB4_PORT_DELAY);
}

static inline int usb4_usb3_port_set_cm_request(struct tb_port *port)
{
        return usb4_usb3_port_cm_request(port, true);
}

static inline int usb4_usb3_port_clear_cm_request(struct tb_port *port)
{
        return usb4_usb3_port_cm_request(port, false);
}

static unsigned int usb3_bw_to_mbps(u32 bw, u8 scale)
{
        unsigned long uframes;

        uframes = bw * 512UL << scale;
        return DIV_ROUND_CLOSEST(uframes * 8000, MEGA);
}

static u32 mbps_to_usb3_bw(unsigned int mbps, u8 scale)
{
        unsigned long uframes;

        /* 1 uframe is 1/8 ms (125 us) -> 1 / 8000 s */
        uframes = ((unsigned long)mbps * MEGA) / 8000;
        return DIV_ROUND_UP(uframes, 512UL << scale);
}
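
/*
 * Worked example of the conversions above (purely following the code):
 * with scale = 0 a register value of 90 decodes to
 * DIV_ROUND_CLOSEST(90 * 512 * 8000, 1000000) = 369 Mb/s, while encoding
 * 369 Mb/s back gives DIV_ROUND_UP(369000000 / 8000, 512) = 91. The
 * encode direction rounds up so the adapter is never programmed with
 * less bandwidth than was asked for.
 */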

static int usb4_usb3_port_read_allocated_bandwidth(struct tb_port *port,
                                                   int *upstream_bw,
                                                   int *downstream_bw)
{
        u32 val, bw, scale;
        int ret;

        ret = tb_port_read(port, &val, TB_CFG_PORT,
                           port->cap_adap + ADP_USB3_CS_2, 1);
        if (ret)
                return ret;

        ret = tb_port_read(port, &scale, TB_CFG_PORT,
                           port->cap_adap + ADP_USB3_CS_3, 1);
        if (ret)
                return ret;

        scale &= ADP_USB3_CS_3_SCALE_MASK;

        bw = val & ADP_USB3_CS_2_AUBW_MASK;
        *upstream_bw = usb3_bw_to_mbps(bw, scale);

        bw = (val & ADP_USB3_CS_2_ADBW_MASK) >> ADP_USB3_CS_2_ADBW_SHIFT;
        *downstream_bw = usb3_bw_to_mbps(bw, scale);

        return 0;
}

/**
 * usb4_usb3_port_allocated_bandwidth() - Bandwidth allocated for USB3
 * @port: USB3 adapter port
 * @upstream_bw: Allocated upstream bandwidth is stored here
 * @downstream_bw: Allocated downstream bandwidth is stored here
 *
 * Stores currently allocated USB3 bandwidth into @upstream_bw and
 * @downstream_bw in Mb/s. Returns %0 in case of success and negative
 * errno in failure.
 */
int usb4_usb3_port_allocated_bandwidth(struct tb_port *port, int *upstream_bw,
                                       int *downstream_bw)
{
        int ret;

        ret = usb4_usb3_port_set_cm_request(port);
        if (ret)
                return ret;

        ret = usb4_usb3_port_read_allocated_bandwidth(port, upstream_bw,
                                                      downstream_bw);
        usb4_usb3_port_clear_cm_request(port);

        return ret;
}

static int usb4_usb3_port_read_consumed_bandwidth(struct tb_port *port,
                                                  int *upstream_bw,
                                                  int *downstream_bw)
{
        u32 val, bw, scale;
        int ret;

        ret = tb_port_read(port, &val, TB_CFG_PORT,
                           port->cap_adap + ADP_USB3_CS_1, 1);
        if (ret)
                return ret;

        ret = tb_port_read(port, &scale, TB_CFG_PORT,
                           port->cap_adap + ADP_USB3_CS_3, 1);
        if (ret)
                return ret;

        scale &= ADP_USB3_CS_3_SCALE_MASK;

        bw = val & ADP_USB3_CS_1_CUBW_MASK;
        *upstream_bw = usb3_bw_to_mbps(bw, scale);

        bw = (val & ADP_USB3_CS_1_CDBW_MASK) >> ADP_USB3_CS_1_CDBW_SHIFT;
        *downstream_bw = usb3_bw_to_mbps(bw, scale);

        return 0;
}

static int usb4_usb3_port_write_allocated_bandwidth(struct tb_port *port,
                                                    int upstream_bw,
                                                    int downstream_bw)
{
        u32 val, ubw, dbw, scale;
        int ret, max_bw;

        /* Figure out suitable scale */
        scale = 0;
        max_bw = max(upstream_bw, downstream_bw);
        while (scale < 64) {
                if (mbps_to_usb3_bw(max_bw, scale) < 4096)
                        break;
                scale++;
        }

        if (WARN_ON(scale >= 64))
                return -EINVAL;

        ret = tb_port_write(port, &scale, TB_CFG_PORT,
                            port->cap_adap + ADP_USB3_CS_3, 1);
        if (ret)
                return ret;

        ubw = mbps_to_usb3_bw(upstream_bw, scale);
        dbw = mbps_to_usb3_bw(downstream_bw, scale);

        tb_port_dbg(port, "scaled bandwidth %u/%u, scale %u\n", ubw, dbw, scale);

        ret = tb_port_read(port, &val, TB_CFG_PORT,
                           port->cap_adap + ADP_USB3_CS_2, 1);
        if (ret)
                return ret;

        val &= ~(ADP_USB3_CS_2_AUBW_MASK | ADP_USB3_CS_2_ADBW_MASK);
        val |= dbw << ADP_USB3_CS_2_ADBW_SHIFT;
        val |= ubw;

        return tb_port_write(port, &val, TB_CFG_PORT,
                             port->cap_adap + ADP_USB3_CS_2, 1);
}

/**
 * usb4_usb3_port_allocate_bandwidth() - Allocate bandwidth for USB3
 * @port: USB3 adapter port
 * @upstream_bw: New upstream bandwidth
 * @downstream_bw: New downstream bandwidth
 *
 * This can be used to set how much bandwidth is allocated for the USB3
 * tunneled isochronous traffic. @upstream_bw and @downstream_bw are the
 * new values programmed to the USB3 adapter allocation registers. If
 * the values are lower than what is currently consumed the allocation
 * is set to what is currently consumed instead (consumed bandwidth
 * cannot be taken away by CM). The actual new values are returned in
 * @upstream_bw and @downstream_bw.
 *
 * Returns %0 in case of success and negative errno if there was a
 * failure.
 */
int usb4_usb3_port_allocate_bandwidth(struct tb_port *port, int *upstream_bw,
                                      int *downstream_bw)
{
        int ret, consumed_up, consumed_down, allocate_up, allocate_down;

        ret = usb4_usb3_port_set_cm_request(port);
        if (ret)
                return ret;

        ret = usb4_usb3_port_read_consumed_bandwidth(port, &consumed_up,
                                                     &consumed_down);
        if (ret)
                goto err_request;

        /* Don't allow it to go lower than what is consumed */
        allocate_up = max(*upstream_bw, consumed_up);
        allocate_down = max(*downstream_bw, consumed_down);

        ret = usb4_usb3_port_write_allocated_bandwidth(port, allocate_up,
                                                       allocate_down);
        if (ret)
                goto err_request;

        *upstream_bw = allocate_up;
        *downstream_bw = allocate_down;

err_request:
        usb4_usb3_port_clear_cm_request(port);
        return ret;
}
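
/*
 * Illustrative sketch (not compiled): a connection manager that wants to
 * grant a USB3 tunnel more isochronous bandwidth could do something like
 * the following. The 2000/1000 Mb/s figures are placeholders; the actual
 * policy lives in the tunnel code.
 *
 *      int up = 2000, down = 1000;
 *      int ret;
 *
 *      ret = usb4_usb3_port_allocate_bandwidth(port, &up, &down);
 *      if (!ret)
 *              tb_port_dbg(port, "allocated %d/%d Mb/s\n", up, down);
 */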

/**
 * usb4_usb3_port_release_bandwidth() - Release allocated USB3 bandwidth
 * @port: USB3 adapter port
 * @upstream_bw: New allocated upstream bandwidth
 * @downstream_bw: New allocated downstream bandwidth
 *
 * Releases USB3 allocated bandwidth down to what is actually consumed.
 * The new bandwidth is returned in @upstream_bw and @downstream_bw.
 *
 * Returns %0 in success and negative errno in case of failure.
 */
int usb4_usb3_port_release_bandwidth(struct tb_port *port, int *upstream_bw,
                                     int *downstream_bw)
{
        int ret, consumed_up, consumed_down;

        ret = usb4_usb3_port_set_cm_request(port);
        if (ret)
                return ret;

        ret = usb4_usb3_port_read_consumed_bandwidth(port, &consumed_up,
                                                     &consumed_down);
        if (ret)
                goto err_request;

        /*
         * Always keep 900 Mb/s to make sure xHCI has at least some
         * bandwidth available for isochronous traffic.
         */
        if (consumed_up < 900)
                consumed_up = 900;
        if (consumed_down < 900)
                consumed_down = 900;

        ret = usb4_usb3_port_write_allocated_bandwidth(port, consumed_up,
                                                       consumed_down);
        if (ret)
                goto err_request;

        *upstream_bw = consumed_up;
        *downstream_bw = consumed_down;

err_request:
        usb4_usb3_port_clear_cm_request(port);
        return ret;
}

static bool is_usb4_dpin(const struct tb_port *port)
{
        if (!tb_port_is_dpin(port))
                return false;
        if (!tb_switch_is_usb4(port->sw))
                return false;
        return true;
}

/**
 * usb4_dp_port_set_cm_id() - Assign CM ID to the DP IN adapter
 * @port: DP IN adapter
 * @cm_id: CM ID to assign
 *
 * Sets CM ID for the @port. Returns %0 on success and negative errno
 * otherwise. Specifically returns %-EOPNOTSUPP if the @port does not
 * support this.
 */
int usb4_dp_port_set_cm_id(struct tb_port *port, int cm_id)
{
        u32 val;
        int ret;

        if (!is_usb4_dpin(port))
                return -EOPNOTSUPP;

        ret = tb_port_read(port, &val, TB_CFG_PORT,
                           port->cap_adap + ADP_DP_CS_2, 1);
        if (ret)
                return ret;

        val &= ~ADP_DP_CS_2_CM_ID_MASK;
        val |= cm_id << ADP_DP_CS_2_CM_ID_SHIFT;

        return tb_port_write(port, &val, TB_CFG_PORT,
                             port->cap_adap + ADP_DP_CS_2, 1);
}

/**
 * usb4_dp_port_bandwidth_mode_supported() - Is the bandwidth allocation mode
 *                                           supported
 * @port: DP IN adapter to check
 *
 * Can be called for any DP IN adapter. Returns true if the adapter
 * supports USB4 bandwidth allocation mode, false otherwise.
 */
bool usb4_dp_port_bandwidth_mode_supported(struct tb_port *port)
{
        int ret;
        u32 val;

        if (!is_usb4_dpin(port))
                return false;

        ret = tb_port_read(port, &val, TB_CFG_PORT,
                           port->cap_adap + DP_LOCAL_CAP, 1);
        if (ret)
                return false;

        return !!(val & DP_COMMON_CAP_BW_MODE);
}

/**
 * usb4_dp_port_bandwidth_mode_enabled() - Is the bandwidth allocation mode
 *                                         enabled
 * @port: DP IN adapter to check
 *
 * Can be called for any DP IN adapter. Returns true if the bandwidth
 * allocation mode has been enabled, false otherwise.
 */
bool usb4_dp_port_bandwidth_mode_enabled(struct tb_port *port)
{
        int ret;
        u32 val;

        if (!is_usb4_dpin(port))
                return false;

        ret = tb_port_read(port, &val, TB_CFG_PORT,
                           port->cap_adap + ADP_DP_CS_8, 1);
        if (ret)
                return false;

        return !!(val & ADP_DP_CS_8_DPME);
}

/**
 * usb4_dp_port_set_cm_bandwidth_mode_supported() - Set/clear CM support for
 *                                                  bandwidth allocation mode
 * @port: DP IN adapter
 * @supported: Does the CM support bandwidth allocation mode
 *
 * Can be called for any DP IN adapter. Sets or clears the CM support bit
 * of the DP IN adapter. Returns %0 in success and negative errno
 * otherwise. Specifically returns %-EOPNOTSUPP if the passed in adapter
 * does not support this.
 */
int usb4_dp_port_set_cm_bandwidth_mode_supported(struct tb_port *port,
                                                 bool supported)
{
        u32 val;
        int ret;

        if (!is_usb4_dpin(port))
                return -EOPNOTSUPP;

        ret = tb_port_read(port, &val, TB_CFG_PORT,
                           port->cap_adap + ADP_DP_CS_2, 1);
        if (ret)
                return ret;

        if (supported)
                val |= ADP_DP_CS_2_CMMS;
        else
                val &= ~ADP_DP_CS_2_CMMS;

        return tb_port_write(port, &val, TB_CFG_PORT,
                             port->cap_adap + ADP_DP_CS_2, 1);
}

/**
 * usb4_dp_port_group_id() - Return Group ID assigned for the adapter
 * @port: DP IN adapter
 *
 * Reads bandwidth allocation Group ID from the DP IN adapter and
 * returns it. If the adapter does not support setting Group_ID
 * %-EOPNOTSUPP is returned.
 */
int usb4_dp_port_group_id(struct tb_port *port)
{
        u32 val;
        int ret;

        if (!is_usb4_dpin(port))
                return -EOPNOTSUPP;

        ret = tb_port_read(port, &val, TB_CFG_PORT,
                           port->cap_adap + ADP_DP_CS_2, 1);
        if (ret)
                return ret;

        return (val & ADP_DP_CS_2_GROUP_ID_MASK) >> ADP_DP_CS_2_GROUP_ID_SHIFT;
}

/**
 * usb4_dp_port_set_group_id() - Set adapter Group ID
 * @port: DP IN adapter
 * @group_id: Group ID for the adapter
 *
 * Sets bandwidth allocation mode Group ID for the DP IN adapter.
 * Returns %0 in case of success and negative errno otherwise.
 * Specifically returns %-EOPNOTSUPP if the adapter does not support
 * this.
 */
int usb4_dp_port_set_group_id(struct tb_port *port, int group_id)
{
        u32 val;
        int ret;

        if (!is_usb4_dpin(port))
                return -EOPNOTSUPP;

        ret = tb_port_read(port, &val, TB_CFG_PORT,
                           port->cap_adap + ADP_DP_CS_2, 1);
        if (ret)
                return ret;

        val &= ~ADP_DP_CS_2_GROUP_ID_MASK;
        val |= group_id << ADP_DP_CS_2_GROUP_ID_SHIFT;

        return tb_port_write(port, &val, TB_CFG_PORT,
                             port->cap_adap + ADP_DP_CS_2, 1);
}

/**
 * usb4_dp_port_nrd() - Read non-reduced rate and lanes
 * @port: DP IN adapter
 * @rate: Non-reduced rate in Mb/s is placed here
 * @lanes: Non-reduced lanes are placed here
 *
 * Reads the non-reduced rate and lanes from the DP IN adapter. Returns
 * %0 in success and negative errno otherwise. Specifically returns
 * %-EOPNOTSUPP if the adapter does not support this.
 */
int usb4_dp_port_nrd(struct tb_port *port, int *rate, int *lanes)
{
        u32 val, tmp;
        int ret;

        if (!is_usb4_dpin(port))
                return -EOPNOTSUPP;

        ret = tb_port_read(port, &val, TB_CFG_PORT,
                           port->cap_adap + ADP_DP_CS_2, 1);
        if (ret)
                return ret;

        tmp = (val & ADP_DP_CS_2_NRD_MLR_MASK) >> ADP_DP_CS_2_NRD_MLR_SHIFT;
        switch (tmp) {
        case DP_COMMON_CAP_RATE_RBR:
                *rate = 1620;
                break;
        case DP_COMMON_CAP_RATE_HBR:
                *rate = 2700;
                break;
        case DP_COMMON_CAP_RATE_HBR2:
                *rate = 5400;
                break;
        case DP_COMMON_CAP_RATE_HBR3:
                *rate = 8100;
                break;
        }

        tmp = val & ADP_DP_CS_2_NRD_MLC_MASK;
        switch (tmp) {
        case DP_COMMON_CAP_1_LANE:
                *lanes = 1;
                break;
        case DP_COMMON_CAP_2_LANES:
                *lanes = 2;
                break;
        case DP_COMMON_CAP_4_LANES:
                *lanes = 4;
                break;
        }

        return 0;
}

/**
 * usb4_dp_port_set_nrd() - Set non-reduced rate and lanes
 * @port: DP IN adapter
 * @rate: Non-reduced rate in Mb/s
 * @lanes: Non-reduced lanes
 *
 * Before the capabilities reduction this function can be used to set
 * the non-reduced values for the DP IN adapter. Returns %0 in success
 * and negative errno otherwise. If the adapter does not support this
 * %-EOPNOTSUPP is returned.
 */
int usb4_dp_port_set_nrd(struct tb_port *port, int rate, int lanes)
{
        u32 val;
        int ret;

        if (!is_usb4_dpin(port))
                return -EOPNOTSUPP;

        ret = tb_port_read(port, &val, TB_CFG_PORT,
                           port->cap_adap + ADP_DP_CS_2, 1);
        if (ret)
                return ret;

        val &= ~ADP_DP_CS_2_NRD_MLR_MASK;
        switch (rate) {
        case 1620:
                break;
        case 2700:
                val |= (DP_COMMON_CAP_RATE_HBR << ADP_DP_CS_2_NRD_MLR_SHIFT)
                        & ADP_DP_CS_2_NRD_MLR_MASK;
                break;
        case 5400:
                val |= (DP_COMMON_CAP_RATE_HBR2 << ADP_DP_CS_2_NRD_MLR_SHIFT)
                        & ADP_DP_CS_2_NRD_MLR_MASK;
                break;
        case 8100:
                val |= (DP_COMMON_CAP_RATE_HBR3 << ADP_DP_CS_2_NRD_MLR_SHIFT)
                        & ADP_DP_CS_2_NRD_MLR_MASK;
                break;
        default:
                return -EINVAL;
        }

        val &= ~ADP_DP_CS_2_NRD_MLC_MASK;
        switch (lanes) {
        case 1:
                break;
        case 2:
                val |= DP_COMMON_CAP_2_LANES;
                break;
        case 4:
                val |= DP_COMMON_CAP_4_LANES;
                break;
        default:
                return -EINVAL;
        }

        return tb_port_write(port, &val, TB_CFG_PORT,
                             port->cap_adap + ADP_DP_CS_2, 1);
}

/**
 * usb4_dp_port_granularity() - Return granularity for the bandwidth values
 * @port: DP IN adapter
 *
 * Reads the programmed granularity from @port. If the DP IN adapter does
 * not support bandwidth allocation mode returns %-EOPNOTSUPP and negative
 * errno in other error cases.
 */
int usb4_dp_port_granularity(struct tb_port *port)
{
        u32 val;
        int ret;

        if (!is_usb4_dpin(port))
                return -EOPNOTSUPP;

        ret = tb_port_read(port, &val, TB_CFG_PORT,
                           port->cap_adap + ADP_DP_CS_2, 1);
        if (ret)
                return ret;

        val &= ADP_DP_CS_2_GR_MASK;
        val >>= ADP_DP_CS_2_GR_SHIFT;

        switch (val) {
        case ADP_DP_CS_2_GR_0_25G:
                return 250;
        case ADP_DP_CS_2_GR_0_5G:
                return 500;
        case ADP_DP_CS_2_GR_1G:
                return 1000;
        }

        return -EINVAL;
}

/**
 * usb4_dp_port_set_granularity() - Set granularity for the bandwidth values
 * @port: DP IN adapter
 * @granularity: Granularity in Mb/s. Supported values: 1000, 500 and 250.
 *
 * Sets the granularity used with the estimated, allocated and requested
 * bandwidth. Returns %0 in success and negative errno otherwise. If the
 * adapter does not support this %-EOPNOTSUPP is returned.
 */
int usb4_dp_port_set_granularity(struct tb_port *port, int granularity)
{
        u32 val;
        int ret;

        if (!is_usb4_dpin(port))
                return -EOPNOTSUPP;

        ret = tb_port_read(port, &val, TB_CFG_PORT,
                           port->cap_adap + ADP_DP_CS_2, 1);
        if (ret)
                return ret;

        val &= ~ADP_DP_CS_2_GR_MASK;

        switch (granularity) {
        case 250:
                val |= ADP_DP_CS_2_GR_0_25G << ADP_DP_CS_2_GR_SHIFT;
                break;
        case 500:
                val |= ADP_DP_CS_2_GR_0_5G << ADP_DP_CS_2_GR_SHIFT;
                break;
        case 1000:
                val |= ADP_DP_CS_2_GR_1G << ADP_DP_CS_2_GR_SHIFT;
                break;
        default:
                return -EINVAL;
        }

        return tb_port_write(port, &val, TB_CFG_PORT,
                             port->cap_adap + ADP_DP_CS_2, 1);
}

/**
 * usb4_dp_port_set_estimated_bandwidth() - Set estimated bandwidth
 * @port: DP IN adapter
 * @bw: Estimated bandwidth in Mb/s.
 *
 * Sets the estimated bandwidth to @bw. Set the granularity by calling
 * usb4_dp_port_set_granularity() before calling this. The @bw is rounded
 * down to the closest granularity multiplier. Returns %0 in success
 * and negative errno otherwise. Specifically returns %-EOPNOTSUPP if
 * the adapter does not support this.
 */
int usb4_dp_port_set_estimated_bandwidth(struct tb_port *port, int bw)
{
        u32 val, granularity;
        int ret;

        if (!is_usb4_dpin(port))
                return -EOPNOTSUPP;

        ret = usb4_dp_port_granularity(port);
        if (ret < 0)
                return ret;
        granularity = ret;

        ret = tb_port_read(port, &val, TB_CFG_PORT,
                           port->cap_adap + ADP_DP_CS_2, 1);
        if (ret)
                return ret;

        val &= ~ADP_DP_CS_2_ESTIMATED_BW_MASK;
        val |= (bw / granularity) << ADP_DP_CS_2_ESTIMATED_BW_SHIFT;

        return tb_port_write(port, &val, TB_CFG_PORT,
                             port->cap_adap + ADP_DP_CS_2, 1);
}

/**
 * usb4_dp_port_allocated_bandwidth() - Return allocated bandwidth
 * @port: DP IN adapter
 *
 * Reads and returns allocated bandwidth for @port in Mb/s (taking into
 * account the programmed granularity). Returns negative errno in case
 * of error.
 */
int usb4_dp_port_allocated_bandwidth(struct tb_port *port)
{
        u32 val, granularity;
        int ret;

        if (!is_usb4_dpin(port))
                return -EOPNOTSUPP;

        ret = usb4_dp_port_granularity(port);
        if (ret < 0)
                return ret;
        granularity = ret;

        ret = tb_port_read(port, &val, TB_CFG_PORT,
                           port->cap_adap + DP_STATUS, 1);
        if (ret)
                return ret;

        val &= DP_STATUS_ALLOCATED_BW_MASK;
        val >>= DP_STATUS_ALLOCATED_BW_SHIFT;

        return val * granularity;
}

static int __usb4_dp_port_set_cm_ack(struct tb_port *port, bool ack)
{
        u32 val;
        int ret;

        ret = tb_port_read(port, &val, TB_CFG_PORT,
                           port->cap_adap + ADP_DP_CS_2, 1);
        if (ret)
                return ret;

        if (ack)
                val |= ADP_DP_CS_2_CA;
        else
                val &= ~ADP_DP_CS_2_CA;

        return tb_port_write(port, &val, TB_CFG_PORT,
                             port->cap_adap + ADP_DP_CS_2, 1);
}

static inline int usb4_dp_port_set_cm_ack(struct tb_port *port)
{
        return __usb4_dp_port_set_cm_ack(port, true);
}

static int usb4_dp_port_wait_and_clear_cm_ack(struct tb_port *port,
                                              int timeout_msec)
{
        ktime_t end;
        u32 val;
        int ret;

        ret = __usb4_dp_port_set_cm_ack(port, false);
        if (ret)
                return ret;

        end = ktime_add_ms(ktime_get(), timeout_msec);
        do {
                ret = tb_port_read(port, &val, TB_CFG_PORT,
                                   port->cap_adap + ADP_DP_CS_8, 1);
                if (ret)
                        return ret;

                if (!(val & ADP_DP_CS_8_DR))
                        break;

                usleep_range(50, 100);
        } while (ktime_before(ktime_get(), end));

        if (val & ADP_DP_CS_8_DR) {
                tb_port_warn(port, "timeout waiting for DPTX request to clear\n");
                return -ETIMEDOUT;
        }

        ret = tb_port_read(port, &val, TB_CFG_PORT,
                           port->cap_adap + ADP_DP_CS_2, 1);
        if (ret)
                return ret;

        val &= ~ADP_DP_CS_2_CA;
        return tb_port_write(port, &val, TB_CFG_PORT,
                             port->cap_adap + ADP_DP_CS_2, 1);
}

/**
 * usb4_dp_port_allocate_bandwidth() - Set allocated bandwidth
 * @port: DP IN adapter
 * @bw: New allocated bandwidth in Mb/s
 *
 * Communicates the new allocated bandwidth with the DPCD (graphics
 * driver). Takes into account the programmed granularity. Returns %0 in
 * success and negative errno in case of error.
 */
int usb4_dp_port_allocate_bandwidth(struct tb_port *port, int bw)
{
        u32 val, granularity;
        int ret;

        if (!is_usb4_dpin(port))
                return -EOPNOTSUPP;

        ret = usb4_dp_port_granularity(port);
        if (ret < 0)
                return ret;
        granularity = ret;

        ret = tb_port_read(port, &val, TB_CFG_PORT,
                           port->cap_adap + DP_STATUS, 1);
        if (ret)
                return ret;

        val &= ~DP_STATUS_ALLOCATED_BW_MASK;
        val |= (bw / granularity) << DP_STATUS_ALLOCATED_BW_SHIFT;

        ret = tb_port_write(port, &val, TB_CFG_PORT,
                            port->cap_adap + DP_STATUS, 1);
        if (ret)
                return ret;

        ret = usb4_dp_port_set_cm_ack(port);
        if (ret)
                return ret;

        return usb4_dp_port_wait_and_clear_cm_ack(port, 500);
}

/**
 * usb4_dp_port_requested_bandwidth() - Read requested bandwidth
 * @port: DP IN adapter
 *
 * Reads the DPCD (graphics driver) requested bandwidth and returns it
 * in Mb/s. Takes the programmed granularity into account. In case of
 * error returns negative errno. Specifically returns %-EOPNOTSUPP if
 * the adapter does not support bandwidth allocation mode, and %-ENODATA
 * if there is no active bandwidth request from the graphics driver.
 */
int usb4_dp_port_requested_bandwidth(struct tb_port *port)
{
        u32 val, granularity;
        int ret;

        if (!is_usb4_dpin(port))
                return -EOPNOTSUPP;

        ret = usb4_dp_port_granularity(port);
        if (ret < 0)
                return ret;
        granularity = ret;

        ret = tb_port_read(port, &val, TB_CFG_PORT,
                           port->cap_adap + ADP_DP_CS_8, 1);
        if (ret)
                return ret;

        if (!(val & ADP_DP_CS_8_DR))
                return -ENODATA;

        return (val & ADP_DP_CS_8_REQUESTED_BW_MASK) * granularity;
}
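
/*
 * Illustrative sketch (not compiled): when the graphics driver raises a
 * bandwidth request the CM could answer it along these lines. Whether
 * the request is granted in full is a policy decision made elsewhere.
 *
 *      int requested = usb4_dp_port_requested_bandwidth(port);
 *      int ret;
 *
 *      if (requested >= 0)
 *              ret = usb4_dp_port_allocate_bandwidth(port, requested);
 */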

/**
 * usb4_pci_port_set_ext_encapsulation() - Enable/disable extended encapsulation
 * @port: PCIe adapter
 * @enable: Enable/disable extended encapsulation
 *
 * Enables or disables extended encapsulation used in PCIe tunneling. Caller
 * needs to make sure both adapters support this before enabling. Returns %0 on
 * success and negative errno otherwise.
 */
int usb4_pci_port_set_ext_encapsulation(struct tb_port *port, bool enable)
{
        u32 val;
        int ret;

        if (!tb_port_is_pcie_up(port) && !tb_port_is_pcie_down(port))
                return -EINVAL;

        ret = tb_port_read(port, &val, TB_CFG_PORT,
                           port->cap_adap + ADP_PCIE_CS_1, 1);
        if (ret)
                return ret;

        if (enable)
                val |= ADP_PCIE_CS_1_EE;
        else
                val &= ~ADP_PCIE_CS_1_EE;

        return tb_port_write(port, &val, TB_CFG_PORT,
                             port->cap_adap + ADP_PCIE_CS_1, 1);
}