usb4.c 74 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
37047057067077087097107117127137147157167177187197207217227237247257267277287297307317327337347357367377387397407417427437447457467477487497507517527537547557567577587597607617627637647657667677687697707717727737747757767777787797807817827837847857867877887897907917927937947957967977987998008018028038048058068078088098108118128138148158168178188198208218228238248258268278288298308318328338348358368378388398408418428438448458468478488498508518528538548558568578588598608618628638648658668678688698708718728738748758768778788798808818828838848858868878888898908918928938948958968978988999009019029039049059069079089099109119129139149159169179189199209219229239249259269279289299309319329339349359369379389399409419429439449459469479489499509519529539549559569579589599609619629639649659669679689699709719729739749759769779789799809819829839849859869879889899909919929939949959969979989991000100110021003100410051006100710081009101010111012101310141015101610171018101910201021102210231024102510261027102810291030103110321033103410351036103710381039104010411042104310441045104610471048104910501051105210531054105510561057105810591060106110621063106410651066106710681069107010711072107310741075107610771078107910801081108210831084108510861087108810891090109110921093109410951096109710981099110011011102110311041105110611071108110911101111111211131114111511161117111811191120112111221123112411251126112711281129113011311132113311341135113611371138113911401141114211431144114511461147114811491150115111521153115411551156115711581159116011611162116311641165116611671168116911701171117211731174117511761177117811791180118111821183118411851186118711881189119011911192119311941195119611971198119912001201120212031204120512061207120812091210121112121213121412151216121712181219122012211222122312241225122612271228122912301231123212331234123512361237123812391240124112421243124412451246124712481249125012511252125312541255125612571258125912601261126212631264126512661267126812691270127112721273127412751276127
71278127912801281128212831284128512861287128812891290129112921293129412951296129712981299130013011302130313041305130613071308130913101311131213131314131513161317131813191320132113221323132413251326132713281329133013311332133313341335133613371338133913401341134213431344134513461347134813491350135113521353135413551356135713581359136013611362136313641365136613671368136913701371137213731374137513761377137813791380138113821383138413851386138713881389139013911392139313941395139613971398139914001401140214031404140514061407140814091410141114121413141414151416141714181419142014211422142314241425142614271428142914301431143214331434143514361437143814391440144114421443144414451446144714481449145014511452145314541455145614571458145914601461146214631464146514661467146814691470147114721473147414751476147714781479148014811482148314841485148614871488148914901491149214931494149514961497149814991500150115021503150415051506150715081509151015111512151315141515151615171518151915201521152215231524152515261527152815291530153115321533153415351536153715381539154015411542154315441545154615471548154915501551155215531554155515561557155815591560156115621563156415651566156715681569157015711572157315741575157615771578157915801581158215831584158515861587158815891590159115921593159415951596159715981599160016011602160316041605160616071608160916101611161216131614161516161617161816191620162116221623162416251626162716281629163016311632163316341635163616371638163916401641164216431644164516461647164816491650165116521653165416551656165716581659166016611662166316641665166616671668166916701671167216731674167516761677167816791680168116821683168416851686168716881689169016911692169316941695169616971698169917001701170217031704170517061707170817091710171117121713171417151716171717181719172017211722172317241725172617271728172917301731173217331734173517361737173817391740174117421743174417451746174717481749175017511752175317541755175617571758175917601761176217631764176517661767176817691770177117721773177417751776177
71778177917801781178217831784178517861787178817891790179117921793179417951796179717981799180018011802180318041805180618071808180918101811181218131814181518161817181818191820182118221823182418251826182718281829183018311832183318341835183618371838183918401841184218431844184518461847184818491850185118521853185418551856185718581859186018611862186318641865186618671868186918701871187218731874187518761877187818791880188118821883188418851886188718881889189018911892189318941895189618971898189919001901190219031904190519061907190819091910191119121913191419151916191719181919192019211922192319241925192619271928192919301931193219331934193519361937193819391940194119421943194419451946194719481949195019511952195319541955195619571958195919601961196219631964196519661967196819691970197119721973197419751976197719781979198019811982198319841985198619871988198919901991199219931994199519961997199819992000200120022003200420052006200720082009201020112012201320142015201620172018201920202021202220232024202520262027202820292030203120322033203420352036203720382039204020412042204320442045204620472048204920502051205220532054205520562057205820592060206120622063206420652066206720682069207020712072207320742075207620772078207920802081208220832084208520862087208820892090209120922093209420952096209720982099210021012102210321042105210621072108210921102111211221132114211521162117211821192120212121222123212421252126212721282129213021312132213321342135213621372138213921402141214221432144214521462147214821492150215121522153215421552156215721582159216021612162216321642165216621672168216921702171217221732174217521762177217821792180218121822183218421852186218721882189219021912192219321942195219621972198219922002201220222032204220522062207220822092210221122122213221422152216221722182219222022212222222322242225222622272228222922302231223222332234223522362237223822392240224122422243224422452246224722482249225022512252225322542255225622572258225922602261226222632264226522662267226822692270227122722273227422752276227
72278227922802281228222832284228522862287228822892290229122922293229422952296229722982299230023012302230323042305230623072308230923102311231223132314231523162317231823192320232123222323232423252326232723282329233023312332233323342335233623372338233923402341234223432344234523462347234823492350235123522353235423552356235723582359236023612362236323642365236623672368236923702371237223732374237523762377237823792380238123822383238423852386238723882389239023912392239323942395239623972398239924002401240224032404240524062407240824092410241124122413241424152416241724182419242024212422242324242425242624272428242924302431243224332434243524362437243824392440244124422443244424452446244724482449245024512452245324542455245624572458245924602461246224632464246524662467246824692470247124722473247424752476247724782479248024812482248324842485248624872488248924902491249224932494249524962497249824992500250125022503250425052506250725082509251025112512251325142515251625172518251925202521252225232524252525262527252825292530253125322533253425352536253725382539254025412542254325442545254625472548254925502551255225532554255525562557255825592560256125622563256425652566256725682569257025712572257325742575257625772578257925802581258225832584258525862587258825892590259125922593259425952596259725982599260026012602260326042605260626072608260926102611261226132614261526162617261826192620262126222623262426252626262726282629263026312632263326342635263626372638263926402641264226432644264526462647264826492650265126522653265426552656265726582659266026612662266326642665266626672668266926702671267226732674267526762677267826792680268126822683268426852686268726882689269026912692269326942695269626972698269927002701270227032704270527062707270827092710271127122713271427152716271727182719272027212722272327242725272627272728272927302731273227332734273527362737273827392740274127422743274427452746274727482749275027512752275327542755275627572758275927602761276227632764276527662767276827692770277127722773277427752776277
72778277927802781278227832784278527862787278827892790279127922793279427952796279727982799280028012802280328042805280628072808280928102811281228132814281528162817281828192820282128222823282428252826282728282829283028312832283328342835283628372838283928402841284228432844284528462847284828492850285128522853285428552856285728582859286028612862286328642865286628672868286928702871287228732874287528762877287828792880288128822883288428852886288728882889289028912892289328942895289628972898289929002901290229032904290529062907290829092910291129122913291429152916291729182919292029212922292329242925292629272928292929302931293229332934293529362937293829392940294129422943294429452946294729482949295029512952295329542955295629572958295929602961296229632964296529662967296829692970297129722973297429752976297729782979298029812982298329842985298629872988298929902991299229932994299529962997299829993000300130023003300430053006300730083009
  1. // SPDX-License-Identifier: GPL-2.0
  2. /*
  3. * USB4 specific functionality
  4. *
  5. * Copyright (C) 2019, Intel Corporation
  6. * Authors: Mika Westerberg <mika.westerberg@linux.intel.com>
  7. * Rajmohan Mani <rajmohan.mani@intel.com>
  8. */
  9. #include <linux/delay.h>
  10. #include <linux/ktime.h>
  11. #include <linux/units.h>
  12. #include "sb_regs.h"
  13. #include "tb.h"
/* How many times a data transfer is retried before giving up */
#define USB4_DATA_RETRIES		3
/* Size of the router operation data registers in dwords */
#define USB4_DATA_DWORDS		16

/* NVM_READ metadata layout: dword offset and length fields */
#define USB4_NVM_READ_OFFSET_MASK	GENMASK(23, 2)
#define USB4_NVM_READ_OFFSET_SHIFT	2
#define USB4_NVM_READ_LENGTH_MASK	GENMASK(27, 24)
#define USB4_NVM_READ_LENGTH_SHIFT	24

/* NVM_SET_OFFSET reuses the NVM_READ offset field layout */
#define USB4_NVM_SET_OFFSET_MASK	USB4_NVM_READ_OFFSET_MASK
#define USB4_NVM_SET_OFFSET_SHIFT	USB4_NVM_READ_OFFSET_SHIFT

/* DROM_READ metadata layout: dword address and size fields */
#define USB4_DROM_ADDRESS_MASK		GENMASK(14, 2)
#define USB4_DROM_ADDRESS_SHIFT		2
#define USB4_DROM_SIZE_MASK		GENMASK(19, 15)
#define USB4_DROM_SIZE_SHIFT		15

/* NVM_SECTOR_SIZE returns the sector size in the low metadata bits */
#define USB4_NVM_SECTOR_SIZE_MASK	GENMASK(23, 0)

/* BUFFER_ALLOC result entry layout: length, index and value fields */
#define USB4_BA_LENGTH_MASK		GENMASK(7, 0)
#define USB4_BA_INDEX_MASK		GENMASK(15, 0)

/*
 * Indices of the buffer allocation parameters returned by the
 * BUFFER_ALLOC router operation (consumed in usb4_switch_credits_init()).
 */
enum usb4_ba_index {
	USB4_BA_MAX_USB3 = 0x1,
	USB4_BA_MIN_DP_AUX = 0x2,
	USB4_BA_MIN_DP_MAIN = 0x3,
	USB4_BA_MAX_PCIE = 0x4,
	USB4_BA_MAX_HI = 0x5,
};

#define USB4_BA_VALUE_MASK		GENMASK(31, 16)
#define USB4_BA_VALUE_SHIFT		16

/* Delays in us used with usb4_port_wait_for_bit() */
#define USB4_PORT_DELAY			50
#define USB4_PORT_SB_DELAY		1000
/*
 * Runs a USB4 router operation natively: writes the optional metadata
 * and data payload into the router config space, starts the operation
 * through ROUTER_CS_26 and polls for its completion, then reads back
 * status, metadata and any result data. Returns %0 on success,
 * %-EOPNOTSUPP if the router did not recognize the opcode, or negative
 * errno on config space access failure.
 */
static int usb4_native_switch_op(struct tb_switch *sw, u16 opcode,
				 u32 *metadata, u8 *status,
				 const void *tx_data, size_t tx_dwords,
				 void *rx_data, size_t rx_dwords)
{
	u32 val;
	int ret;

	/* Input metadata goes to ROUTER_CS_25 before the operation starts */
	if (metadata) {
		ret = tb_sw_write(sw, metadata, TB_CFG_SWITCH, ROUTER_CS_25, 1);
		if (ret)
			return ret;
	}

	/* Outgoing data payload occupies the registers from ROUTER_CS_9 on */
	if (tx_dwords) {
		ret = tb_sw_write(sw, tx_data, TB_CFG_SWITCH, ROUTER_CS_9,
				  tx_dwords);
		if (ret)
			return ret;
	}

	/* Setting the OV bit together with the opcode starts the operation */
	val = opcode | ROUTER_CS_26_OV;
	ret = tb_sw_write(sw, &val, TB_CFG_SWITCH, ROUTER_CS_26, 1);
	if (ret)
		return ret;

	/* The router clears OV when the operation completes (500 ms max) */
	ret = tb_switch_wait_for_bit(sw, ROUTER_CS_26, ROUTER_CS_26_OV, 0, 500);
	if (ret)
		return ret;

	ret = tb_sw_read(sw, &val, TB_CFG_SWITCH, ROUTER_CS_26, 1);
	if (ret)
		return ret;

	/* ONS set means the router does not support this operation */
	if (val & ROUTER_CS_26_ONS)
		return -EOPNOTSUPP;

	if (status)
		*status = (val & ROUTER_CS_26_STATUS_MASK) >>
			ROUTER_CS_26_STATUS_SHIFT;

	/* Result metadata is returned in the same ROUTER_CS_25 register */
	if (metadata) {
		ret = tb_sw_read(sw, metadata, TB_CFG_SWITCH, ROUTER_CS_25, 1);
		if (ret)
			return ret;
	}

	if (rx_dwords) {
		ret = tb_sw_read(sw, rx_data, TB_CFG_SWITCH, ROUTER_CS_9,
				 rx_dwords);
		if (ret)
			return ret;
	}

	return 0;
}
  87. static int __usb4_switch_op(struct tb_switch *sw, u16 opcode, u32 *metadata,
  88. u8 *status, const void *tx_data, size_t tx_dwords,
  89. void *rx_data, size_t rx_dwords)
  90. {
  91. const struct tb_cm_ops *cm_ops = sw->tb->cm_ops;
  92. if (tx_dwords > USB4_DATA_DWORDS || rx_dwords > USB4_DATA_DWORDS)
  93. return -EINVAL;
  94. /*
  95. * If the connection manager implementation provides USB4 router
  96. * operation proxy callback, call it here instead of running the
  97. * operation natively.
  98. */
  99. if (cm_ops->usb4_switch_op) {
  100. int ret;
  101. ret = cm_ops->usb4_switch_op(sw, opcode, metadata, status,
  102. tx_data, tx_dwords, rx_data,
  103. rx_dwords);
  104. if (ret != -EOPNOTSUPP)
  105. return ret;
  106. /*
  107. * If the proxy was not supported then run the native
  108. * router operation instead.
  109. */
  110. }
  111. return usb4_native_switch_op(sw, opcode, metadata, status, tx_data,
  112. tx_dwords, rx_data, rx_dwords);
  113. }
  114. static inline int usb4_switch_op(struct tb_switch *sw, u16 opcode,
  115. u32 *metadata, u8 *status)
  116. {
  117. return __usb4_switch_op(sw, opcode, metadata, status, NULL, 0, NULL, 0);
  118. }
  119. static inline int usb4_switch_op_data(struct tb_switch *sw, u16 opcode,
  120. u32 *metadata, u8 *status,
  121. const void *tx_data, size_t tx_dwords,
  122. void *rx_data, size_t rx_dwords)
  123. {
  124. return __usb4_switch_op(sw, opcode, metadata, status, tx_data,
  125. tx_dwords, rx_data, rx_dwords);
  126. }
/**
 * usb4_switch_check_wakes() - Check for wakes and notify PM core about them
 * @sw: Router whose wakes to check
 *
 * Checks wakes occurred during suspend and notify the PM core about them.
 */
void usb4_switch_check_wakes(struct tb_switch *sw)
{
	bool wakeup_usb4 = false;
	struct usb4_port *usb4;
	struct tb_port *port;
	bool wakeup = false;
	u32 val;

	/*
	 * Device routers (tb_route() != 0) record PCIe and USB 3.x wakes
	 * in ROUTER_CS_6. If the read fails we cannot tell anything, so
	 * return without notifying the PM core.
	 */
	if (tb_route(sw)) {
		if (tb_sw_read(sw, &val, TB_CFG_SWITCH, ROUTER_CS_6, 1))
			return;

		tb_sw_dbg(sw, "PCIe wake: %s, USB3 wake: %s\n",
			  (val & ROUTER_CS_6_WOPS) ? "yes" : "no",
			  (val & ROUTER_CS_6_WOUS) ? "yes" : "no");

		wakeup = val & (ROUTER_CS_6_WOPS | ROUTER_CS_6_WOUS);
	}

	/*
	 * Check for any downstream ports for USB4 wake,
	 * connection wake and disconnection wake.
	 */
	tb_switch_for_each_port(sw, port) {
		if (!port->cap_usb4)
			continue;

		if (tb_port_read(port, &val, TB_CFG_PORT,
				 port->cap_usb4 + PORT_CS_18, 1))
			break;

		tb_port_dbg(port, "USB4 wake: %s, connection wake: %s, disconnection wake: %s\n",
			    (val & PORT_CS_18_WOU4S) ? "yes" : "no",
			    (val & PORT_CS_18_WOCS) ? "yes" : "no",
			    (val & PORT_CS_18_WODS) ? "yes" : "no");

		wakeup_usb4 = val & (PORT_CS_18_WOU4S | PORT_CS_18_WOCS |
				     PORT_CS_18_WODS);

		/* Report the wake on the port device when it may wake up */
		usb4 = port->usb4;
		if (device_may_wakeup(&usb4->dev) && wakeup_usb4)
			pm_wakeup_event(&usb4->dev, 0);

		wakeup |= wakeup_usb4;
	}

	/* Finally report a wake event on the router itself */
	if (wakeup)
		pm_wakeup_event(&sw->dev, 0);
}
  172. static bool link_is_usb4(struct tb_port *port)
  173. {
  174. u32 val;
  175. if (!port->cap_usb4)
  176. return false;
  177. if (tb_port_read(port, &val, TB_CFG_PORT,
  178. port->cap_usb4 + PORT_CS_18, 1))
  179. return false;
  180. return !(val & PORT_CS_18_TCM);
  181. }
/**
 * usb4_switch_setup() - Additional setup for USB4 device
 * @sw: USB4 router to setup
 *
 * USB4 routers need additional settings in order to enable all the
 * tunneling. This function enables USB and PCIe tunneling if it can be
 * enabled (e.g the parent switch also supports them). If USB tunneling
 * is not available for some reason (like that there is Thunderbolt 3
 * switch upstream) then the internal xHCI controller is enabled
 * instead.
 *
 * This does not set the configuration valid bit of the router. To do
 * that call usb4_switch_configuration_valid().
 */
int usb4_switch_setup(struct tb_switch *sw)
{
	struct tb_switch *parent = tb_switch_parent(sw);
	struct tb_port *down;
	bool tbt3, xhci;
	u32 val = 0;
	int ret;

	/* Nothing to do for the host router */
	if (!tb_route(sw))
		return 0;

	ret = tb_sw_read(sw, &val, TB_CFG_SWITCH, ROUTER_CS_6, 1);
	if (ret)
		return ret;

	down = tb_switch_downstream_port(sw);
	sw->link_usb4 = link_is_usb4(down);
	tb_sw_dbg(sw, "link: %s\n", sw->link_usb4 ? "USB4" : "TBT");

	xhci = val & ROUTER_CS_6_HCI;
	tbt3 = !(val & ROUTER_CS_6_TNS);

	tb_sw_dbg(sw, "TBT3 support: %s, xHCI: %s\n",
		  tbt3 ? "yes" : "no", xhci ? "yes" : "no");

	ret = tb_sw_read(sw, &val, TB_CFG_SWITCH, ROUTER_CS_5, 1);
	if (ret)
		return ret;

	/* Enable USB 3.x tunneling only over a USB4 link with a capable parent */
	if (tb_acpi_may_tunnel_usb3() && sw->link_usb4 &&
	    tb_switch_find_port(parent, TB_TYPE_USB3_DOWN)) {
		val |= ROUTER_CS_5_UTO;
		xhci = false;
	}

	/*
	 * Only enable PCIe tunneling if the parent router supports it
	 * and it is not disabled.
	 */
	if (tb_acpi_may_tunnel_pcie() &&
	    tb_switch_find_port(parent, TB_TYPE_PCIE_DOWN)) {
		val |= ROUTER_CS_5_PTO;
		/*
		 * xHCI can be enabled if PCIe tunneling is supported
		 * and the parent does not have any USB3 downstream
		 * adapters (so we cannot do USB 3.x tunneling).
		 */
		if (xhci)
			val |= ROUTER_CS_5_HCO;
	}

	/* TBT3 supported by the CM */
	val &= ~ROUTER_CS_5_CNS;

	return tb_sw_write(sw, &val, TB_CFG_SWITCH, ROUTER_CS_5, 1);
}
  242. /**
  243. * usb4_switch_configuration_valid() - Set tunneling configuration to be valid
  244. * @sw: USB4 router
  245. *
  246. * Sets configuration valid bit for the router. Must be called before
  247. * any tunnels can be set through the router and after
  248. * usb4_switch_setup() has been called. Can be called to host and device
  249. * routers (does nothing for the latter).
  250. *
  251. * Returns %0 in success and negative errno otherwise.
  252. */
  253. int usb4_switch_configuration_valid(struct tb_switch *sw)
  254. {
  255. u32 val;
  256. int ret;
  257. if (!tb_route(sw))
  258. return 0;
  259. ret = tb_sw_read(sw, &val, TB_CFG_SWITCH, ROUTER_CS_5, 1);
  260. if (ret)
  261. return ret;
  262. val |= ROUTER_CS_5_CV;
  263. ret = tb_sw_write(sw, &val, TB_CFG_SWITCH, ROUTER_CS_5, 1);
  264. if (ret)
  265. return ret;
  266. return tb_switch_wait_for_bit(sw, ROUTER_CS_6, ROUTER_CS_6_CR,
  267. ROUTER_CS_6_CR, 50);
  268. }
  269. /**
  270. * usb4_switch_read_uid() - Read UID from USB4 router
  271. * @sw: USB4 router
  272. * @uid: UID is stored here
  273. *
  274. * Reads 64-bit UID from USB4 router config space.
  275. */
  276. int usb4_switch_read_uid(struct tb_switch *sw, u64 *uid)
  277. {
  278. return tb_sw_read(sw, uid, TB_CFG_SWITCH, ROUTER_CS_7, 2);
  279. }
  280. static int usb4_switch_drom_read_block(void *data,
  281. unsigned int dwaddress, void *buf,
  282. size_t dwords)
  283. {
  284. struct tb_switch *sw = data;
  285. u8 status = 0;
  286. u32 metadata;
  287. int ret;
  288. metadata = (dwords << USB4_DROM_SIZE_SHIFT) & USB4_DROM_SIZE_MASK;
  289. metadata |= (dwaddress << USB4_DROM_ADDRESS_SHIFT) &
  290. USB4_DROM_ADDRESS_MASK;
  291. ret = usb4_switch_op_data(sw, USB4_SWITCH_OP_DROM_READ, &metadata,
  292. &status, NULL, 0, buf, dwords);
  293. if (ret)
  294. return ret;
  295. return status ? -EIO : 0;
  296. }
  297. /**
  298. * usb4_switch_drom_read() - Read arbitrary bytes from USB4 router DROM
  299. * @sw: USB4 router
  300. * @address: Byte address inside DROM to start reading
  301. * @buf: Buffer where the DROM content is stored
  302. * @size: Number of bytes to read from DROM
  303. *
  304. * Uses USB4 router operations to read router DROM. For devices this
  305. * should always work but for hosts it may return %-EOPNOTSUPP in which
  306. * case the host router does not have DROM.
  307. */
  308. int usb4_switch_drom_read(struct tb_switch *sw, unsigned int address, void *buf,
  309. size_t size)
  310. {
  311. return tb_nvm_read_data(address, buf, size, USB4_DATA_RETRIES,
  312. usb4_switch_drom_read_block, sw);
  313. }
  314. /**
  315. * usb4_switch_lane_bonding_possible() - Are conditions met for lane bonding
  316. * @sw: USB4 router
  317. *
  318. * Checks whether conditions are met so that lane bonding can be
  319. * established with the upstream router. Call only for device routers.
  320. */
  321. bool usb4_switch_lane_bonding_possible(struct tb_switch *sw)
  322. {
  323. struct tb_port *up;
  324. int ret;
  325. u32 val;
  326. up = tb_upstream_port(sw);
  327. ret = tb_port_read(up, &val, TB_CFG_PORT, up->cap_usb4 + PORT_CS_18, 1);
  328. if (ret)
  329. return false;
  330. return !!(val & PORT_CS_18_BE);
  331. }
/**
 * usb4_switch_set_wake() - Enabled/disable wake
 * @sw: USB4 router
 * @flags: Wakeup flags (%0 to disable)
 * @runtime: Wake is being programmed during system runtime
 *
 * Enables/disables router to wake up from sleep.
 */
int usb4_switch_set_wake(struct tb_switch *sw, unsigned int flags, bool runtime)
{
	struct tb_port *port;
	u64 route = tb_route(sw);
	u32 val;
	int ret;

	/*
	 * Enable wakes coming from all USB4 downstream ports (from
	 * child routers). For device routers do this also for the
	 * upstream USB4 port.
	 */
	tb_switch_for_each_port(sw, port) {
		/* Only lane adapters can generate these wakes */
		if (!tb_port_is_null(port))
			continue;
		/* Skip the host router upstream port (route == 0) */
		if (!route && tb_is_upstream_port(port))
			continue;
		if (!port->cap_usb4)
			continue;

		ret = tb_port_read(port, &val, TB_CFG_PORT,
				   port->cap_usb4 + PORT_CS_19, 1);
		if (ret)
			return ret;

		/* Start from a clean slate for the wake bits */
		val &= ~(PORT_CS_19_WOC | PORT_CS_19_WOD | PORT_CS_19_WOU4);

		if (tb_is_upstream_port(port)) {
			val |= PORT_CS_19_WOU4;
		} else {
			bool configured = val & PORT_CS_19_PC;
			bool wakeup = runtime || device_may_wakeup(&port->usb4->dev);

			/* Connect wake only makes sense on unconfigured ports */
			if ((flags & TB_WAKE_ON_CONNECT) && wakeup && !configured)
				val |= PORT_CS_19_WOC;
			/* Disconnect wake only makes sense on configured ports */
			if ((flags & TB_WAKE_ON_DISCONNECT) && wakeup && configured)
				val |= PORT_CS_19_WOD;
			if ((flags & TB_WAKE_ON_USB4) && configured)
				val |= PORT_CS_19_WOU4;
		}

		ret = tb_port_write(port, &val, TB_CFG_PORT,
				    port->cap_usb4 + PORT_CS_19, 1);
		if (ret)
			return ret;
	}

	/*
	 * Enable wakes from PCIe, USB 3.x and DP on this router. Only
	 * needed for device routers.
	 */
	if (route) {
		ret = tb_sw_read(sw, &val, TB_CFG_SWITCH, ROUTER_CS_5, 1);
		if (ret)
			return ret;

		val &= ~(ROUTER_CS_5_WOP | ROUTER_CS_5_WOU | ROUTER_CS_5_WOD);

		if (flags & TB_WAKE_ON_USB3)
			val |= ROUTER_CS_5_WOU;
		if (flags & TB_WAKE_ON_PCIE)
			val |= ROUTER_CS_5_WOP;
		if (flags & TB_WAKE_ON_DP)
			val |= ROUTER_CS_5_WOD;

		ret = tb_sw_write(sw, &val, TB_CFG_SWITCH, ROUTER_CS_5, 1);
		if (ret)
			return ret;
	}

	return 0;
}
  401. /**
  402. * usb4_switch_set_sleep() - Prepare the router to enter sleep
  403. * @sw: USB4 router
  404. *
  405. * Sets sleep bit for the router. Returns when the router sleep ready
  406. * bit has been asserted.
  407. */
  408. int usb4_switch_set_sleep(struct tb_switch *sw)
  409. {
  410. int ret;
  411. u32 val;
  412. /* Set sleep bit and wait for sleep ready to be asserted */
  413. ret = tb_sw_read(sw, &val, TB_CFG_SWITCH, ROUTER_CS_5, 1);
  414. if (ret)
  415. return ret;
  416. val |= ROUTER_CS_5_SLP;
  417. ret = tb_sw_write(sw, &val, TB_CFG_SWITCH, ROUTER_CS_5, 1);
  418. if (ret)
  419. return ret;
  420. return tb_switch_wait_for_bit(sw, ROUTER_CS_6, ROUTER_CS_6_SLPR,
  421. ROUTER_CS_6_SLPR, 500);
  422. }
  423. /**
  424. * usb4_switch_nvm_sector_size() - Return router NVM sector size
  425. * @sw: USB4 router
  426. *
  427. * If the router supports NVM operations this function returns the NVM
  428. * sector size in bytes. If NVM operations are not supported returns
  429. * %-EOPNOTSUPP.
  430. */
  431. int usb4_switch_nvm_sector_size(struct tb_switch *sw)
  432. {
  433. u32 metadata;
  434. u8 status;
  435. int ret;
  436. ret = usb4_switch_op(sw, USB4_SWITCH_OP_NVM_SECTOR_SIZE, &metadata,
  437. &status);
  438. if (ret)
  439. return ret;
  440. if (status)
  441. return status == 0x2 ? -EOPNOTSUPP : -EIO;
  442. return metadata & USB4_NVM_SECTOR_SIZE_MASK;
  443. }
  444. static int usb4_switch_nvm_read_block(void *data,
  445. unsigned int dwaddress, void *buf, size_t dwords)
  446. {
  447. struct tb_switch *sw = data;
  448. u8 status = 0;
  449. u32 metadata;
  450. int ret;
  451. metadata = (dwords << USB4_NVM_READ_LENGTH_SHIFT) &
  452. USB4_NVM_READ_LENGTH_MASK;
  453. metadata |= (dwaddress << USB4_NVM_READ_OFFSET_SHIFT) &
  454. USB4_NVM_READ_OFFSET_MASK;
  455. ret = usb4_switch_op_data(sw, USB4_SWITCH_OP_NVM_READ, &metadata,
  456. &status, NULL, 0, buf, dwords);
  457. if (ret)
  458. return ret;
  459. return status ? -EIO : 0;
  460. }
  461. /**
  462. * usb4_switch_nvm_read() - Read arbitrary bytes from router NVM
  463. * @sw: USB4 router
  464. * @address: Starting address in bytes
  465. * @buf: Read data is placed here
  466. * @size: How many bytes to read
  467. *
  468. * Reads NVM contents of the router. If NVM is not supported returns
  469. * %-EOPNOTSUPP.
  470. */
  471. int usb4_switch_nvm_read(struct tb_switch *sw, unsigned int address, void *buf,
  472. size_t size)
  473. {
  474. return tb_nvm_read_data(address, buf, size, USB4_DATA_RETRIES,
  475. usb4_switch_nvm_read_block, sw);
  476. }
  477. /**
  478. * usb4_switch_nvm_set_offset() - Set NVM write offset
  479. * @sw: USB4 router
  480. * @address: Start offset
  481. *
  482. * Explicitly sets NVM write offset. Normally when writing to NVM this
  483. * is done automatically by usb4_switch_nvm_write().
  484. *
  485. * Returns %0 in success and negative errno if there was a failure.
  486. */
  487. int usb4_switch_nvm_set_offset(struct tb_switch *sw, unsigned int address)
  488. {
  489. u32 metadata, dwaddress;
  490. u8 status = 0;
  491. int ret;
  492. dwaddress = address / 4;
  493. metadata = (dwaddress << USB4_NVM_SET_OFFSET_SHIFT) &
  494. USB4_NVM_SET_OFFSET_MASK;
  495. ret = usb4_switch_op(sw, USB4_SWITCH_OP_NVM_SET_OFFSET, &metadata,
  496. &status);
  497. if (ret)
  498. return ret;
  499. return status ? -EIO : 0;
  500. }
  501. static int usb4_switch_nvm_write_next_block(void *data, unsigned int dwaddress,
  502. const void *buf, size_t dwords)
  503. {
  504. struct tb_switch *sw = data;
  505. u8 status;
  506. int ret;
  507. ret = usb4_switch_op_data(sw, USB4_SWITCH_OP_NVM_WRITE, NULL, &status,
  508. buf, dwords, NULL, 0);
  509. if (ret)
  510. return ret;
  511. return status ? -EIO : 0;
  512. }
  513. /**
  514. * usb4_switch_nvm_write() - Write to the router NVM
  515. * @sw: USB4 router
  516. * @address: Start address where to write in bytes
  517. * @buf: Pointer to the data to write
  518. * @size: Size of @buf in bytes
  519. *
  520. * Writes @buf to the router NVM using USB4 router operations. If NVM
  521. * write is not supported returns %-EOPNOTSUPP.
  522. */
  523. int usb4_switch_nvm_write(struct tb_switch *sw, unsigned int address,
  524. const void *buf, size_t size)
  525. {
  526. int ret;
  527. ret = usb4_switch_nvm_set_offset(sw, address);
  528. if (ret)
  529. return ret;
  530. return tb_nvm_write_data(address, buf, size, USB4_DATA_RETRIES,
  531. usb4_switch_nvm_write_next_block, sw);
  532. }
  533. /**
  534. * usb4_switch_nvm_authenticate() - Authenticate new NVM
  535. * @sw: USB4 router
  536. *
  537. * After the new NVM has been written via usb4_switch_nvm_write(), this
  538. * function triggers NVM authentication process. The router gets power
  539. * cycled and if the authentication is successful the new NVM starts
  540. * running. In case of failure returns negative errno.
  541. *
  542. * The caller should call usb4_switch_nvm_authenticate_status() to read
  543. * the status of the authentication after power cycle. It should be the
  544. * first router operation to avoid the status being lost.
  545. */
  546. int usb4_switch_nvm_authenticate(struct tb_switch *sw)
  547. {
  548. int ret;
  549. ret = usb4_switch_op(sw, USB4_SWITCH_OP_NVM_AUTH, NULL, NULL);
  550. switch (ret) {
  551. /*
  552. * The router is power cycled once NVM_AUTH is started so it is
  553. * expected to get any of the following errors back.
  554. */
  555. case -EACCES:
  556. case -ENOTCONN:
  557. case -ETIMEDOUT:
  558. return 0;
  559. default:
  560. return ret;
  561. }
  562. }
/**
 * usb4_switch_nvm_authenticate_status() - Read status of last NVM authenticate
 * @sw: USB4 router
 * @status: Status code of the operation
 *
 * The function checks if there is status available from the last NVM
 * authenticate router operation. If there is status then %0 is returned
 * and the status code is placed in @status. Returns negative errno in case
 * of failure.
 *
 * Must be called before any other router operation.
 */
int usb4_switch_nvm_authenticate_status(struct tb_switch *sw, u32 *status)
{
	const struct tb_cm_ops *cm_ops = sw->tb->cm_ops;
	u16 opcode;
	u32 val;
	int ret;

	/* Let the connection manager proxy handle this if it can */
	if (cm_ops->usb4_switch_nvm_authenticate_status) {
		ret = cm_ops->usb4_switch_nvm_authenticate_status(sw, status);
		if (ret != -EOPNOTSUPP)
			return ret;
	}

	ret = tb_sw_read(sw, &val, TB_CFG_SWITCH, ROUTER_CS_26, 1);
	if (ret)
		return ret;

	/* Check that the opcode is correct */
	opcode = val & ROUTER_CS_26_OPCODE_MASK;
	if (opcode == USB4_SWITCH_OP_NVM_AUTH) {
		/* OV still set means the operation has not finished yet */
		if (val & ROUTER_CS_26_OV)
			return -EBUSY;
		/* ONS set means the router did not support the operation */
		if (val & ROUTER_CS_26_ONS)
			return -EOPNOTSUPP;

		*status = (val & ROUTER_CS_26_STATUS_MASK) >>
			ROUTER_CS_26_STATUS_SHIFT;
	} else {
		/* Some other operation ran meanwhile; no status to report */
		*status = 0;
	}

	return 0;
}
  603. /**
  604. * usb4_switch_credits_init() - Read buffer allocation parameters
  605. * @sw: USB4 router
  606. *
  607. * Reads @sw buffer allocation parameters and initializes @sw buffer
  608. * allocation fields accordingly. Specifically @sw->credits_allocation
  609. * is set to %true if these parameters can be used in tunneling.
  610. *
  611. * Returns %0 on success and negative errno otherwise.
  612. */
  613. int usb4_switch_credits_init(struct tb_switch *sw)
  614. {
  615. int max_usb3, min_dp_aux, min_dp_main, max_pcie, max_dma;
  616. int ret, length, i, nports;
  617. const struct tb_port *port;
  618. u32 data[USB4_DATA_DWORDS];
  619. u32 metadata = 0;
  620. u8 status = 0;
  621. memset(data, 0, sizeof(data));
  622. ret = usb4_switch_op_data(sw, USB4_SWITCH_OP_BUFFER_ALLOC, &metadata,
  623. &status, NULL, 0, data, ARRAY_SIZE(data));
  624. if (ret)
  625. return ret;
  626. if (status)
  627. return -EIO;
  628. length = metadata & USB4_BA_LENGTH_MASK;
  629. if (WARN_ON(length > ARRAY_SIZE(data)))
  630. return -EMSGSIZE;
  631. max_usb3 = -1;
  632. min_dp_aux = -1;
  633. min_dp_main = -1;
  634. max_pcie = -1;
  635. max_dma = -1;
  636. tb_sw_dbg(sw, "credit allocation parameters:\n");
  637. for (i = 0; i < length; i++) {
  638. u16 index, value;
  639. index = data[i] & USB4_BA_INDEX_MASK;
  640. value = (data[i] & USB4_BA_VALUE_MASK) >> USB4_BA_VALUE_SHIFT;
  641. switch (index) {
  642. case USB4_BA_MAX_USB3:
  643. tb_sw_dbg(sw, " USB3: %u\n", value);
  644. max_usb3 = value;
  645. break;
  646. case USB4_BA_MIN_DP_AUX:
  647. tb_sw_dbg(sw, " DP AUX: %u\n", value);
  648. min_dp_aux = value;
  649. break;
  650. case USB4_BA_MIN_DP_MAIN:
  651. tb_sw_dbg(sw, " DP main: %u\n", value);
  652. min_dp_main = value;
  653. break;
  654. case USB4_BA_MAX_PCIE:
  655. tb_sw_dbg(sw, " PCIe: %u\n", value);
  656. max_pcie = value;
  657. break;
  658. case USB4_BA_MAX_HI:
  659. tb_sw_dbg(sw, " DMA: %u\n", value);
  660. max_dma = value;
  661. break;
  662. default:
  663. tb_sw_dbg(sw, " unknown credit allocation index %#x, skipping\n",
  664. index);
  665. break;
  666. }
  667. }
  668. /*
  669. * Validate the buffer allocation preferences. If we find
  670. * issues, log a warning and fall back using the hard-coded
  671. * values.
  672. */
  673. /* Host router must report baMaxHI */
  674. if (!tb_route(sw) && max_dma < 0) {
  675. tb_sw_warn(sw, "host router is missing baMaxHI\n");
  676. goto err_invalid;
  677. }
  678. nports = 0;
  679. tb_switch_for_each_port(sw, port) {
  680. if (tb_port_is_null(port))
  681. nports++;
  682. }
  683. /* Must have DP buffer allocation (multiple USB4 ports) */
  684. if (nports > 2 && (min_dp_aux < 0 || min_dp_main < 0)) {
  685. tb_sw_warn(sw, "multiple USB4 ports require baMinDPaux/baMinDPmain\n");
  686. goto err_invalid;
  687. }
  688. tb_switch_for_each_port(sw, port) {
  689. if (tb_port_is_dpout(port) && min_dp_main < 0) {
  690. tb_sw_warn(sw, "missing baMinDPmain");
  691. goto err_invalid;
  692. }
  693. if ((tb_port_is_dpin(port) || tb_port_is_dpout(port)) &&
  694. min_dp_aux < 0) {
  695. tb_sw_warn(sw, "missing baMinDPaux");
  696. goto err_invalid;
  697. }
  698. if ((tb_port_is_usb3_down(port) || tb_port_is_usb3_up(port)) &&
  699. max_usb3 < 0) {
  700. tb_sw_warn(sw, "missing baMaxUSB3");
  701. goto err_invalid;
  702. }
  703. if ((tb_port_is_pcie_down(port) || tb_port_is_pcie_up(port)) &&
  704. max_pcie < 0) {
  705. tb_sw_warn(sw, "missing baMaxPCIe");
  706. goto err_invalid;
  707. }
  708. }
  709. /*
  710. * Buffer allocation passed the validation so we can use it in
  711. * path creation.
  712. */
  713. sw->credit_allocation = true;
  714. if (max_usb3 > 0)
  715. sw->max_usb3_credits = max_usb3;
  716. if (min_dp_aux > 0)
  717. sw->min_dp_aux_credits = min_dp_aux;
  718. if (min_dp_main > 0)
  719. sw->min_dp_main_credits = min_dp_main;
  720. if (max_pcie > 0)
  721. sw->max_pcie_credits = max_pcie;
  722. if (max_dma > 0)
  723. sw->max_dma_credits = max_dma;
  724. return 0;
  725. err_invalid:
  726. return -EINVAL;
  727. }
  728. /**
  729. * usb4_switch_query_dp_resource() - Query availability of DP IN resource
  730. * @sw: USB4 router
  731. * @in: DP IN adapter
  732. *
  733. * For DP tunneling this function can be used to query availability of
  734. * DP IN resource. Returns true if the resource is available for DP
  735. * tunneling, false otherwise.
  736. */
  737. bool usb4_switch_query_dp_resource(struct tb_switch *sw, struct tb_port *in)
  738. {
  739. u32 metadata = in->port;
  740. u8 status;
  741. int ret;
  742. ret = usb4_switch_op(sw, USB4_SWITCH_OP_QUERY_DP_RESOURCE, &metadata,
  743. &status);
  744. /*
  745. * If DP resource allocation is not supported assume it is
  746. * always available.
  747. */
  748. if (ret == -EOPNOTSUPP)
  749. return true;
  750. if (ret)
  751. return false;
  752. return !status;
  753. }
  754. /**
  755. * usb4_switch_alloc_dp_resource() - Allocate DP IN resource
  756. * @sw: USB4 router
  757. * @in: DP IN adapter
  758. *
  759. * Allocates DP IN resource for DP tunneling using USB4 router
  760. * operations. If the resource was allocated returns %0. Otherwise
  761. * returns negative errno, in particular %-EBUSY if the resource is
  762. * already allocated.
  763. */
  764. int usb4_switch_alloc_dp_resource(struct tb_switch *sw, struct tb_port *in)
  765. {
  766. u32 metadata = in->port;
  767. u8 status;
  768. int ret;
  769. ret = usb4_switch_op(sw, USB4_SWITCH_OP_ALLOC_DP_RESOURCE, &metadata,
  770. &status);
  771. if (ret == -EOPNOTSUPP)
  772. return 0;
  773. if (ret)
  774. return ret;
  775. return status ? -EBUSY : 0;
  776. }
  777. /**
  778. * usb4_switch_dealloc_dp_resource() - Releases allocated DP IN resource
  779. * @sw: USB4 router
  780. * @in: DP IN adapter
  781. *
  782. * Releases the previously allocated DP IN resource.
  783. */
  784. int usb4_switch_dealloc_dp_resource(struct tb_switch *sw, struct tb_port *in)
  785. {
  786. u32 metadata = in->port;
  787. u8 status;
  788. int ret;
  789. ret = usb4_switch_op(sw, USB4_SWITCH_OP_DEALLOC_DP_RESOURCE, &metadata,
  790. &status);
  791. if (ret == -EOPNOTSUPP)
  792. return 0;
  793. if (ret)
  794. return ret;
  795. return status ? -EIO : 0;
  796. }
/*
 * Returns the zero-based index of @port among the downstream-facing
 * primary (lane 0) USB4 ports of @sw. Used to map a USB4 port to its
 * matching protocol adapter.
 */
static int usb4_port_idx(const struct tb_switch *sw, const struct tb_port *port)
{
	struct tb_port *p;
	int usb4_idx = 0;

	/* Assume port is primary */
	tb_switch_for_each_port(sw, p) {
		if (!tb_port_is_null(p))
			continue;
		if (tb_is_upstream_port(p))
			continue;
		/* Only lane 0 adapters count towards the index */
		if (!p->link_nr) {
			if (p == port)
				break;
			usb4_idx++;
		}
	}

	return usb4_idx;
}
  815. /**
  816. * usb4_switch_map_pcie_down() - Map USB4 port to a PCIe downstream adapter
  817. * @sw: USB4 router
  818. * @port: USB4 port
  819. *
  820. * USB4 routers have direct mapping between USB4 ports and PCIe
  821. * downstream adapters where the PCIe topology is extended. This
  822. * function returns the corresponding downstream PCIe adapter or %NULL
  823. * if no such mapping was possible.
  824. */
  825. struct tb_port *usb4_switch_map_pcie_down(struct tb_switch *sw,
  826. const struct tb_port *port)
  827. {
  828. int usb4_idx = usb4_port_idx(sw, port);
  829. struct tb_port *p;
  830. int pcie_idx = 0;
  831. /* Find PCIe down port matching usb4_port */
  832. tb_switch_for_each_port(sw, p) {
  833. if (!tb_port_is_pcie_down(p))
  834. continue;
  835. if (pcie_idx == usb4_idx)
  836. return p;
  837. pcie_idx++;
  838. }
  839. return NULL;
  840. }
  841. /**
  842. * usb4_switch_map_usb3_down() - Map USB4 port to a USB3 downstream adapter
  843. * @sw: USB4 router
  844. * @port: USB4 port
  845. *
  846. * USB4 routers have direct mapping between USB4 ports and USB 3.x
  847. * downstream adapters where the USB 3.x topology is extended. This
  848. * function returns the corresponding downstream USB 3.x adapter or
  849. * %NULL if no such mapping was possible.
  850. */
  851. struct tb_port *usb4_switch_map_usb3_down(struct tb_switch *sw,
  852. const struct tb_port *port)
  853. {
  854. int usb4_idx = usb4_port_idx(sw, port);
  855. struct tb_port *p;
  856. int usb_idx = 0;
  857. /* Find USB3 down port matching usb4_port */
  858. tb_switch_for_each_port(sw, p) {
  859. if (!tb_port_is_usb3_down(p))
  860. continue;
  861. if (usb_idx == usb4_idx)
  862. return p;
  863. usb_idx++;
  864. }
  865. return NULL;
  866. }
  867. /**
  868. * usb4_switch_add_ports() - Add USB4 ports for this router
  869. * @sw: USB4 router
  870. *
  871. * For USB4 router finds all USB4 ports and registers devices for each.
  872. * Can be called to any router.
  873. *
  874. * Return %0 in case of success and negative errno in case of failure.
  875. */
  876. int usb4_switch_add_ports(struct tb_switch *sw)
  877. {
  878. struct tb_port *port;
  879. if (tb_switch_is_icm(sw) || !tb_switch_is_usb4(sw))
  880. return 0;
  881. tb_switch_for_each_port(sw, port) {
  882. struct usb4_port *usb4;
  883. if (!tb_port_is_null(port))
  884. continue;
  885. if (!port->cap_usb4)
  886. continue;
  887. usb4 = usb4_port_device_add(port);
  888. if (IS_ERR(usb4)) {
  889. usb4_switch_remove_ports(sw);
  890. return PTR_ERR(usb4);
  891. }
  892. port->usb4 = usb4;
  893. }
  894. return 0;
  895. }
  896. /**
  897. * usb4_switch_remove_ports() - Removes USB4 ports from this router
  898. * @sw: USB4 router
  899. *
  900. * Unregisters previously registered USB4 ports.
  901. */
  902. void usb4_switch_remove_ports(struct tb_switch *sw)
  903. {
  904. struct tb_port *port;
  905. tb_switch_for_each_port(sw, port) {
  906. if (port->usb4) {
  907. usb4_port_device_remove(port->usb4);
  908. port->usb4 = NULL;
  909. }
  910. }
  911. }
  912. /**
  913. * usb4_port_unlock() - Unlock USB4 downstream port
  914. * @port: USB4 port to unlock
  915. *
  916. * Unlocks USB4 downstream port so that the connection manager can
  917. * access the router below this port.
  918. */
  919. int usb4_port_unlock(struct tb_port *port)
  920. {
  921. int ret;
  922. u32 val;
  923. ret = tb_port_read(port, &val, TB_CFG_PORT, ADP_CS_4, 1);
  924. if (ret)
  925. return ret;
  926. val &= ~ADP_CS_4_LCK;
  927. return tb_port_write(port, &val, TB_CFG_PORT, ADP_CS_4, 1);
  928. }
  929. /**
  930. * usb4_port_hotplug_enable() - Enables hotplug for a port
  931. * @port: USB4 port to operate on
  932. *
  933. * Enables hot plug events on a given port. This is only intended
  934. * to be used on lane, DP-IN, and DP-OUT adapters.
  935. */
  936. int usb4_port_hotplug_enable(struct tb_port *port)
  937. {
  938. int ret;
  939. u32 val;
  940. ret = tb_port_read(port, &val, TB_CFG_PORT, ADP_CS_5, 1);
  941. if (ret)
  942. return ret;
  943. val &= ~ADP_CS_5_DHP;
  944. return tb_port_write(port, &val, TB_CFG_PORT, ADP_CS_5, 1);
  945. }
/**
 * usb4_port_reset() - Issue downstream port reset
 * @port: USB4 port to reset
 *
 * Issues downstream port reset to @port.
 */
int usb4_port_reset(struct tb_port *port)
{
	int ret;
	u32 val;

	if (!port->cap_usb4)
		return -EINVAL;

	ret = tb_port_read(port, &val, TB_CFG_PORT,
			   port->cap_usb4 + PORT_CS_19, 1);
	if (ret)
		return ret;

	/* Assert the downstream port reset bit */
	val |= PORT_CS_19_DPR;

	ret = tb_port_write(port, &val, TB_CFG_PORT,
			    port->cap_usb4 + PORT_CS_19, 1);
	if (ret)
		return ret;

	/* Hold the reset asserted for 10 ms before releasing it */
	fsleep(10000);

	ret = tb_port_read(port, &val, TB_CFG_PORT,
			   port->cap_usb4 + PORT_CS_19, 1);
	if (ret)
		return ret;

	/* De-assert the reset bit again */
	val &= ~PORT_CS_19_DPR;

	return tb_port_write(port, &val, TB_CFG_PORT,
			     port->cap_usb4 + PORT_CS_19, 1);
}
  976. static int usb4_port_set_configured(struct tb_port *port, bool configured)
  977. {
  978. int ret;
  979. u32 val;
  980. if (!port->cap_usb4)
  981. return -EINVAL;
  982. ret = tb_port_read(port, &val, TB_CFG_PORT,
  983. port->cap_usb4 + PORT_CS_19, 1);
  984. if (ret)
  985. return ret;
  986. if (configured)
  987. val |= PORT_CS_19_PC;
  988. else
  989. val &= ~PORT_CS_19_PC;
  990. return tb_port_write(port, &val, TB_CFG_PORT,
  991. port->cap_usb4 + PORT_CS_19, 1);
  992. }
  993. /**
  994. * usb4_port_configure() - Set USB4 port configured
  995. * @port: USB4 router
  996. *
  997. * Sets the USB4 link to be configured for power management purposes.
  998. */
  999. int usb4_port_configure(struct tb_port *port)
  1000. {
  1001. return usb4_port_set_configured(port, true);
  1002. }
  1003. /**
  1004. * usb4_port_unconfigure() - Set USB4 port unconfigured
  1005. * @port: USB4 router
  1006. *
  1007. * Sets the USB4 link to be unconfigured for power management purposes.
  1008. */
  1009. void usb4_port_unconfigure(struct tb_port *port)
  1010. {
  1011. usb4_port_set_configured(port, false);
  1012. }
  1013. static int usb4_set_xdomain_configured(struct tb_port *port, bool configured)
  1014. {
  1015. int ret;
  1016. u32 val;
  1017. if (!port->cap_usb4)
  1018. return -EINVAL;
  1019. ret = tb_port_read(port, &val, TB_CFG_PORT,
  1020. port->cap_usb4 + PORT_CS_19, 1);
  1021. if (ret)
  1022. return ret;
  1023. if (configured)
  1024. val |= PORT_CS_19_PID;
  1025. else
  1026. val &= ~PORT_CS_19_PID;
  1027. return tb_port_write(port, &val, TB_CFG_PORT,
  1028. port->cap_usb4 + PORT_CS_19, 1);
  1029. }
  1030. /**
  1031. * usb4_port_configure_xdomain() - Configure port for XDomain
  1032. * @port: USB4 port connected to another host
  1033. * @xd: XDomain that is connected to the port
  1034. *
  1035. * Marks the USB4 port as being connected to another host and updates
  1036. * the link type. Returns %0 in success and negative errno in failure.
  1037. */
  1038. int usb4_port_configure_xdomain(struct tb_port *port, struct tb_xdomain *xd)
  1039. {
  1040. xd->link_usb4 = link_is_usb4(port);
  1041. return usb4_set_xdomain_configured(port, true);
  1042. }
  1043. /**
  1044. * usb4_port_unconfigure_xdomain() - Unconfigure port for XDomain
  1045. * @port: USB4 port that was connected to another host
  1046. *
  1047. * Clears USB4 port from being marked as XDomain.
  1048. */
  1049. void usb4_port_unconfigure_xdomain(struct tb_port *port)
  1050. {
  1051. usb4_set_xdomain_configured(port, false);
  1052. }
/*
 * Poll the port config register at @offset until the bits selected by
 * @bit match @value, sleeping @delay_usec between reads. Gives up with
 * -ETIMEDOUT after @timeout_msec. Read errors are returned as-is.
 */
static int usb4_port_wait_for_bit(struct tb_port *port, u32 offset, u32 bit,
				  u32 value, int timeout_msec, unsigned long delay_usec)
{
	ktime_t timeout = ktime_add_ms(ktime_get(), timeout_msec);

	do {
		u32 val;
		int ret;

		ret = tb_port_read(port, &val, TB_CFG_PORT, offset, 1);
		if (ret)
			return ret;

		if ((val & bit) == value)
			return 0;

		fsleep(delay_usec);
	} while (ktime_before(ktime_get(), timeout));

	return -ETIMEDOUT;
}
/* Read up to USB4_DATA_DWORDS of sideband data starting at PORT_CS_2 */
static int usb4_port_read_data(struct tb_port *port, void *data, size_t dwords)
{
	if (dwords > USB4_DATA_DWORDS)
		return -EINVAL;

	return tb_port_read(port, data, TB_CFG_PORT, port->cap_usb4 + PORT_CS_2,
			    dwords);
}
/* Write up to USB4_DATA_DWORDS of sideband data starting at PORT_CS_2 */
static int usb4_port_write_data(struct tb_port *port, const void *data,
				size_t dwords)
{
	if (dwords > USB4_DATA_DWORDS)
		return -EINVAL;

	return tb_port_write(port, data, TB_CFG_PORT, port->cap_usb4 + PORT_CS_2,
			     dwords);
}
/**
 * usb4_port_sb_read() - Read from sideband register
 * @port: USB4 port to read
 * @target: Sideband target
 * @index: Retimer index if @target is %USB4_SB_TARGET_RETIMER
 * @reg: Sideband register index
 * @buf: Buffer where the sideband data is copied
 * @size: Size of @buf
 *
 * Reads data from sideband register @reg and copies it into @buf.
 * Returns %0 in case of success and negative errno in case of failure.
 */
int usb4_port_sb_read(struct tb_port *port, enum usb4_sb_target target, u8 index,
		      u8 reg, void *buf, u8 size)
{
	size_t dwords = DIV_ROUND_UP(size, 4);
	int ret;
	u32 val;

	if (!port->cap_usb4)
		return -EINVAL;

	/* Build the request: register, length, target and optional retimer index */
	val = reg;
	val |= size << PORT_CS_1_LENGTH_SHIFT;
	val |= (target << PORT_CS_1_TARGET_SHIFT) & PORT_CS_1_TARGET_MASK;
	if (target == USB4_SB_TARGET_RETIMER)
		val |= (index << PORT_CS_1_RETIMER_INDEX_SHIFT);
	/* Setting the pending bit starts the transaction */
	val |= PORT_CS_1_PND;

	ret = tb_port_write(port, &val, TB_CFG_PORT,
			    port->cap_usb4 + PORT_CS_1, 1);
	if (ret)
		return ret;

	/* Wait until the hardware clears the pending bit */
	ret = usb4_port_wait_for_bit(port, port->cap_usb4 + PORT_CS_1,
				     PORT_CS_1_PND, 0, 500, USB4_PORT_SB_DELAY);
	if (ret)
		return ret;

	ret = tb_port_read(port, &val, TB_CFG_PORT,
			   port->cap_usb4 + PORT_CS_1, 1);
	if (ret)
		return ret;

	/* No response from the target */
	if (val & PORT_CS_1_NR)
		return -ENODEV;
	/* Transaction completed with an error */
	if (val & PORT_CS_1_RC)
		return -EIO;

	return buf ? usb4_port_read_data(port, buf, dwords) : 0;
}
/**
 * usb4_port_sb_write() - Write to sideband register
 * @port: USB4 port to write
 * @target: Sideband target
 * @index: Retimer index if @target is %USB4_SB_TARGET_RETIMER
 * @reg: Sideband register index
 * @buf: Data to write
 * @size: Size of @buf
 *
 * Writes @buf to sideband register @reg. Returns %0 in case of success
 * and negative errno in case of failure.
 */
int usb4_port_sb_write(struct tb_port *port, enum usb4_sb_target target,
		       u8 index, u8 reg, const void *buf, u8 size)
{
	size_t dwords = DIV_ROUND_UP(size, 4);
	int ret;
	u32 val;

	if (!port->cap_usb4)
		return -EINVAL;

	/* Stage the payload in the data registers first */
	if (buf) {
		ret = usb4_port_write_data(port, buf, dwords);
		if (ret)
			return ret;
	}

	/* Build the request: register, length, write flag and target */
	val = reg;
	val |= size << PORT_CS_1_LENGTH_SHIFT;
	val |= PORT_CS_1_WNR_WRITE;
	val |= (target << PORT_CS_1_TARGET_SHIFT) & PORT_CS_1_TARGET_MASK;
	if (target == USB4_SB_TARGET_RETIMER)
		val |= (index << PORT_CS_1_RETIMER_INDEX_SHIFT);
	/* Setting the pending bit starts the transaction */
	val |= PORT_CS_1_PND;

	ret = tb_port_write(port, &val, TB_CFG_PORT,
			    port->cap_usb4 + PORT_CS_1, 1);
	if (ret)
		return ret;

	/* Wait until the hardware clears the pending bit */
	ret = usb4_port_wait_for_bit(port, port->cap_usb4 + PORT_CS_1,
				     PORT_CS_1_PND, 0, 500, USB4_PORT_SB_DELAY);
	if (ret)
		return ret;

	ret = tb_port_read(port, &val, TB_CFG_PORT,
			   port->cap_usb4 + PORT_CS_1, 1);
	if (ret)
		return ret;

	/* No response from the target */
	if (val & PORT_CS_1_NR)
		return -ENODEV;
	/* Transaction completed with an error */
	if (val & PORT_CS_1_RC)
		return -EIO;

	return 0;
}
  1178. static int usb4_port_sb_opcode_err_to_errno(u32 val)
  1179. {
  1180. switch (val) {
  1181. case 0:
  1182. return 0;
  1183. case USB4_SB_OPCODE_ERR:
  1184. return -EAGAIN;
  1185. case USB4_SB_OPCODE_ONS:
  1186. return -EOPNOTSUPP;
  1187. default:
  1188. return -EIO;
  1189. }
  1190. }
/*
 * Run a sideband operation: write @opcode to USB4_SB_OPCODE and poll
 * the register until the target replaces it with a completion code or
 * @timeout_msec expires.
 */
static int usb4_port_sb_op(struct tb_port *port, enum usb4_sb_target target,
			   u8 index, enum usb4_sb_opcode opcode, int timeout_msec)
{
	ktime_t timeout;
	u32 val;
	int ret;

	val = opcode;
	ret = usb4_port_sb_write(port, target, index, USB4_SB_OPCODE, &val,
				 sizeof(val));
	if (ret)
		return ret;

	timeout = ktime_add_ms(ktime_get(), timeout_msec);

	do {
		/* Check results */
		ret = usb4_port_sb_read(port, target, index, USB4_SB_OPCODE,
					&val, sizeof(val));
		if (ret)
			return ret;

		/*
		 * While the opcode reads back unchanged the operation is
		 * still in progress; any other value is a completion code.
		 */
		if (val != opcode)
			return usb4_port_sb_opcode_err_to_errno(val);

		fsleep(USB4_PORT_SB_DELAY);
	} while (ktime_before(ktime_get(), timeout));

	return -ETIMEDOUT;
}
  1215. static int usb4_port_set_router_offline(struct tb_port *port, bool offline)
  1216. {
  1217. u32 val = !offline;
  1218. int ret;
  1219. ret = usb4_port_sb_write(port, USB4_SB_TARGET_ROUTER, 0,
  1220. USB4_SB_METADATA, &val, sizeof(val));
  1221. if (ret)
  1222. return ret;
  1223. val = USB4_SB_OPCODE_ROUTER_OFFLINE;
  1224. return usb4_port_sb_write(port, USB4_SB_TARGET_ROUTER, 0,
  1225. USB4_SB_OPCODE, &val, sizeof(val));
  1226. }
  1227. /**
  1228. * usb4_port_router_offline() - Put the USB4 port to offline mode
  1229. * @port: USB4 port
  1230. *
  1231. * This function puts the USB4 port into offline mode. In this mode the
  1232. * port does not react on hotplug events anymore. This needs to be
  1233. * called before retimer access is done when the USB4 links is not up.
  1234. *
  1235. * Returns %0 in case of success and negative errno if there was an
  1236. * error.
  1237. */
  1238. int usb4_port_router_offline(struct tb_port *port)
  1239. {
  1240. return usb4_port_set_router_offline(port, true);
  1241. }
  1242. /**
  1243. * usb4_port_router_online() - Put the USB4 port back to online
  1244. * @port: USB4 port
  1245. *
  1246. * Makes the USB4 port functional again.
  1247. */
  1248. int usb4_port_router_online(struct tb_port *port)
  1249. {
  1250. return usb4_port_set_router_offline(port, false);
  1251. }
  1252. /**
  1253. * usb4_port_enumerate_retimers() - Send RT broadcast transaction
  1254. * @port: USB4 port
  1255. *
  1256. * This forces the USB4 port to send broadcast RT transaction which
  1257. * makes the retimers on the link to assign index to themselves. Returns
  1258. * %0 in case of success and negative errno if there was an error.
  1259. */
  1260. int usb4_port_enumerate_retimers(struct tb_port *port)
  1261. {
  1262. u32 val;
  1263. val = USB4_SB_OPCODE_ENUMERATE_RETIMERS;
  1264. return usb4_port_sb_write(port, USB4_SB_TARGET_ROUTER, 0,
  1265. USB4_SB_OPCODE, &val, sizeof(val));
  1266. }
  1267. /**
  1268. * usb4_port_clx_supported() - Check if CLx is supported by the link
  1269. * @port: Port to check for CLx support for
  1270. *
  1271. * PORT_CS_18_CPS bit reflects if the link supports CLx including
  1272. * active cables (if connected on the link).
  1273. */
  1274. bool usb4_port_clx_supported(struct tb_port *port)
  1275. {
  1276. int ret;
  1277. u32 val;
  1278. ret = tb_port_read(port, &val, TB_CFG_PORT,
  1279. port->cap_usb4 + PORT_CS_18, 1);
  1280. if (ret)
  1281. return false;
  1282. return !!(val & PORT_CS_18_CPS);
  1283. }
  1284. /**
  1285. * usb4_port_asym_supported() - If the port supports asymmetric link
  1286. * @port: USB4 port
  1287. *
  1288. * Checks if the port and the cable supports asymmetric link and returns
  1289. * %true in that case.
  1290. */
  1291. bool usb4_port_asym_supported(struct tb_port *port)
  1292. {
  1293. u32 val;
  1294. if (!port->cap_usb4)
  1295. return false;
  1296. if (tb_port_read(port, &val, TB_CFG_PORT, port->cap_usb4 + PORT_CS_18, 1))
  1297. return false;
  1298. return !!(val & PORT_CS_18_CSA);
  1299. }
/**
 * usb4_port_asym_set_link_width() - Set link width to asymmetric or symmetric
 * @port: USB4 port
 * @width: Asymmetric width to configure
 *
 * Sets USB4 port link width to @width. Can be called for widths where
 * usb4_port_asym_width_supported() returned @true.
 */
int usb4_port_asym_set_link_width(struct tb_port *port, enum tb_link_width width)
{
	u32 val;
	int ret;

	if (!port->cap_phy)
		return -EINVAL;

	ret = tb_port_read(port, &val, TB_CFG_PORT,
			   port->cap_phy + LANE_ADP_CS_1, 1);
	if (ret)
		return ret;

	/* Replace the current target width field with the requested one */
	val &= ~LANE_ADP_CS_1_TARGET_WIDTH_ASYM_MASK;
	switch (width) {
	case TB_LINK_WIDTH_DUAL:
		val |= FIELD_PREP(LANE_ADP_CS_1_TARGET_WIDTH_ASYM_MASK,
				  LANE_ADP_CS_1_TARGET_WIDTH_ASYM_DUAL);
		break;
	case TB_LINK_WIDTH_ASYM_TX:
		val |= FIELD_PREP(LANE_ADP_CS_1_TARGET_WIDTH_ASYM_MASK,
				  LANE_ADP_CS_1_TARGET_WIDTH_ASYM_TX);
		break;
	case TB_LINK_WIDTH_ASYM_RX:
		val |= FIELD_PREP(LANE_ADP_CS_1_TARGET_WIDTH_ASYM_MASK,
				  LANE_ADP_CS_1_TARGET_WIDTH_ASYM_RX);
		break;
	default:
		/* Other widths cannot be requested through this register */
		return -EINVAL;
	}

	return tb_port_write(port, &val, TB_CFG_PORT,
			     port->cap_phy + LANE_ADP_CS_1, 1);
}
/**
 * usb4_port_asym_start() - Start symmetry change and wait for completion
 * @port: USB4 port
 *
 * Start symmetry change of the link to asymmetric or symmetric
 * (according to what was previously set in tb_port_set_link_width().
 * Wait for completion of the change.
 *
 * Returns %0 in case of success, %-ETIMEDOUT if case of timeout or
 * a negative errno in case of a failure.
 */
int usb4_port_asym_start(struct tb_port *port)
{
	int ret;
	u32 val;

	ret = tb_port_read(port, &val, TB_CFG_PORT,
			   port->cap_usb4 + PORT_CS_19, 1);
	if (ret)
		return ret;

	/* Write 1 to the start-asymmetry field to kick off the change */
	val &= ~PORT_CS_19_START_ASYM;
	val |= FIELD_PREP(PORT_CS_19_START_ASYM, 1);

	ret = tb_port_write(port, &val, TB_CFG_PORT,
			    port->cap_usb4 + PORT_CS_19, 1);
	if (ret)
		return ret;

	/*
	 * Wait for PORT_CS_19_START_ASYM to be 0. This means the USB4
	 * port started the symmetry transition.
	 */
	ret = usb4_port_wait_for_bit(port, port->cap_usb4 + PORT_CS_19,
				     PORT_CS_19_START_ASYM, 0, 1000,
				     USB4_PORT_DELAY);
	if (ret)
		return ret;

	/* Then wait for the transition to be completed */
	return usb4_port_wait_for_bit(port, port->cap_usb4 + PORT_CS_18,
				      PORT_CS_18_TIP, 0, 5000, USB4_PORT_DELAY);
}
  1376. /**
  1377. * usb4_port_margining_caps() - Read USB4 port marginig capabilities
  1378. * @port: USB4 port
  1379. * @target: Sideband target
  1380. * @index: Retimer index if taget is %USB4_SB_TARGET_RETIMER
  1381. * @caps: Array with at least two elements to hold the results
  1382. *
  1383. * Reads the USB4 port lane margining capabilities into @caps.
  1384. */
  1385. int usb4_port_margining_caps(struct tb_port *port, enum usb4_sb_target target,
  1386. u8 index, u32 *caps)
  1387. {
  1388. int ret;
  1389. ret = usb4_port_sb_op(port, target, index,
  1390. USB4_SB_OPCODE_READ_LANE_MARGINING_CAP, 500);
  1391. if (ret)
  1392. return ret;
  1393. return usb4_port_sb_read(port, target, index, USB4_SB_DATA, caps,
  1394. sizeof(*caps) * 2);
  1395. }
/**
 * usb4_port_hw_margin() - Run hardware lane margining on port
 * @port: USB4 port
 * @target: Sideband target
 * @index: Retimer index if @target is %USB4_SB_TARGET_RETIMER
 * @params: Parameters for USB4 hardware margining
 * @results: Array with at least two elements to hold the results
 *
 * Runs hardware lane margining on USB4 port and returns the result in
 * @results.
 */
int usb4_port_hw_margin(struct tb_port *port, enum usb4_sb_target target,
			u8 index, const struct usb4_port_margining_params *params,
			u32 *results)
{
	u32 val;
	int ret;

	if (WARN_ON_ONCE(!params))
		return -EINVAL;

	/* Pack the margining parameters into the metadata dword */
	val = params->lanes;
	if (params->time)
		val |= USB4_MARGIN_HW_TIME;
	if (params->right_high)
		val |= USB4_MARGIN_HW_RH;
	if (params->ber_level)
		val |= FIELD_PREP(USB4_MARGIN_HW_BER_MASK, params->ber_level);
	if (params->optional_voltage_offset_range)
		val |= USB4_MARGIN_HW_OPT_VOLTAGE;

	ret = usb4_port_sb_write(port, target, index, USB4_SB_METADATA, &val,
				 sizeof(val));
	if (ret)
		return ret;

	ret = usb4_port_sb_op(port, target, index,
			      USB4_SB_OPCODE_RUN_HW_LANE_MARGINING, 2500);
	if (ret)
		return ret;

	/* Results are returned as two data dwords */
	return usb4_port_sb_read(port, target, index, USB4_SB_DATA, results,
				 sizeof(*results) * 2);
}
/**
 * usb4_port_sw_margin() - Run software lane margining on port
 * @port: USB4 port
 * @target: Sideband target
 * @index: Retimer index if @target is %USB4_SB_TARGET_RETIMER
 * @params: Parameters for USB4 software margining
 * @results: Data word for the operation completion data
 *
 * Runs software lane margining on USB4 port. Read back the error
 * counters by calling usb4_port_sw_margin_errors(). Returns %0 in
 * success and negative errno otherwise.
 */
int usb4_port_sw_margin(struct tb_port *port, enum usb4_sb_target target,
			u8 index, const struct usb4_port_margining_params *params,
			u32 *results)
{
	u32 val;
	int ret;

	if (WARN_ON_ONCE(!params))
		return -EINVAL;

	/* Pack the margining parameters into the metadata dword */
	val = params->lanes;
	if (params->time)
		val |= USB4_MARGIN_SW_TIME;
	if (params->optional_voltage_offset_range)
		val |= USB4_MARGIN_SW_OPT_VOLTAGE;
	if (params->right_high)
		val |= USB4_MARGIN_SW_RH;
	val |= FIELD_PREP(USB4_MARGIN_SW_COUNTER_MASK, params->error_counter);
	val |= FIELD_PREP(USB4_MARGIN_SW_VT_MASK, params->voltage_time_offset);

	ret = usb4_port_sb_write(port, target, index, USB4_SB_METADATA, &val,
				 sizeof(val));
	if (ret)
		return ret;

	ret = usb4_port_sb_op(port, target, index,
			      USB4_SB_OPCODE_RUN_SW_LANE_MARGINING, 2500);
	if (ret)
		return ret;

	/* Completion data is a single dword */
	return usb4_port_sb_read(port, target, index, USB4_SB_DATA, results,
				 sizeof(*results));
}
  1475. /**
  1476. * usb4_port_sw_margin_errors() - Read the software margining error counters
  1477. * @port: USB4 port
  1478. * @target: Sideband target
  1479. * @index: Retimer index if taget is %USB4_SB_TARGET_RETIMER
  1480. * @errors: Error metadata is copied here.
  1481. *
  1482. * This reads back the software margining error counters from the port.
  1483. * Returns %0 in success and negative errno otherwise.
  1484. */
  1485. int usb4_port_sw_margin_errors(struct tb_port *port, enum usb4_sb_target target,
  1486. u8 index, u32 *errors)
  1487. {
  1488. int ret;
  1489. ret = usb4_port_sb_op(port, target, index,
  1490. USB4_SB_OPCODE_READ_SW_MARGIN_ERR, 150);
  1491. if (ret)
  1492. return ret;
  1493. return usb4_port_sb_read(port, target, index, USB4_SB_METADATA, errors,
  1494. sizeof(*errors));
  1495. }
  1496. static inline int usb4_port_retimer_op(struct tb_port *port, u8 index,
  1497. enum usb4_sb_opcode opcode,
  1498. int timeout_msec)
  1499. {
  1500. return usb4_port_sb_op(port, USB4_SB_TARGET_RETIMER, index, opcode,
  1501. timeout_msec);
  1502. }
/**
 * usb4_port_retimer_set_inbound_sbtx() - Enable sideband channel transactions
 * @port: USB4 port
 * @index: Retimer index
 *
 * Enables sideband channel transactions on SBTX. Can be used when USB4
 * link does not go up, for example if there is no device connected.
 */
int usb4_port_retimer_set_inbound_sbtx(struct tb_port *port, u8 index)
{
	int ret;

	ret = usb4_port_retimer_op(port, index, USB4_SB_OPCODE_SET_INBOUND_SBTX,
				   500);

	/* Anything other than "no response" is the final answer */
	if (ret != -ENODEV)
		return ret;

	/*
	 * Per the USB4 retimer spec, the retimer is not required to
	 * send an RT (Retimer Transaction) response for the first
	 * SET_INBOUND_SBTX command
	 */
	return usb4_port_retimer_op(port, index, USB4_SB_OPCODE_SET_INBOUND_SBTX,
				    500);
}
/**
 * usb4_port_retimer_unset_inbound_sbtx() - Disable sideband channel transactions
 * @port: USB4 port
 * @index: Retimer index
 *
 * Disables sideband channel transactions on SBTX. The reverse of
 * usb4_port_retimer_set_inbound_sbtx().
 */
int usb4_port_retimer_unset_inbound_sbtx(struct tb_port *port, u8 index)
{
	return usb4_port_retimer_op(port, index,
				    USB4_SB_OPCODE_UNSET_INBOUND_SBTX, 500);
}
  1539. /**
  1540. * usb4_port_retimer_is_last() - Is the retimer last on-board retimer
  1541. * @port: USB4 port
  1542. * @index: Retimer index
  1543. *
  1544. * If the retimer at @index is last one (connected directly to the
  1545. * Type-C port) this function returns %1. If it is not returns %0. If
  1546. * the retimer is not present returns %-ENODEV. Otherwise returns
  1547. * negative errno.
  1548. */
  1549. int usb4_port_retimer_is_last(struct tb_port *port, u8 index)
  1550. {
  1551. u32 metadata;
  1552. int ret;
  1553. ret = usb4_port_retimer_op(port, index, USB4_SB_OPCODE_QUERY_LAST_RETIMER,
  1554. 500);
  1555. if (ret)
  1556. return ret;
  1557. ret = usb4_port_sb_read(port, USB4_SB_TARGET_RETIMER, index,
  1558. USB4_SB_METADATA, &metadata, sizeof(metadata));
  1559. return ret ? ret : metadata & 1;
  1560. }
  1561. /**
  1562. * usb4_port_retimer_is_cable() - Is the retimer cable retimer
  1563. * @port: USB4 port
  1564. * @index: Retimer index
  1565. *
  1566. * If the retimer at @index is last cable retimer this function returns
  1567. * %1 and %0 if it is on-board retimer. In case a retimer is not present
  1568. * at @index returns %-ENODEV. Otherwise returns negative errno.
  1569. */
  1570. int usb4_port_retimer_is_cable(struct tb_port *port, u8 index)
  1571. {
  1572. u32 metadata;
  1573. int ret;
  1574. ret = usb4_port_retimer_op(port, index, USB4_SB_OPCODE_QUERY_CABLE_RETIMER,
  1575. 500);
  1576. if (ret)
  1577. return ret;
  1578. ret = usb4_port_sb_read(port, USB4_SB_TARGET_RETIMER, index,
  1579. USB4_SB_METADATA, &metadata, sizeof(metadata));
  1580. return ret ? ret : metadata & 1;
  1581. }
  1582. /**
  1583. * usb4_port_retimer_nvm_sector_size() - Read retimer NVM sector size
  1584. * @port: USB4 port
  1585. * @index: Retimer index
  1586. *
  1587. * Reads NVM sector size (in bytes) of a retimer at @index. This
  1588. * operation can be used to determine whether the retimer supports NVM
  1589. * upgrade for example. Returns sector size in bytes or negative errno
  1590. * in case of error. Specifically returns %-ENODEV if there is no
  1591. * retimer at @index.
  1592. */
  1593. int usb4_port_retimer_nvm_sector_size(struct tb_port *port, u8 index)
  1594. {
  1595. u32 metadata;
  1596. int ret;
  1597. ret = usb4_port_retimer_op(port, index, USB4_SB_OPCODE_GET_NVM_SECTOR_SIZE,
  1598. 500);
  1599. if (ret)
  1600. return ret;
  1601. ret = usb4_port_sb_read(port, USB4_SB_TARGET_RETIMER, index,
  1602. USB4_SB_METADATA, &metadata, sizeof(metadata));
  1603. return ret ? ret : metadata & USB4_NVM_SECTOR_SIZE_MASK;
  1604. }
/**
 * usb4_port_retimer_nvm_set_offset() - Set NVM write offset
 * @port: USB4 port
 * @index: Retimer index
 * @address: Start offset
 *
 * Explicitly sets NVM write offset. Normally when writing to NVM this is
 * done automatically by usb4_port_retimer_nvm_write().
 *
 * Returns %0 in success and negative errno if there was a failure.
 */
int usb4_port_retimer_nvm_set_offset(struct tb_port *port, u8 index,
				     unsigned int address)
{
	u32 metadata, dwaddress;
	int ret;

	/* The offset is programmed in dwords, not bytes */
	dwaddress = address / 4;
	metadata = (dwaddress << USB4_NVM_SET_OFFSET_SHIFT) &
		  USB4_NVM_SET_OFFSET_MASK;

	/* Write the offset to METADATA first, then issue the operation */
	ret = usb4_port_sb_write(port, USB4_SB_TARGET_RETIMER, index,
				 USB4_SB_METADATA, &metadata, sizeof(metadata));
	if (ret)
		return ret;

	return usb4_port_retimer_op(port, index, USB4_SB_OPCODE_NVM_SET_OFFSET,
				    500);
}
/* Context passed to the NVM block read/write callbacks below */
struct retimer_info {
	struct tb_port *port;	/* USB4 port the retimer sits behind */
	u8 index;		/* Retimer index on that port */
};
/*
 * Write one block of NVM data: push @dwords dwords from @buf into the
 * sideband DATA registers and then trigger the block write operation.
 * Used as the tb_nvm_write_data() callback; @data is a struct retimer_info.
 */
static int usb4_port_retimer_nvm_write_next_block(void *data,
	unsigned int dwaddress, const void *buf, size_t dwords)
{
	const struct retimer_info *info = data;
	struct tb_port *port = info->port;
	u8 index = info->index;
	int ret;

	ret = usb4_port_sb_write(port, USB4_SB_TARGET_RETIMER, index,
				 USB4_SB_DATA, buf, dwords * 4);
	if (ret)
		return ret;

	return usb4_port_retimer_op(port, index,
				    USB4_SB_OPCODE_NVM_BLOCK_WRITE, 1000);
}
/**
 * usb4_port_retimer_nvm_write() - Write to retimer NVM
 * @port: USB4 port
 * @index: Retimer index
 * @address: Byte address where to start the write
 * @buf: Data to write
 * @size: Size in bytes how much to write
 *
 * Writes @size bytes from @buf to the retimer NVM. Used for NVM
 * upgrade. Returns %0 if the data was written successfully and negative
 * errno in case of failure. Specifically returns %-ENODEV if there is
 * no retimer at @index.
 */
int usb4_port_retimer_nvm_write(struct tb_port *port, u8 index, unsigned int address,
				const void *buf, size_t size)
{
	struct retimer_info info = { .port = port, .index = index };
	int ret;

	/* Program the start offset once; blocks then stream sequentially */
	ret = usb4_port_retimer_nvm_set_offset(port, index, address);
	if (ret)
		return ret;

	return tb_nvm_write_data(address, buf, size, USB4_DATA_RETRIES,
				 usb4_port_retimer_nvm_write_next_block, &info);
}
/**
 * usb4_port_retimer_nvm_authenticate() - Start retimer NVM upgrade
 * @port: USB4 port
 * @index: Retimer index
 *
 * After the new NVM image has been written via usb4_port_retimer_nvm_write()
 * this function can be used to trigger the NVM upgrade process. If
 * successful the retimer restarts with the new NVM and may not have the
 * index set so one needs to call usb4_port_enumerate_retimers() to
 * force index to be assigned.
 */
int usb4_port_retimer_nvm_authenticate(struct tb_port *port, u8 index)
{
	u32 val;

	/*
	 * We need to use the raw operation here because once the
	 * authentication completes the retimer index is not set anymore
	 * so we do not get back the status now.
	 */
	val = USB4_SB_OPCODE_NVM_AUTH_WRITE;
	return usb4_port_sb_write(port, USB4_SB_TARGET_RETIMER, index,
				  USB4_SB_OPCODE, &val, sizeof(val));
}
/**
 * usb4_port_retimer_nvm_authenticate_status() - Read status of NVM upgrade
 * @port: USB4 port
 * @index: Retimer index
 * @status: Raw status code read from metadata
 *
 * This can be called after usb4_port_retimer_nvm_authenticate() and
 * usb4_port_enumerate_retimers() to fetch status of the NVM upgrade.
 *
 * Returns %0 if the authentication status was successfully read. The
 * completion metadata (the result) is then stored into @status. If
 * reading the status fails, returns negative errno.
 */
int usb4_port_retimer_nvm_authenticate_status(struct tb_port *port, u8 index,
					      u32 *status)
{
	u32 metadata, val;
	int ret;

	/* The opcode register holds the completion/error code of the op */
	ret = usb4_port_sb_read(port, USB4_SB_TARGET_RETIMER, index,
				USB4_SB_OPCODE, &val, sizeof(val));
	if (ret)
		return ret;

	ret = usb4_port_sb_opcode_err_to_errno(val);
	switch (ret) {
	case 0:
		/* Completed without an error code; report success */
		*status = 0;
		return 0;

	case -EAGAIN:
		/*
		 * A result code is available in METADATA; extract it and
		 * hand the raw value to the caller.
		 */
		ret = usb4_port_sb_read(port, USB4_SB_TARGET_RETIMER, index,
					USB4_SB_METADATA, &metadata,
					sizeof(metadata));
		if (ret)
			return ret;

		*status = metadata & USB4_SB_METADATA_NVM_AUTH_WRITE_MASK;
		return 0;

	default:
		return ret;
	}
}
/*
 * Read one block of retimer NVM: program the dword address (and length,
 * when shorter than a full data burst) into METADATA, run the NVM_READ
 * operation and copy the result from the sideband DATA registers.
 * Used as the tb_nvm_read_data() callback; @data is a struct retimer_info.
 */
static int usb4_port_retimer_nvm_read_block(void *data, unsigned int dwaddress,
					    void *buf, size_t dwords)
{
	const struct retimer_info *info = data;
	struct tb_port *port = info->port;
	u8 index = info->index;
	u32 metadata;
	int ret;

	metadata = dwaddress << USB4_NVM_READ_OFFSET_SHIFT;
	/* A full-sized burst needs no explicit length field */
	if (dwords < USB4_DATA_DWORDS)
		metadata |= dwords << USB4_NVM_READ_LENGTH_SHIFT;

	ret = usb4_port_sb_write(port, USB4_SB_TARGET_RETIMER, index,
				 USB4_SB_METADATA, &metadata, sizeof(metadata));
	if (ret)
		return ret;

	ret = usb4_port_retimer_op(port, index, USB4_SB_OPCODE_NVM_READ, 500);
	if (ret)
		return ret;

	return usb4_port_sb_read(port, USB4_SB_TARGET_RETIMER, index,
				 USB4_SB_DATA, buf, dwords * 4);
}
/**
 * usb4_port_retimer_nvm_read() - Read contents of retimer NVM
 * @port: USB4 port
 * @index: Retimer index
 * @address: NVM address (in bytes) to start reading
 * @buf: Data read from NVM is stored here
 * @size: Number of bytes to read
 *
 * Reads retimer NVM and copies the contents to @buf. Returns %0 if the
 * read was successful and negative errno in case of failure.
 * Specifically returns %-ENODEV if there is no retimer at @index.
 */
int usb4_port_retimer_nvm_read(struct tb_port *port, u8 index,
			       unsigned int address, void *buf, size_t size)
{
	struct retimer_info info = { .port = port, .index = index };

	return tb_nvm_read_data(address, buf, size, USB4_DATA_RETRIES,
				usb4_port_retimer_nvm_read_block, &info);
}
  1775. static inline unsigned int
  1776. usb4_usb3_port_max_bandwidth(const struct tb_port *port, unsigned int bw)
  1777. {
  1778. /* Take the possible bandwidth limitation into account */
  1779. if (port->max_bw)
  1780. return min(bw, port->max_bw);
  1781. return bw;
  1782. }
/**
 * usb4_usb3_port_max_link_rate() - Maximum supported USB3 link rate
 * @port: USB3 adapter port
 *
 * Return maximum supported link rate of a USB3 adapter in Mb/s.
 * Negative errno in case of error.
 */
int usb4_usb3_port_max_link_rate(struct tb_port *port)
{
	int ret, lr;
	u32 val;

	/* Only meaningful for USB3 upstream/downstream adapters */
	if (!tb_port_is_usb3_down(port) && !tb_port_is_usb3_up(port))
		return -EINVAL;

	ret = tb_port_read(port, &val, TB_CFG_PORT,
			   port->cap_adap + ADP_USB3_CS_4, 1);
	if (ret)
		return ret;

	/* Adapter advertises either 20 Gb/s or 10 Gb/s maximum link rate */
	lr = (val & ADP_USB3_CS_4_MSLR_MASK) >> ADP_USB3_CS_4_MSLR_SHIFT;
	ret = lr == ADP_USB3_CS_4_MSLR_20G ? 20000 : 10000;

	/* Apply any software-imposed per-port bandwidth cap */
	return usb4_usb3_port_max_bandwidth(port, ret);
}
/*
 * Set or clear the CM Request (CMR) bit on a host router USB3 downstream
 * adapter and wait until the host controller acknowledges (HCA) matching
 * state. Returns %0 on success and negative errno otherwise.
 */
static int usb4_usb3_port_cm_request(struct tb_port *port, bool request)
{
	int ret;
	u32 val;

	if (!tb_port_is_usb3_down(port))
		return -EINVAL;
	/* Only valid on the host router (route string 0) */
	if (tb_route(port->sw))
		return -EINVAL;

	ret = tb_port_read(port, &val, TB_CFG_PORT,
			   port->cap_adap + ADP_USB3_CS_2, 1);
	if (ret)
		return ret;

	if (request)
		val |= ADP_USB3_CS_2_CMR;
	else
		val &= ~ADP_USB3_CS_2_CMR;

	ret = tb_port_write(port, &val, TB_CFG_PORT,
			    port->cap_adap + ADP_USB3_CS_2, 1);
	if (ret)
		return ret;

	/*
	 * We can use val here directly as the CMR bit is in the same place
	 * as HCA. Just mask out others.
	 */
	val &= ADP_USB3_CS_2_CMR;
	return usb4_port_wait_for_bit(port, port->cap_adap + ADP_USB3_CS_1,
				      ADP_USB3_CS_1_HCA, val, 1500,
				      USB4_PORT_DELAY);
}
/* Assert the CM request before touching the bandwidth registers */
static inline int usb4_usb3_port_set_cm_request(struct tb_port *port)
{
	return usb4_usb3_port_cm_request(port, true);
}
/* Release the CM request once the register access is done */
static inline int usb4_usb3_port_clear_cm_request(struct tb_port *port)
{
	return usb4_usb3_port_cm_request(port, false);
}
  1841. static unsigned int usb3_bw_to_mbps(u32 bw, u8 scale)
  1842. {
  1843. unsigned long uframes;
  1844. uframes = bw * 512UL << scale;
  1845. return DIV_ROUND_CLOSEST(uframes * 8000, MEGA);
  1846. }
  1847. static u32 mbps_to_usb3_bw(unsigned int mbps, u8 scale)
  1848. {
  1849. unsigned long uframes;
  1850. /* 1 uframe is 1/8 ms (125 us) -> 1 / 8000 s */
  1851. uframes = ((unsigned long)mbps * MEGA) / 8000;
  1852. return DIV_ROUND_UP(uframes, 512UL << scale);
  1853. }
/*
 * Read the currently allocated upstream/downstream USB3 bandwidth
 * (converted to Mb/s using the programmed scale). Caller is expected to
 * hold the CM request (see usb4_usb3_port_set_cm_request()).
 */
static int usb4_usb3_port_read_allocated_bandwidth(struct tb_port *port,
						   int *upstream_bw,
						   int *downstream_bw)
{
	u32 val, bw, scale;
	int ret;

	ret = tb_port_read(port, &val, TB_CFG_PORT,
			   port->cap_adap + ADP_USB3_CS_2, 1);
	if (ret)
		return ret;

	ret = tb_port_read(port, &scale, TB_CFG_PORT,
			   port->cap_adap + ADP_USB3_CS_3, 1);
	if (ret)
		return ret;

	scale &= ADP_USB3_CS_3_SCALE_MASK;

	bw = val & ADP_USB3_CS_2_AUBW_MASK;
	*upstream_bw = usb3_bw_to_mbps(bw, scale);

	bw = (val & ADP_USB3_CS_2_ADBW_MASK) >> ADP_USB3_CS_2_ADBW_SHIFT;
	*downstream_bw = usb3_bw_to_mbps(bw, scale);

	return 0;
}
/**
 * usb4_usb3_port_allocated_bandwidth() - Bandwidth allocated for USB3
 * @port: USB3 adapter port
 * @upstream_bw: Allocated upstream bandwidth is stored here
 * @downstream_bw: Allocated downstream bandwidth is stored here
 *
 * Stores currently allocated USB3 bandwidth into @upstream_bw and
 * @downstream_bw in Mb/s. Returns %0 in case of success and negative
 * errno in failure.
 */
int usb4_usb3_port_allocated_bandwidth(struct tb_port *port, int *upstream_bw,
				       int *downstream_bw)
{
	int ret;

	/* Take the CM request for the duration of the register access */
	ret = usb4_usb3_port_set_cm_request(port);
	if (ret)
		return ret;

	ret = usb4_usb3_port_read_allocated_bandwidth(port, upstream_bw,
						      downstream_bw);
	usb4_usb3_port_clear_cm_request(port);

	return ret;
}
/*
 * Read the currently consumed upstream/downstream USB3 bandwidth
 * (converted to Mb/s using the programmed scale).
 */
static int usb4_usb3_port_read_consumed_bandwidth(struct tb_port *port,
						  int *upstream_bw,
						  int *downstream_bw)
{
	u32 val, bw, scale;
	int ret;

	ret = tb_port_read(port, &val, TB_CFG_PORT,
			   port->cap_adap + ADP_USB3_CS_1, 1);
	if (ret)
		return ret;

	ret = tb_port_read(port, &scale, TB_CFG_PORT,
			   port->cap_adap + ADP_USB3_CS_3, 1);
	if (ret)
		return ret;

	scale &= ADP_USB3_CS_3_SCALE_MASK;

	bw = val & ADP_USB3_CS_1_CUBW_MASK;
	*upstream_bw = usb3_bw_to_mbps(bw, scale);

	bw = (val & ADP_USB3_CS_1_CDBW_MASK) >> ADP_USB3_CS_1_CDBW_SHIFT;
	*downstream_bw = usb3_bw_to_mbps(bw, scale);

	return 0;
}
/*
 * Program new upstream/downstream allocated bandwidth (in Mb/s) into the
 * USB3 adapter. Picks the smallest scale for which both values fit the
 * 12-bit bandwidth fields (hence the < 4096 check).
 */
static int usb4_usb3_port_write_allocated_bandwidth(struct tb_port *port,
						    int upstream_bw,
						    int downstream_bw)
{
	u32 val, ubw, dbw, scale;
	int ret, max_bw;

	/* Figure out suitable scale */
	scale = 0;
	max_bw = max(upstream_bw, downstream_bw);
	while (scale < 64) {
		if (mbps_to_usb3_bw(max_bw, scale) < 4096)
			break;
		scale++;
	}

	if (WARN_ON(scale >= 64))
		return -EINVAL;

	/* The scale must be written before the bandwidth values */
	ret = tb_port_write(port, &scale, TB_CFG_PORT,
			    port->cap_adap + ADP_USB3_CS_3, 1);
	if (ret)
		return ret;

	ubw = mbps_to_usb3_bw(upstream_bw, scale);
	dbw = mbps_to_usb3_bw(downstream_bw, scale);

	tb_port_dbg(port, "scaled bandwidth %u/%u, scale %u\n", ubw, dbw, scale);

	ret = tb_port_read(port, &val, TB_CFG_PORT,
			   port->cap_adap + ADP_USB3_CS_2, 1);
	if (ret)
		return ret;

	val &= ~(ADP_USB3_CS_2_AUBW_MASK | ADP_USB3_CS_2_ADBW_MASK);
	val |= dbw << ADP_USB3_CS_2_ADBW_SHIFT;
	val |= ubw;

	return tb_port_write(port, &val, TB_CFG_PORT,
			     port->cap_adap + ADP_USB3_CS_2, 1);
}
/**
 * usb4_usb3_port_allocate_bandwidth() - Allocate bandwidth for USB3
 * @port: USB3 adapter port
 * @upstream_bw: New upstream bandwidth
 * @downstream_bw: New downstream bandwidth
 *
 * This can be used to set how much bandwidth is allocated for the USB3
 * tunneled isochronous traffic. @upstream_bw and @downstream_bw are the
 * new values programmed to the USB3 adapter allocation registers. If
 * the values are lower than what is currently consumed the allocation
 * is set to what is currently consumed instead (consumed bandwidth
 * cannot be taken away by CM). The actual new values are returned in
 * @upstream_bw and @downstream_bw.
 *
 * Returns %0 in case of success and negative errno if there was a
 * failure.
 */
int usb4_usb3_port_allocate_bandwidth(struct tb_port *port, int *upstream_bw,
				      int *downstream_bw)
{
	int ret, consumed_up, consumed_down, allocate_up, allocate_down;

	ret = usb4_usb3_port_set_cm_request(port);
	if (ret)
		return ret;

	ret = usb4_usb3_port_read_consumed_bandwidth(port, &consumed_up,
						     &consumed_down);
	if (ret)
		goto err_request;

	/* Don't allow it go lower than what is consumed */
	allocate_up = max(*upstream_bw, consumed_up);
	allocate_down = max(*downstream_bw, consumed_down);

	ret = usb4_usb3_port_write_allocated_bandwidth(port, allocate_up,
						       allocate_down);
	if (ret)
		goto err_request;

	/* Report back what was actually programmed */
	*upstream_bw = allocate_up;
	*downstream_bw = allocate_down;

err_request:
	/* Always drop the CM request, also on the error paths */
	usb4_usb3_port_clear_cm_request(port);
	return ret;
}
/**
 * usb4_usb3_port_release_bandwidth() - Release allocated USB3 bandwidth
 * @port: USB3 adapter port
 * @upstream_bw: New allocated upstream bandwidth
 * @downstream_bw: New allocated downstream bandwidth
 *
 * Releases USB3 allocated bandwidth down to what is actually consumed.
 * The new bandwidth is returned in @upstream_bw and @downstream_bw.
 *
 * Returns %0 in success and negative errno in case of failure.
 */
int usb4_usb3_port_release_bandwidth(struct tb_port *port, int *upstream_bw,
				     int *downstream_bw)
{
	int ret, consumed_up, consumed_down;

	ret = usb4_usb3_port_set_cm_request(port);
	if (ret)
		return ret;

	ret = usb4_usb3_port_read_consumed_bandwidth(port, &consumed_up,
						     &consumed_down);
	if (ret)
		goto err_request;

	/*
	 * Always keep 900 Mb/s to make sure xHCI has at least some
	 * bandwidth available for isochronous traffic.
	 */
	if (consumed_up < 900)
		consumed_up = 900;
	if (consumed_down < 900)
		consumed_down = 900;

	ret = usb4_usb3_port_write_allocated_bandwidth(port, consumed_up,
						       consumed_down);
	if (ret)
		goto err_request;

	*upstream_bw = consumed_up;
	*downstream_bw = consumed_down;

err_request:
	/* Drop the CM request on both the success and error paths */
	usb4_usb3_port_clear_cm_request(port);
	return ret;
}
  2032. static bool is_usb4_dpin(const struct tb_port *port)
  2033. {
  2034. if (!tb_port_is_dpin(port))
  2035. return false;
  2036. if (!tb_switch_is_usb4(port->sw))
  2037. return false;
  2038. return true;
  2039. }
/**
 * usb4_dp_port_set_cm_id() - Assign CM ID to the DP IN adapter
 * @port: DP IN adapter
 * @cm_id: CM ID to assign
 *
 * Sets CM ID for the @port. Returns %0 on success and negative errno
 * otherwise. Specifically returns %-EOPNOTSUPP if the @port does not
 * support this.
 */
int usb4_dp_port_set_cm_id(struct tb_port *port, int cm_id)
{
	u32 val;
	int ret;

	if (!is_usb4_dpin(port))
		return -EOPNOTSUPP;

	ret = tb_port_read(port, &val, TB_CFG_PORT,
			   port->cap_adap + ADP_DP_CS_2, 1);
	if (ret)
		return ret;

	/* Read-modify-write only the CM ID field */
	val &= ~ADP_DP_CS_2_CM_ID_MASK;
	val |= cm_id << ADP_DP_CS_2_CM_ID_SHIFT;

	return tb_port_write(port, &val, TB_CFG_PORT,
			     port->cap_adap + ADP_DP_CS_2, 1);
}
  2064. /**
  2065. * usb4_dp_port_bandwidth_mode_supported() - Is the bandwidth allocation mode
  2066. * supported
  2067. * @port: DP IN adapter to check
  2068. *
  2069. * Can be called to any DP IN adapter. Returns true if the adapter
  2070. * supports USB4 bandwidth allocation mode, false otherwise.
  2071. */
  2072. bool usb4_dp_port_bandwidth_mode_supported(struct tb_port *port)
  2073. {
  2074. int ret;
  2075. u32 val;
  2076. if (!is_usb4_dpin(port))
  2077. return false;
  2078. ret = tb_port_read(port, &val, TB_CFG_PORT,
  2079. port->cap_adap + DP_LOCAL_CAP, 1);
  2080. if (ret)
  2081. return false;
  2082. return !!(val & DP_COMMON_CAP_BW_MODE);
  2083. }
  2084. /**
  2085. * usb4_dp_port_bandwidth_mode_enabled() - Is the bandwidth allocation mode
  2086. * enabled
  2087. * @port: DP IN adapter to check
  2088. *
  2089. * Can be called to any DP IN adapter. Returns true if the bandwidth
  2090. * allocation mode has been enabled, false otherwise.
  2091. */
  2092. bool usb4_dp_port_bandwidth_mode_enabled(struct tb_port *port)
  2093. {
  2094. int ret;
  2095. u32 val;
  2096. if (!is_usb4_dpin(port))
  2097. return false;
  2098. ret = tb_port_read(port, &val, TB_CFG_PORT,
  2099. port->cap_adap + ADP_DP_CS_8, 1);
  2100. if (ret)
  2101. return false;
  2102. return !!(val & ADP_DP_CS_8_DPME);
  2103. }
/**
 * usb4_dp_port_set_cm_bandwidth_mode_supported() - Set/clear CM support for
 *						    bandwidth allocation mode
 * @port: DP IN adapter
 * @supported: Does the CM support bandwidth allocation mode
 *
 * Can be called to any DP IN adapter. Sets or clears the CM support bit
 * of the DP IN adapter. Returns %0 in success and negative errno
 * otherwise. Specifically returns %-EOPNOTSUPP if the passed in adapter
 * does not support this.
 */
int usb4_dp_port_set_cm_bandwidth_mode_supported(struct tb_port *port,
						 bool supported)
{
	u32 val;
	int ret;

	if (!is_usb4_dpin(port))
		return -EOPNOTSUPP;

	ret = tb_port_read(port, &val, TB_CFG_PORT,
			   port->cap_adap + ADP_DP_CS_2, 1);
	if (ret)
		return ret;

	/* Flip only the CM mode support (CMMS) bit */
	if (supported)
		val |= ADP_DP_CS_2_CMMS;
	else
		val &= ~ADP_DP_CS_2_CMMS;

	return tb_port_write(port, &val, TB_CFG_PORT,
			     port->cap_adap + ADP_DP_CS_2, 1);
}
  2133. /**
  2134. * usb4_dp_port_group_id() - Return Group ID assigned for the adapter
  2135. * @port: DP IN adapter
  2136. *
  2137. * Reads bandwidth allocation Group ID from the DP IN adapter and
  2138. * returns it. If the adapter does not support setting Group_ID
  2139. * %-EOPNOTSUPP is returned.
  2140. */
  2141. int usb4_dp_port_group_id(struct tb_port *port)
  2142. {
  2143. u32 val;
  2144. int ret;
  2145. if (!is_usb4_dpin(port))
  2146. return -EOPNOTSUPP;
  2147. ret = tb_port_read(port, &val, TB_CFG_PORT,
  2148. port->cap_adap + ADP_DP_CS_2, 1);
  2149. if (ret)
  2150. return ret;
  2151. return (val & ADP_DP_CS_2_GROUP_ID_MASK) >> ADP_DP_CS_2_GROUP_ID_SHIFT;
  2152. }
/**
 * usb4_dp_port_set_group_id() - Set adapter Group ID
 * @port: DP IN adapter
 * @group_id: Group ID for the adapter
 *
 * Sets bandwidth allocation mode Group ID for the DP IN adapter.
 * Returns %0 in case of success and negative errno otherwise.
 * Specifically returns %-EOPNOTSUPP if the adapter does not support
 * this.
 */
int usb4_dp_port_set_group_id(struct tb_port *port, int group_id)
{
	u32 val;
	int ret;

	if (!is_usb4_dpin(port))
		return -EOPNOTSUPP;

	ret = tb_port_read(port, &val, TB_CFG_PORT,
			   port->cap_adap + ADP_DP_CS_2, 1);
	if (ret)
		return ret;

	/* Read-modify-write only the Group ID field */
	val &= ~ADP_DP_CS_2_GROUP_ID_MASK;
	val |= group_id << ADP_DP_CS_2_GROUP_ID_SHIFT;

	return tb_port_write(port, &val, TB_CFG_PORT,
			     port->cap_adap + ADP_DP_CS_2, 1);
}
/**
 * usb4_dp_port_nrd() - Read non-reduced rate and lanes
 * @port: DP IN adapter
 * @rate: Non-reduced rate in Mb/s is placed here
 * @lanes: Non-reduced lanes are placed here
 *
 * Reads the non-reduced rate and lanes from the DP IN adapter. Returns
 * %0 in success and negative errno otherwise. Specifically returns
 * %-EOPNOTSUPP if the adapter does not support this.
 *
 * NOTE(review): an encoding outside the handled cases leaves @rate or
 * @lanes untouched — callers presumably initialize them; verify.
 */
int usb4_dp_port_nrd(struct tb_port *port, int *rate, int *lanes)
{
	u32 val, tmp;
	int ret;

	if (!is_usb4_dpin(port))
		return -EOPNOTSUPP;

	ret = tb_port_read(port, &val, TB_CFG_PORT,
			   port->cap_adap + ADP_DP_CS_2, 1);
	if (ret)
		return ret;

	/* Decode the non-reduced maximum link rate field */
	tmp = (val & ADP_DP_CS_2_NRD_MLR_MASK) >> ADP_DP_CS_2_NRD_MLR_SHIFT;
	switch (tmp) {
	case DP_COMMON_CAP_RATE_RBR:
		*rate = 1620;
		break;
	case DP_COMMON_CAP_RATE_HBR:
		*rate = 2700;
		break;
	case DP_COMMON_CAP_RATE_HBR2:
		*rate = 5400;
		break;
	case DP_COMMON_CAP_RATE_HBR3:
		*rate = 8100;
		break;
	}

	/* Decode the non-reduced maximum lane count field */
	tmp = val & ADP_DP_CS_2_NRD_MLC_MASK;
	switch (tmp) {
	case DP_COMMON_CAP_1_LANE:
		*lanes = 1;
		break;
	case DP_COMMON_CAP_2_LANES:
		*lanes = 2;
		break;
	case DP_COMMON_CAP_4_LANES:
		*lanes = 4;
		break;
	}

	return 0;
}
/**
 * usb4_dp_port_set_nrd() - Set non-reduced rate and lanes
 * @port: DP IN adapter
 * @rate: Non-reduced rate in Mb/s
 * @lanes: Non-reduced lanes
 *
 * Before the capabilities reduction this function can be used to set
 * the non-reduced values for the DP IN adapter. Returns %0 in success
 * and negative errno otherwise. If the adapter does not support this
 * %-EOPNOTSUPP is returned.
 */
int usb4_dp_port_set_nrd(struct tb_port *port, int rate, int lanes)
{
	u32 val;
	int ret;

	if (!is_usb4_dpin(port))
		return -EOPNOTSUPP;

	ret = tb_port_read(port, &val, TB_CFG_PORT,
			   port->cap_adap + ADP_DP_CS_2, 1);
	if (ret)
		return ret;

	/* RBR is encoding 0 so clearing the field is enough for 1620 */
	val &= ~ADP_DP_CS_2_NRD_MLR_MASK;
	switch (rate) {
	case 1620:
		break;
	case 2700:
		val |= (DP_COMMON_CAP_RATE_HBR << ADP_DP_CS_2_NRD_MLR_SHIFT)
			& ADP_DP_CS_2_NRD_MLR_MASK;
		break;
	case 5400:
		val |= (DP_COMMON_CAP_RATE_HBR2 << ADP_DP_CS_2_NRD_MLR_SHIFT)
			& ADP_DP_CS_2_NRD_MLR_MASK;
		break;
	case 8100:
		val |= (DP_COMMON_CAP_RATE_HBR3 << ADP_DP_CS_2_NRD_MLR_SHIFT)
			& ADP_DP_CS_2_NRD_MLR_MASK;
		break;
	default:
		return -EINVAL;
	}

	/* Likewise one lane is encoding 0 so clearing suffices for it */
	val &= ~ADP_DP_CS_2_NRD_MLC_MASK;
	switch (lanes) {
	case 1:
		break;
	case 2:
		val |= DP_COMMON_CAP_2_LANES;
		break;
	case 4:
		val |= DP_COMMON_CAP_4_LANES;
		break;
	default:
		return -EINVAL;
	}

	return tb_port_write(port, &val, TB_CFG_PORT,
			     port->cap_adap + ADP_DP_CS_2, 1);
}
  2283. /**
  2284. * usb4_dp_port_granularity() - Return granularity for the bandwidth values
  2285. * @port: DP IN adapter
  2286. *
  2287. * Reads the programmed granularity from @port. If the DP IN adapter does
  2288. * not support bandwidth allocation mode returns %-EOPNOTSUPP and negative
  2289. * errno in other error cases.
  2290. */
  2291. int usb4_dp_port_granularity(struct tb_port *port)
  2292. {
  2293. u32 val;
  2294. int ret;
  2295. if (!is_usb4_dpin(port))
  2296. return -EOPNOTSUPP;
  2297. ret = tb_port_read(port, &val, TB_CFG_PORT,
  2298. port->cap_adap + ADP_DP_CS_2, 1);
  2299. if (ret)
  2300. return ret;
  2301. val &= ADP_DP_CS_2_GR_MASK;
  2302. val >>= ADP_DP_CS_2_GR_SHIFT;
  2303. switch (val) {
  2304. case ADP_DP_CS_2_GR_0_25G:
  2305. return 250;
  2306. case ADP_DP_CS_2_GR_0_5G:
  2307. return 500;
  2308. case ADP_DP_CS_2_GR_1G:
  2309. return 1000;
  2310. }
  2311. return -EINVAL;
  2312. }
/**
 * usb4_dp_port_set_granularity() - Set granularity for the bandwidth values
 * @port: DP IN adapter
 * @granularity: Granularity in Mb/s. Supported values: 1000, 500 and 250.
 *
 * Sets the granularity used with the estimated, allocated and requested
 * bandwidth. Returns %0 in success and negative errno otherwise. If the
 * adapter does not support this %-EOPNOTSUPP is returned.
 */
int usb4_dp_port_set_granularity(struct tb_port *port, int granularity)
{
	u32 val;
	int ret;

	if (!is_usb4_dpin(port))
		return -EOPNOTSUPP;

	ret = tb_port_read(port, &val, TB_CFG_PORT,
			   port->cap_adap + ADP_DP_CS_2, 1);
	if (ret)
		return ret;

	/* Read-modify-write only the granularity (GR) field */
	val &= ~ADP_DP_CS_2_GR_MASK;

	switch (granularity) {
	case 250:
		val |= ADP_DP_CS_2_GR_0_25G << ADP_DP_CS_2_GR_SHIFT;
		break;
	case 500:
		val |= ADP_DP_CS_2_GR_0_5G << ADP_DP_CS_2_GR_SHIFT;
		break;
	case 1000:
		val |= ADP_DP_CS_2_GR_1G << ADP_DP_CS_2_GR_SHIFT;
		break;
	default:
		return -EINVAL;
	}

	return tb_port_write(port, &val, TB_CFG_PORT,
			     port->cap_adap + ADP_DP_CS_2, 1);
}
/**
 * usb4_dp_port_set_estimated_bandwidth() - Set estimated bandwidth
 * @port: DP IN adapter
 * @bw: Estimated bandwidth in Mb/s.
 *
 * Sets the estimated bandwidth to @bw. Set the granularity by calling
 * usb4_dp_port_set_granularity() before calling this. The @bw is round
 * down to the closest granularity multiplier. Returns %0 in success
 * and negative errno otherwise. Specifically returns %-EOPNOTSUPP if
 * the adapter does not support this.
 */
int usb4_dp_port_set_estimated_bandwidth(struct tb_port *port, int bw)
{
	u32 val, granularity;
	int ret;

	if (!is_usb4_dpin(port))
		return -EOPNOTSUPP;

	/* The register value is expressed in granularity units */
	ret = usb4_dp_port_granularity(port);
	if (ret < 0)
		return ret;
	granularity = ret;

	ret = tb_port_read(port, &val, TB_CFG_PORT,
			   port->cap_adap + ADP_DP_CS_2, 1);
	if (ret)
		return ret;

	/* Integer division rounds @bw down to the granularity multiple */
	val &= ~ADP_DP_CS_2_ESTIMATED_BW_MASK;
	val |= (bw / granularity) << ADP_DP_CS_2_ESTIMATED_BW_SHIFT;

	return tb_port_write(port, &val, TB_CFG_PORT,
			     port->cap_adap + ADP_DP_CS_2, 1);
}
/**
 * usb4_dp_port_allocated_bandwidth() - Return allocated bandwidth
 * @port: DP IN adapter
 *
 * Reads and returns allocated bandwidth for @port in Mb/s (taking into
 * account the programmed granularity). Returns negative errno in case
 * of error.
 */
int usb4_dp_port_allocated_bandwidth(struct tb_port *port)
{
	u32 val, granularity;
	int ret;

	if (!is_usb4_dpin(port))
		return -EOPNOTSUPP;

	/* The register value is expressed in granularity units */
	ret = usb4_dp_port_granularity(port);
	if (ret < 0)
		return ret;
	granularity = ret;

	ret = tb_port_read(port, &val, TB_CFG_PORT,
			   port->cap_adap + DP_STATUS, 1);
	if (ret)
		return ret;

	val &= DP_STATUS_ALLOCATED_BW_MASK;
	val >>= DP_STATUS_ALLOCATED_BW_SHIFT;

	/* Scale back to Mb/s */
	return val * granularity;
}
  2405. static int __usb4_dp_port_set_cm_ack(struct tb_port *port, bool ack)
  2406. {
  2407. u32 val;
  2408. int ret;
  2409. ret = tb_port_read(port, &val, TB_CFG_PORT,
  2410. port->cap_adap + ADP_DP_CS_2, 1);
  2411. if (ret)
  2412. return ret;
  2413. if (ack)
  2414. val |= ADP_DP_CS_2_CA;
  2415. else
  2416. val &= ~ADP_DP_CS_2_CA;
  2417. return tb_port_write(port, &val, TB_CFG_PORT,
  2418. port->cap_adap + ADP_DP_CS_2, 1);
  2419. }
/* Set the CM ack bit (ADP_DP_CS_2_CA) on the DP IN adapter */
static inline int usb4_dp_port_set_cm_ack(struct tb_port *port)
{
	return __usb4_dp_port_set_cm_ack(port, true);
}
  2424. static int usb4_dp_port_wait_and_clear_cm_ack(struct tb_port *port,
  2425. int timeout_msec)
  2426. {
  2427. ktime_t end;
  2428. u32 val;
  2429. int ret;
  2430. ret = __usb4_dp_port_set_cm_ack(port, false);
  2431. if (ret)
  2432. return ret;
  2433. end = ktime_add_ms(ktime_get(), timeout_msec);
  2434. do {
  2435. ret = tb_port_read(port, &val, TB_CFG_PORT,
  2436. port->cap_adap + ADP_DP_CS_8, 1);
  2437. if (ret)
  2438. return ret;
  2439. if (!(val & ADP_DP_CS_8_DR))
  2440. break;
  2441. usleep_range(50, 100);
  2442. } while (ktime_before(ktime_get(), end));
  2443. if (val & ADP_DP_CS_8_DR) {
  2444. tb_port_warn(port, "timeout waiting for DPTX request to clear\n");
  2445. return -ETIMEDOUT;
  2446. }
  2447. ret = tb_port_read(port, &val, TB_CFG_PORT,
  2448. port->cap_adap + ADP_DP_CS_2, 1);
  2449. if (ret)
  2450. return ret;
  2451. val &= ~ADP_DP_CS_2_CA;
  2452. return tb_port_write(port, &val, TB_CFG_PORT,
  2453. port->cap_adap + ADP_DP_CS_2, 1);
  2454. }
  2455. /**
  2456. * usb4_dp_port_allocate_bandwidth() - Set allocated bandwidth
  2457. * @port: DP IN adapter
  2458. * @bw: New allocated bandwidth in Mb/s
  2459. *
  2460. * Communicates the new allocated bandwidth with the DPCD (graphics
  2461. * driver). Takes into account the programmed granularity. Returns %0 in
  2462. * success and negative errno in case of error.
  2463. */
  2464. int usb4_dp_port_allocate_bandwidth(struct tb_port *port, int bw)
  2465. {
  2466. u32 val, granularity;
  2467. int ret;
  2468. if (!is_usb4_dpin(port))
  2469. return -EOPNOTSUPP;
  2470. ret = usb4_dp_port_granularity(port);
  2471. if (ret < 0)
  2472. return ret;
  2473. granularity = ret;
  2474. ret = tb_port_read(port, &val, TB_CFG_PORT,
  2475. port->cap_adap + DP_STATUS, 1);
  2476. if (ret)
  2477. return ret;
  2478. val &= ~DP_STATUS_ALLOCATED_BW_MASK;
  2479. val |= (bw / granularity) << DP_STATUS_ALLOCATED_BW_SHIFT;
  2480. ret = tb_port_write(port, &val, TB_CFG_PORT,
  2481. port->cap_adap + DP_STATUS, 1);
  2482. if (ret)
  2483. return ret;
  2484. ret = usb4_dp_port_set_cm_ack(port);
  2485. if (ret)
  2486. return ret;
  2487. return usb4_dp_port_wait_and_clear_cm_ack(port, 500);
  2488. }
  2489. /**
  2490. * usb4_dp_port_requested_bandwidth() - Read requested bandwidth
  2491. * @port: DP IN adapter
  2492. *
  2493. * Reads the DPCD (graphics driver) requested bandwidth and returns it
  2494. * in Mb/s. Takes the programmed granularity into account. In case of
  2495. * error returns negative errno. Specifically returns %-EOPNOTSUPP if
  2496. * the adapter does not support bandwidth allocation mode, and %ENODATA
  2497. * if there is no active bandwidth request from the graphics driver.
  2498. */
  2499. int usb4_dp_port_requested_bandwidth(struct tb_port *port)
  2500. {
  2501. u32 val, granularity;
  2502. int ret;
  2503. if (!is_usb4_dpin(port))
  2504. return -EOPNOTSUPP;
  2505. ret = usb4_dp_port_granularity(port);
  2506. if (ret < 0)
  2507. return ret;
  2508. granularity = ret;
  2509. ret = tb_port_read(port, &val, TB_CFG_PORT,
  2510. port->cap_adap + ADP_DP_CS_8, 1);
  2511. if (ret)
  2512. return ret;
  2513. if (!(val & ADP_DP_CS_8_DR))
  2514. return -ENODATA;
  2515. return (val & ADP_DP_CS_8_REQUESTED_BW_MASK) * granularity;
  2516. }
  2517. /**
  2518. * usb4_pci_port_set_ext_encapsulation() - Enable/disable extended encapsulation
  2519. * @port: PCIe adapter
  2520. * @enable: Enable/disable extended encapsulation
  2521. *
  2522. * Enables or disables extended encapsulation used in PCIe tunneling. Caller
  2523. * needs to make sure both adapters support this before enabling. Returns %0 on
  2524. * success and negative errno otherwise.
  2525. */
  2526. int usb4_pci_port_set_ext_encapsulation(struct tb_port *port, bool enable)
  2527. {
  2528. u32 val;
  2529. int ret;
  2530. if (!tb_port_is_pcie_up(port) && !tb_port_is_pcie_down(port))
  2531. return -EINVAL;
  2532. ret = tb_port_read(port, &val, TB_CFG_PORT,
  2533. port->cap_adap + ADP_PCIE_CS_1, 1);
  2534. if (ret)
  2535. return ret;
  2536. if (enable)
  2537. val |= ADP_PCIE_CS_1_EE;
  2538. else
  2539. val &= ~ADP_PCIE_CS_1_EE;
  2540. return tb_port_write(port, &val, TB_CFG_PORT,
  2541. port->cap_adap + ADP_PCIE_CS_1, 1);
  2542. }