test.c 87 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
37047057067077087097107117127137147157167177187197207217227237247257267277287297307317327337347357367377387397407417427437447457467477487497507517527537547557567577587597607617627637647657667677687697707717727737747757767777787797807817827837847857867877887897907917927937947957967977987998008018028038048058068078088098108118128138148158168178188198208218228238248258268278288298308318328338348358368378388398408418428438448458468478488498508518528538548558568578588598608618628638648658668678688698708718728738748758768778788798808818828838848858868878888898908918928938948958968978988999009019029039049059069079089099109119129139149159169179189199209219229239249259269279289299309319329339349359369379389399409419429439449459469479489499509519529539549559569579589599609619629639649659669679689699709719729739749759769779789799809819829839849859869879889899909919929939949959969979989991000100110021003100410051006100710081009101010111012101310141015101610171018101910201021102210231024102510261027102810291030103110321033103410351036103710381039104010411042104310441045104610471048104910501051105210531054105510561057105810591060106110621063106410651066106710681069107010711072107310741075107610771078107910801081108210831084108510861087108810891090109110921093109410951096109710981099110011011102110311041105110611071108110911101111111211131114111511161117111811191120112111221123112411251126112711281129113011311132113311341135113611371138113911401141114211431144114511461147114811491150115111521153115411551156115711581159116011611162116311641165116611671168116911701171117211731174117511761177117811791180118111821183118411851186118711881189119011911192119311941195119611971198119912001201120212031204120512061207120812091210121112121213121412151216121712181219122012211222122312241225122612271228122912301231123212331234123512361237123812391240124112421243124412451246124712481249125012511252125312541255125612571258125912601261126212631264126512661267126812691270127112721273127412751276127
71278127912801281128212831284128512861287128812891290129112921293129412951296129712981299130013011302130313041305130613071308130913101311131213131314131513161317131813191320132113221323132413251326132713281329133013311332133313341335133613371338133913401341134213431344134513461347134813491350135113521353135413551356135713581359136013611362136313641365136613671368136913701371137213731374137513761377137813791380138113821383138413851386138713881389139013911392139313941395139613971398139914001401140214031404140514061407140814091410141114121413141414151416141714181419142014211422142314241425142614271428142914301431143214331434143514361437143814391440144114421443144414451446144714481449145014511452145314541455145614571458145914601461146214631464146514661467146814691470147114721473147414751476147714781479148014811482148314841485148614871488148914901491149214931494149514961497149814991500150115021503150415051506150715081509151015111512151315141515151615171518151915201521152215231524152515261527152815291530153115321533153415351536153715381539154015411542154315441545154615471548154915501551155215531554155515561557155815591560156115621563156415651566156715681569157015711572157315741575157615771578157915801581158215831584158515861587158815891590159115921593159415951596159715981599160016011602160316041605160616071608160916101611161216131614161516161617161816191620162116221623162416251626162716281629163016311632163316341635163616371638163916401641164216431644164516461647164816491650165116521653165416551656165716581659166016611662166316641665166616671668166916701671167216731674167516761677167816791680168116821683168416851686168716881689169016911692169316941695169616971698169917001701170217031704170517061707170817091710171117121713171417151716171717181719172017211722172317241725172617271728172917301731173217331734173517361737173817391740174117421743174417451746174717481749175017511752175317541755175617571758175917601761176217631764176517661767176817691770177117721773177417751776177
71778177917801781178217831784178517861787178817891790179117921793179417951796179717981799180018011802180318041805180618071808180918101811181218131814181518161817181818191820182118221823182418251826182718281829183018311832183318341835183618371838183918401841184218431844184518461847184818491850185118521853185418551856185718581859186018611862186318641865186618671868186918701871187218731874187518761877187818791880188118821883188418851886188718881889189018911892189318941895189618971898189919001901190219031904190519061907190819091910191119121913191419151916191719181919192019211922192319241925192619271928192919301931193219331934193519361937193819391940194119421943194419451946194719481949195019511952195319541955195619571958195919601961196219631964196519661967196819691970197119721973197419751976197719781979198019811982198319841985198619871988198919901991199219931994199519961997199819992000200120022003200420052006200720082009201020112012201320142015201620172018201920202021202220232024202520262027202820292030203120322033203420352036203720382039204020412042204320442045204620472048204920502051205220532054205520562057205820592060206120622063206420652066206720682069207020712072207320742075207620772078207920802081208220832084208520862087208820892090209120922093209420952096209720982099210021012102210321042105210621072108210921102111211221132114211521162117211821192120212121222123212421252126212721282129213021312132213321342135213621372138213921402141214221432144214521462147214821492150215121522153215421552156215721582159216021612162216321642165216621672168216921702171217221732174217521762177217821792180218121822183218421852186218721882189219021912192219321942195219621972198219922002201220222032204220522062207220822092210221122122213221422152216221722182219222022212222222322242225222622272228222922302231223222332234223522362237223822392240224122422243224422452246224722482249225022512252225322542255225622572258225922602261226222632264226522662267226822692270227122722273227422752276227
72278227922802281228222832284228522862287228822892290229122922293229422952296229722982299230023012302230323042305230623072308230923102311231223132314231523162317231823192320232123222323232423252326232723282329233023312332233323342335233623372338233923402341234223432344234523462347234823492350235123522353235423552356235723582359236023612362236323642365236623672368236923702371237223732374237523762377237823792380238123822383238423852386238723882389239023912392239323942395239623972398239924002401240224032404240524062407240824092410241124122413241424152416241724182419242024212422242324242425242624272428242924302431243224332434243524362437243824392440244124422443244424452446244724482449245024512452245324542455245624572458245924602461246224632464246524662467246824692470247124722473247424752476247724782479248024812482248324842485248624872488248924902491249224932494249524962497249824992500250125022503250425052506250725082509251025112512251325142515251625172518251925202521252225232524252525262527252825292530253125322533253425352536253725382539254025412542254325442545254625472548254925502551255225532554255525562557255825592560256125622563256425652566256725682569257025712572257325742575257625772578257925802581258225832584258525862587258825892590259125922593259425952596259725982599260026012602260326042605260626072608260926102611261226132614261526162617261826192620262126222623262426252626262726282629263026312632263326342635263626372638263926402641264226432644264526462647264826492650265126522653265426552656265726582659266026612662266326642665266626672668266926702671267226732674267526762677267826792680268126822683268426852686268726882689269026912692269326942695269626972698269927002701270227032704270527062707270827092710271127122713271427152716271727182719272027212722272327242725272627272728272927302731273227332734273527362737273827392740274127422743274427452746274727482749275027512752275327542755275627572758275927602761276227632764276527662767276827692770277127722773277427752776277
7277827792780278127822783278427852786278727882789279027912792279327942795279627972798279928002801280228032804280528062807280828092810281128122813281428152816281728182819282028212822282328242825282628272828282928302831283228332834283528362837283828392840284128422843284428452846284728482849285028512852285328542855285628572858285928602861286228632864286528662867286828692870287128722873287428752876287728782879288028812882288328842885288628872888288928902891289228932894289528962897289828992900290129022903
  1. // SPDX-License-Identifier: GPL-2.0
  2. /*
 * KUnit tests for the Thunderbolt/USB4 driver building blocks
  4. *
  5. * Copyright (C) 2020, Intel Corporation
  6. * Author: Mika Westerberg <mika.westerberg@linux.intel.com>
  7. */
  8. #include <kunit/test.h>
  9. #include <linux/idr.h>
  10. #include "tb.h"
  11. #include "tunnel.h"
  12. static int __ida_init(struct kunit_resource *res, void *context)
  13. {
  14. struct ida *ida = context;
  15. ida_init(ida);
  16. res->data = ida;
  17. return 0;
  18. }
  19. static void __ida_destroy(struct kunit_resource *res)
  20. {
  21. struct ida *ida = res->data;
  22. ida_destroy(ida);
  23. }
/*
 * Initialize @ida and register it as a test-managed resource so that it
 * is automatically destroyed when @test finishes (see __ida_init() and
 * __ida_destroy() above). The return value of kunit_alloc_resource() is
 * deliberately ignored; on failure the test simply runs without the
 * registered cleanup.
 */
static void kunit_ida_init(struct kunit *test, struct ida *ida)
{
	kunit_alloc_resource(test, __ida_init, __ida_destroy, GFP_KERNEL, ida);
}
  28. static struct tb_switch *alloc_switch(struct kunit *test, u64 route,
  29. u8 upstream_port, u8 max_port_number)
  30. {
  31. struct tb_switch *sw;
  32. size_t size;
  33. int i;
  34. sw = kunit_kzalloc(test, sizeof(*sw), GFP_KERNEL);
  35. if (!sw)
  36. return NULL;
  37. sw->config.upstream_port_number = upstream_port;
  38. sw->config.depth = tb_route_length(route);
  39. sw->config.route_hi = upper_32_bits(route);
  40. sw->config.route_lo = lower_32_bits(route);
  41. sw->config.enabled = 0;
  42. sw->config.max_port_number = max_port_number;
  43. size = (sw->config.max_port_number + 1) * sizeof(*sw->ports);
  44. sw->ports = kunit_kzalloc(test, size, GFP_KERNEL);
  45. if (!sw->ports)
  46. return NULL;
  47. for (i = 0; i <= sw->config.max_port_number; i++) {
  48. sw->ports[i].sw = sw;
  49. sw->ports[i].port = i;
  50. sw->ports[i].config.port_number = i;
  51. if (i) {
  52. kunit_ida_init(test, &sw->ports[i].in_hopids);
  53. kunit_ida_init(test, &sw->ports[i].out_hopids);
  54. }
  55. }
  56. return sw;
  57. }
  58. static struct tb_switch *alloc_host(struct kunit *test)
  59. {
  60. struct tb_switch *sw;
  61. sw = alloc_switch(test, 0, 7, 13);
  62. if (!sw)
  63. return NULL;
  64. sw->config.vendor_id = 0x8086;
  65. sw->config.device_id = 0x9a1b;
  66. sw->ports[0].config.type = TB_TYPE_PORT;
  67. sw->ports[0].config.max_in_hop_id = 7;
  68. sw->ports[0].config.max_out_hop_id = 7;
  69. sw->ports[1].config.type = TB_TYPE_PORT;
  70. sw->ports[1].config.max_in_hop_id = 19;
  71. sw->ports[1].config.max_out_hop_id = 19;
  72. sw->ports[1].total_credits = 60;
  73. sw->ports[1].ctl_credits = 2;
  74. sw->ports[1].dual_link_port = &sw->ports[2];
  75. sw->ports[2].config.type = TB_TYPE_PORT;
  76. sw->ports[2].config.max_in_hop_id = 19;
  77. sw->ports[2].config.max_out_hop_id = 19;
  78. sw->ports[2].total_credits = 60;
  79. sw->ports[2].ctl_credits = 2;
  80. sw->ports[2].dual_link_port = &sw->ports[1];
  81. sw->ports[2].link_nr = 1;
  82. sw->ports[3].config.type = TB_TYPE_PORT;
  83. sw->ports[3].config.max_in_hop_id = 19;
  84. sw->ports[3].config.max_out_hop_id = 19;
  85. sw->ports[3].total_credits = 60;
  86. sw->ports[3].ctl_credits = 2;
  87. sw->ports[3].dual_link_port = &sw->ports[4];
  88. sw->ports[4].config.type = TB_TYPE_PORT;
  89. sw->ports[4].config.max_in_hop_id = 19;
  90. sw->ports[4].config.max_out_hop_id = 19;
  91. sw->ports[4].total_credits = 60;
  92. sw->ports[4].ctl_credits = 2;
  93. sw->ports[4].dual_link_port = &sw->ports[3];
  94. sw->ports[4].link_nr = 1;
  95. sw->ports[5].config.type = TB_TYPE_DP_HDMI_IN;
  96. sw->ports[5].config.max_in_hop_id = 9;
  97. sw->ports[5].config.max_out_hop_id = 9;
  98. sw->ports[5].cap_adap = -1;
  99. sw->ports[6].config.type = TB_TYPE_DP_HDMI_IN;
  100. sw->ports[6].config.max_in_hop_id = 9;
  101. sw->ports[6].config.max_out_hop_id = 9;
  102. sw->ports[6].cap_adap = -1;
  103. sw->ports[7].config.type = TB_TYPE_NHI;
  104. sw->ports[7].config.max_in_hop_id = 11;
  105. sw->ports[7].config.max_out_hop_id = 11;
  106. sw->ports[7].config.nfc_credits = 0x41800000;
  107. sw->ports[8].config.type = TB_TYPE_PCIE_DOWN;
  108. sw->ports[8].config.max_in_hop_id = 8;
  109. sw->ports[8].config.max_out_hop_id = 8;
  110. sw->ports[9].config.type = TB_TYPE_PCIE_DOWN;
  111. sw->ports[9].config.max_in_hop_id = 8;
  112. sw->ports[9].config.max_out_hop_id = 8;
  113. sw->ports[10].disabled = true;
  114. sw->ports[11].disabled = true;
  115. sw->ports[12].config.type = TB_TYPE_USB3_DOWN;
  116. sw->ports[12].config.max_in_hop_id = 8;
  117. sw->ports[12].config.max_out_hop_id = 8;
  118. sw->ports[13].config.type = TB_TYPE_USB3_DOWN;
  119. sw->ports[13].config.max_in_hop_id = 8;
  120. sw->ports[13].config.max_out_hop_id = 8;
  121. return sw;
  122. }
  123. static struct tb_switch *alloc_host_usb4(struct kunit *test)
  124. {
  125. struct tb_switch *sw;
  126. sw = alloc_host(test);
  127. if (!sw)
  128. return NULL;
  129. sw->generation = 4;
  130. sw->credit_allocation = true;
  131. sw->max_usb3_credits = 32;
  132. sw->min_dp_aux_credits = 1;
  133. sw->min_dp_main_credits = 0;
  134. sw->max_pcie_credits = 64;
  135. sw->max_dma_credits = 14;
  136. return sw;
  137. }
  138. static struct tb_switch *alloc_host_br(struct kunit *test)
  139. {
  140. struct tb_switch *sw;
  141. sw = alloc_host_usb4(test);
  142. if (!sw)
  143. return NULL;
  144. sw->ports[10].config.type = TB_TYPE_DP_HDMI_IN;
  145. sw->ports[10].config.max_in_hop_id = 9;
  146. sw->ports[10].config.max_out_hop_id = 9;
  147. sw->ports[10].cap_adap = -1;
  148. sw->ports[10].disabled = false;
  149. return sw;
  150. }
  151. static struct tb_switch *alloc_dev_default(struct kunit *test,
  152. struct tb_switch *parent,
  153. u64 route, bool bonded)
  154. {
  155. struct tb_port *port, *upstream_port;
  156. struct tb_switch *sw;
  157. sw = alloc_switch(test, route, 1, 19);
  158. if (!sw)
  159. return NULL;
  160. sw->config.vendor_id = 0x8086;
  161. sw->config.device_id = 0x15ef;
  162. sw->ports[0].config.type = TB_TYPE_PORT;
  163. sw->ports[0].config.max_in_hop_id = 8;
  164. sw->ports[0].config.max_out_hop_id = 8;
  165. sw->ports[1].config.type = TB_TYPE_PORT;
  166. sw->ports[1].config.max_in_hop_id = 19;
  167. sw->ports[1].config.max_out_hop_id = 19;
  168. sw->ports[1].total_credits = 60;
  169. sw->ports[1].ctl_credits = 2;
  170. sw->ports[1].dual_link_port = &sw->ports[2];
  171. sw->ports[2].config.type = TB_TYPE_PORT;
  172. sw->ports[2].config.max_in_hop_id = 19;
  173. sw->ports[2].config.max_out_hop_id = 19;
  174. sw->ports[2].total_credits = 60;
  175. sw->ports[2].ctl_credits = 2;
  176. sw->ports[2].dual_link_port = &sw->ports[1];
  177. sw->ports[2].link_nr = 1;
  178. sw->ports[3].config.type = TB_TYPE_PORT;
  179. sw->ports[3].config.max_in_hop_id = 19;
  180. sw->ports[3].config.max_out_hop_id = 19;
  181. sw->ports[3].total_credits = 60;
  182. sw->ports[3].ctl_credits = 2;
  183. sw->ports[3].dual_link_port = &sw->ports[4];
  184. sw->ports[4].config.type = TB_TYPE_PORT;
  185. sw->ports[4].config.max_in_hop_id = 19;
  186. sw->ports[4].config.max_out_hop_id = 19;
  187. sw->ports[4].total_credits = 60;
  188. sw->ports[4].ctl_credits = 2;
  189. sw->ports[4].dual_link_port = &sw->ports[3];
  190. sw->ports[4].link_nr = 1;
  191. sw->ports[5].config.type = TB_TYPE_PORT;
  192. sw->ports[5].config.max_in_hop_id = 19;
  193. sw->ports[5].config.max_out_hop_id = 19;
  194. sw->ports[5].total_credits = 60;
  195. sw->ports[5].ctl_credits = 2;
  196. sw->ports[5].dual_link_port = &sw->ports[6];
  197. sw->ports[6].config.type = TB_TYPE_PORT;
  198. sw->ports[6].config.max_in_hop_id = 19;
  199. sw->ports[6].config.max_out_hop_id = 19;
  200. sw->ports[6].total_credits = 60;
  201. sw->ports[6].ctl_credits = 2;
  202. sw->ports[6].dual_link_port = &sw->ports[5];
  203. sw->ports[6].link_nr = 1;
  204. sw->ports[7].config.type = TB_TYPE_PORT;
  205. sw->ports[7].config.max_in_hop_id = 19;
  206. sw->ports[7].config.max_out_hop_id = 19;
  207. sw->ports[7].total_credits = 60;
  208. sw->ports[7].ctl_credits = 2;
  209. sw->ports[7].dual_link_port = &sw->ports[8];
  210. sw->ports[8].config.type = TB_TYPE_PORT;
  211. sw->ports[8].config.max_in_hop_id = 19;
  212. sw->ports[8].config.max_out_hop_id = 19;
  213. sw->ports[8].total_credits = 60;
  214. sw->ports[8].ctl_credits = 2;
  215. sw->ports[8].dual_link_port = &sw->ports[7];
  216. sw->ports[8].link_nr = 1;
  217. sw->ports[9].config.type = TB_TYPE_PCIE_UP;
  218. sw->ports[9].config.max_in_hop_id = 8;
  219. sw->ports[9].config.max_out_hop_id = 8;
  220. sw->ports[10].config.type = TB_TYPE_PCIE_DOWN;
  221. sw->ports[10].config.max_in_hop_id = 8;
  222. sw->ports[10].config.max_out_hop_id = 8;
  223. sw->ports[11].config.type = TB_TYPE_PCIE_DOWN;
  224. sw->ports[11].config.max_in_hop_id = 8;
  225. sw->ports[11].config.max_out_hop_id = 8;
  226. sw->ports[12].config.type = TB_TYPE_PCIE_DOWN;
  227. sw->ports[12].config.max_in_hop_id = 8;
  228. sw->ports[12].config.max_out_hop_id = 8;
  229. sw->ports[13].config.type = TB_TYPE_DP_HDMI_OUT;
  230. sw->ports[13].config.max_in_hop_id = 9;
  231. sw->ports[13].config.max_out_hop_id = 9;
  232. sw->ports[13].cap_adap = -1;
  233. sw->ports[14].config.type = TB_TYPE_DP_HDMI_OUT;
  234. sw->ports[14].config.max_in_hop_id = 9;
  235. sw->ports[14].config.max_out_hop_id = 9;
  236. sw->ports[14].cap_adap = -1;
  237. sw->ports[15].disabled = true;
  238. sw->ports[16].config.type = TB_TYPE_USB3_UP;
  239. sw->ports[16].config.max_in_hop_id = 8;
  240. sw->ports[16].config.max_out_hop_id = 8;
  241. sw->ports[17].config.type = TB_TYPE_USB3_DOWN;
  242. sw->ports[17].config.max_in_hop_id = 8;
  243. sw->ports[17].config.max_out_hop_id = 8;
  244. sw->ports[18].config.type = TB_TYPE_USB3_DOWN;
  245. sw->ports[18].config.max_in_hop_id = 8;
  246. sw->ports[18].config.max_out_hop_id = 8;
  247. sw->ports[19].config.type = TB_TYPE_USB3_DOWN;
  248. sw->ports[19].config.max_in_hop_id = 8;
  249. sw->ports[19].config.max_out_hop_id = 8;
  250. if (!parent)
  251. return sw;
  252. /* Link them */
  253. upstream_port = tb_upstream_port(sw);
  254. port = tb_port_at(route, parent);
  255. port->remote = upstream_port;
  256. upstream_port->remote = port;
  257. if (port->dual_link_port && upstream_port->dual_link_port) {
  258. port->dual_link_port->remote = upstream_port->dual_link_port;
  259. upstream_port->dual_link_port->remote = port->dual_link_port;
  260. if (bonded) {
  261. /* Bonding is used */
  262. port->bonded = true;
  263. port->total_credits *= 2;
  264. port->dual_link_port->bonded = true;
  265. port->dual_link_port->total_credits = 0;
  266. upstream_port->bonded = true;
  267. upstream_port->total_credits *= 2;
  268. upstream_port->dual_link_port->bonded = true;
  269. upstream_port->dual_link_port->total_credits = 0;
  270. }
  271. }
  272. return sw;
  273. }
  274. static struct tb_switch *alloc_dev_with_dpin(struct kunit *test,
  275. struct tb_switch *parent,
  276. u64 route, bool bonded)
  277. {
  278. struct tb_switch *sw;
  279. sw = alloc_dev_default(test, parent, route, bonded);
  280. if (!sw)
  281. return NULL;
  282. sw->ports[13].config.type = TB_TYPE_DP_HDMI_IN;
  283. sw->ports[13].config.max_in_hop_id = 9;
  284. sw->ports[13].config.max_out_hop_id = 9;
  285. sw->ports[14].config.type = TB_TYPE_DP_HDMI_IN;
  286. sw->ports[14].config.max_in_hop_id = 9;
  287. sw->ports[14].config.max_out_hop_id = 9;
  288. return sw;
  289. }
  290. static struct tb_switch *alloc_dev_without_dp(struct kunit *test,
  291. struct tb_switch *parent,
  292. u64 route, bool bonded)
  293. {
  294. struct tb_switch *sw;
  295. int i;
  296. sw = alloc_dev_default(test, parent, route, bonded);
  297. if (!sw)
  298. return NULL;
  299. /*
  300. * Device with:
  301. * 2x USB4 Adapters (adapters 1,2 and 3,4),
  302. * 1x PCIe Upstream (adapter 9),
  303. * 1x PCIe Downstream (adapter 10),
  304. * 1x USB3 Upstream (adapter 16),
  305. * 1x USB3 Downstream (adapter 17)
  306. */
  307. for (i = 5; i <= 8; i++)
  308. sw->ports[i].disabled = true;
  309. for (i = 11; i <= 14; i++)
  310. sw->ports[i].disabled = true;
  311. sw->ports[13].cap_adap = 0;
  312. sw->ports[14].cap_adap = 0;
  313. for (i = 18; i <= 19; i++)
  314. sw->ports[i].disabled = true;
  315. sw->generation = 4;
  316. sw->credit_allocation = true;
  317. sw->max_usb3_credits = 109;
  318. sw->min_dp_aux_credits = 0;
  319. sw->min_dp_main_credits = 0;
  320. sw->max_pcie_credits = 30;
  321. sw->max_dma_credits = 1;
  322. return sw;
  323. }
  324. static struct tb_switch *alloc_dev_usb4(struct kunit *test,
  325. struct tb_switch *parent,
  326. u64 route, bool bonded)
  327. {
  328. struct tb_switch *sw;
  329. sw = alloc_dev_default(test, parent, route, bonded);
  330. if (!sw)
  331. return NULL;
  332. sw->generation = 4;
  333. sw->credit_allocation = true;
  334. sw->max_usb3_credits = 14;
  335. sw->min_dp_aux_credits = 1;
  336. sw->min_dp_main_credits = 18;
  337. sw->max_pcie_credits = 32;
  338. sw->max_dma_credits = 14;
  339. return sw;
  340. }
  341. static void tb_test_path_basic(struct kunit *test)
  342. {
  343. struct tb_port *src_port, *dst_port, *p;
  344. struct tb_switch *host;
  345. host = alloc_host(test);
  346. src_port = &host->ports[5];
  347. dst_port = src_port;
  348. p = tb_next_port_on_path(src_port, dst_port, NULL);
  349. KUNIT_EXPECT_PTR_EQ(test, p, dst_port);
  350. p = tb_next_port_on_path(src_port, dst_port, p);
  351. KUNIT_EXPECT_TRUE(test, !p);
  352. }
  353. static void tb_test_path_not_connected_walk(struct kunit *test)
  354. {
  355. struct tb_port *src_port, *dst_port, *p;
  356. struct tb_switch *host, *dev;
  357. host = alloc_host(test);
  358. /* No connection between host and dev */
  359. dev = alloc_dev_default(test, NULL, 3, true);
  360. src_port = &host->ports[12];
  361. dst_port = &dev->ports[16];
  362. p = tb_next_port_on_path(src_port, dst_port, NULL);
  363. KUNIT_EXPECT_PTR_EQ(test, p, src_port);
  364. p = tb_next_port_on_path(src_port, dst_port, p);
  365. KUNIT_EXPECT_PTR_EQ(test, p, &host->ports[3]);
  366. p = tb_next_port_on_path(src_port, dst_port, p);
  367. KUNIT_EXPECT_TRUE(test, !p);
  368. /* Other direction */
  369. p = tb_next_port_on_path(dst_port, src_port, NULL);
  370. KUNIT_EXPECT_PTR_EQ(test, p, dst_port);
  371. p = tb_next_port_on_path(dst_port, src_port, p);
  372. KUNIT_EXPECT_PTR_EQ(test, p, &dev->ports[1]);
  373. p = tb_next_port_on_path(dst_port, src_port, p);
  374. KUNIT_EXPECT_TRUE(test, !p);
  375. }
/*
 * Expected hop of a path walk: which switch (identified by its route
 * string), which port number on it, and the adapter type that port must
 * report. Arrays of these drive the tb_for_each_port_on_path() tests.
 */
struct port_expectation {
	u64 route;		/* route string of the switch the port is on */
	u8 port;		/* port number within that switch */
	enum tb_port_type type;	/* expected adapter type of the port */
};
  381. static void tb_test_path_single_hop_walk(struct kunit *test)
  382. {
  383. /*
  384. * Walks from Host PCIe downstream port to Device #1 PCIe
  385. * upstream port.
  386. *
  387. * [Host]
  388. * 1 |
  389. * 1 |
  390. * [Device]
  391. */
  392. static const struct port_expectation test_data[] = {
  393. { .route = 0x0, .port = 8, .type = TB_TYPE_PCIE_DOWN },
  394. { .route = 0x0, .port = 1, .type = TB_TYPE_PORT },
  395. { .route = 0x1, .port = 1, .type = TB_TYPE_PORT },
  396. { .route = 0x1, .port = 9, .type = TB_TYPE_PCIE_UP },
  397. };
  398. struct tb_port *src_port, *dst_port, *p;
  399. struct tb_switch *host, *dev;
  400. int i;
  401. host = alloc_host(test);
  402. dev = alloc_dev_default(test, host, 1, true);
  403. src_port = &host->ports[8];
  404. dst_port = &dev->ports[9];
  405. /* Walk both directions */
  406. i = 0;
  407. tb_for_each_port_on_path(src_port, dst_port, p) {
  408. KUNIT_EXPECT_TRUE(test, i < ARRAY_SIZE(test_data));
  409. KUNIT_EXPECT_EQ(test, tb_route(p->sw), test_data[i].route);
  410. KUNIT_EXPECT_EQ(test, p->port, test_data[i].port);
  411. KUNIT_EXPECT_EQ(test, (enum tb_port_type)p->config.type,
  412. test_data[i].type);
  413. i++;
  414. }
  415. KUNIT_EXPECT_EQ(test, i, ARRAY_SIZE(test_data));
  416. i = ARRAY_SIZE(test_data) - 1;
  417. tb_for_each_port_on_path(dst_port, src_port, p) {
  418. KUNIT_EXPECT_TRUE(test, i < ARRAY_SIZE(test_data));
  419. KUNIT_EXPECT_EQ(test, tb_route(p->sw), test_data[i].route);
  420. KUNIT_EXPECT_EQ(test, p->port, test_data[i].port);
  421. KUNIT_EXPECT_EQ(test, (enum tb_port_type)p->config.type,
  422. test_data[i].type);
  423. i--;
  424. }
  425. KUNIT_EXPECT_EQ(test, i, -1);
  426. }
  427. static void tb_test_path_daisy_chain_walk(struct kunit *test)
  428. {
  429. /*
  430. * Walks from Host DP IN to Device #2 DP OUT.
  431. *
  432. * [Host]
  433. * 1 |
  434. * 1 |
  435. * [Device #1]
  436. * 3 /
  437. * 1 /
  438. * [Device #2]
  439. */
  440. static const struct port_expectation test_data[] = {
  441. { .route = 0x0, .port = 5, .type = TB_TYPE_DP_HDMI_IN },
  442. { .route = 0x0, .port = 1, .type = TB_TYPE_PORT },
  443. { .route = 0x1, .port = 1, .type = TB_TYPE_PORT },
  444. { .route = 0x1, .port = 3, .type = TB_TYPE_PORT },
  445. { .route = 0x301, .port = 1, .type = TB_TYPE_PORT },
  446. { .route = 0x301, .port = 13, .type = TB_TYPE_DP_HDMI_OUT },
  447. };
  448. struct tb_port *src_port, *dst_port, *p;
  449. struct tb_switch *host, *dev1, *dev2;
  450. int i;
  451. host = alloc_host(test);
  452. dev1 = alloc_dev_default(test, host, 0x1, true);
  453. dev2 = alloc_dev_default(test, dev1, 0x301, true);
  454. src_port = &host->ports[5];
  455. dst_port = &dev2->ports[13];
  456. /* Walk both directions */
  457. i = 0;
  458. tb_for_each_port_on_path(src_port, dst_port, p) {
  459. KUNIT_EXPECT_TRUE(test, i < ARRAY_SIZE(test_data));
  460. KUNIT_EXPECT_EQ(test, tb_route(p->sw), test_data[i].route);
  461. KUNIT_EXPECT_EQ(test, p->port, test_data[i].port);
  462. KUNIT_EXPECT_EQ(test, (enum tb_port_type)p->config.type,
  463. test_data[i].type);
  464. i++;
  465. }
  466. KUNIT_EXPECT_EQ(test, i, ARRAY_SIZE(test_data));
  467. i = ARRAY_SIZE(test_data) - 1;
  468. tb_for_each_port_on_path(dst_port, src_port, p) {
  469. KUNIT_EXPECT_TRUE(test, i < ARRAY_SIZE(test_data));
  470. KUNIT_EXPECT_EQ(test, tb_route(p->sw), test_data[i].route);
  471. KUNIT_EXPECT_EQ(test, p->port, test_data[i].port);
  472. KUNIT_EXPECT_EQ(test, (enum tb_port_type)p->config.type,
  473. test_data[i].type);
  474. i--;
  475. }
  476. KUNIT_EXPECT_EQ(test, i, -1);
  477. }
  478. static void tb_test_path_simple_tree_walk(struct kunit *test)
  479. {
  480. /*
  481. * Walks from Host DP IN to Device #3 DP OUT.
  482. *
  483. * [Host]
  484. * 1 |
  485. * 1 |
  486. * [Device #1]
  487. * 3 / | 5 \ 7
  488. * 1 / | \ 1
  489. * [Device #2] | [Device #4]
  490. * | 1
  491. * [Device #3]
  492. */
  493. static const struct port_expectation test_data[] = {
  494. { .route = 0x0, .port = 5, .type = TB_TYPE_DP_HDMI_IN },
  495. { .route = 0x0, .port = 1, .type = TB_TYPE_PORT },
  496. { .route = 0x1, .port = 1, .type = TB_TYPE_PORT },
  497. { .route = 0x1, .port = 5, .type = TB_TYPE_PORT },
  498. { .route = 0x501, .port = 1, .type = TB_TYPE_PORT },
  499. { .route = 0x501, .port = 13, .type = TB_TYPE_DP_HDMI_OUT },
  500. };
  501. struct tb_port *src_port, *dst_port, *p;
  502. struct tb_switch *host, *dev1, *dev3;
  503. int i;
  504. host = alloc_host(test);
  505. dev1 = alloc_dev_default(test, host, 0x1, true);
  506. alloc_dev_default(test, dev1, 0x301, true);
  507. dev3 = alloc_dev_default(test, dev1, 0x501, true);
  508. alloc_dev_default(test, dev1, 0x701, true);
  509. src_port = &host->ports[5];
  510. dst_port = &dev3->ports[13];
  511. /* Walk both directions */
  512. i = 0;
  513. tb_for_each_port_on_path(src_port, dst_port, p) {
  514. KUNIT_EXPECT_TRUE(test, i < ARRAY_SIZE(test_data));
  515. KUNIT_EXPECT_EQ(test, tb_route(p->sw), test_data[i].route);
  516. KUNIT_EXPECT_EQ(test, p->port, test_data[i].port);
  517. KUNIT_EXPECT_EQ(test, (enum tb_port_type)p->config.type,
  518. test_data[i].type);
  519. i++;
  520. }
  521. KUNIT_EXPECT_EQ(test, i, ARRAY_SIZE(test_data));
  522. i = ARRAY_SIZE(test_data) - 1;
  523. tb_for_each_port_on_path(dst_port, src_port, p) {
  524. KUNIT_EXPECT_TRUE(test, i < ARRAY_SIZE(test_data));
  525. KUNIT_EXPECT_EQ(test, tb_route(p->sw), test_data[i].route);
  526. KUNIT_EXPECT_EQ(test, p->port, test_data[i].port);
  527. KUNIT_EXPECT_EQ(test, (enum tb_port_type)p->config.type,
  528. test_data[i].type);
  529. i--;
  530. }
  531. KUNIT_EXPECT_EQ(test, i, -1);
  532. }
static void tb_test_path_complex_tree_walk(struct kunit *test)
{
	/*
	 * Walks from Device #3 DP IN to Device #9 DP OUT.
	 *
	 *            [Host]
	 *             1 |
	 *             1 |
	 *          [Device #1]
	 *        3 /   | 5  \ 7
	 *       1 /    |     \ 1
	 * [Device #2]  |    [Device #5]
	 *    5 |       | 1        \ 7
	 *    1 |   [Device #4]     \ 1
	 * [Device #3]            [Device #6]
	 *                       3 /
	 *                      1 /
	 *                   [Device #7]
	 *                   3 /  | 5
	 *                  1 /   | 1
	 *           [Device #8] [Device #9]
	 */
	/* Expected (switch route, port number, port type) for every hop */
	static const struct port_expectation test_data[] = {
		{ .route = 0x50301, .port = 13, .type = TB_TYPE_DP_HDMI_IN },
		{ .route = 0x50301, .port = 1, .type = TB_TYPE_PORT },
		{ .route = 0x301, .port = 5, .type = TB_TYPE_PORT },
		{ .route = 0x301, .port = 1, .type = TB_TYPE_PORT },
		{ .route = 0x1, .port = 3, .type = TB_TYPE_PORT },
		{ .route = 0x1, .port = 7, .type = TB_TYPE_PORT },
		{ .route = 0x701, .port = 1, .type = TB_TYPE_PORT },
		{ .route = 0x701, .port = 7, .type = TB_TYPE_PORT },
		{ .route = 0x70701, .port = 1, .type = TB_TYPE_PORT },
		{ .route = 0x70701, .port = 3, .type = TB_TYPE_PORT },
		{ .route = 0x3070701, .port = 1, .type = TB_TYPE_PORT },
		{ .route = 0x3070701, .port = 5, .type = TB_TYPE_PORT },
		{ .route = 0x503070701, .port = 1, .type = TB_TYPE_PORT },
		{ .route = 0x503070701, .port = 14, .type = TB_TYPE_DP_HDMI_OUT },
	};
	struct tb_switch *host, *dev1, *dev2, *dev3, *dev5, *dev6, *dev7, *dev9;
	struct tb_port *src_port, *dst_port, *p;
	int i;

	/* Build the topology shown in the diagram above */
	host = alloc_host(test);
	dev1 = alloc_dev_default(test, host, 0x1, true);
	dev2 = alloc_dev_default(test, dev1, 0x301, true);
	dev3 = alloc_dev_with_dpin(test, dev2, 0x50301, true);
	alloc_dev_default(test, dev1, 0x501, true);
	dev5 = alloc_dev_default(test, dev1, 0x701, true);
	dev6 = alloc_dev_default(test, dev5, 0x70701, true);
	dev7 = alloc_dev_default(test, dev6, 0x3070701, true);
	alloc_dev_default(test, dev7, 0x303070701, true);
	dev9 = alloc_dev_default(test, dev7, 0x503070701, true);

	src_port = &dev3->ports[13];
	dst_port = &dev9->ports[14];

	/* Walk both directions */
	i = 0;
	/* Forward walk must visit each expected port in table order */
	tb_for_each_port_on_path(src_port, dst_port, p) {
		KUNIT_EXPECT_TRUE(test, i < ARRAY_SIZE(test_data));
		KUNIT_EXPECT_EQ(test, tb_route(p->sw), test_data[i].route);
		KUNIT_EXPECT_EQ(test, p->port, test_data[i].port);
		KUNIT_EXPECT_EQ(test, (enum tb_port_type)p->config.type,
				test_data[i].type);
		i++;
	}
	KUNIT_EXPECT_EQ(test, i, ARRAY_SIZE(test_data));

	/* Reverse walk must visit the same ports in the opposite order */
	i = ARRAY_SIZE(test_data) - 1;
	tb_for_each_port_on_path(dst_port, src_port, p) {
		KUNIT_EXPECT_TRUE(test, i < ARRAY_SIZE(test_data));
		KUNIT_EXPECT_EQ(test, tb_route(p->sw), test_data[i].route);
		KUNIT_EXPECT_EQ(test, p->port, test_data[i].port);
		KUNIT_EXPECT_EQ(test, (enum tb_port_type)p->config.type,
				test_data[i].type);
		i--;
	}
	KUNIT_EXPECT_EQ(test, i, -1);
}
static void tb_test_path_max_length_walk(struct kunit *test)
{
	struct tb_switch *host, *dev1, *dev2, *dev3, *dev4, *dev5, *dev6;
	struct tb_switch *dev7, *dev8, *dev9, *dev10, *dev11, *dev12;
	struct tb_port *src_port, *dst_port, *p;
	int i;

	/*
	 * Walks from Device #6 DP IN to Device #12 DP OUT.
	 *
	 *          [Host]
	 *         1 /  \ 3
	 *        1 /    \ 1
	 * [Device #1]   [Device #7]
	 *     3 |           | 3
	 *     1 |           | 1
	 * [Device #2]   [Device #8]
	 *     3 |           | 3
	 *     1 |           | 1
	 * [Device #3]   [Device #9]
	 *     3 |           | 3
	 *     1 |           | 1
	 * [Device #4]   [Device #10]
	 *     3 |           | 3
	 *     1 |           | 1
	 * [Device #5]   [Device #11]
	 *     3 |           | 3
	 *     1 |           | 1
	 * [Device #6]   [Device #12]
	 */
	/* Expected hops: up one six-device chain, through host, down the other */
	static const struct port_expectation test_data[] = {
		{ .route = 0x30303030301, .port = 13, .type = TB_TYPE_DP_HDMI_IN },
		{ .route = 0x30303030301, .port = 1, .type = TB_TYPE_PORT },
		{ .route = 0x303030301, .port = 3, .type = TB_TYPE_PORT },
		{ .route = 0x303030301, .port = 1, .type = TB_TYPE_PORT },
		{ .route = 0x3030301, .port = 3, .type = TB_TYPE_PORT },
		{ .route = 0x3030301, .port = 1, .type = TB_TYPE_PORT },
		{ .route = 0x30301, .port = 3, .type = TB_TYPE_PORT },
		{ .route = 0x30301, .port = 1, .type = TB_TYPE_PORT },
		{ .route = 0x301, .port = 3, .type = TB_TYPE_PORT },
		{ .route = 0x301, .port = 1, .type = TB_TYPE_PORT },
		{ .route = 0x1, .port = 3, .type = TB_TYPE_PORT },
		{ .route = 0x1, .port = 1, .type = TB_TYPE_PORT },
		{ .route = 0x0, .port = 1, .type = TB_TYPE_PORT },
		{ .route = 0x0, .port = 3, .type = TB_TYPE_PORT },
		{ .route = 0x3, .port = 1, .type = TB_TYPE_PORT },
		{ .route = 0x3, .port = 3, .type = TB_TYPE_PORT },
		{ .route = 0x303, .port = 1, .type = TB_TYPE_PORT },
		{ .route = 0x303, .port = 3, .type = TB_TYPE_PORT },
		{ .route = 0x30303, .port = 1, .type = TB_TYPE_PORT },
		{ .route = 0x30303, .port = 3, .type = TB_TYPE_PORT },
		{ .route = 0x3030303, .port = 1, .type = TB_TYPE_PORT },
		{ .route = 0x3030303, .port = 3, .type = TB_TYPE_PORT },
		{ .route = 0x303030303, .port = 1, .type = TB_TYPE_PORT },
		{ .route = 0x303030303, .port = 3, .type = TB_TYPE_PORT },
		{ .route = 0x30303030303, .port = 1, .type = TB_TYPE_PORT },
		{ .route = 0x30303030303, .port = 13, .type = TB_TYPE_DP_HDMI_OUT },
	};

	/* Build both six-device chains hanging off the host */
	host = alloc_host(test);
	dev1 = alloc_dev_default(test, host, 0x1, true);
	dev2 = alloc_dev_default(test, dev1, 0x301, true);
	dev3 = alloc_dev_default(test, dev2, 0x30301, true);
	dev4 = alloc_dev_default(test, dev3, 0x3030301, true);
	dev5 = alloc_dev_default(test, dev4, 0x303030301, true);
	dev6 = alloc_dev_with_dpin(test, dev5, 0x30303030301, true);
	dev7 = alloc_dev_default(test, host, 0x3, true);
	dev8 = alloc_dev_default(test, dev7, 0x303, true);
	dev9 = alloc_dev_default(test, dev8, 0x30303, true);
	dev10 = alloc_dev_default(test, dev9, 0x3030303, true);
	dev11 = alloc_dev_default(test, dev10, 0x303030303, true);
	dev12 = alloc_dev_default(test, dev11, 0x30303030303, true);

	src_port = &dev6->ports[13];
	dst_port = &dev12->ports[13];

	/* Walk both directions */
	i = 0;
	/* Forward walk must visit each expected port in table order */
	tb_for_each_port_on_path(src_port, dst_port, p) {
		KUNIT_EXPECT_TRUE(test, i < ARRAY_SIZE(test_data));
		KUNIT_EXPECT_EQ(test, tb_route(p->sw), test_data[i].route);
		KUNIT_EXPECT_EQ(test, p->port, test_data[i].port);
		KUNIT_EXPECT_EQ(test, (enum tb_port_type)p->config.type,
				test_data[i].type);
		i++;
	}
	KUNIT_EXPECT_EQ(test, i, ARRAY_SIZE(test_data));

	/* Reverse walk must visit the same ports in the opposite order */
	i = ARRAY_SIZE(test_data) - 1;
	tb_for_each_port_on_path(dst_port, src_port, p) {
		KUNIT_EXPECT_TRUE(test, i < ARRAY_SIZE(test_data));
		KUNIT_EXPECT_EQ(test, tb_route(p->sw), test_data[i].route);
		KUNIT_EXPECT_EQ(test, p->port, test_data[i].port);
		KUNIT_EXPECT_EQ(test, (enum tb_port_type)p->config.type,
				test_data[i].type);
		i--;
	}
	KUNIT_EXPECT_EQ(test, i, -1);
}
  703. static void tb_test_path_not_connected(struct kunit *test)
  704. {
  705. struct tb_switch *host, *dev1, *dev2;
  706. struct tb_port *down, *up;
  707. struct tb_path *path;
  708. host = alloc_host(test);
  709. dev1 = alloc_dev_default(test, host, 0x3, false);
  710. /* Not connected to anything */
  711. dev2 = alloc_dev_default(test, NULL, 0x303, false);
  712. down = &dev1->ports[10];
  713. up = &dev2->ports[9];
  714. path = tb_path_alloc(NULL, down, 8, up, 8, 0, "PCIe Down");
  715. KUNIT_ASSERT_NULL(test, path);
  716. path = tb_path_alloc(NULL, down, 8, up, 8, 1, "PCIe Down");
  717. KUNIT_ASSERT_NULL(test, path);
  718. }
/*
 * Expected properties of a single path hop. Each hop is checked on both
 * sides: the port the hop enters the switch through and the port it
 * leaves through.
 */
struct hop_expectation {
	u64 route;			/* route of the switch this hop is on */
	u8 in_port;			/* expected input port number */
	enum tb_port_type in_type;	/* expected input port adapter type */
	u8 out_port;			/* expected output port number */
	enum tb_port_type out_type;	/* expected output port adapter type */
};
static void tb_test_path_not_bonded_lane0(struct kunit *test)
{
	/*
	 * PCIe path from host to device using lane 0.
	 *
	 *   [Host]
	 *   3 |: 4
	 *   1 |: 2
	 *  [Device]
	 */
	static const struct hop_expectation test_data[] = {
		{
			.route = 0x0,
			.in_port = 9,
			.in_type = TB_TYPE_PCIE_DOWN,
			.out_port = 3,
			.out_type = TB_TYPE_PORT,
		},
		{
			.route = 0x3,
			.in_port = 1,
			.in_type = TB_TYPE_PORT,
			.out_port = 9,
			.out_type = TB_TYPE_PCIE_UP,
		},
	};
	struct tb_switch *host, *dev;
	struct tb_port *down, *up;
	struct tb_path *path;
	int i;

	/* Device attached without lane bonding so the path uses lane 0 */
	host = alloc_host(test);
	dev = alloc_dev_default(test, host, 0x3, false);

	down = &host->ports[9];
	up = &dev->ports[9];
	path = tb_path_alloc(NULL, down, 8, up, 8, 0, "PCIe Down");
	KUNIT_ASSERT_NOT_NULL(test, path);
	KUNIT_ASSERT_EQ(test, path->path_length, ARRAY_SIZE(test_data));
	/* Verify both sides of every hop against the expectation table */
	for (i = 0; i < ARRAY_SIZE(test_data); i++) {
		const struct tb_port *in_port, *out_port;

		in_port = path->hops[i].in_port;
		out_port = path->hops[i].out_port;

		KUNIT_EXPECT_EQ(test, tb_route(in_port->sw), test_data[i].route);
		KUNIT_EXPECT_EQ(test, in_port->port, test_data[i].in_port);
		KUNIT_EXPECT_EQ(test, (enum tb_port_type)in_port->config.type,
				test_data[i].in_type);
		KUNIT_EXPECT_EQ(test, tb_route(out_port->sw), test_data[i].route);
		KUNIT_EXPECT_EQ(test, out_port->port, test_data[i].out_port);
		KUNIT_EXPECT_EQ(test, (enum tb_port_type)out_port->config.type,
				test_data[i].out_type);
	}
	tb_path_free(path);
}
static void tb_test_path_not_bonded_lane1(struct kunit *test)
{
	/*
	 * DP Video path from host to device using lane 1. Paths like
	 * these are only used with Thunderbolt 1 devices where lane
	 * bonding is not possible. USB4 specifically does not allow
	 * paths like this (you either use lane 0 where lane 1 is
	 * disabled or both lanes are bonded).
	 *
	 *   [Host]
	 *   1 :| 2
	 *   1 :| 2
	 *  [Device]
	 */
	static const struct hop_expectation test_data[] = {
		{
			.route = 0x0,
			.in_port = 5,
			.in_type = TB_TYPE_DP_HDMI_IN,
			.out_port = 2,
			.out_type = TB_TYPE_PORT,
		},
		{
			.route = 0x1,
			.in_port = 2,
			.in_type = TB_TYPE_PORT,
			.out_port = 13,
			.out_type = TB_TYPE_DP_HDMI_OUT,
		},
	};
	struct tb_switch *host, *dev;
	struct tb_port *in, *out;
	struct tb_path *path;
	int i;

	/* Device attached without lane bonding */
	host = alloc_host(test);
	dev = alloc_dev_default(test, host, 0x1, false);

	in = &host->ports[5];
	out = &dev->ports[13];
	/* Last argument 1 requests the path to be placed on lane 1 */
	path = tb_path_alloc(NULL, in, 9, out, 9, 1, "Video");
	KUNIT_ASSERT_NOT_NULL(test, path);
	KUNIT_ASSERT_EQ(test, path->path_length, ARRAY_SIZE(test_data));
	/* Verify both sides of every hop against the expectation table */
	for (i = 0; i < ARRAY_SIZE(test_data); i++) {
		const struct tb_port *in_port, *out_port;

		in_port = path->hops[i].in_port;
		out_port = path->hops[i].out_port;

		KUNIT_EXPECT_EQ(test, tb_route(in_port->sw), test_data[i].route);
		KUNIT_EXPECT_EQ(test, in_port->port, test_data[i].in_port);
		KUNIT_EXPECT_EQ(test, (enum tb_port_type)in_port->config.type,
				test_data[i].in_type);
		KUNIT_EXPECT_EQ(test, tb_route(out_port->sw), test_data[i].route);
		KUNIT_EXPECT_EQ(test, out_port->port, test_data[i].out_port);
		KUNIT_EXPECT_EQ(test, (enum tb_port_type)out_port->config.type,
				test_data[i].out_type);
	}
	tb_path_free(path);
}
static void tb_test_path_not_bonded_lane1_chain(struct kunit *test)
{
	/*
	 * DP Video path from host to device 3 using lane 1.
	 *
	 *    [Host]
	 *    1 :| 2
	 *    1 :| 2
	 *  [Device #1]
	 *    7 :| 8
	 *    1 :| 2
	 *  [Device #2]
	 *    5 :| 6
	 *    1 :| 2
	 *  [Device #3]
	 */
	static const struct hop_expectation test_data[] = {
		{
			.route = 0x0,
			.in_port = 5,
			.in_type = TB_TYPE_DP_HDMI_IN,
			.out_port = 2,
			.out_type = TB_TYPE_PORT,
		},
		{
			.route = 0x1,
			.in_port = 2,
			.in_type = TB_TYPE_PORT,
			.out_port = 8,
			.out_type = TB_TYPE_PORT,
		},
		{
			.route = 0x701,
			.in_port = 2,
			.in_type = TB_TYPE_PORT,
			.out_port = 6,
			.out_type = TB_TYPE_PORT,
		},
		{
			.route = 0x50701,
			.in_port = 2,
			.in_type = TB_TYPE_PORT,
			.out_port = 13,
			.out_type = TB_TYPE_DP_HDMI_OUT,
		},
	};
	struct tb_switch *host, *dev1, *dev2, *dev3;
	struct tb_port *in, *out;
	struct tb_path *path;
	int i;

	/* Three-device chain, none of the links bonded */
	host = alloc_host(test);
	dev1 = alloc_dev_default(test, host, 0x1, false);
	dev2 = alloc_dev_default(test, dev1, 0x701, false);
	dev3 = alloc_dev_default(test, dev2, 0x50701, false);

	in = &host->ports[5];
	out = &dev3->ports[13];
	/* Last argument 1 requests the path to be placed on lane 1 */
	path = tb_path_alloc(NULL, in, 9, out, 9, 1, "Video");
	KUNIT_ASSERT_NOT_NULL(test, path);
	KUNIT_ASSERT_EQ(test, path->path_length, ARRAY_SIZE(test_data));
	/* Verify both sides of every hop against the expectation table */
	for (i = 0; i < ARRAY_SIZE(test_data); i++) {
		const struct tb_port *in_port, *out_port;

		in_port = path->hops[i].in_port;
		out_port = path->hops[i].out_port;

		KUNIT_EXPECT_EQ(test, tb_route(in_port->sw), test_data[i].route);
		KUNIT_EXPECT_EQ(test, in_port->port, test_data[i].in_port);
		KUNIT_EXPECT_EQ(test, (enum tb_port_type)in_port->config.type,
				test_data[i].in_type);
		KUNIT_EXPECT_EQ(test, tb_route(out_port->sw), test_data[i].route);
		KUNIT_EXPECT_EQ(test, out_port->port, test_data[i].out_port);
		KUNIT_EXPECT_EQ(test, (enum tb_port_type)out_port->config.type,
				test_data[i].out_type);
	}
	tb_path_free(path);
}
static void tb_test_path_not_bonded_lane1_chain_reverse(struct kunit *test)
{
	/*
	 * DP Video path from device 3 to host using lane 1.
	 *
	 *    [Host]
	 *    1 :| 2
	 *    1 :| 2
	 *  [Device #1]
	 *    7 :| 8
	 *    1 :| 2
	 *  [Device #2]
	 *    5 :| 6
	 *    1 :| 2
	 *  [Device #3]
	 */
	static const struct hop_expectation test_data[] = {
		{
			.route = 0x50701,
			.in_port = 13,
			.in_type = TB_TYPE_DP_HDMI_IN,
			.out_port = 2,
			.out_type = TB_TYPE_PORT,
		},
		{
			.route = 0x701,
			.in_port = 6,
			.in_type = TB_TYPE_PORT,
			.out_port = 2,
			.out_type = TB_TYPE_PORT,
		},
		{
			.route = 0x1,
			.in_port = 8,
			.in_type = TB_TYPE_PORT,
			.out_port = 2,
			.out_type = TB_TYPE_PORT,
		},
		{
			.route = 0x0,
			.in_port = 2,
			.in_type = TB_TYPE_PORT,
			.out_port = 5,
			.out_type = TB_TYPE_DP_HDMI_IN,
		},
	};
	struct tb_switch *host, *dev1, *dev2, *dev3;
	struct tb_port *in, *out;
	struct tb_path *path;
	int i;

	/* Same chain as the forward test but dev3 provides the DP IN */
	host = alloc_host(test);
	dev1 = alloc_dev_default(test, host, 0x1, false);
	dev2 = alloc_dev_default(test, dev1, 0x701, false);
	dev3 = alloc_dev_with_dpin(test, dev2, 0x50701, false);

	in = &dev3->ports[13];
	out = &host->ports[5];
	/* Last argument 1 requests the path to be placed on lane 1 */
	path = tb_path_alloc(NULL, in, 9, out, 9, 1, "Video");
	KUNIT_ASSERT_NOT_NULL(test, path);
	KUNIT_ASSERT_EQ(test, path->path_length, ARRAY_SIZE(test_data));
	/* Verify both sides of every hop against the expectation table */
	for (i = 0; i < ARRAY_SIZE(test_data); i++) {
		const struct tb_port *in_port, *out_port;

		in_port = path->hops[i].in_port;
		out_port = path->hops[i].out_port;

		KUNIT_EXPECT_EQ(test, tb_route(in_port->sw), test_data[i].route);
		KUNIT_EXPECT_EQ(test, in_port->port, test_data[i].in_port);
		KUNIT_EXPECT_EQ(test, (enum tb_port_type)in_port->config.type,
				test_data[i].in_type);
		KUNIT_EXPECT_EQ(test, tb_route(out_port->sw), test_data[i].route);
		KUNIT_EXPECT_EQ(test, out_port->port, test_data[i].out_port);
		KUNIT_EXPECT_EQ(test, (enum tb_port_type)out_port->config.type,
				test_data[i].out_type);
	}
	tb_path_free(path);
}
static void tb_test_path_mixed_chain(struct kunit *test)
{
	/*
	 * DP Video path from host to device 4 where first and last link
	 * is bonded.
	 *
	 *    [Host]
	 *    1 |
	 *    1 |
	 *  [Device #1]
	 *    7 :| 8
	 *    1 :| 2
	 *  [Device #2]
	 *    5 :| 6
	 *    1 :| 2
	 *  [Device #3]
	 *    3 |
	 *    1 |
	 *  [Device #4]
	 */
	static const struct hop_expectation test_data[] = {
		{
			.route = 0x0,
			.in_port = 5,
			.in_type = TB_TYPE_DP_HDMI_IN,
			.out_port = 1,
			.out_type = TB_TYPE_PORT,
		},
		{
			.route = 0x1,
			.in_port = 1,
			.in_type = TB_TYPE_PORT,
			.out_port = 8,
			.out_type = TB_TYPE_PORT,
		},
		{
			.route = 0x701,
			.in_port = 2,
			.in_type = TB_TYPE_PORT,
			.out_port = 6,
			.out_type = TB_TYPE_PORT,
		},
		{
			.route = 0x50701,
			.in_port = 2,
			.in_type = TB_TYPE_PORT,
			.out_port = 3,
			.out_type = TB_TYPE_PORT,
		},
		{
			.route = 0x3050701,
			.in_port = 1,
			.in_type = TB_TYPE_PORT,
			.out_port = 13,
			.out_type = TB_TYPE_DP_HDMI_OUT,
		},
	};
	struct tb_switch *host, *dev1, *dev2, *dev3, *dev4;
	struct tb_port *in, *out;
	struct tb_path *path;
	int i;

	/* Only the host-dev1 and dev3-dev4 links are bonded */
	host = alloc_host(test);
	dev1 = alloc_dev_default(test, host, 0x1, true);
	dev2 = alloc_dev_default(test, dev1, 0x701, false);
	dev3 = alloc_dev_default(test, dev2, 0x50701, false);
	dev4 = alloc_dev_default(test, dev3, 0x3050701, true);

	in = &host->ports[5];
	out = &dev4->ports[13];
	/* Last argument 1 requests lane 1 on the non-bonded links */
	path = tb_path_alloc(NULL, in, 9, out, 9, 1, "Video");
	KUNIT_ASSERT_NOT_NULL(test, path);
	KUNIT_ASSERT_EQ(test, path->path_length, ARRAY_SIZE(test_data));
	/* Verify both sides of every hop against the expectation table */
	for (i = 0; i < ARRAY_SIZE(test_data); i++) {
		const struct tb_port *in_port, *out_port;

		in_port = path->hops[i].in_port;
		out_port = path->hops[i].out_port;

		KUNIT_EXPECT_EQ(test, tb_route(in_port->sw), test_data[i].route);
		KUNIT_EXPECT_EQ(test, in_port->port, test_data[i].in_port);
		KUNIT_EXPECT_EQ(test, (enum tb_port_type)in_port->config.type,
				test_data[i].in_type);
		KUNIT_EXPECT_EQ(test, tb_route(out_port->sw), test_data[i].route);
		KUNIT_EXPECT_EQ(test, out_port->port, test_data[i].out_port);
		KUNIT_EXPECT_EQ(test, (enum tb_port_type)out_port->config.type,
				test_data[i].out_type);
	}
	tb_path_free(path);
}
static void tb_test_path_mixed_chain_reverse(struct kunit *test)
{
	/*
	 * DP Video path from device 4 to host where first and last link
	 * is bonded.
	 *
	 *    [Host]
	 *    1 |
	 *    1 |
	 *  [Device #1]
	 *    7 :| 8
	 *    1 :| 2
	 *  [Device #2]
	 *    5 :| 6
	 *    1 :| 2
	 *  [Device #3]
	 *    3 |
	 *    1 |
	 *  [Device #4]
	 */
	static const struct hop_expectation test_data[] = {
		{
			.route = 0x3050701,
			.in_port = 13,
			.in_type = TB_TYPE_DP_HDMI_OUT,
			.out_port = 1,
			.out_type = TB_TYPE_PORT,
		},
		{
			.route = 0x50701,
			.in_port = 3,
			.in_type = TB_TYPE_PORT,
			.out_port = 2,
			.out_type = TB_TYPE_PORT,
		},
		{
			.route = 0x701,
			.in_port = 6,
			.in_type = TB_TYPE_PORT,
			.out_port = 2,
			.out_type = TB_TYPE_PORT,
		},
		{
			.route = 0x1,
			.in_port = 8,
			.in_type = TB_TYPE_PORT,
			.out_port = 1,
			.out_type = TB_TYPE_PORT,
		},
		{
			.route = 0x0,
			.in_port = 1,
			.in_type = TB_TYPE_PORT,
			.out_port = 5,
			.out_type = TB_TYPE_DP_HDMI_IN,
		},
	};
	struct tb_switch *host, *dev1, *dev2, *dev3, *dev4;
	struct tb_port *in, *out;
	struct tb_path *path;
	int i;

	/* Only the host-dev1 and dev3-dev4 links are bonded */
	host = alloc_host(test);
	dev1 = alloc_dev_default(test, host, 0x1, true);
	dev2 = alloc_dev_default(test, dev1, 0x701, false);
	dev3 = alloc_dev_default(test, dev2, 0x50701, false);
	dev4 = alloc_dev_default(test, dev3, 0x3050701, true);

	in = &dev4->ports[13];
	out = &host->ports[5];
	/* Last argument 1 requests lane 1 on the non-bonded links */
	path = tb_path_alloc(NULL, in, 9, out, 9, 1, "Video");
	KUNIT_ASSERT_NOT_NULL(test, path);
	KUNIT_ASSERT_EQ(test, path->path_length, ARRAY_SIZE(test_data));
	/* Verify both sides of every hop against the expectation table */
	for (i = 0; i < ARRAY_SIZE(test_data); i++) {
		const struct tb_port *in_port, *out_port;

		in_port = path->hops[i].in_port;
		out_port = path->hops[i].out_port;

		KUNIT_EXPECT_EQ(test, tb_route(in_port->sw), test_data[i].route);
		KUNIT_EXPECT_EQ(test, in_port->port, test_data[i].in_port);
		KUNIT_EXPECT_EQ(test, (enum tb_port_type)in_port->config.type,
				test_data[i].in_type);
		KUNIT_EXPECT_EQ(test, tb_route(out_port->sw), test_data[i].route);
		KUNIT_EXPECT_EQ(test, out_port->port, test_data[i].out_port);
		KUNIT_EXPECT_EQ(test, (enum tb_port_type)out_port->config.type,
				test_data[i].out_type);
	}
	tb_path_free(path);
}
  1154. static void tb_test_tunnel_pcie(struct kunit *test)
  1155. {
  1156. struct tb_switch *host, *dev1, *dev2;
  1157. struct tb_tunnel *tunnel1, *tunnel2;
  1158. struct tb_port *down, *up;
  1159. /*
  1160. * Create PCIe tunnel between host and two devices.
  1161. *
  1162. * [Host]
  1163. * 1 |
  1164. * 1 |
  1165. * [Device #1]
  1166. * 5 |
  1167. * 1 |
  1168. * [Device #2]
  1169. */
  1170. host = alloc_host(test);
  1171. dev1 = alloc_dev_default(test, host, 0x1, true);
  1172. dev2 = alloc_dev_default(test, dev1, 0x501, true);
  1173. down = &host->ports[8];
  1174. up = &dev1->ports[9];
  1175. tunnel1 = tb_tunnel_alloc_pci(NULL, up, down);
  1176. KUNIT_ASSERT_NOT_NULL(test, tunnel1);
  1177. KUNIT_EXPECT_EQ(test, tunnel1->type, TB_TUNNEL_PCI);
  1178. KUNIT_EXPECT_PTR_EQ(test, tunnel1->src_port, down);
  1179. KUNIT_EXPECT_PTR_EQ(test, tunnel1->dst_port, up);
  1180. KUNIT_ASSERT_EQ(test, tunnel1->npaths, 2);
  1181. KUNIT_ASSERT_EQ(test, tunnel1->paths[0]->path_length, 2);
  1182. KUNIT_EXPECT_PTR_EQ(test, tunnel1->paths[0]->hops[0].in_port, down);
  1183. KUNIT_EXPECT_PTR_EQ(test, tunnel1->paths[0]->hops[1].out_port, up);
  1184. KUNIT_ASSERT_EQ(test, tunnel1->paths[1]->path_length, 2);
  1185. KUNIT_EXPECT_PTR_EQ(test, tunnel1->paths[1]->hops[0].in_port, up);
  1186. KUNIT_EXPECT_PTR_EQ(test, tunnel1->paths[1]->hops[1].out_port, down);
  1187. down = &dev1->ports[10];
  1188. up = &dev2->ports[9];
  1189. tunnel2 = tb_tunnel_alloc_pci(NULL, up, down);
  1190. KUNIT_ASSERT_NOT_NULL(test, tunnel2);
  1191. KUNIT_EXPECT_EQ(test, tunnel2->type, TB_TUNNEL_PCI);
  1192. KUNIT_EXPECT_PTR_EQ(test, tunnel2->src_port, down);
  1193. KUNIT_EXPECT_PTR_EQ(test, tunnel2->dst_port, up);
  1194. KUNIT_ASSERT_EQ(test, tunnel2->npaths, 2);
  1195. KUNIT_ASSERT_EQ(test, tunnel2->paths[0]->path_length, 2);
  1196. KUNIT_EXPECT_PTR_EQ(test, tunnel2->paths[0]->hops[0].in_port, down);
  1197. KUNIT_EXPECT_PTR_EQ(test, tunnel2->paths[0]->hops[1].out_port, up);
  1198. KUNIT_ASSERT_EQ(test, tunnel2->paths[1]->path_length, 2);
  1199. KUNIT_EXPECT_PTR_EQ(test, tunnel2->paths[1]->hops[0].in_port, up);
  1200. KUNIT_EXPECT_PTR_EQ(test, tunnel2->paths[1]->hops[1].out_port, down);
  1201. tb_tunnel_free(tunnel2);
  1202. tb_tunnel_free(tunnel1);
  1203. }
  1204. static void tb_test_tunnel_dp(struct kunit *test)
  1205. {
  1206. struct tb_switch *host, *dev;
  1207. struct tb_port *in, *out;
  1208. struct tb_tunnel *tunnel;
  1209. /*
  1210. * Create DP tunnel between Host and Device
  1211. *
  1212. * [Host]
  1213. * 1 |
  1214. * 1 |
  1215. * [Device]
  1216. */
  1217. host = alloc_host(test);
  1218. dev = alloc_dev_default(test, host, 0x3, true);
  1219. in = &host->ports[5];
  1220. out = &dev->ports[13];
  1221. tunnel = tb_tunnel_alloc_dp(NULL, in, out, 1, 0, 0);
  1222. KUNIT_ASSERT_NOT_NULL(test, tunnel);
  1223. KUNIT_EXPECT_EQ(test, tunnel->type, TB_TUNNEL_DP);
  1224. KUNIT_EXPECT_PTR_EQ(test, tunnel->src_port, in);
  1225. KUNIT_EXPECT_PTR_EQ(test, tunnel->dst_port, out);
  1226. KUNIT_ASSERT_EQ(test, tunnel->npaths, 3);
  1227. KUNIT_ASSERT_EQ(test, tunnel->paths[0]->path_length, 2);
  1228. KUNIT_EXPECT_PTR_EQ(test, tunnel->paths[0]->hops[0].in_port, in);
  1229. KUNIT_EXPECT_PTR_EQ(test, tunnel->paths[0]->hops[1].out_port, out);
  1230. KUNIT_ASSERT_EQ(test, tunnel->paths[1]->path_length, 2);
  1231. KUNIT_EXPECT_PTR_EQ(test, tunnel->paths[1]->hops[0].in_port, in);
  1232. KUNIT_EXPECT_PTR_EQ(test, tunnel->paths[1]->hops[1].out_port, out);
  1233. KUNIT_ASSERT_EQ(test, tunnel->paths[2]->path_length, 2);
  1234. KUNIT_EXPECT_PTR_EQ(test, tunnel->paths[2]->hops[0].in_port, out);
  1235. KUNIT_EXPECT_PTR_EQ(test, tunnel->paths[2]->hops[1].out_port, in);
  1236. tb_tunnel_free(tunnel);
  1237. }
static void tb_test_tunnel_dp_chain(struct kunit *test)
{
	struct tb_switch *host, *dev1, *dev4;
	struct tb_port *in, *out;
	struct tb_tunnel *tunnel;

	/*
	 * Create DP tunnel from Host DP IN to Device #4 DP OUT.
	 *
	 *           [Host]
	 *            1 |
	 *            1 |
	 *         [Device #1]
	 *       3 /   | 5  \ 7
	 *      1 /    |     \ 1
	 * [Device #2] |    [Device #4]
	 *             | 1
	 *         [Device #3]
	 */
	host = alloc_host(test);
	dev1 = alloc_dev_default(test, host, 0x1, true);
	alloc_dev_default(test, dev1, 0x301, true);
	alloc_dev_default(test, dev1, 0x501, true);
	dev4 = alloc_dev_default(test, dev1, 0x701, true);

	in = &host->ports[5];
	out = &dev4->ports[14];

	tunnel = tb_tunnel_alloc_dp(NULL, in, out, 1, 0, 0);
	KUNIT_ASSERT_NOT_NULL(test, tunnel);
	KUNIT_EXPECT_EQ(test, tunnel->type, TB_TUNNEL_DP);
	KUNIT_EXPECT_PTR_EQ(test, tunnel->src_port, in);
	KUNIT_EXPECT_PTR_EQ(test, tunnel->dst_port, out);
	KUNIT_ASSERT_EQ(test, tunnel->npaths, 3);
	/* Two hops through Device #1: first two paths in -> out ... */
	KUNIT_ASSERT_EQ(test, tunnel->paths[0]->path_length, 3);
	KUNIT_EXPECT_PTR_EQ(test, tunnel->paths[0]->hops[0].in_port, in);
	KUNIT_EXPECT_PTR_EQ(test, tunnel->paths[0]->hops[2].out_port, out);
	KUNIT_ASSERT_EQ(test, tunnel->paths[1]->path_length, 3);
	KUNIT_EXPECT_PTR_EQ(test, tunnel->paths[1]->hops[0].in_port, in);
	KUNIT_EXPECT_PTR_EQ(test, tunnel->paths[1]->hops[2].out_port, out);
	/* ... and the third path runs out -> in */
	KUNIT_ASSERT_EQ(test, tunnel->paths[2]->path_length, 3);
	KUNIT_EXPECT_PTR_EQ(test, tunnel->paths[2]->hops[0].in_port, out);
	KUNIT_EXPECT_PTR_EQ(test, tunnel->paths[2]->hops[2].out_port, in);
	tb_tunnel_free(tunnel);
}
static void tb_test_tunnel_dp_tree(struct kunit *test)
{
	struct tb_switch *host, *dev1, *dev2, *dev3, *dev5;
	struct tb_port *in, *out;
	struct tb_tunnel *tunnel;

	/*
	 * Create DP tunnel from Device #2 DP IN to Device #5 DP OUT.
	 *
	 *           [Host]
	 *            3 |
	 *            1 |
	 *         [Device #1]
	 *       3 /   | 5  \ 7
	 *      1 /    |     \ 1
	 * [Device #2] |    [Device #4]
	 *             | 1
	 *         [Device #3]
	 *             | 5
	 *             | 1
	 *         [Device #5]
	 */
	host = alloc_host(test);
	dev1 = alloc_dev_default(test, host, 0x3, true);
	dev2 = alloc_dev_with_dpin(test, dev1, 0x303, true);
	dev3 = alloc_dev_default(test, dev1, 0x503, true);
	alloc_dev_default(test, dev1, 0x703, true);
	dev5 = alloc_dev_default(test, dev3, 0x50503, true);

	in = &dev2->ports[13];
	out = &dev5->ports[13];

	tunnel = tb_tunnel_alloc_dp(NULL, in, out, 1, 0, 0);
	KUNIT_ASSERT_NOT_NULL(test, tunnel);
	KUNIT_EXPECT_EQ(test, tunnel->type, TB_TUNNEL_DP);
	KUNIT_EXPECT_PTR_EQ(test, tunnel->src_port, in);
	KUNIT_EXPECT_PTR_EQ(test, tunnel->dst_port, out);
	KUNIT_ASSERT_EQ(test, tunnel->npaths, 3);
	/* Three hops through Device #1 and #3: first two paths in -> out ... */
	KUNIT_ASSERT_EQ(test, tunnel->paths[0]->path_length, 4);
	KUNIT_EXPECT_PTR_EQ(test, tunnel->paths[0]->hops[0].in_port, in);
	KUNIT_EXPECT_PTR_EQ(test, tunnel->paths[0]->hops[3].out_port, out);
	KUNIT_ASSERT_EQ(test, tunnel->paths[1]->path_length, 4);
	KUNIT_EXPECT_PTR_EQ(test, tunnel->paths[1]->hops[0].in_port, in);
	KUNIT_EXPECT_PTR_EQ(test, tunnel->paths[1]->hops[3].out_port, out);
	/* ... and the third path runs out -> in */
	KUNIT_ASSERT_EQ(test, tunnel->paths[2]->path_length, 4);
	KUNIT_EXPECT_PTR_EQ(test, tunnel->paths[2]->hops[0].in_port, out);
	KUNIT_EXPECT_PTR_EQ(test, tunnel->paths[2]->hops[3].out_port, in);
	tb_tunnel_free(tunnel);
}
static void tb_test_tunnel_dp_max_length(struct kunit *test)
{
	struct tb_switch *host, *dev1, *dev2, *dev3, *dev4, *dev5, *dev6;
	struct tb_switch *dev7, *dev8, *dev9, *dev10, *dev11, *dev12;
	struct tb_port *in, *out;
	struct tb_tunnel *tunnel;

	/*
	 * Creates DP tunnel from Device #6 to Device #12.
	 *
	 *          [Host]
	 *         1 /  \ 3
	 *        1 /    \ 1
	 * [Device #1]   [Device #7]
	 *     3 |           | 3
	 *     1 |           | 1
	 * [Device #2]   [Device #8]
	 *     3 |           | 3
	 *     1 |           | 1
	 * [Device #3]   [Device #9]
	 *     3 |           | 3
	 *     1 |           | 1
	 * [Device #4]   [Device #10]
	 *     3 |           | 3
	 *     1 |           | 1
	 * [Device #5]   [Device #11]
	 *     3 |           | 3
	 *     1 |           | 1
	 * [Device #6]   [Device #12]
	 */
	host = alloc_host(test);
	dev1 = alloc_dev_default(test, host, 0x1, true);
	dev2 = alloc_dev_default(test, dev1, 0x301, true);
	dev3 = alloc_dev_default(test, dev2, 0x30301, true);
	dev4 = alloc_dev_default(test, dev3, 0x3030301, true);
	dev5 = alloc_dev_default(test, dev4, 0x303030301, true);
	dev6 = alloc_dev_with_dpin(test, dev5, 0x30303030301, true);
	dev7 = alloc_dev_default(test, host, 0x3, true);
	dev8 = alloc_dev_default(test, dev7, 0x303, true);
	dev9 = alloc_dev_default(test, dev8, 0x30303, true);
	dev10 = alloc_dev_default(test, dev9, 0x3030303, true);
	dev11 = alloc_dev_default(test, dev10, 0x303030303, true);
	dev12 = alloc_dev_default(test, dev11, 0x30303030303, true);

	in = &dev6->ports[13];
	out = &dev12->ports[13];

	tunnel = tb_tunnel_alloc_dp(NULL, in, out, 1, 0, 0);
	KUNIT_ASSERT_NOT_NULL(test, tunnel);
	KUNIT_EXPECT_EQ(test, tunnel->type, TB_TUNNEL_DP);
	KUNIT_EXPECT_PTR_EQ(test, tunnel->src_port, in);
	KUNIT_EXPECT_PTR_EQ(test, tunnel->dst_port, out);
	KUNIT_ASSERT_EQ(test, tunnel->npaths, 3);
	KUNIT_ASSERT_EQ(test, tunnel->paths[0]->path_length, 13);
	/* First hop */
	KUNIT_EXPECT_PTR_EQ(test, tunnel->paths[0]->hops[0].in_port, in);
	/* Middle hop crosses the host from port 1 to port 3 */
	KUNIT_EXPECT_PTR_EQ(test, tunnel->paths[0]->hops[6].in_port,
			    &host->ports[1]);
	KUNIT_EXPECT_PTR_EQ(test, tunnel->paths[0]->hops[6].out_port,
			    &host->ports[3]);
	/* Last */
	KUNIT_EXPECT_PTR_EQ(test, tunnel->paths[0]->hops[12].out_port, out);
	/* Second path takes the same in -> out route */
	KUNIT_ASSERT_EQ(test, tunnel->paths[1]->path_length, 13);
	KUNIT_EXPECT_PTR_EQ(test, tunnel->paths[1]->hops[0].in_port, in);
	KUNIT_EXPECT_PTR_EQ(test, tunnel->paths[1]->hops[6].in_port,
			    &host->ports[1]);
	KUNIT_EXPECT_PTR_EQ(test, tunnel->paths[1]->hops[6].out_port,
			    &host->ports[3]);
	KUNIT_EXPECT_PTR_EQ(test, tunnel->paths[1]->hops[12].out_port, out);
	/* Third path runs the same route in reverse, out -> in */
	KUNIT_ASSERT_EQ(test, tunnel->paths[2]->path_length, 13);
	KUNIT_EXPECT_PTR_EQ(test, tunnel->paths[2]->hops[0].in_port, out);
	KUNIT_EXPECT_PTR_EQ(test, tunnel->paths[2]->hops[6].in_port,
			    &host->ports[3]);
	KUNIT_EXPECT_PTR_EQ(test, tunnel->paths[2]->hops[6].out_port,
			    &host->ports[1]);
	KUNIT_EXPECT_PTR_EQ(test, tunnel->paths[2]->hops[12].out_port, in);
	tb_tunnel_free(tunnel);
}
  1402. static void tb_test_tunnel_3dp(struct kunit *test)
  1403. {
  1404. struct tb_switch *host, *dev1, *dev2, *dev3, *dev4, *dev5;
  1405. struct tb_port *in1, *in2, *in3, *out1, *out2, *out3;
  1406. struct tb_tunnel *tunnel1, *tunnel2, *tunnel3;
  1407. /*
  1408. * Create 3 DP tunnels from Host to Devices #2, #5 and #4.
  1409. *
  1410. * [Host]
  1411. * 3 |
  1412. * 1 |
  1413. * [Device #1]
  1414. * 3 / | 5 \ 7
  1415. * 1 / | \ 1
  1416. * [Device #2] | [Device #4]
  1417. * | 1
  1418. * [Device #3]
  1419. * | 5
  1420. * | 1
  1421. * [Device #5]
  1422. */
  1423. host = alloc_host_br(test);
  1424. dev1 = alloc_dev_default(test, host, 0x3, true);
  1425. dev2 = alloc_dev_default(test, dev1, 0x303, true);
  1426. dev3 = alloc_dev_default(test, dev1, 0x503, true);
  1427. dev4 = alloc_dev_default(test, dev1, 0x703, true);
  1428. dev5 = alloc_dev_default(test, dev3, 0x50503, true);
  1429. in1 = &host->ports[5];
  1430. in2 = &host->ports[6];
  1431. in3 = &host->ports[10];
  1432. out1 = &dev2->ports[13];
  1433. out2 = &dev5->ports[13];
  1434. out3 = &dev4->ports[14];
  1435. tunnel1 = tb_tunnel_alloc_dp(NULL, in1, out1, 1, 0, 0);
  1436. KUNIT_ASSERT_TRUE(test, tunnel1 != NULL);
  1437. KUNIT_EXPECT_EQ(test, tunnel1->type, TB_TUNNEL_DP);
  1438. KUNIT_EXPECT_PTR_EQ(test, tunnel1->src_port, in1);
  1439. KUNIT_EXPECT_PTR_EQ(test, tunnel1->dst_port, out1);
  1440. KUNIT_ASSERT_EQ(test, tunnel1->npaths, 3);
  1441. KUNIT_ASSERT_EQ(test, tunnel1->paths[0]->path_length, 3);
  1442. tunnel2 = tb_tunnel_alloc_dp(NULL, in2, out2, 1, 0, 0);
  1443. KUNIT_ASSERT_TRUE(test, tunnel2 != NULL);
  1444. KUNIT_EXPECT_EQ(test, tunnel2->type, TB_TUNNEL_DP);
  1445. KUNIT_EXPECT_PTR_EQ(test, tunnel2->src_port, in2);
  1446. KUNIT_EXPECT_PTR_EQ(test, tunnel2->dst_port, out2);
  1447. KUNIT_ASSERT_EQ(test, tunnel2->npaths, 3);
  1448. KUNIT_ASSERT_EQ(test, tunnel2->paths[0]->path_length, 4);
  1449. tunnel3 = tb_tunnel_alloc_dp(NULL, in3, out3, 1, 0, 0);
  1450. KUNIT_ASSERT_TRUE(test, tunnel3 != NULL);
  1451. KUNIT_EXPECT_EQ(test, tunnel3->type, TB_TUNNEL_DP);
  1452. KUNIT_EXPECT_PTR_EQ(test, tunnel3->src_port, in3);
  1453. KUNIT_EXPECT_PTR_EQ(test, tunnel3->dst_port, out3);
  1454. KUNIT_ASSERT_EQ(test, tunnel3->npaths, 3);
  1455. KUNIT_ASSERT_EQ(test, tunnel3->paths[0]->path_length, 3);
  1456. tb_tunnel_free(tunnel2);
  1457. tb_tunnel_free(tunnel1);
  1458. }
  1459. static void tb_test_tunnel_usb3(struct kunit *test)
  1460. {
  1461. struct tb_switch *host, *dev1, *dev2;
  1462. struct tb_tunnel *tunnel1, *tunnel2;
  1463. struct tb_port *down, *up;
  1464. /*
  1465. * Create USB3 tunnel between host and two devices.
  1466. *
  1467. * [Host]
  1468. * 1 |
  1469. * 1 |
  1470. * [Device #1]
  1471. * \ 7
  1472. * \ 1
  1473. * [Device #2]
  1474. */
  1475. host = alloc_host(test);
  1476. dev1 = alloc_dev_default(test, host, 0x1, true);
  1477. dev2 = alloc_dev_default(test, dev1, 0x701, true);
  1478. down = &host->ports[12];
  1479. up = &dev1->ports[16];
  1480. tunnel1 = tb_tunnel_alloc_usb3(NULL, up, down, 0, 0);
  1481. KUNIT_ASSERT_NOT_NULL(test, tunnel1);
  1482. KUNIT_EXPECT_EQ(test, tunnel1->type, TB_TUNNEL_USB3);
  1483. KUNIT_EXPECT_PTR_EQ(test, tunnel1->src_port, down);
  1484. KUNIT_EXPECT_PTR_EQ(test, tunnel1->dst_port, up);
  1485. KUNIT_ASSERT_EQ(test, tunnel1->npaths, 2);
  1486. KUNIT_ASSERT_EQ(test, tunnel1->paths[0]->path_length, 2);
  1487. KUNIT_EXPECT_PTR_EQ(test, tunnel1->paths[0]->hops[0].in_port, down);
  1488. KUNIT_EXPECT_PTR_EQ(test, tunnel1->paths[0]->hops[1].out_port, up);
  1489. KUNIT_ASSERT_EQ(test, tunnel1->paths[1]->path_length, 2);
  1490. KUNIT_EXPECT_PTR_EQ(test, tunnel1->paths[1]->hops[0].in_port, up);
  1491. KUNIT_EXPECT_PTR_EQ(test, tunnel1->paths[1]->hops[1].out_port, down);
  1492. down = &dev1->ports[17];
  1493. up = &dev2->ports[16];
  1494. tunnel2 = tb_tunnel_alloc_usb3(NULL, up, down, 0, 0);
  1495. KUNIT_ASSERT_NOT_NULL(test, tunnel2);
  1496. KUNIT_EXPECT_EQ(test, tunnel2->type, TB_TUNNEL_USB3);
  1497. KUNIT_EXPECT_PTR_EQ(test, tunnel2->src_port, down);
  1498. KUNIT_EXPECT_PTR_EQ(test, tunnel2->dst_port, up);
  1499. KUNIT_ASSERT_EQ(test, tunnel2->npaths, 2);
  1500. KUNIT_ASSERT_EQ(test, tunnel2->paths[0]->path_length, 2);
  1501. KUNIT_EXPECT_PTR_EQ(test, tunnel2->paths[0]->hops[0].in_port, down);
  1502. KUNIT_EXPECT_PTR_EQ(test, tunnel2->paths[0]->hops[1].out_port, up);
  1503. KUNIT_ASSERT_EQ(test, tunnel2->paths[1]->path_length, 2);
  1504. KUNIT_EXPECT_PTR_EQ(test, tunnel2->paths[1]->hops[0].in_port, up);
  1505. KUNIT_EXPECT_PTR_EQ(test, tunnel2->paths[1]->hops[1].out_port, down);
  1506. tb_tunnel_free(tunnel2);
  1507. tb_tunnel_free(tunnel1);
  1508. }
  1509. static void tb_test_tunnel_port_on_path(struct kunit *test)
  1510. {
  1511. struct tb_switch *host, *dev1, *dev2, *dev3, *dev4, *dev5;
  1512. struct tb_port *in, *out, *port;
  1513. struct tb_tunnel *dp_tunnel;
  1514. /*
  1515. * [Host]
  1516. * 3 |
  1517. * 1 |
  1518. * [Device #1]
  1519. * 3 / | 5 \ 7
  1520. * 1 / | \ 1
  1521. * [Device #2] | [Device #4]
  1522. * | 1
  1523. * [Device #3]
  1524. * | 5
  1525. * | 1
  1526. * [Device #5]
  1527. */
  1528. host = alloc_host(test);
  1529. dev1 = alloc_dev_default(test, host, 0x3, true);
  1530. dev2 = alloc_dev_with_dpin(test, dev1, 0x303, true);
  1531. dev3 = alloc_dev_default(test, dev1, 0x503, true);
  1532. dev4 = alloc_dev_default(test, dev1, 0x703, true);
  1533. dev5 = alloc_dev_default(test, dev3, 0x50503, true);
  1534. in = &dev2->ports[13];
  1535. out = &dev5->ports[13];
  1536. dp_tunnel = tb_tunnel_alloc_dp(NULL, in, out, 1, 0, 0);
  1537. KUNIT_ASSERT_NOT_NULL(test, dp_tunnel);
  1538. KUNIT_EXPECT_TRUE(test, tb_tunnel_port_on_path(dp_tunnel, in));
  1539. KUNIT_EXPECT_TRUE(test, tb_tunnel_port_on_path(dp_tunnel, out));
  1540. port = &host->ports[8];
  1541. KUNIT_EXPECT_FALSE(test, tb_tunnel_port_on_path(dp_tunnel, port));
  1542. port = &host->ports[3];
  1543. KUNIT_EXPECT_FALSE(test, tb_tunnel_port_on_path(dp_tunnel, port));
  1544. port = &dev1->ports[1];
  1545. KUNIT_EXPECT_FALSE(test, tb_tunnel_port_on_path(dp_tunnel, port));
  1546. port = &dev1->ports[3];
  1547. KUNIT_EXPECT_TRUE(test, tb_tunnel_port_on_path(dp_tunnel, port));
  1548. port = &dev1->ports[5];
  1549. KUNIT_EXPECT_TRUE(test, tb_tunnel_port_on_path(dp_tunnel, port));
  1550. port = &dev1->ports[7];
  1551. KUNIT_EXPECT_FALSE(test, tb_tunnel_port_on_path(dp_tunnel, port));
  1552. port = &dev3->ports[1];
  1553. KUNIT_EXPECT_TRUE(test, tb_tunnel_port_on_path(dp_tunnel, port));
  1554. port = &dev5->ports[1];
  1555. KUNIT_EXPECT_TRUE(test, tb_tunnel_port_on_path(dp_tunnel, port));
  1556. port = &dev4->ports[1];
  1557. KUNIT_EXPECT_FALSE(test, tb_tunnel_port_on_path(dp_tunnel, port));
  1558. tb_tunnel_free(dp_tunnel);
  1559. }
  1560. static void tb_test_tunnel_dma(struct kunit *test)
  1561. {
  1562. struct tb_port *nhi, *port;
  1563. struct tb_tunnel *tunnel;
  1564. struct tb_switch *host;
  1565. /*
  1566. * Create DMA tunnel from NHI to port 1 and back.
  1567. *
  1568. * [Host 1]
  1569. * 1 ^ In HopID 1 -> Out HopID 8
  1570. * |
  1571. * v In HopID 8 -> Out HopID 1
  1572. * ............ Domain border
  1573. * |
  1574. * [Host 2]
  1575. */
  1576. host = alloc_host(test);
  1577. nhi = &host->ports[7];
  1578. port = &host->ports[1];
  1579. tunnel = tb_tunnel_alloc_dma(NULL, nhi, port, 8, 1, 8, 1);
  1580. KUNIT_ASSERT_NOT_NULL(test, tunnel);
  1581. KUNIT_EXPECT_EQ(test, tunnel->type, TB_TUNNEL_DMA);
  1582. KUNIT_EXPECT_PTR_EQ(test, tunnel->src_port, nhi);
  1583. KUNIT_EXPECT_PTR_EQ(test, tunnel->dst_port, port);
  1584. KUNIT_ASSERT_EQ(test, tunnel->npaths, 2);
  1585. /* RX path */
  1586. KUNIT_ASSERT_EQ(test, tunnel->paths[0]->path_length, 1);
  1587. KUNIT_EXPECT_PTR_EQ(test, tunnel->paths[0]->hops[0].in_port, port);
  1588. KUNIT_EXPECT_EQ(test, tunnel->paths[0]->hops[0].in_hop_index, 8);
  1589. KUNIT_EXPECT_PTR_EQ(test, tunnel->paths[0]->hops[0].out_port, nhi);
  1590. KUNIT_EXPECT_EQ(test, tunnel->paths[0]->hops[0].next_hop_index, 1);
  1591. /* TX path */
  1592. KUNIT_ASSERT_EQ(test, tunnel->paths[1]->path_length, 1);
  1593. KUNIT_EXPECT_PTR_EQ(test, tunnel->paths[1]->hops[0].in_port, nhi);
  1594. KUNIT_EXPECT_EQ(test, tunnel->paths[1]->hops[0].in_hop_index, 1);
  1595. KUNIT_EXPECT_PTR_EQ(test, tunnel->paths[1]->hops[0].out_port, port);
  1596. KUNIT_EXPECT_EQ(test, tunnel->paths[1]->hops[0].next_hop_index, 8);
  1597. tb_tunnel_free(tunnel);
  1598. }
  1599. static void tb_test_tunnel_dma_rx(struct kunit *test)
  1600. {
  1601. struct tb_port *nhi, *port;
  1602. struct tb_tunnel *tunnel;
  1603. struct tb_switch *host;
  1604. /*
  1605. * Create DMA RX tunnel from port 1 to NHI.
  1606. *
  1607. * [Host 1]
  1608. * 1 ^
  1609. * |
  1610. * | In HopID 15 -> Out HopID 2
  1611. * ............ Domain border
  1612. * |
  1613. * [Host 2]
  1614. */
  1615. host = alloc_host(test);
  1616. nhi = &host->ports[7];
  1617. port = &host->ports[1];
  1618. tunnel = tb_tunnel_alloc_dma(NULL, nhi, port, -1, -1, 15, 2);
  1619. KUNIT_ASSERT_NOT_NULL(test, tunnel);
  1620. KUNIT_EXPECT_EQ(test, tunnel->type, TB_TUNNEL_DMA);
  1621. KUNIT_EXPECT_PTR_EQ(test, tunnel->src_port, nhi);
  1622. KUNIT_EXPECT_PTR_EQ(test, tunnel->dst_port, port);
  1623. KUNIT_ASSERT_EQ(test, tunnel->npaths, 1);
  1624. /* RX path */
  1625. KUNIT_ASSERT_EQ(test, tunnel->paths[0]->path_length, 1);
  1626. KUNIT_EXPECT_PTR_EQ(test, tunnel->paths[0]->hops[0].in_port, port);
  1627. KUNIT_EXPECT_EQ(test, tunnel->paths[0]->hops[0].in_hop_index, 15);
  1628. KUNIT_EXPECT_PTR_EQ(test, tunnel->paths[0]->hops[0].out_port, nhi);
  1629. KUNIT_EXPECT_EQ(test, tunnel->paths[0]->hops[0].next_hop_index, 2);
  1630. tb_tunnel_free(tunnel);
  1631. }
  1632. static void tb_test_tunnel_dma_tx(struct kunit *test)
  1633. {
  1634. struct tb_port *nhi, *port;
  1635. struct tb_tunnel *tunnel;
  1636. struct tb_switch *host;
  1637. /*
  1638. * Create DMA TX tunnel from NHI to port 1.
  1639. *
  1640. * [Host 1]
  1641. * 1 | In HopID 2 -> Out HopID 15
  1642. * |
  1643. * v
  1644. * ............ Domain border
  1645. * |
  1646. * [Host 2]
  1647. */
  1648. host = alloc_host(test);
  1649. nhi = &host->ports[7];
  1650. port = &host->ports[1];
  1651. tunnel = tb_tunnel_alloc_dma(NULL, nhi, port, 15, 2, -1, -1);
  1652. KUNIT_ASSERT_NOT_NULL(test, tunnel);
  1653. KUNIT_EXPECT_EQ(test, tunnel->type, TB_TUNNEL_DMA);
  1654. KUNIT_EXPECT_PTR_EQ(test, tunnel->src_port, nhi);
  1655. KUNIT_EXPECT_PTR_EQ(test, tunnel->dst_port, port);
  1656. KUNIT_ASSERT_EQ(test, tunnel->npaths, 1);
  1657. /* TX path */
  1658. KUNIT_ASSERT_EQ(test, tunnel->paths[0]->path_length, 1);
  1659. KUNIT_EXPECT_PTR_EQ(test, tunnel->paths[0]->hops[0].in_port, nhi);
  1660. KUNIT_EXPECT_EQ(test, tunnel->paths[0]->hops[0].in_hop_index, 2);
  1661. KUNIT_EXPECT_PTR_EQ(test, tunnel->paths[0]->hops[0].out_port, port);
  1662. KUNIT_EXPECT_EQ(test, tunnel->paths[0]->hops[0].next_hop_index, 15);
  1663. tb_tunnel_free(tunnel);
  1664. }
  1665. static void tb_test_tunnel_dma_chain(struct kunit *test)
  1666. {
  1667. struct tb_switch *host, *dev1, *dev2;
  1668. struct tb_port *nhi, *port;
  1669. struct tb_tunnel *tunnel;
  1670. /*
  1671. * Create DMA tunnel from NHI to Device #2 port 3 and back.
  1672. *
  1673. * [Host 1]
  1674. * 1 ^ In HopID 1 -> Out HopID x
  1675. * |
  1676. * 1 | In HopID x -> Out HopID 1
  1677. * [Device #1]
  1678. * 7 \
  1679. * 1 \
  1680. * [Device #2]
  1681. * 3 | In HopID x -> Out HopID 8
  1682. * |
  1683. * v In HopID 8 -> Out HopID x
  1684. * ............ Domain border
  1685. * |
  1686. * [Host 2]
  1687. */
  1688. host = alloc_host(test);
  1689. dev1 = alloc_dev_default(test, host, 0x1, true);
  1690. dev2 = alloc_dev_default(test, dev1, 0x701, true);
  1691. nhi = &host->ports[7];
  1692. port = &dev2->ports[3];
  1693. tunnel = tb_tunnel_alloc_dma(NULL, nhi, port, 8, 1, 8, 1);
  1694. KUNIT_ASSERT_NOT_NULL(test, tunnel);
  1695. KUNIT_EXPECT_EQ(test, tunnel->type, TB_TUNNEL_DMA);
  1696. KUNIT_EXPECT_PTR_EQ(test, tunnel->src_port, nhi);
  1697. KUNIT_EXPECT_PTR_EQ(test, tunnel->dst_port, port);
  1698. KUNIT_ASSERT_EQ(test, tunnel->npaths, 2);
  1699. /* RX path */
  1700. KUNIT_ASSERT_EQ(test, tunnel->paths[0]->path_length, 3);
  1701. KUNIT_EXPECT_PTR_EQ(test, tunnel->paths[0]->hops[0].in_port, port);
  1702. KUNIT_EXPECT_EQ(test, tunnel->paths[0]->hops[0].in_hop_index, 8);
  1703. KUNIT_EXPECT_PTR_EQ(test, tunnel->paths[0]->hops[0].out_port,
  1704. &dev2->ports[1]);
  1705. KUNIT_EXPECT_PTR_EQ(test, tunnel->paths[0]->hops[1].in_port,
  1706. &dev1->ports[7]);
  1707. KUNIT_EXPECT_PTR_EQ(test, tunnel->paths[0]->hops[1].out_port,
  1708. &dev1->ports[1]);
  1709. KUNIT_EXPECT_PTR_EQ(test, tunnel->paths[0]->hops[2].in_port,
  1710. &host->ports[1]);
  1711. KUNIT_EXPECT_PTR_EQ(test, tunnel->paths[0]->hops[2].out_port, nhi);
  1712. KUNIT_EXPECT_EQ(test, tunnel->paths[0]->hops[2].next_hop_index, 1);
  1713. /* TX path */
  1714. KUNIT_ASSERT_EQ(test, tunnel->paths[1]->path_length, 3);
  1715. KUNIT_EXPECT_PTR_EQ(test, tunnel->paths[1]->hops[0].in_port, nhi);
  1716. KUNIT_EXPECT_EQ(test, tunnel->paths[1]->hops[0].in_hop_index, 1);
  1717. KUNIT_EXPECT_PTR_EQ(test, tunnel->paths[1]->hops[1].in_port,
  1718. &dev1->ports[1]);
  1719. KUNIT_EXPECT_PTR_EQ(test, tunnel->paths[1]->hops[1].out_port,
  1720. &dev1->ports[7]);
  1721. KUNIT_EXPECT_PTR_EQ(test, tunnel->paths[1]->hops[2].in_port,
  1722. &dev2->ports[1]);
  1723. KUNIT_EXPECT_PTR_EQ(test, tunnel->paths[1]->hops[2].out_port, port);
  1724. KUNIT_EXPECT_EQ(test, tunnel->paths[1]->hops[2].next_hop_index, 8);
  1725. tb_tunnel_free(tunnel);
  1726. }
  1727. static void tb_test_tunnel_dma_match(struct kunit *test)
  1728. {
  1729. struct tb_port *nhi, *port;
  1730. struct tb_tunnel *tunnel;
  1731. struct tb_switch *host;
  1732. host = alloc_host(test);
  1733. nhi = &host->ports[7];
  1734. port = &host->ports[1];
  1735. tunnel = tb_tunnel_alloc_dma(NULL, nhi, port, 15, 1, 15, 1);
  1736. KUNIT_ASSERT_NOT_NULL(test, tunnel);
  1737. KUNIT_ASSERT_TRUE(test, tb_tunnel_match_dma(tunnel, 15, 1, 15, 1));
  1738. KUNIT_ASSERT_FALSE(test, tb_tunnel_match_dma(tunnel, 8, 1, 15, 1));
  1739. KUNIT_ASSERT_TRUE(test, tb_tunnel_match_dma(tunnel, -1, -1, 15, 1));
  1740. KUNIT_ASSERT_TRUE(test, tb_tunnel_match_dma(tunnel, 15, 1, -1, -1));
  1741. KUNIT_ASSERT_TRUE(test, tb_tunnel_match_dma(tunnel, 15, -1, -1, -1));
  1742. KUNIT_ASSERT_TRUE(test, tb_tunnel_match_dma(tunnel, -1, 1, -1, -1));
  1743. KUNIT_ASSERT_TRUE(test, tb_tunnel_match_dma(tunnel, -1, -1, 15, -1));
  1744. KUNIT_ASSERT_TRUE(test, tb_tunnel_match_dma(tunnel, -1, -1, -1, 1));
  1745. KUNIT_ASSERT_TRUE(test, tb_tunnel_match_dma(tunnel, -1, -1, -1, -1));
  1746. KUNIT_ASSERT_FALSE(test, tb_tunnel_match_dma(tunnel, 8, -1, 8, -1));
  1747. tb_tunnel_free(tunnel);
  1748. tunnel = tb_tunnel_alloc_dma(NULL, nhi, port, 15, 1, -1, -1);
  1749. KUNIT_ASSERT_NOT_NULL(test, tunnel);
  1750. KUNIT_ASSERT_TRUE(test, tb_tunnel_match_dma(tunnel, 15, 1, -1, -1));
  1751. KUNIT_ASSERT_TRUE(test, tb_tunnel_match_dma(tunnel, 15, -1, -1, -1));
  1752. KUNIT_ASSERT_TRUE(test, tb_tunnel_match_dma(tunnel, -1, 1, -1, -1));
  1753. KUNIT_ASSERT_TRUE(test, tb_tunnel_match_dma(tunnel, -1, -1, -1, -1));
  1754. KUNIT_ASSERT_FALSE(test, tb_tunnel_match_dma(tunnel, 15, 1, 15, 1));
  1755. KUNIT_ASSERT_FALSE(test, tb_tunnel_match_dma(tunnel, -1, -1, 15, 1));
  1756. KUNIT_ASSERT_FALSE(test, tb_tunnel_match_dma(tunnel, 15, 11, -1, -1));
  1757. tb_tunnel_free(tunnel);
  1758. tunnel = tb_tunnel_alloc_dma(NULL, nhi, port, -1, -1, 15, 11);
  1759. KUNIT_ASSERT_NOT_NULL(test, tunnel);
  1760. KUNIT_ASSERT_TRUE(test, tb_tunnel_match_dma(tunnel, -1, -1, 15, 11));
  1761. KUNIT_ASSERT_TRUE(test, tb_tunnel_match_dma(tunnel, -1, -1, 15, -1));
  1762. KUNIT_ASSERT_TRUE(test, tb_tunnel_match_dma(tunnel, -1, -1, -1, 11));
  1763. KUNIT_ASSERT_TRUE(test, tb_tunnel_match_dma(tunnel, -1, -1, -1, -1));
  1764. KUNIT_ASSERT_FALSE(test, tb_tunnel_match_dma(tunnel, -1, -1, 15, 1));
  1765. KUNIT_ASSERT_FALSE(test, tb_tunnel_match_dma(tunnel, -1, -1, 10, 11));
  1766. KUNIT_ASSERT_FALSE(test, tb_tunnel_match_dma(tunnel, 15, 11, -1, -1));
  1767. tb_tunnel_free(tunnel);
  1768. }
  1769. static void tb_test_credit_alloc_legacy_not_bonded(struct kunit *test)
  1770. {
  1771. struct tb_switch *host, *dev;
  1772. struct tb_port *up, *down;
  1773. struct tb_tunnel *tunnel;
  1774. struct tb_path *path;
  1775. host = alloc_host(test);
  1776. dev = alloc_dev_default(test, host, 0x1, false);
  1777. down = &host->ports[8];
  1778. up = &dev->ports[9];
  1779. tunnel = tb_tunnel_alloc_pci(NULL, up, down);
  1780. KUNIT_ASSERT_NOT_NULL(test, tunnel);
  1781. KUNIT_ASSERT_EQ(test, tunnel->npaths, (size_t)2);
  1782. path = tunnel->paths[0];
  1783. KUNIT_ASSERT_EQ(test, path->path_length, 2);
  1784. KUNIT_EXPECT_EQ(test, path->hops[0].nfc_credits, 0U);
  1785. KUNIT_EXPECT_EQ(test, path->hops[0].initial_credits, 7U);
  1786. KUNIT_EXPECT_EQ(test, path->hops[1].nfc_credits, 0U);
  1787. KUNIT_EXPECT_EQ(test, path->hops[1].initial_credits, 16U);
  1788. path = tunnel->paths[1];
  1789. KUNIT_ASSERT_EQ(test, path->path_length, 2);
  1790. KUNIT_EXPECT_EQ(test, path->hops[0].nfc_credits, 0U);
  1791. KUNIT_EXPECT_EQ(test, path->hops[0].initial_credits, 7U);
  1792. KUNIT_EXPECT_EQ(test, path->hops[1].nfc_credits, 0U);
  1793. KUNIT_EXPECT_EQ(test, path->hops[1].initial_credits, 16U);
  1794. tb_tunnel_free(tunnel);
  1795. }
  1796. static void tb_test_credit_alloc_legacy_bonded(struct kunit *test)
  1797. {
  1798. struct tb_switch *host, *dev;
  1799. struct tb_port *up, *down;
  1800. struct tb_tunnel *tunnel;
  1801. struct tb_path *path;
  1802. host = alloc_host(test);
  1803. dev = alloc_dev_default(test, host, 0x1, true);
  1804. down = &host->ports[8];
  1805. up = &dev->ports[9];
  1806. tunnel = tb_tunnel_alloc_pci(NULL, up, down);
  1807. KUNIT_ASSERT_NOT_NULL(test, tunnel);
  1808. KUNIT_ASSERT_EQ(test, tunnel->npaths, (size_t)2);
  1809. path = tunnel->paths[0];
  1810. KUNIT_ASSERT_EQ(test, path->path_length, 2);
  1811. KUNIT_EXPECT_EQ(test, path->hops[0].nfc_credits, 0U);
  1812. KUNIT_EXPECT_EQ(test, path->hops[0].initial_credits, 7U);
  1813. KUNIT_EXPECT_EQ(test, path->hops[1].nfc_credits, 0U);
  1814. KUNIT_EXPECT_EQ(test, path->hops[1].initial_credits, 32U);
  1815. path = tunnel->paths[1];
  1816. KUNIT_ASSERT_EQ(test, path->path_length, 2);
  1817. KUNIT_EXPECT_EQ(test, path->hops[0].nfc_credits, 0U);
  1818. KUNIT_EXPECT_EQ(test, path->hops[0].initial_credits, 7U);
  1819. KUNIT_EXPECT_EQ(test, path->hops[1].nfc_credits, 0U);
  1820. KUNIT_EXPECT_EQ(test, path->hops[1].initial_credits, 32U);
  1821. tb_tunnel_free(tunnel);
  1822. }
  1823. static void tb_test_credit_alloc_pcie(struct kunit *test)
  1824. {
  1825. struct tb_switch *host, *dev;
  1826. struct tb_port *up, *down;
  1827. struct tb_tunnel *tunnel;
  1828. struct tb_path *path;
  1829. host = alloc_host_usb4(test);
  1830. dev = alloc_dev_usb4(test, host, 0x1, true);
  1831. down = &host->ports[8];
  1832. up = &dev->ports[9];
  1833. tunnel = tb_tunnel_alloc_pci(NULL, up, down);
  1834. KUNIT_ASSERT_NOT_NULL(test, tunnel);
  1835. KUNIT_ASSERT_EQ(test, tunnel->npaths, (size_t)2);
  1836. path = tunnel->paths[0];
  1837. KUNIT_ASSERT_EQ(test, path->path_length, 2);
  1838. KUNIT_EXPECT_EQ(test, path->hops[0].nfc_credits, 0U);
  1839. KUNIT_EXPECT_EQ(test, path->hops[0].initial_credits, 7U);
  1840. KUNIT_EXPECT_EQ(test, path->hops[1].nfc_credits, 0U);
  1841. KUNIT_EXPECT_EQ(test, path->hops[1].initial_credits, 32U);
  1842. path = tunnel->paths[1];
  1843. KUNIT_ASSERT_EQ(test, path->path_length, 2);
  1844. KUNIT_EXPECT_EQ(test, path->hops[0].nfc_credits, 0U);
  1845. KUNIT_EXPECT_EQ(test, path->hops[0].initial_credits, 7U);
  1846. KUNIT_EXPECT_EQ(test, path->hops[1].nfc_credits, 0U);
  1847. KUNIT_EXPECT_EQ(test, path->hops[1].initial_credits, 64U);
  1848. tb_tunnel_free(tunnel);
  1849. }
  1850. static void tb_test_credit_alloc_without_dp(struct kunit *test)
  1851. {
  1852. struct tb_switch *host, *dev;
  1853. struct tb_port *up, *down;
  1854. struct tb_tunnel *tunnel;
  1855. struct tb_path *path;
  1856. host = alloc_host_usb4(test);
  1857. dev = alloc_dev_without_dp(test, host, 0x1, true);
  1858. /*
  1859. * The device has no DP therefore baMinDPmain = baMinDPaux = 0
  1860. *
  1861. * Create PCIe path with buffers less than baMaxPCIe.
  1862. *
  1863. * For a device with buffers configurations:
  1864. * baMaxUSB3 = 109
  1865. * baMinDPaux = 0
  1866. * baMinDPmain = 0
  1867. * baMaxPCIe = 30
  1868. * baMaxHI = 1
  1869. * Remaining Buffers = Total - (CP + DP) = 120 - (2 + 0) = 118
  1870. * PCIe Credits = Max(6, Min(baMaxPCIe, Remaining Buffers - baMaxUSB3)
  1871. * = Max(6, Min(30, 9) = 9
  1872. */
  1873. down = &host->ports[8];
  1874. up = &dev->ports[9];
  1875. tunnel = tb_tunnel_alloc_pci(NULL, up, down);
  1876. KUNIT_ASSERT_TRUE(test, tunnel != NULL);
  1877. KUNIT_ASSERT_EQ(test, tunnel->npaths, (size_t)2);
  1878. /* PCIe downstream path */
  1879. path = tunnel->paths[0];
  1880. KUNIT_ASSERT_EQ(test, path->path_length, 2);
  1881. KUNIT_EXPECT_EQ(test, path->hops[0].nfc_credits, 0U);
  1882. KUNIT_EXPECT_EQ(test, path->hops[0].initial_credits, 7U);
  1883. KUNIT_EXPECT_EQ(test, path->hops[1].nfc_credits, 0U);
  1884. KUNIT_EXPECT_EQ(test, path->hops[1].initial_credits, 9U);
  1885. /* PCIe upstream path */
  1886. path = tunnel->paths[1];
  1887. KUNIT_ASSERT_EQ(test, path->path_length, 2);
  1888. KUNIT_EXPECT_EQ(test, path->hops[0].nfc_credits, 0U);
  1889. KUNIT_EXPECT_EQ(test, path->hops[0].initial_credits, 7U);
  1890. KUNIT_EXPECT_EQ(test, path->hops[1].nfc_credits, 0U);
  1891. KUNIT_EXPECT_EQ(test, path->hops[1].initial_credits, 64U);
  1892. tb_tunnel_free(tunnel);
  1893. }
  1894. static void tb_test_credit_alloc_dp(struct kunit *test)
  1895. {
  1896. struct tb_switch *host, *dev;
  1897. struct tb_port *in, *out;
  1898. struct tb_tunnel *tunnel;
  1899. struct tb_path *path;
  1900. host = alloc_host_usb4(test);
  1901. dev = alloc_dev_usb4(test, host, 0x1, true);
  1902. in = &host->ports[5];
  1903. out = &dev->ports[14];
  1904. tunnel = tb_tunnel_alloc_dp(NULL, in, out, 1, 0, 0);
  1905. KUNIT_ASSERT_NOT_NULL(test, tunnel);
  1906. KUNIT_ASSERT_EQ(test, tunnel->npaths, (size_t)3);
  1907. /* Video (main) path */
  1908. path = tunnel->paths[0];
  1909. KUNIT_ASSERT_EQ(test, path->path_length, 2);
  1910. KUNIT_EXPECT_EQ(test, path->hops[0].nfc_credits, 12U);
  1911. KUNIT_EXPECT_EQ(test, path->hops[0].initial_credits, 0U);
  1912. KUNIT_EXPECT_EQ(test, path->hops[1].nfc_credits, 18U);
  1913. KUNIT_EXPECT_EQ(test, path->hops[1].initial_credits, 0U);
  1914. /* AUX TX */
  1915. path = tunnel->paths[1];
  1916. KUNIT_ASSERT_EQ(test, path->path_length, 2);
  1917. KUNIT_EXPECT_EQ(test, path->hops[0].nfc_credits, 0U);
  1918. KUNIT_EXPECT_EQ(test, path->hops[0].initial_credits, 1U);
  1919. KUNIT_EXPECT_EQ(test, path->hops[1].nfc_credits, 0U);
  1920. KUNIT_EXPECT_EQ(test, path->hops[1].initial_credits, 1U);
  1921. /* AUX RX */
  1922. path = tunnel->paths[2];
  1923. KUNIT_ASSERT_EQ(test, path->path_length, 2);
  1924. KUNIT_EXPECT_EQ(test, path->hops[0].nfc_credits, 0U);
  1925. KUNIT_EXPECT_EQ(test, path->hops[0].initial_credits, 1U);
  1926. KUNIT_EXPECT_EQ(test, path->hops[1].nfc_credits, 0U);
  1927. KUNIT_EXPECT_EQ(test, path->hops[1].initial_credits, 1U);
  1928. tb_tunnel_free(tunnel);
  1929. }
  1930. static void tb_test_credit_alloc_usb3(struct kunit *test)
  1931. {
  1932. struct tb_switch *host, *dev;
  1933. struct tb_port *up, *down;
  1934. struct tb_tunnel *tunnel;
  1935. struct tb_path *path;
  1936. host = alloc_host_usb4(test);
  1937. dev = alloc_dev_usb4(test, host, 0x1, true);
  1938. down = &host->ports[12];
  1939. up = &dev->ports[16];
  1940. tunnel = tb_tunnel_alloc_usb3(NULL, up, down, 0, 0);
  1941. KUNIT_ASSERT_NOT_NULL(test, tunnel);
  1942. KUNIT_ASSERT_EQ(test, tunnel->npaths, (size_t)2);
  1943. path = tunnel->paths[0];
  1944. KUNIT_ASSERT_EQ(test, path->path_length, 2);
  1945. KUNIT_EXPECT_EQ(test, path->hops[0].nfc_credits, 0U);
  1946. KUNIT_EXPECT_EQ(test, path->hops[0].initial_credits, 7U);
  1947. KUNIT_EXPECT_EQ(test, path->hops[1].nfc_credits, 0U);
  1948. KUNIT_EXPECT_EQ(test, path->hops[1].initial_credits, 14U);
  1949. path = tunnel->paths[1];
  1950. KUNIT_ASSERT_EQ(test, path->path_length, 2);
  1951. KUNIT_EXPECT_EQ(test, path->hops[0].nfc_credits, 0U);
  1952. KUNIT_EXPECT_EQ(test, path->hops[0].initial_credits, 7U);
  1953. KUNIT_EXPECT_EQ(test, path->hops[1].nfc_credits, 0U);
  1954. KUNIT_EXPECT_EQ(test, path->hops[1].initial_credits, 32U);
  1955. tb_tunnel_free(tunnel);
  1956. }
  1957. static void tb_test_credit_alloc_dma(struct kunit *test)
  1958. {
  1959. struct tb_switch *host, *dev;
  1960. struct tb_port *nhi, *port;
  1961. struct tb_tunnel *tunnel;
  1962. struct tb_path *path;
  1963. host = alloc_host_usb4(test);
  1964. dev = alloc_dev_usb4(test, host, 0x1, true);
  1965. nhi = &host->ports[7];
  1966. port = &dev->ports[3];
  1967. tunnel = tb_tunnel_alloc_dma(NULL, nhi, port, 8, 1, 8, 1);
  1968. KUNIT_ASSERT_NOT_NULL(test, tunnel);
  1969. KUNIT_ASSERT_EQ(test, tunnel->npaths, (size_t)2);
  1970. /* DMA RX */
  1971. path = tunnel->paths[0];
  1972. KUNIT_ASSERT_EQ(test, path->path_length, 2);
  1973. KUNIT_EXPECT_EQ(test, path->hops[0].nfc_credits, 0U);
  1974. KUNIT_EXPECT_EQ(test, path->hops[0].initial_credits, 14U);
  1975. KUNIT_EXPECT_EQ(test, path->hops[1].nfc_credits, 0U);
  1976. KUNIT_EXPECT_EQ(test, path->hops[1].initial_credits, 14U);
  1977. /* DMA TX */
  1978. path = tunnel->paths[1];
  1979. KUNIT_ASSERT_EQ(test, path->path_length, 2);
  1980. KUNIT_EXPECT_EQ(test, path->hops[0].nfc_credits, 0U);
  1981. KUNIT_EXPECT_EQ(test, path->hops[0].initial_credits, 0U);
  1982. KUNIT_EXPECT_EQ(test, path->hops[1].nfc_credits, 0U);
  1983. KUNIT_EXPECT_EQ(test, path->hops[1].initial_credits, 14U);
  1984. tb_tunnel_free(tunnel);
  1985. }
  1986. static void tb_test_credit_alloc_dma_multiple(struct kunit *test)
  1987. {
  1988. struct tb_tunnel *tunnel1, *tunnel2, *tunnel3;
  1989. struct tb_switch *host, *dev;
  1990. struct tb_port *nhi, *port;
  1991. struct tb_path *path;
  1992. host = alloc_host_usb4(test);
  1993. dev = alloc_dev_usb4(test, host, 0x1, true);
  1994. nhi = &host->ports[7];
  1995. port = &dev->ports[3];
  1996. /*
  1997. * Create three DMA tunnels through the same ports. With the
  1998. * default buffers we should be able to create two and the last
  1999. * one fails.
  2000. *
  2001. * For default host we have following buffers for DMA:
  2002. *
  2003. * 120 - (2 + 2 * (1 + 0) + 32 + 64 + spare) = 20
  2004. *
  2005. * For device we have following:
  2006. *
  2007. * 120 - (2 + 2 * (1 + 18) + 14 + 32 + spare) = 34
  2008. *
  2009. * spare = 14 + 1 = 15
  2010. *
  2011. * So on host the first tunnel gets 14 and the second gets the
  2012. * remaining 1 and then we run out of buffers.
  2013. */
  2014. tunnel1 = tb_tunnel_alloc_dma(NULL, nhi, port, 8, 1, 8, 1);
  2015. KUNIT_ASSERT_NOT_NULL(test, tunnel1);
  2016. KUNIT_ASSERT_EQ(test, tunnel1->npaths, (size_t)2);
  2017. path = tunnel1->paths[0];
  2018. KUNIT_ASSERT_EQ(test, path->path_length, 2);
  2019. KUNIT_EXPECT_EQ(test, path->hops[0].nfc_credits, 0U);
  2020. KUNIT_EXPECT_EQ(test, path->hops[0].initial_credits, 14U);
  2021. KUNIT_EXPECT_EQ(test, path->hops[1].nfc_credits, 0U);
  2022. KUNIT_EXPECT_EQ(test, path->hops[1].initial_credits, 14U);
  2023. path = tunnel1->paths[1];
  2024. KUNIT_ASSERT_EQ(test, path->path_length, 2);
  2025. KUNIT_EXPECT_EQ(test, path->hops[0].nfc_credits, 0U);
  2026. KUNIT_EXPECT_EQ(test, path->hops[0].initial_credits, 0U);
  2027. KUNIT_EXPECT_EQ(test, path->hops[1].nfc_credits, 0U);
  2028. KUNIT_EXPECT_EQ(test, path->hops[1].initial_credits, 14U);
  2029. tunnel2 = tb_tunnel_alloc_dma(NULL, nhi, port, 9, 2, 9, 2);
  2030. KUNIT_ASSERT_NOT_NULL(test, tunnel2);
  2031. KUNIT_ASSERT_EQ(test, tunnel2->npaths, (size_t)2);
  2032. path = tunnel2->paths[0];
  2033. KUNIT_ASSERT_EQ(test, path->path_length, 2);
  2034. KUNIT_EXPECT_EQ(test, path->hops[0].nfc_credits, 0U);
  2035. KUNIT_EXPECT_EQ(test, path->hops[0].initial_credits, 14U);
  2036. KUNIT_EXPECT_EQ(test, path->hops[1].nfc_credits, 0U);
  2037. KUNIT_EXPECT_EQ(test, path->hops[1].initial_credits, 1U);
  2038. path = tunnel2->paths[1];
  2039. KUNIT_ASSERT_EQ(test, path->path_length, 2);
  2040. KUNIT_EXPECT_EQ(test, path->hops[0].nfc_credits, 0U);
  2041. KUNIT_EXPECT_EQ(test, path->hops[0].initial_credits, 0U);
  2042. KUNIT_EXPECT_EQ(test, path->hops[1].nfc_credits, 0U);
  2043. KUNIT_EXPECT_EQ(test, path->hops[1].initial_credits, 1U);
  2044. tunnel3 = tb_tunnel_alloc_dma(NULL, nhi, port, 10, 3, 10, 3);
  2045. KUNIT_ASSERT_NULL(test, tunnel3);
  2046. /*
  2047. * Release the first DMA tunnel. That should make 14 buffers
  2048. * available for the next tunnel.
  2049. */
  2050. tb_tunnel_free(tunnel1);
  2051. tunnel3 = tb_tunnel_alloc_dma(NULL, nhi, port, 10, 3, 10, 3);
  2052. KUNIT_ASSERT_NOT_NULL(test, tunnel3);
  2053. path = tunnel3->paths[0];
  2054. KUNIT_ASSERT_EQ(test, path->path_length, 2);
  2055. KUNIT_EXPECT_EQ(test, path->hops[0].nfc_credits, 0U);
  2056. KUNIT_EXPECT_EQ(test, path->hops[0].initial_credits, 14U);
  2057. KUNIT_EXPECT_EQ(test, path->hops[1].nfc_credits, 0U);
  2058. KUNIT_EXPECT_EQ(test, path->hops[1].initial_credits, 14U);
  2059. path = tunnel3->paths[1];
  2060. KUNIT_ASSERT_EQ(test, path->path_length, 2);
  2061. KUNIT_EXPECT_EQ(test, path->hops[0].nfc_credits, 0U);
  2062. KUNIT_EXPECT_EQ(test, path->hops[0].initial_credits, 0U);
  2063. KUNIT_EXPECT_EQ(test, path->hops[1].nfc_credits, 0U);
  2064. KUNIT_EXPECT_EQ(test, path->hops[1].initial_credits, 14U);
  2065. tb_tunnel_free(tunnel3);
  2066. tb_tunnel_free(tunnel2);
  2067. }
  2068. static struct tb_tunnel *TB_TEST_PCIE_TUNNEL(struct kunit *test,
  2069. struct tb_switch *host, struct tb_switch *dev)
  2070. {
  2071. struct tb_port *up, *down;
  2072. struct tb_tunnel *pcie_tunnel;
  2073. struct tb_path *path;
  2074. down = &host->ports[8];
  2075. up = &dev->ports[9];
  2076. pcie_tunnel = tb_tunnel_alloc_pci(NULL, up, down);
  2077. KUNIT_ASSERT_NOT_NULL(test, pcie_tunnel);
  2078. KUNIT_ASSERT_EQ(test, pcie_tunnel->npaths, (size_t)2);
  2079. path = pcie_tunnel->paths[0];
  2080. KUNIT_ASSERT_EQ(test, path->path_length, 2);
  2081. KUNIT_EXPECT_EQ(test, path->hops[0].nfc_credits, 0U);
  2082. KUNIT_EXPECT_EQ(test, path->hops[0].initial_credits, 7U);
  2083. KUNIT_EXPECT_EQ(test, path->hops[1].nfc_credits, 0U);
  2084. KUNIT_EXPECT_EQ(test, path->hops[1].initial_credits, 32U);
  2085. path = pcie_tunnel->paths[1];
  2086. KUNIT_ASSERT_EQ(test, path->path_length, 2);
  2087. KUNIT_EXPECT_EQ(test, path->hops[0].nfc_credits, 0U);
  2088. KUNIT_EXPECT_EQ(test, path->hops[0].initial_credits, 7U);
  2089. KUNIT_EXPECT_EQ(test, path->hops[1].nfc_credits, 0U);
  2090. KUNIT_EXPECT_EQ(test, path->hops[1].initial_credits, 64U);
  2091. return pcie_tunnel;
  2092. }
  2093. static struct tb_tunnel *TB_TEST_DP_TUNNEL1(struct kunit *test,
  2094. struct tb_switch *host, struct tb_switch *dev)
  2095. {
  2096. struct tb_port *in, *out;
  2097. struct tb_tunnel *dp_tunnel1;
  2098. struct tb_path *path;
  2099. in = &host->ports[5];
  2100. out = &dev->ports[13];
  2101. dp_tunnel1 = tb_tunnel_alloc_dp(NULL, in, out, 1, 0, 0);
  2102. KUNIT_ASSERT_NOT_NULL(test, dp_tunnel1);
  2103. KUNIT_ASSERT_EQ(test, dp_tunnel1->npaths, (size_t)3);
  2104. path = dp_tunnel1->paths[0];
  2105. KUNIT_ASSERT_EQ(test, path->path_length, 2);
  2106. KUNIT_EXPECT_EQ(test, path->hops[0].nfc_credits, 12U);
  2107. KUNIT_EXPECT_EQ(test, path->hops[0].initial_credits, 0U);
  2108. KUNIT_EXPECT_EQ(test, path->hops[1].nfc_credits, 18U);
  2109. KUNIT_EXPECT_EQ(test, path->hops[1].initial_credits, 0U);
  2110. path = dp_tunnel1->paths[1];
  2111. KUNIT_ASSERT_EQ(test, path->path_length, 2);
  2112. KUNIT_EXPECT_EQ(test, path->hops[0].nfc_credits, 0U);
  2113. KUNIT_EXPECT_EQ(test, path->hops[0].initial_credits, 1U);
  2114. KUNIT_EXPECT_EQ(test, path->hops[1].nfc_credits, 0U);
  2115. KUNIT_EXPECT_EQ(test, path->hops[1].initial_credits, 1U);
  2116. path = dp_tunnel1->paths[2];
  2117. KUNIT_ASSERT_EQ(test, path->path_length, 2);
  2118. KUNIT_EXPECT_EQ(test, path->hops[0].nfc_credits, 0U);
  2119. KUNIT_EXPECT_EQ(test, path->hops[0].initial_credits, 1U);
  2120. KUNIT_EXPECT_EQ(test, path->hops[1].nfc_credits, 0U);
  2121. KUNIT_EXPECT_EQ(test, path->hops[1].initial_credits, 1U);
  2122. return dp_tunnel1;
  2123. }
  2124. static struct tb_tunnel *TB_TEST_DP_TUNNEL2(struct kunit *test,
  2125. struct tb_switch *host, struct tb_switch *dev)
  2126. {
  2127. struct tb_port *in, *out;
  2128. struct tb_tunnel *dp_tunnel2;
  2129. struct tb_path *path;
  2130. in = &host->ports[6];
  2131. out = &dev->ports[14];
  2132. dp_tunnel2 = tb_tunnel_alloc_dp(NULL, in, out, 1, 0, 0);
  2133. KUNIT_ASSERT_NOT_NULL(test, dp_tunnel2);
  2134. KUNIT_ASSERT_EQ(test, dp_tunnel2->npaths, (size_t)3);
  2135. path = dp_tunnel2->paths[0];
  2136. KUNIT_ASSERT_EQ(test, path->path_length, 2);
  2137. KUNIT_EXPECT_EQ(test, path->hops[0].nfc_credits, 12U);
  2138. KUNIT_EXPECT_EQ(test, path->hops[0].initial_credits, 0U);
  2139. KUNIT_EXPECT_EQ(test, path->hops[1].nfc_credits, 18U);
  2140. KUNIT_EXPECT_EQ(test, path->hops[1].initial_credits, 0U);
  2141. path = dp_tunnel2->paths[1];
  2142. KUNIT_ASSERT_EQ(test, path->path_length, 2);
  2143. KUNIT_EXPECT_EQ(test, path->hops[0].nfc_credits, 0U);
  2144. KUNIT_EXPECT_EQ(test, path->hops[0].initial_credits, 1U);
  2145. KUNIT_EXPECT_EQ(test, path->hops[1].nfc_credits, 0U);
  2146. KUNIT_EXPECT_EQ(test, path->hops[1].initial_credits, 1U);
  2147. path = dp_tunnel2->paths[2];
  2148. KUNIT_ASSERT_EQ(test, path->path_length, 2);
  2149. KUNIT_EXPECT_EQ(test, path->hops[0].nfc_credits, 0U);
  2150. KUNIT_EXPECT_EQ(test, path->hops[0].initial_credits, 1U);
  2151. KUNIT_EXPECT_EQ(test, path->hops[1].nfc_credits, 0U);
  2152. KUNIT_EXPECT_EQ(test, path->hops[1].initial_credits, 1U);
  2153. return dp_tunnel2;
  2154. }
  2155. static struct tb_tunnel *TB_TEST_USB3_TUNNEL(struct kunit *test,
  2156. struct tb_switch *host, struct tb_switch *dev)
  2157. {
  2158. struct tb_port *up, *down;
  2159. struct tb_tunnel *usb3_tunnel;
  2160. struct tb_path *path;
  2161. down = &host->ports[12];
  2162. up = &dev->ports[16];
  2163. usb3_tunnel = tb_tunnel_alloc_usb3(NULL, up, down, 0, 0);
  2164. KUNIT_ASSERT_NOT_NULL(test, usb3_tunnel);
  2165. KUNIT_ASSERT_EQ(test, usb3_tunnel->npaths, (size_t)2);
  2166. path = usb3_tunnel->paths[0];
  2167. KUNIT_ASSERT_EQ(test, path->path_length, 2);
  2168. KUNIT_EXPECT_EQ(test, path->hops[0].nfc_credits, 0U);
  2169. KUNIT_EXPECT_EQ(test, path->hops[0].initial_credits, 7U);
  2170. KUNIT_EXPECT_EQ(test, path->hops[1].nfc_credits, 0U);
  2171. KUNIT_EXPECT_EQ(test, path->hops[1].initial_credits, 14U);
  2172. path = usb3_tunnel->paths[1];
  2173. KUNIT_ASSERT_EQ(test, path->path_length, 2);
  2174. KUNIT_EXPECT_EQ(test, path->hops[0].nfc_credits, 0U);
  2175. KUNIT_EXPECT_EQ(test, path->hops[0].initial_credits, 7U);
  2176. KUNIT_EXPECT_EQ(test, path->hops[1].nfc_credits, 0U);
  2177. KUNIT_EXPECT_EQ(test, path->hops[1].initial_credits, 32U);
  2178. return usb3_tunnel;
  2179. }
  2180. static struct tb_tunnel *TB_TEST_DMA_TUNNEL1(struct kunit *test,
  2181. struct tb_switch *host, struct tb_switch *dev)
  2182. {
  2183. struct tb_port *nhi, *port;
  2184. struct tb_tunnel *dma_tunnel1;
  2185. struct tb_path *path;
  2186. nhi = &host->ports[7];
  2187. port = &dev->ports[3];
  2188. dma_tunnel1 = tb_tunnel_alloc_dma(NULL, nhi, port, 8, 1, 8, 1);
  2189. KUNIT_ASSERT_NOT_NULL(test, dma_tunnel1);
  2190. KUNIT_ASSERT_EQ(test, dma_tunnel1->npaths, (size_t)2);
  2191. path = dma_tunnel1->paths[0];
  2192. KUNIT_ASSERT_EQ(test, path->path_length, 2);
  2193. KUNIT_EXPECT_EQ(test, path->hops[0].nfc_credits, 0U);
  2194. KUNIT_EXPECT_EQ(test, path->hops[0].initial_credits, 14U);
  2195. KUNIT_EXPECT_EQ(test, path->hops[1].nfc_credits, 0U);
  2196. KUNIT_EXPECT_EQ(test, path->hops[1].initial_credits, 14U);
  2197. path = dma_tunnel1->paths[1];
  2198. KUNIT_ASSERT_EQ(test, path->path_length, 2);
  2199. KUNIT_EXPECT_EQ(test, path->hops[0].nfc_credits, 0U);
  2200. KUNIT_EXPECT_EQ(test, path->hops[0].initial_credits, 0U);
  2201. KUNIT_EXPECT_EQ(test, path->hops[1].nfc_credits, 0U);
  2202. KUNIT_EXPECT_EQ(test, path->hops[1].initial_credits, 14U);
  2203. return dma_tunnel1;
  2204. }
  2205. static struct tb_tunnel *TB_TEST_DMA_TUNNEL2(struct kunit *test,
  2206. struct tb_switch *host, struct tb_switch *dev)
  2207. {
  2208. struct tb_port *nhi, *port;
  2209. struct tb_tunnel *dma_tunnel2;
  2210. struct tb_path *path;
  2211. nhi = &host->ports[7];
  2212. port = &dev->ports[3];
  2213. dma_tunnel2 = tb_tunnel_alloc_dma(NULL, nhi, port, 9, 2, 9, 2);
  2214. KUNIT_ASSERT_NOT_NULL(test, dma_tunnel2);
  2215. KUNIT_ASSERT_EQ(test, dma_tunnel2->npaths, (size_t)2);
  2216. path = dma_tunnel2->paths[0];
  2217. KUNIT_ASSERT_EQ(test, path->path_length, 2);
  2218. KUNIT_EXPECT_EQ(test, path->hops[0].nfc_credits, 0U);
  2219. KUNIT_EXPECT_EQ(test, path->hops[0].initial_credits, 14U);
  2220. KUNIT_EXPECT_EQ(test, path->hops[1].nfc_credits, 0U);
  2221. KUNIT_EXPECT_EQ(test, path->hops[1].initial_credits, 1U);
  2222. path = dma_tunnel2->paths[1];
  2223. KUNIT_ASSERT_EQ(test, path->path_length, 2);
  2224. KUNIT_EXPECT_EQ(test, path->hops[0].nfc_credits, 0U);
  2225. KUNIT_EXPECT_EQ(test, path->hops[0].initial_credits, 0U);
  2226. KUNIT_EXPECT_EQ(test, path->hops[1].nfc_credits, 0U);
  2227. KUNIT_EXPECT_EQ(test, path->hops[1].initial_credits, 1U);
  2228. return dma_tunnel2;
  2229. }
  2230. static void tb_test_credit_alloc_all(struct kunit *test)
  2231. {
  2232. struct tb_tunnel *pcie_tunnel, *dp_tunnel1, *dp_tunnel2, *usb3_tunnel;
  2233. struct tb_tunnel *dma_tunnel1, *dma_tunnel2;
  2234. struct tb_switch *host, *dev;
  2235. /*
  2236. * Create PCIe, 2 x DP, USB 3.x and two DMA tunnels from host to
  2237. * device. Expectation is that all these can be established with
  2238. * the default credit allocation found in Intel hardware.
  2239. */
  2240. host = alloc_host_usb4(test);
  2241. dev = alloc_dev_usb4(test, host, 0x1, true);
  2242. pcie_tunnel = TB_TEST_PCIE_TUNNEL(test, host, dev);
  2243. dp_tunnel1 = TB_TEST_DP_TUNNEL1(test, host, dev);
  2244. dp_tunnel2 = TB_TEST_DP_TUNNEL2(test, host, dev);
  2245. usb3_tunnel = TB_TEST_USB3_TUNNEL(test, host, dev);
  2246. dma_tunnel1 = TB_TEST_DMA_TUNNEL1(test, host, dev);
  2247. dma_tunnel2 = TB_TEST_DMA_TUNNEL2(test, host, dev);
  2248. tb_tunnel_free(dma_tunnel2);
  2249. tb_tunnel_free(dma_tunnel1);
  2250. tb_tunnel_free(usb3_tunnel);
  2251. tb_tunnel_free(dp_tunnel2);
  2252. tb_tunnel_free(dp_tunnel1);
  2253. tb_tunnel_free(pcie_tunnel);
  2254. }
/*
 * Canned XDomain property block used by the property parse/format/copy
 * tests below. Layout follows the on-the-wire format: a root directory
 * header, key/value entries (immediates, text-leaf offsets and a
 * sub-directory offset), followed by the referenced text leaves and the
 * "network" sub-directory with its UUID and entries.
 */
static const u32 root_directory[] = {
	0x55584401,	/* "UXD" v1 */
	0x00000018,	/* Root directory length */
	0x76656e64,	/* "vend" */
	0x6f726964,	/* "orid" */
	0x76000001,	/* "v" R 1 */
	0x00000a27,	/* Immediate value, ! Vendor ID */
	0x76656e64,	/* "vend" */
	0x6f726964,	/* "orid" */
	0x74000003,	/* "t" R 3 */
	0x0000001a,	/* Text leaf offset, ("Apple Inc.") */
	0x64657669,	/* "devi" */
	0x63656964,	/* "ceid" */
	0x76000001,	/* "v" R 1 */
	0x0000000a,	/* Immediate value, ! Device ID */
	0x64657669,	/* "devi" */
	0x63656964,	/* "ceid" */
	0x74000003,	/* "t" R 3 */
	0x0000001d,	/* Text leaf offset, ("Macintosh") */
	0x64657669,	/* "devi" */
	0x63657276,	/* "cerv" */
	0x76000001,	/* "v" R 1 */
	0x80000100,	/* Immediate value, Device Revision */
	0x6e657477,	/* "netw" */
	0x6f726b00,	/* "ork" */
	0x44000014,	/* "D" R 20 */
	0x00000021,	/* Directory data offset, (Network Directory) */
	0x4170706c,	/* "Appl" */
	0x6520496e,	/* "e In" */
	0x632e0000,	/* "c." ! */
	0x4d616369,	/* "Maci" */
	0x6e746f73,	/* "ntos" */
	0x68000000,	/* "h" */
	0x00000000,	/* padding */
	0xca8961c6,	/* Directory UUID, Network Directory */
	0x9541ce1c,	/* Directory UUID, Network Directory */
	0x5949b8bd,	/* Directory UUID, Network Directory */
	0x4f5a5f2e,	/* Directory UUID, Network Directory */
	0x70727463,	/* "prtc" */
	0x69640000,	/* "id" */
	0x76000001,	/* "v" R 1 */
	0x00000001,	/* Immediate value, Network Protocol ID */
	0x70727463,	/* "prtc" */
	0x76657273,	/* "vers" */
	0x76000001,	/* "v" R 1 */
	0x00000001,	/* Immediate value, Network Protocol Version */
	0x70727463,	/* "prtc" */
	0x72657673,	/* "revs" */
	0x76000001,	/* "v" R 1 */
	0x00000001,	/* Immediate value, Network Protocol Revision */
	0x70727463,	/* "prtc" */
	0x73746e73,	/* "stns" */
	0x76000001,	/* "v" R 1 */
	0x00000000,	/* Immediate value, Network Protocol Settings */
};
/*
 * UUID of the "network" sub-directory: the four directory UUID words in
 * root_directory (0xca8961c6 0x9541ce1c 0x5949b8bd 0x4f5a5f2e) encode
 * these same 16 bytes as little-endian dwords.
 */
static const uuid_t network_dir_uuid =
	UUID_INIT(0xc66189ca, 0x1cce, 0x4195,
		  0xbd, 0xb8, 0x49, 0x59, 0x2e, 0x5f, 0x5a, 0x4f);
  2313. static void tb_test_property_parse(struct kunit *test)
  2314. {
  2315. struct tb_property_dir *dir, *network_dir;
  2316. struct tb_property *p;
  2317. dir = tb_property_parse_dir(root_directory, ARRAY_SIZE(root_directory));
  2318. KUNIT_ASSERT_NOT_NULL(test, dir);
  2319. p = tb_property_find(dir, "foo", TB_PROPERTY_TYPE_TEXT);
  2320. KUNIT_ASSERT_NULL(test, p);
  2321. p = tb_property_find(dir, "vendorid", TB_PROPERTY_TYPE_TEXT);
  2322. KUNIT_ASSERT_NOT_NULL(test, p);
  2323. KUNIT_EXPECT_STREQ(test, p->value.text, "Apple Inc.");
  2324. p = tb_property_find(dir, "vendorid", TB_PROPERTY_TYPE_VALUE);
  2325. KUNIT_ASSERT_NOT_NULL(test, p);
  2326. KUNIT_EXPECT_EQ(test, p->value.immediate, 0xa27);
  2327. p = tb_property_find(dir, "deviceid", TB_PROPERTY_TYPE_TEXT);
  2328. KUNIT_ASSERT_NOT_NULL(test, p);
  2329. KUNIT_EXPECT_STREQ(test, p->value.text, "Macintosh");
  2330. p = tb_property_find(dir, "deviceid", TB_PROPERTY_TYPE_VALUE);
  2331. KUNIT_ASSERT_NOT_NULL(test, p);
  2332. KUNIT_EXPECT_EQ(test, p->value.immediate, 0xa);
  2333. p = tb_property_find(dir, "missing", TB_PROPERTY_TYPE_DIRECTORY);
  2334. KUNIT_ASSERT_NULL(test, p);
  2335. p = tb_property_find(dir, "network", TB_PROPERTY_TYPE_DIRECTORY);
  2336. KUNIT_ASSERT_NOT_NULL(test, p);
  2337. network_dir = p->value.dir;
  2338. KUNIT_EXPECT_TRUE(test, uuid_equal(network_dir->uuid, &network_dir_uuid));
  2339. p = tb_property_find(network_dir, "prtcid", TB_PROPERTY_TYPE_VALUE);
  2340. KUNIT_ASSERT_NOT_NULL(test, p);
  2341. KUNIT_EXPECT_EQ(test, p->value.immediate, 0x1);
  2342. p = tb_property_find(network_dir, "prtcvers", TB_PROPERTY_TYPE_VALUE);
  2343. KUNIT_ASSERT_NOT_NULL(test, p);
  2344. KUNIT_EXPECT_EQ(test, p->value.immediate, 0x1);
  2345. p = tb_property_find(network_dir, "prtcrevs", TB_PROPERTY_TYPE_VALUE);
  2346. KUNIT_ASSERT_NOT_NULL(test, p);
  2347. KUNIT_EXPECT_EQ(test, p->value.immediate, 0x1);
  2348. p = tb_property_find(network_dir, "prtcstns", TB_PROPERTY_TYPE_VALUE);
  2349. KUNIT_ASSERT_NOT_NULL(test, p);
  2350. KUNIT_EXPECT_EQ(test, p->value.immediate, 0x0);
  2351. p = tb_property_find(network_dir, "deviceid", TB_PROPERTY_TYPE_VALUE);
  2352. KUNIT_EXPECT_TRUE(test, !p);
  2353. p = tb_property_find(network_dir, "deviceid", TB_PROPERTY_TYPE_TEXT);
  2354. KUNIT_EXPECT_TRUE(test, !p);
  2355. tb_property_free_dir(dir);
  2356. }
  2357. static void tb_test_property_format(struct kunit *test)
  2358. {
  2359. struct tb_property_dir *dir;
  2360. ssize_t block_len;
  2361. u32 *block;
  2362. int ret, i;
  2363. dir = tb_property_parse_dir(root_directory, ARRAY_SIZE(root_directory));
  2364. KUNIT_ASSERT_NOT_NULL(test, dir);
  2365. ret = tb_property_format_dir(dir, NULL, 0);
  2366. KUNIT_ASSERT_EQ(test, ret, ARRAY_SIZE(root_directory));
  2367. block_len = ret;
  2368. block = kunit_kzalloc(test, block_len * sizeof(u32), GFP_KERNEL);
  2369. KUNIT_ASSERT_NOT_NULL(test, block);
  2370. ret = tb_property_format_dir(dir, block, block_len);
  2371. KUNIT_EXPECT_EQ(test, ret, 0);
  2372. for (i = 0; i < ARRAY_SIZE(root_directory); i++)
  2373. KUNIT_EXPECT_EQ(test, root_directory[i], block[i]);
  2374. tb_property_free_dir(dir);
  2375. }
  2376. static void compare_dirs(struct kunit *test, struct tb_property_dir *d1,
  2377. struct tb_property_dir *d2)
  2378. {
  2379. struct tb_property *p1, *p2, *tmp;
  2380. int n1, n2, i;
  2381. if (d1->uuid) {
  2382. KUNIT_ASSERT_NOT_NULL(test, d2->uuid);
  2383. KUNIT_ASSERT_TRUE(test, uuid_equal(d1->uuid, d2->uuid));
  2384. } else {
  2385. KUNIT_ASSERT_NULL(test, d2->uuid);
  2386. }
  2387. n1 = 0;
  2388. tb_property_for_each(d1, tmp)
  2389. n1++;
  2390. KUNIT_ASSERT_NE(test, n1, 0);
  2391. n2 = 0;
  2392. tb_property_for_each(d2, tmp)
  2393. n2++;
  2394. KUNIT_ASSERT_NE(test, n2, 0);
  2395. KUNIT_ASSERT_EQ(test, n1, n2);
  2396. p1 = NULL;
  2397. p2 = NULL;
  2398. for (i = 0; i < n1; i++) {
  2399. p1 = tb_property_get_next(d1, p1);
  2400. KUNIT_ASSERT_NOT_NULL(test, p1);
  2401. p2 = tb_property_get_next(d2, p2);
  2402. KUNIT_ASSERT_NOT_NULL(test, p2);
  2403. KUNIT_ASSERT_STREQ(test, &p1->key[0], &p2->key[0]);
  2404. KUNIT_ASSERT_EQ(test, p1->type, p2->type);
  2405. KUNIT_ASSERT_EQ(test, p1->length, p2->length);
  2406. switch (p1->type) {
  2407. case TB_PROPERTY_TYPE_DIRECTORY:
  2408. KUNIT_ASSERT_NOT_NULL(test, p1->value.dir);
  2409. KUNIT_ASSERT_NOT_NULL(test, p2->value.dir);
  2410. compare_dirs(test, p1->value.dir, p2->value.dir);
  2411. break;
  2412. case TB_PROPERTY_TYPE_DATA:
  2413. KUNIT_ASSERT_NOT_NULL(test, p1->value.data);
  2414. KUNIT_ASSERT_NOT_NULL(test, p2->value.data);
  2415. KUNIT_ASSERT_TRUE(test,
  2416. !memcmp(p1->value.data, p2->value.data,
  2417. p1->length * 4)
  2418. );
  2419. break;
  2420. case TB_PROPERTY_TYPE_TEXT:
  2421. KUNIT_ASSERT_NOT_NULL(test, p1->value.text);
  2422. KUNIT_ASSERT_NOT_NULL(test, p2->value.text);
  2423. KUNIT_ASSERT_STREQ(test, p1->value.text, p2->value.text);
  2424. break;
  2425. case TB_PROPERTY_TYPE_VALUE:
  2426. KUNIT_ASSERT_EQ(test, p1->value.immediate,
  2427. p2->value.immediate);
  2428. break;
  2429. default:
  2430. KUNIT_FAIL(test, "unexpected property type");
  2431. break;
  2432. }
  2433. }
  2434. }
  2435. static void tb_test_property_copy(struct kunit *test)
  2436. {
  2437. struct tb_property_dir *src, *dst;
  2438. u32 *block;
  2439. int ret, i;
  2440. src = tb_property_parse_dir(root_directory, ARRAY_SIZE(root_directory));
  2441. KUNIT_ASSERT_NOT_NULL(test, src);
  2442. dst = tb_property_copy_dir(src);
  2443. KUNIT_ASSERT_NOT_NULL(test, dst);
  2444. /* Compare the structures */
  2445. compare_dirs(test, src, dst);
  2446. /* Compare the resulting property block */
  2447. ret = tb_property_format_dir(dst, NULL, 0);
  2448. KUNIT_ASSERT_EQ(test, ret, ARRAY_SIZE(root_directory));
  2449. block = kunit_kzalloc(test, sizeof(root_directory), GFP_KERNEL);
  2450. KUNIT_ASSERT_NOT_NULL(test, block);
  2451. ret = tb_property_format_dir(dst, block, ARRAY_SIZE(root_directory));
  2452. KUNIT_EXPECT_TRUE(test, !ret);
  2453. for (i = 0; i < ARRAY_SIZE(root_directory); i++)
  2454. KUNIT_EXPECT_EQ(test, root_directory[i], block[i]);
  2455. tb_property_free_dir(dst);
  2456. tb_property_free_dir(src);
  2457. }
/* All thunderbolt KUnit test cases; must be NULL-terminated. */
static struct kunit_case tb_test_cases[] = {
	/* Path discovery and walking */
	KUNIT_CASE(tb_test_path_basic),
	KUNIT_CASE(tb_test_path_not_connected_walk),
	KUNIT_CASE(tb_test_path_single_hop_walk),
	KUNIT_CASE(tb_test_path_daisy_chain_walk),
	KUNIT_CASE(tb_test_path_simple_tree_walk),
	KUNIT_CASE(tb_test_path_complex_tree_walk),
	KUNIT_CASE(tb_test_path_max_length_walk),
	KUNIT_CASE(tb_test_path_not_connected),
	KUNIT_CASE(tb_test_path_not_bonded_lane0),
	KUNIT_CASE(tb_test_path_not_bonded_lane1),
	KUNIT_CASE(tb_test_path_not_bonded_lane1_chain),
	KUNIT_CASE(tb_test_path_not_bonded_lane1_chain_reverse),
	KUNIT_CASE(tb_test_path_mixed_chain),
	KUNIT_CASE(tb_test_path_mixed_chain_reverse),
	/* Tunnel setup (PCIe, DP, USB3, DMA) */
	KUNIT_CASE(tb_test_tunnel_pcie),
	KUNIT_CASE(tb_test_tunnel_dp),
	KUNIT_CASE(tb_test_tunnel_dp_chain),
	KUNIT_CASE(tb_test_tunnel_dp_tree),
	KUNIT_CASE(tb_test_tunnel_dp_max_length),
	KUNIT_CASE(tb_test_tunnel_3dp),
	KUNIT_CASE(tb_test_tunnel_port_on_path),
	KUNIT_CASE(tb_test_tunnel_usb3),
	KUNIT_CASE(tb_test_tunnel_dma),
	KUNIT_CASE(tb_test_tunnel_dma_rx),
	KUNIT_CASE(tb_test_tunnel_dma_tx),
	KUNIT_CASE(tb_test_tunnel_dma_chain),
	KUNIT_CASE(tb_test_tunnel_dma_match),
	/* Credit allocation */
	KUNIT_CASE(tb_test_credit_alloc_legacy_not_bonded),
	KUNIT_CASE(tb_test_credit_alloc_legacy_bonded),
	KUNIT_CASE(tb_test_credit_alloc_pcie),
	KUNIT_CASE(tb_test_credit_alloc_without_dp),
	KUNIT_CASE(tb_test_credit_alloc_dp),
	KUNIT_CASE(tb_test_credit_alloc_usb3),
	KUNIT_CASE(tb_test_credit_alloc_dma),
	KUNIT_CASE(tb_test_credit_alloc_dma_multiple),
	KUNIT_CASE(tb_test_credit_alloc_all),
	/* XDomain property handling */
	KUNIT_CASE(tb_test_property_parse),
	KUNIT_CASE(tb_test_property_format),
	KUNIT_CASE(tb_test_property_copy),
	{ }
};
/* KUnit suite definition and registration for the "thunderbolt" tests. */
static struct kunit_suite tb_test_suite = {
	.name = "thunderbolt",
	.test_cases = tb_test_cases,
};
kunit_test_suite(tb_test_suite);