xhci-ring.c

// SPDX-License-Identifier: GPL-2.0
/*
 * xHCI host controller driver
 *
 * Copyright (C) 2008 Intel Corp.
 *
 * Author: Sarah Sharp
 * Some code borrowed from the Linux EHCI driver.
 */

/*
 * Ring initialization rules:
 * 1. Each segment is initialized to zero, except for link TRBs.
 * 2. Ring cycle state = 0. This represents Producer Cycle State (PCS) or
 *    Consumer Cycle State (CCS), depending on ring function.
 * 3. Enqueue pointer = dequeue pointer = address of first TRB in the segment.
 *
 * Ring behavior rules:
 * 1. A ring is empty if enqueue == dequeue. This means there will always be at
 *    least one free TRB in the ring. This is useful if you want to turn that
 *    into a link TRB and expand the ring.
 * 2. When incrementing an enqueue or dequeue pointer, if the next TRB is a
 *    link TRB, then load the pointer with the address in the link TRB. If the
 *    link TRB had its toggle bit set, you may need to update the ring cycle
 *    state (see cycle bit rules). You may have to do this multiple times
 *    until you reach a non-link TRB.
 * 3. A ring is full if enqueue++ (for the definition of increment above)
 *    equals the dequeue pointer.
 *
 * Cycle bit rules:
 * 1. When a consumer increments a dequeue pointer and encounters a toggle bit
 *    in a link TRB, it must toggle the ring cycle state.
 * 2. When a producer increments an enqueue pointer and encounters a toggle bit
 *    in a link TRB, it must toggle the ring cycle state.
 *
 * Producer rules:
 * 1. Check if ring is full before you enqueue.
 * 2. Write the ring cycle state to the cycle bit in the TRB you're enqueuing.
 *    Update enqueue pointer between each write (which may update the ring
 *    cycle state).
 * 3. Notify the consumer. If SW is the producer, it rings the doorbell for
 *    command and endpoint rings. If HC is the producer for the event ring,
 *    it generates an interrupt according to interrupt moderation rules.
 *
 * Consumer rules:
 * 1. Check if TRB belongs to you. If the cycle bit == your ring cycle state,
 *    the TRB is owned by the consumer.
 * 2. Update dequeue pointer (which may update the ring cycle state) and
 *    continue processing TRBs until you reach a TRB which is not owned by you.
 * 3. Notify the producer. SW is the consumer for the event ring, and it
 *    updates event ring dequeue pointer. HC is the consumer for the command and
 *    endpoint rings; it generates events on the event ring for these.
 */
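
/*
 * Illustrative sketch (not part of the driver): the consumer rules above boil
 * down to a loop of roughly this shape, matching the cycle-bit ownership check
 * in unhandled_event_trb() below:
 *
 *	while ((le32_to_cpu(ring->dequeue->event_cmd.flags) & TRB_CYCLE) ==
 *	       ring->cycle_state) {
 *		process(ring->dequeue);		// hypothetical handler
 *		inc_deq(xhci, ring);		// may toggle cycle_state on wrap
 *	}
 */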

#include <linux/jiffies.h>
#include <linux/scatterlist.h>
#include <linux/slab.h>
#include <linux/dma-mapping.h>
#include "xhci.h"
#include "xhci-trace.h"

static int queue_command(struct xhci_hcd *xhci, struct xhci_command *cmd,
			 u32 field1, u32 field2,
			 u32 field3, u32 field4, bool command_must_succeed);

/*
 * Returns zero if the TRB isn't in this segment, otherwise it returns the DMA
 * address of the TRB.
 */
dma_addr_t xhci_trb_virt_to_dma(struct xhci_segment *seg,
				union xhci_trb *trb)
{
	unsigned long segment_offset;

	if (!seg || !trb || trb < seg->trbs)
		return 0;
	/* offset in TRBs */
	segment_offset = trb - seg->trbs;
	if (segment_offset >= TRBS_PER_SEGMENT)
		return 0;
	return seg->dma + (segment_offset * sizeof(*trb));
}
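
/*
 * Small helpers for classifying TRBs and tracking ring/TD state; used
 * throughout the enqueue/dequeue and cancellation paths below.
 */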
static bool trb_is_noop(union xhci_trb *trb)
{
	return TRB_TYPE_NOOP_LE32(trb->generic.field[3]);
}

static bool trb_is_link(union xhci_trb *trb)
{
	return TRB_TYPE_LINK_LE32(trb->link.control);
}

static bool last_trb_on_seg(struct xhci_segment *seg, union xhci_trb *trb)
{
	return trb == &seg->trbs[TRBS_PER_SEGMENT - 1];
}

static bool last_trb_on_ring(struct xhci_ring *ring,
			     struct xhci_segment *seg, union xhci_trb *trb)
{
	return last_trb_on_seg(seg, trb) && (seg->next == ring->first_seg);
}

static bool link_trb_toggles_cycle(union xhci_trb *trb)
{
	return le32_to_cpu(trb->link.control) & LINK_TOGGLE;
}

static bool last_td_in_urb(struct xhci_td *td)
{
	struct urb_priv *urb_priv = td->urb->hcpriv;

	return urb_priv->num_tds_done == urb_priv->num_tds;
}
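
/*
 * An event TRB still belongs to the consumer (i.e. has not been handled yet)
 * while its cycle bit matches the event ring's consumer cycle state.
 */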
static bool unhandled_event_trb(struct xhci_ring *ring)
{
	return ((le32_to_cpu(ring->dequeue->event_cmd.flags) & TRB_CYCLE) ==
		ring->cycle_state);
}

static void inc_td_cnt(struct urb *urb)
{
	struct urb_priv *urb_priv = urb->hcpriv;

	urb_priv->num_tds_done++;
}
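
/*
 * Convert a TRB into a no-op of @noop_type. Link TRBs keep their link target
 * but lose the chain bit; other TRBs are zeroed except for the cycle bit.
 */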
static void trb_to_noop(union xhci_trb *trb, u32 noop_type)
{
	if (trb_is_link(trb)) {
		/* unchain chained link TRBs */
		trb->link.control &= cpu_to_le32(~TRB_CHAIN);
	} else {
		trb->generic.field[0] = 0;
		trb->generic.field[1] = 0;
		trb->generic.field[2] = 0;
		/* Preserve only the cycle bit of this TRB */
		trb->generic.field[3] &= cpu_to_le32(TRB_CYCLE);
		trb->generic.field[3] |= cpu_to_le32(TRB_TYPE(noop_type));
	}
}

/*
 * Updates trb to point to the next TRB in the ring, and updates seg if the
 * next TRB is in a new segment. This does not skip over link TRBs, and it does
 * not affect the ring dequeue or enqueue pointers.
 */
static void next_trb(struct xhci_hcd *xhci,
		     struct xhci_ring *ring,
		     struct xhci_segment **seg,
		     union xhci_trb **trb)
{
	if (trb_is_link(*trb) || last_trb_on_seg(*seg, *trb)) {
		*seg = (*seg)->next;
		*trb = ((*seg)->trbs);
	} else {
		(*trb)++;
	}
}

/*
 * See Cycle bit rules. SW is the consumer for the event ring only.
 */
void inc_deq(struct xhci_hcd *xhci, struct xhci_ring *ring)
{
	unsigned int link_trb_count = 0;

	/* event ring doesn't have link trbs, check for last trb */
	if (ring->type == TYPE_EVENT) {
		if (!last_trb_on_seg(ring->deq_seg, ring->dequeue)) {
			ring->dequeue++;
			goto out;
		}
		if (last_trb_on_ring(ring, ring->deq_seg, ring->dequeue))
			ring->cycle_state ^= 1;
		ring->deq_seg = ring->deq_seg->next;
		ring->dequeue = ring->deq_seg->trbs;
		goto out;
	}

	/* All other rings have link trbs */
	if (!trb_is_link(ring->dequeue)) {
		if (last_trb_on_seg(ring->deq_seg, ring->dequeue))
			xhci_warn(xhci, "Missing link TRB at end of segment\n");
		else
			ring->dequeue++;
	}

	while (trb_is_link(ring->dequeue)) {
		ring->deq_seg = ring->deq_seg->next;
		ring->dequeue = ring->deq_seg->trbs;

		if (link_trb_count++ > ring->num_segs) {
			xhci_warn(xhci, "Ring is an endless link TRB loop\n");
			break;
		}
	}
out:
	trace_xhci_inc_deq(ring);

	return;
}

/*
 * See Cycle bit rules. SW is the consumer for the event ring only.
 *
 * If we've just enqueued a TRB that is in the middle of a TD (meaning the
 * chain bit is set), then set the chain bit in all the following link TRBs.
 * If we've enqueued the last TRB in a TD, make sure the following link TRBs
 * have their chain bit cleared (so that each Link TRB is a separate TD).
 *
 * Section 6.4.4.1 of the 0.95 spec says link TRBs cannot have the chain bit
 * set, but other sections talk about dealing with the chain bit set. This was
 * fixed in the 0.96 specification errata, but we have to assume that all 0.95
 * xHCI hardware can't handle the chain bit being cleared on a link TRB.
 *
 * @more_trbs_coming: Will you enqueue more TRBs before calling
 *			prepare_transfer()?
 */
static void inc_enq(struct xhci_hcd *xhci, struct xhci_ring *ring,
		    bool more_trbs_coming)
{
	u32 chain;
	union xhci_trb *next;
	unsigned int link_trb_count = 0;

	chain = le32_to_cpu(ring->enqueue->generic.field[3]) & TRB_CHAIN;

	if (last_trb_on_seg(ring->enq_seg, ring->enqueue)) {
		xhci_err(xhci, "Tried to move enqueue past ring segment\n");
		return;
	}

	next = ++(ring->enqueue);

	/* Update the enqueue pointer further if that was a link TRB */
	while (trb_is_link(next)) {
		/*
		 * If the caller doesn't plan on enqueueing more TDs before
		 * ringing the doorbell, then we don't want to give the link TRB
		 * to the hardware just yet. We'll give the link TRB back in
		 * prepare_ring() just before we enqueue the TD at the top of
		 * the ring.
		 */
		if (!chain && !more_trbs_coming)
			break;

		/* If we're not dealing with 0.95 hardware or isoc rings on
		 * AMD 0.96 host, carry over the chain bit of the previous TRB
		 * (which may mean the chain bit is cleared).
		 */
		if (!xhci_link_chain_quirk(xhci, ring->type)) {
			next->link.control &= cpu_to_le32(~TRB_CHAIN);
			next->link.control |= cpu_to_le32(chain);
		}
		/* Give this link TRB to the hardware */
		wmb();
		next->link.control ^= cpu_to_le32(TRB_CYCLE);

		/* Toggle the cycle bit after the last ring segment. */
		if (link_trb_toggles_cycle(next))
			ring->cycle_state ^= 1;

		ring->enq_seg = ring->enq_seg->next;
		ring->enqueue = ring->enq_seg->trbs;
		next = ring->enqueue;

		if (link_trb_count++ > ring->num_segs) {
			xhci_warn(xhci, "%s: Ring link TRB loop\n", __func__);
			break;
		}
	}

	trace_xhci_inc_enq(ring);
}

/*
 * Return the number of free normal TRBs from the enqueue to the dequeue
 * pointer on the ring, not counting the link TRB assumed at the end of each
 * TRBS_PER_SEGMENT-sized segment. Only for transfer and command rings where
 * the driver is the producer, not for event rings.
 */
static unsigned int xhci_num_trbs_free(struct xhci_ring *ring)
{
	struct xhci_segment *enq_seg = ring->enq_seg;
	union xhci_trb *enq = ring->enqueue;
	union xhci_trb *last_on_seg;
	unsigned int free = 0;
	int i = 0;

	/* Ring might be empty even if enq != deq if enq is left on a link trb */
	if (trb_is_link(enq)) {
		enq_seg = enq_seg->next;
		enq = enq_seg->trbs;
	}

	/* Empty ring, common case, don't walk the segments */
	if (enq == ring->dequeue)
		return ring->num_segs * (TRBS_PER_SEGMENT - 1);

	do {
		if (ring->deq_seg == enq_seg && ring->dequeue >= enq)
			return free + (ring->dequeue - enq);
		last_on_seg = &enq_seg->trbs[TRBS_PER_SEGMENT - 1];
		free += last_on_seg - enq;
		enq_seg = enq_seg->next;
		enq = enq_seg->trbs;
	} while (i++ < ring->num_segs);

	return free;
}

/*
 * Check to see if there's room to enqueue num_trbs on the ring and make sure
 * the enqueue pointer will not advance into the dequeue segment. See rules
 * above. Return the number of new segments needed to ensure this.
 */
static unsigned int xhci_ring_expansion_needed(struct xhci_hcd *xhci, struct xhci_ring *ring,
					       unsigned int num_trbs)
{
	struct xhci_segment *seg;
	int trbs_past_seg;
	int enq_used;
	int new_segs;

	enq_used = ring->enqueue - ring->enq_seg->trbs;

	/* how many trbs will be queued past the enqueue segment? */
	trbs_past_seg = enq_used + num_trbs - (TRBS_PER_SEGMENT - 1);

	/*
	 * Consider expanding the ring already if num_trbs fills the current
	 * segment (i.e. trbs_past_seg == 0), not only when num_trbs goes into
	 * the next segment. Avoids confusing a full ring with the special
	 * empty ring case below.
	 */
	if (trbs_past_seg < 0)
		return 0;

	/* Empty ring special case, enqueue stuck on link trb while dequeue advanced */
	if (trb_is_link(ring->enqueue) && ring->enq_seg->next->trbs == ring->dequeue)
		return 0;

	new_segs = 1 + (trbs_past_seg / (TRBS_PER_SEGMENT - 1));
	seg = ring->enq_seg;

	while (new_segs > 0) {
		seg = seg->next;
		if (seg == ring->deq_seg) {
			xhci_dbg(xhci, "Adding %d trbs requires expanding ring by %d segments\n",
				 num_trbs, new_segs);
			return new_segs;
		}
		new_segs--;
	}

	return 0;
}

/* Ring the host controller doorbell after placing a command on the ring */
void xhci_ring_cmd_db(struct xhci_hcd *xhci)
{
	if (!(xhci->cmd_ring_state & CMD_RING_STATE_RUNNING))
		return;

	xhci_dbg(xhci, "// Ding dong!\n");

	trace_xhci_ring_host_doorbell(0, DB_VALUE_HOST);

	writel(DB_VALUE_HOST, &xhci->dba->doorbell[0]);
	/* Flush PCI posted writes */
	readl(&xhci->dba->doorbell[0]);
}
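
/*
 * (Re)arm the command watchdog work for xhci->current_cmd; returns true if a
 * timeout was already pending and has just been pushed out.
 */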
static bool xhci_mod_cmd_timer(struct xhci_hcd *xhci)
{
	return mod_delayed_work(system_wq, &xhci->cmd_timer,
				msecs_to_jiffies(xhci->current_cmd->timeout_ms));
}

static struct xhci_command *xhci_next_queued_cmd(struct xhci_hcd *xhci)
{
	return list_first_entry_or_null(&xhci->cmd_list, struct xhci_command,
					cmd_list);
}

/*
 * Turn all commands on the command ring with status set to "aborted" into
 * no-op TRBs. If there are other commands waiting then restart the ring and
 * kick the timer. This must be called with the command ring stopped and
 * xhci->lock held.
 */
static void xhci_handle_stopped_cmd_ring(struct xhci_hcd *xhci,
					 struct xhci_command *cur_cmd)
{
	struct xhci_command *i_cmd;

	/* Turn all aborted commands in list to no-ops, then restart */
	list_for_each_entry(i_cmd, &xhci->cmd_list, cmd_list) {

		if (i_cmd->status != COMP_COMMAND_ABORTED)
			continue;

		i_cmd->status = COMP_COMMAND_RING_STOPPED;

		xhci_dbg(xhci, "Turn aborted command %p to no-op\n",
			 i_cmd->command_trb);

		trb_to_noop(i_cmd->command_trb, TRB_CMD_NOOP);

		/*
		 * The caller waiting for completion is notified when the
		 * command completion event is received for these no-op
		 * commands.
		 */
	}

	xhci->cmd_ring_state = CMD_RING_STATE_RUNNING;

	/* ring command ring doorbell to restart the command ring */
	if ((xhci->cmd_ring->dequeue != xhci->cmd_ring->enqueue) &&
	    !(xhci->xhc_state & XHCI_STATE_DYING)) {
		xhci->current_cmd = cur_cmd;
		if (cur_cmd)
			xhci_mod_cmd_timer(xhci);
		xhci_ring_cmd_db(xhci);
	}
}

/* Must be called with xhci->lock held; releases and re-acquires the lock */
static int xhci_abort_cmd_ring(struct xhci_hcd *xhci, unsigned long flags)
{
	struct xhci_segment *new_seg = xhci->cmd_ring->deq_seg;
	union xhci_trb *new_deq = xhci->cmd_ring->dequeue;
	u64 crcr;
	int ret;

	xhci_dbg(xhci, "Abort command ring\n");

	reinit_completion(&xhci->cmd_ring_stop_completion);

	/*
	 * The control bits like command stop, abort are located in lower
	 * dword of the command ring control register.
	 * Some controllers require all 64 bits to be written to abort the ring.
	 * Make sure the upper dword is valid, pointing to the next command,
	 * avoiding corrupting the command ring pointer in case the command ring
	 * is stopped by the time the upper dword is written.
	 */
	next_trb(xhci, NULL, &new_seg, &new_deq);
	if (trb_is_link(new_deq))
		next_trb(xhci, NULL, &new_seg, &new_deq);

	crcr = xhci_trb_virt_to_dma(new_seg, new_deq);
	xhci_write_64(xhci, crcr | CMD_RING_ABORT, &xhci->op_regs->cmd_ring);

	/* Section 4.6.1.2 of xHCI 1.0 spec says software should also time the
	 * completion of the Command Abort operation. If CRR is not negated in 5
	 * seconds then the driver handles it as if the host died (-ENODEV).
	 * In the future we should distinguish between -ENODEV and -ETIMEDOUT
	 * and try to recover a -ETIMEDOUT with a host controller reset.
	 */
	ret = xhci_handshake_check_state(xhci, &xhci->op_regs->cmd_ring,
					 CMD_RING_RUNNING, 0, 5 * 1000 * 1000,
					 XHCI_STATE_REMOVING);
	if (ret < 0) {
		xhci_err(xhci, "Abort failed to stop command ring: %d\n", ret);
		xhci_halt(xhci);
		xhci_hc_died(xhci);
		return ret;
	}
	/*
	 * Writing the CMD_RING_ABORT bit should cause a cmd completion event,
	 * however on some host hw the CMD_RING_RUNNING bit is correctly cleared
	 * but the completion event is never sent. Wait 2 secs (arbitrary
	 * number) to handle those cases after negation of CMD_RING_RUNNING.
	 */
	spin_unlock_irqrestore(&xhci->lock, flags);
	ret = wait_for_completion_timeout(&xhci->cmd_ring_stop_completion,
					  msecs_to_jiffies(2000));
	spin_lock_irqsave(&xhci->lock, flags);
	if (!ret) {
		xhci_dbg(xhci, "No stop event for abort, ring start fail?\n");
		xhci_cleanup_command_queue(xhci);
	} else {
		xhci_handle_stopped_cmd_ring(xhci, xhci_next_queued_cmd(xhci));
	}
	return 0;
}
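
/*
 * Ring the doorbell for a single endpoint (and stream), unless a pending
 * stop, set-dequeue, halt or clear-TT operation means the endpoint must not
 * be restarted yet.
 */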
void xhci_ring_ep_doorbell(struct xhci_hcd *xhci,
			   unsigned int slot_id,
			   unsigned int ep_index,
			   unsigned int stream_id)
{
	__le32 __iomem *db_addr = &xhci->dba->doorbell[slot_id];
	struct xhci_virt_ep *ep = &xhci->devs[slot_id]->eps[ep_index];
	unsigned int ep_state = ep->ep_state;

	/* Don't ring the doorbell for this endpoint if there are pending
	 * cancellations because we don't want to interrupt processing.
	 * We don't want to restart any stream rings if there's a set dequeue
	 * pointer command pending because the device can choose to start any
	 * stream once the endpoint is on the HW schedule.
	 */
	if ((ep_state & EP_STOP_CMD_PENDING) || (ep_state & SET_DEQ_PENDING) ||
	    (ep_state & EP_HALTED) || (ep_state & EP_CLEARING_TT))
		return;

	trace_xhci_ring_ep_doorbell(slot_id, DB_VALUE(ep_index, stream_id));

	writel(DB_VALUE(ep_index, stream_id), db_addr);
	/* flush the write */
	readl(db_addr);
}

/* Ring the doorbell for any rings with pending URBs */
static void ring_doorbell_for_active_rings(struct xhci_hcd *xhci,
					   unsigned int slot_id,
					   unsigned int ep_index)
{
	unsigned int stream_id;
	struct xhci_virt_ep *ep;

	ep = &xhci->devs[slot_id]->eps[ep_index];

	/* A ring has pending URBs if its TD list is not empty */
	if (!(ep->ep_state & EP_HAS_STREAMS)) {
		if (ep->ring && !(list_empty(&ep->ring->td_list)))
			xhci_ring_ep_doorbell(xhci, slot_id, ep_index, 0);
		return;
	}

	for (stream_id = 1; stream_id < ep->stream_info->num_streams;
	     stream_id++) {
		struct xhci_stream_info *stream_info = ep->stream_info;

		if (!list_empty(&stream_info->stream_rings[stream_id]->td_list))
			xhci_ring_ep_doorbell(xhci, slot_id, ep_index,
					      stream_id);
	}
}

void xhci_ring_doorbell_for_active_rings(struct xhci_hcd *xhci,
					 unsigned int slot_id,
					 unsigned int ep_index)
{
	ring_doorbell_for_active_rings(xhci, slot_id, ep_index);
}
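
/* Validate slot_id and ep_index and return the matching virt endpoint, or NULL. */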
static struct xhci_virt_ep *xhci_get_virt_ep(struct xhci_hcd *xhci,
					     unsigned int slot_id,
					     unsigned int ep_index)
{
	if (slot_id == 0 || slot_id >= MAX_HC_SLOTS) {
		xhci_warn(xhci, "Invalid slot_id %u\n", slot_id);
		return NULL;
	}
	if (ep_index >= EP_CTX_PER_DEV) {
		xhci_warn(xhci, "Invalid endpoint index %u\n", ep_index);
		return NULL;
	}
	if (!xhci->devs[slot_id]) {
		xhci_warn(xhci, "No xhci virt device for slot_id %u\n", slot_id);
		return NULL;
	}

	return &xhci->devs[slot_id]->eps[ep_index];
}
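
/* Map a virt endpoint and stream_id to its transfer ring, checking stream bounds. */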
static struct xhci_ring *xhci_virt_ep_to_ring(struct xhci_hcd *xhci,
					      struct xhci_virt_ep *ep,
					      unsigned int stream_id)
{
	/* common case, no streams */
	if (!(ep->ep_state & EP_HAS_STREAMS))
		return ep->ring;

	if (!ep->stream_info)
		return NULL;

	if (stream_id == 0 || stream_id >= ep->stream_info->num_streams) {
		xhci_warn(xhci, "Invalid stream_id %u request for slot_id %u ep_index %u\n",
			  stream_id, ep->vdev->slot_id, ep->ep_index);
		return NULL;
	}

	return ep->stream_info->stream_rings[stream_id];
}

/* Get the right ring for the given slot_id, ep_index and stream_id.
 * If the endpoint supports streams, boundary check the URB's stream ID.
 * If the endpoint doesn't support streams, return the singular endpoint ring.
 */
struct xhci_ring *xhci_triad_to_transfer_ring(struct xhci_hcd *xhci,
					      unsigned int slot_id, unsigned int ep_index,
					      unsigned int stream_id)
{
	struct xhci_virt_ep *ep;

	ep = xhci_get_virt_ep(xhci, slot_id, ep_index);
	if (!ep)
		return NULL;

	return xhci_virt_ep_to_ring(xhci, ep, stream_id);
}

/*
 * Get the hw dequeue pointer xHC stopped on, either directly from the
 * endpoint context, or if streams are in use from the stream context.
 * The returned hw_dequeue contains, in its lowest four bits, the cycle state
 * and possibly the stream context type.
 */
static u64 xhci_get_hw_deq(struct xhci_hcd *xhci, struct xhci_virt_device *vdev,
			   unsigned int ep_index, unsigned int stream_id)
{
	struct xhci_ep_ctx *ep_ctx;
	struct xhci_stream_ctx *st_ctx;
	struct xhci_virt_ep *ep;

	ep = &vdev->eps[ep_index];

	if (ep->ep_state & EP_HAS_STREAMS) {
		st_ctx = &ep->stream_info->stream_ctx_array[stream_id];
		return le64_to_cpu(st_ctx->stream_ring);
	}
	ep_ctx = xhci_get_ep_ctx(xhci, vdev->out_ctx, ep_index);
	return le64_to_cpu(ep_ctx->deq);
}
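
/*
 * Queue a Set TR Dequeue Pointer command that moves the xHC past @td, walking
 * the ring from the current software dequeue position to recover the new
 * cycle state.
 */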
static int xhci_move_dequeue_past_td(struct xhci_hcd *xhci,
				     unsigned int slot_id, unsigned int ep_index,
				     unsigned int stream_id, struct xhci_td *td)
{
	struct xhci_virt_device *dev = xhci->devs[slot_id];
	struct xhci_virt_ep *ep = &dev->eps[ep_index];
	struct xhci_ring *ep_ring;
	struct xhci_command *cmd;
	struct xhci_segment *new_seg;
	union xhci_trb *new_deq;
	int new_cycle;
	dma_addr_t addr;
	u64 hw_dequeue;
	bool cycle_found = false;
	bool td_last_trb_found = false;
	u32 trb_sct = 0;
	int ret;

	ep_ring = xhci_triad_to_transfer_ring(xhci, slot_id,
					      ep_index, stream_id);
	if (!ep_ring) {
		xhci_warn(xhci, "WARN can't find new dequeue, invalid stream ID %u\n",
			  stream_id);
		return -ENODEV;
	}

	hw_dequeue = xhci_get_hw_deq(xhci, dev, ep_index, stream_id);
	new_seg = ep_ring->deq_seg;
	new_deq = ep_ring->dequeue;
	new_cycle = hw_dequeue & 0x1;

	/*
	 * We want to find the pointer, segment and cycle state of the new trb
	 * (the one after current TD's last_trb). We know the cycle state at
	 * hw_dequeue, so walk the ring until both hw_dequeue and last_trb are
	 * found.
	 */
	do {
		if (!cycle_found && xhci_trb_virt_to_dma(new_seg, new_deq)
		    == (dma_addr_t)(hw_dequeue & ~0xf)) {
			cycle_found = true;
			if (td_last_trb_found)
				break;
		}
		if (new_deq == td->last_trb)
			td_last_trb_found = true;

		if (cycle_found && trb_is_link(new_deq) &&
		    link_trb_toggles_cycle(new_deq))
			new_cycle ^= 0x1;

		next_trb(xhci, ep_ring, &new_seg, &new_deq);

		/* Search wrapped around, bail out */
		if (new_deq == ep->ring->dequeue) {
			xhci_err(xhci, "Error: Failed finding new dequeue state\n");
			return -EINVAL;
		}

	} while (!cycle_found || !td_last_trb_found);

	/* Don't update the ring cycle state for the producer (us). */
	addr = xhci_trb_virt_to_dma(new_seg, new_deq);
	if (addr == 0) {
		xhci_warn(xhci, "Can't find dma of new dequeue ptr\n");
		xhci_warn(xhci, "deq seg = %p, deq ptr = %p\n", new_seg, new_deq);
		return -EINVAL;
	}

	if ((ep->ep_state & SET_DEQ_PENDING)) {
		xhci_warn(xhci, "Set TR Deq already pending, don't submit for 0x%pad\n",
			  &addr);
		return -EBUSY;
	}

	/* This function gets called from contexts where it cannot sleep */
	cmd = xhci_alloc_command(xhci, false, GFP_ATOMIC);
	if (!cmd) {
		xhci_warn(xhci, "Can't alloc Set TR Deq cmd 0x%pad\n", &addr);
		return -ENOMEM;
	}

	if (stream_id)
		trb_sct = SCT_FOR_TRB(SCT_PRI_TR);
	ret = queue_command(xhci, cmd,
			    lower_32_bits(addr) | trb_sct | new_cycle,
			    upper_32_bits(addr),
			    STREAM_ID_FOR_TRB(stream_id), SLOT_ID_FOR_TRB(slot_id) |
			    EP_INDEX_FOR_TRB(ep_index) | TRB_TYPE(TRB_SET_DEQ), false);
	if (ret < 0) {
		xhci_free_command(xhci, cmd);
		return ret;
	}

	ep->queued_deq_seg = new_seg;
	ep->queued_deq_ptr = new_deq;

	xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb,
		       "Set TR Deq ptr 0x%llx, cycle %u\n", addr, new_cycle);

	/* Stop the TD queueing code from ringing the doorbell until
	 * this command completes. The HC won't set the dequeue pointer
	 * if the ring is running, and ringing the doorbell starts the
	 * ring running.
	 */
	ep->ep_state |= SET_DEQ_PENDING;
	xhci_ring_cmd_db(xhci);
	return 0;
}

/* flip_cycle means flip the cycle bit of all but the first and last TRB.
 * (The last TRB actually points to the ring enqueue pointer, which is not part
 * of this TD.) This is used to remove partially enqueued isoc TDs from a ring.
 */
static void td_to_noop(struct xhci_hcd *xhci, struct xhci_ring *ep_ring,
		       struct xhci_td *td, bool flip_cycle)
{
	struct xhci_segment *seg = td->start_seg;
	union xhci_trb *trb = td->first_trb;

	while (1) {
		trb_to_noop(trb, TRB_TR_NOOP);

		/* flip cycle if asked to */
		if (flip_cycle && trb != td->first_trb && trb != td->last_trb)
			trb->generic.field[3] ^= cpu_to_le32(TRB_CYCLE);

		if (trb == td->last_trb)
			break;

		next_trb(xhci, ep_ring, &seg, &trb);
	}
}
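
/*
 * Give an URB back to the USB core from interrupt context, undoing the AMD
 * PLL quirk bookkeeping if this was the last isochronous URB in flight.
 */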
static void xhci_giveback_urb_in_irq(struct xhci_hcd *xhci,
				     struct xhci_td *cur_td, int status)
{
	struct urb *urb = cur_td->urb;
	struct urb_priv *urb_priv = urb->hcpriv;
	struct usb_hcd *hcd = bus_to_hcd(urb->dev->bus);

	if (usb_pipetype(urb->pipe) == PIPE_ISOCHRONOUS) {
		xhci_to_hcd(xhci)->self.bandwidth_isoc_reqs--;
		if (xhci_to_hcd(xhci)->self.bandwidth_isoc_reqs == 0) {
			if (xhci->quirks & XHCI_AMD_PLL_FIX)
				usb_amd_quirk_pll_enable();
		}
	}
	xhci_urb_free_priv(urb_priv);
	usb_hcd_unlink_urb_from_ep(hcd, urb);
	trace_xhci_urb_giveback(urb);
	usb_hcd_giveback_urb(hcd, urb, status);
}
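
/*
 * Unmap a TD's bounce buffer and, for IN transfers, copy the received data
 * back into the URB's buffer or scatterlist.
 */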
static void xhci_unmap_td_bounce_buffer(struct xhci_hcd *xhci,
					struct xhci_ring *ring, struct xhci_td *td)
{
	struct device *dev = xhci_to_hcd(xhci)->self.sysdev;
	struct xhci_segment *seg = td->bounce_seg;
	struct urb *urb = td->urb;
	size_t len;

	if (!ring || !seg || !urb)
		return;

	if (usb_urb_dir_out(urb)) {
		dma_unmap_single(dev, seg->bounce_dma, ring->bounce_buf_len,
				 DMA_TO_DEVICE);
		return;
	}

	dma_unmap_single(dev, seg->bounce_dma, ring->bounce_buf_len,
			 DMA_FROM_DEVICE);
	/* for IN transfers we need to copy the data from bounce to sg */
	if (urb->num_sgs) {
		len = sg_pcopy_from_buffer(urb->sg, urb->num_sgs, seg->bounce_buf,
					   seg->bounce_len, seg->bounce_offs);
		if (len != seg->bounce_len)
			xhci_warn(xhci, "WARN Wrong bounce buffer read length: %zu != %d\n",
				  len, seg->bounce_len);
	} else {
		memcpy(urb->transfer_buffer + seg->bounce_offs, seg->bounce_buf,
		       seg->bounce_len);
	}
	seg->bounce_len = 0;
	seg->bounce_offs = 0;
}
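
/*
 * Final per-TD bookkeeping: unmap any bounce buffer, sanity-check the reported
 * transfer length, drop the TD from its lists and give the URB back once its
 * last TD is done.
 */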
static int xhci_td_cleanup(struct xhci_hcd *xhci, struct xhci_td *td,
			   struct xhci_ring *ep_ring, int status)
{
	struct urb *urb = NULL;

	/* Clean up the endpoint's TD list */
	urb = td->urb;

	/* if a bounce buffer was used to align this td then unmap it */
	xhci_unmap_td_bounce_buffer(xhci, ep_ring, td);

	/* Do one last check of the actual transfer length.
	 * If the host controller said we transferred more data than the buffer
	 * length, urb->actual_length will be a very big number (since it's
	 * unsigned). Play it safe and say we didn't transfer anything.
	 */
	if (urb->actual_length > urb->transfer_buffer_length) {
		xhci_warn(xhci, "URB req %u and actual %u transfer length mismatch\n",
			  urb->transfer_buffer_length, urb->actual_length);
		urb->actual_length = 0;
		status = 0;
	}
	/* TD might be removed from td_list if we are giving back a cancelled URB */
	if (!list_empty(&td->td_list))
		list_del_init(&td->td_list);
	/* Giving back a cancelled URB, or if a slated TD completed anyway */
	if (!list_empty(&td->cancelled_td_list))
		list_del_init(&td->cancelled_td_list);

	inc_td_cnt(urb);
	/* Giveback the urb when all the tds are completed */
	if (last_td_in_urb(td)) {
		if ((urb->actual_length != urb->transfer_buffer_length &&
		     (urb->transfer_flags & URB_SHORT_NOT_OK)) ||
		    (status != 0 && !usb_endpoint_xfer_isoc(&urb->ep->desc)))
			xhci_dbg(xhci, "Giveback URB %p, len = %d, expected = %d, status = %d\n",
				 urb, urb->actual_length,
				 urb->transfer_buffer_length, status);

		/* set isoc urb status to 0 just as EHCI, UHCI, and OHCI */
		if (usb_pipetype(urb->pipe) == PIPE_ISOCHRONOUS)
			status = 0;
		xhci_giveback_urb_in_irq(xhci, td, status);
	}

	return 0;
}

/* Complete the cancelled URBs we unlinked from td_list. */
static void xhci_giveback_invalidated_tds(struct xhci_virt_ep *ep)
{
	struct xhci_ring *ring;
	struct xhci_td *td, *tmp_td;

	list_for_each_entry_safe(td, tmp_td, &ep->cancelled_td_list,
				 cancelled_td_list) {

		ring = xhci_urb_to_transfer_ring(ep->xhci, td->urb);

		if (td->cancel_status == TD_CLEARED) {
			xhci_dbg(ep->xhci, "%s: Giveback cancelled URB %p TD\n",
				 __func__, td->urb);
			xhci_td_cleanup(ep->xhci, td, ring, td->status);
		} else {
			xhci_dbg(ep->xhci, "%s: Keep cancelled URB %p TD as cancel_status is %d\n",
				 __func__, td->urb, td->cancel_status);
		}
		if (ep->xhci->xhc_state & XHCI_STATE_DYING)
			return;
	}
}
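
/* Allocate and queue a Reset Endpoint command (hard or soft) for the given endpoint. */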
static int xhci_reset_halted_ep(struct xhci_hcd *xhci, unsigned int slot_id,
				unsigned int ep_index, enum xhci_ep_reset_type reset_type)
{
	struct xhci_command *command;
	int ret = 0;

	command = xhci_alloc_command(xhci, false, GFP_ATOMIC);
	if (!command) {
		ret = -ENOMEM;
		goto done;
	}

	xhci_dbg(xhci, "%s-reset ep %u, slot %u\n",
		 (reset_type == EP_HARD_RESET) ? "Hard" : "Soft",
		 ep_index, slot_id);

	ret = xhci_queue_reset_ep(xhci, command, slot_id, ep_index, reset_type);
done:
	if (ret)
		xhci_err(xhci, "ERROR queuing reset endpoint for slot %d ep_index %d, %d\n",
			 slot_id, ep_index, ret);
	return ret;
}

static int xhci_handle_halted_endpoint(struct xhci_hcd *xhci,
				       struct xhci_virt_ep *ep,
				       struct xhci_td *td,
				       enum xhci_ep_reset_type reset_type)
{
	unsigned int slot_id = ep->vdev->slot_id;
	int err;

	/*
	 * Avoid resetting endpoint if link is inactive. Can cause host hang.
	 * Device will be reset soon to recover the link so don't do anything
	 */
	if (ep->vdev->flags & VDEV_PORT_ERROR)
		return -ENODEV;

	/* add td to cancelled list and let reset ep handler take care of it */
	if (reset_type == EP_HARD_RESET) {
		ep->ep_state |= EP_HARD_CLEAR_TOGGLE;
		if (td && list_empty(&td->cancelled_td_list)) {
			list_add_tail(&td->cancelled_td_list, &ep->cancelled_td_list);
			td->cancel_status = TD_HALTED;
		}
	}

	if (ep->ep_state & EP_HALTED) {
		xhci_dbg(xhci, "Reset ep command for ep_index %d already pending\n",
			 ep->ep_index);
		return 0;
	}

	err = xhci_reset_halted_ep(xhci, slot_id, ep->ep_index, reset_type);
	if (err)
		return err;

	ep->ep_state |= EP_HALTED;

	xhci_ring_cmd_db(xhci);

	return 0;
}

/*
 * Fix up the ep ring first, so HW stops executing cancelled TDs.
 * We have the xHCI lock, so nothing can modify this list until we drop it.
 * We're also in the event handler, so we can't get re-interrupted if another
 * Stop Endpoint command completes.
 *
 * Only call this when the ring is not in a running state.
 */
static int xhci_invalidate_cancelled_tds(struct xhci_virt_ep *ep)
{
	struct xhci_hcd *xhci;
	struct xhci_td *td = NULL;
	struct xhci_td *tmp_td = NULL;
	struct xhci_td *cached_td = NULL;
	struct xhci_ring *ring;
	u64 hw_deq;
	unsigned int slot_id = ep->vdev->slot_id;
	int err;

	/*
	 * This is not going to work if the hardware is changing its dequeue
	 * pointers as we look at them. Completion handler will call us later.
	 */
	if (ep->ep_state & SET_DEQ_PENDING)
		return 0;

	xhci = ep->xhci;

	list_for_each_entry_safe(td, tmp_td, &ep->cancelled_td_list, cancelled_td_list) {
		xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb,
			       "Removing canceled TD starting at 0x%llx (dma) in stream %u URB %p",
			       (unsigned long long)xhci_trb_virt_to_dma(
				       td->start_seg, td->first_trb),
			       td->urb->stream_id, td->urb);
		list_del_init(&td->td_list);
		ring = xhci_urb_to_transfer_ring(xhci, td->urb);
		if (!ring) {
			xhci_warn(xhci, "WARN Cancelled URB %p has invalid stream ID %u.\n",
				  td->urb, td->urb->stream_id);
			continue;
		}
		/*
		 * If a ring stopped on the TD we need to cancel then we have to
		 * move the xHC endpoint ring dequeue pointer past this TD.
		 * Rings halted due to STALL may show hw_deq is past the stalled
		 * TD, but still require a set TR Deq command to flush xHC cache.
		 */
		hw_deq = xhci_get_hw_deq(xhci, ep->vdev, ep->ep_index,
					 td->urb->stream_id);
		hw_deq &= ~0xf;

		if (td->cancel_status == TD_HALTED || trb_in_td(xhci, td, hw_deq, false)) {
			switch (td->cancel_status) {
			case TD_CLEARED: /* TD is already no-op */
			case TD_CLEARING_CACHE: /* set TR deq command already queued */
				break;
			case TD_DIRTY: /* TD is cached, clear it */
			case TD_HALTED:
			case TD_CLEARING_CACHE_DEFERRED:
				if (cached_td) {
					if (cached_td->urb->stream_id != td->urb->stream_id) {
						/* Multiple streams case, defer move dq */
						xhci_dbg(xhci,
							 "Move dq deferred: stream %u URB %p\n",
							 td->urb->stream_id, td->urb);
						td->cancel_status = TD_CLEARING_CACHE_DEFERRED;
						break;
					}

					/* Should never happen, but clear the TD if it does */
					xhci_warn(xhci,
						  "Found multiple active URBs %p and %p in stream %u?\n",
						  td->urb, cached_td->urb,
						  td->urb->stream_id);
					td_to_noop(xhci, ring, cached_td, false);
					cached_td->cancel_status = TD_CLEARED;
				}

				td_to_noop(xhci, ring, td, false);
				td->cancel_status = TD_CLEARING_CACHE;
				cached_td = td;
				break;
			}
		} else {
			td_to_noop(xhci, ring, td, false);
			td->cancel_status = TD_CLEARED;
		}
	}

	/* If there's no need to move the dequeue pointer then we're done */
	if (!cached_td)
		return 0;

	err = xhci_move_dequeue_past_td(xhci, slot_id, ep->ep_index,
					cached_td->urb->stream_id,
					cached_td);
	if (err) {
		/* Failed to move past cached td, just set cached TDs to no-op */
		list_for_each_entry_safe(td, tmp_td, &ep->cancelled_td_list, cancelled_td_list) {
			/*
			 * Deferred TDs need to have the deq pointer set after the above command
			 * completes, so if that failed we just give up on all of them (and
			 * complain loudly since this could cause issues due to caching).
			 */
			if (td->cancel_status != TD_CLEARING_CACHE &&
			    td->cancel_status != TD_CLEARING_CACHE_DEFERRED)
				continue;
			xhci_warn(xhci, "Failed to clear cancelled cached URB %p, mark clear anyway\n",
				  td->urb);
			td_to_noop(xhci, ring, td, false);
			td->cancel_status = TD_CLEARED;
		}
	}
	return 0;
}

/*
 * Erase queued TDs from transfer ring(s) and give back those the xHC didn't
 * stop on. If necessary, queue commands to move the xHC off cancelled TDs it
 * stopped on. Those will be given back later when the commands complete.
 *
 * Call under xhci->lock on a stopped endpoint.
 */
void xhci_process_cancelled_tds(struct xhci_virt_ep *ep)
{
	xhci_invalidate_cancelled_tds(ep);
	xhci_giveback_invalidated_tds(ep);
}

/*
 * Returns the TD the endpoint ring halted on.
 * Only call for non-running rings without streams.
 */
static struct xhci_td *find_halted_td(struct xhci_virt_ep *ep)
{
	struct xhci_td *td;
	u64 hw_deq;

	if (!list_empty(&ep->ring->td_list)) { /* Not streams compatible */
		hw_deq = xhci_get_hw_deq(ep->xhci, ep->vdev, ep->ep_index, 0);
		hw_deq &= ~0xf;
		td = list_first_entry(&ep->ring->td_list, struct xhci_td, td_list);
		if (trb_in_td(ep->xhci, td, hw_deq, false))
			return td;
	}
	return NULL;
}
  952. /*
  953. * When we get a command completion for a Stop Endpoint Command, we need to
  954. * unlink any cancelled TDs from the ring. There are two ways to do that:
  955. *
  956. * 1. If the HW was in the middle of processing the TD that needs to be
  957. * cancelled, then we must move the ring's dequeue pointer past the last TRB
  958. * in the TD with a Set Dequeue Pointer Command.
  959. * 2. Otherwise, we turn all the TRBs in the TD into No-op TRBs (with the chain
  960. * bit cleared) so that the HW will skip over them.
  961. */
  962. static void xhci_handle_cmd_stop_ep(struct xhci_hcd *xhci, int slot_id,
  963. union xhci_trb *trb, u32 comp_code)
  964. {
  965. unsigned int ep_index;
  966. struct xhci_virt_ep *ep;
  967. struct xhci_ep_ctx *ep_ctx;
  968. struct xhci_td *td = NULL;
  969. enum xhci_ep_reset_type reset_type;
  970. struct xhci_command *command;
  971. int err;
  972. if (unlikely(TRB_TO_SUSPEND_PORT(le32_to_cpu(trb->generic.field[3])))) {
  973. if (!xhci->devs[slot_id])
  974. xhci_warn(xhci, "Stop endpoint command completion for disabled slot %u\n",
  975. slot_id);
  976. return;
  977. }
  978. ep_index = TRB_TO_EP_INDEX(le32_to_cpu(trb->generic.field[3]));
  979. ep = xhci_get_virt_ep(xhci, slot_id, ep_index);
  980. if (!ep)
  981. return;
  982. ep_ctx = xhci_get_ep_ctx(xhci, ep->vdev->out_ctx, ep_index);
  983. trace_xhci_handle_cmd_stop_ep(ep_ctx);
  984. if (comp_code == COMP_CONTEXT_STATE_ERROR) {
  985. /*
  986. * If stop endpoint command raced with a halting endpoint we need to
  987. * reset the host side endpoint first.
  988. * If the TD we halted on isn't cancelled the TD should be given back
  989. * with a proper error code, and the ring dequeue moved past the TD.
  990. * In the streams case we can't find hw_deq or the TD we halted on, so do a
  991. * soft reset.
  992. *
  993. * The proper error code is unknown here; it would be -EPIPE if the device side
  994. * of the endpoint halted (aka STALL), and -EPROTO if not (transaction error).
  995. * We use -EPROTO: if the device is stalled it should return a stall error on the
  996. * next transfer, which then will return -EPIPE, and the device side stall is
  997. * noted and cleared by the class driver.
  998. */
  999. switch (GET_EP_CTX_STATE(ep_ctx)) {
  1000. case EP_STATE_HALTED:
  1001. xhci_dbg(xhci, "Stop ep completion raced with stall, reset ep\n");
  1002. if (ep->ep_state & EP_HAS_STREAMS) {
  1003. reset_type = EP_SOFT_RESET;
  1004. } else {
  1005. reset_type = EP_HARD_RESET;
  1006. td = find_halted_td(ep);
  1007. if (td)
  1008. td->status = -EPROTO;
  1009. }
  1010. /* reset ep, reset handler cleans up cancelled tds */
  1011. err = xhci_handle_halted_endpoint(xhci, ep, td, reset_type);
  1012. if (err)
  1013. break;
  1014. ep->ep_state &= ~EP_STOP_CMD_PENDING;
  1015. return;
  1016. case EP_STATE_STOPPED:
  1017. /*
  1018. * Per xHCI 4.6.9, Stop Endpoint command on a Stopped
  1019. * EP is a Context State Error, and EP stays Stopped.
  1020. *
  1021. * But maybe it failed on Halted, and somebody ran Reset
  1022. * Endpoint later. EP state is now Stopped and EP_HALTED
  1023. * still set because Reset EP handler will run after us.
  1024. */
  1025. if (ep->ep_state & EP_HALTED)
  1026. break;
  1027. /*
  1028. * On some HCs EP state remains Stopped for some tens of
  1029. * us to a few ms or more after a doorbell ring, and any
  1030. * new Stop Endpoint fails without aborting the restart.
  1031. * This handler may run quickly enough to still see this
  1032. * Stopped state, but it will soon change to Running.
  1033. *
  1034. * Assume this bug on unexpected Stop Endpoint failures.
  1035. * Keep retrying until the EP starts and stops again, on
  1036. * chips where this is known to help. Wait for 100ms.
  1037. */
  1038. if (time_is_before_jiffies(ep->stop_time + msecs_to_jiffies(100)))
  1039. break;
  1040. fallthrough;
  1041. case EP_STATE_RUNNING:
  1042. /* Race, HW handled stop ep cmd before ep was running */
  1043. xhci_dbg(xhci, "Stop ep completion ctx error, ctx_state %d\n",
  1044. GET_EP_CTX_STATE(ep_ctx));
  1045. command = xhci_alloc_command(xhci, false, GFP_ATOMIC);
  1046. if (!command) {
  1047. ep->ep_state &= ~EP_STOP_CMD_PENDING;
  1048. return;
  1049. }
  1050. xhci_queue_stop_endpoint(xhci, command, slot_id, ep_index, 0);
  1051. xhci_ring_cmd_db(xhci);
  1052. return;
  1053. default:
  1054. break;
  1055. }
  1056. }
  1057. /* will queue a set TR deq if stopped on a cancelled, uncleared TD */
  1058. xhci_invalidate_cancelled_tds(ep);
  1059. ep->ep_state &= ~EP_STOP_CMD_PENDING;
  1060. /* Otherwise ring the doorbell(s) to restart queued transfers */
  1061. xhci_giveback_invalidated_tds(ep);
  1062. ring_doorbell_for_active_rings(xhci, slot_id, ep_index);
  1063. }
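/*
 * Remove every TD from this ring's td_list and give the corresponding URBs
 * back with -ESHUTDOWN. Used on the host-died path, where the hardware is
 * assumed to no longer access the ring.
 */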
  1064. static void xhci_kill_ring_urbs(struct xhci_hcd *xhci, struct xhci_ring *ring)
  1065. {
  1066. struct xhci_td *cur_td;
  1067. struct xhci_td *tmp;
  1068. list_for_each_entry_safe(cur_td, tmp, &ring->td_list, td_list) {
  1069. list_del_init(&cur_td->td_list);
  1070. if (!list_empty(&cur_td->cancelled_td_list))
  1071. list_del_init(&cur_td->cancelled_td_list);
  1072. xhci_unmap_td_bounce_buffer(xhci, ring, cur_td);
  1073. inc_td_cnt(cur_td->urb);
  1074. if (last_td_in_urb(cur_td))
  1075. xhci_giveback_urb_in_irq(xhci, cur_td, -ESHUTDOWN);
  1076. }
  1077. }
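/*
 * Give back all URBs on every ring of this endpoint (including any stream
 * rings), plus any TDs already sitting on the cancelled list, with -ESHUTDOWN.
 */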
  1078. static void xhci_kill_endpoint_urbs(struct xhci_hcd *xhci,
  1079. int slot_id, int ep_index)
  1080. {
  1081. struct xhci_td *cur_td;
  1082. struct xhci_td *tmp;
  1083. struct xhci_virt_ep *ep;
  1084. struct xhci_ring *ring;
  1085. ep = xhci_get_virt_ep(xhci, slot_id, ep_index);
  1086. if (!ep)
  1087. return;
  1088. if ((ep->ep_state & EP_HAS_STREAMS) ||
  1089. (ep->ep_state & EP_GETTING_NO_STREAMS)) {
  1090. int stream_id;
  1091. for (stream_id = 1; stream_id < ep->stream_info->num_streams;
  1092. stream_id++) {
  1093. ring = ep->stream_info->stream_rings[stream_id];
  1094. if (!ring)
  1095. continue;
  1096. xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb,
  1097. "Killing URBs for slot ID %u, ep index %u, stream %u",
  1098. slot_id, ep_index, stream_id);
  1099. xhci_kill_ring_urbs(xhci, ring);
  1100. }
  1101. } else {
  1102. ring = ep->ring;
  1103. if (!ring)
  1104. return;
  1105. xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb,
  1106. "Killing URBs for slot ID %u, ep index %u",
  1107. slot_id, ep_index);
  1108. xhci_kill_ring_urbs(xhci, ring);
  1109. }
  1110. list_for_each_entry_safe(cur_td, tmp, &ep->cancelled_td_list,
  1111. cancelled_td_list) {
  1112. list_del_init(&cur_td->cancelled_td_list);
  1113. inc_td_cnt(cur_td->urb);
  1114. if (last_td_in_urb(cur_td))
  1115. xhci_giveback_urb_in_irq(xhci, cur_td, -ESHUTDOWN);
  1116. }
  1117. }
  1118. /*
  1119. * host controller died, register read returns 0xffffffff
  1120. * Complete pending commands, mark them ABORTED.
  1121. * URBs need to be given back as usb core might be waiting with device locks
  1122. * held for the URBs to finish during device disconnect, blocking host remove.
  1123. *
  1124. * Call with xhci->lock held.
  1125. * The lock is released and re-acquired while giving back URBs.
  1126. */
  1127. void xhci_hc_died(struct xhci_hcd *xhci)
  1128. {
  1129. int i, j;
  1130. if (xhci->xhc_state & XHCI_STATE_DYING)
  1131. return;
  1132. xhci_err(xhci, "xHCI host controller not responding, assume dead\n");
  1133. xhci->xhc_state |= XHCI_STATE_DYING;
  1134. xhci_cleanup_command_queue(xhci);
  1135. /* return any pending urbs, remove may be waiting for them */
  1136. for (i = 0; i <= HCS_MAX_SLOTS(xhci->hcs_params1); i++) {
  1137. if (!xhci->devs[i])
  1138. continue;
  1139. for (j = 0; j < 31; j++)
  1140. xhci_kill_endpoint_urbs(xhci, i, j);
  1141. }
  1142. /* inform usb core hc died if PCI remove isn't already handling it */
  1143. if (!(xhci->xhc_state & XHCI_STATE_REMOVING))
  1144. usb_hc_died(xhci_to_hcd(xhci));
  1145. }
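/*
 * Advance the ring's software dequeue pointer until it matches the dequeue
 * position that the completed Set TR Dequeue Pointer command installed.
 */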
  1146. static void update_ring_for_set_deq_completion(struct xhci_hcd *xhci,
  1147. struct xhci_virt_device *dev,
  1148. struct xhci_ring *ep_ring,
  1149. unsigned int ep_index)
  1150. {
  1151. union xhci_trb *dequeue_temp;
  1152. dequeue_temp = ep_ring->dequeue;
  1153. /* If we get two back-to-back stalls, and the first stalled transfer
  1154. * ends just before a link TRB, the dequeue pointer will be left on
  1155. * the link TRB by the code in the while loop. So we have to update
  1156. * the dequeue pointer one segment further, or we'll jump off
  1157. * the segment into la-la-land.
  1158. */
  1159. if (trb_is_link(ep_ring->dequeue)) {
  1160. ep_ring->deq_seg = ep_ring->deq_seg->next;
  1161. ep_ring->dequeue = ep_ring->deq_seg->trbs;
  1162. }
  1163. while (ep_ring->dequeue != dev->eps[ep_index].queued_deq_ptr) {
  1164. /* We have more usable TRBs */
  1165. ep_ring->dequeue++;
  1166. if (trb_is_link(ep_ring->dequeue)) {
  1167. if (ep_ring->dequeue ==
  1168. dev->eps[ep_index].queued_deq_ptr)
  1169. break;
  1170. ep_ring->deq_seg = ep_ring->deq_seg->next;
  1171. ep_ring->dequeue = ep_ring->deq_seg->trbs;
  1172. }
  1173. if (ep_ring->dequeue == dequeue_temp) {
  1174. xhci_dbg(xhci, "Unable to find new dequeue pointer\n");
  1175. break;
  1176. }
  1177. }
  1178. }
  1179. /*
  1180. * When we get a completion for a Set Transfer Ring Dequeue Pointer command,
  1181. * we need to clear the set deq pending flag in the endpoint ring state, so that
  1182. * the TD queueing code can ring the doorbell again. We also need to ring the
  1183. * endpoint doorbell to restart the ring, but only if there aren't more
  1184. * cancellations pending.
  1185. */
  1186. static void xhci_handle_cmd_set_deq(struct xhci_hcd *xhci, int slot_id,
  1187. union xhci_trb *trb, u32 cmd_comp_code)
  1188. {
  1189. unsigned int ep_index;
  1190. unsigned int stream_id;
  1191. struct xhci_ring *ep_ring;
  1192. struct xhci_virt_ep *ep;
  1193. struct xhci_ep_ctx *ep_ctx;
  1194. struct xhci_slot_ctx *slot_ctx;
  1195. struct xhci_td *td, *tmp_td;
  1196. ep_index = TRB_TO_EP_INDEX(le32_to_cpu(trb->generic.field[3]));
  1197. stream_id = TRB_TO_STREAM_ID(le32_to_cpu(trb->generic.field[2]));
  1198. ep = xhci_get_virt_ep(xhci, slot_id, ep_index);
  1199. if (!ep)
  1200. return;
  1201. ep_ring = xhci_virt_ep_to_ring(xhci, ep, stream_id);
  1202. if (!ep_ring) {
  1203. xhci_warn(xhci, "WARN Set TR deq ptr command for freed stream ID %u\n",
  1204. stream_id);
  1205. /* XXX: Harmless??? */
  1206. goto cleanup;
  1207. }
  1208. ep_ctx = xhci_get_ep_ctx(xhci, ep->vdev->out_ctx, ep_index);
  1209. slot_ctx = xhci_get_slot_ctx(xhci, ep->vdev->out_ctx);
  1210. trace_xhci_handle_cmd_set_deq(slot_ctx);
  1211. trace_xhci_handle_cmd_set_deq_ep(ep_ctx);
  1212. if (cmd_comp_code != COMP_SUCCESS) {
  1213. unsigned int ep_state;
  1214. unsigned int slot_state;
  1215. switch (cmd_comp_code) {
  1216. case COMP_TRB_ERROR:
  1217. xhci_warn(xhci, "WARN Set TR Deq Ptr cmd invalid because of stream ID configuration\n");
  1218. break;
  1219. case COMP_CONTEXT_STATE_ERROR:
  1220. xhci_warn(xhci, "WARN Set TR Deq Ptr cmd failed due to incorrect slot or ep state.\n");
  1221. ep_state = GET_EP_CTX_STATE(ep_ctx);
  1222. slot_state = le32_to_cpu(slot_ctx->dev_state);
  1223. slot_state = GET_SLOT_STATE(slot_state);
  1224. xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb,
  1225. "Slot state = %u, EP state = %u",
  1226. slot_state, ep_state);
  1227. break;
  1228. case COMP_SLOT_NOT_ENABLED_ERROR:
  1229. xhci_warn(xhci, "WARN Set TR Deq Ptr cmd failed because slot %u was not enabled.\n",
  1230. slot_id);
  1231. break;
  1232. default:
  1233. xhci_warn(xhci, "WARN Set TR Deq Ptr cmd with unknown completion code of %u.\n",
  1234. cmd_comp_code);
  1235. break;
  1236. }
  1237. /* OK, what do we do now? The endpoint state is hosed, and we
  1238. * should never get to this point if the synchronization between
  1239. * queueing and endpoint state is correct. This might happen
  1240. * if the device gets disconnected after we've finished
  1241. * cancelling URBs, which might not be an error...
  1242. */
  1243. } else {
  1244. u64 deq;
  1245. /* 4.6.10 deq ptr is written to the stream ctx for streams */
  1246. if (ep->ep_state & EP_HAS_STREAMS) {
  1247. struct xhci_stream_ctx *ctx =
  1248. &ep->stream_info->stream_ctx_array[stream_id];
  1249. deq = le64_to_cpu(ctx->stream_ring) & SCTX_DEQ_MASK;
  1250. /*
  1251. * Cadence xHCI controllers store some endpoint state
  1252. * information within Rsvd0 fields of Stream Endpoint
  1253. * context. This field is not cleared by the Set TR
  1254. * Dequeue Pointer command, which causes XDMA to skip
  1255. * over the transfer ring and leads to data loss on the
  1256. * stream pipe.
  1257. * To fix this issue the driver must clear the Rsvd0 field.
  1258. */
  1259. if (xhci->quirks & XHCI_CDNS_SCTX_QUIRK) {
  1260. ctx->reserved[0] = 0;
  1261. ctx->reserved[1] = 0;
  1262. }
  1263. } else {
  1264. deq = le64_to_cpu(ep_ctx->deq) & ~EP_CTX_CYCLE_MASK;
  1265. }
  1266. xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb,
  1267. "Successful Set TR Deq Ptr cmd, deq = @%08llx", deq);
  1268. if (xhci_trb_virt_to_dma(ep->queued_deq_seg,
  1269. ep->queued_deq_ptr) == deq) {
  1270. /* Update the ring's dequeue segment and dequeue pointer
  1271. * to reflect the new position.
  1272. */
  1273. update_ring_for_set_deq_completion(xhci, ep->vdev,
  1274. ep_ring, ep_index);
  1275. } else {
  1276. xhci_warn(xhci, "Mismatch between completed Set TR Deq Ptr command & xHCI internal state.\n");
  1277. xhci_warn(xhci, "ep deq seg = %p, deq ptr = %p\n",
  1278. ep->queued_deq_seg, ep->queued_deq_ptr);
  1279. }
  1280. }
  1281. /* HW cached TDs cleared from cache, give them back */
  1282. list_for_each_entry_safe(td, tmp_td, &ep->cancelled_td_list,
  1283. cancelled_td_list) {
  1284. ep_ring = xhci_urb_to_transfer_ring(ep->xhci, td->urb);
  1285. if (td->cancel_status == TD_CLEARING_CACHE) {
  1286. td->cancel_status = TD_CLEARED;
  1287. xhci_dbg(ep->xhci, "%s: Giveback cancelled URB %p TD\n",
  1288. __func__, td->urb);
  1289. xhci_td_cleanup(ep->xhci, td, ep_ring, td->status);
  1290. } else {
  1291. xhci_dbg(ep->xhci, "%s: Keep cancelled URB %p TD as cancel_status is %d\n",
  1292. __func__, td->urb, td->cancel_status);
  1293. }
  1294. }
  1295. cleanup:
  1296. ep->ep_state &= ~SET_DEQ_PENDING;
  1297. ep->queued_deq_seg = NULL;
  1298. ep->queued_deq_ptr = NULL;
  1299. /* Check for deferred or newly cancelled TDs */
  1300. if (!list_empty(&ep->cancelled_td_list)) {
  1301. xhci_dbg(ep->xhci, "%s: Pending TDs to clear, continuing with invalidation\n",
  1302. __func__);
  1303. xhci_invalidate_cancelled_tds(ep);
  1304. /* Try to restart the endpoint if all is done */
  1305. ring_doorbell_for_active_rings(xhci, slot_id, ep_index);
  1306. /* Start giving back any TDs invalidated above */
  1307. xhci_giveback_invalidated_tds(ep);
  1308. } else {
  1309. /* Restart any rings with pending URBs */
  1310. xhci_dbg(ep->xhci, "%s: All TDs cleared, ring doorbell\n", __func__);
  1311. ring_doorbell_for_active_rings(xhci, slot_id, ep_index);
  1312. }
  1313. }
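/*
 * Reset Endpoint command completion: clean up cancelled TDs, clear our
 * internal EP_HALTED state, and restart the ring if this was a soft reset
 * (TSP bit set in the command TRB).
 */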
  1314. static void xhci_handle_cmd_reset_ep(struct xhci_hcd *xhci, int slot_id,
  1315. union xhci_trb *trb, u32 cmd_comp_code)
  1316. {
  1317. struct xhci_virt_ep *ep;
  1318. struct xhci_ep_ctx *ep_ctx;
  1319. unsigned int ep_index;
  1320. ep_index = TRB_TO_EP_INDEX(le32_to_cpu(trb->generic.field[3]));
  1321. ep = xhci_get_virt_ep(xhci, slot_id, ep_index);
  1322. if (!ep)
  1323. return;
  1324. ep_ctx = xhci_get_ep_ctx(xhci, ep->vdev->out_ctx, ep_index);
  1325. trace_xhci_handle_cmd_reset_ep(ep_ctx);
  1326. /* This command will only fail if the endpoint wasn't halted,
  1327. * but we don't care.
  1328. */
  1329. xhci_dbg_trace(xhci, trace_xhci_dbg_reset_ep,
  1330. "Ignoring reset ep completion code of %u", cmd_comp_code);
  1331. /* Cleanup cancelled TDs as ep is stopped. May queue a Set TR Deq cmd */
  1332. xhci_invalidate_cancelled_tds(ep);
  1333. /* Clear our internal halted state */
  1334. ep->ep_state &= ~EP_HALTED;
  1335. xhci_giveback_invalidated_tds(ep);
  1336. /* if this was a soft reset, then restart */
  1337. if ((le32_to_cpu(trb->generic.field[3])) & TRB_TSP)
  1338. ring_doorbell_for_active_rings(xhci, slot_id, ep_index);
  1339. }
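/* Record the slot ID assigned by a successful Enable Slot command, or 0 on failure. */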
  1340. static void xhci_handle_cmd_enable_slot(int slot_id, struct xhci_command *command,
  1341. u32 cmd_comp_code)
  1342. {
  1343. if (cmd_comp_code == COMP_SUCCESS)
  1344. command->slot_id = slot_id;
  1345. else
  1346. command->slot_id = 0;
  1347. }
  1348. static void xhci_handle_cmd_disable_slot(struct xhci_hcd *xhci, int slot_id)
  1349. {
  1350. struct xhci_virt_device *virt_dev;
  1351. struct xhci_slot_ctx *slot_ctx;
  1352. virt_dev = xhci->devs[slot_id];
  1353. if (!virt_dev)
  1354. return;
  1355. slot_ctx = xhci_get_slot_ctx(xhci, virt_dev->out_ctx);
  1356. trace_xhci_handle_cmd_disable_slot(slot_ctx);
  1357. if (xhci->quirks & XHCI_EP_LIMIT_QUIRK)
  1358. /* Delete default control endpoint resources */
  1359. xhci_free_device_endpoint_resources(xhci, virt_dev, true);
  1360. }
  1361. static void xhci_handle_cmd_config_ep(struct xhci_hcd *xhci, int slot_id)
  1362. {
  1363. struct xhci_virt_device *virt_dev;
  1364. struct xhci_input_control_ctx *ctrl_ctx;
  1365. struct xhci_ep_ctx *ep_ctx;
  1366. unsigned int ep_index;
  1367. u32 add_flags;
  1368. /*
  1369. * Configure endpoint commands can come from the USB core configuration
  1370. * or alt setting changes, or when streams were being configured.
  1371. */
  1372. virt_dev = xhci->devs[slot_id];
  1373. if (!virt_dev)
  1374. return;
  1375. ctrl_ctx = xhci_get_input_control_ctx(virt_dev->in_ctx);
  1376. if (!ctrl_ctx) {
  1377. xhci_warn(xhci, "Could not get input context, bad type.\n");
  1378. return;
  1379. }
  1380. add_flags = le32_to_cpu(ctrl_ctx->add_flags);
  1381. /* Input ctx add_flags are the endpoint index plus one */
  1382. ep_index = xhci_last_valid_endpoint(add_flags) - 1;
  1383. ep_ctx = xhci_get_ep_ctx(xhci, virt_dev->out_ctx, ep_index);
  1384. trace_xhci_handle_cmd_config_ep(ep_ctx);
  1385. return;
  1386. }
  1387. static void xhci_handle_cmd_addr_dev(struct xhci_hcd *xhci, int slot_id)
  1388. {
  1389. struct xhci_virt_device *vdev;
  1390. struct xhci_slot_ctx *slot_ctx;
  1391. vdev = xhci->devs[slot_id];
  1392. if (!vdev)
  1393. return;
  1394. slot_ctx = xhci_get_slot_ctx(xhci, vdev->out_ctx);
  1395. trace_xhci_handle_cmd_addr_dev(slot_ctx);
  1396. }
  1397. static void xhci_handle_cmd_reset_dev(struct xhci_hcd *xhci, int slot_id)
  1398. {
  1399. struct xhci_virt_device *vdev;
  1400. struct xhci_slot_ctx *slot_ctx;
  1401. vdev = xhci->devs[slot_id];
  1402. if (!vdev) {
  1403. xhci_warn(xhci, "Reset device command completion for disabled slot %u\n",
  1404. slot_id);
  1405. return;
  1406. }
  1407. slot_ctx = xhci_get_slot_ctx(xhci, vdev->out_ctx);
  1408. trace_xhci_handle_cmd_reset_dev(slot_ctx);
  1409. xhci_dbg(xhci, "Completed reset device command.\n");
  1410. }
  1411. static void xhci_handle_cmd_nec_get_fw(struct xhci_hcd *xhci,
  1412. struct xhci_event_cmd *event)
  1413. {
  1414. if (!(xhci->quirks & XHCI_NEC_HOST)) {
  1415. xhci_warn(xhci, "WARN NEC_GET_FW command on non-NEC host\n");
  1416. return;
  1417. }
  1418. xhci_dbg_trace(xhci, trace_xhci_dbg_quirks,
  1419. "NEC firmware version %2x.%02x",
  1420. NEC_FW_MAJOR(le32_to_cpu(event->status)),
  1421. NEC_FW_MINOR(le32_to_cpu(event->status)));
  1422. }
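/*
 * Remove a command from the command list and complete it. Commands that have
 * a completion are left for their waiter to free; the rest are freed here.
 */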
  1423. static void xhci_complete_del_and_free_cmd(struct xhci_command *cmd, u32 status)
  1424. {
  1425. list_del(&cmd->cmd_list);
  1426. if (cmd->completion) {
  1427. cmd->status = status;
  1428. complete(cmd->completion);
  1429. } else {
  1430. kfree(cmd);
  1431. }
  1432. }
  1433. void xhci_cleanup_command_queue(struct xhci_hcd *xhci)
  1434. {
  1435. struct xhci_command *cur_cmd, *tmp_cmd;
  1436. xhci->current_cmd = NULL;
  1437. list_for_each_entry_safe(cur_cmd, tmp_cmd, &xhci->cmd_list, cmd_list)
  1438. xhci_complete_del_and_free_cmd(cur_cmd, COMP_COMMAND_ABORTED);
  1439. }
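/*
 * Delayed work run when a queued command has not completed in time: abort the
 * command ring if it is still running, declare the host dead if registers no
 * longer respond, or hand a stopped ring to xhci_handle_stopped_cmd_ring().
 */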
  1440. void xhci_handle_command_timeout(struct work_struct *work)
  1441. {
  1442. struct xhci_hcd *xhci;
  1443. unsigned long flags;
  1444. char str[XHCI_MSG_MAX];
  1445. u64 hw_ring_state;
  1446. u32 cmd_field3;
  1447. u32 usbsts;
  1448. xhci = container_of(to_delayed_work(work), struct xhci_hcd, cmd_timer);
  1449. spin_lock_irqsave(&xhci->lock, flags);
  1450. /*
  1451. * If timeout work is pending, or current_cmd is NULL, it means we
  1452. * raced with command completion. Command is handled so just return.
  1453. */
  1454. if (!xhci->current_cmd || delayed_work_pending(&xhci->cmd_timer)) {
  1455. spin_unlock_irqrestore(&xhci->lock, flags);
  1456. return;
  1457. }
  1458. cmd_field3 = le32_to_cpu(xhci->current_cmd->command_trb->generic.field[3]);
  1459. usbsts = readl(&xhci->op_regs->status);
  1460. xhci_dbg(xhci, "Command timeout, USBSTS:%s\n", xhci_decode_usbsts(str, usbsts));
  1461. /* Bail out and tear down xhci if a stop endpoint command failed */
  1462. if (TRB_FIELD_TO_TYPE(cmd_field3) == TRB_STOP_RING) {
  1463. struct xhci_virt_ep *ep;
  1464. xhci_warn(xhci, "xHCI host not responding to stop endpoint command\n");
  1465. ep = xhci_get_virt_ep(xhci, TRB_TO_SLOT_ID(cmd_field3),
  1466. TRB_TO_EP_INDEX(cmd_field3));
  1467. if (ep)
  1468. ep->ep_state &= ~EP_STOP_CMD_PENDING;
  1469. xhci_halt(xhci);
  1470. xhci_hc_died(xhci);
  1471. goto time_out_completed;
  1472. }
  1473. /* mark this command to be cancelled */
  1474. xhci->current_cmd->status = COMP_COMMAND_ABORTED;
  1475. /* Make sure command ring is running before aborting it */
  1476. hw_ring_state = xhci_read_64(xhci, &xhci->op_regs->cmd_ring);
  1477. if (hw_ring_state == ~(u64)0) {
  1478. xhci_hc_died(xhci);
  1479. goto time_out_completed;
  1480. }
  1481. if ((xhci->cmd_ring_state & CMD_RING_STATE_RUNNING) &&
  1482. (hw_ring_state & CMD_RING_RUNNING)) {
  1483. /* Prevent new doorbell, and start command abort */
  1484. xhci->cmd_ring_state = CMD_RING_STATE_ABORTED;
  1485. xhci_dbg(xhci, "Command timeout\n");
  1486. xhci_abort_cmd_ring(xhci, flags);
  1487. goto time_out_completed;
  1488. }
  1489. /* host removed. Bail out */
  1490. if (xhci->xhc_state & XHCI_STATE_REMOVING) {
  1491. xhci_dbg(xhci, "host removed, ring start fail?\n");
  1492. xhci_cleanup_command_queue(xhci);
  1493. goto time_out_completed;
  1494. }
  1495. /* command timeout on stopped ring, ring can't be aborted */
  1496. xhci_dbg(xhci, "Command timeout on stopped ring\n");
  1497. xhci_handle_stopped_cmd_ring(xhci, xhci->current_cmd);
  1498. time_out_completed:
  1499. spin_unlock_irqrestore(&xhci->lock, flags);
  1500. return;
  1501. }
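/* Handle one Command Completion Event from the event ring. */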
  1502. static void handle_cmd_completion(struct xhci_hcd *xhci,
  1503. struct xhci_event_cmd *event)
  1504. {
  1505. unsigned int slot_id = TRB_TO_SLOT_ID(le32_to_cpu(event->flags));
  1506. u64 cmd_dma;
  1507. dma_addr_t cmd_dequeue_dma;
  1508. u32 cmd_comp_code;
  1509. union xhci_trb *cmd_trb;
  1510. struct xhci_command *cmd;
  1511. u32 cmd_type;
  1512. if (slot_id >= MAX_HC_SLOTS) {
  1513. xhci_warn(xhci, "Invalid slot_id %u\n", slot_id);
  1514. return;
  1515. }
  1516. cmd_dma = le64_to_cpu(event->cmd_trb);
  1517. cmd_trb = xhci->cmd_ring->dequeue;
  1518. trace_xhci_handle_command(xhci->cmd_ring, &cmd_trb->generic);
  1519. cmd_comp_code = GET_COMP_CODE(le32_to_cpu(event->status));
  1520. /* If CMD ring stopped we own the trbs between enqueue and dequeue */
  1521. if (cmd_comp_code == COMP_COMMAND_RING_STOPPED) {
  1522. complete_all(&xhci->cmd_ring_stop_completion);
  1523. return;
  1524. }
  1525. cmd_dequeue_dma = xhci_trb_virt_to_dma(xhci->cmd_ring->deq_seg,
  1526. cmd_trb);
  1527. /*
  1528. * Check whether the completion event is for our internal kept
  1529. * command.
  1530. */
  1531. if (!cmd_dequeue_dma || cmd_dma != (u64)cmd_dequeue_dma) {
  1532. xhci_warn(xhci,
  1533. "ERROR mismatched command completion event\n");
  1534. return;
  1535. }
  1536. cmd = list_first_entry(&xhci->cmd_list, struct xhci_command, cmd_list);
  1537. cancel_delayed_work(&xhci->cmd_timer);
  1538. if (cmd->command_trb != xhci->cmd_ring->dequeue) {
  1539. xhci_err(xhci,
  1540. "Command completion event does not match command\n");
  1541. return;
  1542. }
  1543. /*
  1544. * Host aborted the command ring, check if the current command was
  1545. * supposed to be aborted, otherwise continue normally.
  1546. * The command ring is stopped now, but the xHC will issue a Command
  1547. * Ring Stopped event which will cause us to restart it.
  1548. */
  1549. if (cmd_comp_code == COMP_COMMAND_ABORTED) {
  1550. xhci->cmd_ring_state = CMD_RING_STATE_STOPPED;
  1551. if (cmd->status == COMP_COMMAND_ABORTED) {
  1552. if (xhci->current_cmd == cmd)
  1553. xhci->current_cmd = NULL;
  1554. goto event_handled;
  1555. }
  1556. }
  1557. cmd_type = TRB_FIELD_TO_TYPE(le32_to_cpu(cmd_trb->generic.field[3]));
  1558. switch (cmd_type) {
  1559. case TRB_ENABLE_SLOT:
  1560. xhci_handle_cmd_enable_slot(slot_id, cmd, cmd_comp_code);
  1561. break;
  1562. case TRB_DISABLE_SLOT:
  1563. xhci_handle_cmd_disable_slot(xhci, slot_id);
  1564. break;
  1565. case TRB_CONFIG_EP:
  1566. if (!cmd->completion)
  1567. xhci_handle_cmd_config_ep(xhci, slot_id);
  1568. break;
  1569. case TRB_EVAL_CONTEXT:
  1570. break;
  1571. case TRB_ADDR_DEV:
  1572. xhci_handle_cmd_addr_dev(xhci, slot_id);
  1573. break;
  1574. case TRB_STOP_RING:
  1575. WARN_ON(slot_id != TRB_TO_SLOT_ID(
  1576. le32_to_cpu(cmd_trb->generic.field[3])));
  1577. if (!cmd->completion)
  1578. xhci_handle_cmd_stop_ep(xhci, slot_id, cmd_trb,
  1579. cmd_comp_code);
  1580. break;
  1581. case TRB_SET_DEQ:
  1582. WARN_ON(slot_id != TRB_TO_SLOT_ID(
  1583. le32_to_cpu(cmd_trb->generic.field[3])));
  1584. xhci_handle_cmd_set_deq(xhci, slot_id, cmd_trb, cmd_comp_code);
  1585. break;
  1586. case TRB_CMD_NOOP:
  1587. /* Is this an aborted command turned to NO-OP? */
  1588. if (cmd->status == COMP_COMMAND_RING_STOPPED)
  1589. cmd_comp_code = COMP_COMMAND_RING_STOPPED;
  1590. break;
  1591. case TRB_RESET_EP:
  1592. WARN_ON(slot_id != TRB_TO_SLOT_ID(
  1593. le32_to_cpu(cmd_trb->generic.field[3])));
  1594. xhci_handle_cmd_reset_ep(xhci, slot_id, cmd_trb, cmd_comp_code);
  1595. break;
  1596. case TRB_RESET_DEV:
  1597. /* SLOT_ID field in reset device cmd completion event TRB is 0.
  1598. * Use the SLOT_ID from the command TRB instead (xhci 4.6.11)
  1599. */
  1600. slot_id = TRB_TO_SLOT_ID(
  1601. le32_to_cpu(cmd_trb->generic.field[3]));
  1602. xhci_handle_cmd_reset_dev(xhci, slot_id);
  1603. break;
  1604. case TRB_NEC_GET_FW:
  1605. xhci_handle_cmd_nec_get_fw(xhci, event);
  1606. break;
  1607. default:
  1608. /* Skip over unknown commands on the event ring */
  1609. xhci_info(xhci, "INFO unknown command type %d\n", cmd_type);
  1610. break;
  1611. }
  1612. /* restart timer if this wasn't the last command */
  1613. if (!list_is_singular(&xhci->cmd_list)) {
  1614. xhci->current_cmd = list_first_entry(&cmd->cmd_list,
  1615. struct xhci_command, cmd_list);
  1616. xhci_mod_cmd_timer(xhci);
  1617. } else if (xhci->current_cmd == cmd) {
  1618. xhci->current_cmd = NULL;
  1619. }
  1620. event_handled:
  1621. xhci_complete_del_and_free_cmd(cmd, cmd_comp_code);
  1622. inc_deq(xhci, xhci->cmd_ring);
  1623. }
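/* Vendor-specific events; only NEC command completion events are handled. */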
  1624. static void handle_vendor_event(struct xhci_hcd *xhci,
  1625. union xhci_trb *event, u32 trb_type)
  1626. {
  1627. xhci_dbg(xhci, "Vendor specific event TRB type = %u\n", trb_type);
  1628. if (trb_type == TRB_NEC_CMD_COMP && (xhci->quirks & XHCI_NEC_HOST))
  1629. handle_cmd_completion(xhci, &event->event_cmd);
  1630. }
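/*
 * Device Notification events signal a device-initiated remote wakeup; pass it
 * up to the hub driver via usb_wakeup_notification().
 */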
  1631. static void handle_device_notification(struct xhci_hcd *xhci,
  1632. union xhci_trb *event)
  1633. {
  1634. u32 slot_id;
  1635. struct usb_device *udev;
  1636. slot_id = TRB_TO_SLOT_ID(le32_to_cpu(event->generic.field[3]));
  1637. if (!xhci->devs[slot_id]) {
  1638. xhci_warn(xhci, "Device Notification event for "
  1639. "unused slot %u\n", slot_id);
  1640. return;
  1641. }
  1642. xhci_dbg(xhci, "Device Wake Notification event for slot ID %u\n",
  1643. slot_id);
  1644. udev = xhci->devs[slot_id]->udev;
  1645. if (udev && udev->parent)
  1646. usb_wakeup_notification(udev->parent, udev->portnum);
  1647. }
  1648. /*
  1649. * Quirk handler for errata seen on Cavium ThunderX2 processor XHCI
  1650. * Controller.
  1651. * As per ThunderX2 errata-129, a USB 2 device may come up as USB 1
  1652. * if a connection to a USB 1 device is followed by another connection
  1653. * to a USB 2 device.
  1654. *
  1655. * Reset the PHY after the USB device is disconnected if the device speed
  1656. * is less than HCD_USB3.
  1657. * Retry the reset sequence a maximum of 4 times, checking the PLL lock status.
  1658. *
  1659. */
  1660. static void xhci_cavium_reset_phy_quirk(struct xhci_hcd *xhci)
  1661. {
  1662. struct usb_hcd *hcd = xhci_to_hcd(xhci);
  1663. u32 pll_lock_check;
  1664. u32 retry_count = 4;
  1665. do {
  1666. /* Assert PHY reset */
  1667. writel(0x6F, hcd->regs + 0x1048);
  1668. udelay(10);
  1669. /* De-assert the PHY reset */
  1670. writel(0x7F, hcd->regs + 0x1048);
  1671. udelay(200);
  1672. pll_lock_check = readl(hcd->regs + 0x1070);
  1673. } while (!(pll_lock_check & 0x1) && --retry_count);
  1674. }
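/*
 * Handle a Port Status Change event: deal with remote wakeup and resume
 * signalling, then kick the USB core into polling the affected roothub.
 */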
  1675. static void handle_port_status(struct xhci_hcd *xhci, union xhci_trb *event)
  1676. {
  1677. struct usb_hcd *hcd;
  1678. u32 port_id;
  1679. u32 portsc, cmd_reg;
  1680. int max_ports;
  1681. unsigned int hcd_portnum;
  1682. struct xhci_bus_state *bus_state;
  1683. bool bogus_port_status = false;
  1684. struct xhci_port *port;
  1685. /* Port status change events always have a successful completion code */
  1686. if (GET_COMP_CODE(le32_to_cpu(event->generic.field[2])) != COMP_SUCCESS)
  1687. xhci_warn(xhci,
  1688. "WARN: xHC returned failed port status event\n");
  1689. port_id = GET_PORT_ID(le32_to_cpu(event->generic.field[0]));
  1690. max_ports = HCS_MAX_PORTS(xhci->hcs_params1);
  1691. if ((port_id <= 0) || (port_id > max_ports)) {
  1692. xhci_warn(xhci, "Port change event with invalid port ID %d\n",
  1693. port_id);
  1694. return;
  1695. }
  1696. port = &xhci->hw_ports[port_id - 1];
  1697. if (!port || !port->rhub || port->hcd_portnum == DUPLICATE_ENTRY) {
  1698. xhci_warn(xhci, "Port change event, no port for port ID %u\n",
  1699. port_id);
  1700. bogus_port_status = true;
  1701. goto cleanup;
  1702. }
  1703. /* We might get interrupts after shared_hcd is removed */
  1704. if (port->rhub == &xhci->usb3_rhub && xhci->shared_hcd == NULL) {
  1705. xhci_dbg(xhci, "ignore port event for removed USB3 hcd\n");
  1706. bogus_port_status = true;
  1707. goto cleanup;
  1708. }
  1709. hcd = port->rhub->hcd;
  1710. bus_state = &port->rhub->bus_state;
  1711. hcd_portnum = port->hcd_portnum;
  1712. portsc = readl(port->addr);
  1713. xhci_dbg(xhci, "Port change event, %d-%d, id %d, portsc: 0x%x\n",
  1714. hcd->self.busnum, hcd_portnum + 1, port_id, portsc);
  1715. trace_xhci_handle_port_status(port, portsc);
  1716. if (hcd->state == HC_STATE_SUSPENDED) {
  1717. xhci_dbg(xhci, "resume root hub\n");
  1718. usb_hcd_resume_root_hub(hcd);
  1719. }
  1720. if (hcd->speed >= HCD_USB3 &&
  1721. (portsc & PORT_PLS_MASK) == XDEV_INACTIVE) {
  1722. if (port->slot_id && xhci->devs[port->slot_id])
  1723. xhci->devs[port->slot_id]->flags |= VDEV_PORT_ERROR;
  1724. }
  1725. if ((portsc & PORT_PLC) && (portsc & PORT_PLS_MASK) == XDEV_RESUME) {
  1726. xhci_dbg(xhci, "port resume event for port %d\n", port_id);
  1727. cmd_reg = readl(&xhci->op_regs->command);
  1728. if (!(cmd_reg & CMD_RUN)) {
  1729. xhci_warn(xhci, "xHC is not running.\n");
  1730. goto cleanup;
  1731. }
  1732. if (DEV_SUPERSPEED_ANY(portsc)) {
  1733. xhci_dbg(xhci, "remote wake SS port %d\n", port_id);
  1734. /* Set a flag to say the port signaled remote wakeup,
  1735. * so we can tell the difference between the end of
  1736. * device and host initiated resume.
  1737. */
  1738. bus_state->port_remote_wakeup |= 1 << hcd_portnum;
  1739. xhci_test_and_clear_bit(xhci, port, PORT_PLC);
  1740. usb_hcd_start_port_resume(&hcd->self, hcd_portnum);
  1741. xhci_set_link_state(xhci, port, XDEV_U0);
  1742. /* Need to wait until the next link state change
  1743. * indicates the device is actually in U0.
  1744. */
  1745. bogus_port_status = true;
  1746. goto cleanup;
  1747. } else if (!test_bit(hcd_portnum, &bus_state->resuming_ports)) {
  1748. xhci_dbg(xhci, "resume HS port %d\n", port_id);
  1749. port->resume_timestamp = jiffies +
  1750. msecs_to_jiffies(USB_RESUME_TIMEOUT);
  1751. set_bit(hcd_portnum, &bus_state->resuming_ports);
  1752. /* Do the rest in GetPortStatus after resume time delay.
  1753. * Avoid polling roothub status before that so that the
  1754. * usb device's auto-resume latency stays around ~40ms.
  1755. */
  1756. set_bit(HCD_FLAG_POLL_RH, &hcd->flags);
  1757. mod_timer(&hcd->rh_timer,
  1758. port->resume_timestamp);
  1759. usb_hcd_start_port_resume(&hcd->self, hcd_portnum);
  1760. bogus_port_status = true;
  1761. }
  1762. }
  1763. if ((portsc & PORT_PLC) &&
  1764. DEV_SUPERSPEED_ANY(portsc) &&
  1765. ((portsc & PORT_PLS_MASK) == XDEV_U0 ||
  1766. (portsc & PORT_PLS_MASK) == XDEV_U1 ||
  1767. (portsc & PORT_PLS_MASK) == XDEV_U2)) {
  1768. xhci_dbg(xhci, "resume SS port %d finished\n", port_id);
  1769. complete(&port->u3exit_done);
  1770. /* We've just brought the device into U0/1/2 through either the
  1771. * Resume state after a device remote wakeup, or through the
  1772. * U3Exit state after a host-initiated resume. If it's a device
  1773. * initiated remote wake, don't pass up the link state change,
  1774. * so the roothub behavior is consistent with external
  1775. * USB 3.0 hub behavior.
  1776. */
  1777. if (port->slot_id && xhci->devs[port->slot_id])
  1778. xhci_ring_device(xhci, port->slot_id);
  1779. if (bus_state->port_remote_wakeup & (1 << hcd_portnum)) {
  1780. xhci_test_and_clear_bit(xhci, port, PORT_PLC);
  1781. usb_wakeup_notification(hcd->self.root_hub,
  1782. hcd_portnum + 1);
  1783. bogus_port_status = true;
  1784. goto cleanup;
  1785. }
  1786. }
  1787. /*
  1788. * Check to see if xhci-hub.c is waiting on RExit to U0 transition (or
  1789. * RExit to a disconnect state). If so, let the driver know it's
  1790. * out of the RExit state.
  1791. */
  1792. if (hcd->speed < HCD_USB3 && port->rexit_active) {
  1793. complete(&port->rexit_done);
  1794. port->rexit_active = false;
  1795. bogus_port_status = true;
  1796. goto cleanup;
  1797. }
  1798. if (hcd->speed < HCD_USB3) {
  1799. xhci_test_and_clear_bit(xhci, port, PORT_PLC);
  1800. if ((xhci->quirks & XHCI_RESET_PLL_ON_DISCONNECT) &&
  1801. (portsc & PORT_CSC) && !(portsc & PORT_CONNECT))
  1802. xhci_cavium_reset_phy_quirk(xhci);
  1803. }
  1804. cleanup:
  1805. /* Don't make the USB core poll the roothub if we got a bad port status
  1806. * change event. Besides, at that point we can't tell which roothub
  1807. * (USB 2.0 or USB 3.0) to kick.
  1808. */
  1809. if (bogus_port_status)
  1810. return;
  1811. /*
  1812. * xHCI port-status-change events occur when the "or" of all the
  1813. * status-change bits in the portsc register changes from 0 to 1.
  1814. * New status changes won't cause an event if any other change
  1815. * bits are still set. When an event occurs, switch over to
  1816. * polling to avoid losing status changes.
  1817. */
  1818. xhci_dbg(xhci, "%s: starting usb%d port polling.\n",
  1819. __func__, hcd->self.busnum);
  1820. set_bit(HCD_FLAG_POLL_RH, &hcd->flags);
  1821. spin_unlock(&xhci->lock);
  1822. /* Pass this up to the core */
  1823. usb_hcd_poll_rh_status(hcd);
  1824. spin_lock(&xhci->lock);
  1825. }
  1826. /*
  1827. * If the suspect DMA address is a TRB in this TD, this function returns that
  1828. * TRB's segment. Otherwise it returns 0.
  1829. */
  1830. struct xhci_segment *trb_in_td(struct xhci_hcd *xhci, struct xhci_td *td, dma_addr_t suspect_dma,
  1831. bool debug)
  1832. {
  1833. dma_addr_t start_dma;
  1834. dma_addr_t end_seg_dma;
  1835. dma_addr_t end_trb_dma;
  1836. struct xhci_segment *cur_seg;
  1837. start_dma = xhci_trb_virt_to_dma(td->start_seg, td->first_trb);
  1838. cur_seg = td->start_seg;
  1839. do {
  1840. if (start_dma == 0)
  1841. return NULL;
  1842. /* We may get an event for a Link TRB in the middle of a TD */
  1843. end_seg_dma = xhci_trb_virt_to_dma(cur_seg,
  1844. &cur_seg->trbs[TRBS_PER_SEGMENT - 1]);
  1845. /* If the end TRB isn't in this segment, this is set to 0 */
  1846. end_trb_dma = xhci_trb_virt_to_dma(cur_seg, td->last_trb);
  1847. if (debug)
  1848. xhci_warn(xhci,
  1849. "Looking for event-dma %016llx trb-start %016llx trb-end %016llx seg-start %016llx seg-end %016llx\n",
  1850. (unsigned long long)suspect_dma,
  1851. (unsigned long long)start_dma,
  1852. (unsigned long long)end_trb_dma,
  1853. (unsigned long long)cur_seg->dma,
  1854. (unsigned long long)end_seg_dma);
  1855. if (end_trb_dma > 0) {
  1856. /* The end TRB is in this segment, so suspect should be here */
  1857. if (start_dma <= end_trb_dma) {
  1858. if (suspect_dma >= start_dma && suspect_dma <= end_trb_dma)
  1859. return cur_seg;
  1860. } else {
  1861. /* Case for one segment with
  1862. * a TD wrapped around to the top
  1863. */
  1864. if ((suspect_dma >= start_dma &&
  1865. suspect_dma <= end_seg_dma) ||
  1866. (suspect_dma >= cur_seg->dma &&
  1867. suspect_dma <= end_trb_dma))
  1868. return cur_seg;
  1869. }
  1870. return NULL;
  1871. } else {
  1872. /* Might still be somewhere in this segment */
  1873. if (suspect_dma >= start_dma && suspect_dma <= end_seg_dma)
  1874. return cur_seg;
  1875. }
  1876. cur_seg = cur_seg->next;
  1877. start_dma = xhci_trb_virt_to_dma(cur_seg, &cur_seg->trbs[0]);
  1878. } while (cur_seg != td->start_seg);
  1879. return NULL;
  1880. }
  1881. static void xhci_clear_hub_tt_buffer(struct xhci_hcd *xhci, struct xhci_td *td,
  1882. struct xhci_virt_ep *ep)
  1883. {
  1884. /*
  1885. * As part of low/full-speed endpoint-halt processing
  1886. * we must clear the TT buffer (USB 2.0 specification 11.17.5).
  1887. */
  1888. if (td->urb->dev->tt && !usb_pipeint(td->urb->pipe) &&
  1889. (td->urb->dev->tt->hub != xhci_to_hcd(xhci)->self.root_hub) &&
  1890. !(ep->ep_state & EP_CLEARING_TT)) {
  1891. ep->ep_state |= EP_CLEARING_TT;
  1892. td->urb->ep->hcpriv = td->urb->dev;
  1893. if (usb_hub_clear_tt_buffer(td->urb))
  1894. ep->ep_state &= ~EP_CLEARING_TT;
  1895. }
  1896. }
  1897. /*
  1898. * Check if xhci internal endpoint state has gone to a "halt" state due to an
  1899. * error or stall, including default control pipe protocol stall.
  1900. * The internal halt needs to be cleared with a reset endpoint command.
  1901. *
  1902. * External device side is also halted in functional stall cases. Class driver
  1903. * will clear the device halt with a CLEAR_FEATURE(ENDPOINT_HALT) request later.
  1904. */
  1905. static bool xhci_halted_host_endpoint(struct xhci_ep_ctx *ep_ctx, unsigned int comp_code)
  1906. {
  1907. /* Stall halts both internal and device side endpoint */
  1908. if (comp_code == COMP_STALL_ERROR)
  1909. return true;
  1910. /* TRB completion codes that may require internal halt cleanup */
  1911. if (comp_code == COMP_USB_TRANSACTION_ERROR ||
  1912. comp_code == COMP_BABBLE_DETECTED_ERROR ||
  1913. comp_code == COMP_SPLIT_TRANSACTION_ERROR)
  1914. /*
  1915. * The 0.95 spec says a babbling control endpoint is not halted.
  1916. * The 0.96 spec says it is. Some HW claims to be 0.95
  1917. * compliant, but it halts the control endpoint anyway.
  1918. * Check the endpoint context to see if the endpoint is halted.
  1919. */
  1920. if (GET_EP_CTX_STATE(ep_ctx) == EP_STATE_HALTED)
  1921. return true;
  1922. return false;
  1923. }
  1924. int xhci_is_vendor_info_code(struct xhci_hcd *xhci, unsigned int trb_comp_code)
  1925. {
  1926. if (trb_comp_code >= 224 && trb_comp_code <= 255) {
  1927. /* Vendor defined "informational" completion code,
  1928. * treat as not-an-error.
  1929. */
  1930. xhci_dbg(xhci, "Vendor defined info completion code %u\n",
  1931. trb_comp_code);
  1932. xhci_dbg(xhci, "Treating code as success.\n");
  1933. return 1;
  1934. }
  1935. return 0;
  1936. }
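/*
 * Common completion handling for a transfer TD: leave stopped or halted TDs
 * for the stop/reset endpoint completion handlers, otherwise advance the ring
 * dequeue pointer past the TD and give it back.
 */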
  1937. static int finish_td(struct xhci_hcd *xhci, struct xhci_virt_ep *ep,
  1938. struct xhci_ring *ep_ring, struct xhci_td *td,
  1939. u32 trb_comp_code)
  1940. {
  1941. struct xhci_ep_ctx *ep_ctx;
  1942. ep_ctx = xhci_get_ep_ctx(xhci, ep->vdev->out_ctx, ep->ep_index);
  1943. switch (trb_comp_code) {
  1944. case COMP_STOPPED_LENGTH_INVALID:
  1945. case COMP_STOPPED_SHORT_PACKET:
  1946. case COMP_STOPPED:
  1947. /*
  1948. * The "Stop Endpoint" completion will take care of any
  1949. * stopped TDs. A stopped TD may be restarted, so don't update
  1950. * the ring dequeue pointer or take this TD off any lists yet.
  1951. */
  1952. return 0;
  1953. case COMP_USB_TRANSACTION_ERROR:
  1954. case COMP_BABBLE_DETECTED_ERROR:
  1955. case COMP_SPLIT_TRANSACTION_ERROR:
  1956. /*
  1957. * If endpoint context state is not halted we might be
  1958. * racing with a reset endpoint command issued by an unsuccessful
  1959. * stop endpoint completion (context error). In that case the
  1960. * td should be on the cancelled list, and EP_HALTED flag set.
  1961. *
  1962. * Alternatively, it's not halted because the 0.95 spec states that a
  1963. * babbling control endpoint should not halt. The 0.96 spec
  1964. * again says it should. Some HW claims to be 0.95 compliant,
  1965. * but it halts the control endpoint anyway.
  1966. */
  1967. if (GET_EP_CTX_STATE(ep_ctx) != EP_STATE_HALTED) {
  1968. /*
  1969. * If EP_HALTED is set and TD is on the cancelled list
  1970. * the TD and dequeue pointer will be handled by reset
  1971. * ep command completion
  1972. */
  1973. if ((ep->ep_state & EP_HALTED) &&
  1974. !list_empty(&td->cancelled_td_list)) {
  1975. xhci_dbg(xhci, "Already resolving halted ep for 0x%llx\n",
  1976. (unsigned long long)xhci_trb_virt_to_dma(
  1977. td->start_seg, td->first_trb));
  1978. return 0;
  1979. }
  1980. /* endpoint not halted, don't reset it */
  1981. break;
  1982. }
  1983. /* Almost same procedure as for STALL_ERROR below */
  1984. xhci_clear_hub_tt_buffer(xhci, td, ep);
  1985. xhci_handle_halted_endpoint(xhci, ep, td, EP_HARD_RESET);
  1986. return 0;
  1987. case COMP_STALL_ERROR:
  1988. /*
  1989. * xhci internal endpoint state will go to a "halt" state for
  1990. * any stall, including default control pipe protocol stall.
  1991. * To clear the host side halt we need to issue a reset endpoint
  1992. * command, followed by a set dequeue command to move past the
  1993. * TD.
  1994. * Class drivers clear the device side halt from a functional
  1995. * stall later. Hub TT buffer should only be cleared for FS/LS
  1996. * devices behind HS hubs for functional stalls.
  1997. */
  1998. if (ep->ep_index != 0)
  1999. xhci_clear_hub_tt_buffer(xhci, td, ep);
  2000. xhci_handle_halted_endpoint(xhci, ep, td, EP_HARD_RESET);
  2001. return 0; /* xhci_handle_halted_endpoint marked td cancelled */
  2002. default:
  2003. break;
  2004. }
  2005. /* Update ring dequeue pointer */
  2006. ep_ring->dequeue = td->last_trb;
  2007. ep_ring->deq_seg = td->last_trb_seg;
  2008. inc_deq(xhci, ep_ring);
  2009. return xhci_td_cleanup(xhci, td, ep_ring, td->status);
  2010. }
  2011. /* sum trb lengths from ring dequeue up to stop_trb, _excluding_ stop_trb */
  2012. static int sum_trb_lengths(struct xhci_hcd *xhci, struct xhci_ring *ring,
  2013. union xhci_trb *stop_trb)
  2014. {
  2015. u32 sum;
  2016. union xhci_trb *trb = ring->dequeue;
  2017. struct xhci_segment *seg = ring->deq_seg;
  2018. for (sum = 0; trb != stop_trb; next_trb(xhci, ring, &seg, &trb)) {
  2019. if (!trb_is_noop(trb) && !trb_is_link(trb))
  2020. sum += TRB_LEN(le32_to_cpu(trb->generic.field[2]));
  2021. }
  2022. return sum;
  2023. }
  2024. /*
  2025. * Process control tds, update urb status and actual_length.
  2026. */
  2027. static int process_ctrl_td(struct xhci_hcd *xhci, struct xhci_virt_ep *ep,
  2028. struct xhci_ring *ep_ring, struct xhci_td *td,
  2029. union xhci_trb *ep_trb, struct xhci_transfer_event *event)
  2030. {
  2031. struct xhci_ep_ctx *ep_ctx;
  2032. u32 trb_comp_code;
  2033. u32 remaining, requested;
  2034. u32 trb_type;
  2035. trb_type = TRB_FIELD_TO_TYPE(le32_to_cpu(ep_trb->generic.field[3]));
  2036. ep_ctx = xhci_get_ep_ctx(xhci, ep->vdev->out_ctx, ep->ep_index);
  2037. trb_comp_code = GET_COMP_CODE(le32_to_cpu(event->transfer_len));
  2038. requested = td->urb->transfer_buffer_length;
  2039. remaining = EVENT_TRB_LEN(le32_to_cpu(event->transfer_len));
  2040. switch (trb_comp_code) {
  2041. case COMP_SUCCESS:
  2042. if (trb_type != TRB_STATUS) {
  2043. xhci_warn(xhci, "WARN: Success on ctrl %s TRB without IOC set?\n",
  2044. (trb_type == TRB_DATA) ? "data" : "setup");
  2045. td->status = -ESHUTDOWN;
  2046. break;
  2047. }
  2048. td->status = 0;
  2049. break;
  2050. case COMP_SHORT_PACKET:
  2051. td->status = 0;
  2052. break;
  2053. case COMP_STOPPED_SHORT_PACKET:
  2054. if (trb_type == TRB_DATA || trb_type == TRB_NORMAL)
  2055. td->urb->actual_length = remaining;
  2056. else
  2057. xhci_warn(xhci, "WARN: Stopped Short Packet on ctrl setup or status TRB\n");
  2058. goto finish_td;
  2059. case COMP_STOPPED:
  2060. switch (trb_type) {
  2061. case TRB_SETUP:
  2062. td->urb->actual_length = 0;
  2063. goto finish_td;
  2064. case TRB_DATA:
  2065. case TRB_NORMAL:
  2066. td->urb->actual_length = requested - remaining;
  2067. goto finish_td;
  2068. case TRB_STATUS:
  2069. td->urb->actual_length = requested;
  2070. goto finish_td;
  2071. default:
  2072. xhci_warn(xhci, "WARN: unexpected TRB Type %d\n",
  2073. trb_type);
  2074. goto finish_td;
  2075. }
  2076. case COMP_STOPPED_LENGTH_INVALID:
  2077. goto finish_td;
  2078. default:
  2079. if (!xhci_halted_host_endpoint(ep_ctx, trb_comp_code))
  2080. break;
  2081. xhci_dbg(xhci, "TRB error %u, halted endpoint index = %u\n",
  2082. trb_comp_code, ep->ep_index);
  2083. fallthrough;
  2084. case COMP_STALL_ERROR:
  2085. /* Did we transfer part of the data (middle) phase? */
  2086. if (trb_type == TRB_DATA || trb_type == TRB_NORMAL)
  2087. td->urb->actual_length = requested - remaining;
  2088. else if (!td->urb_length_set)
  2089. td->urb->actual_length = 0;
  2090. goto finish_td;
  2091. }
  2092. /* stopped at setup stage, no data transferred */
  2093. if (trb_type == TRB_SETUP)
  2094. goto finish_td;
  2095. /*
  2096. * if on data stage then update the actual_length of the URB and flag it
  2097. * as set, so it won't be overwritten in the event for the last TRB.
  2098. */
  2099. if (trb_type == TRB_DATA ||
  2100. trb_type == TRB_NORMAL) {
  2101. td->urb_length_set = true;
  2102. td->urb->actual_length = requested - remaining;
  2103. xhci_dbg(xhci, "Waiting for status stage event\n");
  2104. return 0;
  2105. }
  2106. /* at status stage */
  2107. if (!td->urb_length_set)
  2108. td->urb->actual_length = requested;
  2109. finish_td:
  2110. return finish_td(xhci, ep, ep_ring, td, trb_comp_code);
  2111. }
  2112. /*
  2113. * Process isochronous tds, update urb packet status and actual_length.
  2114. */
  2115. static int process_isoc_td(struct xhci_hcd *xhci, struct xhci_virt_ep *ep,
  2116. struct xhci_ring *ep_ring, struct xhci_td *td,
  2117. union xhci_trb *ep_trb, struct xhci_transfer_event *event)
  2118. {
  2119. struct urb_priv *urb_priv;
  2120. int idx;
  2121. struct usb_iso_packet_descriptor *frame;
  2122. u32 trb_comp_code;
  2123. bool sum_trbs_for_length = false;
  2124. u32 remaining, requested, ep_trb_len;
  2125. int short_framestatus;
  2126. trb_comp_code = GET_COMP_CODE(le32_to_cpu(event->transfer_len));
  2127. urb_priv = td->urb->hcpriv;
  2128. idx = urb_priv->num_tds_done;
  2129. frame = &td->urb->iso_frame_desc[idx];
  2130. requested = frame->length;
  2131. remaining = EVENT_TRB_LEN(le32_to_cpu(event->transfer_len));
  2132. ep_trb_len = TRB_LEN(le32_to_cpu(ep_trb->generic.field[2]));
  2133. short_framestatus = td->urb->transfer_flags & URB_SHORT_NOT_OK ?
  2134. -EREMOTEIO : 0;
  2135. /* handle completion code */
  2136. switch (trb_comp_code) {
  2137. case COMP_SUCCESS:
  2138. /* Don't overwrite status if TD had an error, see xHCI 4.9.1 */
  2139. if (td->error_mid_td)
  2140. break;
  2141. if (remaining) {
  2142. frame->status = short_framestatus;
  2143. sum_trbs_for_length = true;
  2144. break;
  2145. }
  2146. frame->status = 0;
  2147. break;
  2148. case COMP_SHORT_PACKET:
  2149. frame->status = short_framestatus;
  2150. sum_trbs_for_length = true;
  2151. break;
  2152. case COMP_BANDWIDTH_OVERRUN_ERROR:
  2153. frame->status = -ECOMM;
  2154. break;
  2155. case COMP_BABBLE_DETECTED_ERROR:
  2156. sum_trbs_for_length = true;
  2157. fallthrough;
  2158. case COMP_ISOCH_BUFFER_OVERRUN:
  2159. frame->status = -EOVERFLOW;
  2160. if (ep_trb != td->last_trb)
  2161. td->error_mid_td = true;
  2162. break;
  2163. case COMP_INCOMPATIBLE_DEVICE_ERROR:
  2164. case COMP_STALL_ERROR:
  2165. frame->status = -EPROTO;
  2166. break;
  2167. case COMP_USB_TRANSACTION_ERROR:
  2168. frame->status = -EPROTO;
  2169. sum_trbs_for_length = true;
  2170. if (ep_trb != td->last_trb)
  2171. td->error_mid_td = true;
  2172. break;
  2173. case COMP_STOPPED:
  2174. sum_trbs_for_length = true;
  2175. break;
  2176. case COMP_STOPPED_SHORT_PACKET:
  2177. /* the field normally containing the residue now contains the transferred length */
  2178. frame->status = short_framestatus;
  2179. requested = remaining;
  2180. break;
  2181. case COMP_STOPPED_LENGTH_INVALID:
  2182. /* exclude stopped trb with invalid length from length sum */
  2183. sum_trbs_for_length = true;
  2184. ep_trb_len = 0;
  2185. remaining = 0;
  2186. break;
  2187. default:
  2188. sum_trbs_for_length = true;
  2189. frame->status = -1;
  2190. break;
  2191. }
  2192. if (td->urb_length_set)
  2193. goto finish_td;
  2194. if (sum_trbs_for_length)
  2195. frame->actual_length = sum_trb_lengths(xhci, ep->ring, ep_trb) +
  2196. ep_trb_len - remaining;
  2197. else
  2198. frame->actual_length = requested;
  2199. td->urb->actual_length += frame->actual_length;
  2200. finish_td:
  2201. /* Don't give back TD yet if we encountered an error mid TD */
  2202. if (td->error_mid_td && ep_trb != td->last_trb) {
  2203. xhci_dbg(xhci, "Error mid isoc TD, wait for final completion event\n");
  2204. td->urb_length_set = true;
  2205. return 0;
  2206. }
  2207. return finish_td(xhci, ep, ep_ring, td, trb_comp_code);
  2208. }
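/*
 * Give back an isoc TD that the xHC skipped (e.g. after a missed service
 * interval), marking its frame -EXDEV with zero actual length.
 */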
  2209. static int skip_isoc_td(struct xhci_hcd *xhci, struct xhci_td *td,
  2210. struct xhci_virt_ep *ep, int status)
  2211. {
  2212. struct urb_priv *urb_priv;
  2213. struct usb_iso_packet_descriptor *frame;
  2214. int idx;
  2215. urb_priv = td->urb->hcpriv;
  2216. idx = urb_priv->num_tds_done;
  2217. frame = &td->urb->iso_frame_desc[idx];
  2218. /* The transfer is partly done. */
  2219. frame->status = -EXDEV;
  2220. /* calc actual length */
  2221. frame->actual_length = 0;
  2222. /* Update ring dequeue pointer */
  2223. ep->ring->dequeue = td->last_trb;
  2224. ep->ring->deq_seg = td->last_trb_seg;
  2225. inc_deq(xhci, ep->ring);
  2226. return xhci_td_cleanup(xhci, td, ep->ring, status);
  2227. }
  2228. /*
  2229. * Process bulk and interrupt tds, update urb status and actual_length.
  2230. */
  2231. static int process_bulk_intr_td(struct xhci_hcd *xhci, struct xhci_virt_ep *ep,
  2232. struct xhci_ring *ep_ring, struct xhci_td *td,
  2233. union xhci_trb *ep_trb, struct xhci_transfer_event *event)
  2234. {
  2235. struct xhci_slot_ctx *slot_ctx;
  2236. u32 trb_comp_code;
  2237. u32 remaining, requested, ep_trb_len;
  2238. slot_ctx = xhci_get_slot_ctx(xhci, ep->vdev->out_ctx);
  2239. trb_comp_code = GET_COMP_CODE(le32_to_cpu(event->transfer_len));
  2240. remaining = EVENT_TRB_LEN(le32_to_cpu(event->transfer_len));
  2241. ep_trb_len = TRB_LEN(le32_to_cpu(ep_trb->generic.field[2]));
  2242. requested = td->urb->transfer_buffer_length;
  2243. switch (trb_comp_code) {
  2244. case COMP_SUCCESS:
  2245. ep->err_count = 0;
  2246. /* handle success with untransferred data as short packet */
  2247. if (ep_trb != td->last_trb || remaining) {
  2248. xhci_warn(xhci, "WARN Successful completion on short TX\n");
  2249. xhci_dbg(xhci, "ep %#x - asked for %d bytes, %d bytes untransferred\n",
  2250. td->urb->ep->desc.bEndpointAddress,
  2251. requested, remaining);
  2252. }
  2253. td->status = 0;
  2254. break;
  2255. case COMP_SHORT_PACKET:
  2256. td->status = 0;
  2257. break;
  2258. case COMP_STOPPED_SHORT_PACKET:
  2259. td->urb->actual_length = remaining;
  2260. goto finish_td;
  2261. case COMP_STOPPED_LENGTH_INVALID:
  2262. /* stopped on ep trb with invalid length, exclude it */
  2263. td->urb->actual_length = sum_trb_lengths(xhci, ep_ring, ep_trb);
  2264. goto finish_td;
  2265. case COMP_USB_TRANSACTION_ERROR:
  2266. if (xhci->quirks & XHCI_NO_SOFT_RETRY ||
  2267. (ep->err_count++ > MAX_SOFT_RETRY) ||
  2268. le32_to_cpu(slot_ctx->tt_info) & TT_SLOT)
  2269. break;
  2270. td->status = 0;
  2271. xhci_handle_halted_endpoint(xhci, ep, td, EP_SOFT_RESET);
  2272. return 0;
  2273. default:
  2274. /* do nothing */
  2275. break;
  2276. }
  2277. if (ep_trb == td->last_trb)
  2278. td->urb->actual_length = requested - remaining;
  2279. else
  2280. td->urb->actual_length =
  2281. sum_trb_lengths(xhci, ep_ring, ep_trb) +
  2282. ep_trb_len - remaining;
  2283. finish_td:
  2284. if (remaining > requested) {
  2285. xhci_warn(xhci, "bad transfer trb length %d in event trb\n",
  2286. remaining);
  2287. td->urb->actual_length = 0;
  2288. }
  2289. return finish_td(xhci, ep, ep_ring, td, trb_comp_code);
  2290. }
  2291. /* Transfer events which don't point to a transfer TRB, see xhci 4.17.4 */
  2292. static int handle_transferless_tx_event(struct xhci_hcd *xhci, struct xhci_virt_ep *ep,
  2293. u32 trb_comp_code)
  2294. {
  2295. switch (trb_comp_code) {
  2296. case COMP_STALL_ERROR:
  2297. case COMP_USB_TRANSACTION_ERROR:
  2298. case COMP_INVALID_STREAM_TYPE_ERROR:
  2299. case COMP_INVALID_STREAM_ID_ERROR:
  2300. xhci_dbg(xhci, "Stream transaction error ep %u no id\n", ep->ep_index);
  2301. if (ep->err_count++ > MAX_SOFT_RETRY)
  2302. xhci_handle_halted_endpoint(xhci, ep, NULL, EP_HARD_RESET);
  2303. else
  2304. xhci_handle_halted_endpoint(xhci, ep, NULL, EP_SOFT_RESET);
  2305. break;
  2306. case COMP_RING_UNDERRUN:
  2307. case COMP_RING_OVERRUN:
  2308. case COMP_STOPPED_LENGTH_INVALID:
  2309. break;
  2310. default:
  2311. xhci_err(xhci, "Transfer event %u for unknown stream ring slot %u ep %u\n",
  2312. trb_comp_code, ep->vdev->slot_id, ep->ep_index);
  2313. return -ENODEV;
  2314. }
  2315. return 0;
  2316. }
  2317. /*
  2318. * If this function returns an error condition, it means it got a Transfer
  2319. * event with a corrupted Slot ID, Endpoint ID, or TRB DMA address.
  2320. * At this point, the host controller is probably hosed and should be reset.
  2321. */
  2322. static int handle_tx_event(struct xhci_hcd *xhci,
  2323. struct xhci_interrupter *ir,
  2324. struct xhci_transfer_event *event)
  2325. {
  2326. struct xhci_virt_ep *ep;
  2327. struct xhci_ring *ep_ring;
  2328. unsigned int slot_id;
  2329. int ep_index;
  2330. struct xhci_td *td = NULL;
  2331. dma_addr_t ep_trb_dma;
  2332. struct xhci_segment *ep_seg;
  2333. union xhci_trb *ep_trb;
  2334. int status = -EINPROGRESS;
  2335. struct xhci_ep_ctx *ep_ctx;
  2336. u32 trb_comp_code;
  2337. slot_id = TRB_TO_SLOT_ID(le32_to_cpu(event->flags));
  2338. ep_index = TRB_TO_EP_ID(le32_to_cpu(event->flags)) - 1;
  2339. trb_comp_code = GET_COMP_CODE(le32_to_cpu(event->transfer_len));
  2340. ep_trb_dma = le64_to_cpu(event->buffer);
  2341. ep = xhci_get_virt_ep(xhci, slot_id, ep_index);
  2342. if (!ep) {
  2343. xhci_err(xhci, "ERROR Invalid Transfer event\n");
  2344. goto err_out;
  2345. }
  2346. ep_ring = xhci_dma_to_transfer_ring(ep, ep_trb_dma);
  2347. ep_ctx = xhci_get_ep_ctx(xhci, ep->vdev->out_ctx, ep_index);
  2348. if (GET_EP_CTX_STATE(ep_ctx) == EP_STATE_DISABLED) {
  2349. xhci_err(xhci,
  2350. "ERROR Transfer event for disabled endpoint slot %u ep %u\n",
  2351. slot_id, ep_index);
  2352. goto err_out;
  2353. }
  2354. if (!ep_ring)
  2355. return handle_transferless_tx_event(xhci, ep, trb_comp_code);
  2356. /* Look for common error cases */
  2357. switch (trb_comp_code) {
  2358. /* Skip codes that require special handling depending on
  2359. * transfer type
  2360. */
  2361. case COMP_SUCCESS:
  2362. if (EVENT_TRB_LEN(le32_to_cpu(event->transfer_len)) != 0) {
  2363. trb_comp_code = COMP_SHORT_PACKET;
  2364. xhci_dbg(xhci, "Successful completion on short TX for slot %u ep %u with last td short %d\n",
  2365. slot_id, ep_index, ep_ring->last_td_was_short);
  2366. }
  2367. break;
  2368. case COMP_SHORT_PACKET:
  2369. break;
  2370. /* Completion codes for endpoint stopped state */
  2371. case COMP_STOPPED:
  2372. xhci_dbg(xhci, "Stopped on Transfer TRB for slot %u ep %u\n",
  2373. slot_id, ep_index);
  2374. break;
  2375. case COMP_STOPPED_LENGTH_INVALID:
  2376. xhci_dbg(xhci,
  2377. "Stopped on No-op or Link TRB for slot %u ep %u\n",
  2378. slot_id, ep_index);
  2379. break;
  2380. case COMP_STOPPED_SHORT_PACKET:
  2381. xhci_dbg(xhci,
  2382. "Stopped with short packet transfer detected for slot %u ep %u\n",
  2383. slot_id, ep_index);
  2384. break;
  2385. /* Completion codes for endpoint halted state */
  2386. case COMP_STALL_ERROR:
  2387. xhci_dbg(xhci, "Stalled endpoint for slot %u ep %u\n", slot_id,
  2388. ep_index);
  2389. status = -EPIPE;
  2390. break;
  2391. case COMP_SPLIT_TRANSACTION_ERROR:
  2392. xhci_dbg(xhci, "Split transaction error for slot %u ep %u\n",
  2393. slot_id, ep_index);
  2394. status = -EPROTO;
  2395. break;
  2396. case COMP_USB_TRANSACTION_ERROR:
  2397. xhci_dbg(xhci, "Transfer error for slot %u ep %u on endpoint\n",
  2398. slot_id, ep_index);
  2399. status = -EPROTO;
  2400. break;
  2401. case COMP_BABBLE_DETECTED_ERROR:
  2402. xhci_dbg(xhci, "Babble error for slot %u ep %u on endpoint\n",
  2403. slot_id, ep_index);
  2404. status = -EOVERFLOW;
  2405. break;
  2406. /* Completion codes for endpoint error state */
  2407. case COMP_TRB_ERROR:
  2408. xhci_warn(xhci,
  2409. "WARN: TRB error for slot %u ep %u on endpoint\n",
  2410. slot_id, ep_index);
  2411. status = -EILSEQ;
  2412. break;
  2413. /* completion codes not indicating endpoint state change */
  2414. case COMP_DATA_BUFFER_ERROR:
  2415. xhci_warn(xhci,
  2416. "WARN: HC couldn't access mem fast enough for slot %u ep %u\n",
  2417. slot_id, ep_index);
  2418. status = -ENOSR;
  2419. break;
  2420. case COMP_BANDWIDTH_OVERRUN_ERROR:
  2421. xhci_warn(xhci,
  2422. "WARN: bandwidth overrun event for slot %u ep %u on endpoint\n",
  2423. slot_id, ep_index);
  2424. break;
  2425. case COMP_ISOCH_BUFFER_OVERRUN:
  2426. xhci_warn(xhci,
  2427. "WARN: buffer overrun event for slot %u ep %u on endpoint",
  2428. slot_id, ep_index);
  2429. break;
  2430. case COMP_RING_UNDERRUN:
/*
 * When the Isoch ring is empty, the xHC will generate
 * a Ring Overrun Event for an IN Isoch endpoint or a Ring
 * Underrun Event for an OUT Isoch endpoint.
 */
  2436. xhci_dbg(xhci, "Underrun event on slot %u ep %u\n", slot_id, ep_index);
  2437. if (ep->skip)
  2438. break;
  2439. return 0;
  2440. case COMP_RING_OVERRUN:
  2441. xhci_dbg(xhci, "Overrun event on slot %u ep %u\n", slot_id, ep_index);
  2442. if (ep->skip)
  2443. break;
  2444. return 0;
  2445. case COMP_MISSED_SERVICE_ERROR:
/*
 * When a Missed Service Error is encountered, one or more isoc TDs
 * may have been missed by the xHC.
 * Set the skip flag of the endpoint; complete the missed TDs as
 * short transfers the next time the ring is processed.
 */
  2452. ep->skip = true;
  2453. xhci_dbg(xhci,
  2454. "Miss service interval error for slot %u ep %u, set skip flag\n",
  2455. slot_id, ep_index);
  2456. return 0;
  2457. case COMP_NO_PING_RESPONSE_ERROR:
  2458. ep->skip = true;
  2459. xhci_dbg(xhci,
  2460. "No Ping response error for slot %u ep %u, Skip one Isoc TD\n",
  2461. slot_id, ep_index);
  2462. return 0;
  2463. case COMP_INCOMPATIBLE_DEVICE_ERROR:
  2464. /* needs disable slot command to recover */
  2465. xhci_warn(xhci,
  2466. "WARN: detect an incompatible device for slot %u ep %u",
  2467. slot_id, ep_index);
  2468. status = -EPROTO;
  2469. break;
  2470. default:
  2471. if (xhci_is_vendor_info_code(xhci, trb_comp_code)) {
  2472. status = 0;
  2473. break;
  2474. }
  2475. xhci_warn(xhci,
  2476. "ERROR Unknown event condition %u for slot %u ep %u , HC probably busted\n",
  2477. trb_comp_code, slot_id, ep_index);
  2478. if (ep->skip)
  2479. break;
  2480. return 0;
  2481. }
/*
 * xhci 4.10.2 states isoc endpoints should continue
 * processing the next TD if there was an error mid TD.
 * So hosts like NEC don't generate an event for the last
 * isoc TRB even if the IOC flag is set.
 * xhci 4.9.1 states that if there are errors in multi-TRB
 * TDs the xHC should generate an error for that TRB, and if it
 * proceeds to the next TD it should generate an event for
 * any TRB with the IOC flag on the way. Other hosts follow this.
 *
 * We wait for the final IOC event, but if we get an event
 * anywhere outside this TD, just give it back already.
 */
  2495. td = list_first_entry_or_null(&ep_ring->td_list, struct xhci_td, td_list);
  2496. if (td && td->error_mid_td && !trb_in_td(xhci, td, ep_trb_dma, false)) {
  2497. xhci_dbg(xhci, "Missing TD completion event after mid TD error\n");
  2498. ep_ring->dequeue = td->last_trb;
  2499. ep_ring->deq_seg = td->last_trb_seg;
  2500. inc_deq(xhci, ep_ring);
  2501. xhci_td_cleanup(xhci, td, ep_ring, td->status);
  2502. }
  2503. if (list_empty(&ep_ring->td_list)) {
/*
 * Don't print warnings if the ring is empty due to a stopped endpoint
 * generating an extra completion event while the device was suspended,
 * or an event for the last TRB of a short TD we already got a short
 * event for. The short TD is already removed from the TD list.
 */
  2510. if (trb_comp_code != COMP_STOPPED &&
  2511. trb_comp_code != COMP_STOPPED_LENGTH_INVALID &&
  2512. !ep_ring->last_td_was_short) {
  2513. xhci_warn(xhci, "Event TRB for slot %u ep %u with no TDs queued\n",
  2514. slot_id, ep_index);
  2515. }
  2516. ep->skip = false;
  2517. goto check_endpoint_halted;
  2518. }
  2519. do {
  2520. td = list_first_entry(&ep_ring->td_list, struct xhci_td,
  2521. td_list);
  2522. /* Is this a TRB in the currently executing TD? */
  2523. ep_seg = trb_in_td(xhci, td, ep_trb_dma, false);
  2524. if (!ep_seg) {
  2525. if (ep->skip && usb_endpoint_xfer_isoc(&td->urb->ep->desc)) {
  2526. skip_isoc_td(xhci, td, ep, status);
  2527. if (!list_empty(&ep_ring->td_list))
  2528. continue;
  2529. xhci_dbg(xhci, "All TDs skipped for slot %u ep %u. Clear skip flag.\n",
  2530. slot_id, ep_index);
  2531. ep->skip = false;
  2532. td = NULL;
  2533. goto check_endpoint_halted;
  2534. }
/*
 * Skip the Force Stopped Event. The 'ep_trb' of the FSE is not in the
 * current TD pointed to by 'ep_ring->dequeue' because the hardware
 * dequeue pointer is still at the previous TRB of the current TD. The
 * previous TRB may be a Link TRB or the last TRB of the previous TD.
 * The command completion handler will take care of the rest.
 */
  2542. if (trb_comp_code == COMP_STOPPED ||
  2543. trb_comp_code == COMP_STOPPED_LENGTH_INVALID) {
  2544. return 0;
  2545. }
  2546. /*
  2547. * Some hosts give a spurious success event after a short
  2548. * transfer. Ignore it.
  2549. */
  2550. if ((xhci->quirks & XHCI_SPURIOUS_SUCCESS) &&
  2551. ep_ring->last_td_was_short) {
  2552. ep_ring->last_td_was_short = false;
  2553. return 0;
  2554. }
  2555. /* HC is busted, give up! */
  2556. xhci_err(xhci,
  2557. "ERROR Transfer event TRB DMA ptr not part of current TD ep_index %d comp_code %u\n",
  2558. ep_index, trb_comp_code);
  2559. trb_in_td(xhci, td, ep_trb_dma, true);
  2560. return -ESHUTDOWN;
  2561. }
  2562. if (ep->skip) {
  2563. xhci_dbg(xhci,
  2564. "Found td. Clear skip flag for slot %u ep %u.\n",
  2565. slot_id, ep_index);
  2566. ep->skip = false;
  2567. }
/*
 * If ep->skip is set, it means there are missed TDs on the endpoint
 * ring that need to be taken care of.
 * Process them as short transfers until we reach the TD pointed to by
 * the event.
 */
  2574. } while (ep->skip);
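/*
 * Remember whether this TD ended short: it is used to suppress the
 * "no TDs queued" warning and to drop the spurious Success event some
 * hosts send after a short transfer.
 */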
  2575. if (trb_comp_code == COMP_SHORT_PACKET)
  2576. ep_ring->last_td_was_short = true;
  2577. else
  2578. ep_ring->last_td_was_short = false;
  2579. ep_trb = &ep_seg->trbs[(ep_trb_dma - ep_seg->dma) / sizeof(*ep_trb)];
  2580. trace_xhci_handle_transfer(ep_ring, (struct xhci_generic_trb *) ep_trb);
  2581. /*
  2582. * No-op TRB could trigger interrupts in a case where a URB was killed
  2583. * and a STALL_ERROR happens right after the endpoint ring stopped.
  2584. * Reset the halted endpoint. Otherwise, the endpoint remains stalled
  2585. * indefinitely.
  2586. */
  2587. if (trb_is_noop(ep_trb))
  2588. goto check_endpoint_halted;
  2589. td->status = status;
  2590. /* update the urb's actual_length and give back to the core */
  2591. if (usb_endpoint_xfer_control(&td->urb->ep->desc))
  2592. process_ctrl_td(xhci, ep, ep_ring, td, ep_trb, event);
  2593. else if (usb_endpoint_xfer_isoc(&td->urb->ep->desc))
  2594. process_isoc_td(xhci, ep, ep_ring, td, ep_trb, event);
  2595. else
  2596. process_bulk_intr_td(xhci, ep, ep_ring, td, ep_trb, event);
  2597. return 0;
  2598. check_endpoint_halted:
  2599. if (xhci_halted_host_endpoint(ep_ctx, trb_comp_code))
  2600. xhci_handle_halted_endpoint(xhci, ep, td, EP_HARD_RESET);
  2601. return 0;
  2602. err_out:
  2603. xhci_err(xhci, "@%016llx %08x %08x %08x %08x\n",
  2604. (unsigned long long) xhci_trb_virt_to_dma(
  2605. ir->event_ring->deq_seg,
  2606. ir->event_ring->dequeue),
  2607. lower_32_bits(le64_to_cpu(event->buffer)),
  2608. upper_32_bits(le64_to_cpu(event->buffer)),
  2609. le32_to_cpu(event->transfer_len),
  2610. le32_to_cpu(event->flags));
  2611. return -ENODEV;
  2612. }
  2613. /*
  2614. * This function handles one OS-owned event on the event ring. It may drop
  2615. * xhci->lock between event processing (e.g. to pass up port status changes).
  2616. */
  2617. static int xhci_handle_event_trb(struct xhci_hcd *xhci, struct xhci_interrupter *ir,
  2618. union xhci_trb *event)
  2619. {
  2620. u32 trb_type;
  2621. trace_xhci_handle_event(ir->event_ring, &event->generic);
  2622. /*
  2623. * Barrier between reading the TRB_CYCLE (valid) flag before, and any
  2624. * speculative reads of the event's flags/data below.
  2625. */
  2626. rmb();
  2627. trb_type = TRB_FIELD_TO_TYPE(le32_to_cpu(event->event_cmd.flags));
  2628. /* FIXME: Handle more event types. */
  2629. switch (trb_type) {
  2630. case TRB_COMPLETION:
  2631. handle_cmd_completion(xhci, &event->event_cmd);
  2632. break;
  2633. case TRB_PORT_STATUS:
  2634. handle_port_status(xhci, event);
  2635. break;
  2636. case TRB_TRANSFER:
  2637. handle_tx_event(xhci, ir, &event->trans_event);
  2638. break;
  2639. case TRB_DEV_NOTE:
  2640. handle_device_notification(xhci, event);
  2641. break;
  2642. default:
  2643. if (trb_type >= TRB_VENDOR_DEFINED_LOW)
  2644. handle_vendor_event(xhci, event, trb_type);
  2645. else
  2646. xhci_warn(xhci, "ERROR unknown event type %d\n", trb_type);
  2647. }
  2648. /* Any of the above functions may drop and re-acquire the lock, so check
  2649. * to make sure a watchdog timer didn't mark the host as non-responsive.
  2650. */
  2651. if (xhci->xhc_state & XHCI_STATE_DYING) {
  2652. xhci_dbg(xhci, "xHCI host dying, returning from event handler.\n");
  2653. return -ENODEV;
  2654. }
  2655. return 0;
  2656. }
  2657. /*
  2658. * Update Event Ring Dequeue Pointer:
  2659. * - When all events have finished
  2660. * - To avoid "Event Ring Full Error" condition
  2661. */
  2662. static void xhci_update_erst_dequeue(struct xhci_hcd *xhci,
  2663. struct xhci_interrupter *ir,
  2664. bool clear_ehb)
  2665. {
  2666. u64 temp_64;
  2667. dma_addr_t deq;
  2668. temp_64 = xhci_read_64(xhci, &ir->ir_set->erst_dequeue);
  2669. deq = xhci_trb_virt_to_dma(ir->event_ring->deq_seg,
  2670. ir->event_ring->dequeue);
  2671. if (deq == 0)
  2672. xhci_warn(xhci, "WARN something wrong with SW event ring dequeue ptr\n");
  2673. /*
  2674. * Per 4.9.4, Software writes to the ERDP register shall always advance
  2675. * the Event Ring Dequeue Pointer value.
  2676. */
  2677. if ((temp_64 & ERST_PTR_MASK) == (deq & ERST_PTR_MASK) && !clear_ehb)
  2678. return;
  2679. /* Update HC event ring dequeue pointer */
  2680. temp_64 = ir->event_ring->deq_seg->num & ERST_DESI_MASK;
  2681. temp_64 |= deq & ERST_PTR_MASK;
  2682. /* Clear the event handler busy flag (RW1C) */
  2683. if (clear_ehb)
  2684. temp_64 |= ERST_EHB;
  2685. xhci_write_64(xhci, temp_64, &ir->ir_set->erst_dequeue);
  2686. }
  2687. /* Clear the interrupt pending bit for a specific interrupter. */
  2688. static void xhci_clear_interrupt_pending(struct xhci_interrupter *ir)
  2689. {
  2690. if (!ir->ip_autoclear) {
  2691. u32 irq_pending;
  2692. irq_pending = readl(&ir->ir_set->irq_pending);
  2693. irq_pending |= IMAN_IP;
  2694. writel(irq_pending, &ir->ir_set->irq_pending);
  2695. }
  2696. }
/*
 * Handle all OS-owned events on an interrupter event ring. It may drop
 * and reacquire xhci->lock while processing events.
 */
  2701. static int xhci_handle_events(struct xhci_hcd *xhci, struct xhci_interrupter *ir)
  2702. {
  2703. int event_loop = 0;
  2704. int err;
  2705. u64 temp;
  2706. xhci_clear_interrupt_pending(ir);
  2707. /* Event ring hasn't been allocated yet. */
  2708. if (!ir->event_ring || !ir->event_ring->dequeue) {
  2709. xhci_err(xhci, "ERROR interrupter event ring not ready\n");
  2710. return -ENOMEM;
  2711. }
  2712. if (xhci->xhc_state & XHCI_STATE_DYING ||
  2713. xhci->xhc_state & XHCI_STATE_HALTED) {
  2714. xhci_dbg(xhci, "xHCI dying, ignoring interrupt. Shouldn't IRQs be disabled?\n");
  2715. /* Clear the event handler busy flag (RW1C) */
  2716. temp = xhci_read_64(xhci, &ir->ir_set->erst_dequeue);
  2717. xhci_write_64(xhci, temp | ERST_EHB, &ir->ir_set->erst_dequeue);
  2718. return -ENODEV;
  2719. }
  2720. /* Process all OS owned event TRBs on this event ring */
  2721. while (unhandled_event_trb(ir->event_ring)) {
  2722. err = xhci_handle_event_trb(xhci, ir, ir->event_ring->dequeue);
/*
 * If half a segment of events has been handled in one go then
 * update the ERDP, and force isoc TRBs to interrupt more often.
 */
  2727. if (event_loop++ > TRBS_PER_SEGMENT / 2) {
  2728. xhci_update_erst_dequeue(xhci, ir, false);
  2729. if (ir->isoc_bei_interval > AVOID_BEI_INTERVAL_MIN)
  2730. ir->isoc_bei_interval = ir->isoc_bei_interval / 2;
  2731. event_loop = 0;
  2732. }
  2733. /* Update SW event ring dequeue pointer */
  2734. inc_deq(xhci, ir->event_ring);
  2735. if (err)
  2736. break;
  2737. }
  2738. xhci_update_erst_dequeue(xhci, ir, true);
  2739. return 0;
  2740. }
  2741. /*
  2742. * xHCI spec says we can get an interrupt, and if the HC has an error condition,
  2743. * we might get bad data out of the event ring. Section 4.10.2.7 has a list of
  2744. * indicators of an event TRB error, but we check the status *first* to be safe.
  2745. */
  2746. irqreturn_t xhci_irq(struct usb_hcd *hcd)
  2747. {
  2748. struct xhci_hcd *xhci = hcd_to_xhci(hcd);
  2749. irqreturn_t ret = IRQ_HANDLED;
  2750. u32 status;
  2751. spin_lock(&xhci->lock);
  2752. /* Check if the xHC generated the interrupt, or the irq is shared */
  2753. status = readl(&xhci->op_regs->status);
  2754. if (status == ~(u32)0) {
  2755. xhci_hc_died(xhci);
  2756. goto out;
  2757. }
  2758. if (!(status & STS_EINT)) {
  2759. ret = IRQ_NONE;
  2760. goto out;
  2761. }
  2762. if (status & STS_HCE) {
  2763. xhci_warn(xhci, "WARNING: Host Controller Error\n");
  2764. goto out;
  2765. }
  2766. if (status & STS_FATAL) {
  2767. xhci_warn(xhci, "WARNING: Host System Error\n");
  2768. xhci_halt(xhci);
  2769. goto out;
  2770. }
  2771. /*
  2772. * Clear the op reg interrupt status first,
  2773. * so we can receive interrupts from other MSI-X interrupters.
  2774. * Write 1 to clear the interrupt status.
  2775. */
  2776. status |= STS_EINT;
  2777. writel(status, &xhci->op_regs->status);
  2778. /* This is the handler of the primary interrupter */
  2779. xhci_handle_events(xhci, xhci->interrupters[0]);
  2780. out:
  2781. spin_unlock(&xhci->lock);
  2782. return ret;
  2783. }
  2784. irqreturn_t xhci_msi_irq(int irq, void *hcd)
  2785. {
  2786. return xhci_irq(hcd);
  2787. }
  2788. EXPORT_SYMBOL_GPL(xhci_msi_irq);
  2789. /**** Endpoint Ring Operations ****/
  2790. /*
  2791. * Generic function for queueing a TRB on a ring.
  2792. * The caller must have checked to make sure there's room on the ring.
  2793. *
  2794. * @more_trbs_coming: Will you enqueue more TRBs before calling
  2795. * prepare_transfer()?
  2796. */
  2797. static void queue_trb(struct xhci_hcd *xhci, struct xhci_ring *ring,
  2798. bool more_trbs_coming,
  2799. u32 field1, u32 field2, u32 field3, u32 field4)
  2800. {
  2801. struct xhci_generic_trb *trb;
  2802. trb = &ring->enqueue->generic;
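/*
 * field4 carries the TRB type, control bits and the cycle (ownership)
 * bit, so field[3] is written only after the barrier below.
 */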
  2803. trb->field[0] = cpu_to_le32(field1);
  2804. trb->field[1] = cpu_to_le32(field2);
  2805. trb->field[2] = cpu_to_le32(field3);
  2806. /* make sure TRB is fully written before giving it to the controller */
  2807. wmb();
  2808. trb->field[3] = cpu_to_le32(field4);
  2809. trace_xhci_queue_trb(ring, trb);
  2810. inc_enq(xhci, ring, more_trbs_coming);
  2811. }
/*
 * Do various checks on the endpoint ring and make it ready to queue num_trbs.
 * Expand the ring if it starts to become full.
 */
  2816. static int prepare_ring(struct xhci_hcd *xhci, struct xhci_ring *ep_ring,
  2817. u32 ep_state, unsigned int num_trbs, gfp_t mem_flags)
  2818. {
  2819. unsigned int link_trb_count = 0;
  2820. unsigned int new_segs = 0;
  2821. /* Make sure the endpoint has been added to xHC schedule */
  2822. switch (ep_state) {
  2823. case EP_STATE_DISABLED:
  2824. /*
  2825. * USB core changed config/interfaces without notifying us,
  2826. * or hardware is reporting the wrong state.
  2827. */
  2828. xhci_warn(xhci, "WARN urb submitted to disabled ep\n");
  2829. return -ENOENT;
  2830. case EP_STATE_ERROR:
  2831. xhci_warn(xhci, "WARN waiting for error on ep to be cleared\n");
  2832. /* FIXME event handling code for error needs to clear it */
  2833. /* XXX not sure if this should be -ENOENT or not */
  2834. return -EINVAL;
  2835. case EP_STATE_HALTED:
  2836. xhci_dbg(xhci, "WARN halted endpoint, queueing URB anyway.\n");
  2837. break;
  2838. case EP_STATE_STOPPED:
  2839. case EP_STATE_RUNNING:
  2840. break;
  2841. default:
  2842. xhci_err(xhci, "ERROR unknown endpoint state for ep\n");
  2843. /*
  2844. * FIXME issue Configure Endpoint command to try to get the HC
  2845. * back into a known state.
  2846. */
  2847. return -EINVAL;
  2848. }
  2849. if (ep_ring != xhci->cmd_ring) {
  2850. new_segs = xhci_ring_expansion_needed(xhci, ep_ring, num_trbs);
  2851. } else if (xhci_num_trbs_free(ep_ring) <= num_trbs) {
  2852. xhci_err(xhci, "Do not support expand command ring\n");
  2853. return -ENOMEM;
  2854. }
  2855. if (new_segs) {
  2856. xhci_dbg_trace(xhci, trace_xhci_dbg_ring_expansion,
  2857. "ERROR no room on ep ring, try ring expansion");
  2858. if (xhci_ring_expansion(xhci, ep_ring, new_segs, mem_flags)) {
  2859. xhci_err(xhci, "Ring expansion failed\n");
  2860. return -ENOMEM;
  2861. }
  2862. }
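/*
 * If the enqueue pointer rests on a Link TRB, hand the link to the
 * controller (flip its cycle bit) and advance to the next segment
 * before queueing any transfer TRBs.
 */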
  2863. while (trb_is_link(ep_ring->enqueue)) {
  2864. /* If we're not dealing with 0.95 hardware or isoc rings
  2865. * on AMD 0.96 host, clear the chain bit.
  2866. */
  2867. if (!xhci_link_chain_quirk(xhci, ep_ring->type))
  2868. ep_ring->enqueue->link.control &=
  2869. cpu_to_le32(~TRB_CHAIN);
  2870. else
  2871. ep_ring->enqueue->link.control |=
  2872. cpu_to_le32(TRB_CHAIN);
  2873. wmb();
  2874. ep_ring->enqueue->link.control ^= cpu_to_le32(TRB_CYCLE);
  2875. /* Toggle the cycle bit after the last ring segment. */
  2876. if (link_trb_toggles_cycle(ep_ring->enqueue))
  2877. ep_ring->cycle_state ^= 1;
  2878. ep_ring->enq_seg = ep_ring->enq_seg->next;
  2879. ep_ring->enqueue = ep_ring->enq_seg->trbs;
  2880. /* prevent infinite loop if all first trbs are link trbs */
  2881. if (link_trb_count++ > ep_ring->num_segs) {
  2882. xhci_warn(xhci, "Ring is an endless link TRB loop\n");
  2883. return -EINVAL;
  2884. }
  2885. }
  2886. if (last_trb_on_seg(ep_ring->enq_seg, ep_ring->enqueue)) {
  2887. xhci_warn(xhci, "Missing link TRB at end of ring segment\n");
  2888. return -EINVAL;
  2889. }
  2890. return 0;
  2891. }
  2892. static int prepare_transfer(struct xhci_hcd *xhci,
  2893. struct xhci_virt_device *xdev,
  2894. unsigned int ep_index,
  2895. unsigned int stream_id,
  2896. unsigned int num_trbs,
  2897. struct urb *urb,
  2898. unsigned int td_index,
  2899. gfp_t mem_flags)
  2900. {
  2901. int ret;
  2902. struct urb_priv *urb_priv;
  2903. struct xhci_td *td;
  2904. struct xhci_ring *ep_ring;
  2905. struct xhci_ep_ctx *ep_ctx = xhci_get_ep_ctx(xhci, xdev->out_ctx, ep_index);
  2906. ep_ring = xhci_triad_to_transfer_ring(xhci, xdev->slot_id, ep_index,
  2907. stream_id);
  2908. if (!ep_ring) {
  2909. xhci_dbg(xhci, "Can't prepare ring for bad stream ID %u\n",
  2910. stream_id);
  2911. return -EINVAL;
  2912. }
  2913. ret = prepare_ring(xhci, ep_ring, GET_EP_CTX_STATE(ep_ctx),
  2914. num_trbs, mem_flags);
  2915. if (ret)
  2916. return ret;
  2917. urb_priv = urb->hcpriv;
  2918. td = &urb_priv->td[td_index];
  2919. INIT_LIST_HEAD(&td->td_list);
  2920. INIT_LIST_HEAD(&td->cancelled_td_list);
  2921. if (td_index == 0) {
  2922. ret = usb_hcd_link_urb_to_ep(bus_to_hcd(urb->dev->bus), urb);
  2923. if (unlikely(ret))
  2924. return ret;
  2925. }
  2926. td->urb = urb;
  2927. /* Add this TD to the tail of the endpoint ring's TD list */
  2928. list_add_tail(&td->td_list, &ep_ring->td_list);
  2929. td->start_seg = ep_ring->enq_seg;
  2930. td->first_trb = ep_ring->enqueue;
  2931. return 0;
  2932. }
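/*
 * The driver never lets a single TRB's buffer cross a 64KB boundary
 * (TRB_MAX_BUFF_SIZE), so the offset of 'addr' within its 64KB block
 * counts toward the first TRB.
 */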
  2933. unsigned int count_trbs(u64 addr, u64 len)
  2934. {
  2935. unsigned int num_trbs;
  2936. num_trbs = DIV_ROUND_UP(len + (addr & (TRB_MAX_BUFF_SIZE - 1)),
  2937. TRB_MAX_BUFF_SIZE);
  2938. if (num_trbs == 0)
  2939. num_trbs++;
  2940. return num_trbs;
  2941. }
  2942. static inline unsigned int count_trbs_needed(struct urb *urb)
  2943. {
  2944. return count_trbs(urb->transfer_dma, urb->transfer_buffer_length);
  2945. }
  2946. static unsigned int count_sg_trbs_needed(struct urb *urb)
  2947. {
  2948. struct scatterlist *sg;
  2949. unsigned int i, len, full_len, num_trbs = 0;
  2950. full_len = urb->transfer_buffer_length;
  2951. for_each_sg(urb->sg, sg, urb->num_mapped_sgs, i) {
  2952. len = sg_dma_len(sg);
  2953. num_trbs += count_trbs(sg_dma_address(sg), len);
  2954. len = min_t(unsigned int, len, full_len);
  2955. full_len -= len;
  2956. if (full_len == 0)
  2957. break;
  2958. }
  2959. return num_trbs;
  2960. }
  2961. static unsigned int count_isoc_trbs_needed(struct urb *urb, int i)
  2962. {
  2963. u64 addr, len;
  2964. addr = (u64) (urb->transfer_dma + urb->iso_frame_desc[i].offset);
  2965. len = urb->iso_frame_desc[i].length;
  2966. return count_trbs(addr, len);
  2967. }
  2968. static void check_trb_math(struct urb *urb, int running_total)
  2969. {
  2970. if (unlikely(running_total != urb->transfer_buffer_length))
  2971. dev_err(&urb->dev->dev, "%s - ep %#x - Miscalculated tx length, "
  2972. "queued %#x (%d), asked for %#x (%d)\n",
  2973. __func__,
  2974. urb->ep->desc.bEndpointAddress,
  2975. running_total, running_total,
  2976. urb->transfer_buffer_length,
  2977. urb->transfer_buffer_length);
  2978. }
  2979. static void giveback_first_trb(struct xhci_hcd *xhci, int slot_id,
  2980. unsigned int ep_index, unsigned int stream_id, int start_cycle,
  2981. struct xhci_generic_trb *start_trb)
  2982. {
  2983. /*
  2984. * Pass all the TRBs to the hardware at once and make sure this write
  2985. * isn't reordered.
  2986. */
  2987. wmb();
  2988. if (start_cycle)
  2989. start_trb->field[3] |= cpu_to_le32(start_cycle);
  2990. else
  2991. start_trb->field[3] &= cpu_to_le32(~TRB_CYCLE);
  2992. xhci_ring_ep_doorbell(xhci, slot_id, ep_index, stream_id);
  2993. }
  2994. static void check_interval(struct urb *urb, struct xhci_ep_ctx *ep_ctx)
  2995. {
  2996. int xhci_interval;
  2997. int ep_interval;
  2998. xhci_interval = EP_INTERVAL_TO_UFRAMES(le32_to_cpu(ep_ctx->ep_info));
  2999. ep_interval = urb->interval;
  3000. /* Convert to microframes */
  3001. if (urb->dev->speed == USB_SPEED_LOW ||
  3002. urb->dev->speed == USB_SPEED_FULL)
  3003. ep_interval *= 8;
  3004. /* FIXME change this to a warning and a suggestion to use the new API
  3005. * to set the polling interval (once the API is added).
  3006. */
  3007. if (xhci_interval != ep_interval) {
  3008. dev_dbg_ratelimited(&urb->dev->dev,
  3009. "Driver uses different interval (%d microframe%s) than xHCI (%d microframe%s)\n",
  3010. ep_interval, ep_interval == 1 ? "" : "s",
  3011. xhci_interval, xhci_interval == 1 ? "" : "s");
  3012. urb->interval = xhci_interval;
  3013. /* Convert back to frames for LS/FS devices */
  3014. if (urb->dev->speed == USB_SPEED_LOW ||
  3015. urb->dev->speed == USB_SPEED_FULL)
  3016. urb->interval /= 8;
  3017. }
  3018. }
  3019. /*
  3020. * xHCI uses normal TRBs for both bulk and interrupt. When the interrupt
  3021. * endpoint is to be serviced, the xHC will consume (at most) one TD. A TD
  3022. * (comprised of sg list entries) can take several service intervals to
  3023. * transmit.
  3024. */
  3025. int xhci_queue_intr_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
  3026. struct urb *urb, int slot_id, unsigned int ep_index)
  3027. {
  3028. struct xhci_ep_ctx *ep_ctx;
  3029. ep_ctx = xhci_get_ep_ctx(xhci, xhci->devs[slot_id]->out_ctx, ep_index);
  3030. check_interval(urb, ep_ctx);
  3031. return xhci_queue_bulk_tx(xhci, mem_flags, urb, slot_id, ep_index);
  3032. }
  3033. /*
  3034. * For xHCI 1.0 host controllers, TD size is the number of max packet sized
  3035. * packets remaining in the TD (*not* including this TRB).
  3036. *
  3037. * Total TD packet count = total_packet_count =
  3038. * DIV_ROUND_UP(TD size in bytes / wMaxPacketSize)
  3039. *
  3040. * Packets transferred up to and including this TRB = packets_transferred =
  3041. * rounddown(total bytes transferred including this TRB / wMaxPacketSize)
  3042. *
  3043. * TD size = total_packet_count - packets_transferred
  3044. *
  3045. * For xHCI 0.96 and older, TD size field should be the remaining bytes
  3046. * including this TRB, right shifted by 10
  3047. *
  3048. * For all hosts it must fit in bits 21:17, so it can't be bigger than 31.
  3049. * This is taken care of in the TRB_TD_SIZE() macro
  3050. *
  3051. * The last TRB in a TD must have the TD size set to zero.
  3052. */
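/*
 * Illustrative example for 1.0+ hosts (values assumed for the example):
 * a 3000-byte TD on an endpoint with wMaxPacketSize 512, queued as two
 * TRBs of 2048 and 952 bytes. total_packet_count = DIV_ROUND_UP(3000, 512)
 * = 6. After the first TRB, (0 + 2048) / 512 = 4 packets have gone out,
 * so its TD Size field is 6 - 4 = 2. The last TRB always reports 0.
 */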
  3053. static u32 xhci_td_remainder(struct xhci_hcd *xhci, int transferred,
  3054. int trb_buff_len, unsigned int td_total_len,
  3055. struct urb *urb, bool more_trbs_coming)
  3056. {
  3057. u32 maxp, total_packet_count;
  3058. /* MTK xHCI 0.96 contains some features from 1.0 */
  3059. if (xhci->hci_version < 0x100 && !(xhci->quirks & XHCI_MTK_HOST))
  3060. return ((td_total_len - transferred) >> 10);
  3061. /* One TRB with a zero-length data packet. */
  3062. if (!more_trbs_coming || (transferred == 0 && trb_buff_len == 0) ||
  3063. trb_buff_len == td_total_len)
  3064. return 0;
  3065. /* for MTK xHCI 0.96, TD size include this TRB, but not in 1.x */
  3066. if ((xhci->quirks & XHCI_MTK_HOST) && (xhci->hci_version < 0x100))
  3067. trb_buff_len = 0;
  3068. maxp = usb_endpoint_maxp(&urb->ep->desc);
  3069. total_packet_count = DIV_ROUND_UP(td_total_len, maxp);
  3070. /* Queueing functions don't count the current TRB into transferred */
  3071. return (total_packet_count - ((transferred + trb_buff_len) / maxp));
  3072. }
  3073. static int xhci_align_td(struct xhci_hcd *xhci, struct urb *urb, u32 enqd_len,
  3074. u32 *trb_buff_len, struct xhci_segment *seg)
  3075. {
  3076. struct device *dev = xhci_to_hcd(xhci)->self.sysdev;
  3077. unsigned int unalign;
  3078. unsigned int max_pkt;
  3079. u32 new_buff_len;
  3080. size_t len;
  3081. max_pkt = usb_endpoint_maxp(&urb->ep->desc);
  3082. unalign = (enqd_len + *trb_buff_len) % max_pkt;
  3083. /* we got lucky, last normal TRB data on segment is packet aligned */
  3084. if (unalign == 0)
  3085. return 0;
  3086. xhci_dbg(xhci, "Unaligned %d bytes, buff len %d\n",
  3087. unalign, *trb_buff_len);
/* is the last normal TRB alignable by splitting it? */
  3089. if (*trb_buff_len > unalign) {
  3090. *trb_buff_len -= unalign;
  3091. xhci_dbg(xhci, "split align, new buff len %d\n", *trb_buff_len);
  3092. return 0;
  3093. }
/*
 * We want enqd_len + trb_buff_len to sum up to a number which is
 * divisible by the endpoint's wMaxPacketSize. IOW:
 * (size of currently enqueued TRBs + remainder) % wMaxPacketSize == 0.
 */
  3099. new_buff_len = max_pkt - (enqd_len % max_pkt);
  3100. if (new_buff_len > (urb->transfer_buffer_length - enqd_len))
  3101. new_buff_len = (urb->transfer_buffer_length - enqd_len);
/* create an up to max_pkt sized bounce buffer pointed to by the last TRB */
  3103. if (usb_urb_dir_out(urb)) {
  3104. if (urb->num_sgs) {
  3105. len = sg_pcopy_to_buffer(urb->sg, urb->num_sgs,
  3106. seg->bounce_buf, new_buff_len, enqd_len);
  3107. if (len != new_buff_len)
  3108. xhci_warn(xhci, "WARN Wrong bounce buffer write length: %zu != %d\n",
  3109. len, new_buff_len);
  3110. } else {
  3111. memcpy(seg->bounce_buf, urb->transfer_buffer + enqd_len, new_buff_len);
  3112. }
  3113. seg->bounce_dma = dma_map_single(dev, seg->bounce_buf,
  3114. max_pkt, DMA_TO_DEVICE);
  3115. } else {
  3116. seg->bounce_dma = dma_map_single(dev, seg->bounce_buf,
  3117. max_pkt, DMA_FROM_DEVICE);
  3118. }
  3119. if (dma_mapping_error(dev, seg->bounce_dma)) {
  3120. /* try without aligning. Some host controllers survive */
  3121. xhci_warn(xhci, "Failed mapping bounce buffer, not aligning\n");
  3122. return 0;
  3123. }
  3124. *trb_buff_len = new_buff_len;
  3125. seg->bounce_len = new_buff_len;
  3126. seg->bounce_offs = enqd_len;
  3127. xhci_dbg(xhci, "Bounce align, new buff len %d\n", *trb_buff_len);
  3128. return 1;
  3129. }
  3130. /* This is very similar to what ehci-q.c qtd_fill() does */
  3131. int xhci_queue_bulk_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
  3132. struct urb *urb, int slot_id, unsigned int ep_index)
  3133. {
  3134. struct xhci_ring *ring;
  3135. struct urb_priv *urb_priv;
  3136. struct xhci_td *td;
  3137. struct xhci_generic_trb *start_trb;
  3138. struct scatterlist *sg = NULL;
  3139. bool more_trbs_coming = true;
  3140. bool need_zero_pkt = false;
  3141. bool first_trb = true;
  3142. unsigned int num_trbs;
  3143. unsigned int start_cycle, num_sgs = 0;
  3144. unsigned int enqd_len, block_len, trb_buff_len, full_len;
  3145. int sent_len, ret;
  3146. u32 field, length_field, remainder;
  3147. u64 addr, send_addr;
  3148. ring = xhci_urb_to_transfer_ring(xhci, urb);
  3149. if (!ring)
  3150. return -EINVAL;
  3151. full_len = urb->transfer_buffer_length;
  3152. /* If we have scatter/gather list, we use it. */
  3153. if (urb->num_sgs && !(urb->transfer_flags & URB_DMA_MAP_SINGLE)) {
  3154. num_sgs = urb->num_mapped_sgs;
  3155. sg = urb->sg;
  3156. addr = (u64) sg_dma_address(sg);
  3157. block_len = sg_dma_len(sg);
  3158. num_trbs = count_sg_trbs_needed(urb);
  3159. } else {
  3160. num_trbs = count_trbs_needed(urb);
  3161. addr = (u64) urb->transfer_dma;
  3162. block_len = full_len;
  3163. }
  3164. ret = prepare_transfer(xhci, xhci->devs[slot_id],
  3165. ep_index, urb->stream_id,
  3166. num_trbs, urb, 0, mem_flags);
  3167. if (unlikely(ret < 0))
  3168. return ret;
  3169. urb_priv = urb->hcpriv;
  3170. /* Deal with URB_ZERO_PACKET - need one more td/trb */
  3171. if (urb->transfer_flags & URB_ZERO_PACKET && urb_priv->num_tds > 1)
  3172. need_zero_pkt = true;
  3173. td = &urb_priv->td[0];
  3174. /*
  3175. * Don't give the first TRB to the hardware (by toggling the cycle bit)
  3176. * until we've finished creating all the other TRBs. The ring's cycle
  3177. * state may change as we enqueue the other TRBs, so save it too.
  3178. */
  3179. start_trb = &ring->enqueue->generic;
  3180. start_cycle = ring->cycle_state;
  3181. send_addr = addr;
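/*
 * Each iteration queues one Normal TRB: trb_buff_len is capped at the
 * 64KB boundary starting from 'addr' and at what remains of the current
 * block (sg entry or linear buffer) and of the URB.
 */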
  3182. /* Queue the TRBs, even if they are zero-length */
  3183. for (enqd_len = 0; first_trb || enqd_len < full_len;
  3184. enqd_len += trb_buff_len) {
  3185. field = TRB_TYPE(TRB_NORMAL);
  3186. /* TRB buffer should not cross 64KB boundaries */
  3187. trb_buff_len = TRB_BUFF_LEN_UP_TO_BOUNDARY(addr);
  3188. trb_buff_len = min_t(unsigned int, trb_buff_len, block_len);
  3189. if (enqd_len + trb_buff_len > full_len)
  3190. trb_buff_len = full_len - enqd_len;
  3191. /* Don't change the cycle bit of the first TRB until later */
  3192. if (first_trb) {
  3193. first_trb = false;
  3194. if (start_cycle == 0)
  3195. field |= TRB_CYCLE;
  3196. } else
  3197. field |= ring->cycle_state;
  3198. /* Chain all the TRBs together; clear the chain bit in the last
  3199. * TRB to indicate it's the last TRB in the chain.
  3200. */
  3201. if (enqd_len + trb_buff_len < full_len) {
  3202. field |= TRB_CHAIN;
  3203. if (trb_is_link(ring->enqueue + 1)) {
  3204. if (xhci_align_td(xhci, urb, enqd_len,
  3205. &trb_buff_len,
  3206. ring->enq_seg)) {
  3207. send_addr = ring->enq_seg->bounce_dma;
  3208. /* assuming TD won't span 2 segs */
  3209. td->bounce_seg = ring->enq_seg;
  3210. }
  3211. }
  3212. }
  3213. if (enqd_len + trb_buff_len >= full_len) {
  3214. field &= ~TRB_CHAIN;
  3215. field |= TRB_IOC;
  3216. more_trbs_coming = false;
  3217. td->last_trb = ring->enqueue;
  3218. td->last_trb_seg = ring->enq_seg;
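/*
 * Immediate Data Transfer: for small OUT transfers the payload (at most
 * 8 bytes) is placed directly in the TRB's data buffer pointer field
 * instead of a DMA address.
 */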
  3219. if (xhci_urb_suitable_for_idt(urb)) {
  3220. memcpy(&send_addr, urb->transfer_buffer,
  3221. trb_buff_len);
  3222. le64_to_cpus(&send_addr);
  3223. field |= TRB_IDT;
  3224. }
  3225. }
  3226. /* Only set interrupt on short packet for IN endpoints */
  3227. if (usb_urb_dir_in(urb))
  3228. field |= TRB_ISP;
  3229. /* Set the TRB length, TD size, and interrupter fields. */
  3230. remainder = xhci_td_remainder(xhci, enqd_len, trb_buff_len,
  3231. full_len, urb, more_trbs_coming);
  3232. length_field = TRB_LEN(trb_buff_len) |
  3233. TRB_TD_SIZE(remainder) |
  3234. TRB_INTR_TARGET(0);
  3235. queue_trb(xhci, ring, more_trbs_coming | need_zero_pkt,
  3236. lower_32_bits(send_addr),
  3237. upper_32_bits(send_addr),
  3238. length_field,
  3239. field);
  3240. addr += trb_buff_len;
  3241. sent_len = trb_buff_len;
  3242. while (sg && sent_len >= block_len) {
  3243. /* New sg entry */
  3244. --num_sgs;
  3245. sent_len -= block_len;
  3246. sg = sg_next(sg);
  3247. if (num_sgs != 0 && sg) {
  3248. block_len = sg_dma_len(sg);
  3249. addr = (u64) sg_dma_address(sg);
  3250. addr += sent_len;
  3251. }
  3252. }
  3253. block_len -= sent_len;
  3254. send_addr = addr;
  3255. }
  3256. if (need_zero_pkt) {
  3257. ret = prepare_transfer(xhci, xhci->devs[slot_id],
  3258. ep_index, urb->stream_id,
  3259. 1, urb, 1, mem_flags);
  3260. urb_priv->td[1].last_trb = ring->enqueue;
  3261. urb_priv->td[1].last_trb_seg = ring->enq_seg;
  3262. field = TRB_TYPE(TRB_NORMAL) | ring->cycle_state | TRB_IOC;
  3263. queue_trb(xhci, ring, 0, 0, 0, TRB_INTR_TARGET(0), field);
  3264. }
  3265. check_trb_math(urb, enqd_len);
  3266. giveback_first_trb(xhci, slot_id, ep_index, urb->stream_id,
  3267. start_cycle, start_trb);
  3268. return 0;
  3269. }
  3270. /* Caller must have locked xhci->lock */
  3271. int xhci_queue_ctrl_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
  3272. struct urb *urb, int slot_id, unsigned int ep_index)
  3273. {
  3274. struct xhci_ring *ep_ring;
  3275. int num_trbs;
  3276. int ret;
  3277. struct usb_ctrlrequest *setup;
  3278. struct xhci_generic_trb *start_trb;
  3279. int start_cycle;
  3280. u32 field;
  3281. struct urb_priv *urb_priv;
  3282. struct xhci_td *td;
  3283. ep_ring = xhci_urb_to_transfer_ring(xhci, urb);
  3284. if (!ep_ring)
  3285. return -EINVAL;
  3286. /*
  3287. * Need to copy setup packet into setup TRB, so we can't use the setup
  3288. * DMA address.
  3289. */
  3290. if (!urb->setup_packet)
  3291. return -EINVAL;
  3292. if ((xhci->quirks & XHCI_ETRON_HOST) &&
  3293. urb->dev->speed >= USB_SPEED_SUPER) {
/*
 * If the next available TRB is the Link TRB in the ring segment then
 * enqueue a No-Op TRB; this prevents the Setup and Data Stage TRBs
 * from being split up by the Link TRB.
 */
  3299. if (trb_is_link(ep_ring->enqueue + 1)) {
  3300. field = TRB_TYPE(TRB_TR_NOOP) | ep_ring->cycle_state;
  3301. queue_trb(xhci, ep_ring, false, 0, 0,
  3302. TRB_INTR_TARGET(0), field);
  3303. }
  3304. }
  3305. /* 1 TRB for setup, 1 for status */
  3306. num_trbs = 2;
  3307. /*
  3308. * Don't need to check if we need additional event data and normal TRBs,
  3309. * since data in control transfers will never get bigger than 16MB
  3310. * XXX: can we get a buffer that crosses 64KB boundaries?
  3311. */
  3312. if (urb->transfer_buffer_length > 0)
  3313. num_trbs++;
  3314. ret = prepare_transfer(xhci, xhci->devs[slot_id],
  3315. ep_index, urb->stream_id,
  3316. num_trbs, urb, 0, mem_flags);
  3317. if (ret < 0)
  3318. return ret;
  3319. urb_priv = urb->hcpriv;
  3320. td = &urb_priv->td[0];
  3321. /*
  3322. * Don't give the first TRB to the hardware (by toggling the cycle bit)
  3323. * until we've finished creating all the other TRBs. The ring's cycle
  3324. * state may change as we enqueue the other TRBs, so save it too.
  3325. */
  3326. start_trb = &ep_ring->enqueue->generic;
  3327. start_cycle = ep_ring->cycle_state;
  3328. /* Queue setup TRB - see section 6.4.1.2.1 */
  3329. /* FIXME better way to translate setup_packet into two u32 fields? */
  3330. setup = (struct usb_ctrlrequest *) urb->setup_packet;
  3331. field = 0;
  3332. field |= TRB_IDT | TRB_TYPE(TRB_SETUP);
  3333. if (start_cycle == 0)
  3334. field |= 0x1;
  3335. /* xHCI 1.0/1.1 6.4.1.2.1: Transfer Type field */
  3336. if ((xhci->hci_version >= 0x100) || (xhci->quirks & XHCI_MTK_HOST)) {
  3337. if (urb->transfer_buffer_length > 0) {
  3338. if (setup->bRequestType & USB_DIR_IN)
  3339. field |= TRB_TX_TYPE(TRB_DATA_IN);
  3340. else
  3341. field |= TRB_TX_TYPE(TRB_DATA_OUT);
  3342. }
  3343. }
  3344. queue_trb(xhci, ep_ring, true,
  3345. setup->bRequestType | setup->bRequest << 8 | le16_to_cpu(setup->wValue) << 16,
  3346. le16_to_cpu(setup->wIndex) | le16_to_cpu(setup->wLength) << 16,
  3347. TRB_LEN(8) | TRB_INTR_TARGET(0),
  3348. /* Immediate data in pointer */
  3349. field);
  3350. /* If there's data, queue data TRBs */
  3351. /* Only set interrupt on short packet for IN endpoints */
  3352. if (usb_urb_dir_in(urb))
  3353. field = TRB_ISP | TRB_TYPE(TRB_DATA);
  3354. else
  3355. field = TRB_TYPE(TRB_DATA);
  3356. if (urb->transfer_buffer_length > 0) {
  3357. u32 length_field, remainder;
  3358. u64 addr;
  3359. if (xhci_urb_suitable_for_idt(urb)) {
  3360. memcpy(&addr, urb->transfer_buffer,
  3361. urb->transfer_buffer_length);
  3362. le64_to_cpus(&addr);
  3363. field |= TRB_IDT;
  3364. } else {
  3365. addr = (u64) urb->transfer_dma;
  3366. }
  3367. remainder = xhci_td_remainder(xhci, 0,
  3368. urb->transfer_buffer_length,
  3369. urb->transfer_buffer_length,
  3370. urb, 1);
  3371. length_field = TRB_LEN(urb->transfer_buffer_length) |
  3372. TRB_TD_SIZE(remainder) |
  3373. TRB_INTR_TARGET(0);
  3374. if (setup->bRequestType & USB_DIR_IN)
  3375. field |= TRB_DIR_IN;
  3376. queue_trb(xhci, ep_ring, true,
  3377. lower_32_bits(addr),
  3378. upper_32_bits(addr),
  3379. length_field,
  3380. field | ep_ring->cycle_state);
  3381. }
  3382. /* Save the DMA address of the last TRB in the TD */
  3383. td->last_trb = ep_ring->enqueue;
  3384. td->last_trb_seg = ep_ring->enq_seg;
  3385. /* Queue status TRB - see Table 7 and sections 4.11.2.2 and 6.4.1.2.3 */
  3386. /* If the device sent data, the status stage is an OUT transfer */
  3387. if (urb->transfer_buffer_length > 0 && setup->bRequestType & USB_DIR_IN)
  3388. field = 0;
  3389. else
  3390. field = TRB_DIR_IN;
  3391. queue_trb(xhci, ep_ring, false,
  3392. 0,
  3393. 0,
  3394. TRB_INTR_TARGET(0),
  3395. /* Event on completion */
  3396. field | TRB_IOC | TRB_TYPE(TRB_STATUS) | ep_ring->cycle_state);
  3397. giveback_first_trb(xhci, slot_id, ep_index, 0,
  3398. start_cycle, start_trb);
  3399. return 0;
  3400. }
  3401. /*
  3402. * The transfer burst count field of the isochronous TRB defines the number of
  3403. * bursts that are required to move all packets in this TD. Only SuperSpeed
  3404. * devices can burst up to bMaxBurst number of packets per service interval.
  3405. * This field is zero based, meaning a value of zero in the field means one
  3406. * burst. Basically, for everything but SuperSpeed devices, this field will be
  3407. * zero. Only xHCI 1.0 host controllers support this field.
  3408. */
  3409. static unsigned int xhci_get_burst_count(struct xhci_hcd *xhci,
  3410. struct urb *urb, unsigned int total_packet_count)
  3411. {
  3412. unsigned int max_burst;
  3413. if (xhci->hci_version < 0x100 || urb->dev->speed < USB_SPEED_SUPER)
  3414. return 0;
  3415. max_burst = urb->ep->ss_ep_comp.bMaxBurst;
  3416. return DIV_ROUND_UP(total_packet_count, max_burst + 1) - 1;
  3417. }
  3418. /*
  3419. * Returns the number of packets in the last "burst" of packets. This field is
  3420. * valid for all speeds of devices. USB 2.0 devices can only do one "burst", so
  3421. * the last burst packet count is equal to the total number of packets in the
  3422. * TD. SuperSpeed endpoints can have up to 3 bursts. All but the last burst
  3423. * must contain (bMaxBurst + 1) number of packets, but the last burst can
  3424. * contain 1 to (bMaxBurst + 1) packets.
  3425. */
  3426. static unsigned int xhci_get_last_burst_packet_count(struct xhci_hcd *xhci,
  3427. struct urb *urb, unsigned int total_packet_count)
  3428. {
  3429. unsigned int max_burst;
  3430. unsigned int residue;
  3431. if (xhci->hci_version < 0x100)
  3432. return 0;
  3433. if (urb->dev->speed >= USB_SPEED_SUPER) {
  3434. /* bMaxBurst is zero based: 0 means 1 packet per burst */
  3435. max_burst = urb->ep->ss_ep_comp.bMaxBurst;
  3436. residue = total_packet_count % (max_burst + 1);
  3437. /* If residue is zero, the last burst contains (max_burst + 1)
  3438. * number of packets, but the TLBPC field is zero-based.
  3439. */
  3440. if (residue == 0)
  3441. return max_burst;
  3442. return residue - 1;
  3443. }
  3444. if (total_packet_count == 0)
  3445. return 0;
  3446. return total_packet_count - 1;
  3447. }
/*
 * Calculate the Frame ID field of the isochronous TRB, which identifies
 * the target frame that the interval associated with this Isochronous
 * Transfer Descriptor will start on. Refer to 4.11.2.5 in the 1.1 spec.
 *
 * Returns the actual frame id on success, a negative value on error.
 */
  3455. static int xhci_get_isoc_frame_id(struct xhci_hcd *xhci,
  3456. struct urb *urb, int index)
  3457. {
  3458. int start_frame, ist, ret = 0;
  3459. int start_frame_id, end_frame_id, current_frame_id;
  3460. if (urb->dev->speed == USB_SPEED_LOW ||
  3461. urb->dev->speed == USB_SPEED_FULL)
  3462. start_frame = urb->start_frame + index * urb->interval;
  3463. else
  3464. start_frame = (urb->start_frame + index * urb->interval) >> 3;
  3465. /* Isochronous Scheduling Threshold (IST, bits 0~3 in HCSPARAMS2):
  3466. *
  3467. * If bit [3] of IST is cleared to '0', software can add a TRB no
  3468. * later than IST[2:0] Microframes before that TRB is scheduled to
  3469. * be executed.
  3470. * If bit [3] of IST is set to '1', software can add a TRB no later
  3471. * than IST[2:0] Frames before that TRB is scheduled to be executed.
  3472. */
  3473. ist = HCS_IST(xhci->hcs_params2) & 0x7;
  3474. if (HCS_IST(xhci->hcs_params2) & (1 << 3))
  3475. ist <<= 3;
  3476. /* Software shall not schedule an Isoch TD with a Frame ID value that
  3477. * is less than the Start Frame ID or greater than the End Frame ID,
  3478. * where:
  3479. *
  3480. * End Frame ID = (Current MFINDEX register value + 895 ms.) MOD 2048
  3481. * Start Frame ID = (Current MFINDEX register value + IST + 1) MOD 2048
  3482. *
 * Both the End Frame ID and Start Frame ID values are calculated
 * in microframes. When software determines the valid Frame ID value,
 * the End Frame ID value should be rounded down to the nearest Frame
 * boundary, and the Start Frame ID value should be rounded up to the
 * nearest Frame boundary.
  3488. */
  3489. current_frame_id = readl(&xhci->run_regs->microframe_index);
  3490. start_frame_id = roundup(current_frame_id + ist + 1, 8);
  3491. end_frame_id = rounddown(current_frame_id + 895 * 8, 8);
  3492. start_frame &= 0x7ff;
  3493. start_frame_id = (start_frame_id >> 3) & 0x7ff;
  3494. end_frame_id = (end_frame_id >> 3) & 0x7ff;
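/*
 * Frame IDs are 11-bit values (modulo 2048), so the valid window from
 * Start Frame ID to End Frame ID may wrap around zero; both orderings
 * are checked below.
 */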
  3495. if (start_frame_id < end_frame_id) {
  3496. if (start_frame > end_frame_id ||
  3497. start_frame < start_frame_id)
  3498. ret = -EINVAL;
  3499. } else if (start_frame_id > end_frame_id) {
  3500. if ((start_frame > end_frame_id &&
  3501. start_frame < start_frame_id))
  3502. ret = -EINVAL;
  3503. } else {
  3504. ret = -EINVAL;
  3505. }
  3506. if (index == 0) {
  3507. if (ret == -EINVAL || start_frame == start_frame_id) {
  3508. start_frame = start_frame_id + 1;
  3509. if (urb->dev->speed == USB_SPEED_LOW ||
  3510. urb->dev->speed == USB_SPEED_FULL)
  3511. urb->start_frame = start_frame;
  3512. else
  3513. urb->start_frame = start_frame << 3;
  3514. ret = 0;
  3515. }
  3516. }
  3517. if (ret) {
  3518. xhci_warn(xhci, "Frame ID %d (reg %d, index %d) beyond range (%d, %d)\n",
  3519. start_frame, current_frame_id, index,
  3520. start_frame_id, end_frame_id);
  3521. xhci_warn(xhci, "Ignore frame ID field, use SIA bit instead\n");
  3522. return ret;
  3523. }
  3524. return start_frame;
  3525. }
  3526. /* Check if we should generate event interrupt for a TD in an isoc URB */
  3527. static bool trb_block_event_intr(struct xhci_hcd *xhci, int num_tds, int i,
  3528. struct xhci_interrupter *ir)
  3529. {
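/*
 * TRB_BEI (Block Event Interrupt) lets an isoc TRB complete without
 * asserting an interrupt; the event is still placed on the event ring
 * and gets handled when a later interrupting event arrives.
 */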
  3530. if (xhci->hci_version < 0x100)
  3531. return false;
  3532. /* always generate an event interrupt for the last TD */
  3533. if (i == num_tds - 1)
  3534. return false;
  3535. /*
  3536. * If AVOID_BEI is set the host handles full event rings poorly,
  3537. * generate an event at least every 8th TD to clear the event ring
  3538. */
  3539. if (i && ir->isoc_bei_interval && xhci->quirks & XHCI_AVOID_BEI)
  3540. return !!(i % ir->isoc_bei_interval);
  3541. return true;
  3542. }
  3543. /* This is for isoc transfer */
  3544. static int xhci_queue_isoc_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
  3545. struct urb *urb, int slot_id, unsigned int ep_index)
  3546. {
  3547. struct xhci_interrupter *ir;
  3548. struct xhci_ring *ep_ring;
  3549. struct urb_priv *urb_priv;
  3550. struct xhci_td *td;
  3551. int num_tds, trbs_per_td;
  3552. struct xhci_generic_trb *start_trb;
  3553. bool first_trb;
  3554. int start_cycle;
  3555. u32 field, length_field;
  3556. int running_total, trb_buff_len, td_len, td_remain_len, ret;
  3557. u64 start_addr, addr;
  3558. int i, j;
  3559. bool more_trbs_coming;
  3560. struct xhci_virt_ep *xep;
  3561. int frame_id;
  3562. xep = &xhci->devs[slot_id]->eps[ep_index];
  3563. ep_ring = xhci->devs[slot_id]->eps[ep_index].ring;
  3564. ir = xhci->interrupters[0];
  3565. num_tds = urb->number_of_packets;
  3566. if (num_tds < 1) {
  3567. xhci_dbg(xhci, "Isoc URB with zero packets?\n");
  3568. return -EINVAL;
  3569. }
  3570. start_addr = (u64) urb->transfer_dma;
  3571. start_trb = &ep_ring->enqueue->generic;
  3572. start_cycle = ep_ring->cycle_state;
  3573. urb_priv = urb->hcpriv;
  3574. /* Queue the TRBs for each TD, even if they are zero-length */
  3575. for (i = 0; i < num_tds; i++) {
  3576. unsigned int total_pkt_count, max_pkt;
  3577. unsigned int burst_count, last_burst_pkt_count;
  3578. u32 sia_frame_id;
  3579. first_trb = true;
  3580. running_total = 0;
  3581. addr = start_addr + urb->iso_frame_desc[i].offset;
  3582. td_len = urb->iso_frame_desc[i].length;
  3583. td_remain_len = td_len;
  3584. max_pkt = usb_endpoint_maxp(&urb->ep->desc);
  3585. total_pkt_count = DIV_ROUND_UP(td_len, max_pkt);
  3586. /* A zero-length transfer still involves at least one packet. */
  3587. if (total_pkt_count == 0)
  3588. total_pkt_count++;
  3589. burst_count = xhci_get_burst_count(xhci, urb, total_pkt_count);
  3590. last_burst_pkt_count = xhci_get_last_burst_packet_count(xhci,
  3591. urb, total_pkt_count);
  3592. trbs_per_td = count_isoc_trbs_needed(urb, i);
  3593. ret = prepare_transfer(xhci, xhci->devs[slot_id], ep_index,
  3594. urb->stream_id, trbs_per_td, urb, i, mem_flags);
  3595. if (ret < 0) {
  3596. if (i == 0)
  3597. return ret;
  3598. goto cleanup;
  3599. }
  3600. td = &urb_priv->td[i];
  3601. /* use SIA as default, if frame id is used overwrite it */
  3602. sia_frame_id = TRB_SIA;
  3603. if (!(urb->transfer_flags & URB_ISO_ASAP) &&
  3604. HCC_CFC(xhci->hcc_params)) {
  3605. frame_id = xhci_get_isoc_frame_id(xhci, urb, i);
  3606. if (frame_id >= 0)
  3607. sia_frame_id = TRB_FRAME_ID(frame_id);
  3608. }
  3609. /*
  3610. * Set isoc specific data for the first TRB in a TD.
  3611. * Prevent HW from getting the TRBs by keeping the cycle state
  3612. * inverted in the first TDs isoc TRB.
  3613. */
  3614. field = TRB_TYPE(TRB_ISOC) |
  3615. TRB_TLBPC(last_burst_pkt_count) |
  3616. sia_frame_id |
  3617. (i ? ep_ring->cycle_state : !start_cycle);
  3618. /* xhci 1.1 with ETE uses TD_Size field for TBC, old is Rsvdz */
  3619. if (!xep->use_extended_tbc)
  3620. field |= TRB_TBC(burst_count);
  3621. /* fill the rest of the TRB fields, and remaining normal TRBs */
  3622. for (j = 0; j < trbs_per_td; j++) {
  3623. u32 remainder = 0;
  3624. /* only first TRB is isoc, overwrite otherwise */
  3625. if (!first_trb)
  3626. field = TRB_TYPE(TRB_NORMAL) |
  3627. ep_ring->cycle_state;
  3628. /* Only set interrupt on short packet for IN EPs */
  3629. if (usb_urb_dir_in(urb))
  3630. field |= TRB_ISP;
  3631. /* Set the chain bit for all except the last TRB */
  3632. if (j < trbs_per_td - 1) {
  3633. more_trbs_coming = true;
  3634. field |= TRB_CHAIN;
  3635. } else {
  3636. more_trbs_coming = false;
  3637. td->last_trb = ep_ring->enqueue;
  3638. td->last_trb_seg = ep_ring->enq_seg;
  3639. field |= TRB_IOC;
  3640. if (trb_block_event_intr(xhci, num_tds, i, ir))
  3641. field |= TRB_BEI;
  3642. }
  3643. /* Calculate TRB length */
  3644. trb_buff_len = TRB_BUFF_LEN_UP_TO_BOUNDARY(addr);
  3645. if (trb_buff_len > td_remain_len)
  3646. trb_buff_len = td_remain_len;
  3647. /* Set the TRB length, TD size, & interrupter fields. */
  3648. remainder = xhci_td_remainder(xhci, running_total,
  3649. trb_buff_len, td_len,
  3650. urb, more_trbs_coming);
  3651. length_field = TRB_LEN(trb_buff_len) |
  3652. TRB_INTR_TARGET(0);
  3653. /* xhci 1.1 with ETE uses TD Size field for TBC */
  3654. if (first_trb && xep->use_extended_tbc)
  3655. length_field |= TRB_TD_SIZE_TBC(burst_count);
  3656. else
  3657. length_field |= TRB_TD_SIZE(remainder);
  3658. first_trb = false;
  3659. queue_trb(xhci, ep_ring, more_trbs_coming,
  3660. lower_32_bits(addr),
  3661. upper_32_bits(addr),
  3662. length_field,
  3663. field);
  3664. running_total += trb_buff_len;
  3665. addr += trb_buff_len;
  3666. td_remain_len -= trb_buff_len;
  3667. }
  3668. /* Check TD length */
  3669. if (running_total != td_len) {
  3670. xhci_err(xhci, "ISOC TD length unmatch\n");
  3671. ret = -EINVAL;
  3672. goto cleanup;
  3673. }
  3674. }
  3675. /* store the next frame id */
  3676. if (HCC_CFC(xhci->hcc_params))
  3677. xep->next_frame_id = urb->start_frame + num_tds * urb->interval;
  3678. if (xhci_to_hcd(xhci)->self.bandwidth_isoc_reqs == 0) {
  3679. if (xhci->quirks & XHCI_AMD_PLL_FIX)
  3680. usb_amd_quirk_pll_disable();
  3681. }
  3682. xhci_to_hcd(xhci)->self.bandwidth_isoc_reqs++;
  3683. giveback_first_trb(xhci, slot_id, ep_index, urb->stream_id,
  3684. start_cycle, start_trb);
  3685. return 0;
  3686. cleanup:
  3687. /* Clean up a partially enqueued isoc transfer. */
  3688. for (i--; i >= 0; i--)
  3689. list_del_init(&urb_priv->td[i].td_list);
  3690. /* Use the first TD as a temporary variable to turn the TDs we've queued
  3691. * into No-ops with a software-owned cycle bit. That way the hardware
  3692. * won't accidentally start executing bogus TDs when we partially
  3693. * overwrite them. td->first_trb and td->start_seg are already set.
  3694. */
  3695. urb_priv->td[0].last_trb = ep_ring->enqueue;
  3696. /* Every TRB except the first & last will have its cycle bit flipped. */
  3697. td_to_noop(xhci, ep_ring, &urb_priv->td[0], true);
  3698. /* Reset the ring enqueue back to the first TRB and its cycle bit. */
  3699. ep_ring->enqueue = urb_priv->td[0].first_trb;
  3700. ep_ring->enq_seg = urb_priv->td[0].start_seg;
  3701. ep_ring->cycle_state = start_cycle;
  3702. usb_hcd_unlink_urb_from_ep(bus_to_hcd(urb->dev->bus), urb);
  3703. return ret;
  3704. }
  3705. /*
  3706. * Check transfer ring to guarantee there is enough room for the urb.
  3707. * Update ISO URB start_frame and interval.
  3708. * Update interval as xhci_queue_intr_tx does. Use xhci frame_index to
  3709. * update urb->start_frame if URB_ISO_ASAP is set in transfer_flags or
  3710. * Contiguous Frame ID is not supported by HC.
  3711. */
  3712. int xhci_queue_isoc_tx_prepare(struct xhci_hcd *xhci, gfp_t mem_flags,
  3713. struct urb *urb, int slot_id, unsigned int ep_index)
  3714. {
  3715. struct xhci_virt_device *xdev;
  3716. struct xhci_ring *ep_ring;
  3717. struct xhci_ep_ctx *ep_ctx;
  3718. int start_frame;
  3719. int num_tds, num_trbs, i;
  3720. int ret;
  3721. struct xhci_virt_ep *xep;
  3722. int ist;
  3723. xdev = xhci->devs[slot_id];
  3724. xep = &xhci->devs[slot_id]->eps[ep_index];
  3725. ep_ring = xdev->eps[ep_index].ring;
  3726. ep_ctx = xhci_get_ep_ctx(xhci, xdev->out_ctx, ep_index);
  3727. num_trbs = 0;
  3728. num_tds = urb->number_of_packets;
  3729. for (i = 0; i < num_tds; i++)
  3730. num_trbs += count_isoc_trbs_needed(urb, i);
/*
 * Check the ring to guarantee there is enough room for the whole URB.
 * Do not insert any TD of the URB into the ring if the check fails.
 */
  3734. ret = prepare_ring(xhci, ep_ring, GET_EP_CTX_STATE(ep_ctx),
  3735. num_trbs, mem_flags);
  3736. if (ret)
  3737. return ret;
  3738. /*
  3739. * Check interval value. This should be done before we start to
  3740. * calculate the start frame value.
  3741. */
  3742. check_interval(urb, ep_ctx);
  3743. /* Calculate the start frame and put it in urb->start_frame. */
  3744. if (HCC_CFC(xhci->hcc_params) && !list_empty(&ep_ring->td_list)) {
  3745. if (GET_EP_CTX_STATE(ep_ctx) == EP_STATE_RUNNING) {
  3746. urb->start_frame = xep->next_frame_id;
  3747. goto skip_start_over;
  3748. }
  3749. }
  3750. start_frame = readl(&xhci->run_regs->microframe_index);
  3751. start_frame &= 0x3fff;
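/* MFINDEX is a free-running 14-bit microframe counter, hence the 0x3fff mask. */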
/*
 * Round up to the next frame and account for the time before the TRB
 * really gets scheduled by the hardware.
 */
  3756. ist = HCS_IST(xhci->hcs_params2) & 0x7;
  3757. if (HCS_IST(xhci->hcs_params2) & (1 << 3))
  3758. ist <<= 3;
  3759. start_frame += ist + XHCI_CFC_DELAY;
  3760. start_frame = roundup(start_frame, 8);
/*
 * Round up to the next ESIT (Endpoint Service Interval Time) if the
 * ESIT is greater than 8 microframes.
 */
  3765. if (urb->dev->speed == USB_SPEED_LOW ||
  3766. urb->dev->speed == USB_SPEED_FULL) {
  3767. start_frame = roundup(start_frame, urb->interval << 3);
  3768. urb->start_frame = start_frame >> 3;
  3769. } else {
  3770. start_frame = roundup(start_frame, urb->interval);
  3771. urb->start_frame = start_frame;
  3772. }
  3773. skip_start_over:
  3774. return xhci_queue_isoc_tx(xhci, mem_flags, urb, slot_id, ep_index);
  3775. }
  3776. /**** Command Ring Operations ****/
  3777. /* Generic function for queueing a command TRB on the command ring.
  3778. * Check to make sure there's room on the command ring for one command TRB.
  3779. * Also check that there's room reserved for commands that must not fail.
  3780. * If this is a command that must not fail, meaning command_must_succeed = TRUE,
  3781. * then only check for the number of reserved spots.
  3782. * Don't decrement xhci->cmd_ring_reserved_trbs after we've queued the TRB
  3783. * because the command event handler may want to resubmit a failed command.
  3784. */
  3785. static int queue_command(struct xhci_hcd *xhci, struct xhci_command *cmd,
  3786. u32 field1, u32 field2,
  3787. u32 field3, u32 field4, bool command_must_succeed)
  3788. {
  3789. int reserved_trbs = xhci->cmd_ring_reserved_trbs;
  3790. int ret;
  3791. if ((xhci->xhc_state & XHCI_STATE_DYING) ||
  3792. (xhci->xhc_state & XHCI_STATE_HALTED)) {
  3793. xhci_dbg(xhci, "xHCI dying or halted, can't queue_command\n");
  3794. return -ESHUTDOWN;
  3795. }
	if (!command_must_succeed)
		reserved_trbs++;

	ret = prepare_ring(xhci, xhci->cmd_ring, EP_STATE_RUNNING,
			reserved_trbs, GFP_ATOMIC);
	if (ret < 0) {
		xhci_err(xhci, "ERR: No room for command on command ring\n");
		if (command_must_succeed)
			xhci_err(xhci, "ERR: Reserved TRB counting for "
					"unfailable commands failed.\n");
		return ret;
	}

	cmd->command_trb = xhci->cmd_ring->enqueue;

	/* if there are no other commands queued we start the timeout timer */
	if (list_empty(&xhci->cmd_list)) {
		xhci->current_cmd = cmd;
		xhci_mod_cmd_timer(xhci);
	}

	list_add_tail(&cmd->cmd_list, &xhci->cmd_list);

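	/* Write the TRB with the producer cycle bit so the xHC will consume it. */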
	queue_trb(xhci, xhci->cmd_ring, false, field1, field2, field3,
			field4 | xhci->cmd_ring->cycle_state);
	return 0;
}

/* Queue a slot enable or disable request on the command ring */
int xhci_queue_slot_control(struct xhci_hcd *xhci, struct xhci_command *cmd,
		u32 trb_type, u32 slot_id)
{
	return queue_command(xhci, cmd, 0, 0, 0,
			TRB_TYPE(trb_type) | SLOT_ID_FOR_TRB(slot_id), false);
}

/* Queue an address device command TRB */
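/*
 * SETUP_CONTEXT_ONLY sets TRB_BSR (Block Set Address Request) so the xHC
 * evaluates the input context without issuing a USB SET_ADDRESS to the device.
 */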
int xhci_queue_address_device(struct xhci_hcd *xhci, struct xhci_command *cmd,
		dma_addr_t in_ctx_ptr, u32 slot_id, enum xhci_setup_dev setup)
{
	return queue_command(xhci, cmd, lower_32_bits(in_ctx_ptr),
			upper_32_bits(in_ctx_ptr), 0,
			TRB_TYPE(TRB_ADDR_DEV) | SLOT_ID_FOR_TRB(slot_id)
			| (setup == SETUP_CONTEXT_ONLY ? TRB_BSR : 0), false);
}

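/* Queue a vendor-specific command TRB with caller-supplied TRB fields */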
int xhci_queue_vendor_command(struct xhci_hcd *xhci, struct xhci_command *cmd,
		u32 field1, u32 field2, u32 field3, u32 field4)
{
	return queue_command(xhci, cmd, field1, field2, field3, field4, false);
}

/* Queue a reset device command TRB */
int xhci_queue_reset_device(struct xhci_hcd *xhci, struct xhci_command *cmd,
		u32 slot_id)
{
	return queue_command(xhci, cmd, 0, 0, 0,
			TRB_TYPE(TRB_RESET_DEV) | SLOT_ID_FOR_TRB(slot_id),
			false);
}

/* Queue a configure endpoint command TRB */
int xhci_queue_configure_endpoint(struct xhci_hcd *xhci,
		struct xhci_command *cmd, dma_addr_t in_ctx_ptr,
		u32 slot_id, bool command_must_succeed)
{
	return queue_command(xhci, cmd, lower_32_bits(in_ctx_ptr),
			upper_32_bits(in_ctx_ptr), 0,
			TRB_TYPE(TRB_CONFIG_EP) | SLOT_ID_FOR_TRB(slot_id),
			command_must_succeed);
}

/* Queue an evaluate context command TRB */
int xhci_queue_evaluate_context(struct xhci_hcd *xhci, struct xhci_command *cmd,
		dma_addr_t in_ctx_ptr, u32 slot_id, bool command_must_succeed)
{
	return queue_command(xhci, cmd, lower_32_bits(in_ctx_ptr),
			upper_32_bits(in_ctx_ptr), 0,
			TRB_TYPE(TRB_EVAL_CONTEXT) | SLOT_ID_FOR_TRB(slot_id),
			command_must_succeed);
}

/*
 * Suspend is set to indicate "Stop Endpoint Command" is being issued to stop
 * activity on an endpoint that is about to be suspended.
 */
int xhci_queue_stop_endpoint(struct xhci_hcd *xhci, struct xhci_command *cmd,
			     int slot_id, unsigned int ep_index, int suspend)
{
	u32 trb_slot_id = SLOT_ID_FOR_TRB(slot_id);
	u32 trb_ep_index = EP_INDEX_FOR_TRB(ep_index);
	u32 type = TRB_TYPE(TRB_STOP_RING);
	u32 trb_suspend = SUSPEND_PORT_FOR_TRB(suspend);

	return queue_command(xhci, cmd, 0, 0, 0,
			trb_slot_id | trb_ep_index | type | trb_suspend, false);
}

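/*
 * Queue a reset endpoint command TRB.  TRB_TSP (Transfer State Preserve) keeps
 * the endpoint's data toggle / sequence number across a soft reset.
 */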
int xhci_queue_reset_ep(struct xhci_hcd *xhci, struct xhci_command *cmd,
			int slot_id, unsigned int ep_index,
			enum xhci_ep_reset_type reset_type)
{
	u32 trb_slot_id = SLOT_ID_FOR_TRB(slot_id);
	u32 trb_ep_index = EP_INDEX_FOR_TRB(ep_index);
	u32 type = TRB_TYPE(TRB_RESET_EP);

	if (reset_type == EP_SOFT_RESET)
		type |= TRB_TSP;

	return queue_command(xhci, cmd, 0, 0, 0,
			trb_slot_id | trb_ep_index | type, false);
}