// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2011 Samsung Electronics Co., Ltd.
 *		http://www.samsung.com
 *
 * Copyright 2008 Openmoko, Inc.
 * Copyright 2008 Simtec Electronics
 *	Ben Dooks <ben@simtec.co.uk>
 *	http://armlinux.simtec.co.uk/
 *
 * S3C USB2.0 High-speed / OTG driver
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/spinlock.h>
#include <linux/interrupt.h>
#include <linux/platform_device.h>
#include <linux/dma-mapping.h>
#include <linux/mutex.h>
#include <linux/seq_file.h>
#include <linux/delay.h>
#include <linux/io.h>
#include <linux/slab.h>
#include <linux/usb/ch9.h>
#include <linux/usb/gadget.h>
#include <linux/usb/phy.h>
#include <linux/usb/composite.h>

#include "core.h"
#include "hw.h"

/* conversion functions */
static inline struct dwc2_hsotg_req *our_req(struct usb_request *req)
{
	return container_of(req, struct dwc2_hsotg_req, req);
}

static inline struct dwc2_hsotg_ep *our_ep(struct usb_ep *ep)
{
	return container_of(ep, struct dwc2_hsotg_ep, ep);
}

static inline struct dwc2_hsotg *to_hsotg(struct usb_gadget *gadget)
{
	return container_of(gadget, struct dwc2_hsotg, gadget);
}

static inline void dwc2_set_bit(struct dwc2_hsotg *hsotg, u32 offset, u32 val)
{
	dwc2_writel(hsotg, dwc2_readl(hsotg, offset) | val, offset);
}

static inline void dwc2_clear_bit(struct dwc2_hsotg *hsotg, u32 offset, u32 val)
{
	dwc2_writel(hsotg, dwc2_readl(hsotg, offset) & ~val, offset);
}

static inline struct dwc2_hsotg_ep *index_to_ep(struct dwc2_hsotg *hsotg,
						u32 ep_index, u32 dir_in)
{
	if (dir_in)
		return hsotg->eps_in[ep_index];
	else
		return hsotg->eps_out[ep_index];
}

/* forward declaration of functions */
static void dwc2_hsotg_dump(struct dwc2_hsotg *hsotg);

/**
 * using_dma - return the DMA status of the driver.
 * @hsotg: The driver state.
 *
 * Return true if we're using DMA.
 *
 * Currently, we have the DMA support code worked into everywhere
 * that needs it, but the AMBA DMA implementation in the hardware can
 * only DMA from 32bit aligned addresses. This means that gadgets such
 * as the CDC Ethernet cannot work as they often pass packets which are
 * not 32bit aligned.
 *
 * Unfortunately the choice to use DMA or not is global to the controller
 * and seems to be only settable when the controller is being put through
 * a core reset. This means we either need to fix the gadgets to take
 * account of DMA alignment, or add bounce buffers (yuerk).
 *
 * g_using_dma is set depending on dts flag.
 */
static inline bool using_dma(struct dwc2_hsotg *hsotg)
{
	return hsotg->params.g_dma;
}

/*
 * using_desc_dma - return the descriptor DMA status of the driver.
 * @hsotg: The driver state.
 *
 * Return true if we're using descriptor DMA.
 */
static inline bool using_desc_dma(struct dwc2_hsotg *hsotg)
{
	return hsotg->params.g_dma_desc;
}

/**
 * dwc2_gadget_incr_frame_num - Increments the targeted frame number.
 * @hs_ep: The endpoint
 *
 * This function will also check if the frame number overruns DSTS_SOFFN_LIMIT.
 * If an overrun occurs it will wrap the value and set the frame_overrun flag.
 */
static inline void dwc2_gadget_incr_frame_num(struct dwc2_hsotg_ep *hs_ep)
{
	struct dwc2_hsotg *hsotg = hs_ep->parent;
	u16 limit = DSTS_SOFFN_LIMIT;

	if (hsotg->gadget.speed != USB_SPEED_HIGH)
		limit >>= 3;

	hs_ep->target_frame += hs_ep->interval;
	if (hs_ep->target_frame > limit) {
		hs_ep->frame_overrun = true;
		hs_ep->target_frame &= limit;
	} else {
		hs_ep->frame_overrun = false;
	}
}
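
/*
 * Worked example with illustrative numbers: at high speed the limit is
 * the full DSTS_SOFFN_LIMIT (micro)frame mask (e.g. 0x3fff); at full
 * speed it is shifted right by 3 because the counter advances once per
 * 1 ms frame rather than once per 125 us microframe. With interval = 8
 * and target_frame = limit - 3, the sum exceeds the limit, so the value
 * wraps to (limit - 3 + 8) & limit and frame_overrun is set to record
 * the wrap-around.
 */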

/**
 * dwc2_gadget_dec_frame_num_by_one - Decrements the targeted frame number
 *                                    by one.
 * @hs_ep: The endpoint.
 *
 * This function is used in the service interval based scheduling flow to
 * calculate the descriptor frame number field value. In service interval
 * mode the frame number in the descriptor should point to the last
 * (micro)frame in the interval.
 */
static inline void dwc2_gadget_dec_frame_num_by_one(struct dwc2_hsotg_ep *hs_ep)
{
	struct dwc2_hsotg *hsotg = hs_ep->parent;
	u16 limit = DSTS_SOFFN_LIMIT;

	if (hsotg->gadget.speed != USB_SPEED_HIGH)
		limit >>= 3;

	if (hs_ep->target_frame)
		hs_ep->target_frame -= 1;
	else
		hs_ep->target_frame = limit;
}

/**
 * dwc2_hsotg_en_gsint - enable one or more of the general interrupts
 * @hsotg: The device state
 * @ints: A bitmask of the interrupts to enable
 */
static void dwc2_hsotg_en_gsint(struct dwc2_hsotg *hsotg, u32 ints)
{
	u32 gsintmsk = dwc2_readl(hsotg, GINTMSK);
	u32 new_gsintmsk;

	new_gsintmsk = gsintmsk | ints;

	if (new_gsintmsk != gsintmsk) {
		dev_dbg(hsotg->dev, "gsintmsk now 0x%08x\n", new_gsintmsk);
		dwc2_writel(hsotg, new_gsintmsk, GINTMSK);
	}
}

/**
 * dwc2_hsotg_disable_gsint - disable one or more of the general interrupts
 * @hsotg: The device state
 * @ints: A bitmask of the interrupts to disable
 */
static void dwc2_hsotg_disable_gsint(struct dwc2_hsotg *hsotg, u32 ints)
{
	u32 gsintmsk = dwc2_readl(hsotg, GINTMSK);
	u32 new_gsintmsk;

	new_gsintmsk = gsintmsk & ~ints;

	if (new_gsintmsk != gsintmsk)
		dwc2_writel(hsotg, new_gsintmsk, GINTMSK);
}

/**
 * dwc2_hsotg_ctrl_epint - enable/disable an endpoint irq
 * @hsotg: The device state
 * @ep: The endpoint index
 * @dir_in: True if direction is in.
 * @en: The enable value, true to enable
 *
 * Set or clear the mask for an individual endpoint's interrupt
 * request.
 */
static void dwc2_hsotg_ctrl_epint(struct dwc2_hsotg *hsotg,
				  unsigned int ep, unsigned int dir_in,
				  unsigned int en)
{
	unsigned long flags;
	u32 bit = 1 << ep;
	u32 daint;

	if (!dir_in)
		bit <<= 16;

	local_irq_save(flags);
	daint = dwc2_readl(hsotg, DAINTMSK);
	if (en)
		daint |= bit;
	else
		daint &= ~bit;
	dwc2_writel(hsotg, daint, DAINTMSK);
	local_irq_restore(flags);
}
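
/*
 * Resulting DAINTMSK layout, as implied by the shift above: IN endpoint
 * bits occupy bits 0..15 and OUT endpoint bits occupy bits 16..31. For
 * example, enabling the EP2 IN interrupt sets bit 2 (0x00000004), while
 * enabling EP2 OUT sets bit 18 (0x00040000).
 */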

/**
 * dwc2_hsotg_tx_fifo_count - return count of TX FIFOs in device mode
 *
 * @hsotg: Programming view of the DWC_otg controller
 */
int dwc2_hsotg_tx_fifo_count(struct dwc2_hsotg *hsotg)
{
	if (hsotg->hw_params.en_multiple_tx_fifo)
		/* In dedicated FIFO mode we need count of IN EPs */
		return hsotg->hw_params.num_dev_in_eps;
	else
		/* In shared FIFO mode we need count of Periodic IN EPs */
		return hsotg->hw_params.num_dev_perio_in_ep;
}

/**
 * dwc2_hsotg_tx_fifo_total_depth - return total FIFO depth available for
 * device mode TX FIFOs
 *
 * @hsotg: Programming view of the DWC_otg controller
 */
int dwc2_hsotg_tx_fifo_total_depth(struct dwc2_hsotg *hsotg)
{
	int addr;
	int tx_addr_max;
	u32 np_tx_fifo_size;

	np_tx_fifo_size = min_t(u32, hsotg->hw_params.dev_nperio_tx_fifo_size,
				hsotg->params.g_np_tx_fifo_size);

	/* Get Endpoint Info Control block size in DWORDs. */
	tx_addr_max = hsotg->hw_params.total_fifo_size;

	addr = hsotg->params.g_rx_fifo_size + np_tx_fifo_size;
	if (tx_addr_max <= addr)
		return 0;

	return tx_addr_max - addr;
}
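
/*
 * For example, assuming a total_fifo_size of 0x1f80 words, a
 * g_rx_fifo_size of 0x200 words and a clamped non-periodic TX FIFO of
 * 0x100 words, the depth left over for the dedicated/periodic TX FIFOs
 * would be 0x1f80 - (0x200 + 0x100) = 0x1c80 words. (Illustrative
 * values only; the real ones come from hardware and devicetree.)
 */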

/**
 * dwc2_gadget_wkup_alert_handler - Handler for WKUP_ALERT interrupt
 *
 * @hsotg: Programming view of the DWC_otg controller
 */
static void dwc2_gadget_wkup_alert_handler(struct dwc2_hsotg *hsotg)
{
	u32 gintsts2;
	u32 gintmsk2;

	gintsts2 = dwc2_readl(hsotg, GINTSTS2);
	gintmsk2 = dwc2_readl(hsotg, GINTMSK2);
	gintsts2 &= gintmsk2;

	if (gintsts2 & GINTSTS2_WKUP_ALERT_INT) {
		dev_dbg(hsotg->dev, "%s: Wkup_Alert_Int\n", __func__);
		dwc2_set_bit(hsotg, GINTSTS2, GINTSTS2_WKUP_ALERT_INT);
		dwc2_set_bit(hsotg, DCTL, DCTL_RMTWKUPSIG);
	}
}

/**
 * dwc2_hsotg_tx_fifo_average_depth - returns average depth of device mode
 * TX FIFOs
 *
 * @hsotg: Programming view of the DWC_otg controller
 */
int dwc2_hsotg_tx_fifo_average_depth(struct dwc2_hsotg *hsotg)
{
	int tx_fifo_count;
	int tx_fifo_depth;

	tx_fifo_depth = dwc2_hsotg_tx_fifo_total_depth(hsotg);

	tx_fifo_count = dwc2_hsotg_tx_fifo_count(hsotg);

	if (!tx_fifo_count)
		return tx_fifo_depth;
	else
		return tx_fifo_depth / tx_fifo_count;
}

/**
 * dwc2_hsotg_init_fifo - initialise non-periodic FIFOs
 * @hsotg: The device instance.
 */
static void dwc2_hsotg_init_fifo(struct dwc2_hsotg *hsotg)
{
	unsigned int ep;
	unsigned int addr;
	int timeout;

	u32 val;
	u32 *txfsz = hsotg->params.g_tx_fifo_size;

	/* Reset fifo map if not correctly cleared during previous session */
	WARN_ON(hsotg->fifo_map);
	hsotg->fifo_map = 0;

	/* set RX/NPTX FIFO sizes */
	dwc2_writel(hsotg, hsotg->params.g_rx_fifo_size, GRXFSIZ);
	dwc2_writel(hsotg, (hsotg->params.g_rx_fifo_size <<
		    FIFOSIZE_STARTADDR_SHIFT) |
		    (hsotg->params.g_np_tx_fifo_size << FIFOSIZE_DEPTH_SHIFT),
		    GNPTXFSIZ);

	/*
	 * arrange all the rest of the TX FIFOs, as some versions of this
	 * block have overlapping default addresses. This also ensures
	 * that if the settings have been changed, then they are set to
	 * known values.
	 */

	/* start at the end of the GNPTXFSIZ, rounded up */
	addr = hsotg->params.g_rx_fifo_size + hsotg->params.g_np_tx_fifo_size;

	/*
	 * Configure fifos sizes from provided configuration and assign
	 * them to endpoints dynamically according to maxpacket size value of
	 * given endpoint.
	 */
	for (ep = 1; ep < MAX_EPS_CHANNELS; ep++) {
		if (!txfsz[ep])
			continue;
		val = addr;
		val |= txfsz[ep] << FIFOSIZE_DEPTH_SHIFT;
		WARN_ONCE(addr + txfsz[ep] > hsotg->fifo_mem,
			  "insufficient fifo memory");
		addr += txfsz[ep];

		dwc2_writel(hsotg, val, DPTXFSIZN(ep));
		val = dwc2_readl(hsotg, DPTXFSIZN(ep));
	}

	dwc2_writel(hsotg, hsotg->hw_params.total_fifo_size |
		    addr << GDFIFOCFG_EPINFOBASE_SHIFT,
		    GDFIFOCFG);
	/*
	 * according to p428 of the design guide, we need to ensure that
	 * all fifos are flushed before continuing
	 */

	dwc2_writel(hsotg, GRSTCTL_TXFNUM(0x10) | GRSTCTL_TXFFLSH |
		    GRSTCTL_RXFFLSH, GRSTCTL);

	/* wait until the fifos are both flushed */
	timeout = 100;
	while (1) {
		val = dwc2_readl(hsotg, GRSTCTL);

		if ((val & (GRSTCTL_TXFFLSH | GRSTCTL_RXFFLSH)) == 0)
			break;

		if (--timeout == 0) {
			dev_err(hsotg->dev,
				"%s: timeout flushing fifos (GRSTCTL=%08x)\n",
				__func__, val);
			break;
		}

		udelay(1);
	}

	dev_dbg(hsotg->dev, "FIFOs reset, timeout at %d\n", timeout);
}
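
/*
 * Sketch of the DPTXFSIZN programming above, with assumed values: each
 * write packs the FIFO start address into the low half-word and the
 * depth into the high half-word. With g_rx_fifo_size = 0x200,
 * g_np_tx_fifo_size = 0x100 and txfsz[1] = 0x80, FIFO 1 would be
 * programmed as (0x80 << FIFOSIZE_DEPTH_SHIFT) | 0x300, placing it
 * immediately after the non-periodic TX FIFO.
 */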

/**
 * dwc2_hsotg_ep_alloc_request - allocate a USB request structure
 * @ep: USB endpoint to allocate request for.
 * @flags: Allocation flags
 *
 * Allocate a new USB request structure appropriate for the specified endpoint
 */
static struct usb_request *dwc2_hsotg_ep_alloc_request(struct usb_ep *ep,
						       gfp_t flags)
{
	struct dwc2_hsotg_req *req;

	req = kzalloc(sizeof(*req), flags);
	if (!req)
		return NULL;

	INIT_LIST_HEAD(&req->queue);

	return &req->req;
}

/**
 * is_ep_periodic - return true if the endpoint is in periodic mode.
 * @hs_ep: The endpoint to query.
 *
 * Returns true if the endpoint is in periodic mode, meaning it is being
 * used for an Interrupt or ISO transfer.
 */
static inline int is_ep_periodic(struct dwc2_hsotg_ep *hs_ep)
{
	return hs_ep->periodic;
}

/**
 * dwc2_hsotg_unmap_dma - unmap the DMA memory being used for the request
 * @hsotg: The device state.
 * @hs_ep: The endpoint for the request
 * @hs_req: The request being processed.
 *
 * This is the reverse of dwc2_hsotg_map_dma(), called for the completion
 * of a request to ensure the buffer is ready for access by the caller.
 */
static void dwc2_hsotg_unmap_dma(struct dwc2_hsotg *hsotg,
				 struct dwc2_hsotg_ep *hs_ep,
				 struct dwc2_hsotg_req *hs_req)
{
	struct usb_request *req = &hs_req->req;

	usb_gadget_unmap_request(&hsotg->gadget, req, hs_ep->map_dir);
}

/*
 * dwc2_gadget_alloc_ctrl_desc_chains - allocate DMA descriptor chains
 * for Control endpoint
 * @hsotg: The device state.
 *
 * This function will allocate 4 descriptor chains for EP 0: two for the
 * Setup stage, and one each for the IN and OUT data/status transactions.
 */
static int dwc2_gadget_alloc_ctrl_desc_chains(struct dwc2_hsotg *hsotg)
{
	hsotg->setup_desc[0] =
		dmam_alloc_coherent(hsotg->dev,
				    sizeof(struct dwc2_dma_desc),
				    &hsotg->setup_desc_dma[0],
				    GFP_KERNEL);
	if (!hsotg->setup_desc[0])
		goto fail;

	hsotg->setup_desc[1] =
		dmam_alloc_coherent(hsotg->dev,
				    sizeof(struct dwc2_dma_desc),
				    &hsotg->setup_desc_dma[1],
				    GFP_KERNEL);
	if (!hsotg->setup_desc[1])
		goto fail;

	hsotg->ctrl_in_desc =
		dmam_alloc_coherent(hsotg->dev,
				    sizeof(struct dwc2_dma_desc),
				    &hsotg->ctrl_in_desc_dma,
				    GFP_KERNEL);
	if (!hsotg->ctrl_in_desc)
		goto fail;

	hsotg->ctrl_out_desc =
		dmam_alloc_coherent(hsotg->dev,
				    sizeof(struct dwc2_dma_desc),
				    &hsotg->ctrl_out_desc_dma,
				    GFP_KERNEL);
	if (!hsotg->ctrl_out_desc)
		goto fail;

	return 0;

fail:
	return -ENOMEM;
}
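
/*
 * Note that the dmam_alloc_coherent() allocations above are
 * device-managed, so the descriptor memory is freed automatically when
 * the device is unbound; that is why the fail path can simply return
 * -ENOMEM without unwinding the earlier allocations.
 */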

/**
 * dwc2_hsotg_write_fifo - write packet data to the TxFIFO
 * @hsotg: The controller state.
 * @hs_ep: The endpoint we're going to write for.
 * @hs_req: The request to write data for.
 *
 * This is called when the TxFIFO has some space in it to hold a new
 * transmission and we have something to give it. The actual setup of
 * the data size is done elsewhere, so all we have to do is to actually
 * write the data.
 *
 * The return value is zero if there is more space (or nothing was done)
 * otherwise -ENOSPC is returned if the FIFO space was used up.
 *
 * This routine is only needed for PIO.
 */
static int dwc2_hsotg_write_fifo(struct dwc2_hsotg *hsotg,
				 struct dwc2_hsotg_ep *hs_ep,
				 struct dwc2_hsotg_req *hs_req)
{
	bool periodic = is_ep_periodic(hs_ep);
	u32 gnptxsts = dwc2_readl(hsotg, GNPTXSTS);
	int buf_pos = hs_req->req.actual;
	int to_write = hs_ep->size_loaded;
	void *data;
	int can_write;
	int pkt_round;
	int max_transfer;

	to_write -= (buf_pos - hs_ep->last_load);

	/* if there's nothing to write, get out early */
	if (to_write == 0)
		return 0;

	if (periodic && !hsotg->dedicated_fifos) {
		u32 epsize = dwc2_readl(hsotg, DIEPTSIZ(hs_ep->index));
		int size_left;
		int size_done;

		/*
		 * work out how much data was loaded so we can calculate
		 * how much data is left in the fifo.
		 */

		size_left = DXEPTSIZ_XFERSIZE_GET(epsize);

		/*
		 * if shared fifo, we cannot write anything until the
		 * previous data has been completely sent.
		 */
		if (hs_ep->fifo_load != 0) {
			dwc2_hsotg_en_gsint(hsotg, GINTSTS_PTXFEMP);
			return -ENOSPC;
		}

		dev_dbg(hsotg->dev, "%s: left=%d, load=%d, fifo=%d, size %d\n",
			__func__, size_left,
			hs_ep->size_loaded, hs_ep->fifo_load, hs_ep->fifo_size);

		/* how much of the data has moved */
		size_done = hs_ep->size_loaded - size_left;

		/* how much data is left in the fifo */
		can_write = hs_ep->fifo_load - size_done;
		dev_dbg(hsotg->dev, "%s: => can_write1=%d\n",
			__func__, can_write);

		can_write = hs_ep->fifo_size - can_write;
		dev_dbg(hsotg->dev, "%s: => can_write2=%d\n",
			__func__, can_write);

		if (can_write <= 0) {
			dwc2_hsotg_en_gsint(hsotg, GINTSTS_PTXFEMP);
			return -ENOSPC;
		}
	} else if (hsotg->dedicated_fifos && hs_ep->index != 0) {
		can_write = dwc2_readl(hsotg,
				       DTXFSTS(hs_ep->fifo_index));

		can_write &= 0xffff;
		can_write *= 4;
	} else {
		if (GNPTXSTS_NP_TXQ_SPC_AVAIL_GET(gnptxsts) == 0) {
			dev_dbg(hsotg->dev,
				"%s: no queue slots available (0x%08x)\n",
				__func__, gnptxsts);

			dwc2_hsotg_en_gsint(hsotg, GINTSTS_NPTXFEMP);
			return -ENOSPC;
		}

		can_write = GNPTXSTS_NP_TXF_SPC_AVAIL_GET(gnptxsts);
		can_write *= 4;	/* fifo size is in 32bit quantities. */
	}

	max_transfer = hs_ep->ep.maxpacket * hs_ep->mc;

	dev_dbg(hsotg->dev, "%s: GNPTXSTS=%08x, can=%d, to=%d, max_transfer %d\n",
		__func__, gnptxsts, can_write, to_write, max_transfer);

	/*
	 * limit to 512 bytes of data, it seems at least on the non-periodic
	 * FIFO, requests of >512 cause the endpoint to get stuck with a
	 * fragment of the end of the transfer in it.
	 */
	if (can_write > 512 && !periodic)
		can_write = 512;

	/*
	 * limit the write to one max-packet size worth of data, but allow
	 * the transfer to return that it did not run out of fifo space
	 * doing it.
	 */
	if (to_write > max_transfer) {
		to_write = max_transfer;

		/* it's needed only when we do not use dedicated fifos */
		if (!hsotg->dedicated_fifos)
			dwc2_hsotg_en_gsint(hsotg,
					    periodic ? GINTSTS_PTXFEMP :
					    GINTSTS_NPTXFEMP);
	}

	/* see if we can write data */

	if (to_write > can_write) {
		to_write = can_write;
		pkt_round = to_write % max_transfer;

		/*
		 * Round the write down to an
		 * exact number of packets.
		 *
		 * Note, we do not currently check to see if we can ever
		 * write a full packet or not to the FIFO.
		 */

		if (pkt_round)
			to_write -= pkt_round;

		/*
		 * enable correct FIFO interrupt to alert us when there
		 * is more room left.
		 */

		/* it's needed only when we do not use dedicated fifos */
		if (!hsotg->dedicated_fifos)
			dwc2_hsotg_en_gsint(hsotg,
					    periodic ? GINTSTS_PTXFEMP :
					    GINTSTS_NPTXFEMP);
	}

	dev_dbg(hsotg->dev, "write %d/%d, can_write %d, done %d\n",
		to_write, hs_req->req.length, can_write, buf_pos);

	if (to_write <= 0)
		return -ENOSPC;

	hs_req->req.actual = buf_pos + to_write;
	hs_ep->total_data += to_write;

	if (periodic)
		hs_ep->fifo_load += to_write;

	to_write = DIV_ROUND_UP(to_write, 4);
	data = hs_req->req.buf + buf_pos;

	dwc2_writel_rep(hsotg, EPFIFO(hs_ep->index), data, to_write);

	return (to_write >= can_write) ? -ENOSPC : 0;
}
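
/*
 * A worked example of the dedicated-FIFO branch above, with made-up
 * numbers: a DTXFSTS readout of 0x100 means 0x100 32-bit words are
 * free, so can_write = 0x100 * 4 = 1024 bytes. For a non-periodic
 * endpoint this is then capped at 512 bytes, and a 1500-byte bulk
 * request (maxpacket 512) is further limited to one max_transfer of
 * 512 bytes, so exactly one full packet is written per call.
 */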

/**
 * get_ep_limit - get the maximum data length for this endpoint
 * @hs_ep: The endpoint
 *
 * Return the maximum data that can be queued in one go on a given endpoint
 * so that transfers that are too long can be split.
 */
static unsigned int get_ep_limit(struct dwc2_hsotg_ep *hs_ep)
{
	int index = hs_ep->index;
	unsigned int maxsize;
	unsigned int maxpkt;

	if (index != 0) {
		maxsize = DXEPTSIZ_XFERSIZE_LIMIT + 1;
		maxpkt = DXEPTSIZ_PKTCNT_LIMIT + 1;
	} else {
		maxsize = 64 + 64;
		if (hs_ep->dir_in)
			maxpkt = DIEPTSIZ0_PKTCNT_LIMIT + 1;
		else
			maxpkt = 2;
	}

	/* we made the constant loading easier above by using +1 */
	maxpkt--;
	maxsize--;

	/*
	 * constrain by packet count if maxpkts*pktsize is greater
	 * than the length register size.
	 */

	if ((maxpkt * hs_ep->ep.maxpacket) < maxsize)
		maxsize = maxpkt * hs_ep->ep.maxpacket;

	return maxsize;
}
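
/*
 * Example, assuming the usual register widths: for a non-EP0 bulk
 * endpoint with maxpacket = 512, the transfer-size field allows
 * DXEPTSIZ_XFERSIZE_LIMIT bytes, but if the packet-count field can only
 * describe, say, 1023 packets, the effective limit becomes
 * 1023 * 512 = 523776 bytes, whichever of the two is smaller.
 */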

/**
 * dwc2_hsotg_read_frameno - read current frame number
 * @hsotg: The device instance
 *
 * Return the current frame number
 */
static u32 dwc2_hsotg_read_frameno(struct dwc2_hsotg *hsotg)
{
	u32 dsts;

	dsts = dwc2_readl(hsotg, DSTS);
	dsts &= DSTS_SOFFN_MASK;
	dsts >>= DSTS_SOFFN_SHIFT;

	return dsts;
}

/**
 * dwc2_gadget_get_chain_limit - get the maximum data payload value of the
 * DMA descriptor chain prepared for specific endpoint
 * @hs_ep: The endpoint
 *
 * Return the maximum data that can be queued in one go on a given endpoint
 * depending on its descriptor chain capacity so that transfers that
 * are too long can be split.
 */
static unsigned int dwc2_gadget_get_chain_limit(struct dwc2_hsotg_ep *hs_ep)
{
	const struct usb_endpoint_descriptor *ep_desc = hs_ep->ep.desc;
	int is_isoc = hs_ep->isochronous;
	unsigned int maxsize;
	u32 mps = hs_ep->ep.maxpacket;
	int dir_in = hs_ep->dir_in;

	if (is_isoc)
		maxsize = (hs_ep->dir_in ? DEV_DMA_ISOC_TX_NBYTES_LIMIT :
					   DEV_DMA_ISOC_RX_NBYTES_LIMIT) *
					   MAX_DMA_DESC_NUM_HS_ISOC;
	else
		maxsize = DEV_DMA_NBYTES_LIMIT * MAX_DMA_DESC_NUM_GENERIC;

	/* Interrupt OUT EP with mps not multiple of 4 */
	if (hs_ep->index)
		if (usb_endpoint_xfer_int(ep_desc) && !dir_in && (mps % 4))
			maxsize = mps * MAX_DMA_DESC_NUM_GENERIC;

	return maxsize;
}

/*
 * dwc2_gadget_get_desc_params - get DMA descriptor parameters.
 * @hs_ep: The endpoint
 * @mask: RX/TX bytes mask to be defined
 *
 * Returns maximum data payload for one descriptor after analyzing endpoint
 * characteristics.
 * DMA descriptor transfer bytes limit depends on EP type:
 * Control out - MPS,
 * Isochronous - descriptor rx/tx bytes bitfield limit,
 * Control In/Bulk/Interrupt - multiple of mps. This ensures that a single
 * packet is never split across multiple descriptors.
 * Interrupt OUT - if mps is not a multiple of 4 then a single packet
 * corresponds to a single descriptor.
 *
 * Selects the corresponding mask for RX/TX bytes as well.
 */
static u32 dwc2_gadget_get_desc_params(struct dwc2_hsotg_ep *hs_ep, u32 *mask)
{
	const struct usb_endpoint_descriptor *ep_desc = hs_ep->ep.desc;
	u32 mps = hs_ep->ep.maxpacket;
	int dir_in = hs_ep->dir_in;
	u32 desc_size = 0;

	if (!hs_ep->index && !dir_in) {
		desc_size = mps;
		*mask = DEV_DMA_NBYTES_MASK;
	} else if (hs_ep->isochronous) {
		if (dir_in) {
			desc_size = DEV_DMA_ISOC_TX_NBYTES_LIMIT;
			*mask = DEV_DMA_ISOC_TX_NBYTES_MASK;
		} else {
			desc_size = DEV_DMA_ISOC_RX_NBYTES_LIMIT;
			*mask = DEV_DMA_ISOC_RX_NBYTES_MASK;
		}
	} else {
		desc_size = DEV_DMA_NBYTES_LIMIT;
		*mask = DEV_DMA_NBYTES_MASK;

		/* Round down desc_size to be mps multiple */
		desc_size -= desc_size % mps;
	}

	/* Interrupt OUT EP with mps not multiple of 4 */
	if (hs_ep->index)
		if (usb_endpoint_xfer_int(ep_desc) && !dir_in && (mps % 4)) {
			desc_size = mps;
			*mask = DEV_DMA_NBYTES_MASK;
		}

	return desc_size;
}
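
/*
 * Summarizing the rules above with example numbers: control OUT EP0
 * with mps = 64 gets desc_size = 64; an isochronous IN endpoint gets
 * the TX-bytes bitfield limit; a bulk endpoint with mps = 512 gets
 * DEV_DMA_NBYTES_LIMIT rounded down to a multiple of 512; and an
 * interrupt OUT endpoint with mps = 10 (not a multiple of 4) falls
 * back to one packet, desc_size = 10, per descriptor.
 */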

static void dwc2_gadget_fill_nonisoc_xfer_ddma_one(struct dwc2_hsotg_ep *hs_ep,
						   struct dwc2_dma_desc **desc,
						   dma_addr_t dma_buff,
						   unsigned int len,
						   bool true_last)
{
	int dir_in = hs_ep->dir_in;
	u32 mps = hs_ep->ep.maxpacket;
	u32 maxsize = 0;
	u32 offset = 0;
	u32 mask = 0;
	int i;

	maxsize = dwc2_gadget_get_desc_params(hs_ep, &mask);

	hs_ep->desc_count = (len / maxsize) +
				((len % maxsize) ? 1 : 0);
	if (len == 0)
		hs_ep->desc_count = 1;

	for (i = 0; i < hs_ep->desc_count; ++i) {
		(*desc)->status = 0;
		(*desc)->status |= (DEV_DMA_BUFF_STS_HBUSY
				 << DEV_DMA_BUFF_STS_SHIFT);

		if (len > maxsize) {
			if (!hs_ep->index && !dir_in)
				(*desc)->status |= (DEV_DMA_L | DEV_DMA_IOC);

			(*desc)->status |=
				maxsize << DEV_DMA_NBYTES_SHIFT & mask;
			(*desc)->buf = dma_buff + offset;

			len -= maxsize;
			offset += maxsize;
		} else {
			if (true_last)
				(*desc)->status |= (DEV_DMA_L | DEV_DMA_IOC);

			if (dir_in)
				(*desc)->status |= (len % mps) ? DEV_DMA_SHORT :
					((hs_ep->send_zlp && true_last) ?
					DEV_DMA_SHORT : 0);

			(*desc)->status |=
				len << DEV_DMA_NBYTES_SHIFT & mask;
			(*desc)->buf = dma_buff + offset;
		}

		(*desc)->status &= ~DEV_DMA_BUFF_STS_MASK;
		(*desc)->status |= (DEV_DMA_BUFF_STS_HREADY
				 << DEV_DMA_BUFF_STS_SHIFT);
		(*desc)++;
	}
}
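
/*
 * Descriptor-count example: with maxsize = 1024 returned by
 * dwc2_gadget_get_desc_params() (an assumed value) and a 2500-byte
 * buffer, the loop above builds 2500 / 1024 = 2 full descriptors plus
 * one 452-byte tail descriptor; only the tail carries the L and IOC
 * bits when true_last is set (control OUT is the exception handled
 * separately above).
 */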

/*
 * dwc2_gadget_config_nonisoc_xfer_ddma - prepare non ISOC DMA desc chain.
 * @hs_ep: The endpoint
 * @dma_buff: DMA address to use
 * @len: Length of the transfer
 *
 * This function will iterate over descriptor chain and fill its entries
 * with corresponding information based on transfer data.
 */
static void dwc2_gadget_config_nonisoc_xfer_ddma(struct dwc2_hsotg_ep *hs_ep,
						 dma_addr_t dma_buff,
						 unsigned int len)
{
	struct usb_request *ureq = NULL;
	struct dwc2_dma_desc *desc = hs_ep->desc_list;
	struct scatterlist *sg;
	int i;
	u8 desc_count = 0;

	if (hs_ep->req)
		ureq = &hs_ep->req->req;

	/* non-DMA sg buffer */
	if (!ureq || !ureq->num_sgs) {
		dwc2_gadget_fill_nonisoc_xfer_ddma_one(hs_ep, &desc,
			dma_buff, len, true);
		return;
	}

	/* DMA sg buffer */
	for_each_sg(ureq->sg, sg, ureq->num_mapped_sgs, i) {
		dwc2_gadget_fill_nonisoc_xfer_ddma_one(hs_ep, &desc,
			sg_dma_address(sg) + sg->offset, sg_dma_len(sg),
			(i == (ureq->num_mapped_sgs - 1)));
		desc_count += hs_ep->desc_count;
	}

	hs_ep->desc_count = desc_count;
}

/*
 * dwc2_gadget_fill_isoc_desc - fills next isochronous descriptor in chain.
 * @hs_ep: The isochronous endpoint.
 * @dma_buff: usb requests dma buffer.
 * @len: usb request transfer length.
 *
 * Fills next free descriptor with the data of the arrived usb request,
 * frame info, sets the Last and IOC bits, and increments next_desc. If
 * the filled descriptor is not the first one, removes the L bit from the
 * previous descriptor status.
 */
static int dwc2_gadget_fill_isoc_desc(struct dwc2_hsotg_ep *hs_ep,
				      dma_addr_t dma_buff, unsigned int len)
{
	struct dwc2_dma_desc *desc;
	struct dwc2_hsotg *hsotg = hs_ep->parent;
	u32 index;
	u32 mask = 0;
	u8 pid = 0;

	dwc2_gadget_get_desc_params(hs_ep, &mask);

	index = hs_ep->next_desc;
	desc = &hs_ep->desc_list[index];

	/* Check if descriptor chain full */
	if ((desc->status >> DEV_DMA_BUFF_STS_SHIFT) ==
	    DEV_DMA_BUFF_STS_HREADY) {
		dev_dbg(hsotg->dev, "%s: desc chain full\n", __func__);
		return 1;
	}

	/* Clear L bit of previous desc if more than one entry in the chain */
	if (hs_ep->next_desc)
		hs_ep->desc_list[index - 1].status &= ~DEV_DMA_L;

	dev_dbg(hsotg->dev, "%s: Filling ep %d, dir %s isoc desc # %d\n",
		__func__, hs_ep->index, hs_ep->dir_in ? "in" : "out", index);

	desc->status = 0;
	desc->status |= (DEV_DMA_BUFF_STS_HBUSY << DEV_DMA_BUFF_STS_SHIFT);

	desc->buf = dma_buff;
	desc->status |= (DEV_DMA_L | DEV_DMA_IOC |
			 ((len << DEV_DMA_NBYTES_SHIFT) & mask));

	if (hs_ep->dir_in) {
		if (len)
			pid = DIV_ROUND_UP(len, hs_ep->ep.maxpacket);
		else
			pid = 1;
		desc->status |= ((pid << DEV_DMA_ISOC_PID_SHIFT) &
				 DEV_DMA_ISOC_PID_MASK) |
				((len % hs_ep->ep.maxpacket) ?
				 DEV_DMA_SHORT : 0) |
				((hs_ep->target_frame <<
				  DEV_DMA_ISOC_FRNUM_SHIFT) &
				 DEV_DMA_ISOC_FRNUM_MASK);
	}

	desc->status &= ~DEV_DMA_BUFF_STS_MASK;
	desc->status |= (DEV_DMA_BUFF_STS_HREADY << DEV_DMA_BUFF_STS_SHIFT);

	/* Increment frame number by interval for IN */
	if (hs_ep->dir_in)
		dwc2_gadget_incr_frame_num(hs_ep);

	/* Update index of last configured entry in the chain */
	hs_ep->next_desc++;
	if (hs_ep->next_desc >= MAX_DMA_DESC_NUM_HS_ISOC)
		hs_ep->next_desc = 0;

	return 0;
}
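
/*
 * PID example for the IN path above: with maxpacket = 1024, a
 * 3000-byte request yields pid = DIV_ROUND_UP(3000, 1024) = 3, i.e.
 * three packets in the target microframe (the high-bandwidth
 * isochronous case), while a zero-length request still uses pid = 1 so
 * that a single ZLP is sent.
 */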

/*
 * dwc2_gadget_start_isoc_ddma - start isochronous transfer in DDMA
 * @hs_ep: The isochronous endpoint.
 *
 * Prepare descriptor chain for isochronous endpoints. Afterwards
 * write DMA address to HW and enable the endpoint.
 */
static void dwc2_gadget_start_isoc_ddma(struct dwc2_hsotg_ep *hs_ep)
{
	struct dwc2_hsotg *hsotg = hs_ep->parent;
	struct dwc2_hsotg_req *hs_req, *treq;
	int index = hs_ep->index;
	int ret;
	int i;
	u32 dma_reg;
	u32 depctl;
	u32 ctrl;
	struct dwc2_dma_desc *desc;

	if (list_empty(&hs_ep->queue)) {
		hs_ep->target_frame = TARGET_FRAME_INITIAL;
		dev_dbg(hsotg->dev, "%s: No requests in queue\n", __func__);
		return;
	}

	/* Initialize descriptor chain by Host Busy status */
	for (i = 0; i < MAX_DMA_DESC_NUM_HS_ISOC; i++) {
		desc = &hs_ep->desc_list[i];
		desc->status = 0;
		desc->status |= (DEV_DMA_BUFF_STS_HBUSY
				 << DEV_DMA_BUFF_STS_SHIFT);
	}

	hs_ep->next_desc = 0;
	list_for_each_entry_safe(hs_req, treq, &hs_ep->queue, queue) {
		dma_addr_t dma_addr = hs_req->req.dma;

		if (hs_req->req.num_sgs) {
			WARN_ON(hs_req->req.num_sgs > 1);
			dma_addr = sg_dma_address(hs_req->req.sg);
		}
		ret = dwc2_gadget_fill_isoc_desc(hs_ep, dma_addr,
						 hs_req->req.length);
		if (ret)
			break;
	}

	hs_ep->compl_desc = 0;
	depctl = hs_ep->dir_in ? DIEPCTL(index) : DOEPCTL(index);
	dma_reg = hs_ep->dir_in ? DIEPDMA(index) : DOEPDMA(index);

	/* write descriptor chain address to control register */
	dwc2_writel(hsotg, hs_ep->desc_list_dma, dma_reg);

	ctrl = dwc2_readl(hsotg, depctl);
	ctrl |= DXEPCTL_EPENA | DXEPCTL_CNAK;
	dwc2_writel(hsotg, ctrl, depctl);
}
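
/*
 * Register sequence performed above, assuming an IN endpoint with
 * index 1: the chain base address is written to DIEPDMA(1), then
 * DIEPCTL(1) is read, OR-ed with DXEPCTL_EPENA | DXEPCTL_CNAK and
 * written back, which arms the endpoint and clears its NAK so the core
 * starts fetching descriptors.
 */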

static bool dwc2_gadget_target_frame_elapsed(struct dwc2_hsotg_ep *hs_ep);

static void dwc2_hsotg_complete_request(struct dwc2_hsotg *hsotg,
					struct dwc2_hsotg_ep *hs_ep,
					struct dwc2_hsotg_req *hs_req,
					int result);
  880. /**
  881. * dwc2_hsotg_start_req - start a USB request from an endpoint's queue
  882. * @hsotg: The controller state.
  883. * @hs_ep: The endpoint to process a request for
  884. * @hs_req: The request to start.
  885. * @continuing: True if we are doing more for the current request.
  886. *
  887. * Start the given request running by setting the endpoint registers
  888. * appropriately, and writing any data to the FIFOs.
  889. */
  890. static void dwc2_hsotg_start_req(struct dwc2_hsotg *hsotg,
  891. struct dwc2_hsotg_ep *hs_ep,
  892. struct dwc2_hsotg_req *hs_req,
  893. bool continuing)
  894. {
  895. struct usb_request *ureq = &hs_req->req;
  896. int index = hs_ep->index;
  897. int dir_in = hs_ep->dir_in;
  898. u32 epctrl_reg;
  899. u32 epsize_reg;
  900. u32 epsize;
  901. u32 ctrl;
  902. unsigned int length;
  903. unsigned int packets;
  904. unsigned int maxreq;
  905. unsigned int dma_reg;
  906. if (index != 0) {
  907. if (hs_ep->req && !continuing) {
  908. dev_err(hsotg->dev, "%s: active request\n", __func__);
  909. WARN_ON(1);
  910. return;
  911. } else if (hs_ep->req != hs_req && continuing) {
  912. dev_err(hsotg->dev,
  913. "%s: continue different req\n", __func__);
  914. WARN_ON(1);
  915. return;
  916. }
  917. } else if (using_dma(hsotg) && hs_ep->dir_in) {
  918. unsigned char epnum;
  919. for (epnum = 0; epnum < hsotg->num_of_eps; epnum++) {
  920. u32 next = DXEPCTL_NEXTEP((epnum + 1) % 15);
  921. u32 epinctl = dwc2_readl(hsotg, DIEPCTL(epnum));
  922. dwc2_writel(hsotg, next | epinctl, DIEPCTL(epnum));
  923. }
  924. }
  925. dma_reg = dir_in ? DIEPDMA(index) : DOEPDMA(index);
  926. epctrl_reg = dir_in ? DIEPCTL(index) : DOEPCTL(index);
  927. epsize_reg = dir_in ? DIEPTSIZ(index) : DOEPTSIZ(index);
  928. dev_dbg(hsotg->dev, "%s: DxEPCTL=0x%08x, ep %d, dir %s\n",
  929. __func__, dwc2_readl(hsotg, epctrl_reg), index,
  930. hs_ep->dir_in ? "in" : "out");
  931. /* If endpoint is stalled, we will restart request later */
  932. ctrl = dwc2_readl(hsotg, epctrl_reg);
  933. if (index && ctrl & DXEPCTL_STALL) {
  934. dev_warn(hsotg->dev, "%s: ep%d is stalled\n", __func__, index);
  935. return;
  936. }
  937. length = ureq->length - ureq->actual;
  938. dev_dbg(hsotg->dev, "ureq->length:%d ureq->actual:%d\n",
  939. ureq->length, ureq->actual);
  940. if (!using_desc_dma(hsotg))
  941. maxreq = get_ep_limit(hs_ep);
  942. else
  943. maxreq = dwc2_gadget_get_chain_limit(hs_ep);
  944. if (length > maxreq) {
  945. int round = maxreq % hs_ep->ep.maxpacket;
  946. dev_dbg(hsotg->dev, "%s: length %d, max-req %d, r %d\n",
  947. __func__, length, maxreq, round);
  948. /* round down to multiple of packets */
  949. if (round)
  950. maxreq -= round;
  951. length = maxreq;
  952. }
  953. if (length)
  954. packets = DIV_ROUND_UP(length, hs_ep->ep.maxpacket);
  955. else
  956. packets = 1; /* send one packet if length is zero. */
  957. if (dir_in && index != 0)
  958. if (hs_ep->isochronous)
  959. epsize = DXEPTSIZ_MC(packets);
  960. else
  961. epsize = DXEPTSIZ_MC(1);
  962. else
  963. epsize = 0;
  964. /*
  965. * zero length packet should be programmed on its own and should not
  966. * be counted in DIEPTSIZ.PktCnt with other packets.
  967. */
  968. if (dir_in && ureq->zero && !continuing) {
  969. /* Test if zlp is actually required. */
  970. if ((ureq->length >= hs_ep->ep.maxpacket) &&
  971. !(ureq->length % hs_ep->ep.maxpacket))
  972. hs_ep->send_zlp = 1;
  973. }
  974. epsize |= DXEPTSIZ_PKTCNT(packets);
  975. epsize |= DXEPTSIZ_XFERSIZE(length);
  976. dev_dbg(hsotg->dev, "%s: %d@%d/%d, 0x%08x => 0x%08x\n",
  977. __func__, packets, length, ureq->length, epsize, epsize_reg);
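	/*
	 * Worked example (illustrative, not from the original source): a
	 * 3000 byte bulk IN request with wMaxPacketSize = 512 gives
	 * length = 3000 and packets = DIV_ROUND_UP(3000, 512) = 6, so the
	 * value programmed into DxEPTSIZ is
	 * DXEPTSIZ_PKTCNT(6) | DXEPTSIZ_XFERSIZE(3000). A zero-length
	 * request instead programs PKTCNT(1) with XFERSIZE(0), per the
	 * packets fallback above.
	 */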
	/* store the request as the current one we're doing */
	hs_ep->req = hs_req;

	if (using_desc_dma(hsotg)) {
		u32 offset = 0;
		u32 mps = hs_ep->ep.maxpacket;

		/* Adjust length: EP0 - MPS, other OUT EPs - multiple of MPS */
		if (!dir_in) {
			if (!index)
				length = mps;
			else if (length % mps)
				length += (mps - (length % mps));
		}

		if (continuing)
			offset = ureq->actual;

		/* Fill DDMA chain entries */
		dwc2_gadget_config_nonisoc_xfer_ddma(hs_ep, ureq->dma + offset,
						     length);

		/* write descriptor chain address to control register */
		dwc2_writel(hsotg, hs_ep->desc_list_dma, dma_reg);

		dev_dbg(hsotg->dev, "%s: %pad => 0x%08x\n",
			__func__, &hs_ep->desc_list_dma, dma_reg);
	} else {
		/* write size / packets */
		dwc2_writel(hsotg, epsize, epsize_reg);

		if (using_dma(hsotg) && !continuing && (length != 0)) {
			/*
			 * write DMA address to control register, buffer
			 * already synced by dwc2_hsotg_ep_queue().
			 */
			dwc2_writel(hsotg, ureq->dma, dma_reg);

			dev_dbg(hsotg->dev, "%s: %pad => 0x%08x\n",
				__func__, &ureq->dma, dma_reg);
		}
	}

	if (hs_ep->isochronous) {
		if (!dwc2_gadget_target_frame_elapsed(hs_ep)) {
			if (hs_ep->interval == 1) {
				if (hs_ep->target_frame & 0x1)
					ctrl |= DXEPCTL_SETODDFR;
				else
					ctrl |= DXEPCTL_SETEVENFR;
			}
			ctrl |= DXEPCTL_CNAK;
		} else {
			hs_req->req.frame_number = hs_ep->target_frame;
			hs_req->req.actual = 0;
			dwc2_hsotg_complete_request(hsotg, hs_ep, hs_req, -ENODATA);
			return;
		}
	}

	ctrl |= DXEPCTL_EPENA;	/* ensure ep enabled */

	dev_dbg(hsotg->dev, "ep0 state:%d\n", hsotg->ep0_state);

	/* For Setup request do not clear NAK */
	if (!(index == 0 && hsotg->ep0_state == DWC2_EP0_SETUP))
		ctrl |= DXEPCTL_CNAK;	/* clear NAK set by core */

	dev_dbg(hsotg->dev, "%s: DxEPCTL=0x%08x\n", __func__, ctrl);
	dwc2_writel(hsotg, ctrl, epctrl_reg);

	/*
	 * set these, it seems that DMA support increments past the end
	 * of the packet buffer so we need to calculate the length from
	 * this information.
	 */
	hs_ep->size_loaded = length;
	hs_ep->last_load = ureq->actual;

	if (dir_in && !using_dma(hsotg)) {
		/* set these anyway, we may need them for non-periodic in */
		hs_ep->fifo_load = 0;

		dwc2_hsotg_write_fifo(hsotg, hs_ep, hs_req);
	}

	/*
	 * Note, trying to clear the NAK here causes problems with transmit
	 * on the S3C6400 ending up with the TXFIFO becoming full.
	 */

	/* check ep is enabled */
	if (!(dwc2_readl(hsotg, epctrl_reg) & DXEPCTL_EPENA))
		dev_dbg(hsotg->dev,
			"ep%d: failed to become enabled (DXEPCTL=0x%08x)?\n",
			index, dwc2_readl(hsotg, epctrl_reg));

	dev_dbg(hsotg->dev, "%s: DXEPCTL=0x%08x\n",
		__func__, dwc2_readl(hsotg, epctrl_reg));

	/* enable ep interrupts */
	dwc2_hsotg_ctrl_epint(hsotg, hs_ep->index, hs_ep->dir_in, 1);
}

/**
 * dwc2_hsotg_map_dma - map the DMA memory being used for the request
 * @hsotg: The device state.
 * @hs_ep: The endpoint the request is on.
 * @req: The request being processed.
 *
 * We've been asked to queue a request, so ensure that the memory buffer
 * is correctly setup for DMA. If we've been passed an extant DMA address
 * then ensure the buffer has been synced to memory. If our buffer has no
 * DMA memory, then we map the memory and mark our request to allow us to
 * cleanup on completion.
 */
static int dwc2_hsotg_map_dma(struct dwc2_hsotg *hsotg,
			      struct dwc2_hsotg_ep *hs_ep,
			      struct usb_request *req)
{
	int ret;

	hs_ep->map_dir = hs_ep->dir_in;
	ret = usb_gadget_map_request(&hsotg->gadget, req, hs_ep->dir_in);
	if (ret)
		goto dma_error;

	return 0;

dma_error:
	dev_err(hsotg->dev, "%s: failed to map buffer %p, %d bytes\n",
		__func__, req->buf, req->length);

	return -EIO;
}

static int dwc2_hsotg_handle_unaligned_buf_start(struct dwc2_hsotg *hsotg,
						 struct dwc2_hsotg_ep *hs_ep,
						 struct dwc2_hsotg_req *hs_req)
{
	void *req_buf = hs_req->req.buf;

	/* If dma is not being used or buffer is aligned */
	if (!using_dma(hsotg) || !((long)req_buf & 3))
		return 0;

	WARN_ON(hs_req->saved_req_buf);

	dev_dbg(hsotg->dev, "%s: %s: buf=%p length=%d\n", __func__,
		hs_ep->ep.name, req_buf, hs_req->req.length);

	hs_req->req.buf = kmalloc(hs_req->req.length, GFP_ATOMIC);
	if (!hs_req->req.buf) {
		hs_req->req.buf = req_buf;
		dev_err(hsotg->dev,
			"%s: unable to allocate memory for bounce buffer\n",
			__func__);
		return -ENOMEM;
	}

	/* Save actual buffer */
	hs_req->saved_req_buf = req_buf;

	if (hs_ep->dir_in)
		memcpy(hs_req->req.buf, req_buf, hs_req->req.length);
	return 0;
}
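/*
 * Example (illustrative, not part of the driver): how the alignment test
 * above classifies a request buffer. Any address with either of its low
 * two bits set takes the bounce-buffer path:
 *
 *	if ((long)req->buf & 3)
 *		// e.g. ...0x1001, ...0x1002, ...0x1003: bounce needed
 *	// e.g. ...0x1000, ...0x1004: handed to DMA as-is
 */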
static void
dwc2_hsotg_handle_unaligned_buf_complete(struct dwc2_hsotg *hsotg,
					 struct dwc2_hsotg_ep *hs_ep,
					 struct dwc2_hsotg_req *hs_req)
{
	/* If dma is not being used or buffer was aligned */
	if (!using_dma(hsotg) || !hs_req->saved_req_buf)
		return;

	dev_dbg(hsotg->dev, "%s: %s: status=%d actual-length=%d\n", __func__,
		hs_ep->ep.name, hs_req->req.status, hs_req->req.actual);

	/* Copy data from bounce buffer on successful out transfer */
	if (!hs_ep->dir_in && !hs_req->req.status)
		memcpy(hs_req->saved_req_buf, hs_req->req.buf,
		       hs_req->req.actual);

	/* Free bounce buffer */
	kfree(hs_req->req.buf);

	hs_req->req.buf = hs_req->saved_req_buf;
	hs_req->saved_req_buf = NULL;
}

/**
 * dwc2_gadget_target_frame_elapsed - Checks target frame
 * @hs_ep: The driver endpoint to check
 *
 * Returns true if the target frame has elapsed; in that case the
 * corresponding transfer must be dropped.
 */
static bool dwc2_gadget_target_frame_elapsed(struct dwc2_hsotg_ep *hs_ep)
{
	struct dwc2_hsotg *hsotg = hs_ep->parent;
	u32 target_frame = hs_ep->target_frame;
	u32 current_frame = hsotg->frame_number;
	bool frame_overrun = hs_ep->frame_overrun;
	u16 limit = DSTS_SOFFN_LIMIT;

	if (hsotg->gadget.speed != USB_SPEED_HIGH)
		limit >>= 3;

	if (!frame_overrun && current_frame >= target_frame)
		return true;

	if (frame_overrun && current_frame >= target_frame &&
	    ((current_frame - target_frame) < limit / 2))
		return true;

	return false;
}
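/*
 * Worked example (illustrative, not from the original source): with no
 * overrun recorded, target_frame = 100 / current_frame = 101 reports
 * elapsed. When frame_overrun is set (target_frame wrapped past the
 * SOFFN limit), the extra (current_frame - target_frame) < limit / 2
 * guard stops a not-yet-wrapped current_frame high in the counter range
 * from being misread as "elapsed" before the counter itself wraps.
 */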
/*
 * dwc2_gadget_set_ep0_desc_chain - Set EP's desc chain pointers
 * @hsotg: The driver state
 * @hs_ep: the ep descriptor chain is for
 *
 * Called to update EP0 structure's pointers depending on the stage of
 * the control transfer.
 */
static int dwc2_gadget_set_ep0_desc_chain(struct dwc2_hsotg *hsotg,
					  struct dwc2_hsotg_ep *hs_ep)
{
	switch (hsotg->ep0_state) {
	case DWC2_EP0_SETUP:
	case DWC2_EP0_STATUS_OUT:
		hs_ep->desc_list = hsotg->setup_desc[0];
		hs_ep->desc_list_dma = hsotg->setup_desc_dma[0];
		break;
	case DWC2_EP0_DATA_IN:
	case DWC2_EP0_STATUS_IN:
		hs_ep->desc_list = hsotg->ctrl_in_desc;
		hs_ep->desc_list_dma = hsotg->ctrl_in_desc_dma;
		break;
	case DWC2_EP0_DATA_OUT:
		hs_ep->desc_list = hsotg->ctrl_out_desc;
		hs_ep->desc_list_dma = hsotg->ctrl_out_desc_dma;
		break;
	default:
		dev_err(hsotg->dev, "invalid EP 0 state in queue %d\n",
			hsotg->ep0_state);
		return -EINVAL;
	}

	return 0;
}

static int dwc2_hsotg_ep_queue(struct usb_ep *ep, struct usb_request *req,
			       gfp_t gfp_flags)
{
	struct dwc2_hsotg_req *hs_req = our_req(req);
	struct dwc2_hsotg_ep *hs_ep = our_ep(ep);
	struct dwc2_hsotg *hs = hs_ep->parent;
	bool first;
	int ret;
	u32 maxsize = 0;
	u32 mask = 0;

	dev_dbg(hs->dev, "%s: req %p: %d@%p, noi=%d, zero=%d, snok=%d\n",
		ep->name, req, req->length, req->buf, req->no_interrupt,
		req->zero, req->short_not_ok);

	if (hs->lx_state == DWC2_L1)
		dwc2_wakeup_from_lpm_l1(hs, true);

	/* Prevent new request submission when controller is suspended */
	if (hs->lx_state != DWC2_L0) {
		dev_dbg(hs->dev, "%s: submit request only in active state\n",
			__func__);
		return -EAGAIN;
	}

	/* initialise status of the request */
	INIT_LIST_HEAD(&hs_req->queue);
	req->actual = 0;
	req->status = -EINPROGRESS;

	/* Don't queue ISOC request if length greater than mps*mc */
	if (hs_ep->isochronous &&
	    req->length > (hs_ep->mc * hs_ep->ep.maxpacket)) {
		dev_err(hs->dev, "req length > maxpacket*mc\n");
		return -EINVAL;
	}

	/* In DDMA mode for ISOC's don't queue request if length greater
	 * than descriptor limits.
	 */
	if (using_desc_dma(hs) && hs_ep->isochronous) {
		maxsize = dwc2_gadget_get_desc_params(hs_ep, &mask);
		if (hs_ep->dir_in && req->length > maxsize) {
			dev_err(hs->dev, "wrong length %d (maxsize=%d)\n",
				req->length, maxsize);
			return -EINVAL;
		}

		if (!hs_ep->dir_in && req->length > hs_ep->ep.maxpacket) {
			dev_err(hs->dev, "ISOC OUT: wrong length %d (mps=%d)\n",
				req->length, hs_ep->ep.maxpacket);
			return -EINVAL;
		}
	}

	ret = dwc2_hsotg_handle_unaligned_buf_start(hs, hs_ep, hs_req);
	if (ret)
		return ret;

	/* if we're using DMA, sync the buffers as necessary */
	if (using_dma(hs)) {
		ret = dwc2_hsotg_map_dma(hs, hs_ep, req);
		if (ret)
			return ret;
	}

	/* If using descriptor DMA configure EP0 descriptor chain pointers */
	if (using_desc_dma(hs) && !hs_ep->index) {
		ret = dwc2_gadget_set_ep0_desc_chain(hs, hs_ep);
		if (ret)
			return ret;
	}

	first = list_empty(&hs_ep->queue);
	list_add_tail(&hs_req->queue, &hs_ep->queue);

	/*
	 * Handle DDMA isochronous transfers separately - just add new entry
	 * to the descriptor chain.
	 * Transfer will be started once SW gets either one of NAK or
	 * OutTknEpDis interrupts.
	 */
	if (using_desc_dma(hs) && hs_ep->isochronous) {
		if (hs_ep->target_frame != TARGET_FRAME_INITIAL) {
			dma_addr_t dma_addr = hs_req->req.dma;

			if (hs_req->req.num_sgs) {
				WARN_ON(hs_req->req.num_sgs > 1);
				dma_addr = sg_dma_address(hs_req->req.sg);
			}
			dwc2_gadget_fill_isoc_desc(hs_ep, dma_addr,
						   hs_req->req.length);
		}
		return 0;
	}

	/* Change EP direction if status phase request is after data out */
	if (!hs_ep->index && !req->length && !hs_ep->dir_in &&
	    hs->ep0_state == DWC2_EP0_DATA_OUT)
		hs_ep->dir_in = 1;

	if (first) {
		if (!hs_ep->isochronous) {
			dwc2_hsotg_start_req(hs, hs_ep, hs_req, false);
			return 0;
		}

		/* Update current frame number value. */
		hs->frame_number = dwc2_hsotg_read_frameno(hs);
		while (dwc2_gadget_target_frame_elapsed(hs_ep)) {
			dwc2_gadget_incr_frame_num(hs_ep);
			/* Update current frame number value once more as it
			 * changes here.
			 */
			hs->frame_number = dwc2_hsotg_read_frameno(hs);
		}

		if (hs_ep->target_frame != TARGET_FRAME_INITIAL)
			dwc2_hsotg_start_req(hs, hs_ep, hs_req, false);
	}
	return 0;
}

static int dwc2_hsotg_ep_queue_lock(struct usb_ep *ep, struct usb_request *req,
				    gfp_t gfp_flags)
{
	struct dwc2_hsotg_ep *hs_ep = our_ep(ep);
	struct dwc2_hsotg *hs = hs_ep->parent;
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&hs->lock, flags);
	ret = dwc2_hsotg_ep_queue(ep, req, gfp_flags);
	spin_unlock_irqrestore(&hs->lock, flags);

	return ret;
}

static void dwc2_hsotg_ep_free_request(struct usb_ep *ep,
				       struct usb_request *req)
{
	struct dwc2_hsotg_req *hs_req = our_req(req);

	kfree(hs_req);
}

/**
 * dwc2_hsotg_complete_oursetup - setup completion callback
 * @ep: The endpoint the request was on.
 * @req: The request completed.
 *
 * Called on completion of any requests the driver itself
 * submitted that need cleaning up.
 */
static void dwc2_hsotg_complete_oursetup(struct usb_ep *ep,
					 struct usb_request *req)
{
	struct dwc2_hsotg_ep *hs_ep = our_ep(ep);
	struct dwc2_hsotg *hsotg = hs_ep->parent;

	dev_dbg(hsotg->dev, "%s: ep %p, req %p\n", __func__, ep, req);

	dwc2_hsotg_ep_free_request(ep, req);
}

/**
 * ep_from_windex - convert control wIndex value to endpoint
 * @hsotg: The driver state.
 * @windex: The control request wIndex field (in host order).
 *
 * Convert the given wIndex into a pointer to a driver endpoint
 * structure, or return NULL if it is not a valid endpoint.
 */
static struct dwc2_hsotg_ep *ep_from_windex(struct dwc2_hsotg *hsotg,
					    u32 windex)
{
	int dir = (windex & USB_DIR_IN) ? 1 : 0;
	int idx = windex & 0x7F;

	if (windex >= 0x100)
		return NULL;

	if (idx > hsotg->num_of_eps)
		return NULL;

	return index_to_ep(hsotg, idx, dir);
}
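/*
 * Example (illustrative, not from the original source): a GET_STATUS
 * directed at endpoint 1 IN carries wIndex = 0x0081, so dir = 1
 * (USB_DIR_IN set) and idx = 1, resolving to index_to_ep(hsotg, 1, 1);
 * wIndex = 0x0002 names endpoint 2 OUT. Anything >= 0x100 cannot encode
 * an endpoint and is rejected.
 */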
/**
 * dwc2_hsotg_set_test_mode - Enable usb Test Modes
 * @hsotg: The driver state.
 * @testmode: requested usb test mode
 * Enable usb Test Mode requested by the Host.
 */
int dwc2_hsotg_set_test_mode(struct dwc2_hsotg *hsotg, int testmode)
{
	int dctl = dwc2_readl(hsotg, DCTL);

	dctl &= ~DCTL_TSTCTL_MASK;
	switch (testmode) {
	case USB_TEST_J:
	case USB_TEST_K:
	case USB_TEST_SE0_NAK:
	case USB_TEST_PACKET:
	case USB_TEST_FORCE_ENABLE:
		dctl |= testmode << DCTL_TSTCTL_SHIFT;
		break;
	default:
		return -EINVAL;
	}
	dwc2_writel(hsotg, dctl, DCTL);
	return 0;
}
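/*
 * Example (illustrative, not from the original source): the ch9 test
 * selectors are numbered 1..5, so a SET_FEATURE(TEST_MODE) choosing
 * USB_TEST_PACKET (4) ends up as
 *
 *	dctl |= 4 << DCTL_TSTCTL_SHIFT;
 *
 * i.e. TstCtl = 4 in DCTL, as written back above.
 */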
/**
 * dwc2_hsotg_send_reply - send reply to control request
 * @hsotg: The device state
 * @ep: Endpoint 0
 * @buff: Buffer for request
 * @length: Length of reply.
 *
 * Create a request and queue it on the given endpoint. This is useful as
 * an internal method of sending replies to certain control requests, etc.
 */
static int dwc2_hsotg_send_reply(struct dwc2_hsotg *hsotg,
				 struct dwc2_hsotg_ep *ep,
				 void *buff,
				 int length)
{
	struct usb_request *req;
	int ret;

	dev_dbg(hsotg->dev, "%s: buff %p, len %d\n", __func__, buff, length);

	req = dwc2_hsotg_ep_alloc_request(&ep->ep, GFP_ATOMIC);
	hsotg->ep0_reply = req;
	if (!req) {
		dev_warn(hsotg->dev, "%s: cannot alloc req\n", __func__);
		return -ENOMEM;
	}

	req->buf = hsotg->ep0_buff;
	req->length = length;
	/*
	 * zero flag is for sending zlp in DATA IN stage. It has no impact on
	 * STATUS stage.
	 */
	req->zero = 0;
	req->complete = dwc2_hsotg_complete_oursetup;

	if (length)
		memcpy(req->buf, buff, length);

	ret = dwc2_hsotg_ep_queue(&ep->ep, req, GFP_ATOMIC);
	if (ret) {
		dev_warn(hsotg->dev, "%s: cannot queue req\n", __func__);
		return ret;
	}

	return 0;
}

/**
 * dwc2_hsotg_process_req_status - process request GET_STATUS
 * @hsotg: The device state
 * @ctrl: USB control request
 */
static int dwc2_hsotg_process_req_status(struct dwc2_hsotg *hsotg,
					 struct usb_ctrlrequest *ctrl)
{
	struct dwc2_hsotg_ep *ep0 = hsotg->eps_out[0];
	struct dwc2_hsotg_ep *ep;
	__le16 reply;
	u16 status;
	int ret;

	dev_dbg(hsotg->dev, "%s: USB_REQ_GET_STATUS\n", __func__);

	if (!ep0->dir_in) {
		dev_warn(hsotg->dev, "%s: direction out?\n", __func__);
		return -EINVAL;
	}

	switch (ctrl->bRequestType & USB_RECIP_MASK) {
	case USB_RECIP_DEVICE:
		status = hsotg->gadget.is_selfpowered <<
			 USB_DEVICE_SELF_POWERED;
		status |= hsotg->remote_wakeup_allowed <<
			  USB_DEVICE_REMOTE_WAKEUP;
		reply = cpu_to_le16(status);
		break;

	case USB_RECIP_INTERFACE:
		/* currently, the data result should be zero */
		reply = cpu_to_le16(0);
		break;

	case USB_RECIP_ENDPOINT:
		ep = ep_from_windex(hsotg, le16_to_cpu(ctrl->wIndex));
		if (!ep)
			return -ENOENT;

		reply = cpu_to_le16(ep->halted ? 1 : 0);
		break;

	default:
		return 0;
	}

	if (le16_to_cpu(ctrl->wLength) != 2)
		return -EINVAL;

	ret = dwc2_hsotg_send_reply(hsotg, ep0, &reply, 2);
	if (ret) {
		dev_err(hsotg->dev, "%s: failed to send reply\n", __func__);
		return ret;
	}

	return 1;
}

static int dwc2_hsotg_ep_sethalt(struct usb_ep *ep, int value, bool now);

/**
 * get_ep_head - return the first request on the endpoint
 * @hs_ep: The controller endpoint to get
 *
 * Get the first request on the endpoint.
 */
static struct dwc2_hsotg_req *get_ep_head(struct dwc2_hsotg_ep *hs_ep)
{
	return list_first_entry_or_null(&hs_ep->queue, struct dwc2_hsotg_req,
					queue);
}

/**
 * dwc2_gadget_start_next_request - Starts next request from ep queue
 * @hs_ep: Endpoint structure
 *
 * If queue is empty and EP is ISOC-OUT - unmasks OUTTKNEPDIS which is masked
 * in its handler. Hence we need to unmask it here to be able to do
 * resynchronization.
 */
static void dwc2_gadget_start_next_request(struct dwc2_hsotg_ep *hs_ep)
{
	struct dwc2_hsotg *hsotg = hs_ep->parent;
	int dir_in = hs_ep->dir_in;
	struct dwc2_hsotg_req *hs_req;

	if (!list_empty(&hs_ep->queue)) {
		hs_req = get_ep_head(hs_ep);
		dwc2_hsotg_start_req(hsotg, hs_ep, hs_req, false);
		return;
	}
	if (!hs_ep->isochronous)
		return;

	if (dir_in) {
		dev_dbg(hsotg->dev, "%s: No more ISOC-IN requests\n",
			__func__);
	} else {
		dev_dbg(hsotg->dev, "%s: No more ISOC-OUT requests\n",
			__func__);
	}
}

/**
 * dwc2_hsotg_process_req_feature - process request {SET,CLEAR}_FEATURE
 * @hsotg: The device state
 * @ctrl: USB control request
 */
static int dwc2_hsotg_process_req_feature(struct dwc2_hsotg *hsotg,
					  struct usb_ctrlrequest *ctrl)
{
	struct dwc2_hsotg_ep *ep0 = hsotg->eps_out[0];
	struct dwc2_hsotg_req *hs_req;
	bool set = (ctrl->bRequest == USB_REQ_SET_FEATURE);
	struct dwc2_hsotg_ep *ep;
	int ret;
	bool halted;
	u32 recip;
	u32 wValue;
	u32 wIndex;

	dev_dbg(hsotg->dev, "%s: %s_FEATURE\n",
		__func__, set ? "SET" : "CLEAR");

	wValue = le16_to_cpu(ctrl->wValue);
	wIndex = le16_to_cpu(ctrl->wIndex);
	recip = ctrl->bRequestType & USB_RECIP_MASK;

	switch (recip) {
	case USB_RECIP_DEVICE:
		switch (wValue) {
		case USB_DEVICE_REMOTE_WAKEUP:
			if (set)
				hsotg->remote_wakeup_allowed = 1;
			else
				hsotg->remote_wakeup_allowed = 0;
			break;

		case USB_DEVICE_TEST_MODE:
			if ((wIndex & 0xff) != 0)
				return -EINVAL;
			if (!set)
				return -EINVAL;

			hsotg->test_mode = wIndex >> 8;
			break;

		default:
			return -ENOENT;
		}

		ret = dwc2_hsotg_send_reply(hsotg, ep0, NULL, 0);
		if (ret) {
			dev_err(hsotg->dev,
				"%s: failed to send reply\n", __func__);
			return ret;
		}
		break;

	case USB_RECIP_ENDPOINT:
		ep = ep_from_windex(hsotg, wIndex);
		if (!ep) {
			dev_dbg(hsotg->dev, "%s: no endpoint for 0x%04x\n",
				__func__, wIndex);
			return -ENOENT;
		}

		switch (wValue) {
		case USB_ENDPOINT_HALT:
			halted = ep->halted;

			if (!ep->wedged)
				dwc2_hsotg_ep_sethalt(&ep->ep, set, true);

			ret = dwc2_hsotg_send_reply(hsotg, ep0, NULL, 0);
			if (ret) {
				dev_err(hsotg->dev,
					"%s: failed to send reply\n", __func__);
				return ret;
			}

			/*
			 * we have to complete all requests for ep if it was
			 * halted, and the halt was cleared by CLEAR_FEATURE
			 */

			if (!set && halted) {
				/*
				 * If we have request in progress,
				 * then complete it
				 */
				if (ep->req) {
					hs_req = ep->req;
					ep->req = NULL;
					list_del_init(&hs_req->queue);
					if (hs_req->req.complete) {
						spin_unlock(&hsotg->lock);
						usb_gadget_giveback_request(
							&ep->ep, &hs_req->req);
						spin_lock(&hsotg->lock);
					}
				}

				/* If we have pending request, then start it */
				if (!ep->req)
					dwc2_gadget_start_next_request(ep);
			}

			break;

		default:
			return -ENOENT;
		}
		break;

	default:
		return -ENOENT;
	}
	return 1;
}

static void dwc2_hsotg_enqueue_setup(struct dwc2_hsotg *hsotg);

/**
 * dwc2_hsotg_stall_ep0 - stall ep0
 * @hsotg: The device state
 *
 * Set stall for ep0 as response for setup request.
 */
static void dwc2_hsotg_stall_ep0(struct dwc2_hsotg *hsotg)
{
	struct dwc2_hsotg_ep *ep0 = hsotg->eps_out[0];
	u32 reg;
	u32 ctrl;

	dev_dbg(hsotg->dev, "ep0 stall (dir=%d)\n", ep0->dir_in);
	reg = (ep0->dir_in) ? DIEPCTL0 : DOEPCTL0;

	/*
	 * DxEPCTL_Stall will be cleared by EP once it has
	 * taken effect, so no need to clear later.
	 */

	ctrl = dwc2_readl(hsotg, reg);
	ctrl |= DXEPCTL_STALL;
	ctrl |= DXEPCTL_CNAK;
	dwc2_writel(hsotg, ctrl, reg);

	dev_dbg(hsotg->dev,
		"written DXEPCTL=0x%08x to %08x (DXEPCTL=0x%08x)\n",
		ctrl, reg, dwc2_readl(hsotg, reg));

	/*
	 * complete won't be called, so we enqueue
	 * setup request here
	 */
	dwc2_hsotg_enqueue_setup(hsotg);
}

/**
 * dwc2_hsotg_process_control - process a control request
 * @hsotg: The device state
 * @ctrl: The control request received
 *
 * The controller has received the SETUP phase of a control request, and
 * needs to work out what to do next (and whether to pass it on to the
 * gadget driver).
 */
static void dwc2_hsotg_process_control(struct dwc2_hsotg *hsotg,
				       struct usb_ctrlrequest *ctrl)
{
	struct dwc2_hsotg_ep *ep0 = hsotg->eps_out[0];
	int ret = 0;
	u32 dcfg;

	dev_dbg(hsotg->dev,
		"ctrl Type=%02x, Req=%02x, V=%04x, I=%04x, L=%04x\n",
		ctrl->bRequestType, ctrl->bRequest, ctrl->wValue,
		ctrl->wIndex, ctrl->wLength);

	if (ctrl->wLength == 0) {
		ep0->dir_in = 1;
		hsotg->ep0_state = DWC2_EP0_STATUS_IN;
	} else if (ctrl->bRequestType & USB_DIR_IN) {
		ep0->dir_in = 1;
		hsotg->ep0_state = DWC2_EP0_DATA_IN;
	} else {
		ep0->dir_in = 0;
		hsotg->ep0_state = DWC2_EP0_DATA_OUT;
	}

	if ((ctrl->bRequestType & USB_TYPE_MASK) == USB_TYPE_STANDARD) {
		switch (ctrl->bRequest) {
		case USB_REQ_SET_ADDRESS:
			hsotg->connected = 1;
			dcfg = dwc2_readl(hsotg, DCFG);
			dcfg &= ~DCFG_DEVADDR_MASK;
			dcfg |= (le16_to_cpu(ctrl->wValue) <<
				 DCFG_DEVADDR_SHIFT) & DCFG_DEVADDR_MASK;
			dwc2_writel(hsotg, dcfg, DCFG);

			dev_info(hsotg->dev, "new address %d\n", ctrl->wValue);

			ret = dwc2_hsotg_send_reply(hsotg, ep0, NULL, 0);
			return;

		case USB_REQ_GET_STATUS:
			ret = dwc2_hsotg_process_req_status(hsotg, ctrl);
			break;

		case USB_REQ_CLEAR_FEATURE:
		case USB_REQ_SET_FEATURE:
			ret = dwc2_hsotg_process_req_feature(hsotg, ctrl);
			break;
		}
	}

	/* as a fallback, try delivering it to the driver to deal with */
	if (ret == 0 && hsotg->driver) {
		spin_unlock(&hsotg->lock);
		ret = hsotg->driver->setup(&hsotg->gadget, ctrl);
		spin_lock(&hsotg->lock);
		if (ret < 0)
			dev_dbg(hsotg->dev, "driver->setup() ret %d\n", ret);
	}

	hsotg->delayed_status = false;
	if (ret == USB_GADGET_DELAYED_STATUS)
		hsotg->delayed_status = true;

	/*
	 * the request is either unhandlable, or is not formatted correctly
	 * so respond with a STALL for the status stage to indicate failure.
	 */
	if (ret < 0)
		dwc2_hsotg_stall_ep0(hsotg);
}

/**
 * dwc2_hsotg_complete_setup - completion of a setup transfer
 * @ep: The endpoint the request was on.
 * @req: The request completed.
 *
 * Called on completion of any requests the driver itself submitted for
 * EP0 setup packets
 */
static void dwc2_hsotg_complete_setup(struct usb_ep *ep,
				      struct usb_request *req)
{
	struct dwc2_hsotg_ep *hs_ep = our_ep(ep);
	struct dwc2_hsotg *hsotg = hs_ep->parent;

	if (req->status < 0) {
		dev_dbg(hsotg->dev, "%s: failed %d\n", __func__, req->status);
		return;
	}

	spin_lock(&hsotg->lock);
	if (req->actual == 0)
		dwc2_hsotg_enqueue_setup(hsotg);
	else
		dwc2_hsotg_process_control(hsotg, req->buf);
	spin_unlock(&hsotg->lock);
}

/**
 * dwc2_hsotg_enqueue_setup - start a request for EP0 packets
 * @hsotg: The device state.
 *
 * Enqueue a request on EP0 if necessary to receive any SETUP packets
 * sent by the host.
 */
static void dwc2_hsotg_enqueue_setup(struct dwc2_hsotg *hsotg)
{
	struct usb_request *req = hsotg->ctrl_req;
	struct dwc2_hsotg_req *hs_req = our_req(req);
	int ret;

	dev_dbg(hsotg->dev, "%s: queueing setup request\n", __func__);

	req->zero = 0;
	req->length = 8;
	req->buf = hsotg->ctrl_buff;
	req->complete = dwc2_hsotg_complete_setup;

	if (!list_empty(&hs_req->queue)) {
		dev_dbg(hsotg->dev, "%s already queued???\n", __func__);
		return;
	}

	hsotg->eps_out[0]->dir_in = 0;
	hsotg->eps_out[0]->send_zlp = 0;
	hsotg->ep0_state = DWC2_EP0_SETUP;

	ret = dwc2_hsotg_ep_queue(&hsotg->eps_out[0]->ep, req, GFP_ATOMIC);
	if (ret < 0) {
		dev_err(hsotg->dev, "%s: failed queue (%d)\n", __func__, ret);
		/*
		 * Don't think there's much we can do other than watch the
		 * driver fail.
		 */
	}
}

static void dwc2_hsotg_program_zlp(struct dwc2_hsotg *hsotg,
				   struct dwc2_hsotg_ep *hs_ep)
{
	u32 ctrl;
	u8 index = hs_ep->index;
	u32 epctl_reg = hs_ep->dir_in ? DIEPCTL(index) : DOEPCTL(index);
	u32 epsiz_reg = hs_ep->dir_in ? DIEPTSIZ(index) : DOEPTSIZ(index);

	if (hs_ep->dir_in)
		dev_dbg(hsotg->dev, "Sending zero-length packet on ep%d\n",
			index);
	else
		dev_dbg(hsotg->dev, "Receiving zero-length packet on ep%d\n",
			index);

	if (using_desc_dma(hsotg)) {
		/* No specific buffer needed for ep0 ZLP */
		dma_addr_t dma = hs_ep->desc_list_dma;

		if (!index)
			dwc2_gadget_set_ep0_desc_chain(hsotg, hs_ep);

		dwc2_gadget_config_nonisoc_xfer_ddma(hs_ep, dma, 0);
	} else {
		dwc2_writel(hsotg, DXEPTSIZ_MC(1) | DXEPTSIZ_PKTCNT(1) |
			    DXEPTSIZ_XFERSIZE(0),
			    epsiz_reg);
	}

	ctrl = dwc2_readl(hsotg, epctl_reg);
	ctrl |= DXEPCTL_CNAK;	/* clear NAK set by core */
	ctrl |= DXEPCTL_EPENA;	/* ensure ep enabled */
	ctrl |= DXEPCTL_USBACTEP;
	dwc2_writel(hsotg, ctrl, epctl_reg);
}

/**
 * dwc2_hsotg_complete_request - complete a request given to us
 * @hsotg: The device state.
 * @hs_ep: The endpoint the request was on.
 * @hs_req: The request to complete.
 * @result: The result code (0 => Ok, otherwise errno)
 *
 * The given request has finished, so call the necessary completion
 * if it has one and then look to see if we can start a new request
 * on the endpoint.
 *
 * Note, expects the ep to already be locked as appropriate.
 */
static void dwc2_hsotg_complete_request(struct dwc2_hsotg *hsotg,
					struct dwc2_hsotg_ep *hs_ep,
					struct dwc2_hsotg_req *hs_req,
					int result)
{
	if (!hs_req) {
		dev_dbg(hsotg->dev, "%s: nothing to complete?\n", __func__);
		return;
	}

	dev_dbg(hsotg->dev, "complete: ep %p %s, req %p, %d => %p\n",
		hs_ep, hs_ep->ep.name, hs_req, result, hs_req->req.complete);

	/*
	 * only replace the status if we've not already set an error
	 * from a previous transaction
	 */
	if (hs_req->req.status == -EINPROGRESS)
		hs_req->req.status = result;

	if (using_dma(hsotg))
		dwc2_hsotg_unmap_dma(hsotg, hs_ep, hs_req);

	dwc2_hsotg_handle_unaligned_buf_complete(hsotg, hs_ep, hs_req);

	hs_ep->req = NULL;
	list_del_init(&hs_req->queue);

	/*
	 * call the complete request with the locks off, just in case the
	 * request tries to queue more work for this endpoint.
	 */
	if (hs_req->req.complete) {
		spin_unlock(&hsotg->lock);
		usb_gadget_giveback_request(&hs_ep->ep, &hs_req->req);
		spin_lock(&hsotg->lock);
	}

	/* In DDMA don't need to proceed to starting of next ISOC request */
	if (using_desc_dma(hsotg) && hs_ep->isochronous)
		return;

	/*
	 * Look to see if there is anything else to do. Note, the completion
	 * of the previous request may have caused a new request to be started
	 * so be careful when doing this.
	 */
	if (!hs_ep->req && result >= 0)
		dwc2_gadget_start_next_request(hs_ep);
}

/*
 * dwc2_gadget_complete_isoc_request_ddma - complete an isoc request in DDMA
 * @hs_ep: The endpoint the request was on.
 *
 * Get the first request from the ep queue and determine the descriptor on
 * which the completion happened. SW discovers which descriptor is currently
 * in use by HW, adjusts the dma_address and calculates the index of the
 * completed descriptor based on the value of the DEPDMA register. Updates
 * the actual length of the request and gives it back to the gadget.
 */
static void dwc2_gadget_complete_isoc_request_ddma(struct dwc2_hsotg_ep *hs_ep)
{
	struct dwc2_hsotg *hsotg = hs_ep->parent;
	struct dwc2_hsotg_req *hs_req;
	struct usb_request *ureq;
	u32 desc_sts;
	u32 mask;

	desc_sts = hs_ep->desc_list[hs_ep->compl_desc].status;

	/* Process only descriptors with buffer status set to DMA done */
	while ((desc_sts & DEV_DMA_BUFF_STS_MASK) >>
	       DEV_DMA_BUFF_STS_SHIFT == DEV_DMA_BUFF_STS_DMADONE) {

		hs_req = get_ep_head(hs_ep);
		if (!hs_req) {
			dev_warn(hsotg->dev, "%s: ISOC EP queue empty\n", __func__);
			return;
		}
		ureq = &hs_req->req;

		/* Check completion status */
		if ((desc_sts & DEV_DMA_STS_MASK) >> DEV_DMA_STS_SHIFT ==
		    DEV_DMA_STS_SUCC) {
			mask = hs_ep->dir_in ? DEV_DMA_ISOC_TX_NBYTES_MASK :
			       DEV_DMA_ISOC_RX_NBYTES_MASK;
			ureq->actual = ureq->length - ((desc_sts & mask) >>
				       DEV_DMA_ISOC_NBYTES_SHIFT);

			/* Adjust actual len for ISOC Out if len is
			 * not aligned to 4
			 */
			if (!hs_ep->dir_in && ureq->length & 0x3)
				ureq->actual += 4 - (ureq->length & 0x3);

			/* Set actual frame number for completed transfers */
			ureq->frame_number =
				(desc_sts & DEV_DMA_ISOC_FRNUM_MASK) >>
				DEV_DMA_ISOC_FRNUM_SHIFT;
		}

		dwc2_hsotg_complete_request(hsotg, hs_ep, hs_req, 0);

		hs_ep->compl_desc++;
		if (hs_ep->compl_desc > (MAX_DMA_DESC_NUM_HS_ISOC - 1))
			hs_ep->compl_desc = 0;
		desc_sts = hs_ep->desc_list[hs_ep->compl_desc].status;
	}
}

/*
 * dwc2_gadget_handle_isoc_bna - handle BNA interrupt for ISOC.
 * @hs_ep: The isochronous endpoint.
 *
 * If EP ISOC OUT then need to flush RX FIFO to remove source of BNA
 * interrupt. Reset target frame and next_desc to allow to start
 * ISOC's on NAK interrupt for IN direction or on OUTTKNEPDIS
 * interrupt for OUT direction.
 */
static void dwc2_gadget_handle_isoc_bna(struct dwc2_hsotg_ep *hs_ep)
{
	struct dwc2_hsotg *hsotg = hs_ep->parent;

	if (!hs_ep->dir_in)
		dwc2_flush_rx_fifo(hsotg);
	dwc2_hsotg_complete_request(hsotg, hs_ep, get_ep_head(hs_ep), 0);

	hs_ep->target_frame = TARGET_FRAME_INITIAL;
	hs_ep->next_desc = 0;
	hs_ep->compl_desc = 0;
}

/**
 * dwc2_hsotg_rx_data - receive data from the FIFO for an endpoint
 * @hsotg: The device state.
 * @ep_idx: The endpoint index for the data
 * @size: The size of data in the fifo, in bytes
 *
 * The FIFO status shows there is data to read from the FIFO for a given
 * endpoint, so sort out whether we need to read the data into a request
 * that has been made for that endpoint.
 */
static void dwc2_hsotg_rx_data(struct dwc2_hsotg *hsotg, int ep_idx, int size)
{
	struct dwc2_hsotg_ep *hs_ep = hsotg->eps_out[ep_idx];
	struct dwc2_hsotg_req *hs_req;
	int to_read;
	int max_req;
	int read_ptr;

	if (!hs_ep) {
		dev_err(hsotg->dev, "%s: no OUT endpoint %d\n",
			__func__, ep_idx);
		return;
	}

	hs_req = hs_ep->req;

	if (!hs_req) {
		u32 epctl = dwc2_readl(hsotg, DOEPCTL(ep_idx));
		int ptr;

		dev_dbg(hsotg->dev,
			"%s: FIFO %d bytes on ep%d but no req (DXEPCTl=0x%08x)\n",
			__func__, size, ep_idx, epctl);

		/* dump the data from the FIFO, we've nothing we can do */
		for (ptr = 0; ptr < size; ptr += 4)
			(void)dwc2_readl(hsotg, EPFIFO(ep_idx));

		return;
	}

	to_read = size;
	read_ptr = hs_req->req.actual;
	max_req = hs_req->req.length - read_ptr;

	dev_dbg(hsotg->dev, "%s: read %d/%d, done %d/%d\n",
		__func__, to_read, max_req, read_ptr, hs_req->req.length);

	if (to_read > max_req) {
		/*
		 * more data appeared than we were willing
		 * to deal with in this request.
		 */

		/* currently we don't deal with this */
		WARN_ON_ONCE(1);
	}

	hs_ep->total_data += to_read;
	hs_req->req.actual += to_read;
	to_read = DIV_ROUND_UP(to_read, 4);

	/*
	 * note, we might over-write the buffer end by 3 bytes depending on
	 * alignment of the data.
	 */
	dwc2_readl_rep(hsotg, EPFIFO(ep_idx),
		       hs_req->req.buf + read_ptr, to_read);
}
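/*
 * Worked example (illustrative, not from the original source): a 7 byte
 * OUT packet gives to_read = DIV_ROUND_UP(7, 4) = 2 FIFO words, so 8
 * bytes are popped from the FIFO and land at buf + read_ptr; as noted
 * above, request buffers must therefore tolerate up to 3 bytes of
 * overwrite past the payload.
 */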
/**
 * dwc2_hsotg_ep0_zlp - send/receive zero-length packet on control endpoint
 * @hsotg: The device instance
 * @dir_in: If IN zlp
 *
 * Generate a zero-length IN packet request for terminating a SETUP
 * transaction.
 *
 * Note, since we don't write any data to the TxFIFO, then it is
 * currently believed that we do not need to wait for any space in
 * the TxFIFO.
 */
static void dwc2_hsotg_ep0_zlp(struct dwc2_hsotg *hsotg, bool dir_in)
{
	/* eps_out[0] is used in both directions */
	hsotg->eps_out[0]->dir_in = dir_in;
	hsotg->ep0_state = dir_in ? DWC2_EP0_STATUS_IN : DWC2_EP0_STATUS_OUT;

	dwc2_hsotg_program_zlp(hsotg, hsotg->eps_out[0]);
}

/*
 * dwc2_gadget_get_xfersize_ddma - get transferred bytes amount from desc
 * @hs_ep - The endpoint on which transfer went
 *
 * Iterate over the endpoint's descriptor chain and get info on the bytes
 * remaining in the DMA descriptors after the transfer has completed.
 * Used for non isoc EPs.
 */
static unsigned int dwc2_gadget_get_xfersize_ddma(struct dwc2_hsotg_ep *hs_ep)
{
	const struct usb_endpoint_descriptor *ep_desc = hs_ep->ep.desc;
	struct dwc2_hsotg *hsotg = hs_ep->parent;
	unsigned int bytes_rem = 0;
	unsigned int bytes_rem_correction = 0;
	struct dwc2_dma_desc *desc = hs_ep->desc_list;
	int i;
	u32 status;
	u32 mps = hs_ep->ep.maxpacket;
	int dir_in = hs_ep->dir_in;

	if (!desc)
		return -EINVAL;

	/* Interrupt OUT EP with mps not multiple of 4 */
	if (hs_ep->index)
		if (usb_endpoint_xfer_int(ep_desc) && !dir_in && (mps % 4))
			bytes_rem_correction = 4 - (mps % 4);

	for (i = 0; i < hs_ep->desc_count; ++i) {
		status = desc->status;
		bytes_rem += status & DEV_DMA_NBYTES_MASK;
		bytes_rem -= bytes_rem_correction;

		if (status & DEV_DMA_STS_MASK)
			dev_err(hsotg->dev, "descriptor %d closed with %x\n",
				i, status & DEV_DMA_STS_MASK);

		if (status & DEV_DMA_L)
			break;

		desc++;
	}

	return bytes_rem;
}
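/*
 * Worked example (illustrative, not from the original source): an
 * interrupt OUT endpoint with mps = 10 has mps % 4 = 2, so
 * bytes_rem_correction = 4 - 2 = 2 and each descriptor's NBYTES count
 * is trimmed by 2 in the loop above, undoing the word-rounding applied
 * when the descriptor was programmed.
 */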
/**
 * dwc2_hsotg_handle_outdone - handle receiving OutDone/SetupDone from RXFIFO
 * @hsotg: The device instance
 * @epnum: The endpoint received from
 *
 * The RXFIFO has delivered an OutDone event, which means that the data
 * transfer for an OUT endpoint has been completed, either by a short
 * packet or by the finish of a transfer.
 */
static void dwc2_hsotg_handle_outdone(struct dwc2_hsotg *hsotg, int epnum)
{
	u32 epsize = dwc2_readl(hsotg, DOEPTSIZ(epnum));
	struct dwc2_hsotg_ep *hs_ep = hsotg->eps_out[epnum];
	struct dwc2_hsotg_req *hs_req;
	struct usb_request *req;
	unsigned int size_left = DXEPTSIZ_XFERSIZE_GET(epsize);
	int result = 0;

	if (!hs_ep) {
		dev_err(hsotg->dev, "%s: no OUT endpoint %d\n",
			__func__, epnum);
		return;
	}

	hs_req = hs_ep->req;

	if (!hs_req) {
		dev_dbg(hsotg->dev, "%s: no request active\n", __func__);
		return;
	}

	req = &hs_req->req;

	if (epnum == 0 && hsotg->ep0_state == DWC2_EP0_STATUS_OUT) {
		dev_dbg(hsotg->dev, "zlp packet received\n");
		dwc2_hsotg_complete_request(hsotg, hs_ep, hs_req, 0);
		dwc2_hsotg_enqueue_setup(hsotg);
		return;
	}

	if (using_desc_dma(hsotg))
		size_left = dwc2_gadget_get_xfersize_ddma(hs_ep);

	if (using_dma(hsotg)) {
		unsigned int size_done;

		/*
		 * Calculate the size of the transfer by checking how much
		 * is left in the endpoint size register and then working it
		 * out from the amount we loaded for the transfer.
		 *
		 * We need to do this as DMA pointers are always 32bit aligned
		 * so may overshoot/undershoot the transfer.
		 */

		size_done = hs_ep->size_loaded - size_left;
		size_done += hs_ep->last_load;

		req->actual = size_done;
	}
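	/*
	 * Worked example (illustrative, not from the original source): if
	 * 1024 bytes were programmed (size_loaded) and XferSize now reads
	 * back 24 (size_left), then 1000 bytes arrived in this round;
	 * last_load adds back whatever earlier rounds of the same request
	 * had already completed.
	 */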
	/* if there is more request to do, schedule new transfer */
	if (req->actual < req->length && size_left == 0) {
		dwc2_hsotg_start_req(hsotg, hs_ep, hs_req, true);
		return;
	}

	if (req->actual < req->length && req->short_not_ok) {
		dev_dbg(hsotg->dev, "%s: got %d/%d (short not ok) => error\n",
			__func__, req->actual, req->length);

		/*
		 * todo - what should we return here? there's no one else
		 * even bothering to check the status.
		 */
	}

	/* DDMA IN status phase will start from StsPhseRcvd interrupt */
	if (!using_desc_dma(hsotg) && epnum == 0 &&
	    hsotg->ep0_state == DWC2_EP0_DATA_OUT) {
		/* Move to STATUS IN */
		if (!hsotg->delayed_status)
			dwc2_hsotg_ep0_zlp(hsotg, true);
	}

	/* Set actual frame number for completed transfers */
	if (!using_desc_dma(hsotg) && hs_ep->isochronous) {
		req->frame_number = hs_ep->target_frame;
		dwc2_gadget_incr_frame_num(hs_ep);
	}

	dwc2_hsotg_complete_request(hsotg, hs_ep, hs_req, result);
}

/**
 * dwc2_hsotg_handle_rx - RX FIFO has data
 * @hsotg: The device instance
 *
 * The IRQ handler has detected that the RX FIFO has some data in it
 * that requires processing, so find out what is in there and do the
 * appropriate read.
 *
 * The RXFIFO is a true FIFO, the packets coming out are still in packet
 * chunks, so if you have x packets received on an endpoint you'll get x
 * FIFO events delivered, each with a packet's worth of data in it.
 *
 * When using DMA, we should not be processing events from the RXFIFO
 * as the actual data should be sent to the memory directly and we turn
 * on the completion interrupts to get notifications of transfer completion.
 */
static void dwc2_hsotg_handle_rx(struct dwc2_hsotg *hsotg)
{
	u32 grxstsr = dwc2_readl(hsotg, GRXSTSP);
	u32 epnum, status, size;

	WARN_ON(using_dma(hsotg));

	epnum = grxstsr & GRXSTS_EPNUM_MASK;
	status = grxstsr & GRXSTS_PKTSTS_MASK;

	size = grxstsr & GRXSTS_BYTECNT_MASK;
	size >>= GRXSTS_BYTECNT_SHIFT;

	dev_dbg(hsotg->dev, "%s: GRXSTSP=0x%08x (%d@%d)\n",
		__func__, grxstsr, size, epnum);

	switch ((status & GRXSTS_PKTSTS_MASK) >> GRXSTS_PKTSTS_SHIFT) {
	case GRXSTS_PKTSTS_GLOBALOUTNAK:
		dev_dbg(hsotg->dev, "GLOBALOUTNAK\n");
		break;

	case GRXSTS_PKTSTS_OUTDONE:
		dev_dbg(hsotg->dev, "OutDone (Frame=0x%08x)\n",
			dwc2_hsotg_read_frameno(hsotg));

		if (!using_dma(hsotg))
			dwc2_hsotg_handle_outdone(hsotg, epnum);
		break;

	case GRXSTS_PKTSTS_SETUPDONE:
		dev_dbg(hsotg->dev,
			"SetupDone (Frame=0x%08x, DOEPCTL=0x%08x)\n",
			dwc2_hsotg_read_frameno(hsotg),
			dwc2_readl(hsotg, DOEPCTL(0)));
		/*
		 * Call dwc2_hsotg_handle_outdone here if it was not called from
		 * GRXSTS_PKTSTS_OUTDONE. That is, if the core didn't
		 * generate GRXSTS_PKTSTS_OUTDONE for setup packet.
		 */
		if (hsotg->ep0_state == DWC2_EP0_SETUP)
			dwc2_hsotg_handle_outdone(hsotg, epnum);
		break;

	case GRXSTS_PKTSTS_OUTRX:
		dwc2_hsotg_rx_data(hsotg, epnum, size);
		break;

	case GRXSTS_PKTSTS_SETUPRX:
		dev_dbg(hsotg->dev,
			"SetupRX (Frame=0x%08x, DOEPCTL=0x%08x)\n",
			dwc2_hsotg_read_frameno(hsotg),
			dwc2_readl(hsotg, DOEPCTL(0)));

		WARN_ON(hsotg->ep0_state != DWC2_EP0_SETUP);

		dwc2_hsotg_rx_data(hsotg, epnum, size);
		break;

	default:
		dev_warn(hsotg->dev, "%s: unknown status %08x\n",
			 __func__, grxstsr);

		dwc2_hsotg_dump(hsotg);
		break;
	}
}

/**
 * dwc2_hsotg_ep0_mps - turn max packet size into register setting
 * @mps: The maximum packet size in bytes.
 */
static u32 dwc2_hsotg_ep0_mps(unsigned int mps)
{
	switch (mps) {
	case 64:
		return D0EPCTL_MPS_64;
	case 32:
		return D0EPCTL_MPS_32;
	case 16:
		return D0EPCTL_MPS_16;
	case 8:
		return D0EPCTL_MPS_8;
	}

	/* bad max packet size, warn and return invalid result */
	WARN_ON(1);
	return (u32)-1;
}

/**
 * dwc2_hsotg_set_ep_maxpacket - set endpoint's max-packet field
 * @hsotg: The driver state.
 * @ep: The index number of the endpoint
 * @mps: The maximum packet size in bytes
 * @mc: The multicount value
 * @dir_in: True if direction is in.
 *
 * Configure the maximum packet size for the given endpoint, updating
 * the hardware control registers to reflect this.
 */
static void dwc2_hsotg_set_ep_maxpacket(struct dwc2_hsotg *hsotg,
					unsigned int ep, unsigned int mps,
					unsigned int mc, unsigned int dir_in)
{
	struct dwc2_hsotg_ep *hs_ep;
	u32 reg;

	hs_ep = index_to_ep(hsotg, ep, dir_in);
	if (!hs_ep)
		return;

	if (ep == 0) {
		u32 mps_bytes = mps;

		/* EP0 is a special case */
		mps = dwc2_hsotg_ep0_mps(mps_bytes);
		if (mps > 3)
			goto bad_mps;
		hs_ep->ep.maxpacket = mps_bytes;
		hs_ep->mc = 1;
	} else {
		if (mps > 1024)
			goto bad_mps;
		hs_ep->mc = mc;
		if (mc > 3)
			goto bad_mps;
		hs_ep->ep.maxpacket = mps;
	}

	if (dir_in) {
		reg = dwc2_readl(hsotg, DIEPCTL(ep));
		reg &= ~DXEPCTL_MPS_MASK;
		reg |= mps;
		dwc2_writel(hsotg, reg, DIEPCTL(ep));
	} else {
		reg = dwc2_readl(hsotg, DOEPCTL(ep));
		reg &= ~DXEPCTL_MPS_MASK;
		reg |= mps;
		dwc2_writel(hsotg, reg, DOEPCTL(ep));
	}

	return;

bad_mps:
	dev_err(hsotg->dev, "ep%d: bad mps of %d\n", ep, mps);
}

/**
 * dwc2_hsotg_txfifo_flush - flush Tx FIFO
 * @hsotg: The driver state
 * @idx: The index for the endpoint (0..15)
 */
static void dwc2_hsotg_txfifo_flush(struct dwc2_hsotg *hsotg, unsigned int idx)
{
	dwc2_writel(hsotg, GRSTCTL_TXFNUM(idx) | GRSTCTL_TXFFLSH,
		    GRSTCTL);

	/* wait until the fifo is flushed */
	if (dwc2_hsotg_wait_bit_clear(hsotg, GRSTCTL, GRSTCTL_TXFFLSH, 100))
		dev_warn(hsotg->dev, "%s: timeout flushing fifo GRSTCTL_TXFFLSH\n",
			 __func__);
}
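/*
 * Example (illustrative, not from the original source): flushing the FIFO
 * assigned to index 2 writes GRSTCTL_TXFNUM(2) | GRSTCTL_TXFFLSH and then
 * polls GRSTCTL until the core clears TXFFLSH by itself; the 100 passed
 * to dwc2_hsotg_wait_bit_clear() above bounds that wait.
 */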
/**
 * dwc2_hsotg_trytx - check to see if anything needs transmitting
 * @hsotg: The driver state
 * @hs_ep: The driver endpoint to check.
 *
 * Check to see if there is a request that has data to send, and if so
 * make an attempt to write data into the FIFO.
 */
static int dwc2_hsotg_trytx(struct dwc2_hsotg *hsotg,
			    struct dwc2_hsotg_ep *hs_ep)
{
	struct dwc2_hsotg_req *hs_req = hs_ep->req;

	if (!hs_ep->dir_in || !hs_req) {
		/*
		 * if request is not enqueued, we disable interrupts
		 * for endpoints, excepting ep0
		 */
		if (hs_ep->index != 0)
			dwc2_hsotg_ctrl_epint(hsotg, hs_ep->index,
					      hs_ep->dir_in, 0);
		return 0;
	}

	if (hs_req->req.actual < hs_req->req.length) {
		dev_dbg(hsotg->dev, "trying to write more for ep%d\n",
			hs_ep->index);
		return dwc2_hsotg_write_fifo(hsotg, hs_ep, hs_req);
	}

	return 0;
}

/**
 * dwc2_hsotg_complete_in - complete IN transfer
 * @hsotg: The device state.
 * @hs_ep: The endpoint that has just completed.
 *
 * An IN transfer has been completed, update the transfer's state and then
 * call the relevant completion routines.
 */
static void dwc2_hsotg_complete_in(struct dwc2_hsotg *hsotg,
				   struct dwc2_hsotg_ep *hs_ep)
{
	struct dwc2_hsotg_req *hs_req = hs_ep->req;
	u32 epsize = dwc2_readl(hsotg, DIEPTSIZ(hs_ep->index));
	int size_left, size_done;

	if (!hs_req) {
		dev_dbg(hsotg->dev, "XferCompl but no req\n");
		return;
	}

	/* Finish ZLP handling for IN EP0 transactions */
	if (hs_ep->index == 0 && hsotg->ep0_state == DWC2_EP0_STATUS_IN) {
		dev_dbg(hsotg->dev, "zlp packet sent\n");

		/*
		 * While sending the zlp for DWC2_EP0_STATUS_IN the EP
		 * direction was changed to IN. Change it back to complete
		 * the OUT transfer request.
		 */
		hs_ep->dir_in = 0;

		dwc2_hsotg_complete_request(hsotg, hs_ep, hs_req, 0);
		if (hsotg->test_mode) {
			int ret;

			ret = dwc2_hsotg_set_test_mode(hsotg, hsotg->test_mode);
			if (ret < 0) {
				dev_dbg(hsotg->dev, "Invalid Test #%d\n",
					hsotg->test_mode);
				dwc2_hsotg_stall_ep0(hsotg);
				return;
			}
		}
		dwc2_hsotg_enqueue_setup(hsotg);
		return;
	}

	/*
	 * Calculate the size of the transfer by checking how much is left
	 * in the endpoint size register and then working it out from
	 * the amount we loaded for the transfer.
	 *
	 * We do this even for DMA, as the transfer may have incremented
	 * past the end of the buffer (DMA transfers are always 32bit
	 * aligned).
	 */
	if (using_desc_dma(hsotg)) {
		size_left = dwc2_gadget_get_xfersize_ddma(hs_ep);
		if (size_left < 0)
			dev_err(hsotg->dev, "error parsing DDMA results %d\n",
				size_left);
	} else {
		size_left = DXEPTSIZ_XFERSIZE_GET(epsize);
	}

	size_done = hs_ep->size_loaded - size_left;
	size_done += hs_ep->last_load;

	if (hs_req->req.actual != size_done)
		dev_dbg(hsotg->dev, "%s: adjusting size done %d => %d\n",
			__func__, hs_req->req.actual, size_done);

	hs_req->req.actual = size_done;
	dev_dbg(hsotg->dev, "req->length:%d req->actual:%d req->zero:%d\n",
		hs_req->req.length, hs_req->req.actual, hs_req->req.zero);

	if (!size_left && hs_req->req.actual < hs_req->req.length) {
		dev_dbg(hsotg->dev, "%s trying more for req...\n", __func__);
		dwc2_hsotg_start_req(hsotg, hs_ep, hs_req, true);
		return;
	}

	/* Zlp for all endpoints in non DDMA, for ep0 only in DATA IN stage */
	if (hs_ep->send_zlp) {
		hs_ep->send_zlp = 0;
		if (!using_desc_dma(hsotg)) {
			dwc2_hsotg_program_zlp(hsotg, hs_ep);
			/* transfer will be completed on next complete interrupt */
			return;
		}
	}

	if (hs_ep->index == 0 && hsotg->ep0_state == DWC2_EP0_DATA_IN) {
		/* Move to STATUS OUT */
		dwc2_hsotg_ep0_zlp(hsotg, false);
		return;
	}

	/* Set actual frame number for completed transfers */
	if (!using_desc_dma(hsotg) && hs_ep->isochronous) {
		hs_req->req.frame_number = hs_ep->target_frame;
		dwc2_gadget_incr_frame_num(hs_ep);
	}

	dwc2_hsotg_complete_request(hsotg, hs_ep, hs_req, 0);
}

/**
 * dwc2_gadget_read_ep_interrupts - reads interrupts for given ep
 * @hsotg: The device state.
 * @idx: Index of ep.
 * @dir_in: Endpoint direction 1-in 0-out.
 *
 * Reads interrupts for the endpoint with the given index and direction,
 * by masking epint_reg with the corresponding mask.
 */
static u32 dwc2_gadget_read_ep_interrupts(struct dwc2_hsotg *hsotg,
					  unsigned int idx, int dir_in)
{
	u32 epmsk_reg = dir_in ? DIEPMSK : DOEPMSK;
	u32 epint_reg = dir_in ? DIEPINT(idx) : DOEPINT(idx);
	u32 ints;
	u32 mask;
	u32 diepempmsk;

	mask = dwc2_readl(hsotg, epmsk_reg);
	diepempmsk = dwc2_readl(hsotg, DIEPEMPMSK);
	mask |= ((diepempmsk >> idx) & 0x1) ? DIEPMSK_TXFIFOEMPTY : 0;
	mask |= DXEPINT_SETUP_RCVD;

	ints = dwc2_readl(hsotg, epint_reg);
	ints &= mask;
	return ints;
}
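/*
 * Example (illustrative, not from the original source): for an IN
 * endpoint whose bit is set in DIEPEMPMSK, the effective mask becomes
 * DIEPMSK | DIEPMSK_TXFIFOEMPTY | DXEPINT_SETUP_RCVD, so a pending
 * TxFIFOEmpty bit in DIEPINT(idx) survives the masking and is reported
 * to the caller.
 */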
  2399. /**
  2400. * dwc2_gadget_handle_ep_disabled - handle DXEPINT_EPDISBLD
  2401. * @hs_ep: The endpoint on which interrupt is asserted.
  2402. *
  2403. * This interrupt indicates that the endpoint has been disabled per the
  2404. * application's request.
  2405. *
  2406. * For IN endpoints flushes txfifo, in case of BULK clears DCTL_CGNPINNAK,
  2407. * in case of ISOC completes current request.
  2408. *
  2409. * For ISOC-OUT endpoints completes expired requests. If there is remaining
  2410. * request starts it.
  2411. */
  2412. static void dwc2_gadget_handle_ep_disabled(struct dwc2_hsotg_ep *hs_ep)
  2413. {
  2414. struct dwc2_hsotg *hsotg = hs_ep->parent;
  2415. struct dwc2_hsotg_req *hs_req;
  2416. unsigned char idx = hs_ep->index;
  2417. int dir_in = hs_ep->dir_in;
  2418. u32 epctl_reg = dir_in ? DIEPCTL(idx) : DOEPCTL(idx);
  2419. int dctl = dwc2_readl(hsotg, DCTL);
  2420. dev_dbg(hsotg->dev, "%s: EPDisbld\n", __func__);
  2421. if (dir_in) {
  2422. int epctl = dwc2_readl(hsotg, epctl_reg);
  2423. dwc2_hsotg_txfifo_flush(hsotg, hs_ep->fifo_index);
  2424. if ((epctl & DXEPCTL_STALL) && (epctl & DXEPCTL_EPTYPE_BULK)) {
  2425. int dctl = dwc2_readl(hsotg, DCTL);
  2426. dctl |= DCTL_CGNPINNAK;
  2427. dwc2_writel(hsotg, dctl, DCTL);
  2428. }
  2429. } else {
  2430. if (dctl & DCTL_GOUTNAKSTS) {
  2431. dctl |= DCTL_CGOUTNAK;
  2432. dwc2_writel(hsotg, dctl, DCTL);
  2433. }
  2434. }
  2435. if (!hs_ep->isochronous)
  2436. return;
  2437. if (list_empty(&hs_ep->queue)) {
  2438. dev_dbg(hsotg->dev, "%s: complete_ep 0x%p, ep->queue empty!\n",
  2439. __func__, hs_ep);
  2440. return;
  2441. }
  2442. do {
  2443. hs_req = get_ep_head(hs_ep);
  2444. if (hs_req) {
  2445. hs_req->req.frame_number = hs_ep->target_frame;
  2446. hs_req->req.actual = 0;
  2447. dwc2_hsotg_complete_request(hsotg, hs_ep, hs_req,
  2448. -ENODATA);
  2449. }
  2450. dwc2_gadget_incr_frame_num(hs_ep);
  2451. /* Update current frame number value. */
  2452. hsotg->frame_number = dwc2_hsotg_read_frameno(hsotg);
  2453. } while (dwc2_gadget_target_frame_elapsed(hs_ep));
  2454. }
  2455. /**
  2456. * dwc2_gadget_handle_out_token_ep_disabled - handle DXEPINT_OUTTKNEPDIS
  2457. * @ep: The endpoint on which interrupt is asserted.
  2458. *
  2459. * This is starting point for ISOC-OUT transfer, synchronization done with
  2460. * first out token received from host while corresponding EP is disabled.
  2461. *
  2462. * Device does not know initial frame in which out token will come. For this
  2463. * HW generates OUTTKNEPDIS - out token is received while EP is disabled. Upon
  2464. * getting this interrupt SW starts calculation for next transfer frame.
  2465. */
static void dwc2_gadget_handle_out_token_ep_disabled(struct dwc2_hsotg_ep *ep)
{
	struct dwc2_hsotg *hsotg = ep->parent;
	struct dwc2_hsotg_req *hs_req;
	int dir_in = ep->dir_in;

	if (dir_in || !ep->isochronous)
		return;

	if (using_desc_dma(hsotg)) {
		if (ep->target_frame == TARGET_FRAME_INITIAL) {
			/* Start first ISO Out */
			ep->target_frame = hsotg->frame_number;
			dwc2_gadget_start_isoc_ddma(ep);
		}
		return;
	}

	if (ep->target_frame == TARGET_FRAME_INITIAL) {
		u32 ctrl;

		ep->target_frame = hsotg->frame_number;
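		/* For intervals greater than one frame, tell the core which
		 * (u)frame parity (odd/even) the next transfer starts on.
		 */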
		if (ep->interval > 1) {
			ctrl = dwc2_readl(hsotg, DOEPCTL(ep->index));
			if (ep->target_frame & 0x1)
				ctrl |= DXEPCTL_SETODDFR;
			else
				ctrl |= DXEPCTL_SETEVENFR;

			dwc2_writel(hsotg, ctrl, DOEPCTL(ep->index));
		}
	}

	while (dwc2_gadget_target_frame_elapsed(ep)) {
		hs_req = get_ep_head(ep);
		if (hs_req) {
			hs_req->req.frame_number = ep->target_frame;
			hs_req->req.actual = 0;
			dwc2_hsotg_complete_request(hsotg, ep, hs_req, -ENODATA);
		}

		dwc2_gadget_incr_frame_num(ep);
		/* Update current frame number value. */
		hsotg->frame_number = dwc2_hsotg_read_frameno(hsotg);
	}

	if (!ep->req)
		dwc2_gadget_start_next_request(ep);
}
static void dwc2_hsotg_ep_stop_xfr(struct dwc2_hsotg *hsotg,
				   struct dwc2_hsotg_ep *hs_ep);
/**
 * dwc2_gadget_handle_nak - handle NAK interrupt
 * @hs_ep: The endpoint on which the interrupt is asserted.
 *
 * This is the starting point for an ISOC-IN transfer; synchronization is
 * done with the first IN token received from the host while the
 * corresponding EP is disabled.
 *
 * The device does not know when the first token will arrive from the host.
 * On the first token's arrival, HW generates two interrupts: 'in token
 * received while FIFO empty' and 'NAK'. For ISOC-IN, the NAK interrupt means
 * that a token has arrived and a ZLP was sent in response because there was
 * no data in the FIFO. SW uses this interrupt to obtain the frame in which
 * the token arrived and then, based on the interval, calculates the next
 * frame for the transfer.
 */
static void dwc2_gadget_handle_nak(struct dwc2_hsotg_ep *hs_ep)
{
	struct dwc2_hsotg *hsotg = hs_ep->parent;
	struct dwc2_hsotg_req *hs_req;
	int dir_in = hs_ep->dir_in;
	u32 ctrl;

	if (!dir_in || !hs_ep->isochronous)
		return;

	if (hs_ep->target_frame == TARGET_FRAME_INITIAL) {
		if (using_desc_dma(hsotg)) {
			hs_ep->target_frame = hsotg->frame_number;
			dwc2_gadget_incr_frame_num(hs_ep);

			/* In service interval mode target_frame must
			 * be set to last (u)frame of the service interval.
			 */
			if (hsotg->params.service_interval) {
				/* Set target_frame to the first (u)frame of
				 * the service interval
				 */
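				/* ~interval + 1 equals -interval; for the
				 * power-of-two intervals used here, ANDing
				 * with it rounds the frame number down to a
				 * multiple of the interval.
				 */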
				hs_ep->target_frame &= ~hs_ep->interval + 1;

				/* Set target_frame to the last (u)frame of
				 * the service interval
				 */
				dwc2_gadget_incr_frame_num(hs_ep);
				dwc2_gadget_dec_frame_num_by_one(hs_ep);
			}

			dwc2_gadget_start_isoc_ddma(hs_ep);
			return;
		}

		hs_ep->target_frame = hsotg->frame_number;
		if (hs_ep->interval > 1) {
			u32 ctrl = dwc2_readl(hsotg,
					      DIEPCTL(hs_ep->index));
			if (hs_ep->target_frame & 0x1)
				ctrl |= DXEPCTL_SETODDFR;
			else
				ctrl |= DXEPCTL_SETEVENFR;

			dwc2_writel(hsotg, ctrl, DIEPCTL(hs_ep->index));
		}
	}

	if (using_desc_dma(hsotg))
		return;

	ctrl = dwc2_readl(hsotg, DIEPCTL(hs_ep->index));
	if (ctrl & DXEPCTL_EPENA)
		dwc2_hsotg_ep_stop_xfr(hsotg, hs_ep);
	else
		dwc2_hsotg_txfifo_flush(hsotg, hs_ep->fifo_index);

	while (dwc2_gadget_target_frame_elapsed(hs_ep)) {
		hs_req = get_ep_head(hs_ep);
		if (hs_req) {
			hs_req->req.frame_number = hs_ep->target_frame;
			hs_req->req.actual = 0;
			dwc2_hsotg_complete_request(hsotg, hs_ep, hs_req, -ENODATA);
		}

		dwc2_gadget_incr_frame_num(hs_ep);
		/* Update current frame number value. */
		hsotg->frame_number = dwc2_hsotg_read_frameno(hsotg);
	}

	if (!hs_ep->req)
		dwc2_gadget_start_next_request(hs_ep);
}
/**
 * dwc2_hsotg_epint - handle an in/out endpoint interrupt
 * @hsotg: The driver state
 * @idx: The index for the endpoint (0..15)
 * @dir_in: Set if this is an IN endpoint
 *
 * Process and clear any interrupt pending for an individual endpoint
 */
static void dwc2_hsotg_epint(struct dwc2_hsotg *hsotg, unsigned int idx,
			     int dir_in)
{
	struct dwc2_hsotg_ep *hs_ep = index_to_ep(hsotg, idx, dir_in);
	u32 epint_reg = dir_in ? DIEPINT(idx) : DOEPINT(idx);
	u32 epctl_reg = dir_in ? DIEPCTL(idx) : DOEPCTL(idx);
	u32 epsiz_reg = dir_in ? DIEPTSIZ(idx) : DOEPTSIZ(idx);
	u32 ints;

	ints = dwc2_gadget_read_ep_interrupts(hsotg, idx, dir_in);

	/* Clear endpoint interrupts */
	dwc2_writel(hsotg, ints, epint_reg);

	if (!hs_ep) {
		dev_err(hsotg->dev, "%s:Interrupt for unconfigured ep%d(%s)\n",
			__func__, idx, dir_in ? "in" : "out");
		return;
	}

	dev_dbg(hsotg->dev, "%s: ep%d(%s) DxEPINT=0x%08x\n",
		__func__, idx, dir_in ? "in" : "out", ints);

	/* Don't process XferCompl interrupt if it is a setup packet */
	if (idx == 0 && (ints & (DXEPINT_SETUP | DXEPINT_SETUP_RCVD)))
		ints &= ~DXEPINT_XFERCOMPL;

	/*
	 * Don't process XferCompl interrupt in DDMA if EP0 is still in SETUP
	 * stage and xfercomplete was generated without SETUP phase done
	 * interrupt. SW should parse received setup packet only after host's
	 * exit from setup phase of control transfer.
	 */
	if (using_desc_dma(hsotg) && idx == 0 && !hs_ep->dir_in &&
	    hsotg->ep0_state == DWC2_EP0_SETUP && !(ints & DXEPINT_SETUP))
		ints &= ~DXEPINT_XFERCOMPL;

	if (ints & DXEPINT_XFERCOMPL) {
		dev_dbg(hsotg->dev,
			"%s: XferCompl: DxEPCTL=0x%08x, DXEPTSIZ=%08x\n",
			__func__, dwc2_readl(hsotg, epctl_reg),
			dwc2_readl(hsotg, epsiz_reg));

		/* In DDMA handle isochronous requests separately */
		if (using_desc_dma(hsotg) && hs_ep->isochronous) {
			dwc2_gadget_complete_isoc_request_ddma(hs_ep);
		} else if (dir_in) {
			/*
			 * We get OutDone from the FIFO, so we only
			 * need to look at completing IN requests here
			 * if operating slave mode
			 */
			if (!hs_ep->isochronous || !(ints & DXEPINT_NAKINTRPT))
				dwc2_hsotg_complete_in(hsotg, hs_ep);

			if (idx == 0 && !hs_ep->req)
				dwc2_hsotg_enqueue_setup(hsotg);
		} else if (using_dma(hsotg)) {
			/*
			 * We're using DMA, we need to fire an OutDone here
			 * as we ignore the RXFIFO.
			 */
			if (!hs_ep->isochronous || !(ints & DXEPINT_OUTTKNEPDIS))
				dwc2_hsotg_handle_outdone(hsotg, idx);
		}
	}

	if (ints & DXEPINT_EPDISBLD)
		dwc2_gadget_handle_ep_disabled(hs_ep);

	if (ints & DXEPINT_OUTTKNEPDIS)
		dwc2_gadget_handle_out_token_ep_disabled(hs_ep);

	if (ints & DXEPINT_NAKINTRPT)
		dwc2_gadget_handle_nak(hs_ep);

	if (ints & DXEPINT_AHBERR)
		dev_dbg(hsotg->dev, "%s: AHBErr\n", __func__);

	if (ints & DXEPINT_SETUP) {	/* Setup or Timeout */
		dev_dbg(hsotg->dev, "%s: Setup/Timeout\n", __func__);

		if (using_dma(hsotg) && idx == 0) {
			/*
			 * this is the notification we've received a
			 * setup packet. In non-DMA mode we'd get this
			 * from the RXFIFO, instead we need to process
			 * the setup here.
			 */
			if (dir_in)
				WARN_ON_ONCE(1);
			else
				dwc2_hsotg_handle_outdone(hsotg, 0);
		}
	}

	if (ints & DXEPINT_STSPHSERCVD) {
		dev_dbg(hsotg->dev, "%s: StsPhseRcvd\n", __func__);

		/* Safety check EP0 state when STSPHSERCVD asserted */
		if (hsotg->ep0_state == DWC2_EP0_DATA_OUT) {
			/* Move to STATUS IN for DDMA */
			if (using_desc_dma(hsotg)) {
				if (!hsotg->delayed_status)
					dwc2_hsotg_ep0_zlp(hsotg, true);
				else
				/* In case of 3 stage Control Write with delayed
				 * status, when Status IN transfer started
				 * before STSPHSERCVD asserted, NAKSTS bit not
				 * cleared by CNAK in dwc2_hsotg_start_req()
				 * function. Clear now NAKSTS to allow complete
				 * transfer.
				 */
					dwc2_set_bit(hsotg, DIEPCTL(0),
						     DXEPCTL_CNAK);
			}
		}
	}

	if (ints & DXEPINT_BACK2BACKSETUP)
		dev_dbg(hsotg->dev, "%s: B2BSetup/INEPNakEff\n", __func__);

	if (ints & DXEPINT_BNAINTR) {
		dev_dbg(hsotg->dev, "%s: BNA interrupt\n", __func__);
		if (hs_ep->isochronous)
			dwc2_gadget_handle_isoc_bna(hs_ep);
	}

	if (dir_in && !hs_ep->isochronous) {
		/* not sure if this is important, but we'll clear it anyway */
		if (ints & DXEPINT_INTKNTXFEMP) {
			dev_dbg(hsotg->dev, "%s: ep%d: INTknTXFEmpMsk\n",
				__func__, idx);
		}

		/* this probably means something bad is happening */
		if (ints & DXEPINT_INTKNEPMIS) {
			dev_warn(hsotg->dev, "%s: ep%d: INTknEP\n",
				 __func__, idx);
		}

		/* FIFO has space or is empty (see GAHBCFG) */
		if (hsotg->dedicated_fifos &&
		    ints & DXEPINT_TXFEMP) {
			dev_dbg(hsotg->dev, "%s: ep%d: TxFIFOEmpty\n",
				__func__, idx);
			if (!using_dma(hsotg))
				dwc2_hsotg_trytx(hsotg, hs_ep);
		}
	}
}
/**
 * dwc2_hsotg_irq_enumdone - Handle EnumDone interrupt (enumeration done)
 * @hsotg: The device state.
 *
 * Handle updating the device settings after the enumeration phase has
 * been completed.
 */
static void dwc2_hsotg_irq_enumdone(struct dwc2_hsotg *hsotg)
{
	u32 dsts = dwc2_readl(hsotg, DSTS);
	int ep0_mps = 0, ep_mps = 8;

	/*
	 * This should signal the finish of the enumeration phase
	 * of the USB handshaking, so we should now know what rate
	 * we connected at.
	 */
	dev_dbg(hsotg->dev, "EnumDone (DSTS=0x%08x)\n", dsts);

	/*
	 * note, since we're limited by the size of transfer on EP0, and
	 * it seems IN transfers must be an even number of packets we do
	 * not advertise a 64byte MPS on EP0.
	 */
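	/* The ep_mps defaults below reflect the USB 2.0 per-transaction
	 * payload limits: 1023 bytes for a full-speed isochronous endpoint,
	 * 1024 bytes at high speed.
	 */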
	/* catch both EnumSpd_FS and EnumSpd_FS48 */
	switch ((dsts & DSTS_ENUMSPD_MASK) >> DSTS_ENUMSPD_SHIFT) {
	case DSTS_ENUMSPD_FS:
	case DSTS_ENUMSPD_FS48:
		hsotg->gadget.speed = USB_SPEED_FULL;
		ep0_mps = EP0_MPS_LIMIT;
		ep_mps = 1023;
		break;

	case DSTS_ENUMSPD_HS:
		hsotg->gadget.speed = USB_SPEED_HIGH;
		ep0_mps = EP0_MPS_LIMIT;
		ep_mps = 1024;
		break;

	case DSTS_ENUMSPD_LS:
		hsotg->gadget.speed = USB_SPEED_LOW;
		ep0_mps = 8;
		ep_mps = 8;
		/*
		 * note, we don't actually support LS in this driver at the
		 * moment, and the documentation seems to imply that it isn't
		 * supported by the PHYs on some of the devices.
		 */
		break;
	}
	dev_info(hsotg->dev, "new device is %s\n",
		 usb_speed_string(hsotg->gadget.speed));

	/*
	 * we should now know the maximum packet size for an
	 * endpoint, so set the endpoints to a default value.
	 */
	if (ep0_mps) {
		int i;

		/* Initialize ep0 for both in and out directions */
		dwc2_hsotg_set_ep_maxpacket(hsotg, 0, ep0_mps, 0, 1);
		dwc2_hsotg_set_ep_maxpacket(hsotg, 0, ep0_mps, 0, 0);
		for (i = 1; i < hsotg->num_of_eps; i++) {
			if (hsotg->eps_in[i])
				dwc2_hsotg_set_ep_maxpacket(hsotg, i, ep_mps,
							    0, 1);
			if (hsotg->eps_out[i])
				dwc2_hsotg_set_ep_maxpacket(hsotg, i, ep_mps,
							    0, 0);
		}
	}

	/* ensure after enumeration our EP0 is active */
	dwc2_hsotg_enqueue_setup(hsotg);

	dev_dbg(hsotg->dev, "EP0: DIEPCTL0=0x%08x, DOEPCTL0=0x%08x\n",
		dwc2_readl(hsotg, DIEPCTL0),
		dwc2_readl(hsotg, DOEPCTL0));
}
/**
 * kill_all_requests - remove all requests from the endpoint's queue
 * @hsotg: The device state.
 * @ep: The endpoint the requests may be on.
 * @result: The result code to use.
 *
 * Go through the requests on the given endpoint and mark them
 * completed with the given result code.
 */
static void kill_all_requests(struct dwc2_hsotg *hsotg,
			      struct dwc2_hsotg_ep *ep,
			      int result)
{
	unsigned int size;

	ep->req = NULL;

	while (!list_empty(&ep->queue)) {
		struct dwc2_hsotg_req *req = get_ep_head(ep);

		dwc2_hsotg_complete_request(hsotg, ep, req, result);
	}

	if (!hsotg->dedicated_fifos)
		return;
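	/* DTXFSTS reports the free TxFIFO space in 32-bit words; if less
	 * than the whole FIFO is free, stale data is still queued and the
	 * FIFO must be flushed.
	 */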
	size = (dwc2_readl(hsotg, DTXFSTS(ep->fifo_index)) & 0xffff) * 4;
	if (size < ep->fifo_size)
		dwc2_hsotg_txfifo_flush(hsotg, ep->fifo_index);
}
/**
 * dwc2_hsotg_disconnect - disconnect service
 * @hsotg: The device state.
 *
 * The device has been disconnected. Remove all current
 * transactions and signal the gadget driver that this
 * has happened.
 */
void dwc2_hsotg_disconnect(struct dwc2_hsotg *hsotg)
{
	unsigned int ep;

	if (!hsotg->connected)
		return;

	hsotg->connected = 0;
	hsotg->test_mode = 0;

	/* all endpoints should be shutdown */
	for (ep = 0; ep < hsotg->num_of_eps; ep++) {
		if (hsotg->eps_in[ep])
			kill_all_requests(hsotg, hsotg->eps_in[ep],
					  -ESHUTDOWN);
		if (hsotg->eps_out[ep])
			kill_all_requests(hsotg, hsotg->eps_out[ep],
					  -ESHUTDOWN);
	}

	call_gadget(hsotg, disconnect);
	hsotg->lx_state = DWC2_L3;

	usb_gadget_set_state(&hsotg->gadget, USB_STATE_NOTATTACHED);
}
/**
 * dwc2_hsotg_irq_fifoempty - TX FIFO empty interrupt handler
 * @hsotg: The device state
 * @periodic: True if this is a periodic FIFO interrupt
 */
static void dwc2_hsotg_irq_fifoempty(struct dwc2_hsotg *hsotg, bool periodic)
{
	struct dwc2_hsotg_ep *ep;
	int epno, ret;

	/* look through for any more data to transmit */
	for (epno = 0; epno < hsotg->num_of_eps; epno++) {
		ep = index_to_ep(hsotg, epno, 1);

		if (!ep)
			continue;

		if (!ep->dir_in)
			continue;

		if ((periodic && !ep->periodic) ||
		    (!periodic && ep->periodic))
			continue;

		ret = dwc2_hsotg_trytx(hsotg, ep);
		if (ret < 0)
			break;
	}
}
/* IRQ flags which will trigger a retry around the IRQ loop */
#define IRQ_RETRY_MASK (GINTSTS_NPTXFEMP | \
			GINTSTS_PTXFEMP | \
			GINTSTS_RXFLVL)

static int dwc2_hsotg_ep_disable(struct usb_ep *ep);
/**
 * dwc2_hsotg_core_init_disconnected - issue softreset to the core
 * @hsotg: The device state
 * @is_usb_reset: True if this is called as part of a USB reset
 *
 * Issue a soft reset to the core, and await the core finishing it.
 */
void dwc2_hsotg_core_init_disconnected(struct dwc2_hsotg *hsotg,
				       bool is_usb_reset)
{
	u32 intmsk;
	u32 val;
	u32 usbcfg;
	u32 dcfg = 0;
	int ep;

	/* Kill any ep0 requests as controller will be reinitialized */
	kill_all_requests(hsotg, hsotg->eps_out[0], -ECONNRESET);

	if (!is_usb_reset) {
		if (dwc2_core_reset(hsotg, true))
			return;
	} else {
		/* all endpoints should be shutdown */
		for (ep = 1; ep < hsotg->num_of_eps; ep++) {
			if (hsotg->eps_in[ep])
				dwc2_hsotg_ep_disable(&hsotg->eps_in[ep]->ep);
			if (hsotg->eps_out[ep])
				dwc2_hsotg_ep_disable(&hsotg->eps_out[ep]->ep);
		}
	}

	/*
	 * we must now enable ep0 ready for host detection and then
	 * set configuration.
	 */

	/* keep other bits untouched (so e.g. forced modes are not lost) */
	usbcfg = dwc2_readl(hsotg, GUSBCFG);
	usbcfg &= ~GUSBCFG_TOUTCAL_MASK;
	usbcfg |= GUSBCFG_TOUTCAL(7);

	/* remove the HNP/SRP and set the PHY */
	usbcfg &= ~(GUSBCFG_SRPCAP | GUSBCFG_HNPCAP);
	dwc2_writel(hsotg, usbcfg, GUSBCFG);

	dwc2_phy_init(hsotg, true);

	dwc2_hsotg_init_fifo(hsotg);

	if (!is_usb_reset) {
		dwc2_set_bit(hsotg, DCTL, DCTL_SFTDISCON);
		if (hsotg->params.eusb2_disc)
			dwc2_set_bit(hsotg, GOTGCTL, GOTGCTL_EUSB2_DISC_SUPP);
	}

	dcfg |= DCFG_EPMISCNT(1);

	switch (hsotg->params.speed) {
	case DWC2_SPEED_PARAM_LOW:
		dcfg |= DCFG_DEVSPD_LS;
		break;
	case DWC2_SPEED_PARAM_FULL:
		if (hsotg->params.phy_type == DWC2_PHY_TYPE_PARAM_FS)
			dcfg |= DCFG_DEVSPD_FS48;
		else
			dcfg |= DCFG_DEVSPD_FS;
		break;
	default:
		dcfg |= DCFG_DEVSPD_HS;
	}

	if (hsotg->params.ipg_isoc_en)
		dcfg |= DCFG_IPG_ISOC_SUPPORDED;

	dwc2_writel(hsotg, dcfg, DCFG);

	/* Clear any pending OTG interrupts */
	dwc2_writel(hsotg, 0xffffffff, GOTGINT);

	/* Clear any pending interrupts */
	dwc2_writel(hsotg, 0xffffffff, GINTSTS);
	intmsk = GINTSTS_ERLYSUSP | GINTSTS_SESSREQINT |
		GINTSTS_GOUTNAKEFF | GINTSTS_GINNAKEFF |
		GINTSTS_USBRST | GINTSTS_RESETDET |
		GINTSTS_ENUMDONE | GINTSTS_OTGINT |
		GINTSTS_USBSUSP | GINTSTS_WKUPINT |
		GINTSTS_LPMTRANRCVD;

	if (!using_desc_dma(hsotg))
		intmsk |= GINTSTS_INCOMPL_SOIN | GINTSTS_INCOMPL_SOOUT;

	if (!hsotg->params.external_id_pin_ctl)
		intmsk |= GINTSTS_CONIDSTSCHNG;

	dwc2_writel(hsotg, intmsk, GINTMSK);
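	/* In DMA mode let the core master the AHB; in slave mode program the
	 * TxFIFO empty levels so the FIFO-empty interrupts fire only once a
	 * FIFO is completely empty.
	 */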
	if (using_dma(hsotg)) {
		dwc2_writel(hsotg, GAHBCFG_GLBL_INTR_EN | GAHBCFG_DMA_EN |
			    hsotg->params.ahbcfg,
			    GAHBCFG);

		/* Set DDMA mode support in the core if needed */
		if (using_desc_dma(hsotg))
			dwc2_set_bit(hsotg, DCFG, DCFG_DESCDMA_EN);
	} else {
		dwc2_writel(hsotg, ((hsotg->dedicated_fifos) ?
				    (GAHBCFG_NP_TXF_EMP_LVL |
				     GAHBCFG_P_TXF_EMP_LVL) : 0) |
			    GAHBCFG_GLBL_INTR_EN, GAHBCFG);
	}
	/*
	 * If INTknTXFEmpMsk is enabled, it's important to disable ep interrupts
	 * when we have no data to transfer. Otherwise we get flooded by
	 * interrupts.
	 */
	dwc2_writel(hsotg, ((hsotg->dedicated_fifos && !using_dma(hsotg)) ?
			    DIEPMSK_TXFIFOEMPTY | DIEPMSK_INTKNTXFEMPMSK : 0) |
		    DIEPMSK_EPDISBLDMSK | DIEPMSK_XFERCOMPLMSK |
		    DIEPMSK_TIMEOUTMSK | DIEPMSK_AHBERRMSK,
		    DIEPMSK);
	/*
	 * don't need XferCompl, we get that from RXFIFO in slave mode. In
	 * DMA mode we may need this and StsPhseRcvd.
	 */
	dwc2_writel(hsotg, (using_dma(hsotg) ? (DIEPMSK_XFERCOMPLMSK |
					       DOEPMSK_STSPHSERCVDMSK) : 0) |
		    DOEPMSK_EPDISBLDMSK | DOEPMSK_AHBERRMSK |
		    DOEPMSK_SETUPMSK,
		    DOEPMSK);

	/* Enable BNA interrupt for DDMA */
	if (using_desc_dma(hsotg)) {
		dwc2_set_bit(hsotg, DOEPMSK, DOEPMSK_BNAMSK);
		dwc2_set_bit(hsotg, DIEPMSK, DIEPMSK_BNAININTRMSK);
	}

	/* Enable Service Interval mode if supported */
	if (using_desc_dma(hsotg) && hsotg->params.service_interval)
		dwc2_set_bit(hsotg, DCTL, DCTL_SERVICE_INTERVAL_SUPPORTED);

	dwc2_writel(hsotg, 0, DAINTMSK);

	dev_dbg(hsotg->dev, "EP0: DIEPCTL0=0x%08x, DOEPCTL0=0x%08x\n",
		dwc2_readl(hsotg, DIEPCTL0),
		dwc2_readl(hsotg, DOEPCTL0));

	/* enable in and out endpoint interrupts */
	dwc2_hsotg_en_gsint(hsotg, GINTSTS_OEPINT | GINTSTS_IEPINT);

	/*
	 * Enable the RXFIFO when in slave mode, as this is how we collect
	 * the data. In DMA mode, we get events from the FIFO but also
	 * things we cannot process, so do not use it.
	 */
	if (!using_dma(hsotg))
		dwc2_hsotg_en_gsint(hsotg, GINTSTS_RXFLVL);

	/* Enable interrupts for EP0 in and out */
	dwc2_hsotg_ctrl_epint(hsotg, 0, 0, 1);
	dwc2_hsotg_ctrl_epint(hsotg, 0, 1, 1);

	if (!is_usb_reset) {
		dwc2_set_bit(hsotg, DCTL, DCTL_PWRONPRGDONE);
		udelay(10);	/* see openiboot */
		dwc2_clear_bit(hsotg, DCTL, DCTL_PWRONPRGDONE);
	}

	dev_dbg(hsotg->dev, "DCTL=0x%08x\n", dwc2_readl(hsotg, DCTL));

	/*
	 * DxEPCTL_USBActEp says RO in manual, but seems to be set by
	 * writing to the EPCTL register..
	 */

	/* set to read 1 8byte packet */
	dwc2_writel(hsotg, DXEPTSIZ_MC(1) | DXEPTSIZ_PKTCNT(1) |
		    DXEPTSIZ_XFERSIZE(8), DOEPTSIZ0);

	dwc2_writel(hsotg, dwc2_hsotg_ep0_mps(hsotg->eps_out[0]->ep.maxpacket) |
		    DXEPCTL_CNAK | DXEPCTL_EPENA |
		    DXEPCTL_USBACTEP,
		    DOEPCTL0);

	/* enable, but don't activate EP0in */
	dwc2_writel(hsotg, dwc2_hsotg_ep0_mps(hsotg->eps_out[0]->ep.maxpacket) |
		    DXEPCTL_USBACTEP, DIEPCTL0);

	/* clear global NAKs */
	val = DCTL_CGOUTNAK | DCTL_CGNPINNAK;
	if (!is_usb_reset)
		val |= DCTL_SFTDISCON;
	dwc2_set_bit(hsotg, DCTL, val);

	/* configure the core to support LPM */
	dwc2_gadget_init_lpm(hsotg);

	/* program GREFCLK register if needed */
	if (using_desc_dma(hsotg) && hsotg->params.service_interval)
		dwc2_gadget_program_ref_clk(hsotg);

	/* must be at least 3ms to allow bus to see disconnect */
	mdelay(3);

	hsotg->lx_state = DWC2_L0;

	dwc2_hsotg_enqueue_setup(hsotg);

	dev_dbg(hsotg->dev, "EP0: DIEPCTL0=0x%08x, DOEPCTL0=0x%08x\n",
		dwc2_readl(hsotg, DIEPCTL0),
		dwc2_readl(hsotg, DOEPCTL0));
}
void dwc2_hsotg_core_disconnect(struct dwc2_hsotg *hsotg)
{
	/* set the soft-disconnect bit */
	dwc2_set_bit(hsotg, DCTL, DCTL_SFTDISCON);
}

void dwc2_hsotg_core_connect(struct dwc2_hsotg *hsotg)
{
	/* remove the soft-disconnect and let's go */
	if (!hsotg->role_sw || (dwc2_readl(hsotg, GOTGCTL) & GOTGCTL_BSESVLD))
		dwc2_clear_bit(hsotg, DCTL, DCTL_SFTDISCON);
}
/**
 * dwc2_gadget_handle_incomplete_isoc_in - handle incomplete ISO IN Interrupt.
 * @hsotg: The device state
 *
 * This interrupt indicates one of the following conditions occurred while
 * transmitting an ISOC transaction.
 * - Corrupted IN Token for ISOC EP.
 * - Packet not complete in FIFO.
 *
 * The following actions will be taken:
 * - Determine the EP
 * - Disable EP; when the 'Endpoint Disabled' interrupt is received, flush
 *   the FIFO
 */
static void dwc2_gadget_handle_incomplete_isoc_in(struct dwc2_hsotg *hsotg)
{
	struct dwc2_hsotg_ep *hs_ep;
	u32 epctrl;
	u32 daintmsk;
	u32 idx;

	dev_dbg(hsotg->dev, "Incomplete isoc in interrupt received:\n");

	daintmsk = dwc2_readl(hsotg, DAINTMSK);

	for (idx = 1; idx < hsotg->num_of_eps; idx++) {
		hs_ep = hsotg->eps_in[idx];
		/* Proceed only unmasked ISOC EPs */
		if ((BIT(idx) & ~daintmsk) || !hs_ep->isochronous)
			continue;

		epctrl = dwc2_readl(hsotg, DIEPCTL(idx));
		if ((epctrl & DXEPCTL_EPENA) &&
		    dwc2_gadget_target_frame_elapsed(hs_ep)) {
			epctrl |= DXEPCTL_SNAK;
			epctrl |= DXEPCTL_EPDIS;
			dwc2_writel(hsotg, epctrl, DIEPCTL(idx));
		}
	}

	/* Clear interrupt */
	dwc2_writel(hsotg, GINTSTS_INCOMPL_SOIN, GINTSTS);
}
/**
 * dwc2_gadget_handle_incomplete_isoc_out - handle incomplete ISO OUT Interrupt
 * @hsotg: The device state
 *
 * This interrupt indicates one of the following conditions occurred while
 * receiving an ISOC transaction.
 * - Corrupted OUT Token for ISOC EP.
 * - Packet not complete in FIFO.
 *
 * The following actions will be taken:
 * - Determine the EP
 * - Set DCTL_SGOUTNAK and unmask GOUTNAKEFF if the target frame elapsed.
 */
static void dwc2_gadget_handle_incomplete_isoc_out(struct dwc2_hsotg *hsotg)
{
	u32 gintsts;
	u32 gintmsk;
	u32 daintmsk;
	u32 epctrl;
	struct dwc2_hsotg_ep *hs_ep;
	int idx;

	dev_dbg(hsotg->dev, "%s: GINTSTS_INCOMPL_SOOUT\n", __func__);

	daintmsk = dwc2_readl(hsotg, DAINTMSK);
	daintmsk >>= DAINT_OUTEP_SHIFT;

	for (idx = 1; idx < hsotg->num_of_eps; idx++) {
		hs_ep = hsotg->eps_out[idx];
		/* Proceed only unmasked ISOC EPs */
		if ((BIT(idx) & ~daintmsk) || !hs_ep->isochronous)
			continue;

		epctrl = dwc2_readl(hsotg, DOEPCTL(idx));
		if ((epctrl & DXEPCTL_EPENA) &&
		    dwc2_gadget_target_frame_elapsed(hs_ep)) {
			/* Unmask GOUTNAKEFF interrupt */
			gintmsk = dwc2_readl(hsotg, GINTMSK);
			gintmsk |= GINTSTS_GOUTNAKEFF;
			dwc2_writel(hsotg, gintmsk, GINTMSK);

			gintsts = dwc2_readl(hsotg, GINTSTS);
			if (!(gintsts & GINTSTS_GOUTNAKEFF)) {
				dwc2_set_bit(hsotg, DCTL, DCTL_SGOUTNAK);
				break;
			}
		}
	}

	/* Clear interrupt */
	dwc2_writel(hsotg, GINTSTS_INCOMPL_SOOUT, GINTSTS);
}
/**
 * dwc2_hsotg_irq - handle device interrupt
 * @irq: The IRQ number triggered
 * @pw: The private data supplied when the handler was registered
 */
static irqreturn_t dwc2_hsotg_irq(int irq, void *pw)
{
	struct dwc2_hsotg *hsotg = pw;
	int retry_count = 8;
	u32 gintsts;
	u32 gintmsk;

	if (!dwc2_is_device_mode(hsotg))
		return IRQ_NONE;

	spin_lock(&hsotg->lock);
irq_retry:
	gintsts = dwc2_readl(hsotg, GINTSTS);
	gintmsk = dwc2_readl(hsotg, GINTMSK);

	dev_dbg(hsotg->dev, "%s: %08x %08x (%08x) retry %d\n",
		__func__, gintsts, gintsts & gintmsk, gintmsk, retry_count);

	gintsts &= gintmsk;
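	/* RESETDET signals a USB reset detected while the core was in a
	 * suspended/low-power state, so bring the controller back to L0
	 * before the common reset handling below.
	 */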
	if (gintsts & GINTSTS_RESETDET) {
		dev_dbg(hsotg->dev, "%s: USBRstDet\n", __func__);

		dwc2_writel(hsotg, GINTSTS_RESETDET, GINTSTS);

		/* This event must be used only if controller is suspended */
		if (hsotg->in_ppd && hsotg->lx_state == DWC2_L2)
			dwc2_exit_partial_power_down(hsotg, 0, true);

		/* Exit gadget mode clock gating. */
		if (hsotg->params.power_down ==
		    DWC2_POWER_DOWN_PARAM_NONE && hsotg->bus_suspended &&
		    !hsotg->params.no_clock_gating)
			dwc2_gadget_exit_clock_gating(hsotg, 0);

		hsotg->lx_state = DWC2_L0;
	}

	if (gintsts & (GINTSTS_USBRST | GINTSTS_RESETDET)) {
		u32 usb_status = dwc2_readl(hsotg, GOTGCTL);
		u32 connected = hsotg->connected;

		dev_dbg(hsotg->dev, "%s: USBRst\n", __func__);
		dev_dbg(hsotg->dev, "GNPTXSTS=%08x\n",
			dwc2_readl(hsotg, GNPTXSTS));

		dwc2_writel(hsotg, GINTSTS_USBRST, GINTSTS);

		/* Report disconnection if it is not already done. */
		dwc2_hsotg_disconnect(hsotg);

		/* Reset device address to zero */
		dwc2_clear_bit(hsotg, DCFG, DCFG_DEVADDR_MASK);

		if (usb_status & GOTGCTL_BSESVLD && connected)
			dwc2_hsotg_core_init_disconnected(hsotg, true);
	}

	if (gintsts & GINTSTS_ENUMDONE) {
		dwc2_writel(hsotg, GINTSTS_ENUMDONE, GINTSTS);

		dwc2_hsotg_irq_enumdone(hsotg);
	}

	if (gintsts & (GINTSTS_OEPINT | GINTSTS_IEPINT)) {
		u32 daint = dwc2_readl(hsotg, DAINT);
		u32 daintmsk = dwc2_readl(hsotg, DAINTMSK);
		u32 daint_out, daint_in;
		int ep;

		daint &= daintmsk;
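		/* DAINT packs IN endpoint bits in the low half and OUT
		 * endpoint bits in the high half (above DAINT_OUTEP_SHIFT).
		 */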
		daint_out = daint >> DAINT_OUTEP_SHIFT;
		daint_in = daint & ~(daint_out << DAINT_OUTEP_SHIFT);

		dev_dbg(hsotg->dev, "%s: daint=%08x\n", __func__, daint);

		for (ep = 0; ep < hsotg->num_of_eps && daint_out;
		     ep++, daint_out >>= 1) {
			if (daint_out & 1)
				dwc2_hsotg_epint(hsotg, ep, 0);
		}

		for (ep = 0; ep < hsotg->num_of_eps && daint_in;
		     ep++, daint_in >>= 1) {
			if (daint_in & 1)
				dwc2_hsotg_epint(hsotg, ep, 1);
		}
	}

	/* check both FIFOs */

	if (gintsts & GINTSTS_NPTXFEMP) {
		dev_dbg(hsotg->dev, "NPTxFEmp\n");

		/*
		 * Disable the interrupt to stop it happening again
		 * unless one of these endpoint routines decides that
		 * it needs re-enabling
		 */
		dwc2_hsotg_disable_gsint(hsotg, GINTSTS_NPTXFEMP);
		dwc2_hsotg_irq_fifoempty(hsotg, false);
	}

	if (gintsts & GINTSTS_PTXFEMP) {
		dev_dbg(hsotg->dev, "PTxFEmp\n");

		/* See note in GINTSTS_NPTxFEmp */

		dwc2_hsotg_disable_gsint(hsotg, GINTSTS_PTXFEMP);
		dwc2_hsotg_irq_fifoempty(hsotg, true);
	}

	if (gintsts & GINTSTS_RXFLVL) {
		/*
		 * note, since GINTSTS_RxFLvl doubles as FIFO-not-empty,
		 * we need to retry dwc2_hsotg_handle_rx if this is still
		 * set.
		 */
		dwc2_hsotg_handle_rx(hsotg);
	}

	if (gintsts & GINTSTS_ERLYSUSP) {
		dev_dbg(hsotg->dev, "GINTSTS_ErlySusp\n");
		dwc2_writel(hsotg, GINTSTS_ERLYSUSP, GINTSTS);
	}

	/*
	 * these next two seem to crop-up occasionally causing the core
	 * to shutdown the USB transfer, so try clearing them and logging
	 * the occurrence.
	 */
	if (gintsts & GINTSTS_GOUTNAKEFF) {
		u8 idx;
		u32 epctrl;
		u32 gintmsk;
		u32 daintmsk;
		struct dwc2_hsotg_ep *hs_ep;

		daintmsk = dwc2_readl(hsotg, DAINTMSK);
		daintmsk >>= DAINT_OUTEP_SHIFT;
		/* Mask this interrupt */
		gintmsk = dwc2_readl(hsotg, GINTMSK);
		gintmsk &= ~GINTSTS_GOUTNAKEFF;
		dwc2_writel(hsotg, gintmsk, GINTMSK);

		dev_dbg(hsotg->dev, "GOUTNakEff triggered\n");
		for (idx = 1; idx < hsotg->num_of_eps; idx++) {
			hs_ep = hsotg->eps_out[idx];
			/* Proceed only unmasked ISOC EPs */
			if (BIT(idx) & ~daintmsk)
				continue;

			epctrl = dwc2_readl(hsotg, DOEPCTL(idx));

			/* ISOC EPs only */
			if ((epctrl & DXEPCTL_EPENA) && hs_ep->isochronous) {
				epctrl |= DXEPCTL_SNAK;
				epctrl |= DXEPCTL_EPDIS;
				dwc2_writel(hsotg, epctrl, DOEPCTL(idx));
				continue;
			}

			/* Non-ISOC EPs */
			if (hs_ep->halted) {
				if (!(epctrl & DXEPCTL_EPENA))
					epctrl |= DXEPCTL_EPENA;
				epctrl |= DXEPCTL_EPDIS;
				epctrl |= DXEPCTL_STALL;
				dwc2_writel(hsotg, epctrl, DOEPCTL(idx));
			}
		}

		/* This interrupt bit is cleared in DXEPINT_EPDISBLD handler */
	}

	if (gintsts & GINTSTS_GINNAKEFF) {
		dev_info(hsotg->dev, "GINNakEff triggered\n");

		dwc2_set_bit(hsotg, DCTL, DCTL_CGNPINNAK);

		dwc2_hsotg_dump(hsotg);
	}

	if (gintsts & GINTSTS_INCOMPL_SOIN)
		dwc2_gadget_handle_incomplete_isoc_in(hsotg);

	if (gintsts & GINTSTS_INCOMPL_SOOUT)
		dwc2_gadget_handle_incomplete_isoc_out(hsotg);

	/*
	 * if we've had fifo events, we should try and go around the
	 * loop again to see if there's any point in returning yet.
	 */
	if (gintsts & IRQ_RETRY_MASK && --retry_count > 0)
		goto irq_retry;

	/* Check WKUP_ALERT interrupt */
	if (hsotg->params.service_interval)
		dwc2_gadget_wkup_alert_handler(hsotg);

	spin_unlock(&hsotg->lock);

	return IRQ_HANDLED;
}
static void dwc2_hsotg_ep_stop_xfr(struct dwc2_hsotg *hsotg,
				   struct dwc2_hsotg_ep *hs_ep)
{
	u32 epctrl_reg;
	u32 epint_reg;

	epctrl_reg = hs_ep->dir_in ? DIEPCTL(hs_ep->index) :
		DOEPCTL(hs_ep->index);
	epint_reg = hs_ep->dir_in ? DIEPINT(hs_ep->index) :
		DOEPINT(hs_ep->index);

	dev_dbg(hsotg->dev, "%s: stopping transfer on %s\n", __func__,
		hs_ep->name);

	if (hs_ep->dir_in) {
		if (hsotg->dedicated_fifos || hs_ep->periodic) {
			dwc2_set_bit(hsotg, epctrl_reg, DXEPCTL_SNAK);
			/* Wait for Nak effect */
			if (dwc2_hsotg_wait_bit_set(hsotg, epint_reg,
						    DXEPINT_INEPNAKEFF, 100))
				dev_warn(hsotg->dev,
					 "%s: timeout DIEPINT.NAKEFF\n",
					 __func__);
		} else {
			dwc2_set_bit(hsotg, DCTL, DCTL_SGNPINNAK);
			/* Wait for Nak effect */
			if (dwc2_hsotg_wait_bit_set(hsotg, GINTSTS,
						    GINTSTS_GINNAKEFF, 100))
				dev_warn(hsotg->dev,
					 "%s: timeout GINTSTS.GINNAKEFF\n",
					 __func__);
		}
	} else {
		/* Mask GINTSTS_GOUTNAKEFF interrupt */
		dwc2_hsotg_disable_gsint(hsotg, GINTSTS_GOUTNAKEFF);

		if (!(dwc2_readl(hsotg, GINTSTS) & GINTSTS_GOUTNAKEFF))
			dwc2_set_bit(hsotg, DCTL, DCTL_SGOUTNAK);

		if (!using_dma(hsotg)) {
			/* Wait for GINTSTS_RXFLVL interrupt */
			if (dwc2_hsotg_wait_bit_set(hsotg, GINTSTS,
						    GINTSTS_RXFLVL, 100)) {
				dev_warn(hsotg->dev, "%s: timeout GINTSTS.RXFLVL\n",
					 __func__);
			} else {
				/*
				 * Pop GLOBAL OUT NAK status packet from RxFIFO
				 * to assert GOUTNAKEFF interrupt
				 */
				dwc2_readl(hsotg, GRXSTSP);
			}
		}

		/* Wait for global nak to take effect */
		if (dwc2_hsotg_wait_bit_set(hsotg, GINTSTS,
					    GINTSTS_GOUTNAKEFF, 100))
			dev_warn(hsotg->dev, "%s: timeout GINTSTS.GOUTNAKEFF\n",
				 __func__);
	}

	/* Disable ep */
	dwc2_set_bit(hsotg, epctrl_reg, DXEPCTL_EPDIS | DXEPCTL_SNAK);

	/* Wait for ep to be disabled */
	if (dwc2_hsotg_wait_bit_set(hsotg, epint_reg, DXEPINT_EPDISBLD, 100))
		dev_warn(hsotg->dev,
			 "%s: timeout DOEPCTL.EPDisable\n", __func__);

	/* Clear EPDISBLD interrupt */
	dwc2_set_bit(hsotg, epint_reg, DXEPINT_EPDISBLD);

	if (hs_ep->dir_in) {
		unsigned short fifo_index;

		if (hsotg->dedicated_fifos || hs_ep->periodic)
			fifo_index = hs_ep->fifo_index;
		else
			fifo_index = 0;

		/* Flush TX FIFO */
		dwc2_flush_tx_fifo(hsotg, fifo_index);

		/* Clear Global In NP NAK in Shared FIFO for non periodic ep */
		if (!hsotg->dedicated_fifos && !hs_ep->periodic)
			dwc2_set_bit(hsotg, DCTL, DCTL_CGNPINNAK);

	} else {
		/* Remove global NAKs */
		dwc2_set_bit(hsotg, DCTL, DCTL_CGOUTNAK);
	}
}
/**
 * dwc2_hsotg_ep_enable - enable the given endpoint
 * @ep: The USB endpoint to configure
 * @desc: The USB endpoint descriptor to configure with.
 *
 * This is called from the USB gadget code's usb_ep_enable().
 */
static int dwc2_hsotg_ep_enable(struct usb_ep *ep,
				const struct usb_endpoint_descriptor *desc)
{
	struct dwc2_hsotg_ep *hs_ep = our_ep(ep);
	struct dwc2_hsotg *hsotg = hs_ep->parent;
	unsigned long flags;
	unsigned int index = hs_ep->index;
	u32 epctrl_reg;
	u32 epctrl;
	u32 mps;
	u32 mc;
	u32 mask;
	unsigned int dir_in;
	unsigned int i, val, size;
	int ret = 0;
	unsigned char ep_type;
	int desc_num;

	dev_dbg(hsotg->dev,
		"%s: ep %s: a 0x%02x, attr 0x%02x, mps 0x%04x, intr %d\n",
		__func__, ep->name, desc->bEndpointAddress, desc->bmAttributes,
		desc->wMaxPacketSize, desc->bInterval);

	/* not to be called for EP0 */
	if (index == 0) {
		dev_err(hsotg->dev, "%s: called for EP 0\n", __func__);
		return -EINVAL;
	}

	dir_in = (desc->bEndpointAddress & USB_ENDPOINT_DIR_MASK) ? 1 : 0;
	if (dir_in != hs_ep->dir_in) {
		dev_err(hsotg->dev, "%s: direction mismatch!\n", __func__);
		return -EINVAL;
	}

	ep_type = desc->bmAttributes & USB_ENDPOINT_XFERTYPE_MASK;
	mps = usb_endpoint_maxp(desc);
	mc = usb_endpoint_maxp_mult(desc);

	/* ISOC IN in DDMA supported bInterval up to 10 */
	if (using_desc_dma(hsotg) && ep_type == USB_ENDPOINT_XFER_ISOC &&
	    dir_in && desc->bInterval > 10) {
		dev_err(hsotg->dev,
			"%s: ISOC IN, DDMA: bInterval>10 not supported!\n", __func__);
		return -EINVAL;
	}

	/* High bandwidth ISOC OUT in DDMA not supported */
	if (using_desc_dma(hsotg) && ep_type == USB_ENDPOINT_XFER_ISOC &&
	    !dir_in && mc > 1) {
		dev_err(hsotg->dev,
			"%s: ISOC OUT, DDMA: HB not supported!\n", __func__);
		return -EINVAL;
	}

	/* note, we handle this here instead of dwc2_hsotg_set_ep_maxpacket */
	epctrl_reg = dir_in ? DIEPCTL(index) : DOEPCTL(index);
	epctrl = dwc2_readl(hsotg, epctrl_reg);

	dev_dbg(hsotg->dev, "%s: read DxEPCTL=0x%08x from 0x%08x\n",
		__func__, epctrl, epctrl_reg);

	if (using_desc_dma(hsotg) && ep_type == USB_ENDPOINT_XFER_ISOC)
		desc_num = MAX_DMA_DESC_NUM_HS_ISOC;
	else
		desc_num = MAX_DMA_DESC_NUM_GENERIC;

	/* Allocate DMA descriptor chain for non-ctrl endpoints */
	if (using_desc_dma(hsotg) && !hs_ep->desc_list) {
		hs_ep->desc_list = dmam_alloc_coherent(hsotg->dev,
			desc_num * sizeof(struct dwc2_dma_desc),
			&hs_ep->desc_list_dma, GFP_ATOMIC);
		if (!hs_ep->desc_list) {
			ret = -ENOMEM;
			goto error2;
		}
	}

	spin_lock_irqsave(&hsotg->lock, flags);

	epctrl &= ~(DXEPCTL_EPTYPE_MASK | DXEPCTL_MPS_MASK);
	epctrl |= DXEPCTL_MPS(mps);

	/*
	 * mark the endpoint as active, otherwise the core may ignore
	 * transactions entirely for this endpoint
	 */
	epctrl |= DXEPCTL_USBACTEP;

	/* update the endpoint state */
	dwc2_hsotg_set_ep_maxpacket(hsotg, hs_ep->index, mps, mc, dir_in);

	/* default, set to non-periodic */
	hs_ep->isochronous = 0;
	hs_ep->periodic = 0;
	hs_ep->halted = 0;
	hs_ep->wedged = 0;
	hs_ep->interval = desc->bInterval;

	switch (ep_type) {
	case USB_ENDPOINT_XFER_ISOC:
		epctrl |= DXEPCTL_EPTYPE_ISO;
		epctrl |= DXEPCTL_SETEVENFR;
		hs_ep->isochronous = 1;
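		/* For isochronous (and HS interrupt) endpoints bInterval is
		 * log2-encoded: the polling interval is
		 * 2^(bInterval - 1) (u)frames.
		 */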
		hs_ep->interval = 1 << (desc->bInterval - 1);
		hs_ep->target_frame = TARGET_FRAME_INITIAL;
		hs_ep->next_desc = 0;
		hs_ep->compl_desc = 0;
		if (dir_in) {
			hs_ep->periodic = 1;
			mask = dwc2_readl(hsotg, DIEPMSK);
			mask |= DIEPMSK_NAKMSK;
			dwc2_writel(hsotg, mask, DIEPMSK);
		} else {
			epctrl |= DXEPCTL_SNAK;
			mask = dwc2_readl(hsotg, DOEPMSK);
			mask |= DOEPMSK_OUTTKNEPDISMSK;
			dwc2_writel(hsotg, mask, DOEPMSK);
		}
		break;

	case USB_ENDPOINT_XFER_BULK:
		epctrl |= DXEPCTL_EPTYPE_BULK;
		break;

	case USB_ENDPOINT_XFER_INT:
		if (dir_in)
			hs_ep->periodic = 1;

		if (hsotg->gadget.speed == USB_SPEED_HIGH)
			hs_ep->interval = 1 << (desc->bInterval - 1);

		epctrl |= DXEPCTL_EPTYPE_INTERRUPT;
		break;

	case USB_ENDPOINT_XFER_CONTROL:
		epctrl |= DXEPCTL_EPTYPE_CONTROL;
		break;
	}
	/*
	 * if the hardware has dedicated fifos, we must give each IN EP
	 * a unique tx-fifo even if it is non-periodic.
	 */
	if (dir_in && (hsotg->dedicated_fifos ||
		       ep_type == USB_ENDPOINT_XFER_INT ||
		       ep_type == USB_ENDPOINT_XFER_ISOC)) {
		unsigned fifo_count = dwc2_hsotg_tx_fifo_count(hsotg);
		u32 fifo_index = 0;
		u32 fifo_size = UINT_MAX;

		size = hs_ep->ep.maxpacket * hs_ep->mc;
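		/* Best-fit search: walk the unclaimed TxFIFOs and keep the
		 * smallest one that can still hold maxpacket * mc bytes.
		 */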
		for (i = 1; i <= fifo_count; ++i) {
			if (hsotg->fifo_map & (1 << i))
				continue;
			val = dwc2_readl(hsotg, DPTXFSIZN(i));
			val = (val >> FIFOSIZE_DEPTH_SHIFT) * 4;
			if (val < size)
				continue;
			/* Search for smallest acceptable fifo */
			if (val < fifo_size) {
				fifo_size = val;
				fifo_index = i;
			}
		}
		if (!fifo_index) {
			dev_err(hsotg->dev,
				"%s: No suitable fifo found\n", __func__);
			ret = -ENOMEM;
			goto error1;
		}
		epctrl &= ~(DXEPCTL_TXFNUM_LIMIT << DXEPCTL_TXFNUM_SHIFT);
		hsotg->fifo_map |= 1 << fifo_index;
		epctrl |= DXEPCTL_TXFNUM(fifo_index);
		hs_ep->fifo_index = fifo_index;
		hs_ep->fifo_size = fifo_size;
	}

	/* for non control endpoints, set PID to D0 */
	if (index && !hs_ep->isochronous)
		epctrl |= DXEPCTL_SETD0PID;

	/* WA for Full speed ISOC IN in DDMA mode.
	 * By Clear NAK status of EP, core will send ZLP
	 * to IN token and assert NAK interrupt relying
	 * on TxFIFO status only
	 */
	if (hsotg->gadget.speed == USB_SPEED_FULL &&
	    hs_ep->isochronous && dir_in) {
		/* The WA applies only to core versions from 2.72a
		 * to 4.00a (including both). Also for FS_IOT_1.00a
		 * and HS_IOT_1.00a.
		 */
		u32 gsnpsid = dwc2_readl(hsotg, GSNPSID);

		if ((gsnpsid >= DWC2_CORE_REV_2_72a &&
		     gsnpsid <= DWC2_CORE_REV_4_00a) ||
		    gsnpsid == DWC2_FS_IOT_REV_1_00a ||
		    gsnpsid == DWC2_HS_IOT_REV_1_00a)
			epctrl |= DXEPCTL_CNAK;
	}

	dev_dbg(hsotg->dev, "%s: write DxEPCTL=0x%08x\n",
		__func__, epctrl);

	dwc2_writel(hsotg, epctrl, epctrl_reg);
	dev_dbg(hsotg->dev, "%s: read DxEPCTL=0x%08x\n",
		__func__, dwc2_readl(hsotg, epctrl_reg));

	/* enable the endpoint interrupt */
	dwc2_hsotg_ctrl_epint(hsotg, index, dir_in, 1);

error1:
	spin_unlock_irqrestore(&hsotg->lock, flags);

error2:
	if (ret && using_desc_dma(hsotg) && hs_ep->desc_list) {
		dmam_free_coherent(hsotg->dev, desc_num *
			sizeof(struct dwc2_dma_desc),
			hs_ep->desc_list, hs_ep->desc_list_dma);
		hs_ep->desc_list = NULL;
	}

	return ret;
}
/**
 * dwc2_hsotg_ep_disable - disable the given endpoint
 * @ep: The endpoint to disable.
 */
static int dwc2_hsotg_ep_disable(struct usb_ep *ep)
{
	struct dwc2_hsotg_ep *hs_ep = our_ep(ep);
	struct dwc2_hsotg *hsotg = hs_ep->parent;
	int dir_in = hs_ep->dir_in;
	int index = hs_ep->index;
	u32 epctrl_reg;
	u32 ctrl;

	dev_dbg(hsotg->dev, "%s(ep %p)\n", __func__, ep);

	if (ep == &hsotg->eps_out[0]->ep) {
		dev_err(hsotg->dev, "%s: called for ep0\n", __func__);
		return -EINVAL;
	}

	if (hsotg->op_state != OTG_STATE_B_PERIPHERAL) {
		dev_err(hsotg->dev, "%s: called in host mode?\n", __func__);
		return -EINVAL;
	}

	epctrl_reg = dir_in ? DIEPCTL(index) : DOEPCTL(index);

	ctrl = dwc2_readl(hsotg, epctrl_reg);

	if (ctrl & DXEPCTL_EPENA)
		dwc2_hsotg_ep_stop_xfr(hsotg, hs_ep);

	ctrl &= ~DXEPCTL_EPENA;
	ctrl &= ~DXEPCTL_USBACTEP;
	ctrl |= DXEPCTL_SNAK;

	dev_dbg(hsotg->dev, "%s: DxEPCTL=0x%08x\n", __func__, ctrl);
	dwc2_writel(hsotg, ctrl, epctrl_reg);

	/* disable endpoint interrupts */
	dwc2_hsotg_ctrl_epint(hsotg, hs_ep->index, hs_ep->dir_in, 0);

	/* terminate all requests with shutdown */
	kill_all_requests(hsotg, hs_ep, -ESHUTDOWN);

	hsotg->fifo_map &= ~(1 << hs_ep->fifo_index);
	hs_ep->fifo_index = 0;
	hs_ep->fifo_size = 0;

	return 0;
}
static int dwc2_hsotg_ep_disable_lock(struct usb_ep *ep)
{
	struct dwc2_hsotg_ep *hs_ep = our_ep(ep);
	struct dwc2_hsotg *hsotg = hs_ep->parent;
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&hsotg->lock, flags);
	ret = dwc2_hsotg_ep_disable(ep);
	spin_unlock_irqrestore(&hsotg->lock, flags);
	return ret;
}
/**
 * on_list - check whether a request is on the given endpoint's queue
 * @ep: The endpoint to check.
 * @test: The request to test if it is on the endpoint.
 */
static bool on_list(struct dwc2_hsotg_ep *ep, struct dwc2_hsotg_req *test)
{
	struct dwc2_hsotg_req *req, *treq;

	list_for_each_entry_safe(req, treq, &ep->queue, queue) {
		if (req == test)
			return true;
	}

	return false;
}
/**
 * dwc2_hsotg_ep_dequeue - dequeue a request from the given endpoint
 * @ep: The endpoint the request is queued on.
 * @req: The request to be removed from the queue.
 */
static int dwc2_hsotg_ep_dequeue(struct usb_ep *ep, struct usb_request *req)
{
	struct dwc2_hsotg_req *hs_req = our_req(req);
	struct dwc2_hsotg_ep *hs_ep = our_ep(ep);
	struct dwc2_hsotg *hs = hs_ep->parent;
	unsigned long flags;

	dev_dbg(hs->dev, "ep_dequeue(%p,%p)\n", ep, req);

	spin_lock_irqsave(&hs->lock, flags);

	if (!on_list(hs_ep, hs_req)) {
		spin_unlock_irqrestore(&hs->lock, flags);
		return -EINVAL;
	}

	/* Dequeue already started request */
	if (req == &hs_ep->req->req)
		dwc2_hsotg_ep_stop_xfr(hs, hs_ep);

	dwc2_hsotg_complete_request(hs, hs_ep, hs_req, -ECONNRESET);
	spin_unlock_irqrestore(&hs->lock, flags);

	return 0;
}
/**
 * dwc2_gadget_ep_set_wedge - set wedge on a given endpoint
 * @ep: The endpoint to be wedged.
 */
static int dwc2_gadget_ep_set_wedge(struct usb_ep *ep)
{
	struct dwc2_hsotg_ep *hs_ep = our_ep(ep);
	struct dwc2_hsotg *hs = hs_ep->parent;
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&hs->lock, flags);
	hs_ep->wedged = 1;
	ret = dwc2_hsotg_ep_sethalt(ep, 1, false);
	spin_unlock_irqrestore(&hs->lock, flags);

	return ret;
}
/**
 * dwc2_hsotg_ep_sethalt - set halt on a given endpoint
 * @ep: The endpoint to set halt.
 * @value: Set or unset the halt.
 * @now: If true, stall the endpoint now. Otherwise return -EAGAIN if
 *       the endpoint is busy processing requests.
 *
 * We need to stall the endpoint immediately if request comes from set_feature
 * protocol command handler.
 */
static int dwc2_hsotg_ep_sethalt(struct usb_ep *ep, int value, bool now)
{
	struct dwc2_hsotg_ep *hs_ep = our_ep(ep);
	struct dwc2_hsotg *hs = hs_ep->parent;
	int index = hs_ep->index;
	u32 epreg;
	u32 epctl;
	u32 xfertype;

	dev_info(hs->dev, "%s(ep %p %s, %d)\n", __func__, ep, ep->name, value);

	if (index == 0) {
		if (value)
			dwc2_hsotg_stall_ep0(hs);
		else
			dev_warn(hs->dev,
				 "%s: can't clear halt on ep0\n", __func__);
		return 0;
	}

	if (hs_ep->isochronous) {
		dev_err(hs->dev, "%s is Isochronous Endpoint\n", ep->name);
		return -EINVAL;
	}

	if (!now && value && !list_empty(&hs_ep->queue)) {
		dev_dbg(hs->dev, "%s request is pending, cannot halt\n",
			ep->name);
		return -EAGAIN;
	}

	if (hs_ep->dir_in) {
		epreg = DIEPCTL(index);
		epctl = dwc2_readl(hs, epreg);

		if (value) {
			epctl |= DXEPCTL_STALL | DXEPCTL_SNAK;
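			/* An endpoint that is currently enabled must also be
			 * disabled for the stall to take effect.
			 */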
			if (epctl & DXEPCTL_EPENA)
				epctl |= DXEPCTL_EPDIS;
		} else {
			epctl &= ~DXEPCTL_STALL;
			hs_ep->wedged = 0;
			xfertype = epctl & DXEPCTL_EPTYPE_MASK;
			if (xfertype == DXEPCTL_EPTYPE_BULK ||
			    xfertype == DXEPCTL_EPTYPE_INTERRUPT)
				epctl |= DXEPCTL_SETD0PID;
		}
		dwc2_writel(hs, epctl, epreg);
	} else {
		epreg = DOEPCTL(index);
		epctl = dwc2_readl(hs, epreg);

		if (value) {
			/* Unmask GOUTNAKEFF interrupt */
			dwc2_hsotg_en_gsint(hs, GINTSTS_GOUTNAKEFF);

			if (!(dwc2_readl(hs, GINTSTS) & GINTSTS_GOUTNAKEFF))
				dwc2_set_bit(hs, DCTL, DCTL_SGOUTNAK);

			/* STALL bit will be set in GOUTNAKEFF interrupt handler */
		} else {
			epctl &= ~DXEPCTL_STALL;
			hs_ep->wedged = 0;
			xfertype = epctl & DXEPCTL_EPTYPE_MASK;
			if (xfertype == DXEPCTL_EPTYPE_BULK ||
			    xfertype == DXEPCTL_EPTYPE_INTERRUPT)
				epctl |= DXEPCTL_SETD0PID;
			dwc2_writel(hs, epctl, epreg);
		}
	}

	hs_ep->halted = value;
	return 0;
}
/**
 * dwc2_hsotg_ep_sethalt_lock - set halt on a given endpoint with lock held
 * @ep: The endpoint to set halt.
 * @value: Set or unset the halt.
 */
static int dwc2_hsotg_ep_sethalt_lock(struct usb_ep *ep, int value)
{
	struct dwc2_hsotg_ep *hs_ep = our_ep(ep);
	struct dwc2_hsotg *hs = hs_ep->parent;
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&hs->lock, flags);
	ret = dwc2_hsotg_ep_sethalt(ep, value, false);
	spin_unlock_irqrestore(&hs->lock, flags);

	return ret;
}
static const struct usb_ep_ops dwc2_hsotg_ep_ops = {
	.enable		= dwc2_hsotg_ep_enable,
	.disable	= dwc2_hsotg_ep_disable_lock,
	.alloc_request	= dwc2_hsotg_ep_alloc_request,
	.free_request	= dwc2_hsotg_ep_free_request,
	.queue		= dwc2_hsotg_ep_queue_lock,
	.dequeue	= dwc2_hsotg_ep_dequeue,
	.set_halt	= dwc2_hsotg_ep_sethalt_lock,
	.set_wedge	= dwc2_gadget_ep_set_wedge,
	/* note, don't believe we have any call for the fifo routines */
};
/**
 * dwc2_hsotg_init - initialize the usb core
 * @hsotg: The driver state
 */
static void dwc2_hsotg_init(struct dwc2_hsotg *hsotg)
{
	/* unmask subset of endpoint interrupts */
	dwc2_writel(hsotg, DIEPMSK_TIMEOUTMSK | DIEPMSK_AHBERRMSK |
		    DIEPMSK_EPDISBLDMSK | DIEPMSK_XFERCOMPLMSK,
		    DIEPMSK);

	dwc2_writel(hsotg, DOEPMSK_SETUPMSK | DOEPMSK_AHBERRMSK |
		    DOEPMSK_EPDISBLDMSK | DOEPMSK_XFERCOMPLMSK,
		    DOEPMSK);

	dwc2_writel(hsotg, 0, DAINTMSK);

	/* Be in disconnected state until gadget is registered */
	dwc2_set_bit(hsotg, DCTL, DCTL_SFTDISCON);

	/* setup fifos */
	dev_dbg(hsotg->dev, "GRXFSIZ=0x%08x, GNPTXFSIZ=0x%08x\n",
		dwc2_readl(hsotg, GRXFSIZ),
		dwc2_readl(hsotg, GNPTXFSIZ));

	dwc2_hsotg_init_fifo(hsotg);

	if (using_dma(hsotg))
		dwc2_set_bit(hsotg, GAHBCFG, GAHBCFG_DMA_EN);
}
/**
 * dwc2_hsotg_udc_start - prepare the udc for work
 * @gadget: The usb gadget state
 * @driver: The usb gadget driver
 *
 * Perform initialization to prepare udc device and driver
 * to work.
 */
static int dwc2_hsotg_udc_start(struct usb_gadget *gadget,
				struct usb_gadget_driver *driver)
{
	struct dwc2_hsotg *hsotg = to_hsotg(gadget);
	unsigned long flags;
	int ret;

	if (!hsotg) {
		pr_err("%s: called with no device\n", __func__);
		return -ENODEV;
	}

	if (!driver) {
		dev_err(hsotg->dev, "%s: no driver\n", __func__);
		return -EINVAL;
	}

	if (driver->max_speed < USB_SPEED_FULL)
		dev_err(hsotg->dev, "%s: bad speed\n", __func__);

	if (!driver->setup) {
		dev_err(hsotg->dev, "%s: missing entry points\n", __func__);
		return -EINVAL;
	}

	WARN_ON(hsotg->driver);

	hsotg->driver = driver;
	hsotg->gadget.dev.of_node = hsotg->dev->of_node;
	hsotg->gadget.speed = USB_SPEED_UNKNOWN;

	if (hsotg->dr_mode == USB_DR_MODE_PERIPHERAL) {
		ret = dwc2_lowlevel_hw_enable(hsotg);
		if (ret)
			goto err;
	}

	if (!IS_ERR_OR_NULL(hsotg->uphy))
		otg_set_peripheral(hsotg->uphy->otg, &hsotg->gadget);

	spin_lock_irqsave(&hsotg->lock, flags);
	if (dwc2_hw_is_device(hsotg)) {
		dwc2_hsotg_init(hsotg);
		dwc2_hsotg_core_init_disconnected(hsotg, false);
	}

	hsotg->enabled = 0;
	spin_unlock_irqrestore(&hsotg->lock, flags);

	gadget->sg_supported = using_desc_dma(hsotg);
	dev_info(hsotg->dev, "bound driver %s\n", driver->driver.name);

	return 0;

err:
	hsotg->driver = NULL;
	return ret;
}
/**
 * dwc2_hsotg_udc_stop - stop the udc
 * @gadget: The usb gadget state
 *
 * Stop the udc hw block and stay tuned for future transmissions
 */
static int dwc2_hsotg_udc_stop(struct usb_gadget *gadget)
{
	struct dwc2_hsotg *hsotg = to_hsotg(gadget);
	unsigned long flags;
	int ep;

	if (!hsotg)
		return -ENODEV;

	/* Exit clock gating when driver is stopped. */
	if (hsotg->params.power_down == DWC2_POWER_DOWN_PARAM_NONE &&
	    hsotg->bus_suspended && !hsotg->params.no_clock_gating) {
		dwc2_gadget_exit_clock_gating(hsotg, 0);
	}

	/* all endpoints should be shutdown */
	for (ep = 1; ep < hsotg->num_of_eps; ep++) {
		if (hsotg->eps_in[ep])
			dwc2_hsotg_ep_disable_lock(&hsotg->eps_in[ep]->ep);
		if (hsotg->eps_out[ep])
			dwc2_hsotg_ep_disable_lock(&hsotg->eps_out[ep]->ep);
	}

	spin_lock_irqsave(&hsotg->lock, flags);

	hsotg->driver = NULL;
	hsotg->gadget.dev.of_node = NULL;
	hsotg->gadget.speed = USB_SPEED_UNKNOWN;
	hsotg->enabled = 0;

	spin_unlock_irqrestore(&hsotg->lock, flags);

	if (!IS_ERR_OR_NULL(hsotg->uphy))
		otg_set_peripheral(hsotg->uphy->otg, NULL);

	if (hsotg->dr_mode == USB_DR_MODE_PERIPHERAL)
		dwc2_lowlevel_hw_disable(hsotg);

	return 0;
}
/**
 * dwc2_hsotg_gadget_getframe - read the frame number
 * @gadget: The usb gadget state
 *
 * Read the {micro} frame number
 */
static int dwc2_hsotg_gadget_getframe(struct usb_gadget *gadget)
{
	return dwc2_hsotg_read_frameno(to_hsotg(gadget));
}
/**
 * dwc2_hsotg_set_selfpowered - set if device is self/bus powered
 * @gadget: The usb gadget state
 * @is_selfpowered: Whether the device is self-powered
 *
 * Set if the device is self or bus powered.
 */
static int dwc2_hsotg_set_selfpowered(struct usb_gadget *gadget,
				      int is_selfpowered)
{
	struct dwc2_hsotg *hsotg = to_hsotg(gadget);
	unsigned long flags;

	spin_lock_irqsave(&hsotg->lock, flags);
	gadget->is_selfpowered = !!is_selfpowered;
	spin_unlock_irqrestore(&hsotg->lock, flags);

	return 0;
}
/**
 * dwc2_hsotg_pullup - connect/disconnect the USB PHY
 * @gadget: The usb gadget state
 * @is_on: Current state of the USB PHY
 *
 * Connect/Disconnect the USB PHY pullup
 */
static int dwc2_hsotg_pullup(struct usb_gadget *gadget, int is_on)
{
	struct dwc2_hsotg *hsotg = to_hsotg(gadget);
	unsigned long flags;

	dev_dbg(hsotg->dev, "%s: is_on: %d op_state: %d\n", __func__, is_on,
		hsotg->op_state);

	/* Don't modify pullup state while in host mode */
	if (hsotg->op_state != OTG_STATE_B_PERIPHERAL) {
		hsotg->enabled = is_on;
		return 0;
	}

	spin_lock_irqsave(&hsotg->lock, flags);
	if (is_on) {
		hsotg->enabled = 1;
		dwc2_hsotg_core_init_disconnected(hsotg, false);
		/* Enable ACG feature in device mode, if supported */
		dwc2_enable_acg(hsotg);
		dwc2_hsotg_core_connect(hsotg);
	} else {
		dwc2_hsotg_core_disconnect(hsotg);
		dwc2_hsotg_disconnect(hsotg);
		hsotg->enabled = 0;
	}

	hsotg->gadget.speed = USB_SPEED_UNKNOWN;
	spin_unlock_irqrestore(&hsotg->lock, flags);

	return 0;
}
static int dwc2_hsotg_vbus_session(struct usb_gadget *gadget, int is_active)
{
	struct dwc2_hsotg *hsotg = to_hsotg(gadget);
	unsigned long flags;

	dev_dbg(hsotg->dev, "%s: is_active: %d\n", __func__, is_active);
	spin_lock_irqsave(&hsotg->lock, flags);

	/*
	 * If controller is in partial power down state, it must exit from
	 * that state before being initialized / de-initialized
	 */
	if (hsotg->lx_state == DWC2_L2 && hsotg->in_ppd)
		/*
		 * No need to check the return value as
		 * registers are not being restored.
		 */
		dwc2_exit_partial_power_down(hsotg, 0, false);

	if (is_active) {
		hsotg->op_state = OTG_STATE_B_PERIPHERAL;

		dwc2_hsotg_core_init_disconnected(hsotg, false);
		if (hsotg->enabled) {
			/* Enable ACG feature in device mode, if supported */
			dwc2_enable_acg(hsotg);
			dwc2_hsotg_core_connect(hsotg);
		}
	} else {
		dwc2_hsotg_core_disconnect(hsotg);
		dwc2_hsotg_disconnect(hsotg);
	}

	spin_unlock_irqrestore(&hsotg->lock, flags);
	return 0;
}

/**
 * dwc2_hsotg_vbus_draw - report bMaxPower field
 * @gadget: The usb gadget state
 * @mA: Amount of current
 *
 * Report how much power the device may consume to the phy.
 */
static int dwc2_hsotg_vbus_draw(struct usb_gadget *gadget, unsigned int mA)
{
	struct dwc2_hsotg *hsotg = to_hsotg(gadget);

	if (IS_ERR_OR_NULL(hsotg->uphy))
		return -ENOTSUPP;
	return usb_phy_set_power(hsotg->uphy, mA);
}

static void dwc2_gadget_set_speed(struct usb_gadget *g, enum usb_device_speed speed)
{
	struct dwc2_hsotg *hsotg = to_hsotg(g);
	unsigned long flags;

	spin_lock_irqsave(&hsotg->lock, flags);
	switch (speed) {
	case USB_SPEED_HIGH:
		hsotg->params.speed = DWC2_SPEED_PARAM_HIGH;
		break;
	case USB_SPEED_FULL:
		hsotg->params.speed = DWC2_SPEED_PARAM_FULL;
		break;
	case USB_SPEED_LOW:
		hsotg->params.speed = DWC2_SPEED_PARAM_LOW;
		break;
	default:
		dev_err(hsotg->dev, "invalid speed (%d)\n", speed);
	}
	spin_unlock_irqrestore(&hsotg->lock, flags);
}

static const struct usb_gadget_ops dwc2_hsotg_gadget_ops = {
	.get_frame	= dwc2_hsotg_gadget_getframe,
	.set_selfpowered = dwc2_hsotg_set_selfpowered,
	.udc_start	= dwc2_hsotg_udc_start,
	.udc_stop	= dwc2_hsotg_udc_stop,
	.pullup		= dwc2_hsotg_pullup,
	.udc_set_speed	= dwc2_gadget_set_speed,
	.vbus_session	= dwc2_hsotg_vbus_session,
	.vbus_draw	= dwc2_hsotg_vbus_draw,
};
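
/*
 * Illustrative sketch (not part of this driver): gadget function drivers do
 * not call the ops above directly. The UDC core dispatches to them through
 * the usb_gadget_* helpers from <linux/usb/gadget.h>, roughly:
 *
 *	usb_gadget_connect(g);		-> dwc2_hsotg_pullup(g, 1)
 *	usb_gadget_disconnect(g);	-> dwc2_hsotg_pullup(g, 0)
 *	usb_gadget_frame_number(g);	-> dwc2_hsotg_gadget_getframe(g)
 *	usb_gadget_set_selfpowered(g);	-> dwc2_hsotg_set_selfpowered(g, 1)
 *	usb_gadget_vbus_connect(g);	-> dwc2_hsotg_vbus_session(g, 1)
 *	usb_gadget_vbus_draw(g, 500);	-> dwc2_hsotg_vbus_draw(g, 500)
 */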

/**
 * dwc2_hsotg_initep - initialise a single endpoint
 * @hsotg: The device state.
 * @hs_ep: The endpoint to be initialised.
 * @epnum: The endpoint number
 * @dir_in: True if direction is in.
 *
 * Initialise the given endpoint (as part of the probe and device state
 * creation) to give to the gadget driver. Set up the endpoint name, any
 * direction information and other state that may be required.
 */
static void dwc2_hsotg_initep(struct dwc2_hsotg *hsotg,
			      struct dwc2_hsotg_ep *hs_ep,
			      int epnum,
			      bool dir_in)
{
	char *dir;

	if (epnum == 0)
		dir = "";
	else if (dir_in)
		dir = "in";
	else
		dir = "out";

	hs_ep->dir_in = dir_in;
	hs_ep->index = epnum;

	snprintf(hs_ep->name, sizeof(hs_ep->name), "ep%d%s", epnum, dir);
	dev_dbg(hsotg->dev, "%s: name %s, dir_in %d\n", __func__,
		hs_ep->name, dir_in);

	INIT_LIST_HEAD(&hs_ep->queue);
	INIT_LIST_HEAD(&hs_ep->ep.ep_list);

	/* add to the list of endpoints known by the gadget driver */
	if (epnum)
		list_add_tail(&hs_ep->ep.ep_list, &hsotg->gadget.ep_list);

	hs_ep->parent = hsotg;
	hs_ep->ep.name = hs_ep->name;

	if (hsotg->params.speed == DWC2_SPEED_PARAM_LOW)
		usb_ep_set_maxpacket_limit(&hs_ep->ep, 8);
	else
		usb_ep_set_maxpacket_limit(&hs_ep->ep,
					   epnum ? 1024 : EP0_MPS_LIMIT);
	hs_ep->ep.ops = &dwc2_hsotg_ep_ops;

	if (epnum == 0) {
		hs_ep->ep.caps.type_control = true;
	} else {
		if (hsotg->params.speed != DWC2_SPEED_PARAM_LOW) {
			hs_ep->ep.caps.type_iso = true;
			hs_ep->ep.caps.type_bulk = true;
		}
		hs_ep->ep.caps.type_int = true;
	}

	if (dir_in)
		hs_ep->ep.caps.dir_in = true;
	else
		hs_ep->ep.caps.dir_out = true;

	/*
	 * if we're using dma, we need to set the next-endpoint pointer
	 * to be something valid.
	 */
	if (using_dma(hsotg)) {
		u32 next = DXEPCTL_NEXTEP((epnum + 1) % 15);

		if (dir_in)
			dwc2_writel(hsotg, next, DIEPCTL(epnum));
		else
			dwc2_writel(hsotg, next, DOEPCTL(epnum));
	}
}
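
/*
 * Worked example (illustrative, not part of this driver): the naming above
 * yields "ep0" plus direction-suffixed names such as "ep1in" and "ep1out".
 * A function driver would typically claim one of them via the generic
 * autoconfig helper rather than by name ("bulk_in_desc" is a hypothetical
 * descriptor here):
 *
 *	struct usb_ep *ep;
 *
 *	ep = usb_ep_autoconfig(gadget, &bulk_in_desc);
 *	if (!ep)
 *		return -ENODEV;
 */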

/**
 * dwc2_hsotg_hw_cfg - read HW configuration registers
 * @hsotg: Programming view of the DWC_otg controller
 *
 * Read the USB core HW configuration registers
 */
static int dwc2_hsotg_hw_cfg(struct dwc2_hsotg *hsotg)
{
	u32 cfg;
	u32 ep_type;
	u32 i;

	/* check hardware configuration */
	hsotg->num_of_eps = hsotg->hw_params.num_dev_ep;

	/* Add ep0 */
	hsotg->num_of_eps++;

	hsotg->eps_in[0] = devm_kzalloc(hsotg->dev,
					sizeof(struct dwc2_hsotg_ep),
					GFP_KERNEL);
	if (!hsotg->eps_in[0])
		return -ENOMEM;
	/* Same dwc2_hsotg_ep is used in both directions for ep0 */
	hsotg->eps_out[0] = hsotg->eps_in[0];

	cfg = hsotg->hw_params.dev_ep_dirs;
	for (i = 1, cfg >>= 2; i < hsotg->num_of_eps; i++, cfg >>= 2) {
		ep_type = cfg & 3;
		/* Direction in or both */
		if (!(ep_type & 2)) {
			hsotg->eps_in[i] = devm_kzalloc(hsotg->dev,
				sizeof(struct dwc2_hsotg_ep), GFP_KERNEL);
			if (!hsotg->eps_in[i])
				return -ENOMEM;
		}
		/* Direction out or both */
		if (!(ep_type & 1)) {
			hsotg->eps_out[i] = devm_kzalloc(hsotg->dev,
				sizeof(struct dwc2_hsotg_ep), GFP_KERNEL);
			if (!hsotg->eps_out[i])
				return -ENOMEM;
		}
	}

	hsotg->fifo_mem = hsotg->hw_params.total_fifo_size;
	hsotg->dedicated_fifos = hsotg->hw_params.en_multiple_tx_fifo;

	dev_info(hsotg->dev, "EPs: %d, %s fifos, %d entries in SPRAM\n",
		 hsotg->num_of_eps,
		 hsotg->dedicated_fifos ? "dedicated" : "shared",
		 hsotg->fifo_mem);
	return 0;
}
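
/*
 * Worked example (illustrative): the loop above consumes two bits of
 * dev_ep_dirs per endpoint, where 0 = bidirectional, 1 = IN only and
 * 2 = OUT only. With num_dev_ep = 2 and dev_ep_dirs = 0x24
 * (binary 10 01 00):
 *
 *	ep1: bits [3:2] = 01 -> only eps_in[1] is allocated
 *	ep2: bits [5:4] = 10 -> only eps_out[2] is allocated
 *
 * ep0 (bits [1:0]) is always bidirectional and is allocated separately.
 */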

/**
 * dwc2_hsotg_dump - dump state of the udc
 * @hsotg: Programming view of the DWC_otg controller
 */
static void dwc2_hsotg_dump(struct dwc2_hsotg *hsotg)
{
#ifdef DEBUG
	struct device *dev = hsotg->dev;
	u32 val;
	int idx;

	dev_info(dev, "DCFG=0x%08x, DCTL=0x%08x, DIEPMSK=%08x\n",
		 dwc2_readl(hsotg, DCFG), dwc2_readl(hsotg, DCTL),
		 dwc2_readl(hsotg, DIEPMSK));
	dev_info(dev, "GAHBCFG=0x%08x, GHWCFG1=0x%08x\n",
		 dwc2_readl(hsotg, GAHBCFG), dwc2_readl(hsotg, GHWCFG1));
	dev_info(dev, "GRXFSIZ=0x%08x, GNPTXFSIZ=0x%08x\n",
		 dwc2_readl(hsotg, GRXFSIZ), dwc2_readl(hsotg, GNPTXFSIZ));

	/* show periodic fifo settings */
	for (idx = 1; idx < hsotg->num_of_eps; idx++) {
		val = dwc2_readl(hsotg, DPTXFSIZN(idx));
		dev_info(dev, "DPTx[%d] FSize=%d, StAddr=0x%08x\n", idx,
			 val >> FIFOSIZE_DEPTH_SHIFT,
			 val & FIFOSIZE_STARTADDR_MASK);
	}

	for (idx = 0; idx < hsotg->num_of_eps; idx++) {
		dev_info(dev,
			 "ep%d-in: EPCTL=0x%08x, SIZ=0x%08x, DMA=0x%08x\n", idx,
			 dwc2_readl(hsotg, DIEPCTL(idx)),
			 dwc2_readl(hsotg, DIEPTSIZ(idx)),
			 dwc2_readl(hsotg, DIEPDMA(idx)));

		val = dwc2_readl(hsotg, DOEPCTL(idx));
		dev_info(dev,
			 "ep%d-out: EPCTL=0x%08x, SIZ=0x%08x, DMA=0x%08x\n",
			 idx, dwc2_readl(hsotg, DOEPCTL(idx)),
			 dwc2_readl(hsotg, DOEPTSIZ(idx)),
			 dwc2_readl(hsotg, DOEPDMA(idx)));
	}

	dev_info(dev, "DVBUSDIS=0x%08x, DVBUSPULSE=%08x\n",
		 dwc2_readl(hsotg, DVBUSDIS), dwc2_readl(hsotg, DVBUSPULSE));
#endif
}

/**
 * dwc2_gadget_init - init function for gadget
 * @hsotg: Programming view of the DWC_otg controller
 */
int dwc2_gadget_init(struct dwc2_hsotg *hsotg)
{
	struct device *dev = hsotg->dev;
	int epnum;
	int ret;

	/* Dump fifo information */
	dev_dbg(dev, "NonPeriodic TXFIFO size: %d\n",
		hsotg->params.g_np_tx_fifo_size);
	dev_dbg(dev, "RXFIFO size: %d\n", hsotg->params.g_rx_fifo_size);

	switch (hsotg->params.speed) {
	case DWC2_SPEED_PARAM_LOW:
		hsotg->gadget.max_speed = USB_SPEED_LOW;
		break;
	case DWC2_SPEED_PARAM_FULL:
		hsotg->gadget.max_speed = USB_SPEED_FULL;
		break;
	default:
		hsotg->gadget.max_speed = USB_SPEED_HIGH;
		break;
	}

	hsotg->gadget.ops = &dwc2_hsotg_gadget_ops;
	hsotg->gadget.name = dev_name(dev);
	hsotg->gadget.otg_caps = &hsotg->params.otg_caps;
	hsotg->remote_wakeup_allowed = 0;

	if (hsotg->params.lpm)
		hsotg->gadget.lpm_capable = true;

	if (hsotg->dr_mode == USB_DR_MODE_OTG)
		hsotg->gadget.is_otg = 1;
	else if (hsotg->dr_mode == USB_DR_MODE_PERIPHERAL)
		hsotg->op_state = OTG_STATE_B_PERIPHERAL;

	ret = dwc2_hsotg_hw_cfg(hsotg);
	if (ret) {
		dev_err(hsotg->dev, "Hardware configuration failed: %d\n", ret);
		return ret;
	}

	hsotg->ctrl_buff = devm_kzalloc(hsotg->dev,
					DWC2_CTRL_BUFF_SIZE, GFP_KERNEL);
	if (!hsotg->ctrl_buff)
		return -ENOMEM;

	hsotg->ep0_buff = devm_kzalloc(hsotg->dev,
				       DWC2_CTRL_BUFF_SIZE, GFP_KERNEL);
	if (!hsotg->ep0_buff)
		return -ENOMEM;

	if (using_desc_dma(hsotg)) {
		ret = dwc2_gadget_alloc_ctrl_desc_chains(hsotg);
		if (ret < 0)
			return ret;
	}

	ret = devm_request_irq(hsotg->dev, hsotg->irq, dwc2_hsotg_irq,
			       IRQF_SHARED, dev_name(hsotg->dev), hsotg);
	if (ret < 0) {
		dev_err(dev, "cannot claim IRQ for gadget\n");
		return ret;
	}

	/* hsotg->num_of_eps includes ep0 */
	if (hsotg->num_of_eps == 0) {
		dev_err(dev, "wrong number of EPs (zero)\n");
		return -EINVAL;
	}

	/* setup endpoint information */
	INIT_LIST_HEAD(&hsotg->gadget.ep_list);
	hsotg->gadget.ep0 = &hsotg->eps_out[0]->ep;

	/* allocate EP0 request */
	hsotg->ctrl_req = dwc2_hsotg_ep_alloc_request(&hsotg->eps_out[0]->ep,
						      GFP_KERNEL);
	if (!hsotg->ctrl_req) {
		dev_err(dev, "failed to allocate ctrl req\n");
		return -ENOMEM;
	}

	/* initialise the endpoints now the core has been initialised */
	for (epnum = 0; epnum < hsotg->num_of_eps; epnum++) {
		if (hsotg->eps_in[epnum])
			dwc2_hsotg_initep(hsotg, hsotg->eps_in[epnum],
					  epnum, 1);
		if (hsotg->eps_out[epnum])
			dwc2_hsotg_initep(hsotg, hsotg->eps_out[epnum],
					  epnum, 0);
	}

	dwc2_hsotg_dump(hsotg);

	return 0;
}
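
/*
 * Usage sketch (hedged; the caller lives outside this file): the platform
 * glue is expected to run dwc2_gadget_init() and then register the UDC with
 * the gadget core, which is what makes the ops table above reachable:
 *
 *	ret = dwc2_gadget_init(hsotg);
 *	if (ret < 0)
 *		return ret;
 *	ret = usb_add_gadget_udc(hsotg->dev, &hsotg->gadget);
 *
 * dwc2_hsotg_remove() below undoes the registration with
 * usb_del_gadget_udc().
 */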

/**
 * dwc2_hsotg_remove - remove function for hsotg driver
 * @hsotg: Programming view of the DWC_otg controller
 */
int dwc2_hsotg_remove(struct dwc2_hsotg *hsotg)
{
	usb_del_gadget_udc(&hsotg->gadget);
	dwc2_hsotg_ep_free_request(&hsotg->eps_out[0]->ep, hsotg->ctrl_req);

	return 0;
}

int dwc2_hsotg_suspend(struct dwc2_hsotg *hsotg)
{
	unsigned long flags;

	if (hsotg->lx_state != DWC2_L0)
		return 0;

	if (hsotg->driver) {
		int ep;

		dev_info(hsotg->dev, "suspending usb gadget %s\n",
			 hsotg->driver->driver.name);

		spin_lock_irqsave(&hsotg->lock, flags);
		if (hsotg->enabled)
			dwc2_hsotg_core_disconnect(hsotg);
		dwc2_hsotg_disconnect(hsotg);
		hsotg->gadget.speed = USB_SPEED_UNKNOWN;
		spin_unlock_irqrestore(&hsotg->lock, flags);

		for (ep = 1; ep < hsotg->num_of_eps; ep++) {
			if (hsotg->eps_in[ep])
				dwc2_hsotg_ep_disable_lock(&hsotg->eps_in[ep]->ep);
			if (hsotg->eps_out[ep])
				dwc2_hsotg_ep_disable_lock(&hsotg->eps_out[ep]->ep);
		}
	}

	return 0;
}

int dwc2_hsotg_resume(struct dwc2_hsotg *hsotg)
{
	unsigned long flags;

	if (hsotg->lx_state == DWC2_L2)
		return 0;

	if (hsotg->driver) {
		dev_info(hsotg->dev, "resuming usb gadget %s\n",
			 hsotg->driver->driver.name);

		spin_lock_irqsave(&hsotg->lock, flags);
		dwc2_hsotg_core_init_disconnected(hsotg, false);
		if (hsotg->enabled) {
			/* Enable ACG feature in device mode, if supported */
			dwc2_enable_acg(hsotg);
			dwc2_hsotg_core_connect(hsotg);
		}
		spin_unlock_irqrestore(&hsotg->lock, flags);
	}

	return 0;
}

/**
 * dwc2_backup_device_registers() - Backup controller device registers.
 * When suspending the USB bus, the registers need to be backed up
 * if controller power is disabled once suspended.
 *
 * @hsotg: Programming view of the DWC_otg controller
 */
int dwc2_backup_device_registers(struct dwc2_hsotg *hsotg)
{
	struct dwc2_dregs_backup *dr;
	int i;

	dev_dbg(hsotg->dev, "%s\n", __func__);

	/* Backup dev regs */
	dr = &hsotg->dr_backup;

	dr->dcfg = dwc2_readl(hsotg, DCFG);
	dr->dctl = dwc2_readl(hsotg, DCTL);
	dr->daintmsk = dwc2_readl(hsotg, DAINTMSK);
	dr->diepmsk = dwc2_readl(hsotg, DIEPMSK);
	dr->doepmsk = dwc2_readl(hsotg, DOEPMSK);

	for (i = 0; i < hsotg->num_of_eps; i++) {
		/* Backup IN EPs */
		dr->diepctl[i] = dwc2_readl(hsotg, DIEPCTL(i));

		/* Ensure DATA PID is correctly configured */
		if (dr->diepctl[i] & DXEPCTL_DPID)
			dr->diepctl[i] |= DXEPCTL_SETD1PID;
		else
			dr->diepctl[i] |= DXEPCTL_SETD0PID;

		dr->dieptsiz[i] = dwc2_readl(hsotg, DIEPTSIZ(i));
		dr->diepdma[i] = dwc2_readl(hsotg, DIEPDMA(i));

		/* Backup OUT EPs */
		dr->doepctl[i] = dwc2_readl(hsotg, DOEPCTL(i));

		/* Ensure DATA PID is correctly configured */
		if (dr->doepctl[i] & DXEPCTL_DPID)
			dr->doepctl[i] |= DXEPCTL_SETD1PID;
		else
			dr->doepctl[i] |= DXEPCTL_SETD0PID;

		dr->doeptsiz[i] = dwc2_readl(hsotg, DOEPTSIZ(i));
		dr->doepdma[i] = dwc2_readl(hsotg, DOEPDMA(i));
		dr->dtxfsiz[i] = dwc2_readl(hsotg, DPTXFSIZN(i));
	}
	dr->valid = true;
	return 0;
}
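
/*
 * Worked example of the DATA PID fixup above (illustrative): if DIEPCTL1
 * reads back with DXEPCTL_DPID set (next packet is DATA1), the saved image
 * gets DXEPCTL_SETD1PID or'ed in, so writing it back through
 * dwc2_restore_device_registers() re-arms the same data toggle instead of
 * silently restarting the endpoint at DATA0.
 */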

/**
 * dwc2_restore_device_registers() - Restore controller device registers.
 * When resuming the USB bus, the device registers need to be restored
 * if controller power was disabled.
 *
 * @hsotg: Programming view of the DWC_otg controller
 * @remote_wakeup: Indicates whether resume is initiated by Device or Host.
 *
 * Return: 0 if successful, negative error code otherwise
 */
int dwc2_restore_device_registers(struct dwc2_hsotg *hsotg, int remote_wakeup)
{
	struct dwc2_dregs_backup *dr;
	int i;

	dev_dbg(hsotg->dev, "%s\n", __func__);

	/* Restore dev regs */
	dr = &hsotg->dr_backup;
	if (!dr->valid) {
		dev_err(hsotg->dev, "%s: no device registers to restore\n",
			__func__);
		return -EINVAL;
	}
	dr->valid = false;

	if (!remote_wakeup)
		dwc2_writel(hsotg, dr->dctl, DCTL);

	dwc2_writel(hsotg, dr->daintmsk, DAINTMSK);
	dwc2_writel(hsotg, dr->diepmsk, DIEPMSK);
	dwc2_writel(hsotg, dr->doepmsk, DOEPMSK);

	for (i = 0; i < hsotg->num_of_eps; i++) {
		/* Restore IN EPs */
		dwc2_writel(hsotg, dr->dieptsiz[i], DIEPTSIZ(i));
		dwc2_writel(hsotg, dr->diepdma[i], DIEPDMA(i));
		dwc2_writel(hsotg, dr->doeptsiz[i], DOEPTSIZ(i));
		/* WA for enabled IN EPs in DDMA mode: on hibernation entry
		 * a wrong value may be read and saved from DIEPDMAx; as a
		 * result a BNA interrupt is asserted on hibernation exit
		 * when restoring from the saved area.
		 */
		if (using_desc_dma(hsotg) &&
		    (dr->diepctl[i] & DXEPCTL_EPENA))
			dr->diepdma[i] = hsotg->eps_in[i]->desc_list_dma;
		dwc2_writel(hsotg, dr->dtxfsiz[i], DPTXFSIZN(i));
		dwc2_writel(hsotg, dr->diepctl[i], DIEPCTL(i));

		/* Restore OUT EPs */
		dwc2_writel(hsotg, dr->doeptsiz[i], DOEPTSIZ(i));
		/* WA for enabled OUT EPs in DDMA mode: on hibernation entry
		 * a wrong value may be read and saved from DOEPDMAx; as a
		 * result a BNA interrupt is asserted on hibernation exit
		 * when restoring from the saved area.
		 */
		if (using_desc_dma(hsotg) &&
		    (dr->doepctl[i] & DXEPCTL_EPENA))
			dr->doepdma[i] = hsotg->eps_out[i]->desc_list_dma;
		dwc2_writel(hsotg, dr->doepdma[i], DOEPDMA(i));
		dwc2_writel(hsotg, dr->doepctl[i], DOEPCTL(i));
	}

	return 0;
}

/**
 * dwc2_gadget_init_lpm - Configure the core to support LPM in device mode
 *
 * @hsotg: Programming view of DWC_otg controller
 */
void dwc2_gadget_init_lpm(struct dwc2_hsotg *hsotg)
{
	u32 val;

	if (!hsotg->params.lpm)
		return;

	val = GLPMCFG_LPMCAP | GLPMCFG_APPL1RES;
	val |= hsotg->params.hird_threshold_en ? GLPMCFG_HIRD_THRES_EN : 0;
	val |= hsotg->params.lpm_clock_gating ? GLPMCFG_ENBLSLPM : 0;
	val |= hsotg->params.hird_threshold << GLPMCFG_HIRD_THRES_SHIFT;
	val |= hsotg->params.besl ? GLPMCFG_ENBESL : 0;
	val |= GLPMCFG_LPM_REJECT_CTRL_CONTROL;
	val |= GLPMCFG_LPM_ACCEPT_CTRL_ISOC;
	dwc2_writel(hsotg, val, GLPMCFG);
	dev_dbg(hsotg->dev, "GLPMCFG=0x%08x\n", dwc2_readl(hsotg, GLPMCFG));

	/* Unmask WKUP_ALERT Interrupt */
	if (hsotg->params.service_interval)
		dwc2_set_bit(hsotg, GINTMSK2, GINTMSK2_WKUP_ALERT_INT_MSK);
}
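
/*
 * Worked example (illustrative): with hird_threshold_en and besl set,
 * lpm_clock_gating clear and hird_threshold = 4, the value programmed
 * above expands to
 *
 *	val = GLPMCFG_LPMCAP | GLPMCFG_APPL1RES |
 *	      GLPMCFG_HIRD_THRES_EN | GLPMCFG_ENBESL |
 *	      (4 << GLPMCFG_HIRD_THRES_SHIFT) |
 *	      GLPMCFG_LPM_REJECT_CTRL_CONTROL |
 *	      GLPMCFG_LPM_ACCEPT_CTRL_ISOC;
 *
 * (GLPMCFG_ENBLSLPM stays clear because lpm_clock_gating is false.)
 */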

/**
 * dwc2_gadget_program_ref_clk - Program GREFCLK register in device mode
 *
 * @hsotg: Programming view of DWC_otg controller
 */
void dwc2_gadget_program_ref_clk(struct dwc2_hsotg *hsotg)
{
	u32 val = 0;

	val |= GREFCLK_REF_CLK_MODE;
	val |= hsotg->params.ref_clk_per << GREFCLK_REFCLKPER_SHIFT;
	val |= hsotg->params.sof_cnt_wkup_alert <<
	       GREFCLK_SOF_CNT_WKUP_ALERT_SHIFT;

	dwc2_writel(hsotg, val, GREFCLK);
	dev_dbg(hsotg->dev, "GREFCLK=0x%08x\n", dwc2_readl(hsotg, GREFCLK));
}

/**
 * dwc2_gadget_enter_hibernation() - Put controller in Hibernation.
 *
 * @hsotg: Programming view of the DWC_otg controller
 *
 * Return: non-zero if failed to enter hibernation.
 */
int dwc2_gadget_enter_hibernation(struct dwc2_hsotg *hsotg)
{
	u32 gpwrdn;
	u32 gusbcfg;
	u32 pcgcctl;
	int ret = 0;

	/* Change to L2(suspend) state */
	hsotg->lx_state = DWC2_L2;
	dev_dbg(hsotg->dev, "Start of hibernation completed\n");
	ret = dwc2_backup_global_registers(hsotg);
	if (ret) {
		dev_err(hsotg->dev, "%s: failed to backup global registers\n",
			__func__);
		return ret;
	}
	ret = dwc2_backup_device_registers(hsotg);
	if (ret) {
		dev_err(hsotg->dev, "%s: failed to backup device registers\n",
			__func__);
		return ret;
	}

	gpwrdn = GPWRDN_PWRDNRSTN;
	udelay(10);

	gusbcfg = dwc2_readl(hsotg, GUSBCFG);
	if (gusbcfg & GUSBCFG_ULPI_UTMI_SEL) {
		/* ULPI interface */
		gpwrdn |= GPWRDN_ULPI_LATCH_EN_DURING_HIB_ENTRY;
		dwc2_writel(hsotg, gpwrdn, GPWRDN);
		udelay(10);
		/* Suspend the Phy Clock */
		pcgcctl = dwc2_readl(hsotg, PCGCTL);
		pcgcctl |= PCGCTL_STOPPCLK;
		dwc2_writel(hsotg, pcgcctl, PCGCTL);
		udelay(10);
		gpwrdn = dwc2_readl(hsotg, GPWRDN);
		gpwrdn |= GPWRDN_PMUACTV;
		dwc2_writel(hsotg, gpwrdn, GPWRDN);
		udelay(10);
	} else {
		/* UTMI+ Interface */
		dwc2_writel(hsotg, gpwrdn, GPWRDN);
		udelay(10);
		gpwrdn = dwc2_readl(hsotg, GPWRDN);
		gpwrdn |= GPWRDN_PMUACTV;
		dwc2_writel(hsotg, gpwrdn, GPWRDN);
		udelay(10);
		pcgcctl = dwc2_readl(hsotg, PCGCTL);
		pcgcctl |= PCGCTL_STOPPCLK;
		dwc2_writel(hsotg, pcgcctl, PCGCTL);
		udelay(10);
	}

	/* Set flag to indicate that we are in hibernation */
	hsotg->hibernated = 1;

	/* Enable interrupts from wake up logic */
	gpwrdn = dwc2_readl(hsotg, GPWRDN);
	gpwrdn |= GPWRDN_PMUINTSEL;
	dwc2_writel(hsotg, gpwrdn, GPWRDN);
	udelay(10);

	/* Unmask device mode interrupts in GPWRDN */
	gpwrdn = dwc2_readl(hsotg, GPWRDN);
	gpwrdn |= GPWRDN_RST_DET_MSK;
	gpwrdn |= GPWRDN_LNSTSCHG_MSK;
	gpwrdn |= GPWRDN_STS_CHGINT_MSK;
	dwc2_writel(hsotg, gpwrdn, GPWRDN);
	udelay(10);

	/* Enable Power Down Clamp */
	gpwrdn = dwc2_readl(hsotg, GPWRDN);
	gpwrdn |= GPWRDN_PWRDNCLMP;
	dwc2_writel(hsotg, gpwrdn, GPWRDN);
	udelay(10);

	/* Switch off VDD */
	gpwrdn = dwc2_readl(hsotg, GPWRDN);
	gpwrdn |= GPWRDN_PWRDNSWTCH;
	dwc2_writel(hsotg, gpwrdn, GPWRDN);
	udelay(10);

	/* Save gpwrdn register for further usage if stschng interrupt */
	hsotg->gr_backup.gpwrdn = dwc2_readl(hsotg, GPWRDN);
	dev_dbg(hsotg->dev, "Hibernation completed\n");

	return ret;
}

/**
 * dwc2_gadget_exit_hibernation()
 * This function is for exiting from Device mode hibernation by host-initiated
 * resume/reset and device-initiated remote wakeup.
 *
 * @hsotg: Programming view of the DWC_otg controller
 * @rem_wakeup: indicates whether resume is initiated by Device or Host.
 * @reset: indicates whether resume is initiated by Reset.
 *
 * Return: non-zero if failed to exit from hibernation.
 */
int dwc2_gadget_exit_hibernation(struct dwc2_hsotg *hsotg,
				 int rem_wakeup, int reset)
{
	u32 pcgcctl;
	u32 gpwrdn;
	u32 dctl;
	int ret = 0;
	struct dwc2_gregs_backup *gr;
	struct dwc2_dregs_backup *dr;

	gr = &hsotg->gr_backup;
	dr = &hsotg->dr_backup;

	if (!hsotg->hibernated) {
		dev_dbg(hsotg->dev, "Already exited from Hibernation\n");
		return 1;
	}
	dev_dbg(hsotg->dev,
		"%s: called with rem_wakeup = %d reset = %d\n",
		__func__, rem_wakeup, reset);

	dwc2_hib_restore_common(hsotg, rem_wakeup, 0);

	if (!reset) {
		/* Clear all pending interrupts */
		dwc2_writel(hsotg, 0xffffffff, GINTSTS);
	}

	/* De-assert Restore */
	gpwrdn = dwc2_readl(hsotg, GPWRDN);
	gpwrdn &= ~GPWRDN_RESTORE;
	dwc2_writel(hsotg, gpwrdn, GPWRDN);
	udelay(10);

	if (!rem_wakeup) {
		pcgcctl = dwc2_readl(hsotg, PCGCTL);
		pcgcctl &= ~PCGCTL_RSTPDWNMODULE;
		dwc2_writel(hsotg, pcgcctl, PCGCTL);
	}

	/* Restore GUSBCFG, DCFG and DCTL */
	dwc2_writel(hsotg, gr->gusbcfg, GUSBCFG);
	dwc2_writel(hsotg, dr->dcfg, DCFG);
	dwc2_writel(hsotg, dr->dctl, DCTL);

	/* On USB Reset, reset device address to zero */
	if (reset)
		dwc2_clear_bit(hsotg, DCFG, DCFG_DEVADDR_MASK);

	/* Reset ULPI latch */
	gpwrdn = dwc2_readl(hsotg, GPWRDN);
	gpwrdn &= ~GPWRDN_ULPI_LATCH_EN_DURING_HIB_ENTRY;
	dwc2_writel(hsotg, gpwrdn, GPWRDN);

	/* De-assert Wakeup Logic */
	gpwrdn = dwc2_readl(hsotg, GPWRDN);
	gpwrdn &= ~GPWRDN_PMUACTV;
	dwc2_writel(hsotg, gpwrdn, GPWRDN);

	if (rem_wakeup) {
		udelay(10);
		/* Start Remote Wakeup Signaling */
		dwc2_writel(hsotg, dr->dctl | DCTL_RMTWKUPSIG, DCTL);
	} else {
		udelay(50);
		/* Set Device programming done bit */
		dctl = dwc2_readl(hsotg, DCTL);
		dctl |= DCTL_PWRONPRGDONE;
		dwc2_writel(hsotg, dctl, DCTL);
	}
	/* Wait for interrupts which must be cleared */
	mdelay(2);
	/* Clear all pending interrupts */
	dwc2_writel(hsotg, 0xffffffff, GINTSTS);

	/* Restore global registers */
	ret = dwc2_restore_global_registers(hsotg);
	if (ret) {
		dev_err(hsotg->dev, "%s: failed to restore registers\n",
			__func__);
		return ret;
	}

	/* Restore device registers */
	ret = dwc2_restore_device_registers(hsotg, rem_wakeup);
	if (ret) {
		dev_err(hsotg->dev, "%s: failed to restore device registers\n",
			__func__);
		return ret;
	}

	if (rem_wakeup) {
		mdelay(10);
		dctl = dwc2_readl(hsotg, DCTL);
		dctl &= ~DCTL_RMTWKUPSIG;
		dwc2_writel(hsotg, dctl, DCTL);
	}

	hsotg->hibernated = 0;
	hsotg->lx_state = DWC2_L0;
	dev_dbg(hsotg->dev, "Hibernation recovery completes here\n");

	return ret;
}
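
/*
 * Call-path sketch (hedged; the handler lives outside this file): the wakeup
 * interrupts unmasked in dwc2_gadget_enter_hibernation() above (reset
 * detect, line-state change, status change) are what eventually funnel into
 * this function, roughly:
 *
 *	if (gpwrdn & GPWRDN_LNSTSCHG)
 *		dwc2_gadget_exit_hibernation(hsotg, 1, 0);	(remote wakeup)
 *	else if (gpwrdn & GPWRDN_RST_DET)
 *		dwc2_gadget_exit_hibernation(hsotg, 0, 1);	(bus reset)
 */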

/**
 * dwc2_gadget_enter_partial_power_down() - Put controller in partial
 * power down.
 *
 * @hsotg: Programming view of the DWC_otg controller
 *
 * Return: non-zero if failed to enter device partial power down.
 *
 * This function is for entering device mode partial power down.
 */
int dwc2_gadget_enter_partial_power_down(struct dwc2_hsotg *hsotg)
{
	u32 pcgcctl;
	int ret = 0;

	dev_dbg(hsotg->dev, "Entering device partial power down started.\n");

	/* Backup all registers */
	ret = dwc2_backup_global_registers(hsotg);
	if (ret) {
		dev_err(hsotg->dev, "%s: failed to backup global registers\n",
			__func__);
		return ret;
	}

	ret = dwc2_backup_device_registers(hsotg);
	if (ret) {
		dev_err(hsotg->dev, "%s: failed to backup device registers\n",
			__func__);
		return ret;
	}

	/*
	 * Clear any pending interrupts since dwc2 will not be able to
	 * clear them after entering partial_power_down.
	 */
	dwc2_writel(hsotg, 0xffffffff, GINTSTS);

	/* Put the controller in low power state */
	pcgcctl = dwc2_readl(hsotg, PCGCTL);

	pcgcctl |= PCGCTL_PWRCLMP;
	dwc2_writel(hsotg, pcgcctl, PCGCTL);
	udelay(5);

	pcgcctl |= PCGCTL_RSTPDWNMODULE;
	dwc2_writel(hsotg, pcgcctl, PCGCTL);
	udelay(5);

	pcgcctl |= PCGCTL_STOPPCLK;
	dwc2_writel(hsotg, pcgcctl, PCGCTL);

	/* Set in_ppd flag to 1 as the core enters suspend here */
	hsotg->in_ppd = 1;
	hsotg->lx_state = DWC2_L2;

	dev_dbg(hsotg->dev, "Entering device partial power down completed.\n");
	return ret;
}

/**
 * dwc2_gadget_exit_partial_power_down() - Exit controller from device partial
 * power down.
 *
 * @hsotg: Programming view of the DWC_otg controller
 * @restore: indicates whether the registers need to be restored.
 *
 * Return: non-zero if failed to exit device partial power down.
 *
 * This function is for exiting from device mode partial power down.
 */
int dwc2_gadget_exit_partial_power_down(struct dwc2_hsotg *hsotg,
					bool restore)
{
	u32 pcgcctl;
	u32 dctl;
	struct dwc2_dregs_backup *dr;
	int ret = 0;

	dr = &hsotg->dr_backup;

	dev_dbg(hsotg->dev, "Exiting device partial power down started.\n");

	pcgcctl = dwc2_readl(hsotg, PCGCTL);
	pcgcctl &= ~PCGCTL_STOPPCLK;
	dwc2_writel(hsotg, pcgcctl, PCGCTL);

	pcgcctl = dwc2_readl(hsotg, PCGCTL);
	pcgcctl &= ~PCGCTL_PWRCLMP;
	dwc2_writel(hsotg, pcgcctl, PCGCTL);

	pcgcctl = dwc2_readl(hsotg, PCGCTL);
	pcgcctl &= ~PCGCTL_RSTPDWNMODULE;
	dwc2_writel(hsotg, pcgcctl, PCGCTL);

	udelay(100);
	if (restore) {
		ret = dwc2_restore_global_registers(hsotg);
		if (ret) {
			dev_err(hsotg->dev, "%s: failed to restore registers\n",
				__func__);
			return ret;
		}

		/* Restore DCFG */
		dwc2_writel(hsotg, dr->dcfg, DCFG);

		ret = dwc2_restore_device_registers(hsotg, 0);
		if (ret) {
			dev_err(hsotg->dev, "%s: failed to restore device registers\n",
				__func__);
			return ret;
		}
	}

	/* Set the Power-On Programming done bit */
	dctl = dwc2_readl(hsotg, DCTL);
	dctl |= DCTL_PWRONPRGDONE;
	dwc2_writel(hsotg, dctl, DCTL);

	/* Set in_ppd flag to 0 as the core exits from suspend here */
	hsotg->in_ppd = 0;
	hsotg->lx_state = DWC2_L0;

	dev_dbg(hsotg->dev, "Exiting device partial power down completed.\n");
	return ret;
}
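
/*
 * Usage sketch (grounded in this file): callers normally reach this through
 * the generic dwc2_exit_partial_power_down() wrapper, as
 * dwc2_hsotg_vbus_session() does above when VBUS toggles while the core is
 * in partial power down:
 *
 *	if (hsotg->lx_state == DWC2_L2 && hsotg->in_ppd)
 *		dwc2_exit_partial_power_down(hsotg, 0, false);
 */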

/**
 * dwc2_gadget_enter_clock_gating() - Put controller in clock gating.
 *
 * @hsotg: Programming view of the DWC_otg controller
 *
 * This function is for entering device mode clock gating.
 */
void dwc2_gadget_enter_clock_gating(struct dwc2_hsotg *hsotg)
{
	u32 pcgctl;

	dev_dbg(hsotg->dev, "Entering device clock gating.\n");

	/* Stop the PHY clock as suspend is received */
	pcgctl = dwc2_readl(hsotg, PCGCTL);
	pcgctl |= PCGCTL_STOPPCLK;
	dwc2_writel(hsotg, pcgctl, PCGCTL);
	udelay(5);

	/* Gate hclk as suspend is received */
	pcgctl = dwc2_readl(hsotg, PCGCTL);
	pcgctl |= PCGCTL_GATEHCLK;
	dwc2_writel(hsotg, pcgctl, PCGCTL);
	udelay(5);

	hsotg->lx_state = DWC2_L2;
	hsotg->bus_suspended = true;
}

/**
 * dwc2_gadget_exit_clock_gating() - Exit controller from device clock gating.
 *
 * @hsotg: Programming view of the DWC_otg controller
 * @rem_wakeup: indicates whether remote wake up is enabled.
 *
 * This function is for exiting from device mode clock gating.
 */
void dwc2_gadget_exit_clock_gating(struct dwc2_hsotg *hsotg, int rem_wakeup)
{
	u32 pcgctl;
	u32 dctl;

	dev_dbg(hsotg->dev, "Exiting device clock gating.\n");

	/* Un-gate hclk */
	pcgctl = dwc2_readl(hsotg, PCGCTL);
	pcgctl &= ~PCGCTL_GATEHCLK;
	dwc2_writel(hsotg, pcgctl, PCGCTL);
	udelay(5);

	/* Restart the PHY clock */
	pcgctl = dwc2_readl(hsotg, PCGCTL);
	pcgctl &= ~PCGCTL_STOPPCLK;
	dwc2_writel(hsotg, pcgctl, PCGCTL);
	udelay(5);

	if (rem_wakeup) {
		/* Set Remote Wakeup Signaling */
		dctl = dwc2_readl(hsotg, DCTL);
		dctl |= DCTL_RMTWKUPSIG;
		dwc2_writel(hsotg, dctl, DCTL);
	}

	/* Change to L0 state */
	call_gadget(hsotg, resume);
	hsotg->lx_state = DWC2_L0;
	hsotg->bus_suspended = false;
}
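
/*
 * Usage sketch (grounded in this file): clock gating is symmetric with the
 * bus-suspend state. dwc2_hsotg_udc_stop() above leaves it explicitly before
 * tearing the gadget down:
 *
 *	if (hsotg->params.power_down == DWC2_POWER_DOWN_PARAM_NONE &&
 *	    hsotg->bus_suspended && !hsotg->params.no_clock_gating)
 *		dwc2_gadget_exit_clock_gating(hsotg, 0);
 *
 * while the bus-suspend interrupt path is expected to be what calls
 * dwc2_gadget_enter_clock_gating() in the first place (hedged; that handler
 * lives outside this section).
 */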