dhd_pcie.c

/*
 * DHD Bus Module for PCIE
 *
 * Portions of this code are copyright (c) 2020 Cypress Semiconductor Corporation
 *
 * Copyright (C) 1999-2020, Broadcom Corporation
 *
 * Unless you and Broadcom execute a separate written software license
 * agreement governing use of this software, this software is licensed to you
 * under the terms of the GNU General Public License version 2 (the "GPL"),
 * available at http://www.broadcom.com/licenses/GPLv2.php, with the
 * following added to such license:
 *
 * As a special exception, the copyright holders of this software give you
 * permission to link this software with independent modules, and to copy and
 * distribute the resulting executable under terms of your choice, provided that
 * you also meet, for each linked independent module, the terms and conditions of
 * the license of that module. An independent module is a module which is not
 * derived from this software. The special exception does not apply to any
 * modifications of the software.
 *
 * Notwithstanding the above, under no circumstances may you combine this
 * software in any way with any other Broadcom software provided under a license
 * other than the GPL, without Broadcom's express prior written consent.
 *
 *
 * <<Broadcom-WL-IPTag/Open:>>
 *
 * $Id: dhd_pcie.c 702835 2017-06-05 07:19:55Z $
 */
/* include files */
#include <typedefs.h>
#include <bcmutils.h>
#include <bcmdevs.h>
#include <siutils.h>
#include <sbpcmcia.h>
#include <hndoobr.h>
#include <hndsoc.h>
#include <hndpmu.h>
#include <etd.h>
#include <hnd_debug.h>
#include <sbchipc.h>
#include <sbhndarm.h>
#include <hnd_armtrap.h>
#if defined(DHD_DEBUG)
#include <hnd_cons.h>
#endif /* defined(DHD_DEBUG) */
#include <dngl_stats.h>
#include <pcie_core.h>
#include <dhd.h>
#include <dhd_bus.h>
#include <dhd_flowring.h>
#include <dhd_proto.h>
#include <dhd_dbg.h>
#include <dhd_debug.h>
#include <dhd_daemon.h>
#include <dhdioctl.h>
#include <sdiovar.h>
#include <bcmmsgbuf.h>
#include <pcicfg.h>
#include <dhd_pcie.h>
#include <bcmpcie.h>
#include <bcmendian.h>
#include <bcmstdlib_s.h>
#ifdef DHDTCPACK_SUPPRESS
#include <dhd_ip.h>
#endif /* DHDTCPACK_SUPPRESS */
#include <bcmevent.h>
#ifdef DHD_PCIE_NATIVE_RUNTIMEPM
#include <linux/pm_runtime.h>
#endif /* DHD_PCIE_NATIVE_RUNTIMEPM */
#if defined(DEBUGGER) || defined(DHD_DSCOPE)
#include <debugger.h>
#endif /* DEBUGGER || DHD_DSCOPE */
#ifdef DNGL_AXI_ERROR_LOGGING
#include <dhd_linux_wq.h>
#include <dhd_linux.h>
#endif /* DNGL_AXI_ERROR_LOGGING */
#if defined(DHD_CONTROL_PCIE_CPUCORE_WIFI_TURNON)
#include <dhd_linux_priv.h>
#endif /* DHD_CONTROL_PCIE_CPUCORE_WIFI_TURNON */
#include <otpdefs.h>
#define EXTENDED_PCIE_DEBUG_DUMP 1 /* Enable extended PCIe register dump */
#define MEMBLOCK 2048 /* Block size used for downloading the dongle image */
#define MAX_WKLK_IDLE_CHECK 3 /* Number of times the wake lock is checked before deciding not to suspend */
#define DHD_MAX_ITEMS_HPP_TXCPL_RING 512
#define DHD_MAX_ITEMS_HPP_RXCPL_RING 512
/* ARM CR4 core register offsets, expressed as uint32 word indices */
#define ARMCR4REG_CORECAP (0x4/sizeof(uint32))
#define ARMCR4REG_MPUCTRL (0x90/sizeof(uint32))
#define ACC_MPU_SHIFT 25
#define ACC_MPU_MASK (0x1u << ACC_MPU_SHIFT)
#define ARMCR4REG_BANKIDX (0x40/sizeof(uint32))
#define ARMCR4REG_BANKPDA (0x4C/sizeof(uint32))
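/* Illustrative sketch (not part of the original file): because the
 * ARMCR4REG_* values above are byte offsets divided by sizeof(uint32),
 * they can be used directly as indices into a (volatile uint32 *) view of
 * the ARM CR4 core's register space. The cr4_regs base pointer below is
 * hypothetical. */
#if 0 /* illustrative only */
volatile uint32 *cr4_regs = (volatile uint32 *)bus->regs; /* hypothetical mapping */
uint32 corecap = cr4_regs[ARMCR4REG_CORECAP]; /* register at byte offset 0x4 */
#endif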
/* Temporary WAR (workaround) to fix precommit until the sync issue between
 * the trunk and precommit branches is resolved
 */
/* CTO (completion timeout) prevention recovery */
/* BCMQT_HW (emulation) builds presumably run far slower, hence the 10x
 * longer per-wait and retry-count values below. */
#ifdef BCMQT_HW
#define CTO_TO_CLEAR_WAIT_MS 10000
#define CTO_TO_CLEAR_WAIT_MAX_CNT 100
#else
#define CTO_TO_CLEAR_WAIT_MS 1000
#define CTO_TO_CLEAR_WAIT_MAX_CNT 10
#endif /* BCMQT_HW */
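/* Illustrative sketch (assumption, not the driver's actual loop): the two
 * constants above pair a per-iteration wait with an iteration bound for
 * polling until the CTO status clears. cto_status_set() is hypothetical;
 * OSL_DELAY() takes microseconds. */
#if 0 /* illustrative only */
int cnt = 0;
while (cto_status_set(bus) && cnt < CTO_TO_CLEAR_WAIT_MAX_CNT) {
    OSL_DELAY(CTO_TO_CLEAR_WAIT_MS * 1000);
    cnt++;
}
#endif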
/* Fetch address of a member in the pciedev_shared structure in dongle memory */
#define DHD_PCIE_SHARED_MEMBER_ADDR(bus, member) \
    ((bus)->shared_addr + OFFSETOF(pciedev_shared_t, member))
/* Fetch address of a member in the rings_info_ptr structure in dongle memory */
#define DHD_RING_INFO_MEMBER_ADDR(bus, member) \
    ((bus)->pcie_sh->rings_info_ptr + OFFSETOF(ring_info_t, member))
/* Fetch address of a member in the ring_mem structure in dongle memory */
#define DHD_RING_MEM_MEMBER_ADDR(bus, ringid, member) \
    ((bus)->ring_sh[ringid].ring_mem_addr + OFFSETOF(ring_mem_t, member))
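/* Usage sketch (illustrative): compute the dongle-memory address of a
 * pciedev_shared_t member, then read it over the bus with
 * dhdpcie_bus_membytes() (declared below). The rings_info_ptr member is
 * assumed here, inferred from the DHD_RING_INFO_MEMBER_ADDR macro above. */
#if 0 /* illustrative only */
uint32 rings_info_ptr;
ulong addr = DHD_PCIE_SHARED_MEMBER_ADDR(bus, rings_info_ptr);
dhdpcie_bus_membytes(bus, FALSE, addr, (uint8 *)&rings_info_ptr, sizeof(rings_info_ptr));
#endif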
#if defined(SUPPORT_MULTIPLE_BOARD_REV)
extern unsigned int system_rev;
#endif /* SUPPORT_MULTIPLE_BOARD_REV */
/* DHD module parameter */
extern uint32 hw_module_variant;
#ifdef EWP_EDL
extern int host_edl_support;
#endif /* EWP_EDL */
/* Can be overridden by the module parameter (dma_ring_indices) defined in dhd_linux.c */
uint dma_ring_indices = 0;
/* Can be overridden by the module parameter (h2d_phase) defined in dhd_linux.c */
bool h2d_phase = 0;
/* Can be overridden by the module parameter (force_trap_bad_h2d_phase)
 * defined in dhd_linux.c
 */
bool force_trap_bad_h2d_phase = 0;
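/* Illustrative sketch (assumption, not verbatim from dhd_linux.c): globals
 * like the three above are typically exposed with the standard Linux
 * module_param() macro, which is how the overrides mentioned in the
 * comments would take effect. */
#if 0 /* illustrative only */
#include <linux/moduleparam.h>
module_param(dma_ring_indices, uint, 0);
module_param(h2d_phase, bool, 0);
module_param(force_trap_bad_h2d_phase, bool, 0);
#endif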
int dhd_dongle_memsize;
int dhd_dongle_ramsize;
struct dhd_bus *g_dhd_bus = NULL;
#ifdef DNGL_AXI_ERROR_LOGGING
static void dhd_log_dump_axi_error(uint8 *axi_err);
#endif /* DNGL_AXI_ERROR_LOGGING */
static int dhdpcie_checkdied(dhd_bus_t *bus, char *data, uint size);
static int dhdpcie_bus_readconsole(dhd_bus_t *bus);
#if defined(DHD_FW_COREDUMP)
static int dhdpcie_mem_dump(dhd_bus_t *bus);
static int dhdpcie_get_mem_dump(dhd_bus_t *bus);
#endif /* DHD_FW_COREDUMP */
static int dhdpcie_bus_membytes(dhd_bus_t *bus, bool write, ulong address, uint8 *data, uint size);
static int dhdpcie_bus_doiovar(dhd_bus_t *bus, const bcm_iovar_t *vi, uint32 actionid,
    const char *name, void *params,
    int plen, void *arg, int len, int val_size);
static int dhdpcie_bus_lpback_req(struct dhd_bus *bus, uint32 intval);
static int dhdpcie_bus_dmaxfer_req(struct dhd_bus *bus,
    uint32 len, uint32 srcdelay, uint32 destdelay,
    uint32 d11_lpbk, uint32 core_num, uint32 wait);
static int dhdpcie_bus_download_state(dhd_bus_t *bus, bool enter);
static int _dhdpcie_download_firmware(struct dhd_bus *bus);
static int dhdpcie_download_firmware(dhd_bus_t *bus, osl_t *osh);
static int dhdpcie_bus_write_vars(dhd_bus_t *bus);
static bool dhdpcie_bus_process_mailbox_intr(dhd_bus_t *bus, uint32 intstatus);
static bool dhdpci_bus_read_frames(dhd_bus_t *bus);
static int dhdpcie_readshared(dhd_bus_t *bus);
static void dhdpcie_init_shared_addr(dhd_bus_t *bus);
static bool dhdpcie_dongle_attach(dhd_bus_t *bus);
static void dhdpcie_bus_dongle_setmemsize(dhd_bus_t *bus, int mem_size);
static void dhdpcie_bus_release_dongle(dhd_bus_t *bus, osl_t *osh,
    bool dongle_isolation, bool reset_flag);
static void dhdpcie_bus_release_malloc(dhd_bus_t *bus, osl_t *osh);
static int dhdpcie_downloadvars(dhd_bus_t *bus, void *arg, int len);
static void dhdpcie_setbar1win(dhd_bus_t *bus, uint32 addr);
static uint8 dhdpcie_bus_rtcm8(dhd_bus_t *bus, ulong offset);
static void dhdpcie_bus_wtcm8(dhd_bus_t *bus, ulong offset, uint8 data);
static void dhdpcie_bus_wtcm16(dhd_bus_t *bus, ulong offset, uint16 data);
static uint16 dhdpcie_bus_rtcm16(dhd_bus_t *bus, ulong offset);
static void dhdpcie_bus_wtcm32(dhd_bus_t *bus, ulong offset, uint32 data);
static uint32 dhdpcie_bus_rtcm32(dhd_bus_t *bus, ulong offset);
  170. #ifdef DHD_SUPPORT_64BIT
  171. static void dhdpcie_bus_wtcm64(dhd_bus_t *bus, ulong offset, uint64 data) __attribute__ ((used));
  172. static uint64 dhdpcie_bus_rtcm64(dhd_bus_t *bus, ulong offset) __attribute__ ((used));
  173. #endif /* DHD_SUPPORT_64BIT */
  174. static void dhdpcie_bus_cfg_set_bar0_win(dhd_bus_t *bus, uint32 data);
  175. static void dhdpcie_bus_reg_unmap(osl_t *osh, volatile char *addr, int size);
  176. static int dhdpcie_cc_nvmshadow(dhd_bus_t *bus, struct bcmstrbuf *b);
  177. static int dhdpcie_sromotp_customvar(dhd_bus_t *bus, uint32 *customvar1, uint32 *customvar2);
  178. static void dhdpcie_fw_trap(dhd_bus_t *bus);
  179. static void dhd_fillup_ring_sharedptr_info(dhd_bus_t *bus, ring_info_t *ring_info);
  180. static void dhdpcie_handle_mb_data(dhd_bus_t *bus);
  181. extern void dhd_dpc_enable(dhd_pub_t *dhdp);
  182. extern void dhd_dpc_kill(dhd_pub_t *dhdp);
  183. #ifdef IDLE_TX_FLOW_MGMT
  184. static void dhd_bus_check_idle_scan(dhd_bus_t *bus);
  185. static void dhd_bus_idle_scan(dhd_bus_t *bus);
  186. #endif /* IDLE_TX_FLOW_MGMT */
  187. #ifdef EXYNOS_PCIE_DEBUG
  188. extern void exynos_pcie_register_dump(int ch_num);
  189. #endif /* EXYNOS_PCIE_DEBUG */
  190. #if defined(DHD_H2D_LOG_TIME_SYNC)
  191. static void dhdpci_bus_rte_log_time_sync_poll(dhd_bus_t *bus);
  192. #endif /* DHD_H2D_LOG_TIME_SYNC */
  193. #define PCI_VENDOR_ID_BROADCOM 0x14e4
  194. #ifdef DHD_PCIE_NATIVE_RUNTIMEPM
  195. #define MAX_D3_ACK_TIMEOUT 100
  196. #endif /* DHD_PCIE_NATIVE_RUNTIMEPM */
  197. #define DHD_DEFAULT_DOORBELL_TIMEOUT 200 /* ms */
  198. static bool dhdpcie_check_firmware_compatible(uint32 f_api_version, uint32 h_api_version);
  199. static int dhdpcie_cto_error_recovery(struct dhd_bus *bus);
  200. static int dhdpcie_init_d11status(struct dhd_bus *bus);
  201. static int dhdpcie_wrt_rnd(struct dhd_bus *bus);
  202. extern uint16 dhd_prot_get_h2d_max_txpost(dhd_pub_t *dhd);
  203. extern void dhd_prot_set_h2d_max_txpost(dhd_pub_t *dhd, uint16 max_txpost);
  204. #ifdef DHD_HP2P
  205. extern enum hrtimer_restart dhd_hp2p_write(struct hrtimer *timer);
  206. static uint16 dhd_bus_set_hp2p_ring_max_size(struct dhd_bus *bus, bool tx, uint16 val);
  207. #endif // endif
  208. #define NUM_PATTERNS 2
  209. static bool dhd_bus_tcm_test(struct dhd_bus *bus);
  210. /* IOVar table */
  211. enum {
  212. IOV_INTR = 1,
  213. #ifdef DHD_BUS_MEM_ACCESS
  214. IOV_MEMBYTES,
  215. #endif /* DHD_BUS_MEM_ACCESS */
  216. IOV_MEMSIZE,
  217. IOV_SET_DOWNLOAD_STATE,
  218. IOV_DEVRESET,
  219. IOV_VARS,
  220. IOV_MSI_SIM,
  221. IOV_PCIE_LPBK,
  222. IOV_CC_NVMSHADOW,
  223. IOV_RAMSIZE,
  224. IOV_RAMSTART,
  225. IOV_SLEEP_ALLOWED,
  226. IOV_PCIE_DMAXFER,
  227. IOV_PCIE_SUSPEND,
  228. #ifdef DHD_PCIE_REG_ACCESS
  229. IOV_PCIEREG,
  230. IOV_PCIECFGREG,
  231. IOV_PCIECOREREG,
  232. IOV_PCIESERDESREG,
  233. IOV_PCIEASPM,
  234. IOV_BAR0_SECWIN_REG,
  235. IOV_SBREG,
  236. #endif /* DHD_PCIE_REG_ACCESS */
  237. IOV_DONGLEISOLATION,
  238. IOV_LTRSLEEPON_UNLOOAD,
  239. IOV_METADATA_DBG,
  240. IOV_RX_METADATALEN,
  241. IOV_TX_METADATALEN,
  242. IOV_TXP_THRESHOLD,
  243. IOV_BUZZZ_DUMP,
  244. IOV_DUMP_RINGUPD_BLOCK,
  245. IOV_DMA_RINGINDICES,
  246. IOV_FORCE_FW_TRAP,
  247. IOV_DB1_FOR_MB,
  248. IOV_FLOW_PRIO_MAP,
  249. #ifdef DHD_PCIE_RUNTIMEPM
  250. IOV_IDLETIME,
  251. #endif /* DHD_PCIE_RUNTIMEPM */
  252. IOV_RXBOUND,
  253. IOV_TXBOUND,
  254. IOV_HANGREPORT,
  255. IOV_H2D_MAILBOXDATA,
  256. IOV_INFORINGS,
  257. IOV_H2D_PHASE,
  258. IOV_H2D_ENABLE_TRAP_BADPHASE,
  259. IOV_H2D_TXPOST_MAX_ITEM,
  260. IOV_TRAPDATA,
  261. IOV_TRAPDATA_RAW,
  262. IOV_CTO_PREVENTION,
  263. IOV_PCIE_WD_RESET,
  264. IOV_DUMP_DONGLE,
  265. IOV_HWA_ENAB_BMAP,
  266. IOV_IDMA_ENABLE,
  267. IOV_IFRM_ENABLE,
  268. IOV_CLEAR_RING,
  269. IOV_DAR_ENABLE,
  270. IOV_DNGL_CAPS, /**< returns string with dongle capabilities */
  271. #if defined(DEBUGGER) || defined(DHD_DSCOPE)
  272. IOV_GDB_SERVER, /**< starts gdb server on given interface */
  273. #endif /* DEBUGGER || DHD_DSCOPE */
  274. IOV_INB_DW_ENABLE,
  275. IOV_CTO_THRESHOLD,
  276. IOV_HSCBSIZE, /* get HSCB buffer size */
  277. #ifdef DHD_BUS_MEM_ACCESS
  278. IOV_HSCBBYTES, /* copy HSCB buffer */
  279. #endif // endif
  280. IOV_HP2P_ENABLE,
  281. IOV_HP2P_PKT_THRESHOLD,
  282. IOV_HP2P_TIME_THRESHOLD,
  283. IOV_HP2P_PKT_EXPIRY,
  284. IOV_HP2P_TXCPL_MAXITEMS,
  285. IOV_HP2P_RXCPL_MAXITEMS,
  286. IOV_EXTDTXS_IN_TXCPL,
  287. IOV_HOSTRDY_AFTER_INIT,
  288. IOV_PCIE_LAST /**< unused IOVAR */
  289. };
  290. const bcm_iovar_t dhdpcie_iovars[] = {
  291. {"intr", IOV_INTR, 0, 0, IOVT_BOOL, 0 },
  292. #ifdef DHD_BUS_MEM_ACCESS
  293. {"membytes", IOV_MEMBYTES, 0, 0, IOVT_BUFFER, 2 * sizeof(int) },
  294. #endif /* DHD_BUS_MEM_ACCESS */
  295. {"memsize", IOV_MEMSIZE, 0, 0, IOVT_UINT32, 0 },
  296. {"dwnldstate", IOV_SET_DOWNLOAD_STATE, 0, 0, IOVT_BOOL, 0 },
  297. {"vars", IOV_VARS, 0, 0, IOVT_BUFFER, 0 },
  298. {"devreset", IOV_DEVRESET, 0, 0, IOVT_UINT8, 0 },
  299. {"pcie_device_trap", IOV_FORCE_FW_TRAP, 0, 0, 0, 0 },
  300. {"pcie_lpbk", IOV_PCIE_LPBK, 0, 0, IOVT_UINT32, 0 },
  301. {"cc_nvmshadow", IOV_CC_NVMSHADOW, 0, 0, IOVT_BUFFER, 0 },
  302. {"ramsize", IOV_RAMSIZE, 0, 0, IOVT_UINT32, 0 },
  303. {"ramstart", IOV_RAMSTART, 0, 0, IOVT_UINT32, 0 },
  304. #ifdef DHD_PCIE_REG_ACCESS
  305. {"pciereg", IOV_PCIEREG, 0, 0, IOVT_BUFFER, 2 * sizeof(int32) },
  306. {"pciecfgreg", IOV_PCIECFGREG, 0, 0, IOVT_BUFFER, 2 * sizeof(int32) },
  307. {"pciecorereg", IOV_PCIECOREREG, 0, 0, IOVT_BUFFER, 2 * sizeof(int32) },
  308. {"pcieserdesreg", IOV_PCIESERDESREG, 0, 0, IOVT_BUFFER, 3 * sizeof(int32) },
  309. {"bar0secwinreg", IOV_BAR0_SECWIN_REG, 0, 0, IOVT_BUFFER, sizeof(sdreg_t) },
  310. {"sbreg", IOV_SBREG, 0, 0, IOVT_BUFFER, sizeof(uint8) },
  311. #endif /* DHD_PCIE_REG_ACCESS */
  312. {"pcie_dmaxfer", IOV_PCIE_DMAXFER, 0, 0, IOVT_BUFFER, sizeof(dma_xfer_info_t)},
  313. {"pcie_suspend", IOV_PCIE_SUSPEND, DHD_IOVF_PWRREQ_BYPASS, 0, IOVT_UINT32, 0 },
  314. {"sleep_allowed", IOV_SLEEP_ALLOWED, 0, 0, IOVT_BOOL, 0 },
  315. {"dngl_isolation", IOV_DONGLEISOLATION, 0, 0, IOVT_UINT32, 0 },
  316. {"ltrsleep_on_unload", IOV_LTRSLEEPON_UNLOOAD, 0, 0, IOVT_UINT32, 0 },
  317. {"dump_ringupdblk", IOV_DUMP_RINGUPD_BLOCK, 0, 0, IOVT_BUFFER, 0 },
  318. {"dma_ring_indices", IOV_DMA_RINGINDICES, 0, 0, IOVT_UINT32, 0},
  319. {"metadata_dbg", IOV_METADATA_DBG, 0, 0, IOVT_BOOL, 0 },
  320. {"rx_metadata_len", IOV_RX_METADATALEN, 0, 0, IOVT_UINT32, 0 },
  321. {"tx_metadata_len", IOV_TX_METADATALEN, 0, 0, IOVT_UINT32, 0 },
  322. {"db1_for_mb", IOV_DB1_FOR_MB, 0, 0, IOVT_UINT32, 0 },
  323. {"txp_thresh", IOV_TXP_THRESHOLD, 0, 0, IOVT_UINT32, 0 },
  324. {"buzzz_dump", IOV_BUZZZ_DUMP, 0, 0, IOVT_UINT32, 0 },
  325. {"flow_prio_map", IOV_FLOW_PRIO_MAP, 0, 0, IOVT_UINT32, 0 },
  326. #ifdef DHD_PCIE_RUNTIMEPM
  327. {"idletime", IOV_IDLETIME, 0, 0, IOVT_INT32, 0 },
  328. #endif /* DHD_PCIE_RUNTIMEPM */
  329. {"rxbound", IOV_RXBOUND, 0, 0, IOVT_UINT32, 0 },
  330. {"txbound", IOV_TXBOUND, 0, 0, IOVT_UINT32, 0 },
  331. #ifdef DHD_PCIE_REG_ACCESS
  332. {"aspm", IOV_PCIEASPM, 0, 0, IOVT_INT32, 0 },
  333. #endif /* DHD_PCIE_REG_ACCESS */
  334. {"fw_hang_report", IOV_HANGREPORT, 0, 0, IOVT_BOOL, 0 },
  335. {"h2d_mb_data", IOV_H2D_MAILBOXDATA, 0, 0, IOVT_UINT32, 0 },
  336. {"inforings", IOV_INFORINGS, 0, 0, IOVT_UINT32, 0 },
  337. {"h2d_phase", IOV_H2D_PHASE, 0, 0, IOVT_UINT32, 0 },
  338. {"force_trap_bad_h2d_phase", IOV_H2D_ENABLE_TRAP_BADPHASE, 0, 0,
  339. IOVT_UINT32, 0 },
  340. {"h2d_max_txpost", IOV_H2D_TXPOST_MAX_ITEM, 0, 0, IOVT_UINT32, 0 },
  341. {"trap_data", IOV_TRAPDATA, 0, 0, IOVT_BUFFER, 0 },
  342. {"trap_data_raw", IOV_TRAPDATA_RAW, 0, 0, IOVT_BUFFER, 0 },
  343. {"cto_prevention", IOV_CTO_PREVENTION, 0, 0, IOVT_UINT32, 0 },
  344. {"pcie_wd_reset", IOV_PCIE_WD_RESET, 0, 0, IOVT_BOOL, 0 },
  345. {"dump_dongle", IOV_DUMP_DONGLE, 0, 0, IOVT_BUFFER,
  346. MAX(sizeof(dump_dongle_in_t), sizeof(dump_dongle_out_t))},
  347. {"clear_ring", IOV_CLEAR_RING, 0, 0, IOVT_UINT32, 0 },
  348. {"hwa_enab_bmap", IOV_HWA_ENAB_BMAP, 0, 0, IOVT_UINT32, 0 },
  349. {"idma_enable", IOV_IDMA_ENABLE, 0, 0, IOVT_UINT32, 0 },
  350. {"ifrm_enable", IOV_IFRM_ENABLE, 0, 0, IOVT_UINT32, 0 },
  351. {"dar_enable", IOV_DAR_ENABLE, 0, 0, IOVT_UINT32, 0 },
  352. {"cap", IOV_DNGL_CAPS, 0, 0, IOVT_BUFFER, 0},
  353. #if defined(DEBUGGER) || defined(DHD_DSCOPE)
  354. {"gdb_server", IOV_GDB_SERVER, 0, 0, IOVT_UINT32, 0 },
  355. #endif /* DEBUGGER || DHD_DSCOPE */
  356. {"inb_dw_enable", IOV_INB_DW_ENABLE, 0, 0, IOVT_UINT32, 0 },
  357. {"cto_threshold", IOV_CTO_THRESHOLD, 0, 0, IOVT_UINT32, 0 },
  358. {"hscbsize", IOV_HSCBSIZE, 0, 0, IOVT_UINT32, 0 },
  359. #ifdef DHD_BUS_MEM_ACCESS
  360. {"hscbbytes", IOV_HSCBBYTES, 0, 0, IOVT_BUFFER, 2 * sizeof(int32) },
  361. #endif // endif
  362. #ifdef DHD_HP2P
  363. {"hp2p_enable", IOV_HP2P_ENABLE, 0, 0, IOVT_UINT32, 0 },
  364. {"hp2p_pkt_thresh", IOV_HP2P_PKT_THRESHOLD, 0, 0, IOVT_UINT32, 0 },
  365. {"hp2p_time_thresh", IOV_HP2P_TIME_THRESHOLD, 0, 0, IOVT_UINT32, 0 },
  366. {"hp2p_pkt_expiry", IOV_HP2P_PKT_EXPIRY, 0, 0, IOVT_UINT32, 0 },
  367. {"hp2p_txcpl_maxitems", IOV_HP2P_TXCPL_MAXITEMS, 0, 0, IOVT_UINT32, 0 },
  368. {"hp2p_rxcpl_maxitems", IOV_HP2P_RXCPL_MAXITEMS, 0, 0, IOVT_UINT32, 0 },
  369. #endif // endif
  370. {"extdtxs_in_txcpl", IOV_EXTDTXS_IN_TXCPL, 0, 0, IOVT_UINT32, 0 },
  371. {"hostrdy_after_init", IOV_HOSTRDY_AFTER_INIT, 0, 0, IOVT_UINT32, 0 },
  372. {NULL, 0, 0, 0, 0, 0 }
  373. };
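/*
 * Illustrative sketch (assumption: the standard bcm_iovar_t dispatch): entries
 * in dhdpcie_iovars[] are matched by name and routed to dhdpcie_bus_doiovar()
 * with the corresponding IOV_* action id. From outside the bus layer an iovar
 * can be driven through dhd_bus_iovar_op(), e.g.:
 *
 *     uint32 rxbound = 0;
 *     dhd_bus_iovar_op(dhdp, "rxbound", NULL, 0,
 *         (char *)&rxbound, sizeof(rxbound), IOV_GET);
 *
 * as done for "pcie_dmaxfer" in dhd_bus_dmaxfer_lpbk() further below.
 */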
/* Parenthesized so the macro expands safely inside larger expressions */
#define MAX_READ_TIMEOUT (5 * 1000 * 1000)

#ifndef DHD_RXBOUND
#define DHD_RXBOUND 64
#endif // endif
#ifndef DHD_TXBOUND
#define DHD_TXBOUND 64
#endif // endif

#define DHD_INFORING_BOUND 32
#define DHD_BTLOGRING_BOUND 32

uint dhd_rxbound = DHD_RXBOUND;
uint dhd_txbound = DHD_TXBOUND;

#if defined(DEBUGGER) || defined(DHD_DSCOPE)
/** the GDB debugger layer will call back into this (bus) layer to read/write dongle memory */
static struct dhd_gdb_bus_ops_s bus_ops = {
    .read_u16 = dhdpcie_bus_rtcm16,
    .read_u32 = dhdpcie_bus_rtcm32,
    .write_u32 = dhdpcie_bus_wtcm32,
};
#endif /* DEBUGGER || DHD_DSCOPE */

bool
dhd_bus_get_flr_force_fail(struct dhd_bus *bus)
{
    return bus->flr_force_fail;
}

/**
 * Register/unregister functions are called by the main DHD entry point (e.g. module insertion) to
 * link with the bus driver, in order to look for or await the device.
 */
int
dhd_bus_register(void)
{
    DHD_TRACE(("%s: Enter\n", __FUNCTION__));
    return dhdpcie_bus_register();
}

void
dhd_bus_unregister(void)
{
    DHD_TRACE(("%s: Enter\n", __FUNCTION__));
    dhdpcie_bus_unregister();
    return;
}

/** returns a host virtual address */
uint32 *
dhdpcie_bus_reg_map(osl_t *osh, ulong addr, int size)
{
    return (uint32 *)REG_MAP(addr, size);
}

void
dhdpcie_bus_reg_unmap(osl_t *osh, volatile char *addr, int size)
{
    REG_UNMAP(addr);
    return;
}

/**
 * Return the H2D doorbell register address.
 * Use DAR registers instead of the enum register for corerev >= 23 (4347B0).
 */
static INLINE uint
dhd_bus_db0_addr_get(struct dhd_bus *bus)
{
    uint addr = PCIH2D_MailBox;
    uint dar_addr = DAR_PCIH2D_DB0_0(bus->sih->buscorerev);

    return ((DAR_ACTIVE(bus->dhd)) ? dar_addr : addr);
}

static INLINE uint
dhd_bus_db0_addr_2_get(struct dhd_bus *bus)
{
    return ((DAR_ACTIVE(bus->dhd)) ? DAR_PCIH2D_DB2_0(bus->sih->buscorerev) : PCIH2D_MailBox_2);
}

static INLINE uint
dhd_bus_db1_addr_get(struct dhd_bus *bus)
{
    return ((DAR_ACTIVE(bus->dhd)) ? DAR_PCIH2D_DB0_1(bus->sih->buscorerev) : PCIH2D_DB1);
}

static INLINE uint
dhd_bus_db1_addr_1_get(struct dhd_bus *bus)
{
    return ((DAR_ACTIVE(bus->dhd)) ? DAR_PCIH2D_DB1_1(bus->sih->buscorerev) : PCIH2D_DB1_1);
}
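/*
 * Illustrative sketch (assumption: an si_corereg() write, the access pattern
 * used throughout this file): ringing H2D doorbell 0 amounts to writing a
 * value to whichever address the helper above selects, e.g.:
 *
 *     si_corereg(bus->sih, bus->sih->buscoreidx,
 *         dhd_bus_db0_addr_get(bus), ~0, 0x12345678);
 *
 * DAR_ACTIVE() decides at runtime whether the DAR copy of the doorbell is
 * used; the DAR registers remain accessible in low-power states where the
 * enum register space may not be.
 */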
/*
 * WAR for SWWLAN-215055 - [4378B0] ARM fails to boot without DAR WL domain request
 */
static INLINE void
dhd_bus_pcie_pwr_req_wl_domain(struct dhd_bus *bus, bool enable)
{
    if (enable) {
        si_corereg(bus->sih, bus->sih->buscoreidx,
            DAR_PCIE_PWR_CTRL((bus->sih)->buscorerev),
            SRPWR_DMN1_ARMBPSD_MASK << SRPWR_REQON_SHIFT,
            SRPWR_DMN1_ARMBPSD_MASK << SRPWR_REQON_SHIFT);
    } else {
        si_corereg(bus->sih, bus->sih->buscoreidx,
            DAR_PCIE_PWR_CTRL((bus->sih)->buscorerev),
            SRPWR_DMN1_ARMBPSD_MASK << SRPWR_REQON_SHIFT, 0);
    }
}

static INLINE void
_dhd_bus_pcie_pwr_req_clear_cmn(struct dhd_bus *bus)
{
    uint mask;

    /*
     * On multiple de-asserts, just decrement the reference and return.
     * Clear the power request only when a single request is pending,
     * so the initial request is not removed unexpectedly.
     */
    if (bus->pwr_req_ref > 1) {
        bus->pwr_req_ref--;
        return;
    }

    ASSERT(bus->pwr_req_ref == 1);

    if (MULTIBP_ENAB(bus->sih)) {
        /* Common BP controlled by HW so only need to toggle WL/ARM backplane */
        mask = SRPWR_DMN1_ARMBPSD_MASK;
    } else {
        mask = SRPWR_DMN0_PCIE_MASK | SRPWR_DMN1_ARMBPSD_MASK;
    }

    si_srpwr_request(bus->sih, mask, 0);
    bus->pwr_req_ref = 0;
}

static INLINE void
dhd_bus_pcie_pwr_req_clear(struct dhd_bus *bus)
{
    unsigned long flags = 0;

    DHD_GENERAL_LOCK(bus->dhd, flags);
    _dhd_bus_pcie_pwr_req_clear_cmn(bus);
    DHD_GENERAL_UNLOCK(bus->dhd, flags);
}

static INLINE void
dhd_bus_pcie_pwr_req_clear_nolock(struct dhd_bus *bus)
{
    _dhd_bus_pcie_pwr_req_clear_cmn(bus);
}

static INLINE void
_dhd_bus_pcie_pwr_req_cmn(struct dhd_bus *bus)
{
    uint mask, val;

    /* If there are multiple request entries, increment the reference and return */
    if (bus->pwr_req_ref > 0) {
        bus->pwr_req_ref++;
        return;
    }

    ASSERT(bus->pwr_req_ref == 0);

    if (MULTIBP_ENAB(bus->sih)) {
        /* Common BP controlled by HW so only need to toggle WL/ARM backplane */
        mask = SRPWR_DMN1_ARMBPSD_MASK;
        val = SRPWR_DMN1_ARMBPSD_MASK;
    } else {
        mask = SRPWR_DMN0_PCIE_MASK | SRPWR_DMN1_ARMBPSD_MASK;
        val = SRPWR_DMN0_PCIE_MASK | SRPWR_DMN1_ARMBPSD_MASK;
    }

    si_srpwr_request(bus->sih, mask, val);
    bus->pwr_req_ref = 1;
}

static INLINE void
dhd_bus_pcie_pwr_req(struct dhd_bus *bus)
{
    unsigned long flags = 0;

    DHD_GENERAL_LOCK(bus->dhd, flags);
    _dhd_bus_pcie_pwr_req_cmn(bus);
    DHD_GENERAL_UNLOCK(bus->dhd, flags);
}

static INLINE void
_dhd_bus_pcie_pwr_req_pd0123_cmn(struct dhd_bus *bus)
{
    uint mask, val;

    mask = SRPWR_DMN_ALL_MASK(bus->sih);
    val = SRPWR_DMN_ALL_MASK(bus->sih);
    si_srpwr_request(bus->sih, mask, val);
}

static INLINE void
dhd_bus_pcie_pwr_req_reload_war(struct dhd_bus *bus)
{
    unsigned long flags = 0;

    DHD_GENERAL_LOCK(bus->dhd, flags);
    _dhd_bus_pcie_pwr_req_pd0123_cmn(bus);
    DHD_GENERAL_UNLOCK(bus->dhd, flags);
}

static INLINE void
_dhd_bus_pcie_pwr_req_clear_pd0123_cmn(struct dhd_bus *bus)
{
    uint mask;

    mask = SRPWR_DMN_ALL_MASK(bus->sih);
    si_srpwr_request(bus->sih, mask, 0);
}

static INLINE void
dhd_bus_pcie_pwr_req_clear_reload_war(struct dhd_bus *bus)
{
    unsigned long flags = 0;

    DHD_GENERAL_LOCK(bus->dhd, flags);
    _dhd_bus_pcie_pwr_req_clear_pd0123_cmn(bus);
    DHD_GENERAL_UNLOCK(bus->dhd, flags);
}

static INLINE void
dhd_bus_pcie_pwr_req_nolock(struct dhd_bus *bus)
{
    _dhd_bus_pcie_pwr_req_cmn(bus);
}
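/*
 * Illustrative sketch (not in the original source): the pwr_req calls are
 * reference counted and must be balanced. A typical guarded register access
 * looks like:
 *
 *     if (MULTIBP_ENAB(bus->sih)) {
 *         dhd_bus_pcie_pwr_req(bus);        // first call asserts the SRPWR request
 *     }
 *     ... access ARM/WL backplane registers ...
 *     if (MULTIBP_ENAB(bus->sih)) {
 *         dhd_bus_pcie_pwr_req_clear(bus);  // last call drops the request
 *     }
 *
 * The _nolock variants are for paths that already hold DHD_GENERAL_LOCK.
 */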
bool
dhdpcie_chip_support_msi(dhd_bus_t *bus)
{
    DHD_ERROR(("%s: buscorerev=%d chipid=0x%x\n",
        __FUNCTION__, bus->sih->buscorerev, si_chipid(bus->sih)));
    if (bus->sih->buscorerev <= 14 ||
        si_chipid(bus->sih) == BCM4375_CHIP_ID ||
        si_chipid(bus->sih) == BCM4362_CHIP_ID ||
        si_chipid(bus->sih) == BCM43751_CHIP_ID ||
        si_chipid(bus->sih) == BCM4361_CHIP_ID) {
        return FALSE;
    } else {
        return TRUE;
    }
}

/**
 * Called once for each hardware (dongle) instance that this DHD manages.
 *
 * 'regs' is the host virtual address that maps to the start of the PCIe BAR0 window. The first 4096
 * bytes in this window are mapped to the backplane address in the PCIEBAR0Window register. The
 * precondition is that the PCIEBAR0Window register 'points' at the PCIe core.
 *
 * 'tcm' is the *host* virtual address at which tcm is mapped.
 */
int dhdpcie_bus_attach(osl_t *osh, dhd_bus_t **bus_ptr,
    volatile char *regs, volatile char *tcm, void *pci_dev)
{
    dhd_bus_t *bus = NULL;
    int ret = BCME_OK;
    /* customvar1 and customvar2 are customer-configurable CIS tuples in OTP.
     * In a dual-chip (PCIe) scenario, customvar2 is used as a hint to detect
     * the chip variant and load the right firmware and NVRAM.
     */
    /* The vars below are initialized to 0x0, since an OTPed value cannot be 0x0 */
    uint32 customvar1 = 0x0;
    uint32 customvar2 = 0x0;
    uint32 otp_hw_module_variant = 0x0;

    DHD_TRACE(("%s: ENTER\n", __FUNCTION__));

    do {
        if (!(bus = MALLOCZ(osh, sizeof(dhd_bus_t)))) {
            DHD_ERROR(("%s: MALLOC of dhd_bus_t failed\n", __FUNCTION__));
            ret = BCME_NORESOURCE;
            break;
        }
        bus->regs = regs;
        bus->tcm = tcm;
        bus->osh = osh;
        /* Save pci_dev into dhd_bus, as it may be needed in dhd_attach */
        bus->dev = (struct pci_dev *)pci_dev;

        dll_init(&bus->flowring_active_list);
#ifdef IDLE_TX_FLOW_MGMT
        bus->active_list_last_process_ts = OSL_SYSUPTIME();
#endif /* IDLE_TX_FLOW_MGMT */

        /* Attach pcie shared structure */
        if (!(bus->pcie_sh = MALLOCZ(osh, sizeof(pciedev_shared_t)))) {
            DHD_ERROR(("%s: MALLOC of bus->pcie_sh failed\n", __FUNCTION__));
            ret = BCME_NORESOURCE;
            break;
        }

        /* dhd_common_init(osh); */

        if (dhdpcie_dongle_attach(bus)) {
            DHD_ERROR(("%s: dhdpcie_probe_attach failed\n", __FUNCTION__));
            ret = BCME_NOTREADY;
            break;
        }

        if (!hw_module_variant) {
            /* For a single wifi module */
            goto enumerate_module;
        }

        /* Read the OTP variable customvar and store it in customvar1 and customvar2 */
        if (dhdpcie_sromotp_customvar(bus, &customvar1, &customvar2)) {
            DHD_ERROR(("%s: dhdpcie_sromotp_customvar failed\n", __FUNCTION__));
            break;
        }
        if (!customvar2) {
            DHD_ERROR(("%s: customvar2 is not OTPed, "
                "hw_module_variant=0x%x\n",
                __FUNCTION__, hw_module_variant));
            goto enumerate_module;
        }
        /* customvar2=0xNNMMLLKK, LL is the module variant */
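        /* Worked example (illustrative): customvar2 == 0x11223344 would yield
         * otp_hw_module_variant == 0x33, since bits 15:8 hold the LL field
         * extracted by the shift-and-mask below.
         */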
        otp_hw_module_variant = (customvar2 >> 8) & 0xFF;
        DHD_TRACE(("%s: hw_module_variant=0x%x and "
            "OTPed-module_variant=0x%x\n", __func__,
            hw_module_variant, otp_hw_module_variant));
        if (hw_module_variant != otp_hw_module_variant) {
            DHD_ERROR(("%s: Not going to enumerate this module as "
                "hw_module_variant=0x%x and "
                "OTPed-module_variant=0x%x didn't match\n",
                __FUNCTION__, hw_module_variant, otp_hw_module_variant));
            break;
        }
        DHD_TRACE(("%s: Going to enumerate this module as "
            "hw_module_variant=0x%x and "
            "OTPed-module_variant=0x%x match\n",
            __FUNCTION__, hw_module_variant, otp_hw_module_variant));
enumerate_module:
        /* software resources */
        if (!(bus->dhd = dhd_attach(osh, bus, PCMSGBUF_HDRLEN))) {
            DHD_ERROR(("%s: dhd_attach failed\n", __FUNCTION__));
            break;
        }
        DHD_ERROR(("%s: making DHD_BUS_DOWN\n", __FUNCTION__));
        bus->dhd->busstate = DHD_BUS_DOWN;
        bus->dhd->hostrdy_after_init = TRUE;
        bus->db1_for_mb = TRUE;
        bus->dhd->hang_report = TRUE;
        bus->use_mailbox = FALSE;
        bus->use_d0_inform = FALSE;
        bus->intr_enabled = FALSE;
        bus->flr_force_fail = FALSE;
        /* By default disable HWA; it is enabled via iovar */
        bus->hwa_enab_bmap = 0;
        /* Update the DMA indices if set through a module parameter */
        if (dma_ring_indices != 0) {
            dhdpcie_set_dma_ring_indices(bus->dhd, dma_ring_indices);
        }
        /* Update h2d phase support if set through a module parameter */
        bus->dhd->h2d_phase_supported = h2d_phase ? TRUE : FALSE;
        /* Update force-trap-on-bad-phase if set through a module parameter */
        bus->dhd->force_dongletrap_on_bad_h2d_phase =
            force_trap_bad_h2d_phase ? TRUE : FALSE;
#ifdef IDLE_TX_FLOW_MGMT
        bus->enable_idle_flowring_mgmt = FALSE;
#endif /* IDLE_TX_FLOW_MGMT */
        bus->irq_registered = FALSE;
#ifdef DHD_MSI_SUPPORT
#ifdef DHD_FORCE_MSI
        bus->d2h_intr_method = PCIE_MSI;
#else
        bus->d2h_intr_method = enable_msi && dhdpcie_chip_support_msi(bus) ?
            PCIE_MSI : PCIE_INTX;
#endif /* DHD_FORCE_MSI */
#else
        bus->d2h_intr_method = PCIE_INTX;
#endif /* DHD_MSI_SUPPORT */
#ifdef DHD_HP2P
        bus->hp2p_txcpl_max_items = DHD_MAX_ITEMS_HPP_TXCPL_RING;
        bus->hp2p_rxcpl_max_items = DHD_MAX_ITEMS_HPP_RXCPL_RING;
#endif /* DHD_HP2P */

        DHD_TRACE(("%s: EXIT SUCCESS\n", __FUNCTION__));
        g_dhd_bus = bus;
        *bus_ptr = bus;
        return ret;
    } while (0);

    DHD_TRACE(("%s: EXIT FAILURE\n", __FUNCTION__));
    if (bus && bus->pcie_sh) {
        MFREE(osh, bus->pcie_sh, sizeof(pciedev_shared_t));
    }
    if (bus) {
        MFREE(osh, bus, sizeof(dhd_bus_t));
    }
    return ret;
}

bool
dhd_bus_skip_clm(dhd_pub_t *dhdp)
{
    switch (dhd_bus_chip_id(dhdp)) {
    case BCM4369_CHIP_ID:
        return TRUE;
    default:
        return FALSE;
    }
}

uint
dhd_bus_chip(struct dhd_bus *bus)
{
    ASSERT(bus->sih != NULL);
    return bus->sih->chip;
}

uint
dhd_bus_chiprev(struct dhd_bus *bus)
{
    ASSERT(bus);
    ASSERT(bus->sih != NULL);
    return bus->sih->chiprev;
}

void *
dhd_bus_pub(struct dhd_bus *bus)
{
    return bus->dhd;
}

void *
dhd_bus_sih(struct dhd_bus *bus)
{
    return (void *)bus->sih;
}

void *
dhd_bus_txq(struct dhd_bus *bus)
{
    return &bus->txq;
}

/** Get Chip ID version */
uint dhd_bus_chip_id(dhd_pub_t *dhdp)
{
    dhd_bus_t *bus = dhdp->bus;
    return bus->sih->chip;
}

/** Get Chip Rev ID version */
uint dhd_bus_chiprev_id(dhd_pub_t *dhdp)
{
    dhd_bus_t *bus = dhdp->bus;
    return bus->sih->chiprev;
}

/** Get Chip Pkg ID version */
uint dhd_bus_chippkg_id(dhd_pub_t *dhdp)
{
    dhd_bus_t *bus = dhdp->bus;
    return bus->sih->chippkg;
}

/** Conduct a DMA loopback test */
int
dhd_bus_dmaxfer_lpbk(dhd_pub_t *dhdp, uint32 type)
{
    dma_xfer_info_t dmaxfer_lpbk;
    int ret = BCME_OK;

#define PCIE_DMAXFER_LPBK_LENGTH 4096
    memset(&dmaxfer_lpbk, 0, sizeof(dma_xfer_info_t));
    dmaxfer_lpbk.version = DHD_DMAXFER_VERSION;
    dmaxfer_lpbk.length = (uint16)sizeof(dma_xfer_info_t);
    dmaxfer_lpbk.num_bytes = PCIE_DMAXFER_LPBK_LENGTH;
    dmaxfer_lpbk.type = type;
    dmaxfer_lpbk.should_wait = TRUE;

    ret = dhd_bus_iovar_op(dhdp, "pcie_dmaxfer", NULL, 0,
        (char *)&dmaxfer_lpbk, sizeof(dma_xfer_info_t), IOV_SET);
    if (ret < 0) {
        DHD_ERROR(("failed to start PCIe Loopback Test!!! "
            "Type:%d Reason:%d\n", type, ret));
        return ret;
    }

    if (dmaxfer_lpbk.status != DMA_XFER_SUCCESS) {
        DHD_ERROR(("PCIe Loopback Test failed!!! "
            "Type:%d Status:%d Error code:%d\n", type,
            dmaxfer_lpbk.status, dmaxfer_lpbk.error_code));
        ret = BCME_ERROR;
    } else {
        DHD_ERROR(("PCIe Loopback Test passed. Type:%d\n", type));
    }
#undef PCIE_DMAXFER_LPBK_LENGTH
    return ret;
}

/* Log the latest DPC schedule time */
void
dhd_bus_set_dpc_sched_time(dhd_pub_t *dhdp)
{
    dhdp->bus->dpc_sched_time = OSL_LOCALTIME_NS();
}

/* Check if there are DPC scheduling errors */
bool
dhd_bus_query_dpc_sched_errors(dhd_pub_t *dhdp)
{
    dhd_bus_t *bus = dhdp->bus;
    bool sched_err;

    if (bus->dpc_entry_time < bus->isr_exit_time) {
        /* Kernel didn't schedule the DPC after processing the PCIe IRQ */
        sched_err = TRUE;
    } else if (bus->dpc_entry_time < bus->resched_dpc_time) {
        /* Kernel didn't schedule the DPC after DHD tried to reschedule
         * it due to pending work items to be processed.
         */
        sched_err = TRUE;
    } else {
        sched_err = FALSE;
    }

    if (sched_err) {
        /* print out minimum timestamp info */
        DHD_ERROR(("isr_entry_time="SEC_USEC_FMT
            " isr_exit_time="SEC_USEC_FMT
            " dpc_entry_time="SEC_USEC_FMT
            "\ndpc_exit_time="SEC_USEC_FMT
            " dpc_sched_time="SEC_USEC_FMT
            " resched_dpc_time="SEC_USEC_FMT"\n",
            GET_SEC_USEC(bus->isr_entry_time),
            GET_SEC_USEC(bus->isr_exit_time),
            GET_SEC_USEC(bus->dpc_entry_time),
            GET_SEC_USEC(bus->dpc_exit_time),
            GET_SEC_USEC(bus->dpc_sched_time),
            GET_SEC_USEC(bus->resched_dpc_time)));
    }
    return sched_err;
}
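/*
 * Illustrative timeline (derived from the checks above): in the healthy case
 * the timestamps advance as
 *
 *     isr_entry_time < isr_exit_time <= dpc_entry_time
 *     resched_dpc_time <= dpc_entry_time  (when a reschedule was requested)
 *
 * so a dpc_entry_time older than isr_exit_time or resched_dpc_time means the
 * kernel never ran the DPC that was queued, which is reported as a scheduling
 * error.
 */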
/** Read and clear intstatus. This should be called with interrupts disabled or inside the isr */
uint32
dhdpcie_bus_intstatus(dhd_bus_t *bus)
{
    uint32 intstatus = 0;
    uint32 intmask = 0;

    if (bus->bus_low_power_state == DHD_BUS_D3_ACK_RECIEVED) {
        DHD_ERROR(("%s: trying to clear intstatus after D3 Ack\n", __FUNCTION__));
        return intstatus;
    }
    if ((bus->sih->buscorerev == 6) || (bus->sih->buscorerev == 4) ||
        (bus->sih->buscorerev == 2)) {
        intstatus = dhdpcie_bus_cfg_read_dword(bus, PCIIntstatus, 4);
        dhdpcie_bus_cfg_write_dword(bus, PCIIntstatus, 4, intstatus);
        intstatus &= I_MB;
    } else {
        /* this is a PCIE core register, not a config register */
        intstatus = si_corereg(bus->sih, bus->sih->buscoreidx, bus->pcie_mailbox_int, 0, 0);
        /* this is a PCIE core register, not a config register */
        intmask = si_corereg(bus->sih, bus->sih->buscoreidx, bus->pcie_mailbox_mask, 0, 0);
        /* If the device has been removed, intstatus and intmask read back as 0xffffffff */
        if (intstatus == (uint32)-1 || intmask == (uint32)-1) {
            DHD_ERROR(("%s: Device is removed or Link is down.\n", __FUNCTION__));
            DHD_ERROR(("%s: INTSTAT : 0x%x INTMASK : 0x%x.\n",
                __FUNCTION__, intstatus, intmask));
            bus->is_linkdown = TRUE;
            dhd_pcie_debug_info_dump(bus->dhd);
#ifdef CUSTOMER_HW4_DEBUG
#if defined(OEM_ANDROID)
#ifdef SUPPORT_LINKDOWN_RECOVERY
#ifdef CONFIG_ARCH_MSM
            bus->no_cfg_restore = 1;
#endif /* CONFIG_ARCH_MSM */
#endif /* SUPPORT_LINKDOWN_RECOVERY */
            bus->dhd->hang_reason = HANG_REASON_PCIE_LINK_DOWN_EP_DETECT;
            dhd_os_send_hang_message(bus->dhd);
#endif /* OEM_ANDROID */
#endif /* CUSTOMER_HW4_DEBUG */
            return intstatus;
        }
        intstatus &= intmask;
        /*
         * The fourth argument to si_corereg is the "mask" of the register fields to
         * update and the fifth is the "value" to write. If we are interested in only
         * a few fields of the "mask" bitmap, we should not write back what we read,
         * since doing so might clear/ack interrupts that have not been handled yet.
         */
        si_corereg(bus->sih, bus->sih->buscoreidx, bus->pcie_mailbox_int, bus->def_intmask,
            intstatus);
        intstatus &= bus->def_intmask;
    }
    return intstatus;
}

void
dhdpcie_cto_recovery_handler(dhd_pub_t *dhd)
{
    dhd_bus_t *bus = dhd->bus;
    int ret;

    /* Disable PCIe Runtime PM to avoid a D3_ACK timeout. */
    DHD_DISABLE_RUNTIME_PM(dhd);

    /* Sleep for 1 second so that any AXI timeout,
     * even if running on the ALP clock, is also captured
     */
    OSL_SLEEP(1000);

    /* Reset backplane and CTO so that access through PCIe is recovered. */
    ret = dhdpcie_cto_error_recovery(bus);
    if (!ret) {
        /* Wait for backplane reset */
        OSL_SLEEP(10);
        /* Dump debug info */
        dhd_prot_debug_info_print(bus->dhd);
        /* Dump console buffer */
        dhd_bus_dump_console_buffer(bus);
#if defined(DHD_FW_COREDUMP)
        /* save core dump or write to a file */
        if (!bus->is_linkdown && bus->dhd->memdump_enabled) {
#ifdef DHD_SSSR_DUMP
            bus->dhd->collect_sssr = TRUE;
#endif /* DHD_SSSR_DUMP */
            bus->dhd->memdump_type = DUMP_TYPE_CTO_RECOVERY;
            dhdpcie_mem_dump(bus);
        }
#endif /* DHD_FW_COREDUMP */
    }

#ifdef OEM_ANDROID
#ifdef SUPPORT_LINKDOWN_RECOVERY
#ifdef CONFIG_ARCH_MSM
    bus->no_cfg_restore = 1;
#endif /* CONFIG_ARCH_MSM */
#endif /* SUPPORT_LINKDOWN_RECOVERY */
    bus->is_linkdown = TRUE;
    bus->dhd->hang_reason = HANG_REASON_PCIE_CTO_DETECT;
    /* Send HANG event */
    dhd_os_send_hang_message(bus->dhd);
#endif /* OEM_ANDROID */
}

/**
 * Name:  dhdpcie_bus_isr
 * Parameters:
 * 1: IN int irq   -- interrupt vector
 * 2: IN void *arg -- handle to private data structure
 * Return value:
 * Status (TRUE or FALSE)
 *
 * Description:
 * The interrupt service routine checks the status register,
 * disables the interrupt, and queues the DPC if mailbox interrupts are raised.
 */
int32
dhdpcie_bus_isr(dhd_bus_t *bus)
{
    uint32 intstatus = 0;

    do {
        DHD_TRACE(("%s: Enter\n", __FUNCTION__));
        /* verify argument */
        if (!bus) {
            DHD_LOG_MEM(("%s : bus is null pointer, exit \n", __FUNCTION__));
            break;
        }
        if (bus->dhd->dongle_reset) {
            DHD_LOG_MEM(("%s : dongle is reset\n", __FUNCTION__));
            break;
        }
        if (bus->dhd->busstate == DHD_BUS_DOWN) {
            DHD_LOG_MEM(("%s : bus is down \n", __FUNCTION__));
            break;
        }
        /* avoid processing of interrupts until msgbuf prot is inited */
        if (!bus->intr_enabled) {
            DHD_INFO(("%s, not ready to receive interrupts\n", __FUNCTION__));
            break;
        }
        if (PCIECTO_ENAB(bus)) {
            /* read pci_intstatus */
            intstatus = dhdpcie_bus_cfg_read_dword(bus, PCI_INT_STATUS, 4);
            if (intstatus & PCI_CTO_INT_MASK) {
                DHD_ERROR(("%s: ##### CTO RECOVERY REPORTED BY DONGLE "
                    "intstat=0x%x enab=%d\n", __FUNCTION__,
                    intstatus, bus->cto_enable));
                bus->cto_triggered = 1;
                /* DAR is still accessible */
                dhd_bus_dump_dar_registers(bus);
                /* Disable further PCIe interrupts */
                dhdpcie_disable_irq_nosync(bus); /* Disable interrupt!! */
                /* Stop Tx flow */
                dhd_bus_stop_queue(bus);
                /* Schedule CTO recovery */
                dhd_schedule_cto_recovery(bus->dhd);
                return TRUE;
            }
        }
        if (bus->d2h_intr_method == PCIE_MSI) {
            /* For MSI, intstatus is cleared by firmware, so there is no need to read it */
            goto skip_intstatus_read;
        }
        intstatus = dhdpcie_bus_intstatus(bus);
        /* Check if the interrupt is ours or not */
        if (intstatus == 0) {
            /* in EFI, since we poll for the interrupt, this message would flood
             * the logs, so disable it for EFI
             */
            DHD_LOG_MEM(("%s : this interrupt is not ours\n", __FUNCTION__));
            bus->non_ours_irq_count++;
            bus->last_non_ours_irq_time = OSL_LOCALTIME_NS();
            break;
        }
        /* save the intstatus */
        /* read interrupt status register!! Status bits will be cleared in DPC !! */
        bus->intstatus = intstatus;
        /* return error for 0xFFFFFFFF */
        if (intstatus == (uint32)-1) {
            DHD_LOG_MEM(("%s : wrong interrupt status val : 0x%x\n",
                __FUNCTION__, intstatus));
            dhdpcie_disable_irq_nosync(bus);
            break;
        }
skip_intstatus_read:
        /* Overall operation:
         * - Mask further interrupts
         * - Read/ack intstatus
         * - Take action based on bits and state
         * - Reenable interrupts (as per state)
         */
        /* Count the interrupt call */
        bus->intrcount++;
        bus->ipend = TRUE;
        bus->isr_intr_disable_count++;
        /* For Linux, MacOS, etc. (other than NDIS), instead of disabling
         * the dongle interrupt by clearing the IntMask, disable the interrupt
         * directly on the host side, so that the host will not receive
         * any interrupts at all, even if the dongle raises them
         */
        dhdpcie_disable_irq_nosync(bus); /* Disable interrupt!! */
        bus->intdis = TRUE;
#if defined(PCIE_ISR_THREAD)
        DHD_TRACE(("Calling dhd_bus_dpc() from %s\n", __FUNCTION__));
        DHD_OS_WAKE_LOCK(bus->dhd);
        while (dhd_bus_dpc(bus));
        DHD_OS_WAKE_UNLOCK(bus->dhd);
#else
        bus->dpc_sched = TRUE;
        dhd_sched_dpc(bus->dhd); /* queue DPC now!! */
#endif /* defined(PCIE_ISR_THREAD) */
        DHD_TRACE(("%s: Exit Success DPC Queued\n", __FUNCTION__));
        return TRUE;
    } while (0);

    DHD_TRACE(("%s: Exit Failure\n", __FUNCTION__));
    return FALSE;
}

int
dhdpcie_set_pwr_state(dhd_bus_t *bus, uint state)
{
    uint32 cur_state = 0;
    uint32 pm_csr = 0;
    osl_t *osh = bus->osh;

    pm_csr = OSL_PCI_READ_CONFIG(osh, PCIECFGREG_PM_CSR, sizeof(uint32));
    cur_state = pm_csr & PCIECFGREG_PM_CSR_STATE_MASK;

    if (cur_state == state) {
        DHD_ERROR(("%s: Already in state %u \n", __FUNCTION__, cur_state));
        return BCME_OK;
    }

    if (state > PCIECFGREG_PM_CSR_STATE_D3_HOT)
        return BCME_ERROR;

    /* Validate the state transition:
     * if already in a lower power state, return an error
     */
    if (state != PCIECFGREG_PM_CSR_STATE_D0 &&
        cur_state <= PCIECFGREG_PM_CSR_STATE_D3_COLD &&
        cur_state > state) {
        DHD_ERROR(("%s: Invalid power state transition !\n", __FUNCTION__));
        return BCME_ERROR;
    }

    pm_csr &= ~PCIECFGREG_PM_CSR_STATE_MASK;
    pm_csr |= state;

    OSL_PCI_WRITE_CONFIG(osh, PCIECFGREG_PM_CSR, sizeof(uint32), pm_csr);

    /* need to wait for the specified mandatory PCIe power transition delay time */
    if (state == PCIECFGREG_PM_CSR_STATE_D3_HOT ||
        cur_state == PCIECFGREG_PM_CSR_STATE_D3_HOT)
        OSL_DELAY(DHDPCIE_PM_D3_DELAY);
    else if (state == PCIECFGREG_PM_CSR_STATE_D2 ||
        cur_state == PCIECFGREG_PM_CSR_STATE_D2)
        OSL_DELAY(DHDPCIE_PM_D2_DELAY);

    /* read back the power state and verify */
    pm_csr = OSL_PCI_READ_CONFIG(osh, PCIECFGREG_PM_CSR, sizeof(uint32));
    cur_state = pm_csr & PCIECFGREG_PM_CSR_STATE_MASK;
    if (cur_state != state) {
        DHD_ERROR(("%s: power transition failed ! Current state is %u \n",
            __FUNCTION__, cur_state));
        return BCME_ERROR;
    } else {
        DHD_ERROR(("%s: power transition to %u success \n",
            __FUNCTION__, cur_state));
    }
    return BCME_OK;
}
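/*
 * Illustrative usage (sketch): moving the device to D3hot before suspend and
 * back to D0 on resume would look like
 *
 *     dhdpcie_set_pwr_state(bus, PCIECFGREG_PM_CSR_STATE_D3_HOT);
 *     ...
 *     dhdpcie_set_pwr_state(bus, PCIECFGREG_PM_CSR_STATE_D0);
 *
 * The function itself enforces the mandatory settle delays
 * (DHDPCIE_PM_D3_DELAY / DHDPCIE_PM_D2_DELAY) and verifies the transition by
 * reading PM_CSR back.
 */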
int
dhdpcie_config_check(dhd_bus_t *bus)
{
    uint32 i, val;
    int ret = BCME_ERROR;

    for (i = 0; i < DHDPCIE_CONFIG_CHECK_RETRY_COUNT; i++) {
        val = OSL_PCI_READ_CONFIG(bus->osh, PCI_CFG_VID, sizeof(uint32));
        if ((val & 0xFFFF) == VENDOR_BROADCOM) {
            ret = BCME_OK;
            break;
        }
        OSL_DELAY(DHDPCIE_CONFIG_CHECK_DELAY_MS * 1000);
    }
    return ret;
}

int
dhdpcie_config_restore(dhd_bus_t *bus, bool restore_pmcsr)
{
    uint32 i;
    osl_t *osh = bus->osh;

    if (BCME_OK != dhdpcie_config_check(bus)) {
        return BCME_ERROR;
    }

    for (i = PCI_CFG_REV >> 2; i < DHDPCIE_CONFIG_HDR_SIZE; i++) {
        OSL_PCI_WRITE_CONFIG(osh, i << 2, sizeof(uint32), bus->saved_config.header[i]);
    }
    OSL_PCI_WRITE_CONFIG(osh, PCI_CFG_CMD, sizeof(uint32), bus->saved_config.header[1]);

    if (restore_pmcsr)
        OSL_PCI_WRITE_CONFIG(osh, PCIECFGREG_PM_CSR,
            sizeof(uint32), bus->saved_config.pmcsr);

    OSL_PCI_WRITE_CONFIG(osh, PCIECFGREG_MSI_CAP, sizeof(uint32), bus->saved_config.msi_cap);
    OSL_PCI_WRITE_CONFIG(osh, PCIECFGREG_MSI_ADDR_L, sizeof(uint32),
        bus->saved_config.msi_addr0);
    OSL_PCI_WRITE_CONFIG(osh, PCIECFGREG_MSI_ADDR_H,
        sizeof(uint32), bus->saved_config.msi_addr1);
    OSL_PCI_WRITE_CONFIG(osh, PCIECFGREG_MSI_DATA,
        sizeof(uint32), bus->saved_config.msi_data);
    OSL_PCI_WRITE_CONFIG(osh, PCIECFGREG_DEV_STATUS_CTRL,
        sizeof(uint32), bus->saved_config.exp_dev_ctrl_stat);
    OSL_PCI_WRITE_CONFIG(osh, PCIECFGGEN_DEV_STATUS_CTRL2,
        sizeof(uint32), bus->saved_config.exp_dev_ctrl_stat2);
    OSL_PCI_WRITE_CONFIG(osh, PCIECFGREG_LINK_STATUS_CTRL,
        sizeof(uint32), bus->saved_config.exp_link_ctrl_stat);
    OSL_PCI_WRITE_CONFIG(osh, PCIECFGREG_LINK_STATUS_CTRL2,
        sizeof(uint32), bus->saved_config.exp_link_ctrl_stat2);
    OSL_PCI_WRITE_CONFIG(osh, PCIECFGREG_PML1_SUB_CTRL1,
        sizeof(uint32), bus->saved_config.l1pm0);
    OSL_PCI_WRITE_CONFIG(osh, PCIECFGREG_PML1_SUB_CTRL2,
        sizeof(uint32), bus->saved_config.l1pm1);

    OSL_PCI_WRITE_CONFIG(bus->osh, PCI_BAR0_WIN, sizeof(uint32),
        bus->saved_config.bar0_win);
    dhdpcie_setbar1win(bus, bus->saved_config.bar1_win);

    return BCME_OK;
}

int
dhdpcie_config_save(dhd_bus_t *bus)
{
    uint32 i;
    osl_t *osh = bus->osh;

    if (BCME_OK != dhdpcie_config_check(bus)) {
        return BCME_ERROR;
    }

    for (i = 0; i < DHDPCIE_CONFIG_HDR_SIZE; i++) {
        bus->saved_config.header[i] = OSL_PCI_READ_CONFIG(osh, i << 2, sizeof(uint32));
    }

    bus->saved_config.pmcsr = OSL_PCI_READ_CONFIG(osh, PCIECFGREG_PM_CSR, sizeof(uint32));
    bus->saved_config.msi_cap = OSL_PCI_READ_CONFIG(osh, PCIECFGREG_MSI_CAP,
        sizeof(uint32));
    bus->saved_config.msi_addr0 = OSL_PCI_READ_CONFIG(osh, PCIECFGREG_MSI_ADDR_L,
        sizeof(uint32));
    bus->saved_config.msi_addr1 = OSL_PCI_READ_CONFIG(osh, PCIECFGREG_MSI_ADDR_H,
        sizeof(uint32));
    bus->saved_config.msi_data = OSL_PCI_READ_CONFIG(osh, PCIECFGREG_MSI_DATA,
        sizeof(uint32));
    bus->saved_config.exp_dev_ctrl_stat = OSL_PCI_READ_CONFIG(osh,
        PCIECFGREG_DEV_STATUS_CTRL, sizeof(uint32));
    bus->saved_config.exp_dev_ctrl_stat2 = OSL_PCI_READ_CONFIG(osh,
        PCIECFGGEN_DEV_STATUS_CTRL2, sizeof(uint32));
    bus->saved_config.exp_link_ctrl_stat = OSL_PCI_READ_CONFIG(osh,
        PCIECFGREG_LINK_STATUS_CTRL, sizeof(uint32));
    bus->saved_config.exp_link_ctrl_stat2 = OSL_PCI_READ_CONFIG(osh,
        PCIECFGREG_LINK_STATUS_CTRL2, sizeof(uint32));
    bus->saved_config.l1pm0 = OSL_PCI_READ_CONFIG(osh, PCIECFGREG_PML1_SUB_CTRL1,
        sizeof(uint32));
    bus->saved_config.l1pm1 = OSL_PCI_READ_CONFIG(osh, PCIECFGREG_PML1_SUB_CTRL2,
        sizeof(uint32));

    bus->saved_config.bar0_win = OSL_PCI_READ_CONFIG(osh, PCI_BAR0_WIN,
        sizeof(uint32));
    bus->saved_config.bar1_win = OSL_PCI_READ_CONFIG(osh, PCI_BAR1_WIN,
        sizeof(uint32));

    return BCME_OK;
}
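/*
 * Illustrative pairing (sketch): dhdpcie_config_save() is expected before the
 * link can lose configuration state, with the matching restore on the way
 * back up:
 *
 *     dhdpcie_config_save(bus);            // before D3cold / link down
 *     ...suspend, power toggle, resume...
 *     dhdpcie_config_restore(bus, TRUE);   // TRUE also restores PM_CSR
 *
 * Both paths first call dhdpcie_config_check() so nothing is read or written
 * while the device is absent from the bus.
 */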
#ifdef EXYNOS_PCIE_LINKDOWN_RECOVERY
dhd_pub_t *link_recovery = NULL;
#endif /* EXYNOS_PCIE_LINKDOWN_RECOVERY */

static void
dhdpcie_bus_intr_init(dhd_bus_t *bus)
{
    uint buscorerev = bus->sih->buscorerev;

    bus->pcie_mailbox_int = PCIMailBoxInt(buscorerev);
    bus->pcie_mailbox_mask = PCIMailBoxMask(buscorerev);
    bus->d2h_mb_mask = PCIE_MB_D2H_MB_MASK(buscorerev);
    bus->def_intmask = PCIE_MB_D2H_MB_MASK(buscorerev);
    if (buscorerev < 64) {
        bus->def_intmask |= PCIE_MB_TOPCIE_FN0_0 | PCIE_MB_TOPCIE_FN0_1;
    }
}

static void
dhdpcie_cc_watchdog_reset(dhd_bus_t *bus)
{
    uint32 wd_en = (bus->sih->buscorerev >= 66) ? WD_SSRESET_PCIE_F0_EN :
        (WD_SSRESET_PCIE_F0_EN | WD_SSRESET_PCIE_ALL_FN_EN);
    pcie_watchdog_reset(bus->osh, bus->sih, WD_ENABLE_MASK, wd_en);
}

void
dhdpcie_dongle_reset(dhd_bus_t *bus)
{
    /* if the PCIe link is down, watchdog reset
     * should not be done, as it may hang
     */
    if (bus->is_linkdown) {
        return;
    }

    /* dhd_bus_perform_flr will return BCME_UNSUPPORTED if the chip is not FLR capable */
    if (dhd_bus_perform_flr(bus, FALSE) == BCME_UNSUPPORTED) {
#ifdef DHD_USE_BP_RESET
        /* Backplane reset using SPROM cfg register (0x88) for buscorerev <= 24 */
        dhd_bus_perform_bp_reset(bus);
#else
        /* Legacy chipcommon watchdog reset */
        dhdpcie_cc_watchdog_reset(bus);
#endif /* DHD_USE_BP_RESET */
    }
}
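/*
 * Reset strategy summary (sketch of the fallbacks above): prefer a
 * per-function FLR; if the chip cannot do FLR, fall back to either the
 * SPROM-cfg backplane reset (DHD_USE_BP_RESET builds) or the legacy
 * chipcommon watchdog reset, and skip everything when the link is already
 * down.
 */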
#ifdef CHIPS_CUSTOMER_HW6
void
dhdpcie_bus_mpu_disable(dhd_bus_t *bus)
{
    volatile uint32 *cr4_regs;

    if (BCM4378_CHIP(bus->sih->chip)) {
        cr4_regs = si_setcore(bus->sih, ARMCR4_CORE_ID, 0);
        if (cr4_regs == NULL) {
            DHD_ERROR(("%s: Failed to find ARM core!\n", __FUNCTION__));
            return;
        }
        if (R_REG(bus->osh, cr4_regs + ARMCR4REG_CORECAP) & ACC_MPU_MASK) {
            /* bus MPU is supported */
            W_REG(bus->osh, cr4_regs + ARMCR4REG_MPUCTRL, 0);
        }
    }
}
#endif /* CHIPS_CUSTOMER_HW6 */

static bool
dhdpcie_dongle_attach(dhd_bus_t *bus)
{
    osl_t *osh = bus->osh;
    volatile void *regsva = (volatile void*)bus->regs;
    uint16 devid;
    uint32 val;
    sbpcieregs_t *sbpcieregs;
    bool dongle_isolation;

    DHD_TRACE(("%s: ENTER\n", __FUNCTION__));

#ifdef EXYNOS_PCIE_LINKDOWN_RECOVERY
    link_recovery = bus->dhd;
#endif /* EXYNOS_PCIE_LINKDOWN_RECOVERY */

    bus->alp_only = TRUE;
    bus->sih = NULL;

    /* Check the PCIe bus status by reading configuration space */
    val = OSL_PCI_READ_CONFIG(osh, PCI_CFG_VID, sizeof(uint32));
    if ((val & 0xFFFF) != VENDOR_BROADCOM) {
        DHD_ERROR(("%s : failed to read PCI configuration space!\n", __FUNCTION__));
        goto fail;
    }
    devid = (val >> 16) & 0xFFFF;
    bus->cl_devid = devid;

    /* Set bar0 window to si_enum_base */
    dhdpcie_bus_cfg_set_bar0_win(bus, si_enum_base(devid));

    /*
     * Check the PCI_SPROM_CONTROL register to prevent an invalid address access
     * due to the switch of address space from PCI_BUS to SI_BUS.
     */
    val = OSL_PCI_READ_CONFIG(osh, PCI_SPROM_CONTROL, sizeof(uint32));
    if (val == 0xffffffff) {
        DHD_ERROR(("%s : failed to read SPROM control register\n", __FUNCTION__));
        goto fail;
    }

    /* si_attach() will provide an SI handle and scan the backplane */
    if (!(bus->sih = si_attach((uint)devid, osh, regsva, PCI_BUS, bus,
        &bus->vars, &bus->varsz))) {
        DHD_ERROR(("%s: si_attach failed!\n", __FUNCTION__));
        goto fail;
    }

    /* Configure CTO Prevention functionality */
#if defined(BCMFPGA_HW)
    DHD_ERROR(("Disable CTO\n"));
    bus->cto_enable = FALSE;
#else
#if defined(BCMPCIE_CTO_PREVENTION)
    if (bus->sih->buscorerev >= 24) {
        DHD_ERROR(("Enable CTO\n"));
        bus->cto_enable = TRUE;
    } else
#endif /* BCMPCIE_CTO_PREVENTION */
    {
        DHD_ERROR(("Disable CTO\n"));
        bus->cto_enable = FALSE;
    }
#endif /* BCMFPGA_HW */

    if (PCIECTO_ENAB(bus)) {
        dhdpcie_cto_init(bus, TRUE);
    }

    if (MULTIBP_ENAB(bus->sih) && (bus->sih->buscorerev >= 66)) {
        /*
         * HW JIRA - CRWLPCIEGEN2-672
         * The Producer Index feature used by F1 gets reset on F0 FLR;
         * fixed in REV68
         */
        if (PCIE_ENUM_RESET_WAR_ENAB(bus->sih->buscorerev)) {
            dhdpcie_ssreset_dis_enum_rst(bus);
        }

        /* IOV_DEVRESET could exercise si_detach()/si_attach() again, so reset
         * the power request reference count:
         * dhdpcie_bus_release_dongle() --> si_detach()
         * dhdpcie_dongle_attach() --> si_attach()
         */
        bus->pwr_req_ref = 0;
    }

    if (MULTIBP_ENAB(bus->sih)) {
        dhd_bus_pcie_pwr_req_nolock(bus);
    }

    /* Get info on the ARM and SOCRAM cores... */
    /* Should really be qualified by device id */
    if ((si_setcore(bus->sih, ARM7S_CORE_ID, 0)) ||
        (si_setcore(bus->sih, ARMCM3_CORE_ID, 0)) ||
        (si_setcore(bus->sih, ARMCR4_CORE_ID, 0)) ||
        (si_setcore(bus->sih, ARMCA7_CORE_ID, 0))) {
        bus->armrev = si_corerev(bus->sih);
        bus->coreid = si_coreid(bus->sih);
    } else {
        DHD_ERROR(("%s: failed to find ARM core!\n", __FUNCTION__));
        goto fail;
    }

    /* CA7 requires coherent bits on */
    if (bus->coreid == ARMCA7_CORE_ID) {
        val = dhdpcie_bus_cfg_read_dword(bus, PCIE_CFG_SUBSYSTEM_CONTROL, 4);
        dhdpcie_bus_cfg_write_dword(bus, PCIE_CFG_SUBSYSTEM_CONTROL, 4,
            (val | PCIE_BARCOHERENTACCEN_MASK));
    }

    /* Olympic EFI requirement - stop driver load if FW is already running.
     * This needs to be done here, before pcie_watchdog_reset, because
     * pcie_watchdog_reset will put the ARM back into the halt state
     */
    if (!dhdpcie_is_arm_halted(bus)) {
        DHD_ERROR(("%s: ARM is not halted, FW is already running! Abort.\n",
            __FUNCTION__));
        goto fail;
    }

    BCM_REFERENCE(dongle_isolation);

    /* For inbuilt drivers the PCIe clkreq will be done by the RC,
     * so do not do clkreq from dhd
     */
    if (dhd_download_fw_on_driverload) {
        /* Enable CLKREQ# */
        dhdpcie_clkreq(bus->osh, 1, 1);
    }

    /*
     * bus->dhd will be NULL if this is called from dhd_bus_attach, so reset
     * without checking the dongle_isolation flag; but if it is called via some
     * other path like quiesce FLR, then watchdog_reset should be called based
     * on the dongle_isolation flag.
     */
    if (bus->dhd == NULL) {
        /* dhd_attach has not yet happened, do watchdog reset */
        dongle_isolation = FALSE;
    } else {
        dongle_isolation = bus->dhd->dongle_isolation;
    }

#ifndef DHD_SKIP_DONGLE_RESET_IN_ATTACH
    /*
     * Issue a CC watchdog to reset all the cores on the chip - similar to rmmod dhd.
     * This is required to avoid spurious interrupts to the host and to bring the
     * dongle back to a sane state (on host soft-reboot / watchdog-reboot).
     */
    if (dongle_isolation == FALSE) {
        dhdpcie_dongle_reset(bus);
    }
#endif /* !DHD_SKIP_DONGLE_RESET_IN_ATTACH */

    /* need to set the force_bt_quiesce flag here
     * before calling dhdpcie_dongle_flr_or_pwr_toggle
     */
    bus->force_bt_quiesce = TRUE;
    /*
     * For buscorerev 66 and later, F0 FLR is done independently of F1,
     * so BT quiesce is not needed.
     */
    if (bus->sih->buscorerev >= 66) {
        bus->force_bt_quiesce = FALSE;
    }

    dhdpcie_dongle_flr_or_pwr_toggle(bus);

#ifdef CHIPS_CUSTOMER_HW6
    dhdpcie_bus_mpu_disable(bus);
#endif /* CHIPS_CUSTOMER_HW6 */

    si_setcore(bus->sih, PCIE2_CORE_ID, 0);
    sbpcieregs = (sbpcieregs_t*)(bus->regs);

    /* WAR where the BAR1 window may not be sized properly */
    W_REG(osh, &sbpcieregs->configaddr, 0x4e0);
    val = R_REG(osh, &sbpcieregs->configdata);
    W_REG(osh, &sbpcieregs->configdata, val);

    if (si_setcore(bus->sih, SYSMEM_CORE_ID, 0)) {
        /* Only set the dongle RAMSIZE to the default value when the BMC-vs-ARM
         * usage split of SYSMEM has not been adjusted.
         */
        if (!bus->ramsize_adjusted) {
            if (!(bus->orig_ramsize = si_sysmem_size(bus->sih))) {
                DHD_ERROR(("%s: failed to find SYSMEM memory!\n", __FUNCTION__));
                goto fail;
            }
            switch ((uint16)bus->sih->chip) {
#ifdef CHIPS_CUSTOMER_HW6
            case BCM4368_CHIP_ID:
                bus->dongle_ram_base = CA7_4368_RAM_BASE;
                bus->orig_ramsize = 0x1c0000;
                break;
            CASE_BCM4367_CHIP:
                bus->dongle_ram_base = CA7_4367_RAM_BASE;
                bus->orig_ramsize = 0x1e0000;
                break;
#endif /* CHIPS_CUSTOMER_HW6 */
            default:
                /* also populate base address */
                bus->dongle_ram_base = CA7_4365_RAM_BASE;
                bus->orig_ramsize = 0x1c0000; /* Reserve 1.75MB for CA7 */
                break;
            }
        }
    } else if (!si_setcore(bus->sih, ARMCR4_CORE_ID, 0)) {
        if (!(bus->orig_ramsize = si_socram_size(bus->sih))) {
            DHD_ERROR(("%s: failed to find SOCRAM memory!\n", __FUNCTION__));
            goto fail;
        }
    } else {
        /* cr4 has a different way to find the RAM size from TCMs */
        if (!(bus->orig_ramsize = si_tcm_size(bus->sih))) {
            DHD_ERROR(("%s: failed to find CR4-TCM memory!\n", __FUNCTION__));
            goto fail;
        }
        /* also populate base address */
        switch ((uint16)bus->sih->chip) {
        case BCM4339_CHIP_ID:
        case BCM4335_CHIP_ID:
            bus->dongle_ram_base = CR4_4335_RAM_BASE;
            break;
        case BCM4358_CHIP_ID:
        case BCM4354_CHIP_ID:
        case BCM43567_CHIP_ID:
        case BCM43569_CHIP_ID:
        case BCM4350_CHIP_ID:
        case BCM43570_CHIP_ID:
            bus->dongle_ram_base = CR4_4350_RAM_BASE;
            break;
        case BCM4360_CHIP_ID:
            bus->dongle_ram_base = CR4_4360_RAM_BASE;
            break;
        case BCM4364_CHIP_ID:
            bus->dongle_ram_base = CR4_4364_RAM_BASE;
            break;
        CASE_BCM4345_CHIP:
            bus->dongle_ram_base = (bus->sih->chiprev < 6) /* changed at 4345C0 */
                ? CR4_4345_LT_C0_RAM_BASE : CR4_4345_GE_C0_RAM_BASE;
            break;
        CASE_BCM43602_CHIP:
            bus->dongle_ram_base = CR4_43602_RAM_BASE;
            break;
        case BCM4349_CHIP_GRPID:
            /* RAM base changed from 4349c0 (revid=9) onwards */
            bus->dongle_ram_base = ((bus->sih->chiprev < 9) ?
                CR4_4349_RAM_BASE : CR4_4349_RAM_BASE_FROM_REV_9);
            break;
        case BCM4347_CHIP_ID:
        case BCM4357_CHIP_ID:
        case BCM4361_CHIP_ID:
            bus->dongle_ram_base = CR4_4347_RAM_BASE;
            break;
        case BCM4362_CHIP_ID:
            bus->dongle_ram_base = CR4_4362_RAM_BASE;
            break;
        case BCM43751_CHIP_ID:
            bus->dongle_ram_base = CR4_43751_RAM_BASE;
            break;
        case BCM4373_CHIP_ID:
            bus->dongle_ram_base = CR4_4373_RAM_BASE;
            break;
#ifdef CHIPS_CUSTOMER_HW6
        case BCM4378_CHIP_GRPID:
            bus->dongle_ram_base = CR4_4378_RAM_BASE;
            break;
        case BCM4377_CHIP_ID:
            bus->dongle_ram_base = CR4_4377_RAM_BASE;
            break;
#endif /* CHIPS_CUSTOMER_HW6 */
        case BCM4375_CHIP_ID:
        case BCM4369_CHIP_ID:
            bus->dongle_ram_base = CR4_4369_RAM_BASE;
            break;
        default:
            bus->dongle_ram_base = 0;
            DHD_ERROR(("%s: WARNING: Using default ram base at 0x%x\n",
                __FUNCTION__, bus->dongle_ram_base));
        }
    }
    bus->ramsize = bus->orig_ramsize;
    if (dhd_dongle_memsize)
        dhdpcie_bus_dongle_setmemsize(bus, dhd_dongle_memsize);

    if (bus->ramsize > DONGLE_TCM_MAP_SIZE) {
        DHD_ERROR(("%s : invalid ramsize %d(0x%x) is returned from dongle\n",
            __FUNCTION__, bus->ramsize, bus->ramsize));
        goto fail;
    }

    DHD_ERROR(("DHD: dongle ram size is set to %d(orig %d) at 0x%x\n",
        bus->ramsize, bus->orig_ramsize, bus->dongle_ram_base));

    bus->srmemsize = si_socram_srmem_size(bus->sih);

    dhdpcie_bus_intr_init(bus);

    /* Set the poll and/or interrupt flags */
    bus->intr = (bool)dhd_intr;

#ifdef DHD_DISABLE_ASPM
    dhd_bus_aspm_enable_rc_ep(bus, FALSE);
#endif /* DHD_DISABLE_ASPM */

    bus->idma_enabled = TRUE;
    bus->ifrm_enabled = TRUE;
    DHD_TRACE(("%s: EXIT: SUCCESS\n", __FUNCTION__));

    if (MULTIBP_ENAB(bus->sih)) {
        dhd_bus_pcie_pwr_req_clear_nolock(bus);

        /*
         * One-time clearing of the Common Power Domain, since the HW default is set.
         * This needs to be after FLR, because FLR resets PCIe enum back to HW defaults
         * for 4378B0 (rev 68).
         * On 4378A0 (rev 66), PCIe enum reset is disabled due to CRWLPCIEGEN2-672
         */
        si_srpwr_request(bus->sih, SRPWR_DMN0_PCIE_MASK, 0);

        /*
         * WAR to fix ARM cold boot;
         * asserting the WL domain in DAR helps, but not enum
         */
        if (bus->sih->buscorerev >= 68) {
            dhd_bus_pcie_pwr_req_wl_domain(bus, TRUE);
        }
    }

    return 0;

fail:
    if (bus->sih != NULL) {
        if (MULTIBP_ENAB(bus->sih)) {
            dhd_bus_pcie_pwr_req_clear_nolock(bus);
        }
        /* for EFI, even if there is an error, load still succeeds,
         * so si_detach should not be called here; it is called during unload
         */
        si_detach(bus->sih);
        bus->sih = NULL;
    }
    DHD_TRACE(("%s: EXIT: FAILURE\n", __FUNCTION__));
    return -1;
}

int
dhpcie_bus_unmask_interrupt(dhd_bus_t *bus)
{
    dhdpcie_bus_cfg_write_dword(bus, PCIIntmask, 4, I_MB);
    return 0;
}

int
dhpcie_bus_mask_interrupt(dhd_bus_t *bus)
{
    dhdpcie_bus_cfg_write_dword(bus, PCIIntmask, 4, 0x0);
    return 0;
}

/* Non-atomic function, caller should hold the appropriate lock */
void
dhdpcie_bus_intr_enable(dhd_bus_t *bus)
{
	DHD_TRACE(("%s Enter\n", __FUNCTION__));
	if (bus) {
		if (bus->sih && !bus->is_linkdown) {
			/* Skip after receiving D3 ACK */
			if (bus->bus_low_power_state == DHD_BUS_D3_ACK_RECIEVED) {
				return;
			}
			if ((bus->sih->buscorerev == 2) || (bus->sih->buscorerev == 6) ||
				(bus->sih->buscorerev == 4)) {
				dhpcie_bus_unmask_interrupt(bus);
			} else {
#if defined(BCMINTERNAL) && defined(DHD_DBG_DUMP)
				dhd_bus_mmio_trace(bus, bus->pcie_mailbox_mask,
					bus->def_intmask, TRUE);
#endif
				si_corereg(bus->sih, bus->sih->buscoreidx, bus->pcie_mailbox_mask,
					bus->def_intmask, bus->def_intmask);
			}
		}
	}
	DHD_TRACE(("%s Exit\n", __FUNCTION__));
}

/* Non-atomic function, caller should hold the appropriate lock */
void
dhdpcie_bus_intr_disable(dhd_bus_t *bus)
{
	DHD_TRACE(("%s Enter\n", __FUNCTION__));
	if (bus && bus->sih && !bus->is_linkdown) {
		/* Skip after receiving D3 ACK */
		if (bus->bus_low_power_state == DHD_BUS_D3_ACK_RECIEVED) {
			return;
		}
		if ((bus->sih->buscorerev == 2) || (bus->sih->buscorerev == 6) ||
			(bus->sih->buscorerev == 4)) {
			dhpcie_bus_mask_interrupt(bus);
		} else {
			si_corereg(bus->sih, bus->sih->buscoreidx, bus->pcie_mailbox_mask,
				bus->def_intmask, 0);
		}
	}
	DHD_TRACE(("%s Exit\n", __FUNCTION__));
}

/*
 * dhdpcie_advertise_bus_cleanup advertises to the other bus user contexts
 * (Tx, Rx, IOVAR, WD, etc.) that cleanup is in progress, and waits for those
 * contexts to exit gracefully. Before marking the bus busy, every bus user
 * context checks whether busstate is DHD_BUS_DOWN or DHD_BUS_DOWN_IN_PROGRESS;
 * if so, it exits right there without marking dhd_bus_busy_state as BUSY.
 * (An illustrative sketch of this protocol follows the function below.)
 */
void
dhdpcie_advertise_bus_cleanup(dhd_pub_t *dhdp)
{
	unsigned long flags;
	int timeleft;

#ifdef DHD_PCIE_RUNTIMEPM
	dhdpcie_runtime_bus_wake(dhdp, TRUE, dhdpcie_advertise_bus_cleanup);
#endif /* DHD_PCIE_RUNTIMEPM */

	dhdp->dhd_watchdog_ms_backup = dhd_watchdog_ms;
	if (dhdp->dhd_watchdog_ms_backup) {
		DHD_ERROR(("%s: Disabling wdtick before dhd deinit\n",
			__FUNCTION__));
		dhd_os_wd_timer(dhdp, 0);
	}
	if (dhdp->busstate != DHD_BUS_DOWN) {
		DHD_GENERAL_LOCK(dhdp, flags);
		dhdp->busstate = DHD_BUS_DOWN_IN_PROGRESS;
		DHD_GENERAL_UNLOCK(dhdp, flags);
	}

	timeleft = dhd_os_busbusy_wait_negation(dhdp, &dhdp->dhd_bus_busy_state);
	if ((timeleft == 0) || (timeleft == 1)) {
		DHD_ERROR(("%s : Timeout due to dhd_bus_busy_state=0x%x\n",
			__FUNCTION__, dhdp->dhd_bus_busy_state));
		ASSERT(0);
	}
	return;
}
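
/*
 * Illustrative sketch (not part of the driver): how a bus user context is
 * expected to honour the cleanup advertisement above. The example_bus_user()
 * name and the choice of the IN_TX busy bit are placeholders; the real users
 * are the Tx, Rx, IOVAR and WD paths.
 */
#if 0
static int
example_bus_user(dhd_pub_t *dhdp)
{
	unsigned long flags;

	DHD_GENERAL_LOCK(dhdp, flags);
	/* Exit early instead of marking the bus busy once cleanup has started */
	if (dhdp->busstate == DHD_BUS_DOWN ||
		dhdp->busstate == DHD_BUS_DOWN_IN_PROGRESS) {
		DHD_GENERAL_UNLOCK(dhdp, flags);
		return BCME_NOTUP;
	}
	DHD_BUS_BUSY_SET_IN_TX(dhdp);	/* cleanup must now wait for this context */
	DHD_GENERAL_UNLOCK(dhdp, flags);

	/* ... use the bus ... */

	DHD_GENERAL_LOCK(dhdp, flags);
	DHD_BUS_BUSY_CLEAR_IN_TX(dhdp);
	dhd_os_busbusy_wake(dhdp);	/* lets dhd_os_busbusy_wait_negation() complete */
	DHD_GENERAL_UNLOCK(dhdp, flags);
	return BCME_OK;
}
#endif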

static void
dhdpcie_bus_remove_prep(dhd_bus_t *bus)
{
	unsigned long flags;
	DHD_TRACE(("%s Enter\n", __FUNCTION__));

	DHD_GENERAL_LOCK(bus->dhd, flags);
	DHD_ERROR(("%s: making DHD_BUS_DOWN\n", __FUNCTION__));
	bus->dhd->busstate = DHD_BUS_DOWN;
	DHD_GENERAL_UNLOCK(bus->dhd, flags);

	dhd_os_sdlock(bus->dhd);

	if (bus->sih && !bus->dhd->dongle_isolation) {
		if (PCIE_RELOAD_WAR_ENAB(bus->sih->buscorerev)) {
			dhd_bus_pcie_pwr_req_reload_war(bus);
		}

		/* WAR for the insmod-after-rmmod failure seen on Brix Android */
#if !defined(OEM_ANDROID) && !defined(ANDROID)
		/* HW4347-909 */
		if ((bus->sih->buscorerev == 19) || (bus->sih->buscorerev == 23)) {
			/* Set PCIE TRefUp time to 100us for 4347 */
			pcie_set_trefup_time_100us(bus->sih);
		}

		/* Disable fast lpo from 4347 onwards.
		 * For 4378/4387, do not disable fast lpo: it is always enabled there,
		 * and disabling it causes insmod/rmmod reload failure.
		 */
		if ((PMUREV(bus->sih->pmurev) > 31) &&
			(bus->sih->buscorerev != 66) &&
			(bus->sih->buscorerev != 68) &&
			(bus->sih->buscorerev != 69) &&
			(bus->sih->buscorerev != 70)) {
			si_pmu_fast_lpo_disable(bus->sih);
		}
#endif /* !OEM_ANDROID && !ANDROID */

		/* If the PCIe link is down, a watchdog reset
		 * should not be done, as it may hang.
		 */
		if (!bus->is_linkdown) {
#ifndef DHD_SKIP_DONGLE_RESET_IN_ATTACH
			/* For EFI, depending on the BT-over-PCIe mode,
			 * we either power-toggle or do an F0 FLR
			 * from dhdpcie_bus_release_dongle, so there is no
			 * need to reset the dongle from here.
			 */
			dhdpcie_dongle_reset(bus);
#endif /* !DHD_SKIP_DONGLE_RESET_IN_ATTACH */
		}

		bus->dhd->is_pcie_watchdog_reset = TRUE;
	}

	dhd_os_sdunlock(bus->dhd);

	DHD_TRACE(("%s Exit\n", __FUNCTION__));
}

void
dhd_init_bus_lock(dhd_bus_t *bus)
{
	if (!bus->bus_lock) {
		bus->bus_lock = dhd_os_spin_lock_init(bus->dhd->osh);
	}
}

void
dhd_deinit_bus_lock(dhd_bus_t *bus)
{
	if (bus->bus_lock) {
		dhd_os_spin_lock_deinit(bus->dhd->osh, bus->bus_lock);
		bus->bus_lock = NULL;
	}
}

void
dhd_init_backplane_access_lock(dhd_bus_t *bus)
{
	if (!bus->backplane_access_lock) {
		bus->backplane_access_lock = dhd_os_spin_lock_init(bus->dhd->osh);
	}
}

void
dhd_deinit_backplane_access_lock(dhd_bus_t *bus)
{
	if (bus->backplane_access_lock) {
		dhd_os_spin_lock_deinit(bus->dhd->osh, bus->backplane_access_lock);
		bus->backplane_access_lock = NULL;
	}
}

/** Detach and free everything */
void
dhdpcie_bus_release(dhd_bus_t *bus)
{
	bool dongle_isolation = FALSE;
	osl_t *osh = NULL;
	unsigned long flags_bus;

	DHD_TRACE(("%s: Enter\n", __FUNCTION__));

	if (bus) {
		osh = bus->osh;
		ASSERT(osh);

		if (bus->dhd) {
#if defined(DEBUGGER) || defined(DHD_DSCOPE)
			debugger_close();
#endif /* DEBUGGER || DHD_DSCOPE */
			dhdpcie_advertise_bus_cleanup(bus->dhd);
			dongle_isolation = bus->dhd->dongle_isolation;
			bus->dhd->is_pcie_watchdog_reset = FALSE;
			dhdpcie_bus_remove_prep(bus);

			if (bus->intr) {
				DHD_BUS_LOCK(bus->bus_lock, flags_bus);
				dhdpcie_bus_intr_disable(bus);
				DHD_BUS_UNLOCK(bus->bus_lock, flags_bus);
				dhdpcie_free_irq(bus);
			}
			dhd_deinit_bus_lock(bus);
			dhd_deinit_backplane_access_lock(bus);
			/**
			 * dhdpcie_bus_release_dongle frees the bus->sih handle, which is
			 * needed to access dongle registers.
			 * dhd_detach communicates with the dongle to delete flowrings etc.,
			 * so dhdpcie_bus_release_dongle must be called only after dhd_detach.
			 */
			/* Make sure we are not leaving the bus asleep on the other side */
			dhd_bus_l1ss_enable_rc_ep(bus, FALSE);
			dhd_detach(bus->dhd);
			dhdpcie_bus_release_dongle(bus, osh, dongle_isolation, TRUE);
			dhd_free(bus->dhd);
			bus->dhd = NULL;
		}
		/* Unmap the regs and tcm here */
		if (bus->regs) {
			dhdpcie_bus_reg_unmap(osh, bus->regs, DONGLE_REG_MAP_SIZE);
			bus->regs = NULL;
		}
		if (bus->tcm) {
			dhdpcie_bus_reg_unmap(osh, bus->tcm, DONGLE_TCM_MAP_SIZE);
			bus->tcm = NULL;
		}

		dhdpcie_bus_release_malloc(bus, osh);
		/* Detach pcie shared structure */
		if (bus->pcie_sh) {
			MFREE(osh, bus->pcie_sh, sizeof(pciedev_shared_t));
			bus->pcie_sh = NULL;
		}

		if (bus->console.buf != NULL) {
			MFREE(osh, bus->console.buf, bus->console.bufsize);
		}

		/* Finally free bus info */
		MFREE(osh, bus, sizeof(dhd_bus_t));
		g_dhd_bus = NULL;
	}

	DHD_TRACE(("%s: Exit\n", __FUNCTION__));
} /* dhdpcie_bus_release */

void
dhdpcie_bus_release_dongle(dhd_bus_t *bus, osl_t *osh, bool dongle_isolation, bool reset_flag)
{
	DHD_TRACE(("%s: Enter bus->dhd %p bus->dhd->dongle_reset %d \n", __FUNCTION__,
		bus->dhd, bus->dhd->dongle_reset));

	if ((bus->dhd && bus->dhd->dongle_reset) && reset_flag) {
		DHD_TRACE(("%s Exit\n", __FUNCTION__));
		return;
	}

	if (bus->is_linkdown) {
		DHD_ERROR(("%s : Skip release dongle due to linkdown \n", __FUNCTION__));
		return;
	}

	if (bus->sih) {
		if (!dongle_isolation &&
			(bus->dhd && !bus->dhd->is_pcie_watchdog_reset)) {
			dhdpcie_dongle_reset(bus);
		}

		dhdpcie_dongle_flr_or_pwr_toggle(bus);

		if (bus->ltrsleep_on_unload) {
			si_corereg(bus->sih, bus->sih->buscoreidx,
				OFFSETOF(sbpcieregs_t, u.pcie2.ltr_state), ~0, 0);
		}

		if (bus->sih->buscorerev == 13)
			pcie_serdes_iddqdisable(bus->osh, bus->sih,
				(sbpcieregs_t *) bus->regs);

		/* For built-in drivers the PCIe clkreq will be handled by the RC,
		 * so do not issue clkreq from dhd.
		 */
		if (dhd_download_fw_on_driverload) {
			/* Disable CLKREQ# */
			dhdpcie_clkreq(bus->osh, 1, 0);
		}

		if (bus->sih != NULL) {
			si_detach(bus->sih);
			bus->sih = NULL;
		}
		if (bus->vars && bus->varsz)
			MFREE(osh, bus->vars, bus->varsz);
		bus->vars = NULL;
	}

	DHD_TRACE(("%s Exit\n", __FUNCTION__));
}

uint32
dhdpcie_bus_cfg_read_dword(dhd_bus_t *bus, uint32 addr, uint32 size)
{
	uint32 data = OSL_PCI_READ_CONFIG(bus->osh, addr, size);
	return data;
}

/** 32 bit config write */
void
dhdpcie_bus_cfg_write_dword(dhd_bus_t *bus, uint32 addr, uint32 size, uint32 data)
{
	OSL_PCI_WRITE_CONFIG(bus->osh, addr, size, data);
}

void
dhdpcie_bus_cfg_set_bar0_win(dhd_bus_t *bus, uint32 data)
{
	OSL_PCI_WRITE_CONFIG(bus->osh, PCI_BAR0_WIN, 4, data);
}

void
dhdpcie_bus_dongle_setmemsize(struct dhd_bus *bus, int mem_size)
{
	int32 min_size = DONGLE_MIN_MEMSIZE;
	/* Restrict the memsize to the user-specified limit */
	DHD_ERROR(("user: Restrict the dongle ram size to %d, min accepted %d\n",
		dhd_dongle_memsize, min_size));
	if ((dhd_dongle_memsize > min_size) &&
		(dhd_dongle_memsize < (int32)bus->orig_ramsize))
		bus->ramsize = dhd_dongle_memsize;
}

void
dhdpcie_bus_release_malloc(dhd_bus_t *bus, osl_t *osh)
{
	DHD_TRACE(("%s: Enter\n", __FUNCTION__));

	if (bus->dhd && bus->dhd->dongle_reset)
		return;

	if (bus->vars && bus->varsz) {
		MFREE(osh, bus->vars, bus->varsz);
		bus->vars = NULL;
	}

	DHD_TRACE(("%s: Exit\n", __FUNCTION__));
	return;
}

/** Stop bus module: clear pending frames, disable data flow */
void dhd_bus_stop(struct dhd_bus *bus, bool enforce_mutex)
{
	unsigned long flags, flags_bus;

	DHD_TRACE(("%s: Enter\n", __FUNCTION__));

	if (!bus->dhd)
		return;

	if (bus->dhd->busstate == DHD_BUS_DOWN) {
		DHD_ERROR(("%s: already down by net_dev_reset\n", __FUNCTION__));
		goto done;
	}

	DHD_DISABLE_RUNTIME_PM(bus->dhd);

	DHD_GENERAL_LOCK(bus->dhd, flags);
	DHD_ERROR(("%s: making DHD_BUS_DOWN\n", __FUNCTION__));
	bus->dhd->busstate = DHD_BUS_DOWN;
	DHD_GENERAL_UNLOCK(bus->dhd, flags);

#ifdef DHD_PCIE_NATIVE_RUNTIMEPM
	atomic_set(&bus->dhd->block_bus, TRUE);
#endif /* DHD_PCIE_NATIVE_RUNTIMEPM */

	DHD_BUS_LOCK(bus->bus_lock, flags_bus);
	dhdpcie_bus_intr_disable(bus);
	DHD_BUS_UNLOCK(bus->bus_lock, flags_bus);

	if (!bus->is_linkdown) {
		uint32 status;
		status = dhdpcie_bus_cfg_read_dword(bus, PCIIntstatus, 4);
		dhdpcie_bus_cfg_write_dword(bus, PCIIntstatus, 4, status);
	}

	if (!dhd_download_fw_on_driverload) {
		dhd_dpc_kill(bus->dhd);
	}

#ifdef DHD_PCIE_NATIVE_RUNTIMEPM
	pm_runtime_disable(dhd_bus_to_dev(bus));
	pm_runtime_set_suspended(dhd_bus_to_dev(bus));
	pm_runtime_enable(dhd_bus_to_dev(bus));
#endif /* DHD_PCIE_NATIVE_RUNTIMEPM */

	/* Clear rx control and wake any waiters */
	dhd_os_set_ioctl_resp_timeout(IOCTL_DISABLE_TIMEOUT);
	dhd_wakeup_ioctl_event(bus->dhd, IOCTL_RETURN_ON_BUS_STOP);

done:
	return;
}

/**
 * Watchdog timer function.
 * @param dhd   Represents a specific hardware (dongle) instance that this DHD manages
 */
bool dhd_bus_watchdog(dhd_pub_t *dhd)
{
	unsigned long flags;
	dhd_bus_t *bus = dhd->bus;

	DHD_GENERAL_LOCK(dhd, flags);
	if (DHD_BUS_CHECK_DOWN_OR_DOWN_IN_PROGRESS(dhd) ||
		DHD_BUS_CHECK_SUSPEND_OR_SUSPEND_IN_PROGRESS(dhd)) {
		DHD_GENERAL_UNLOCK(dhd, flags);
		return FALSE;
	}
	DHD_BUS_BUSY_SET_IN_WD(dhd);
	DHD_GENERAL_UNLOCK(dhd, flags);

#ifdef DHD_PCIE_RUNTIMEPM
	dhdpcie_runtime_bus_wake(dhd, TRUE, __builtin_return_address(0));
#endif /* DHD_PCIE_RUNTIMEPM */

	/* Poll for console output periodically */
	if (dhd->busstate == DHD_BUS_DATA &&
		dhd->dhd_console_ms != 0 &&
		bus->bus_low_power_state == DHD_BUS_NO_LOW_POWER_STATE) {
		bus->console.count += dhd_watchdog_ms;
		if (bus->console.count >= dhd->dhd_console_ms) {
			bus->console.count -= dhd->dhd_console_ms;

			if (MULTIBP_ENAB(bus->sih)) {
				dhd_bus_pcie_pwr_req(bus);
			}

			/* Make sure backplane clock is on */
			if (dhdpcie_bus_readconsole(bus) < 0) {
				dhd->dhd_console_ms = 0; /* On error, stop trying */
			}

			if (MULTIBP_ENAB(bus->sih)) {
				dhd_bus_pcie_pwr_req_clear(bus);
			}
		}
	}

	DHD_GENERAL_LOCK(dhd, flags);
	DHD_BUS_BUSY_CLEAR_IN_WD(dhd);
	dhd_os_busbusy_wake(dhd);
	DHD_GENERAL_UNLOCK(dhd, flags);

	return TRUE;
} /* dhd_bus_watchdog */
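
/*
 * Console poll cadence, worked example (illustrative values): with
 * dhd_watchdog_ms == 10 and dhd_console_ms == 250, console.count above grows
 * by 10 on every watchdog tick and dhdpcie_bus_readconsole() runs on every
 * 25th tick. Subtracting dhd_console_ms (rather than zeroing the counter)
 * keeps the long-run poll rate accurate when the two periods do not divide
 * evenly.
 */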

#if defined(SUPPORT_MULTIPLE_REVISION)
static int concate_revision_bcm4358(dhd_bus_t *bus, char *fw_path, char *nv_path)
{
	uint32 chiprev;
#if defined(SUPPORT_MULTIPLE_CHIPS)
	char chipver_tag[20] = "_4358";
#else
	char chipver_tag[10] = {0, };
#endif /* SUPPORT_MULTIPLE_CHIPS */

	chiprev = dhd_bus_chiprev(bus);
	if (chiprev == 0) {
		DHD_ERROR(("----- CHIP 4358 A0 -----\n"));
		strcat(chipver_tag, "_a0");
	} else if (chiprev == 1) {
		DHD_ERROR(("----- CHIP 4358 A1 -----\n"));
#if defined(SUPPORT_MULTIPLE_CHIPS) || defined(SUPPORT_MULTIPLE_MODULE_CIS)
		strcat(chipver_tag, "_a1");
#endif /* defined(SUPPORT_MULTIPLE_CHIPS) || defined(SUPPORT_MULTIPLE_MODULE_CIS) */
	} else if (chiprev == 3) {
		DHD_ERROR(("----- CHIP 4358 A3 -----\n"));
#if defined(SUPPORT_MULTIPLE_CHIPS)
		strcat(chipver_tag, "_a3");
#endif /* SUPPORT_MULTIPLE_CHIPS */
	} else {
		DHD_ERROR(("----- Unknown chip version, ver=%x -----\n", chiprev));
	}

	strcat(fw_path, chipver_tag);

#if defined(SUPPORT_MULTIPLE_MODULE_CIS) && defined(USE_CID_CHECK)
	if (chiprev == 1 || chiprev == 3) {
		int ret = dhd_check_module_b85a();
		if ((chiprev == 1) && (ret < 0)) {
			memset(chipver_tag, 0x00, sizeof(chipver_tag));
			strcat(chipver_tag, "_b85");
			strcat(chipver_tag, "_a1");
		}
	}
	DHD_ERROR(("%s: chipver_tag %s \n", __FUNCTION__, chipver_tag));
#endif /* defined(SUPPORT_MULTIPLE_MODULE_CIS) && defined(USE_CID_CHECK) */

#if defined(SUPPORT_MULTIPLE_BOARD_REV)
	if (system_rev >= 10) {
		DHD_ERROR(("----- Board Rev [%d]-----\n", system_rev));
		strcat(chipver_tag, "_r10");
	}
#endif /* SUPPORT_MULTIPLE_BOARD_REV */
	strcat(nv_path, chipver_tag);

	return 0;
}

static int concate_revision_bcm4359(dhd_bus_t *bus, char *fw_path, char *nv_path)
{
	uint32 chip_ver;
	char chipver_tag[10] = {0, };
#if defined(SUPPORT_MULTIPLE_MODULE_CIS) && defined(USE_CID_CHECK) && \
	defined(SUPPORT_BCM4359_MIXED_MODULES)
	int module_type = -1;
#endif /* SUPPORT_MULTIPLE_MODULE_CIS && USE_CID_CHECK && SUPPORT_BCM4359_MIXED_MODULES */

	chip_ver = bus->sih->chiprev;
	if (chip_ver == 4) {
		DHD_ERROR(("----- CHIP 4359 B0 -----\n"));
		strncat(chipver_tag, "_b0", strlen("_b0"));
	} else if (chip_ver == 5) {
		DHD_ERROR(("----- CHIP 4359 B1 -----\n"));
		strncat(chipver_tag, "_b1", strlen("_b1"));
	} else if (chip_ver == 9) {
		DHD_ERROR(("----- CHIP 4359 C0 -----\n"));
		strncat(chipver_tag, "_c0", strlen("_c0"));
	} else {
		DHD_ERROR(("----- Unknown chip version, ver=%x -----\n", chip_ver));
		return -1;
	}

#if defined(SUPPORT_MULTIPLE_MODULE_CIS) && defined(USE_CID_CHECK) && \
	defined(SUPPORT_BCM4359_MIXED_MODULES)
	module_type = dhd_check_module_b90();

	switch (module_type) {
		case BCM4359_MODULE_TYPE_B90B:
			strcat(fw_path, chipver_tag);
			break;
		case BCM4359_MODULE_TYPE_B90S:
		default:
			/*
			 * If the .cid.info file does not exist, force-load the
			 * B90S FW for the initial MFG boot-up.
			 */
			if (chip_ver == 5) {
				strncat(fw_path, "_b90s", strlen("_b90s"));
			}
			strcat(fw_path, chipver_tag);
			strcat(nv_path, chipver_tag);
			break;
	}
#else /* SUPPORT_MULTIPLE_MODULE_CIS && USE_CID_CHECK && SUPPORT_BCM4359_MIXED_MODULES */
	strcat(fw_path, chipver_tag);
	strcat(nv_path, chipver_tag);
#endif /* SUPPORT_MULTIPLE_MODULE_CIS && USE_CID_CHECK && SUPPORT_BCM4359_MIXED_MODULES */

	return 0;
}

#if defined(USE_CID_CHECK)

#define MAX_EXTENSION			20
#define MODULE_BCM4361_INDEX		3
#define CHIP_REV_A0			1
#define CHIP_REV_A1			2
#define CHIP_REV_B0			3
#define CHIP_REV_B1			4
#define CHIP_REV_B2			5
#define CHIP_REV_C0			6
#define BOARD_TYPE_EPA			0x080f
#define BOARD_TYPE_IPA			0x0827
#define BOARD_TYPE_IPA_OLD		0x081a
#define DEFAULT_CIDINFO_FOR_EPA		"r00a_e000_a0_ePA"
#define DEFAULT_CIDINFO_FOR_IPA		"r00a_e000_a0_iPA"
#define DEFAULT_CIDINFO_FOR_A1		"r01a_e30a_a1"
#define DEFAULT_CIDINFO_FOR_B0		"r01i_e32_b0"
#define MAX_VID_LEN			8
#define CIS_TUPLE_HDR_LEN		2
#if defined(BCM4361_CHIP)
#define CIS_TUPLE_START_ADDRESS		0x18011110
#define CIS_TUPLE_END_ADDRESS		0x18011167
#elif defined(BCM4375_CHIP)
#define CIS_TUPLE_START_ADDRESS		0x18011120
#define CIS_TUPLE_END_ADDRESS		0x18011177
#endif /* defined(BCM4361_CHIP) */
#define CIS_TUPLE_MAX_COUNT		(uint32)((CIS_TUPLE_END_ADDRESS - CIS_TUPLE_START_ADDRESS\
						+ 1) / sizeof(uint32))
#define CIS_TUPLE_TAG_START		0x80
#define CIS_TUPLE_TAG_VENDOR		0x81
#define CIS_TUPLE_TAG_BOARDTYPE		0x1b
#define CIS_TUPLE_TAG_LENGTH		1
#define NVRAM_FEM_MURATA		"_murata"
#define CID_FEM_MURATA			"_mur_"

typedef struct cis_tuple_format {
	uint8	id;
	uint8	len;	/* total length of tag and data */
	uint8	tag;
	uint8	data[1];
} cis_tuple_format_t;
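
/*
 * Example of a vendor tuple as laid out in the OTP shadow (byte values are
 * made up for illustration; the real contents are device specific):
 *
 *   0x80  0x07  0x81  'S' 'E' 'M' '1' '2' '3'
 *   id    len   tag   |-------- data --------|
 *
 * len counts the tag byte plus the data, so the VID payload here is
 * len - CIS_TUPLE_TAG_LENGTH = 6 bytes.
 */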

typedef struct {
	char cid_ext[MAX_EXTENSION];
	char nvram_ext[MAX_EXTENSION];
	char fw_ext[MAX_EXTENSION];
} naming_info_t;

naming_info_t bcm4361_naming_table[] = {
	{ {""}, {""}, {""} },
	{ {"r00a_e000_a0_ePA"}, {"_a0_ePA"}, {"_a0_ePA"} },
	{ {"r00a_e000_a0_iPA"}, {"_a0"}, {"_a1"} },
	{ {"r01a_e30a_a1"}, {"_r01a_a1"}, {"_a1"} },
	{ {"r02a_e30a_a1"}, {"_r02a_a1"}, {"_a1"} },
	{ {"r02c_e30a_a1"}, {"_r02c_a1"}, {"_a1"} },
	{ {"r01d_e31_b0"}, {"_r01d_b0"}, {"_b0"} },
	{ {"r01f_e31_b0"}, {"_r01f_b0"}, {"_b0"} },
	{ {"r02g_e31_b0"}, {"_r02g_b0"}, {"_b0"} },
	{ {"r01h_e32_b0"}, {"_r01h_b0"}, {"_b0"} },
	{ {"r01i_e32_b0"}, {"_r01i_b0"}, {"_b0"} },
	{ {"r02j_e32_b0"}, {"_r02j_b0"}, {"_b0"} },
	{ {"r012_1kl_a1"}, {"_r012_a1"}, {"_a1"} },
	{ {"r013_1kl_b0"}, {"_r013_b0"}, {"_b0"} },
	{ {"r013_1kl_b0"}, {"_r013_b0"}, {"_b0"} },
	{ {"r014_1kl_b0"}, {"_r014_b0"}, {"_b0"} },
	{ {"r015_1kl_b0"}, {"_r015_b0"}, {"_b0"} },
	{ {"r020_1kl_b0"}, {"_r020_b0"}, {"_b0"} },
	{ {"r021_1kl_b0"}, {"_r021_b0"}, {"_b0"} },
	{ {"r022_1kl_b0"}, {"_r022_b0"}, {"_b0"} },
	{ {"r023_1kl_b0"}, {"_r023_b0"}, {"_b0"} },
	{ {"r024_1kl_b0"}, {"_r024_b0"}, {"_b0"} },
	{ {"r030_1kl_b0"}, {"_r030_b0"}, {"_b0"} },
	{ {"r031_1kl_b0"}, {"_r030_b0"}, {"_b0"} }, /* exceptional case : r31 -> r30 */
	{ {"r032_1kl_b0"}, {"_r032_b0"}, {"_b0"} },
	{ {"r033_1kl_b0"}, {"_r033_b0"}, {"_b0"} },
	{ {"r034_1kl_b0"}, {"_r034_b0"}, {"_b0"} },
	{ {"r02a_e32a_b2"}, {"_r02a_b2"}, {"_b2"} },
	{ {"r02b_e32a_b2"}, {"_r02b_b2"}, {"_b2"} },
	{ {"r020_1qw_b2"}, {"_r020_b2"}, {"_b2"} },
	{ {"r021_1qw_b2"}, {"_r021_b2"}, {"_b2"} },
	{ {"r022_1qw_b2"}, {"_r022_b2"}, {"_b2"} },
	{ {"r031_1qw_b2"}, {"_r031_b2"}, {"_b2"} },
	{ {"r032_1qw_b2"}, {"_r032_b2"}, {"_b2"} },
	{ {"r041_1qw_b2"}, {"_r041_b2"}, {"_b2"} }
};

#define MODULE_BCM4375_INDEX 3

naming_info_t bcm4375_naming_table[] = {
	{ {""}, {""}, {""} },
	{ {"e41_es11"}, {"_ES00_semco_b0"}, {"_b0"} },
	{ {"e43_es33"}, {"_ES01_semco_b0"}, {"_b0"} },
	{ {"e43_es34"}, {"_ES02_semco_b0"}, {"_b0"} },
	{ {"e43_es35"}, {"_ES02_semco_b0"}, {"_b0"} },
	{ {"e43_es36"}, {"_ES03_semco_b0"}, {"_b0"} },
	{ {"e43_cs41"}, {"_CS00_semco_b1"}, {"_b1"} },
	{ {"e43_cs51"}, {"_CS01_semco_b1"}, {"_b1"} },
	{ {"e43_cs53"}, {"_CS01_semco_b1"}, {"_b1"} },
	{ {"e43_cs61"}, {"_CS00_skyworks_b1"}, {"_b1"} },
	{ {"1rh_es10"}, {"_1rh_es10_b0"}, {"_b0"} },
	{ {"1rh_es11"}, {"_1rh_es11_b0"}, {"_b0"} },
	{ {"1rh_es12"}, {"_1rh_es12_b0"}, {"_b0"} },
	{ {"1rh_es13"}, {"_1rh_es13_b0"}, {"_b0"} },
	{ {"1rh_es20"}, {"_1rh_es20_b0"}, {"_b0"} },
	{ {"1rh_es32"}, {"_1rh_es32_b0"}, {"_b0"} },
	{ {"1rh_es41"}, {"_1rh_es41_b1"}, {"_b1"} },
	{ {"1rh_es42"}, {"_1rh_es42_b1"}, {"_b1"} },
	{ {"1rh_es43"}, {"_1rh_es43_b1"}, {"_b1"} },
	{ {"1rh_es44"}, {"_1rh_es44_b1"}, {"_b1"} }
};

static naming_info_t *
dhd_find_naming_info(naming_info_t table[], int table_size, char *module_type)
{
	int index_found = 0, i = 0;

	if (module_type && strlen(module_type) > 0) {
		for (i = 1; i < table_size; i++) {
			if (!strncmp(table[i].cid_ext, module_type, strlen(table[i].cid_ext))) {
				index_found = i;
				break;
			}
		}
	}

	DHD_INFO(("%s: index_found=%d\n", __FUNCTION__, index_found));

	return &table[index_found];
}

static naming_info_t *
dhd_find_naming_info_by_cid(naming_info_t table[], int table_size,
	char *cid_info)
{
	int index_found = 0, i = 0;
	char *ptr;

	/* Truncate the extension: skip the first (MODULE_BCM4361_INDEX - 1)
	 * '_'-separated fields of cid_info.
	 */
	for (i = 1, ptr = cid_info; i < MODULE_BCM4361_INDEX && ptr; i++) {
		ptr = bcmstrstr(ptr, "_");
		if (ptr) {
			ptr++;
		}
	}

	for (i = 1; i < table_size && ptr; i++) {
		if (!strncmp(table[i].cid_ext, ptr, strlen(table[i].cid_ext))) {
			index_found = i;
			break;
		}
	}

	DHD_INFO(("%s: index_found=%d\n", __FUNCTION__, index_found));

	return &table[index_found];
}
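
/*
 * Worked example for dhd_find_naming_info_by_cid() (the cid_info value is
 * hypothetical): with MODULE_BCM4361_INDEX == 3, the first loop skips two
 * '_'-separated fields, e.g. "SEM_e32_r02j_e32_b0" -> ptr = "r02j_e32_b0",
 * and the remainder is then prefix-matched against cid_ext in the table.
 */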

static int
dhd_parse_board_information_bcm(dhd_bus_t *bus, int *boardtype,
	unsigned char *vid, int *vid_length)
{
	int boardtype_backplane_addr[] = {
		0x18010324, /* OTP Control 1 */
		0x18012618, /* PMU min resource mask */
	};
	int boardtype_backplane_data[] = {
		0x00fa0000,
		0x0e4fffff /* Keep on ARMHTAVAIL */
	};
	int int_val = 0, i = 0;
	cis_tuple_format_t *tuple;
	int totlen, len;
	uint32 raw_data[CIS_TUPLE_MAX_COUNT];

	for (i = 0; i < ARRAYSIZE(boardtype_backplane_addr); i++) {
		/* Write the new OTP and PMU configuration */
		if (si_backplane_access(bus->sih, boardtype_backplane_addr[i], sizeof(int),
				&boardtype_backplane_data[i], FALSE) != BCME_OK) {
			DHD_ERROR(("invalid size/addr combination\n"));
			return BCME_ERROR;
		}

		if (si_backplane_access(bus->sih, boardtype_backplane_addr[i], sizeof(int),
				&int_val, TRUE) != BCME_OK) {
			DHD_ERROR(("invalid size/addr combination\n"));
			return BCME_ERROR;
		}

		DHD_INFO(("%s: boardtype_backplane_addr 0x%08x rdata 0x%04x\n",
			__FUNCTION__, boardtype_backplane_addr[i], int_val));
	}

	/* Read the tuple raw data */
	for (i = 0; i < CIS_TUPLE_MAX_COUNT; i++) {
		if (si_backplane_access(bus->sih, CIS_TUPLE_START_ADDRESS + i * sizeof(uint32),
				sizeof(uint32), &raw_data[i], TRUE) != BCME_OK) {
			break;
		}
	}

	totlen = i * sizeof(uint32);
	tuple = (cis_tuple_format_t *)raw_data;

	/* Check that the first tuple has the 'start' tag */
	if (tuple->id != CIS_TUPLE_TAG_START) {
		return BCME_ERROR;
	}

	*vid_length = *boardtype = 0;

	/* Find the tagged parameters */
	while ((totlen >= (tuple->len + CIS_TUPLE_HDR_LEN)) &&
			(*vid_length == 0 || *boardtype == 0)) {
		len = tuple->len;
		if ((tuple->tag == CIS_TUPLE_TAG_VENDOR) &&
				(totlen >= (int)(len + CIS_TUPLE_HDR_LEN))) {
			/* found VID */
			memcpy(vid, tuple->data, tuple->len - CIS_TUPLE_TAG_LENGTH);
			*vid_length = tuple->len - CIS_TUPLE_TAG_LENGTH;
			prhex("OTP VID", tuple->data, tuple->len - CIS_TUPLE_TAG_LENGTH);
		}
		else if ((tuple->tag == CIS_TUPLE_TAG_BOARDTYPE) &&
				(totlen >= (int)(len + CIS_TUPLE_HDR_LEN))) {
			/* found boardtype */
			*boardtype = (int)tuple->data[0];
			prhex("OTP boardtype", tuple->data, tuple->len - CIS_TUPLE_TAG_LENGTH);
		}
		tuple = (cis_tuple_format_t*)((uint8*)tuple + (len + CIS_TUPLE_HDR_LEN));
		totlen -= (len + CIS_TUPLE_HDR_LEN);
	}

	if (*vid_length <= 0 || *boardtype <= 0) {
		DHD_ERROR(("failed to parse information (vid=%d, boardtype=%d)\n",
			*vid_length, *boardtype));
		return BCME_ERROR;
	}

	return BCME_OK;
}
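
/*
 * Worked example of the tuple walk above (values are illustrative): a vendor
 * tuple of len 0x09 yields an 8-byte VID (tuple->len - CIS_TUPLE_TAG_LENGTH),
 * after which the walk advances by len + CIS_TUPLE_HDR_LEN = 11 bytes to the
 * next tuple; a following boardtype tuple (tag 0x1b) supplies *boardtype from
 * data[0]. The loop stops as soon as both values have been found or totlen is
 * exhausted.
 */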

static naming_info_t *
dhd_find_naming_info_by_chip_rev(naming_info_t table[], int table_size,
	dhd_bus_t *bus, bool *is_murata_fem)
{
	int board_type = 0, chip_rev = 0, vid_length = 0;
	unsigned char vid[MAX_VID_LEN];
	naming_info_t *info = &table[0];
	char *cid_info = NULL;

	if (!bus || !bus->sih) {
		DHD_ERROR(("%s:bus(%p) or bus->sih is NULL\n", __FUNCTION__, bus));
		return NULL;
	}
	chip_rev = bus->sih->chiprev;

	if (dhd_parse_board_information_bcm(bus, &board_type, vid, &vid_length)
			!= BCME_OK) {
		DHD_ERROR(("%s:failed to parse board information\n", __FUNCTION__));
		return NULL;
	}

	DHD_INFO(("%s:chip version %d\n", __FUNCTION__, chip_rev));

#if defined(BCM4361_CHIP)
	/* Only the A0 chipset needs exception handling */
	if (chip_rev == CHIP_REV_A0) {
		if (board_type == BOARD_TYPE_EPA) {
			info = dhd_find_naming_info(table, table_size,
				DEFAULT_CIDINFO_FOR_EPA);
		} else if ((board_type == BOARD_TYPE_IPA) ||
				(board_type == BOARD_TYPE_IPA_OLD)) {
			info = dhd_find_naming_info(table, table_size,
				DEFAULT_CIDINFO_FOR_IPA);
		}
	} else {
		cid_info = dhd_get_cid_info(vid, vid_length);
		if (cid_info) {
			info = dhd_find_naming_info_by_cid(table, table_size, cid_info);
			if (strstr(cid_info, CID_FEM_MURATA)) {
				*is_murata_fem = TRUE;
			}
		}
	}
#else
	cid_info = dhd_get_cid_info(vid, vid_length);
	if (cid_info) {
		info = dhd_find_naming_info_by_cid(table, table_size, cid_info);
		if (strstr(cid_info, CID_FEM_MURATA)) {
			*is_murata_fem = TRUE;
		}
	}
#endif /* BCM4361_CHIP */

	return info;
}
#endif /* USE_CID_CHECK */

static int
concate_revision_bcm4361(dhd_bus_t *bus, char *fw_path, char *nv_path)
{
	int ret = BCME_OK;
#if defined(SUPPORT_BCM4361_MIXED_MODULES) && defined(USE_CID_CHECK)
	char module_type[MAX_VNAME_LEN];
	naming_info_t *info = NULL;
	bool is_murata_fem = FALSE;

	memset(module_type, 0, sizeof(module_type));

	if (dhd_check_module_bcm(module_type,
			MODULE_BCM4361_INDEX, &is_murata_fem) == BCME_OK) {
		info = dhd_find_naming_info(bcm4361_naming_table,
			ARRAYSIZE(bcm4361_naming_table), module_type);
	} else {
		/* in case the .cid.info file does not exist */
		info = dhd_find_naming_info_by_chip_rev(bcm4361_naming_table,
			ARRAYSIZE(bcm4361_naming_table), bus, &is_murata_fem);
	}

	if (bcmstrnstr(nv_path, PATH_MAX, "_murata", 7)) {
		is_murata_fem = FALSE;
	}

	if (info) {
		if (is_murata_fem) {
			strncat(nv_path, NVRAM_FEM_MURATA, strlen(NVRAM_FEM_MURATA));
		}
		strncat(nv_path, info->nvram_ext, strlen(info->nvram_ext));
		strncat(fw_path, info->fw_ext, strlen(info->fw_ext));
	} else {
		DHD_ERROR(("%s:failed to find extension for nvram and firmware\n", __FUNCTION__));
		ret = BCME_ERROR;
	}
#else /* SUPPORT_BCM4361_MIXED_MODULES && USE_CID_CHECK */
	char chipver_tag[10] = {0, };

	strcat(fw_path, chipver_tag);
	strcat(nv_path, chipver_tag);
#endif /* SUPPORT_BCM4361_MIXED_MODULES && USE_CID_CHECK */

	return ret;
}

static int
concate_revision_bcm4375(dhd_bus_t *bus, char *fw_path, char *nv_path)
{
	int ret = BCME_OK;
#if defined(SUPPORT_BCM4375_MIXED_MODULES) && defined(USE_CID_CHECK)
	char module_type[MAX_VNAME_LEN];
	naming_info_t *info = NULL;
	bool is_murata_fem = FALSE;

	memset(module_type, 0, sizeof(module_type));

	if (dhd_check_module_bcm(module_type,
			MODULE_BCM4375_INDEX, &is_murata_fem) == BCME_OK) {
		info = dhd_find_naming_info(bcm4375_naming_table,
			ARRAYSIZE(bcm4375_naming_table), module_type);
	} else {
		/* in case the .cid.info file does not exist */
		info = dhd_find_naming_info_by_chip_rev(bcm4375_naming_table,
			ARRAYSIZE(bcm4375_naming_table), bus, &is_murata_fem);
	}

	if (info) {
		strncat(nv_path, info->nvram_ext, strlen(info->nvram_ext));
		strncat(fw_path, info->fw_ext, strlen(info->fw_ext));
	} else {
		DHD_ERROR(("%s:failed to find extension for nvram and firmware\n", __FUNCTION__));
		ret = BCME_ERROR;
	}
#else /* SUPPORT_BCM4375_MIXED_MODULES && USE_CID_CHECK */
	char chipver_tag[10] = {0, };

	strcat(fw_path, chipver_tag);
	strcat(nv_path, chipver_tag);
#endif /* SUPPORT_BCM4375_MIXED_MODULES && USE_CID_CHECK */

	return ret;
}

int
concate_revision(dhd_bus_t *bus, char *fw_path, char *nv_path)
{
	int res = 0;

	if (!bus || !bus->sih) {
		DHD_ERROR(("%s:Bus is Invalid\n", __FUNCTION__));
		return -1;
	}

	if (!fw_path || !nv_path) {
		DHD_ERROR(("fw_path or nv_path is null.\n"));
		return res;
	}

	switch (si_chipid(bus->sih)) {
	case BCM43569_CHIP_ID:
	case BCM4358_CHIP_ID:
		res = concate_revision_bcm4358(bus, fw_path, nv_path);
		break;
	case BCM4355_CHIP_ID:
	case BCM4359_CHIP_ID:
		res = concate_revision_bcm4359(bus, fw_path, nv_path);
		break;
	case BCM4361_CHIP_ID:
	case BCM4347_CHIP_ID:
		res = concate_revision_bcm4361(bus, fw_path, nv_path);
		break;
	case BCM4375_CHIP_ID:
		res = concate_revision_bcm4375(bus, fw_path, nv_path);
		break;
	default:
		DHD_ERROR(("REVISION SPECIFIC feature is not required\n"));
		return res;
	}

	return res;
}
#endif /* SUPPORT_MULTIPLE_REVISION */

uint16
dhd_get_chipid(dhd_pub_t *dhd)
{
	dhd_bus_t *bus = dhd->bus;

	if (bus && bus->sih)
		return (uint16)si_chipid(bus->sih);
	else
		return 0;
}

/**
 * Loads the firmware given by the caller-supplied path and nvram image into the PCIe dongle.
 *
 * BCM_REQUEST_FW specific:
 * Given the chip type, determines the file paths within /lib/firmware/brcm/ containing the
 * firmware and nvm for that chip. If the download fails, retries the download with a
 * different nvm file.
 *
 * BCMEMBEDIMAGE specific:
 * If bus->fw_path is empty, or if the download of bus->fw_path failed, the firmware contained
 * in the header file will be used instead.
 *
 * @return BCME_OK on success
 */
int
dhd_bus_download_firmware(struct dhd_bus *bus, osl_t *osh,
	char *pfw_path, char *pnv_path)
{
	int ret;

	bus->fw_path = pfw_path;
	bus->nv_path = pnv_path;

#if defined(SUPPORT_MULTIPLE_REVISION)
	if (concate_revision(bus, bus->fw_path, bus->nv_path) != 0) {
		DHD_ERROR(("%s: fail to concatenate revision \n",
			__FUNCTION__));
		return BCME_BADARG;
	}
#endif /* SUPPORT_MULTIPLE_REVISION */

#if defined(DHD_BLOB_EXISTENCE_CHECK)
	dhd_set_blob_support(bus->dhd, bus->fw_path);
#endif /* DHD_BLOB_EXISTENCE_CHECK */

	DHD_ERROR(("%s: firmware path=%s, nvram path=%s\n",
		__FUNCTION__, bus->fw_path, bus->nv_path));
	dhdpcie_dump_resource(bus);

	ret = dhdpcie_download_firmware(bus, osh);

	return ret;
}

/**
 * Loads the firmware given by 'bus->fw_path' into the PCIe dongle.
 *
 * BCM_REQUEST_FW specific:
 * Given the chip type, determines the file paths within /lib/firmware/brcm/ containing the
 * firmware and nvm for that chip. If the download fails, retries the download with a
 * different nvm file.
 *
 * BCMEMBEDIMAGE specific:
 * If bus->fw_path is empty, or if the download of bus->fw_path failed, the firmware contained
 * in the header file will be used instead.
 *
 * @return BCME_OK on success
 */
static int
dhdpcie_download_firmware(struct dhd_bus *bus, osl_t *osh)
{
	int ret = 0;
#if defined(BCM_REQUEST_FW)
	uint chipid = bus->sih->chip;
	uint revid = bus->sih->chiprev;
	char fw_path[64] = "/lib/firmware/brcm/bcm";	/* path to firmware image */
	char nv_path[64];				/* path to nvram vars file */
	bus->fw_path = fw_path;
	bus->nv_path = nv_path;
	switch (chipid) {
	case BCM43570_CHIP_ID:
		bcmstrncat(fw_path, "43570", 5);
		switch (revid) {
		case 0:
			bcmstrncat(fw_path, "a0", 2);
			break;
		case 2:
			bcmstrncat(fw_path, "a2", 2);
			break;
		default:
			DHD_ERROR(("%s: revid is not found %x\n", __FUNCTION__,
				revid));
			break;
		}
		break;
	default:
		DHD_ERROR(("%s: unsupported device %x\n", __FUNCTION__,
			chipid));
		return 0;
	}
	/* load the board-specific nvram file */
	snprintf(bus->nv_path, sizeof(nv_path), "%s.nvm", fw_path);
	/* load firmware */
	snprintf(bus->fw_path, sizeof(fw_path), "%s-firmware.bin", fw_path);
#endif /* BCM_REQUEST_FW */

	DHD_OS_WAKE_LOCK(bus->dhd);
	ret = _dhdpcie_download_firmware(bus);

	DHD_OS_WAKE_UNLOCK(bus->dhd);
	return ret;
} /* dhdpcie_download_firmware */
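
/*
 * Example of the BCM_REQUEST_FW path construction above for a 43570 rev 2
 * part; the results follow directly from the bcmstrncat/snprintf calls:
 *
 *   bus->fw_path = "/lib/firmware/brcm/bcm43570a2-firmware.bin"
 *   bus->nv_path = "/lib/firmware/brcm/bcm43570a2.nvm"
 */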

#define DHD_MEMORY_SET_PATTERN 0xAA

/**
 * Downloads a file containing firmware into dongle memory. In case of a .bea file, the DHD
 * is updated with the event logging partitions within that file as well.
 *
 * @param pfw_path  Path to the .bin or .bea file
 */
static int
dhdpcie_download_code_file(struct dhd_bus *bus, char *pfw_path)
{
	int bcmerror = BCME_ERROR;
	int offset = 0;
	int len = 0;
	bool store_reset;
	char *imgbuf = NULL;
	uint8 *memblock = NULL, *memptr = NULL;
	int offset_end = bus->ramsize;
	uint32 file_size = 0, read_len = 0;

#if defined(DHD_FW_MEM_CORRUPTION)
	if (dhd_bus_get_fw_mode(bus->dhd) == DHD_FLAG_MFG_MODE) {
		dhd_tcm_test_enable = TRUE;
	} else {
		dhd_tcm_test_enable = FALSE;
	}
#endif /* DHD_FW_MEM_CORRUPTION */
	DHD_ERROR(("%s: dhd_tcm_test_enable %u\n", __FUNCTION__, dhd_tcm_test_enable));
	/* TCM check */
	if (dhd_tcm_test_enable && !dhd_bus_tcm_test(bus)) {
		DHD_ERROR(("dhd_bus_tcm_test failed\n"));
		bcmerror = BCME_ERROR;
		goto err;
	}
	DHD_ERROR(("%s: download firmware %s\n", __FUNCTION__, pfw_path));

	/* Opening the image should succeed if it was actually supplied via a
	 * registry entry or module param.
	 */
	imgbuf = dhd_os_open_image1(bus->dhd, pfw_path);
	if (imgbuf == NULL) {
		goto err;
	}

	file_size = dhd_os_get_image_size(imgbuf);
	if (!file_size) {
		DHD_ERROR(("%s: get file size fails ! \n", __FUNCTION__));
		goto err;
	}

	memptr = memblock = MALLOC(bus->dhd->osh, MEMBLOCK + DHD_SDALIGN);
	if (memblock == NULL) {
		DHD_ERROR(("%s: Failed to allocate memory %d bytes\n", __FUNCTION__, MEMBLOCK));
		bcmerror = BCME_NOMEM;
		goto err;
	}
	if ((uint32)(uintptr)memblock % DHD_SDALIGN) {
		memptr += (DHD_SDALIGN - ((uint32)(uintptr)memblock % DHD_SDALIGN));
	}

	/* Check if this is a CR4/CA7 core */
	store_reset = (si_setcore(bus->sih, ARMCR4_CORE_ID, 0) ||
		si_setcore(bus->sih, ARMCA7_CORE_ID, 0));

	/* Download the image in MEMBLOCK-sized chunks */
	while ((len = dhd_os_get_image_block((char*)memptr, MEMBLOCK, imgbuf))) {
		if (len < 0) {
			DHD_ERROR(("%s: dhd_os_get_image_block failed (%d)\n", __FUNCTION__, len));
			bcmerror = BCME_ERROR;
			goto err;
		}

		read_len += len;
		if (read_len > file_size) {
			DHD_ERROR(("%s: WARNING! reading beyond EOF, len=%d; read_len=%u;"
				" file_size=%u truncating len to %d \n", __FUNCTION__,
				len, read_len, file_size, (len - (read_len - file_size))));
			len -= (read_len - file_size);
		}

		/* If the address is 0, store the reset instruction to be written at 0 */
		if (store_reset) {
			ASSERT(offset == 0);
			bus->resetinstr = *(((uint32*)memptr));
			/* Add the start-of-RAM address to the address given by the user */
			offset += bus->dongle_ram_base;
			offset_end += offset;
			store_reset = FALSE;
		}

		bcmerror = dhdpcie_bus_membytes(bus, TRUE, offset, (uint8 *)memptr, len);
		if (bcmerror) {
			DHD_ERROR(("%s: error %d on writing %d membytes at 0x%08x\n",
				__FUNCTION__, bcmerror, len, offset));
			goto err;
		}
		offset += MEMBLOCK;

		if (offset >= offset_end) {
			DHD_ERROR(("%s: invalid address access to %x (offset end: %x)\n",
				__FUNCTION__, offset, offset_end));
			bcmerror = BCME_ERROR;
			goto err;
		}

		if (read_len >= file_size) {
			break;
		}
	}
err:
	if (memblock) {
		MFREE(bus->dhd->osh, memblock, MEMBLOCK + DHD_SDALIGN);
	}

	if (imgbuf) {
		dhd_os_close_image1(bus->dhd, imgbuf);
	}

	return bcmerror;
} /* dhdpcie_download_code_file */
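
/*
 * The DHD_SDALIGN adjustment in dhdpcie_download_code_file() rounds memptr up
 * to the next aligned address inside the over-allocated block. Worked example
 * (addresses illustrative): with DHD_SDALIGN == 32 and a memblock address
 * ending in 0x...04, memblock % 32 == 4, so memptr = memblock + (32 - 4),
 * which is 32-byte aligned and still leaves MEMBLOCK usable bytes within the
 * MEMBLOCK + DHD_SDALIGN allocation.
 */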

#ifdef CUSTOMER_HW4_DEBUG
#define MIN_NVRAMVARS_SIZE 128
#endif /* CUSTOMER_HW4_DEBUG */

static int
dhdpcie_download_nvram(struct dhd_bus *bus)
{
	int bcmerror = BCME_ERROR;
	uint len;
	char * memblock = NULL;
	char *bufp;
	char *pnv_path;
	bool nvram_file_exists;
	bool nvram_uefi_exists = FALSE;
	bool local_alloc = FALSE;
	pnv_path = bus->nv_path;

	nvram_file_exists = ((pnv_path != NULL) && (pnv_path[0] != '\0'));

	/* First try UEFI */
	len = MAX_NVRAMBUF_SIZE;
	dhd_get_download_buffer(bus->dhd, NULL, NVRAM, &memblock, (int *)&len);

	/* If UEFI is empty, then read from the file system */
	if ((len <= 0) || (memblock == NULL)) {
		if (nvram_file_exists) {
			len = MAX_NVRAMBUF_SIZE;
			dhd_get_download_buffer(bus->dhd, pnv_path, NVRAM, &memblock, (int *)&len);
			if ((len <= 0 || len > MAX_NVRAMBUF_SIZE)) {
				goto err;
			}
		}
		else {
			/* For SROM OTP, no external file or UEFI is required */
			bcmerror = BCME_OK;
		}
	} else {
		nvram_uefi_exists = TRUE;
	}

	DHD_ERROR(("%s: dhd_get_download_buffer len %d\n", __FUNCTION__, len));

	if (len > 0 && len <= MAX_NVRAMBUF_SIZE && memblock != NULL) {
		bufp = (char *) memblock;

		{
			bufp[len] = 0;
			if (nvram_uefi_exists || nvram_file_exists) {
				len = process_nvram_vars(bufp, len);
			}
		}

		DHD_ERROR(("%s: process_nvram_vars len %d\n", __FUNCTION__, len));
#ifdef CUSTOMER_HW4_DEBUG
		if (len < MIN_NVRAMVARS_SIZE) {
			DHD_ERROR(("%s: invalid nvram size in process_nvram_vars \n",
				__FUNCTION__));
			bcmerror = BCME_ERROR;
			goto err;
		}
#endif /* CUSTOMER_HW4_DEBUG */

		if (len % 4) {
			len += 4 - (len % 4);
		}
		bufp += len;
		*bufp++ = 0;
		if (len)
			bcmerror = dhdpcie_downloadvars(bus, memblock, len + 1);
		if (bcmerror) {
			DHD_ERROR(("%s: error downloading vars: %d\n",
				__FUNCTION__, bcmerror));
		}
	}

err:
	if (memblock) {
		if (local_alloc) {
			MFREE(bus->dhd->osh, memblock, MAX_NVRAMBUF_SIZE);
		} else {
			dhd_free_download_buffer(bus->dhd, memblock, MAX_NVRAMBUF_SIZE);
		}
	}

	return bcmerror;
}

static int
dhdpcie_ramsize_read_image(struct dhd_bus *bus, char *buf, int len)
{
	int bcmerror = BCME_ERROR;
	char *imgbuf = NULL;

	if (buf == NULL || len == 0)
		goto err;

	/* External image takes precedence if specified */
	if ((bus->fw_path != NULL) && (bus->fw_path[0] != '\0')) {
		/* Opens the file and seeks to the correct offset */
		imgbuf = dhd_os_open_image1(bus->dhd, bus->fw_path);
		if (imgbuf == NULL) {
			DHD_ERROR(("%s: Failed to open firmware file\n", __FUNCTION__));
			goto err;
		}

		/* Read it */
		if (len != dhd_os_get_image_block(buf, len, imgbuf)) {
			DHD_ERROR(("%s: Failed to read %d bytes data\n", __FUNCTION__, len));
			goto err;
		}

		bcmerror = BCME_OK;
	}

err:
	if (imgbuf)
		dhd_os_close_image1(bus->dhd, imgbuf);

	return bcmerror;
}

/* The ramsize can be changed in the dongle image; for example, the 4365 chip shares its
 * sysmem with the BMC, and how much sysmem belongs to the CA7 can be adjusted at dongle
 * compile time. So DHD needs to detect this case and update the dongle RAMSIZE accordingly.
 */
static void
dhdpcie_ramsize_adj(struct dhd_bus *bus)
{
	int i, search_len = 0;
	uint8 *memptr = NULL;
	uint8 *ramsizeptr = NULL;
	uint ramsizelen;
	uint32 ramsize_ptr_ptr[] = {RAMSIZE_PTR_PTR_LIST};
	hnd_ramsize_ptr_t ramsize_info;

	DHD_TRACE(("%s: Enter\n", __FUNCTION__));

	/* Dongle RAMSIZE already adjusted? */
	if (bus->ramsize_adjusted) {
		return;
	}
	/* Success or failure, we don't want to be here
	 * more than once.
	 */
	bus->ramsize_adjusted = TRUE;

	/* Skip the adjustment if the user restricted the dongle RAM size */
	if (dhd_dongle_memsize) {
		DHD_ERROR(("%s: user restricted dongle ram size to %d.\n", __FUNCTION__,
			dhd_dongle_memsize));
		return;
	}

	/* Out immediately if no image to download */
	if ((bus->fw_path == NULL) || (bus->fw_path[0] == '\0')) {
		DHD_ERROR(("%s: no firmware file\n", __FUNCTION__));
		return;
	}

	/* Get the maximum RAMSIZE info search length */
	for (i = 0; ; i++) {
		if (ramsize_ptr_ptr[i] == RAMSIZE_PTR_PTR_END)
			break;

		if (search_len < (int)ramsize_ptr_ptr[i])
			search_len = (int)ramsize_ptr_ptr[i];
	}

	if (!search_len)
		return;

	search_len += sizeof(hnd_ramsize_ptr_t);

	memptr = MALLOC(bus->dhd->osh, search_len);
	if (memptr == NULL) {
		DHD_ERROR(("%s: Failed to allocate memory %d bytes\n", __FUNCTION__, search_len));
		return;
	}

	/* External image takes precedence if specified */
	if (dhdpcie_ramsize_read_image(bus, (char *)memptr, search_len) != BCME_OK) {
		goto err;
	}
	else {
		ramsizeptr = memptr;
		ramsizelen = search_len;
	}

	if (ramsizeptr) {
		/* Check for the magic word at each candidate location */
		for (i = 0; ; i++) {
			if (ramsize_ptr_ptr[i] == RAMSIZE_PTR_PTR_END)
				break;

			if (ramsize_ptr_ptr[i] + sizeof(hnd_ramsize_ptr_t) > ramsizelen)
				continue;

			memcpy((char *)&ramsize_info, ramsizeptr + ramsize_ptr_ptr[i],
				sizeof(hnd_ramsize_ptr_t));

			if (ramsize_info.magic == HTOL32(HND_RAMSIZE_PTR_MAGIC)) {
				bus->orig_ramsize = LTOH32(ramsize_info.ram_size);
				bus->ramsize = LTOH32(ramsize_info.ram_size);
				DHD_ERROR(("%s: Adjust dongle RAMSIZE to 0x%x\n", __FUNCTION__,
					bus->ramsize));
				break;
			}
		}
	}

err:
	if (memptr)
		MFREE(bus->dhd->osh, memptr, search_len);

	return;
} /* dhdpcie_ramsize_adj */

/**
 * Downloads the firmware file given by 'bus->fw_path' into the PCIe dongle.
 *
 * BCMEMBEDIMAGE specific:
 * If bus->fw_path is empty, or if the download of bus->fw_path failed, the firmware contained
 * in the header file will be used instead.
 */
static int
_dhdpcie_download_firmware(struct dhd_bus *bus)
{
	int bcmerror = -1;

	bool embed = FALSE;	/* download embedded firmware */
	bool dlok = FALSE;	/* download firmware succeeded */

	/* Out immediately if no image to download */
	if ((bus->fw_path == NULL) || (bus->fw_path[0] == '\0')) {
		DHD_ERROR(("%s: no firmware file\n", __FUNCTION__));
		return 0;
	}

	/* Adjust ram size */
	dhdpcie_ramsize_adj(bus);

	/* Keep arm in reset */
	if (dhdpcie_bus_download_state(bus, TRUE)) {
		DHD_ERROR(("%s: error placing ARM core in reset\n", __FUNCTION__));
		goto err;
	}

	/* External image takes precedence if specified */
	if ((bus->fw_path != NULL) && (bus->fw_path[0] != '\0')) {
		if (dhdpcie_download_code_file(bus, bus->fw_path)) {
			DHD_ERROR(("%s:%d dongle image file download failed\n", __FUNCTION__,
				__LINE__));
			goto err;
		} else {
			embed = FALSE;
			dlok = TRUE;
		}
	}

	BCM_REFERENCE(embed);
	if (!dlok) {
		DHD_ERROR(("%s:%d dongle image download failed\n", __FUNCTION__, __LINE__));
		goto err;
	}

	/* EXAMPLE: nvram_array */
	/* If a valid nvram_array is specified as above, it can be passed down to the dongle */
	/* dhd_bus_set_nvram_params(bus, (char *)&nvram_array); */

	/* External nvram takes precedence if specified */
	if (dhdpcie_download_nvram(bus)) {
		DHD_ERROR(("%s:%d dongle nvram file download failed\n", __FUNCTION__, __LINE__));
		goto err;
	}

	/* Take arm out of reset */
	if (dhdpcie_bus_download_state(bus, FALSE)) {
		DHD_ERROR(("%s: error getting out of ARM core reset\n", __FUNCTION__));
		goto err;
	}

	bcmerror = 0;

err:
	return bcmerror;
} /* _dhdpcie_download_firmware */

static int
dhdpcie_bus_readconsole(dhd_bus_t *bus)
{
	dhd_console_t *c = &bus->console;
	uint8 line[CONSOLE_LINE_MAX], ch;
	uint32 n, idx, addr;
	int rv;
	uint readlen = 0;
	uint i = 0;

	/* Don't do anything until FWREADY updates the console address */
	if (bus->console_addr == 0)
		return -1;

	/* Read the console log struct */
	addr = bus->console_addr + OFFSETOF(hnd_cons_t, log);

	if ((rv = dhdpcie_bus_membytes(bus, FALSE, addr, (uint8 *)&c->log, sizeof(c->log))) < 0)
		return rv;

	/* Allocate the console buffer (one time only) */
	if (c->buf == NULL) {
		c->bufsize = ltoh32(c->log.buf_size);
		if ((c->buf = MALLOC(bus->dhd->osh, c->bufsize)) == NULL)
			return BCME_NOMEM;
		DHD_INFO(("conlog: bufsize=0x%x\n", c->bufsize));
	}
	idx = ltoh32(c->log.idx);

	/* Protect against a corrupt value */
	if (idx > c->bufsize)
		return BCME_ERROR;

	/* Skip reading the console buffer if the index pointer has not moved */
	if (idx == c->last)
		return BCME_OK;

	DHD_INFO(("conlog: addr=0x%x, idx=0x%x, last=0x%x \n", c->log.buf,
		idx, c->last));

	/* Read the console buffer data into a local buffer.
	 * Read only the portion of the buffer that is needed, taking
	 * care to handle wrap-around.
	 */
	addr = ltoh32(c->log.buf);

	/* wrap-around case - write ptr < read ptr */
	if (idx < c->last) {
		/* from the read ptr to the end of the buffer */
		readlen = c->bufsize - c->last;
		if ((rv = dhdpcie_bus_membytes(bus, FALSE,
				addr + c->last, c->buf, readlen)) < 0) {
			DHD_ERROR(("conlog: read error[1] ! \n"));
			return rv;
		}
		/* from the beginning of the buffer to the write ptr */
		if ((rv = dhdpcie_bus_membytes(bus, FALSE,
				addr, c->buf + readlen,
				idx)) < 0) {
			DHD_ERROR(("conlog: read error[2] ! \n"));
			return rv;
		}
		readlen += idx;
	} else {
		/* non-wraparound case, write ptr > read ptr */
		readlen = (uint)idx - c->last;
		if ((rv = dhdpcie_bus_membytes(bus, FALSE,
				addr + c->last, c->buf, readlen)) < 0) {
			DHD_ERROR(("conlog: read error[3] ! \n"));
			return rv;
		}
	}
	/* update the read ptr */
	c->last = idx;

	/* now output the read data from the local buffer to the host console */
	while (i < readlen) {
		for (n = 0; n < CONSOLE_LINE_MAX - 2 && i < readlen; n++) {
			ch = c->buf[i];
			++i;
			if (ch == '\n')
				break;
			line[n] = ch;
		}

		if (n > 0) {
			if (line[n - 1] == '\r')
				n--;
			line[n] = 0;
			DHD_FWLOG(("CONSOLE: %s\n", line));
		}
	}

	return BCME_OK;
} /* dhdpcie_bus_readconsole */
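
/*
 * Worked example of the wrap-around read above (values illustrative): with
 * bufsize == 0x100, last == 0xF0 and idx == 0x10, the first membytes read
 * copies 0x100 - 0xF0 = 0x10 bytes from the tail of the ring, the second
 * copies idx == 0x10 bytes from the start, and readlen ends up 0x20, which
 * is exactly the number of bytes logged since the previous poll.
 */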

void
dhd_bus_dump_console_buffer(dhd_bus_t *bus)
{
	uint32 n, i;
	uint32 addr;
	char *console_buffer = NULL;
	uint32 console_ptr, console_size, console_index;
	uint8 line[CONSOLE_LINE_MAX], ch;
	int rv;

	DHD_ERROR(("%s: Dump Complete Console Buffer\n", __FUNCTION__));

	if (bus->is_linkdown) {
		DHD_ERROR(("%s: Skip dump Console Buffer due to PCIe link down\n", __FUNCTION__));
		return;
	}

	addr = bus->pcie_sh->console_addr + OFFSETOF(hnd_cons_t, log);
	if ((rv = dhdpcie_bus_membytes(bus, FALSE, addr,
		(uint8 *)&console_ptr, sizeof(console_ptr))) < 0) {
		goto exit;
	}

	addr = bus->pcie_sh->console_addr + OFFSETOF(hnd_cons_t, log.buf_size);
	if ((rv = dhdpcie_bus_membytes(bus, FALSE, addr,
		(uint8 *)&console_size, sizeof(console_size))) < 0) {
		goto exit;
	}

	addr = bus->pcie_sh->console_addr + OFFSETOF(hnd_cons_t, log.idx);
	if ((rv = dhdpcie_bus_membytes(bus, FALSE, addr,
		(uint8 *)&console_index, sizeof(console_index))) < 0) {
		goto exit;
	}

	console_ptr = ltoh32(console_ptr);
	console_size = ltoh32(console_size);
	console_index = ltoh32(console_index);

	if (console_size > CONSOLE_BUFFER_MAX ||
		!(console_buffer = MALLOC(bus->dhd->osh, console_size))) {
		goto exit;
	}

	if ((rv = dhdpcie_bus_membytes(bus, FALSE, console_ptr,
		(uint8 *)console_buffer, console_size)) < 0) {
		goto exit;
	}

	for (i = 0, n = 0; i < console_size; i += n + 1) {
		for (n = 0; n < CONSOLE_LINE_MAX - 2; n++) {
			ch = console_buffer[(console_index + i + n) % console_size];
			if (ch == '\n')
				break;
			line[n] = ch;
		}

		if (n > 0) {
			if (line[n - 1] == '\r')
				n--;
			line[n] = 0;
			/* Don't use the DHD_ERROR macro since we print
			 * a lot of information quickly. The macro
			 * would truncate a lot of the printfs.
			 */
			DHD_FWLOG(("CONSOLE: %s\n", line));
		}
	}

exit:
	if (console_buffer)
		MFREE(bus->dhd->osh, console_buffer, console_size);
	return;
}
  3039. /**
  3040. * Opens the file given by bus->fw_path, reads part of the file into a buffer and closes the file.
  3041. *
  3042. * @return BCME_OK on success
  3043. */
static int
dhdpcie_checkdied(dhd_bus_t *bus, char *data, uint size)
{
	int bcmerror = 0;
	uint msize = 512;
	char *mbuffer = NULL;
	uint maxstrlen = 256;
	char *str = NULL;
	pciedev_shared_t *local_pciedev_shared = bus->pcie_sh;
	struct bcmstrbuf strbuf;
	unsigned long flags;
	bool dongle_trap_occured = FALSE;

	DHD_TRACE(("%s: Enter\n", __FUNCTION__));

	if (DHD_NOCHECKDIED_ON()) {
		return 0;
	}

	if (data == NULL) {
		/*
		 * Called after an rx ctrl timeout, in which case "data" is NULL;
		 * allocate memory to trace the trap or assert.
		 */
		size = msize;
		mbuffer = data = MALLOC(bus->dhd->osh, msize);
		if (mbuffer == NULL) {
			DHD_ERROR(("%s: MALLOC(%d) failed \n", __FUNCTION__, msize));
			bcmerror = BCME_NOMEM;
			goto done2;
		}
	}

	if ((str = MALLOC(bus->dhd->osh, maxstrlen)) == NULL) {
		DHD_ERROR(("%s: MALLOC(%d) failed \n", __FUNCTION__, maxstrlen));
		bcmerror = BCME_NOMEM;
		goto done2;
	}
	DHD_GENERAL_LOCK(bus->dhd, flags);
	DHD_BUS_BUSY_SET_IN_CHECKDIED(bus->dhd);
	DHD_GENERAL_UNLOCK(bus->dhd, flags);

	if (MULTIBP_ENAB(bus->sih)) {
		dhd_bus_pcie_pwr_req(bus);
	}
	if ((bcmerror = dhdpcie_readshared(bus)) < 0) {
		goto done1;
	}

	bcm_binit(&strbuf, data, size);

	bcm_bprintf(&strbuf, "msgtrace address : 0x%08X\nconsole address : 0x%08X\n",
		local_pciedev_shared->msgtrace_addr, local_pciedev_shared->console_addr);

	if ((local_pciedev_shared->flags & PCIE_SHARED_ASSERT_BUILT) == 0) {
		/* NOTE: Misspelled assert is intentional - DO NOT FIX.
		 * (Avoids conflict with real asserts for programmatic parsing of output.)
		 */
		bcm_bprintf(&strbuf, "Assrt not built in dongle\n");
	}

	if ((bus->pcie_sh->flags & (PCIE_SHARED_ASSERT|PCIE_SHARED_TRAP)) == 0) {
		/* NOTE: Misspelled assert is intentional - DO NOT FIX.
		 * (Avoids conflict with real asserts for programmatic parsing of output.)
		 */
		bcm_bprintf(&strbuf, "No trap%s in dongle",
			(bus->pcie_sh->flags & PCIE_SHARED_ASSERT_BUILT)
			?"/assrt" :"");
	} else {
		if (bus->pcie_sh->flags & PCIE_SHARED_ASSERT) {
			/* Download assert */
			bcm_bprintf(&strbuf, "Dongle assert");
			if (bus->pcie_sh->assert_exp_addr != 0) {
				str[0] = '\0';
				if ((bcmerror = dhdpcie_bus_membytes(bus, FALSE,
					bus->pcie_sh->assert_exp_addr,
					(uint8 *)str, maxstrlen)) < 0) {
					goto done1;
				}
				str[maxstrlen - 1] = '\0';
				bcm_bprintf(&strbuf, " expr \"%s\"", str);
			}

			if (bus->pcie_sh->assert_file_addr != 0) {
				str[0] = '\0';
				if ((bcmerror = dhdpcie_bus_membytes(bus, FALSE,
					bus->pcie_sh->assert_file_addr,
					(uint8 *)str, maxstrlen)) < 0) {
					goto done1;
				}
				str[maxstrlen - 1] = '\0';
				bcm_bprintf(&strbuf, " file \"%s\"", str);
			}

			bcm_bprintf(&strbuf, " line %d ", bus->pcie_sh->assert_line);
		}

		if (bus->pcie_sh->flags & PCIE_SHARED_TRAP) {
			trap_t *tr = &bus->dhd->last_trap_info;
			dongle_trap_occured = TRUE;
			if ((bcmerror = dhdpcie_bus_membytes(bus, FALSE,
				bus->pcie_sh->trap_addr, (uint8*)tr, sizeof(trap_t))) < 0) {
				bus->dhd->dongle_trap_occured = TRUE;
				goto done1;
			}
			dhd_bus_dump_trap_info(bus, &strbuf);
		}
	}

	if (bus->pcie_sh->flags & (PCIE_SHARED_ASSERT | PCIE_SHARED_TRAP)) {
		DHD_FWLOG(("%s: %s\n", __FUNCTION__, strbuf.origbuf));

		dhd_bus_dump_console_buffer(bus);
		dhd_prot_debug_info_print(bus->dhd);

#if defined(DHD_FW_COREDUMP)
		/* save core dump or write to a file */
		if (bus->dhd->memdump_enabled) {
#ifdef DHD_SSSR_DUMP
			bus->dhd->collect_sssr = TRUE;
#endif /* DHD_SSSR_DUMP */
			bus->dhd->memdump_type = DUMP_TYPE_DONGLE_TRAP;
			dhdpcie_mem_dump(bus);
		}
#endif /* DHD_FW_COREDUMP */

		/* set the trap occurred flag only after all the memdump,
		 * logdump and sssr dump collection has been scheduled
		 */
		if (dongle_trap_occured) {
			bus->dhd->dongle_trap_occured = TRUE;
		}

		/* wake up IOCTL wait event */
		dhd_wakeup_ioctl_event(bus->dhd, IOCTL_RETURN_ON_TRAP);

#ifdef WL_CFGVENDOR_SEND_HANG_EVENT
		copy_hang_info_trap(bus->dhd);
#endif /* WL_CFGVENDOR_SEND_HANG_EVENT */

		dhd_schedule_reset(bus->dhd);
	}

done1:
	if (MULTIBP_ENAB(bus->sih)) {
		dhd_bus_pcie_pwr_req_clear(bus);
	}

	DHD_GENERAL_LOCK(bus->dhd, flags);
	DHD_BUS_BUSY_CLEAR_IN_CHECKDIED(bus->dhd);
	dhd_os_busbusy_wake(bus->dhd);
	DHD_GENERAL_UNLOCK(bus->dhd, flags);
done2:
	if (mbuffer)
		MFREE(bus->dhd->osh, mbuffer, msize);
	if (str)
		MFREE(bus->dhd->osh, str, maxstrlen);

	return bcmerror;
} /* dhdpcie_checkdied */
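
/*
 * Example (illustrative): the trap/assert decision above reduces to bit tests
 * on the shared-area flags word; a hypothetical standalone predicate:
 *
 *	static bool dongle_died(uint32 sh_flags)
 *	{
 *		// Either bit set means the dongle halted with recoverable state.
 *		return (sh_flags & (PCIE_SHARED_ASSERT | PCIE_SHARED_TRAP)) != 0;
 *	}
 */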
/* Custom copy of dhdpcie_mem_dump() that can be called at interrupt level */
void dhdpcie_mem_dump_bugcheck(dhd_bus_t *bus, uint8 *buf)
{
	int ret = 0;
	int size; /* Full mem size */
	int start; /* Start address */
	int read_size = 0; /* Read size of each iteration */
	uint8 *databuf = buf;

	if (bus == NULL) {
		return;
	}

	start = bus->dongle_ram_base;
	read_size = 4;
	/* check for dead bus */
	{
		uint test_word = 0;
		ret = dhdpcie_bus_membytes(bus, FALSE, start, (uint8*)&test_word, read_size);
		/* if read error or bus timeout */
		if (ret || (test_word == 0xFFFFFFFF)) {
			return;
		}
	}

	/* Get full mem size */
	size = bus->ramsize;
	/* Read mem content */
	while (size)
	{
		read_size = MIN(MEMBLOCK, size);
		if ((ret = dhdpcie_bus_membytes(bus, FALSE, start, databuf, read_size))) {
			return;
		}

		/* Decrement size and increment start address */
		size -= read_size;
		start += read_size;
		databuf += read_size;
	}
	bus->dhd->soc_ram = buf;
	bus->dhd->soc_ram_length = bus->ramsize;
	return;
}
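
/*
 * Example (illustrative): a read on a dead PCIe link returns all-ones, so the
 * probe above reads one word and treats 0xFFFFFFFF as "bus dead". A
 * hypothetical helper built on the same idea:
 *
 *	static bool pcie_bus_alive(dhd_bus_t *bus)
 *	{
 *		uint32 w = 0;
 *		// A failed read or an all-ones pattern both mean the bus is gone.
 *		if (dhdpcie_bus_membytes(bus, FALSE, bus->dongle_ram_base,
 *			(uint8 *)&w, sizeof(w)) != BCME_OK)
 *			return FALSE;
 *		return (w != 0xFFFFFFFF);
 *	}
 */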
#if defined(DHD_FW_COREDUMP)
static int
dhdpcie_get_mem_dump(dhd_bus_t *bus)
{
	int ret = BCME_OK;
	int size = 0;
	int start = 0;
	int read_size = 0; /* Read size of each iteration */
	uint8 *p_buf = NULL, *databuf = NULL;

	if (!bus) {
		DHD_ERROR(("%s: bus is NULL\n", __FUNCTION__));
		return BCME_ERROR;
	}

	if (!bus->dhd) {
		DHD_ERROR(("%s: dhd is NULL\n", __FUNCTION__));
		return BCME_ERROR;
	}

	size = bus->ramsize; /* Full mem size */
	start = bus->dongle_ram_base; /* Start address */

	/* Get full mem size */
	p_buf = dhd_get_fwdump_buf(bus->dhd, size);
	if (!p_buf) {
		DHD_ERROR(("%s: Out of memory (%d bytes)\n",
			__FUNCTION__, size));
		return BCME_ERROR;
	}

	/* Read mem content */
	DHD_TRACE_HW4(("Dump dongle memory\n"));
	databuf = p_buf;
	while (size > 0) {
		read_size = MIN(MEMBLOCK, size);
		ret = dhdpcie_bus_membytes(bus, FALSE, start, databuf, read_size);
		if (ret) {
			DHD_ERROR(("%s: Error membytes %d\n", __FUNCTION__, ret));
#ifdef DHD_DEBUG_UART
			bus->dhd->memdump_success = FALSE;
#endif /* DHD_DEBUG_UART */
			break;
		}
		DHD_TRACE(("."));

		/* Decrement size and increment start address */
		size -= read_size;
		start += read_size;
		databuf += read_size;
	}

	return ret;
}
static int
dhdpcie_mem_dump(dhd_bus_t *bus)
{
	dhd_pub_t *dhdp;
	int ret;

#ifdef EXYNOS_PCIE_DEBUG
	exynos_pcie_register_dump(1);
#endif /* EXYNOS_PCIE_DEBUG */

#ifdef SUPPORT_LINKDOWN_RECOVERY
	if (bus->is_linkdown) {
		DHD_ERROR(("%s: PCIe link is down so skip\n", __FUNCTION__));
		/* panic only for DUMP_MEMFILE_BUGON */
		ASSERT(bus->dhd->memdump_enabled != DUMP_MEMFILE_BUGON);
		return BCME_ERROR;
	}
#endif /* SUPPORT_LINKDOWN_RECOVERY */

	dhdp = bus->dhd;
	if (!dhdp) {
		DHD_ERROR(("%s: dhdp is NULL\n", __FUNCTION__));
		return BCME_ERROR;
	}

	if (DHD_BUS_CHECK_DOWN_OR_DOWN_IN_PROGRESS(dhdp)) {
		DHD_ERROR(("%s: bus is down! can't collect mem dump. \n", __FUNCTION__));
		return BCME_ERROR;
	}

#ifdef DHD_PCIE_NATIVE_RUNTIMEPM
	if (pm_runtime_get_sync(dhd_bus_to_dev(bus)) < 0)
		return BCME_ERROR;
#endif /* DHD_PCIE_NATIVE_RUNTIMEPM */

	ret = dhdpcie_get_mem_dump(bus);
	if (ret) {
		DHD_ERROR(("%s: failed to get mem dump, err=%d\n",
			__FUNCTION__, ret));
		return ret;
	}

	dhd_schedule_memdump(dhdp, dhdp->soc_ram, dhdp->soc_ram_length);
	/* the buffer (soc_ram) is freed in dhd_free()/dhd_clear() */

#ifdef DHD_PCIE_NATIVE_RUNTIMEPM
	pm_runtime_mark_last_busy(dhd_bus_to_dev(bus));
	pm_runtime_put_autosuspend(dhd_bus_to_dev(bus));
#endif /* DHD_PCIE_NATIVE_RUNTIMEPM */

	return ret;
}
int
dhd_bus_get_mem_dump(dhd_pub_t *dhdp)
{
	if (!dhdp) {
		DHD_ERROR(("%s: dhdp is NULL\n", __FUNCTION__));
		return BCME_ERROR;
	}

	return dhdpcie_get_mem_dump(dhdp->bus);
}

int
dhd_bus_mem_dump(dhd_pub_t *dhdp)
{
	dhd_bus_t *bus = dhdp->bus;
	int ret = BCME_ERROR;

	if (dhdp->busstate == DHD_BUS_DOWN) {
		DHD_ERROR(("%s bus is down\n", __FUNCTION__));
		return BCME_ERROR;
	}

	/* Try to resume if already suspended or suspend in progress */
#ifdef DHD_PCIE_RUNTIMEPM
	dhdpcie_runtime_bus_wake(dhdp, CAN_SLEEP(), __builtin_return_address(0));
#endif /* DHD_PCIE_RUNTIMEPM */

	/* Skip if still in suspended or suspend in progress */
	if (DHD_BUS_CHECK_SUSPEND_OR_ANY_SUSPEND_IN_PROGRESS(dhdp)) {
		DHD_ERROR(("%s: bus is in suspend(%d) or suspending(0x%x) state, so skip\n",
			__FUNCTION__, dhdp->busstate, dhdp->dhd_bus_busy_state));
		return BCME_ERROR;
	}

	DHD_OS_WAKE_LOCK(dhdp);
	ret = dhdpcie_mem_dump(bus);
	DHD_OS_WAKE_UNLOCK(dhdp);
	return ret;
}
#endif /* DHD_FW_COREDUMP */
int
dhd_socram_dump(dhd_bus_t *bus)
{
#if defined(DHD_FW_COREDUMP)
	DHD_OS_WAKE_LOCK(bus->dhd);
	dhd_bus_mem_dump(bus->dhd);
	DHD_OS_WAKE_UNLOCK(bus->dhd);
	return 0;
#else
	return -1;
#endif // endif
}
/**
 * Transfers bytes between host and dongle using pio mode.
 * Parameter 'address' is a backplane address.
 */
static int
dhdpcie_bus_membytes(dhd_bus_t *bus, bool write, ulong address, uint8 *data, uint size)
{
	uint dsize;
	int detect_endian_flag = 0x01;
	bool little_endian;

	if (write && bus->is_linkdown) {
		DHD_ERROR(("%s: PCIe link was down\n", __FUNCTION__));
		return BCME_ERROR;
	}

	if (MULTIBP_ENAB(bus->sih)) {
		dhd_bus_pcie_pwr_req(bus);
	}
	/* Detect endianness. */
	little_endian = *(char *)&detect_endian_flag;

	/* In remap mode, adjust address beyond socram and redirect
	 * to devram at SOCDEVRAM_BP_ADDR since remap address > orig_ramsize
	 * is not backplane accessible
	 */

	/* Determine initial transfer parameters */
#ifdef DHD_SUPPORT_64BIT
	dsize = sizeof(uint64);
#else /* !DHD_SUPPORT_64BIT */
	dsize = sizeof(uint32);
#endif /* DHD_SUPPORT_64BIT */

	/* Do the transfer(s) */
	if (write) {
		while (size) {
#ifdef DHD_SUPPORT_64BIT
			if (size >= sizeof(uint64) && little_endian && !(address % 8)) {
				dhdpcie_bus_wtcm64(bus, address, *((uint64 *)data));
			}
#else /* !DHD_SUPPORT_64BIT */
			if (size >= sizeof(uint32) && little_endian && !(address % 4)) {
				dhdpcie_bus_wtcm32(bus, address, *((uint32*)data));
			}
#endif /* DHD_SUPPORT_64BIT */
			else {
				dsize = sizeof(uint8);
				dhdpcie_bus_wtcm8(bus, address, *data);
			}

			/* Adjust for next transfer (if any) */
			if ((size -= dsize)) {
				data += dsize;
				address += dsize;
			}
		}
	} else {
		while (size) {
#ifdef DHD_SUPPORT_64BIT
			if (size >= sizeof(uint64) && little_endian && !(address % 8))
			{
				*(uint64 *)data = dhdpcie_bus_rtcm64(bus, address);
			}
#else /* !DHD_SUPPORT_64BIT */
			if (size >= sizeof(uint32) && little_endian && !(address % 4))
			{
				*(uint32 *)data = dhdpcie_bus_rtcm32(bus, address);
			}
#endif /* DHD_SUPPORT_64BIT */
			else {
				dsize = sizeof(uint8);
				*data = dhdpcie_bus_rtcm8(bus, address);
			}

			/* Adjust for next transfer (if any) */
			if ((size -= dsize) > 0) {
				data += dsize;
				address += dsize;
			}
		}
	}
	if (MULTIBP_ENAB(bus->sih)) {
		dhd_bus_pcie_pwr_req_clear(bus);
	}
	return BCME_OK;
} /* dhdpcie_bus_membytes */
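
/*
 * Example (illustrative): reading a 32-bit word from dongle memory through
 * dhdpcie_bus_membytes() and converting from the dongle's little-endian
 * layout to host order; 'addr' is a hypothetical backplane address:
 *
 *	uint32 val = 0;
 *	if (dhdpcie_bus_membytes(bus, FALSE, addr,
 *		(uint8 *)&val, sizeof(val)) >= 0)
 *		val = ltoh32(val);	// shared memory is little-endian
 */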
/**
 * Transfers transmit (ethernet) packets that were queued in the (flow controlled) flow ring
 * queue to the (non flow controlled) flow ring.
 */
int BCMFASTPATH
dhd_bus_schedule_queue(struct dhd_bus *bus, uint16 flow_id, bool txs)
{
	flow_ring_node_t *flow_ring_node;
	int ret = BCME_OK;
#ifdef DHD_LOSSLESS_ROAMING
	dhd_pub_t *dhdp = bus->dhd;
#endif // endif

	DHD_INFO(("%s: flow_id is %d\n", __FUNCTION__, flow_id));

	/* Validate flow_id */
	if (flow_id >= bus->max_submission_rings) {
		DHD_ERROR(("%s: flow_id is invalid %d, max %d\n", __FUNCTION__,
			flow_id, bus->max_submission_rings));
		return 0;
	}

	flow_ring_node = DHD_FLOW_RING(bus->dhd, flow_id);

	if (flow_ring_node->prot_info == NULL) {
		DHD_ERROR((" %s : invalid flow_ring_node \n", __FUNCTION__));
		return BCME_NOTREADY;
	}

#ifdef DHD_LOSSLESS_ROAMING
	if ((dhdp->dequeue_prec_map & (1 << flow_ring_node->flow_info.tid)) == 0) {
		DHD_INFO(("%s: tid %d is not in precedence map. block scheduling\n",
			__FUNCTION__, flow_ring_node->flow_info.tid));
		return BCME_OK;
	}
#endif /* DHD_LOSSLESS_ROAMING */

	{
		unsigned long flags;
		void *txp = NULL;
		flow_queue_t *queue;
#ifdef DHD_LOSSLESS_ROAMING
		struct ether_header *eh;
		uint8 *pktdata;
#endif /* DHD_LOSSLESS_ROAMING */

		queue = &flow_ring_node->queue; /* queue associated with flow ring */

		DHD_FLOWRING_LOCK(flow_ring_node->lock, flags);

		if (flow_ring_node->status != FLOW_RING_STATUS_OPEN) {
			DHD_FLOWRING_UNLOCK(flow_ring_node->lock, flags);
			return BCME_NOTREADY;
		}

		while ((txp = dhd_flow_queue_dequeue(bus->dhd, queue)) != NULL) {
			PKTORPHAN(txp);

			/*
			 * Modifying the packet length caused P2P cert failures.
			 * Specifically, on test cases where a packet of size 52 bytes
			 * was injected, the sniffer capture showed 62 bytes, because of
			 * which the cert tests failed. So the change below is made
			 * router-specific only.
			 */
#ifdef DHDTCPACK_SUPPRESS
			if (bus->dhd->tcpack_sup_mode != TCPACK_SUP_HOLD) {
				ret = dhd_tcpack_check_xmit(bus->dhd, txp);
				if (ret != BCME_OK) {
					DHD_ERROR(("%s: dhd_tcpack_check_xmit() error.\n",
						__FUNCTION__));
				}
			}
#endif /* DHDTCPACK_SUPPRESS */
#ifdef DHD_LOSSLESS_ROAMING
			pktdata = (uint8 *)PKTDATA(OSH_NULL, txp);
			eh = (struct ether_header *) pktdata;
			if (eh->ether_type == hton16(ETHER_TYPE_802_1X)) {
				uint8 prio = (uint8)PKTPRIO(txp);
				/* Restore to original priority for 802.1X packet */
				if (prio == PRIO_8021D_NC) {
					PKTSETPRIO(txp, dhdp->prio_8021x);
				}
			}
#endif /* DHD_LOSSLESS_ROAMING */

			/* Attempt to transfer packet over flow ring */
			ret = dhd_prot_txdata(bus->dhd, txp, flow_ring_node->flow_info.ifindex);
			if (ret != BCME_OK) { /* may not have resources in flow ring */
				DHD_INFO(("%s: Reinsert %d\n", __FUNCTION__, ret));
				dhd_prot_txdata_write_flush(bus->dhd, flow_id);
				/* reinsert at head */
				dhd_flow_queue_reinsert(bus->dhd, queue, txp);
				DHD_FLOWRING_UNLOCK(flow_ring_node->lock, flags);

				/* If we are able to requeue back, return success */
				return BCME_OK;
			}
		}

#ifdef DHD_HP2P
		if (!flow_ring_node->hp2p_ring) {
			dhd_prot_txdata_write_flush(bus->dhd, flow_id);
		}
#else
		dhd_prot_txdata_write_flush(bus->dhd, flow_id);
#endif // endif

		DHD_FLOWRING_UNLOCK(flow_ring_node->lock, flags);
	}

	return ret;
} /* dhd_bus_schedule_queue */
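
/*
 * Example (illustrative): when the flow ring runs out of space, the packet
 * just dequeued is pushed back at the *head* of the queue so ordering is
 * preserved for the next scheduling pass. The pattern, with hypothetical
 * queue/ring operations:
 *
 *	while ((pkt = queue_dequeue(q)) != NULL) {
 *		if (ring_post(ring, pkt) != BCME_OK) {
 *			queue_reinsert_head(q, pkt);	// keep order, retry later
 *			break;
 *		}
 *	}
 */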
/** Sends an (ethernet) data frame (in 'txp') to the dongle. Callee disposes of txp. */
int BCMFASTPATH
dhd_bus_txdata(struct dhd_bus *bus, void *txp, uint8 ifidx)
{
	uint16 flowid;
#ifdef IDLE_TX_FLOW_MGMT
	uint8 node_status;
#endif /* IDLE_TX_FLOW_MGMT */
	flow_queue_t *queue;
	flow_ring_node_t *flow_ring_node;
	unsigned long flags;
	int ret = BCME_OK;
	void *txp_pend = NULL;

	if (!bus->dhd->flowid_allocator) {
		DHD_ERROR(("%s: Flow ring not initialized yet \n", __FUNCTION__));
		goto toss;
	}

	flowid = DHD_PKT_GET_FLOWID(txp);
	flow_ring_node = DHD_FLOW_RING(bus->dhd, flowid);

	DHD_TRACE(("%s: pkt flowid %d, status %d active %d\n",
		__FUNCTION__, flowid, flow_ring_node->status, flow_ring_node->active));

	DHD_FLOWRING_LOCK(flow_ring_node->lock, flags);
	if ((flowid >= bus->dhd->num_flow_rings) ||
#ifdef IDLE_TX_FLOW_MGMT
		(!flow_ring_node->active))
#else
		(!flow_ring_node->active) ||
		(flow_ring_node->status == FLOW_RING_STATUS_DELETE_PENDING) ||
		(flow_ring_node->status == FLOW_RING_STATUS_STA_FREEING))
#endif /* IDLE_TX_FLOW_MGMT */
	{
		DHD_FLOWRING_UNLOCK(flow_ring_node->lock, flags);
		DHD_INFO(("%s: Dropping pkt flowid %d, status %d active %d\n",
			__FUNCTION__, flowid, flow_ring_node->status,
			flow_ring_node->active));
		ret = BCME_ERROR;
		goto toss;
	}

#ifdef IDLE_TX_FLOW_MGMT
	node_status = flow_ring_node->status;

	/* handle the different status states here */
	switch (node_status)
	{
		case FLOW_RING_STATUS_OPEN:
			if (bus->enable_idle_flowring_mgmt) {
				/* Move the node to the head of active list */
				dhd_flow_ring_move_to_active_list_head(bus, flow_ring_node);
			}
			break;

		case FLOW_RING_STATUS_SUSPENDED:
			DHD_INFO(("Need to Initiate TX Flow resume\n"));
			/* Issue resume_ring request */
			dhd_bus_flow_ring_resume_request(bus,
					flow_ring_node);
			break;

		case FLOW_RING_STATUS_CREATE_PENDING:
		case FLOW_RING_STATUS_RESUME_PENDING:
			/* Don't do anything here */
			DHD_INFO(("Waiting for Flow create/resume! status is %u\n",
				node_status));
			break;

		case FLOW_RING_STATUS_DELETE_PENDING:
		default:
			DHD_ERROR(("Dropping packet!! flowid %u status is %u\n",
				flowid, node_status));
			/* error here!! */
			ret = BCME_ERROR;
			DHD_FLOWRING_UNLOCK(flow_ring_node->lock, flags);
			goto toss;
	}
	/* Now queue the packet */
#endif /* IDLE_TX_FLOW_MGMT */

	queue = &flow_ring_node->queue; /* queue associated with flow ring */

	if ((ret = dhd_flow_queue_enqueue(bus->dhd, queue, txp)) != BCME_OK)
		txp_pend = txp;

	DHD_FLOWRING_UNLOCK(flow_ring_node->lock, flags);

	if (flow_ring_node->status) {
		DHD_INFO(("%s: Enq pkt flowid %d, status %d active %d\n",
			__FUNCTION__, flowid, flow_ring_node->status,
			flow_ring_node->active));
		if (txp_pend) {
			txp = txp_pend;
			goto toss;
		}
		return BCME_OK;
	}
	ret = dhd_bus_schedule_queue(bus, flowid, FALSE); /* from queue to flowring */

	/* If we have anything pending, try to push into q */
	if (txp_pend) {
		DHD_FLOWRING_LOCK(flow_ring_node->lock, flags);

		if ((ret = dhd_flow_queue_enqueue(bus->dhd, queue, txp_pend)) != BCME_OK) {
			DHD_FLOWRING_UNLOCK(flow_ring_node->lock, flags);
			txp = txp_pend;
			goto toss;
		}

		DHD_FLOWRING_UNLOCK(flow_ring_node->lock, flags);
	}

	return ret;

toss:
	DHD_INFO(("%s: Toss %d\n", __FUNCTION__, ret));
	PKTCFREE(bus->dhd->osh, txp, TRUE);
	return ret;
} /* dhd_bus_txdata */
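
/*
 * Example (illustrative): transmit is a two-step hand-off -- enqueue into the
 * per-flow software queue under the flow ring lock, then drain the queue into
 * the DMA flow ring. A condensed sketch of the same sequence:
 *
 *	DHD_FLOWRING_LOCK(node->lock, flags);
 *	err = dhd_flow_queue_enqueue(dhd, &node->queue, pkt);
 *	DHD_FLOWRING_UNLOCK(node->lock, flags);
 *	if (err == BCME_OK)
 *		dhd_bus_schedule_queue(bus, flowid, FALSE);	// queue -> flow ring
 */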
void
dhd_bus_stop_queue(struct dhd_bus *bus)
{
	dhd_txflowcontrol(bus->dhd, ALL_INTERFACES, ON);
}

void
dhd_bus_start_queue(struct dhd_bus *bus)
{
	/*
	 * The Tx queue was stopped because of a resource shortage (or because
	 * the bus was not in a state to be turned on).
	 *
	 * The network interface is restarted only once enough resources are
	 * available again, i.e. after the flag indicating the resource
	 * shortage has been cleared.
	 */
	if (dhd_prot_check_tx_resource(bus->dhd)) {
		DHD_ERROR(("%s: Interface NOT started, previously stopped "
			"due to resource shortage\n", __FUNCTION__));
		return;
	}
	dhd_txflowcontrol(bus->dhd, ALL_INTERFACES, OFF);
}
/* Device console input function */
int dhd_bus_console_in(dhd_pub_t *dhd, uchar *msg, uint msglen)
{
	dhd_bus_t *bus = dhd->bus;
	uint32 addr, val;
	int rv;

	/* Address could be zero if CONSOLE := 0 in dongle Makefile */
	if (bus->console_addr == 0)
		return BCME_UNSUPPORTED;

	/* Don't allow input if dongle is in reset */
	if (bus->dhd->dongle_reset) {
		return BCME_NOTREADY;
	}

	/* Zero cbuf_index */
	addr = bus->console_addr + OFFSETOF(hnd_cons_t, cbuf_idx);
	val = htol32(0);
	if ((rv = dhdpcie_bus_membytes(bus, TRUE, addr, (uint8 *)&val, sizeof(val))) < 0)
		goto done;

	/* Write message into cbuf */
	addr = bus->console_addr + OFFSETOF(hnd_cons_t, cbuf);
	if ((rv = dhdpcie_bus_membytes(bus, TRUE, addr, (uint8 *)msg, msglen)) < 0)
		goto done;

	/* Write length into vcons_in */
	addr = bus->console_addr + OFFSETOF(hnd_cons_t, vcons_in);
	val = htol32(msglen);
	if ((rv = dhdpcie_bus_membytes(bus, TRUE, addr, (uint8 *)&val, sizeof(val))) < 0)
		goto done;

	/* generate an interrupt to dongle to indicate that it needs to process cons command */
	dhdpcie_send_mb_data(bus, H2D_HOST_CONS_INT);

done:
	return rv;
} /* dhd_bus_console_in */
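
/*
 * Example (illustrative): pushing a console command to the dongle. The command
 * string here is a placeholder; valid commands depend on the dongle firmware:
 *
 *	char cmd[] = "mu";	// hypothetical dongle console command
 *	int err = dhd_bus_console_in(dhd, (uchar *)cmd, sizeof(cmd));
 *	if (err < 0)
 *		DHD_ERROR(("console input failed: %d\n", err));
 */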
/**
 * Called on frame reception: the frame was received from the dongle on interface 'ifidx' and is
 * contained in 'pkt'. Processes the rx frame and forwards it up to netif.
 */
void BCMFASTPATH
dhd_bus_rx_frame(struct dhd_bus *bus, void* pkt, int ifidx, uint pkt_count)
{
	dhd_rx_frame(bus->dhd, ifidx, pkt, pkt_count, 0);
}
void
dhdpcie_setbar1win(dhd_bus_t *bus, uint32 addr)
{
	dhdpcie_os_setbar1win(bus, addr);
}

/** 'offset' is a backplane address */
void
dhdpcie_bus_wtcm8(dhd_bus_t *bus, ulong offset, uint8 data)
{
	if (bus->is_linkdown) {
		DHD_LOG_MEM(("%s: PCIe link was down\n", __FUNCTION__));
		return;
	} else {
		dhdpcie_os_wtcm8(bus, offset, data);
	}
}

uint8
dhdpcie_bus_rtcm8(dhd_bus_t *bus, ulong offset)
{
	volatile uint8 data;
	if (bus->is_linkdown) {
		DHD_LOG_MEM(("%s: PCIe link was down\n", __FUNCTION__));
		data = (uint8)-1;
	} else {
		data = dhdpcie_os_rtcm8(bus, offset);
	}
	return data;
}

void
dhdpcie_bus_wtcm32(dhd_bus_t *bus, ulong offset, uint32 data)
{
	if (bus->is_linkdown) {
		DHD_LOG_MEM(("%s: PCIe link was down\n", __FUNCTION__));
		return;
	} else {
		dhdpcie_os_wtcm32(bus, offset, data);
	}
}

void
dhdpcie_bus_wtcm16(dhd_bus_t *bus, ulong offset, uint16 data)
{
	if (bus->is_linkdown) {
		DHD_LOG_MEM(("%s: PCIe link was down\n", __FUNCTION__));
		return;
	} else {
		dhdpcie_os_wtcm16(bus, offset, data);
	}
}

#ifdef DHD_SUPPORT_64BIT
void
dhdpcie_bus_wtcm64(dhd_bus_t *bus, ulong offset, uint64 data)
{
	if (bus->is_linkdown) {
		DHD_LOG_MEM(("%s: PCIe link was down\n", __FUNCTION__));
		return;
	} else {
		dhdpcie_os_wtcm64(bus, offset, data);
	}
}
#endif /* DHD_SUPPORT_64BIT */

uint16
dhdpcie_bus_rtcm16(dhd_bus_t *bus, ulong offset)
{
	volatile uint16 data;
	if (bus->is_linkdown) {
		DHD_LOG_MEM(("%s: PCIe link was down\n", __FUNCTION__));
		data = (uint16)-1;
	} else {
		data = dhdpcie_os_rtcm16(bus, offset);
	}
	return data;
}

uint32
dhdpcie_bus_rtcm32(dhd_bus_t *bus, ulong offset)
{
	volatile uint32 data;
	if (bus->is_linkdown) {
		DHD_LOG_MEM(("%s: PCIe link was down\n", __FUNCTION__));
		data = (uint32)-1;
	} else {
		data = dhdpcie_os_rtcm32(bus, offset);
	}
	return data;
}

#ifdef DHD_SUPPORT_64BIT
uint64
dhdpcie_bus_rtcm64(dhd_bus_t *bus, ulong offset)
{
	volatile uint64 data;
	if (bus->is_linkdown) {
		DHD_LOG_MEM(("%s: PCIe link was down\n", __FUNCTION__));
		data = (uint64)-1;
	} else {
		data = dhdpcie_os_rtcm64(bus, offset);
	}
	return data;
}
#endif /* DHD_SUPPORT_64BIT */
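
/*
 * Example (illustrative): the read accessors above return the all-ones
 * pattern when the link is down, mirroring what PCIe itself returns for
 * reads to a dead endpoint, so callers can treat all-ones values as suspect:
 *
 *	uint32 v = dhdpcie_bus_rtcm32(bus, offset);
 *	if (v == (uint32)-1)
 *		; // possibly a dead link rather than real data -- verify before use
 */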
/** A snippet of dongle memory is shared between host and dongle */
void
dhd_bus_cmn_writeshared(dhd_bus_t *bus, void *data, uint32 len, uint8 type, uint16 ringid)
{
	uint64 long_data;
	ulong addr; /* dongle address */

	DHD_INFO(("%s: writing to dongle type %d len %d\n", __FUNCTION__, type, len));

	if (bus->is_linkdown) {
		DHD_ERROR(("%s: PCIe link was down\n", __FUNCTION__));
		return;
	}

	if (MULTIBP_ENAB(bus->sih)) {
		dhd_bus_pcie_pwr_req(bus);
	}
	switch (type) {
		case D2H_DMA_SCRATCH_BUF:
			addr = DHD_PCIE_SHARED_MEMBER_ADDR(bus, host_dma_scratch_buffer);
			long_data = HTOL64(*(uint64 *)data);
			dhdpcie_bus_membytes(bus, TRUE, addr, (uint8*) &long_data, len);
			if (dhd_msg_level & DHD_INFO_VAL) {
				prhex(__FUNCTION__, data, len);
			}
			break;

		case D2H_DMA_SCRATCH_BUF_LEN :
			addr = DHD_PCIE_SHARED_MEMBER_ADDR(bus, host_dma_scratch_buffer_len);
			dhdpcie_bus_wtcm32(bus, addr, (uint32) HTOL32(*(uint32 *)data));
			if (dhd_msg_level & DHD_INFO_VAL) {
				prhex(__FUNCTION__, data, len);
			}
			break;

		case H2D_DMA_INDX_WR_BUF:
			long_data = HTOL64(*(uint64 *)data);
			addr = DHD_RING_INFO_MEMBER_ADDR(bus, h2d_w_idx_hostaddr);
			dhdpcie_bus_membytes(bus, TRUE, addr, (uint8*) &long_data, len);
			if (dhd_msg_level & DHD_INFO_VAL) {
				prhex(__FUNCTION__, data, len);
			}
			break;

		case H2D_DMA_INDX_RD_BUF:
			long_data = HTOL64(*(uint64 *)data);
			addr = DHD_RING_INFO_MEMBER_ADDR(bus, h2d_r_idx_hostaddr);
			dhdpcie_bus_membytes(bus, TRUE, addr, (uint8*) &long_data, len);
			if (dhd_msg_level & DHD_INFO_VAL) {
				prhex(__FUNCTION__, data, len);
			}
			break;

		case D2H_DMA_INDX_WR_BUF:
			long_data = HTOL64(*(uint64 *)data);
			addr = DHD_RING_INFO_MEMBER_ADDR(bus, d2h_w_idx_hostaddr);
			dhdpcie_bus_membytes(bus, TRUE, addr, (uint8*) &long_data, len);
			if (dhd_msg_level & DHD_INFO_VAL) {
				prhex(__FUNCTION__, data, len);
			}
			break;

		case D2H_DMA_INDX_RD_BUF:
			long_data = HTOL64(*(uint64 *)data);
			addr = DHD_RING_INFO_MEMBER_ADDR(bus, d2h_r_idx_hostaddr);
			dhdpcie_bus_membytes(bus, TRUE, addr, (uint8*) &long_data, len);
			if (dhd_msg_level & DHD_INFO_VAL) {
				prhex(__FUNCTION__, data, len);
			}
			break;

		case H2D_IFRM_INDX_WR_BUF:
			long_data = HTOL64(*(uint64 *)data);
			addr = DHD_RING_INFO_MEMBER_ADDR(bus, ifrm_w_idx_hostaddr);
			dhdpcie_bus_membytes(bus, TRUE, addr, (uint8*) &long_data, len);
			if (dhd_msg_level & DHD_INFO_VAL) {
				prhex(__FUNCTION__, data, len);
			}
			break;

		case RING_ITEM_LEN :
			addr = DHD_RING_MEM_MEMBER_ADDR(bus, ringid, len_items);
			dhdpcie_bus_wtcm16(bus, addr, (uint16) HTOL16(*(uint16 *)data));
			break;

		case RING_MAX_ITEMS :
			addr = DHD_RING_MEM_MEMBER_ADDR(bus, ringid, max_item);
			dhdpcie_bus_wtcm16(bus, addr, (uint16) HTOL16(*(uint16 *)data));
			break;

		case RING_BUF_ADDR :
			long_data = HTOL64(*(uint64 *)data);
			addr = DHD_RING_MEM_MEMBER_ADDR(bus, ringid, base_addr);
			dhdpcie_bus_membytes(bus, TRUE, addr, (uint8 *) &long_data, len);
			if (dhd_msg_level & DHD_INFO_VAL) {
				prhex(__FUNCTION__, data, len);
			}
			break;

		case RING_WR_UPD :
			addr = bus->ring_sh[ringid].ring_state_w;
			dhdpcie_bus_wtcm16(bus, addr, (uint16) HTOL16(*(uint16 *)data));
			break;

		case RING_RD_UPD :
			addr = bus->ring_sh[ringid].ring_state_r;
			dhdpcie_bus_wtcm16(bus, addr, (uint16) HTOL16(*(uint16 *)data));
			break;

		case D2H_MB_DATA:
			addr = bus->d2h_mb_data_ptr_addr;
			dhdpcie_bus_wtcm32(bus, addr, (uint32) HTOL32(*(uint32 *)data));
			break;

		case H2D_MB_DATA:
			addr = bus->h2d_mb_data_ptr_addr;
			dhdpcie_bus_wtcm32(bus, addr, (uint32) HTOL32(*(uint32 *)data));
			break;

		case HOST_API_VERSION:
			addr = DHD_PCIE_SHARED_MEMBER_ADDR(bus, host_cap);
			dhdpcie_bus_wtcm32(bus, addr, (uint32) HTOL32(*(uint32 *)data));
			break;

		case DNGL_TO_HOST_TRAP_ADDR:
			long_data = HTOL64(*(uint64 *)data);
			addr = DHD_PCIE_SHARED_MEMBER_ADDR(bus, host_trap_addr);
			dhdpcie_bus_membytes(bus, TRUE, addr, (uint8 *) &long_data, len);
			DHD_INFO(("Wrote trap addr:0x%x\n", (uint32) HTOL32(*(uint32 *)data)));
			break;

		case HOST_SCB_ADDR:
			addr = DHD_PCIE_SHARED_MEMBER_ADDR(bus, host_scb_addr);
#ifdef DHD_SUPPORT_64BIT
			dhdpcie_bus_wtcm64(bus, addr, (uint64) HTOL64(*(uint64 *)data));
#else /* !DHD_SUPPORT_64BIT */
			dhdpcie_bus_wtcm32(bus, addr, *((uint32*)data));
#endif /* DHD_SUPPORT_64BIT */
			DHD_INFO(("Wrote host_scb_addr:0x%x\n",
				(uint32) HTOL32(*(uint32 *)data)));
			break;

		default:
			break;
	}
	if (MULTIBP_ENAB(bus->sih)) {
		dhd_bus_pcie_pwr_req_clear(bus);
	}
} /* dhd_bus_cmn_writeshared */
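
/*
 * Example (illustrative): publishing a new host-side ring write index to the
 * dongle through the shared area; 'w_idx' and 'ringid' are hypothetical:
 *
 *	uint16 w_idx = 42;
 *	dhd_bus_cmn_writeshared(bus, &w_idx, sizeof(w_idx), RING_WR_UPD, ringid);
 */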
/** A snippet of dongle memory is shared between host and dongle */
void
dhd_bus_cmn_readshared(dhd_bus_t *bus, void* data, uint8 type, uint16 ringid)
{
	ulong addr; /* dongle address */

	if (MULTIBP_ENAB(bus->sih)) {
		dhd_bus_pcie_pwr_req(bus);
	}
	switch (type) {
		case RING_WR_UPD :
			addr = bus->ring_sh[ringid].ring_state_w;
			*(uint16*)data = LTOH16(dhdpcie_bus_rtcm16(bus, addr));
			break;

		case RING_RD_UPD :
			addr = bus->ring_sh[ringid].ring_state_r;
			*(uint16*)data = LTOH16(dhdpcie_bus_rtcm16(bus, addr));
			break;

		case TOTAL_LFRAG_PACKET_CNT :
			addr = DHD_PCIE_SHARED_MEMBER_ADDR(bus, total_lfrag_pkt_cnt);
			*(uint16*)data = LTOH16(dhdpcie_bus_rtcm16(bus, addr));
			break;

		case H2D_MB_DATA:
			addr = bus->h2d_mb_data_ptr_addr;
			*(uint32*)data = LTOH32(dhdpcie_bus_rtcm32(bus, addr));
			break;

		case D2H_MB_DATA:
			addr = bus->d2h_mb_data_ptr_addr;
			*(uint32*)data = LTOH32(dhdpcie_bus_rtcm32(bus, addr));
			break;

		case MAX_HOST_RXBUFS :
			addr = DHD_PCIE_SHARED_MEMBER_ADDR(bus, max_host_rxbufs);
			*(uint16*)data = LTOH16(dhdpcie_bus_rtcm16(bus, addr));
			break;

		case HOST_SCB_ADDR:
			addr = DHD_PCIE_SHARED_MEMBER_ADDR(bus, host_scb_size);
			*(uint32*)data = LTOH32(dhdpcie_bus_rtcm32(bus, addr));
			break;

		default :
			break;
	}
	if (MULTIBP_ENAB(bus->sih)) {
		dhd_bus_pcie_pwr_req_clear(bus);
	}
}
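
/*
 * Example (illustrative): the read path mirrors the write path above; fetching
 * the dongle's current read index for a (hypothetical) ring id:
 *
 *	uint16 r_idx = 0;
 *	dhd_bus_cmn_readshared(bus, &r_idx, RING_RD_UPD, ringid);
 */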
uint32 dhd_bus_get_sharedflags(dhd_bus_t *bus)
{
	return ((pciedev_shared_t*)bus->pcie_sh)->flags;
}

void
dhd_bus_clearcounts(dhd_pub_t *dhdp)
{
}
/**
 * @param params input buffer, NULL for 'set' operation.
 * @param plen   length of 'params' buffer, 0 for 'set' operation.
 * @param arg    output buffer
 */
int
dhd_bus_iovar_op(dhd_pub_t *dhdp, const char *name,
	void *params, int plen, void *arg, int len, bool set)
{
	dhd_bus_t *bus = dhdp->bus;
	const bcm_iovar_t *vi = NULL;
	int bcmerror = BCME_UNSUPPORTED;
	int val_size;
	uint32 actionid;

	DHD_TRACE(("%s: Enter\n", __FUNCTION__));

	ASSERT(name);
	ASSERT(len >= 0);
	if (!name || len < 0)
		return BCME_BADARG;

	/* Get MUST have return space */
	ASSERT(set || (arg && len));
	if (!(set || (arg && len)))
		return BCME_BADARG;

	/* Set does NOT take qualifiers */
	ASSERT(!set || (!params && !plen));
	if (!(!set || (!params && !plen)))
		return BCME_BADARG;

	DHD_INFO(("%s: %s %s, len %d plen %d\n", __FUNCTION__,
		name, (set ? "set" : "get"), len, plen));

	/* Look up var locally; if not found pass to host driver */
	if ((vi = bcm_iovar_lookup(dhdpcie_iovars, name)) == NULL) {
		goto exit;
	}

	if (MULTIBP_ENAB(bus->sih)) {
		if (vi->flags & DHD_IOVF_PWRREQ_BYPASS) {
			DHD_ERROR(("%s: Bypass pwr request\n", __FUNCTION__));
		} else {
			dhd_bus_pcie_pwr_req(bus);
		}
	}

	/* set up 'params' pointer in case this is a set command so that
	 * the convenience int and bool code can be common to set and get
	 */
	if (params == NULL) {
		params = arg;
		plen = len;
	}

	if (vi->type == IOVT_VOID)
		val_size = 0;
	else if (vi->type == IOVT_BUFFER)
		val_size = len;
	else
		/* all other types are integer sized */
		val_size = sizeof(int);

	actionid = set ? IOV_SVAL(vi->varid) : IOV_GVAL(vi->varid);
	bcmerror = dhdpcie_bus_doiovar(bus, vi, actionid, name, params, plen, arg, len, val_size);

exit:
	/* DEVRESET_QUIESCE/DEVRESET_ON include a dongle re-attach, which
	 * re-initializes the pwr_req_ref count to 0; clearing the power request
	 * afterwards would cause a pwr_req_ref count mismatch in the pwr req
	 * clear function and a hang. In this case, bypass the pwr req clear.
	 */
	if (bcmerror == BCME_DNGL_DEVRESET) {
		bcmerror = BCME_OK;
	} else {
		if (MULTIBP_ENAB(bus->sih)) {
			if (vi->flags & DHD_IOVF_PWRREQ_BYPASS) {
				DHD_ERROR(("%s: Bypass pwr request clear\n", __FUNCTION__));
			} else {
				dhd_bus_pcie_pwr_req_clear(bus);
			}
		}
	}
	return bcmerror;
} /* dhd_bus_iovar_op */
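
/*
 * Example (illustrative): a 'get' through dhd_bus_iovar_op(). The iovar name
 * below is a placeholder; the real names live in the dhdpcie_iovars[] table:
 *
 *	int val = 0;
 *	int err = dhd_bus_iovar_op(dhdp, "bus_iovar_name",
 *		NULL, 0, &val, sizeof(val), FALSE);	// FALSE = get
 */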
#ifdef BCM_BUZZZ
#include <bcm_buzzz.h>

int
dhd_buzzz_dump_cntrs(char *p, uint32 *core, uint32 *log,
	const int num_counters)
{
	int bytes = 0;
	uint32 ctr;
	uint32 curr[BCM_BUZZZ_COUNTERS_MAX], prev[BCM_BUZZZ_COUNTERS_MAX];
	uint32 delta[BCM_BUZZZ_COUNTERS_MAX];

	/* Compute elapsed counter values per counter event type */
	for (ctr = 0U; ctr < num_counters; ctr++) {
		prev[ctr] = core[ctr];
		curr[ctr] = *log++;
		core[ctr] = curr[ctr]; /* saved for next log */

		if (curr[ctr] < prev[ctr])
			delta[ctr] = curr[ctr] + (~0U - prev[ctr]);
		else
			delta[ctr] = (curr[ctr] - prev[ctr]);

		bytes += sprintf(p + bytes, "%12u ", delta[ctr]);
	}

	return bytes;
}
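
/*
 * Example (illustrative): the wrap handling above computes an elapsed count
 * for a free-running 32-bit counter. With unsigned modular arithmetic the
 * wrap case is handled directly (note that the explicit form above
 * under-counts by one whenever a wrap occurred):
 *
 *	uint32 delta = curr - prev;	// modular subtraction absorbs the wrap
 */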
typedef union cm3_cnts { /* export this in bcm_buzzz.h */
	uint32 u32;
	uint8 u8[4];
	struct {
		uint8 cpicnt;
		uint8 exccnt;
		uint8 sleepcnt;
		uint8 lsucnt;
	};
} cm3_cnts_t;

int
dhd_bcm_buzzz_dump_cntrs6(char *p, uint32 *core, uint32 *log)
{
	int bytes = 0;

	uint32 cyccnt, instrcnt;
	cm3_cnts_t cm3_cnts;
	uint8 foldcnt;

	{ /* 32bit cyccnt */
		uint32 curr, prev, delta;
		prev = core[0]; curr = *log++; core[0] = curr;
		if (curr < prev)
			delta = curr + (~0U - prev);
		else
			delta = (curr - prev);

		bytes += sprintf(p + bytes, "%12u ", delta);
		cyccnt = delta;
	}

	{ /* Extract the 4 cnts: cpi, exc, sleep and lsu */
		int i;
		uint8 max8 = ~0;
		cm3_cnts_t curr, prev, delta;
		prev.u32 = core[1]; curr.u32 = * log++; core[1] = curr.u32;
		for (i = 0; i < 4; i++) {
			if (curr.u8[i] < prev.u8[i])
				delta.u8[i] = curr.u8[i] + (max8 - prev.u8[i]);
			else
				delta.u8[i] = (curr.u8[i] - prev.u8[i]);
			bytes += sprintf(p + bytes, "%4u ", delta.u8[i]);
		}
		cm3_cnts.u32 = delta.u32;
	}

	{ /* Extract the foldcnt from arg0 */
		uint8 curr, prev, delta, max8 = ~0;
		bcm_buzzz_arg0_t arg0; arg0.u32 = *log;
		prev = core[2]; curr = arg0.klog.cnt; core[2] = curr;
		if (curr < prev)
			delta = curr + (max8 - prev);
		else
			delta = (curr - prev);
		bytes += sprintf(p + bytes, "%4u ", delta);
		foldcnt = delta;
	}

	instrcnt = cyccnt - (cm3_cnts.u8[0] + cm3_cnts.u8[1] + cm3_cnts.u8[2]
		+ cm3_cnts.u8[3]) + foldcnt;
	if (instrcnt > 0xFFFFFF00)
		bytes += sprintf(p + bytes, "[%10s] ", "~");
	else
		bytes += sprintf(p + bytes, "[%10u] ", instrcnt);
	return bytes;
}
int
dhd_buzzz_dump_log(char *p, uint32 *core, uint32 *log, bcm_buzzz_t *buzzz)
{
	int bytes = 0;
	bcm_buzzz_arg0_t arg0;
	static uint8 * fmt[] = BCM_BUZZZ_FMT_STRINGS;

	if (buzzz->counters == 6) {
		bytes += dhd_bcm_buzzz_dump_cntrs6(p, core, log);
		log += 2; /* 32bit cyccnt + (4 x 8bit) CM3 */
	} else {
		bytes += dhd_buzzz_dump_cntrs(p, core, log, buzzz->counters);
		log += buzzz->counters; /* (N x 32bit) CR4=3, CA7=4 */
	}

	/* Dump the logged arguments using the registered formats */
	arg0.u32 = *log++;

	switch (arg0.klog.args) {
		case 0:
			bytes += sprintf(p + bytes, fmt[arg0.klog.id]);
			break;
		case 1:
		{
			uint32 arg1 = *log++;
			bytes += sprintf(p + bytes, fmt[arg0.klog.id], arg1);
			break;
		}
		case 2:
		{
			uint32 arg1, arg2;
			arg1 = *log++; arg2 = *log++;
			bytes += sprintf(p + bytes, fmt[arg0.klog.id], arg1, arg2);
			break;
		}
		case 3:
		{
			uint32 arg1, arg2, arg3;
			arg1 = *log++; arg2 = *log++; arg3 = *log++;
			bytes += sprintf(p + bytes, fmt[arg0.klog.id], arg1, arg2, arg3);
			break;
		}
		case 4:
		{
			uint32 arg1, arg2, arg3, arg4;
			arg1 = *log++; arg2 = *log++;
			arg3 = *log++; arg4 = *log++;
			bytes += sprintf(p + bytes, fmt[arg0.klog.id], arg1, arg2, arg3, arg4);
			break;
		}
		default:
			printf("Maximum of four arguments supported\n");
			break;
	}

	bytes += sprintf(p + bytes, "\n");

	return bytes;
}
void dhd_buzzz_dump(bcm_buzzz_t *buzzz_p, void *buffer_p, char *p)
{
	int i;
	uint32 total, part1, part2, log_sz, core[BCM_BUZZZ_COUNTERS_MAX];
	void * log;

	for (i = 0; i < BCM_BUZZZ_COUNTERS_MAX; i++) {
		core[i] = 0;
	}

	log_sz = buzzz_p->log_sz;

	part1 = ((uint32)buzzz_p->cur - (uint32)buzzz_p->log) / log_sz;

	if (buzzz_p->wrap == TRUE) {
		part2 = ((uint32)buzzz_p->end - (uint32)buzzz_p->cur) / log_sz;
		total = (buzzz_p->buffer_sz - BCM_BUZZZ_LOGENTRY_MAXSZ) / log_sz;
	} else {
		part2 = 0U;
		total = buzzz_p->count;
	}

	if (total == 0U) {
		printf("bcm_buzzz_dump total<%u> done\n", total);
		return;
	} else {
		printf("bcm_buzzz_dump total<%u> : part2<%u> + part1<%u>\n",
			total, part2, part1);
	}

	if (part2) { /* with wrap */
		log = (void*)((size_t)buffer_p + (buzzz_p->cur - buzzz_p->log));
		while (part2--) { /* from cur to end : part2 */
			p[0] = '\0';
			dhd_buzzz_dump_log(p, core, (uint32 *)log, buzzz_p);
			printf("%s", p);
			log = (void*)((size_t)log + buzzz_p->log_sz);
		}
	}

	log = (void*)buffer_p;
	while (part1--) {
		p[0] = '\0';
		dhd_buzzz_dump_log(p, core, (uint32 *)log, buzzz_p);
		printf("%s", p);
		log = (void*)((size_t)log + buzzz_p->log_sz);
	}

	printf("bcm_buzzz_dump done.\n");
}
int dhd_buzzz_dump_dngl(dhd_bus_t *bus)
{
	bcm_buzzz_t * buzzz_p = NULL;
	void * buffer_p = NULL;
	char * page_p = NULL;
	pciedev_shared_t *sh;
	int ret = 0;

	if (bus->dhd->busstate != DHD_BUS_DATA) {
		return BCME_UNSUPPORTED;
	}
	if ((page_p = (char *)MALLOC(bus->dhd->osh, 4096)) == NULL) {
		printf("Page memory allocation failure\n");
		goto done;
	}
	if ((buzzz_p = MALLOC(bus->dhd->osh, sizeof(bcm_buzzz_t))) == NULL) {
		printf("BCM BUZZZ memory allocation failure\n");
		goto done;
	}

	ret = dhdpcie_readshared(bus);
	if (ret < 0) {
		DHD_ERROR(("%s :Shared area read failed \n", __FUNCTION__));
		goto done;
	}

	sh = bus->pcie_sh;

	DHD_INFO(("%s buzzz:%08x\n", __FUNCTION__, sh->buzz_dbg_ptr));

	if (sh->buzz_dbg_ptr != 0U) { /* Fetch and display dongle BUZZZ Trace */

		dhdpcie_bus_membytes(bus, FALSE, (ulong)sh->buzz_dbg_ptr,
			(uint8 *)buzzz_p, sizeof(bcm_buzzz_t));

		printf("BUZZZ[0x%08x]: log<0x%08x> cur<0x%08x> end<0x%08x> "
			"count<%u> status<%u> wrap<%u>\n"
			"cpu<0x%02X> counters<%u> group<%u> buffer_sz<%u> log_sz<%u>\n",
			(int)sh->buzz_dbg_ptr,
			(int)buzzz_p->log, (int)buzzz_p->cur, (int)buzzz_p->end,
			buzzz_p->count, buzzz_p->status, buzzz_p->wrap,
			buzzz_p->cpu_idcode, buzzz_p->counters, buzzz_p->group,
			buzzz_p->buffer_sz, buzzz_p->log_sz);

		if (buzzz_p->count == 0) {
			printf("Empty dongle BUZZZ trace\n\n");
			goto done;
		}

		/* Allocate memory for trace buffer and format strings */
		buffer_p = MALLOC(bus->dhd->osh, buzzz_p->buffer_sz);
		if (buffer_p == NULL) {
			printf("Buffer memory allocation failure\n");
			goto done;
		}

		/* Fetch the trace. format strings are exported via bcm_buzzz.h */
		dhdpcie_bus_membytes(bus, FALSE, (uint32)buzzz_p->log, /* Trace */
			(uint8 *)buffer_p, buzzz_p->buffer_sz);

		/* Process and display the trace using formatted output */
		{
			int ctr;
			for (ctr = 0; ctr < buzzz_p->counters; ctr++) {
				printf("<Evt[%02X]> ", buzzz_p->eventid[ctr]);
			}
			printf("<code execution point>\n");
		}

		dhd_buzzz_dump(buzzz_p, buffer_p, page_p);

		printf("----- End of dongle BCM BUZZZ Trace -----\n\n");

		MFREE(bus->dhd->osh, buffer_p, buzzz_p->buffer_sz); buffer_p = NULL;
	}

done:
	if (page_p) MFREE(bus->dhd->osh, page_p, 4096);
	/* free buffer_p before buzzz_p, since its size is read from buzzz_p */
	if (buffer_p) MFREE(bus->dhd->osh, buffer_p, buzzz_p->buffer_sz);
	if (buzzz_p) MFREE(bus->dhd->osh, buzzz_p, sizeof(bcm_buzzz_t));

	return BCME_OK;
}
#endif /* BCM_BUZZZ */
#define PCIE_GEN2(sih) ((BUSTYPE((sih)->bustype) == PCI_BUS) && \
	((sih)->buscoretype == PCIE2_CORE_ID))

#ifdef DHD_PCIE_REG_ACCESS
static bool
pcie2_mdiosetblock(dhd_bus_t *bus, uint blk)
{
	uint mdiodata, mdioctrl, i = 0;
	uint pcie_serdes_spinwait = 200;

	mdioctrl = MDIOCTL2_DIVISOR_VAL | (0x1F << MDIOCTL2_REGADDR_SHF);
	mdiodata = (blk << MDIODATA2_DEVADDR_SHF) | MDIODATA2_DONE;

	si_corereg(bus->sih, bus->sih->buscoreidx, PCIE2_MDIO_CONTROL, ~0, mdioctrl);
	si_corereg(bus->sih, bus->sih->buscoreidx, PCIE2_MDIO_WR_DATA, ~0, mdiodata);

	OSL_DELAY(10);
	/* retry till the transaction is complete */
	while (i < pcie_serdes_spinwait) {
		uint mdioctrl_read = si_corereg(bus->sih, bus->sih->buscoreidx, PCIE2_MDIO_WR_DATA,
			0, 0);
		if (!(mdioctrl_read & MDIODATA2_DONE)) {
			break;
		}
		OSL_DELAY(1000);
		i++;
	}

	if (i >= pcie_serdes_spinwait) {
		DHD_ERROR(("pcie_mdiosetblock: timed out\n"));
		return FALSE;
	}

	return TRUE;
}
#endif /* DHD_PCIE_REG_ACCESS */
#define PCIE_FLR_CAPAB_BIT		28
#define PCIE_FUNCTION_LEVEL_RESET_BIT	15

/* Use longer delays only on QT (emulation) HW; FPGA and silicon use the same delay */
#ifdef BCMQT_HW
#define DHD_FUNCTION_LEVEL_RESET_DELAY	300000u
#define DHD_SSRESET_STATUS_RETRY_DELAY	10000u
#else
#define DHD_FUNCTION_LEVEL_RESET_DELAY	70u /* 70 msec delay */
#define DHD_SSRESET_STATUS_RETRY_DELAY	40u
#endif // endif
/*
 * Increase the SSReset de-assert wait to 8 ms, since the re-scan
 * can take longer on 4378B0.
 */
#define DHD_SSRESET_STATUS_RETRIES	200u
static void
dhdpcie_enum_reg_init(dhd_bus_t *bus)
{
	/* initialize Function control register (clear bit 4) to HW init value */
	si_corereg(bus->sih, bus->sih->buscoreidx,
		OFFSETOF(sbpcieregs_t, ftn_ctrl.control), ~0,
		PCIE_CPLCA_ENABLE | PCIE_DLY_PERST_TO_COE);

	/* clear IntMask */
	si_corereg(bus->sih, bus->sih->buscoreidx,
		OFFSETOF(sbpcieregs_t, ftn_ctrl.intmask), ~0, 0);
	/* clear IntStatus */
	si_corereg(bus->sih, bus->sih->buscoreidx,
		OFFSETOF(sbpcieregs_t, ftn_ctrl.intstatus), ~0,
		si_corereg(bus->sih, bus->sih->buscoreidx,
			OFFSETOF(sbpcieregs_t, ftn_ctrl.intstatus), 0, 0));

	/* clear MSIVector */
	si_corereg(bus->sih, bus->sih->buscoreidx,
		OFFSETOF(sbpcieregs_t, ftn_ctrl.msi_vector), ~0, 0);
	/* clear MSIIntMask */
	si_corereg(bus->sih, bus->sih->buscoreidx,
		OFFSETOF(sbpcieregs_t, ftn_ctrl.msi_intmask), ~0, 0);
	/* clear MSIIntStatus */
	si_corereg(bus->sih, bus->sih->buscoreidx,
		OFFSETOF(sbpcieregs_t, ftn_ctrl.msi_intstatus), ~0,
		si_corereg(bus->sih, bus->sih->buscoreidx,
			OFFSETOF(sbpcieregs_t, ftn_ctrl.msi_intstatus), 0, 0));

	/* clear PowerIntMask */
	si_corereg(bus->sih, bus->sih->buscoreidx,
		OFFSETOF(sbpcieregs_t, ftn_ctrl.pwr_intmask), ~0, 0);
	/* clear PowerIntStatus */
	si_corereg(bus->sih, bus->sih->buscoreidx,
		OFFSETOF(sbpcieregs_t, ftn_ctrl.pwr_intstatus), ~0,
		si_corereg(bus->sih, bus->sih->buscoreidx,
			OFFSETOF(sbpcieregs_t, ftn_ctrl.pwr_intstatus), 0, 0));

	/* clear MailboxIntMask */
	si_corereg(bus->sih, bus->sih->buscoreidx,
		OFFSETOF(sbpcieregs_t, ftn_ctrl.mbox_intmask), ~0, 0);
	/* clear MailboxInt */
	si_corereg(bus->sih, bus->sih->buscoreidx,
		OFFSETOF(sbpcieregs_t, ftn_ctrl.mbox_intstatus), ~0,
		si_corereg(bus->sih, bus->sih->buscoreidx,
			OFFSETOF(sbpcieregs_t, ftn_ctrl.mbox_intstatus), 0, 0));
}
int
dhd_bus_perform_flr(dhd_bus_t *bus, bool force_fail)
{
	uint flr_capab;
	uint val;
	int retry = 0;

	DHD_ERROR(("******** Perform FLR ********\n"));

	if (PCIE_ENUM_RESET_WAR_ENAB(bus->sih->buscorerev)) {
		if (bus->pcie_mailbox_mask != 0) {
			dhdpcie_bus_intr_disable(bus);
		}
		/* initialize F0 enum registers before FLR for rev66/67 */
		dhdpcie_enum_reg_init(bus);
	}

	/* Read PCIE_CFG_DEVICE_CAPABILITY bit 28 to check FLR capability */
	val = OSL_PCI_READ_CONFIG(bus->osh, PCIE_CFG_DEVICE_CAPABILITY, sizeof(val));
	flr_capab = val & (1 << PCIE_FLR_CAPAB_BIT);
	DHD_INFO(("Read Device Capability: reg=0x%x read val=0x%x flr_capab=0x%x\n",
		PCIE_CFG_DEVICE_CAPABILITY, val, flr_capab));
	if (!flr_capab) {
		DHD_ERROR(("Chip does not support FLR\n"));
		return BCME_UNSUPPORTED;
	}

	/* Save pcie config space */
	DHD_INFO(("Save Pcie Config Space\n"));
	DHD_PCIE_CONFIG_SAVE(bus);

	/* Set bit 15 of PCIE_CFG_DEVICE_CONTROL */
	DHD_INFO(("Set PCIE_FUNCTION_LEVEL_RESET_BIT(%d) of PCIE_CFG_DEVICE_CONTROL(0x%x)\n",
		PCIE_FUNCTION_LEVEL_RESET_BIT, PCIE_CFG_DEVICE_CONTROL));
	val = OSL_PCI_READ_CONFIG(bus->osh, PCIE_CFG_DEVICE_CONTROL, sizeof(val));
	DHD_INFO(("read_config: reg=0x%x read val=0x%x\n", PCIE_CFG_DEVICE_CONTROL, val));
	val = val | (1 << PCIE_FUNCTION_LEVEL_RESET_BIT);
	DHD_INFO(("write_config: reg=0x%x write val=0x%x\n", PCIE_CFG_DEVICE_CONTROL, val));
	OSL_PCI_WRITE_CONFIG(bus->osh, PCIE_CFG_DEVICE_CONTROL, sizeof(val), val);

	/* wait for DHD_FUNCTION_LEVEL_RESET_DELAY msec */
	DHD_INFO(("Delay of %d msec\n", DHD_FUNCTION_LEVEL_RESET_DELAY));
	OSL_DELAY(DHD_FUNCTION_LEVEL_RESET_DELAY * 1000u);

	if (force_fail) {
		DHD_ERROR(("Set PCIE_SSRESET_DISABLE_BIT(%d) of PCIE_CFG_SUBSYSTEM_CONTROL(0x%x)\n",
			PCIE_SSRESET_DISABLE_BIT, PCIE_CFG_SUBSYSTEM_CONTROL));
		val = OSL_PCI_READ_CONFIG(bus->osh, PCIE_CFG_SUBSYSTEM_CONTROL, sizeof(val));
		DHD_ERROR(("read_config: reg=0x%x read val=0x%x\n", PCIE_CFG_SUBSYSTEM_CONTROL,
			val));
		val = val | (1 << PCIE_SSRESET_DISABLE_BIT);
		DHD_ERROR(("write_config: reg=0x%x write val=0x%x\n", PCIE_CFG_SUBSYSTEM_CONTROL,
			val));
		OSL_PCI_WRITE_CONFIG(bus->osh, PCIE_CFG_SUBSYSTEM_CONTROL, sizeof(val), val);

		val = OSL_PCI_READ_CONFIG(bus->osh, PCIE_CFG_SUBSYSTEM_CONTROL, sizeof(val));
		DHD_ERROR(("read_config: reg=0x%x read val=0x%x\n", PCIE_CFG_SUBSYSTEM_CONTROL,
			val));
	}

	/* Clear bit 15 of PCIE_CFG_DEVICE_CONTROL */
	DHD_INFO(("Clear PCIE_FUNCTION_LEVEL_RESET_BIT(%d) of PCIE_CFG_DEVICE_CONTROL(0x%x)\n",
		PCIE_FUNCTION_LEVEL_RESET_BIT, PCIE_CFG_DEVICE_CONTROL));
	val = OSL_PCI_READ_CONFIG(bus->osh, PCIE_CFG_DEVICE_CONTROL, sizeof(val));
	DHD_INFO(("read_config: reg=0x%x read val=0x%x\n", PCIE_CFG_DEVICE_CONTROL, val));
	val = val & ~(1 << PCIE_FUNCTION_LEVEL_RESET_BIT);
	DHD_INFO(("write_config: reg=0x%x write val=0x%x\n", PCIE_CFG_DEVICE_CONTROL, val));
	OSL_PCI_WRITE_CONFIG(bus->osh, PCIE_CFG_DEVICE_CONTROL, sizeof(val), val);

	/* Wait till bit 13 of PCIE_CFG_SUBSYSTEM_CONTROL is cleared */
	DHD_INFO(("Wait till PCIE_SSRESET_STATUS_BIT(%d) of PCIE_CFG_SUBSYSTEM_CONTROL(0x%x) "
		"is cleared\n", PCIE_SSRESET_STATUS_BIT, PCIE_CFG_SUBSYSTEM_CONTROL));
	do {
		val = OSL_PCI_READ_CONFIG(bus->osh, PCIE_CFG_SUBSYSTEM_CONTROL, sizeof(val));
		DHD_ERROR(("read_config: reg=0x%x read val=0x%x\n",
			PCIE_CFG_SUBSYSTEM_CONTROL, val));
		val = val & (1 << PCIE_SSRESET_STATUS_BIT);
		OSL_DELAY(DHD_SSRESET_STATUS_RETRY_DELAY);
	} while (val && (retry++ < DHD_SSRESET_STATUS_RETRIES));

	if (val) {
		DHD_ERROR(("ERROR: reg=0x%x bit %d is not cleared\n",
			PCIE_CFG_SUBSYSTEM_CONTROL, PCIE_SSRESET_STATUS_BIT));
		/* User has to fire the IOVAR again, if force_fail is needed */
		if (force_fail) {
			bus->flr_force_fail = FALSE;
			DHD_ERROR(("%s cleared flr_force_fail flag\n", __FUNCTION__));
		}
		return BCME_DONGLE_DOWN;
	}

	/* Restore pcie config space */
	DHD_INFO(("Restore Pcie Config Space\n"));
	DHD_PCIE_CONFIG_RESTORE(bus);

	DHD_ERROR(("******** FLR Succeeded ********\n"));

	return BCME_OK;
}
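
/*
 * Example (illustrative): the FLR completion wait above is a bounded poll on
 * a config-space status bit; the generic shape of the pattern, with
 * hypothetical names:
 *
 *	int retry = 0;
 *	uint val;
 *	do {
 *		val = read_status() & BIT_MASK;	// placeholder accessor and mask
 *		OSL_DELAY(RETRY_DELAY_US);
 *	} while (val && (retry++ < MAX_RETRIES));
 *	if (val)
 *		; // timed out: the bit never cleared
 */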
  4476. #ifdef DHD_USE_BP_RESET
  4477. #define DHD_BP_RESET_ASPM_DISABLE_DELAY 500u /* usec */
  4478. #define DHD_BP_RESET_STATUS_RETRY_DELAY 40u /* usec */
  4479. #define DHD_BP_RESET_STATUS_RETRIES 50u
  4480. #define PCIE_CFG_SPROM_CTRL_SB_RESET_BIT 10
  4481. #define PCIE_CFG_CLOCK_CTRL_STATUS_BP_RESET_BIT 21
  4482. int
  4483. dhd_bus_perform_bp_reset(struct dhd_bus *bus)
  4484. {
  4485. uint val;
  4486. int retry = 0;
  4487. uint dar_clk_ctrl_status_reg = DAR_CLK_CTRL(bus->sih->buscorerev);
  4488. int ret = BCME_OK;
  4489. bool cond;
  4490. DHD_ERROR(("******** Perform BP reset ********\n"));
  4491. /* Disable ASPM */
  4492. DHD_INFO(("Disable ASPM: Clear bits(1-0) of PCIECFGREG_LINK_STATUS_CTRL(0x%x)\n",
  4493. PCIECFGREG_LINK_STATUS_CTRL));
  4494. val = OSL_PCI_READ_CONFIG(bus->osh, PCIECFGREG_LINK_STATUS_CTRL, sizeof(val));
  4495. DHD_INFO(("read_config: reg=0x%x read val=0x%x\n", PCIECFGREG_LINK_STATUS_CTRL, val));
  4496. val = val & (~PCIE_ASPM_ENAB);
  4497. DHD_INFO(("write_config: reg=0x%x write val=0x%x\n", PCIECFGREG_LINK_STATUS_CTRL, val));
  4498. OSL_PCI_WRITE_CONFIG(bus->osh, PCIECFGREG_LINK_STATUS_CTRL, sizeof(val), val);
  4499. /* wait for delay usec */
  4500. DHD_INFO(("Delay of %d usec\n", DHD_BP_RESET_ASPM_DISABLE_DELAY));
  4501. OSL_DELAY(DHD_BP_RESET_ASPM_DISABLE_DELAY);
  4502. /* Set bit 10 of PCIECFGREG_SPROM_CTRL */
  4503. DHD_INFO(("Set PCIE_CFG_SPROM_CTRL_SB_RESET_BIT(%d) of PCIECFGREG_SPROM_CTRL(0x%x)\n",
  4504. PCIE_CFG_SPROM_CTRL_SB_RESET_BIT, PCIECFGREG_SPROM_CTRL));
  4505. val = OSL_PCI_READ_CONFIG(bus->osh, PCIECFGREG_SPROM_CTRL, sizeof(val));
  4506. DHD_INFO(("read_config: reg=0x%x read val=0x%x\n", PCIECFGREG_SPROM_CTRL, val));
  4507. val = val | (1 << PCIE_CFG_SPROM_CTRL_SB_RESET_BIT);
  4508. DHD_INFO(("write_config: reg=0x%x write val=0x%x\n", PCIECFGREG_SPROM_CTRL, val));
  4509. OSL_PCI_WRITE_CONFIG(bus->osh, PCIECFGREG_SPROM_CTRL, sizeof(val), val);
  4510. /* Wait till bit backplane reset is ASSERTED i,e
  4511. * bit 10 of PCIECFGREG_SPROM_CTRL is cleared.
  4512. * Only after this, poll for 21st bit of DAR reg 0xAE0 is valid
  4513. * else DAR register will read previous old value
  4514. */
  4515. DHD_INFO(("Wait till PCIE_CFG_SPROM_CTRL_SB_RESET_BIT(%d) of "
  4516. "PCIECFGREG_SPROM_CTRL(0x%x) is cleared\n",
  4517. PCIE_CFG_SPROM_CTRL_SB_RESET_BIT, PCIECFGREG_SPROM_CTRL));
  4518. do {
  4519. val = OSL_PCI_READ_CONFIG(bus->osh, PCIECFGREG_SPROM_CTRL, sizeof(val));
  4520. DHD_INFO(("read_config: reg=0x%x read val=0x%x\n", PCIECFGREG_SPROM_CTRL, val));
  4521. cond = val & (1 << PCIE_CFG_SPROM_CTRL_SB_RESET_BIT);
  4522. OSL_DELAY(DHD_BP_RESET_STATUS_RETRY_DELAY);
  4523. } while (cond && (retry++ < DHD_BP_RESET_STATUS_RETRIES));
  4524. if (cond) {
  4525. DHD_ERROR(("ERROR: reg=0x%x bit %d is not cleared\n",
  4526. PCIECFGREG_SPROM_CTRL, PCIE_CFG_SPROM_CTRL_SB_RESET_BIT));
  4527. ret = BCME_ERROR;
  4528. goto aspm_enab;
  4529. }
  4530. /* Wait till bit 21 of dar_clk_ctrl_status_reg is cleared */
  4531. DHD_INFO(("Wait till PCIE_CFG_CLOCK_CTRL_STATUS_BP_RESET_BIT(%d) of "
  4532. "dar_clk_ctrl_status_reg(0x%x) is cleared\n",
  4533. PCIE_CFG_CLOCK_CTRL_STATUS_BP_RESET_BIT, dar_clk_ctrl_status_reg));
  4534. do {
  4535. val = si_corereg(bus->sih, bus->sih->buscoreidx,
  4536. dar_clk_ctrl_status_reg, 0, 0);
  4537. DHD_INFO(("read_dar si_corereg: reg=0x%x read val=0x%x\n",
  4538. dar_clk_ctrl_status_reg, val));
  4539. cond = val & (1 << PCIE_CFG_CLOCK_CTRL_STATUS_BP_RESET_BIT);
  4540. OSL_DELAY(DHD_BP_RESET_STATUS_RETRY_DELAY);
  4541. } while (cond && (retry++ < DHD_BP_RESET_STATUS_RETRIES));
  4542. if (cond) {
  4543. DHD_ERROR(("ERROR: reg=0x%x bit %d is not cleared\n",
  4544. dar_clk_ctrl_status_reg, PCIE_CFG_CLOCK_CTRL_STATUS_BP_RESET_BIT));
  4545. ret = BCME_ERROR;
  4546. }
aspm_enab:
	/* Enable ASPM */
	DHD_INFO(("Enable ASPM: set bit 1 of PCIECFGREG_LINK_STATUS_CTRL(0x%x)\n",
		PCIECFGREG_LINK_STATUS_CTRL));
	val = OSL_PCI_READ_CONFIG(bus->osh, PCIECFGREG_LINK_STATUS_CTRL, sizeof(val));
	DHD_INFO(("read_config: reg=0x%x read val=0x%x\n", PCIECFGREG_LINK_STATUS_CTRL, val));
	val = val | (PCIE_ASPM_L1_ENAB);
	DHD_INFO(("write_config: reg=0x%x write val=0x%x\n", PCIECFGREG_LINK_STATUS_CTRL, val));
	OSL_PCI_WRITE_CONFIG(bus->osh, PCIECFGREG_LINK_STATUS_CTRL, sizeof(val), val);

	/* Report the actual status: the aspm_enab label above is also reached
	 * on a poll timeout, so don't unconditionally claim success.
	 */
	DHD_ERROR(("******** BP reset %s ********\n",
		(ret == BCME_OK) ? "Succeeded" : "Failed"));
	return ret;
}
#endif /* DHD_USE_BP_RESET */
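
/* Summary of the backplane (BP) reset sequence above, added for readability
 * (descriptive note; the authoritative behavior is the code itself):
 *   1. Clear the ASPM L0s/L1 enable bits in the PCIe Link Control register
 *      so the link cannot drop into a low-power state mid-reset.
 *   2. Set PCIE_CFG_SPROM_CTRL_SB_RESET_BIT in PCIECFGREG_SPROM_CTRL to
 *      trigger the backplane reset, then poll until the bit self-clears.
 *   3. Poll PCIE_CFG_CLOCK_CTRL_STATUS_BP_RESET_BIT in the DAR clock
 *      control/status register until the reset completes.
 *   4. Re-enable ASPM L1.
 */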
int
dhd_bus_devreset(dhd_pub_t *dhdp, uint8 flag)
{
	dhd_bus_t *bus = dhdp->bus;
	int bcmerror = 0;
	unsigned long flags;
	unsigned long flags_bus;
#ifdef CONFIG_ARCH_MSM
	int retry = POWERUP_MAX_RETRY;
#endif /* CONFIG_ARCH_MSM */

	if (flag == TRUE) { /* Turn off WLAN */
		/* Removing Power */
		DHD_ERROR(("%s: == Power OFF ==\n", __FUNCTION__));
		DHD_ERROR(("%s: making dhdpub up FALSE\n", __FUNCTION__));
		bus->dhd->up = FALSE;

		/* wait for other contexts to finish -- if required a call
		 * to OSL_DELAY for 1s can be added to give other contexts
		 * a chance to finish
		 */
		dhdpcie_advertise_bus_cleanup(bus->dhd);

		if (bus->dhd->busstate != DHD_BUS_DOWN) {
#ifdef DHD_PCIE_NATIVE_RUNTIMEPM
			atomic_set(&bus->dhd->block_bus, TRUE);
			dhd_flush_rx_tx_wq(bus->dhd);
#endif /* DHD_PCIE_NATIVE_RUNTIMEPM */
#ifdef BCMPCIE_OOB_HOST_WAKE
			/* Clean up any pending host wake IRQ */
			dhd_bus_oob_intr_set(bus->dhd, FALSE);
			dhd_bus_oob_intr_unregister(bus->dhd);
#endif /* BCMPCIE_OOB_HOST_WAKE */
			dhd_os_wd_timer(dhdp, 0);
			dhd_bus_stop(bus, TRUE);
			if (bus->intr) {
				DHD_BUS_LOCK(bus->bus_lock, flags_bus);
				dhdpcie_bus_intr_disable(bus);
				DHD_BUS_UNLOCK(bus->bus_lock, flags_bus);
				dhdpcie_free_irq(bus);
			}
			dhd_deinit_bus_lock(bus);
			dhd_deinit_backplane_access_lock(bus);
			dhd_bus_release_dongle(bus);
			dhdpcie_bus_free_resource(bus);
			bcmerror = dhdpcie_bus_disable_device(bus);
			if (bcmerror) {
				DHD_ERROR(("%s: dhdpcie_bus_disable_device: %d\n",
					__FUNCTION__, bcmerror));
#ifdef DHD_PCIE_NATIVE_RUNTIMEPM
				atomic_set(&bus->dhd->block_bus, FALSE);
#endif /* DHD_PCIE_NATIVE_RUNTIMEPM */
			}
			/* Clean up protocol data after the Bus Master Enable bit is
			 * cleared, so that the host can safely unmap DMA and remove
			 * the allocated buffers from the PKTID MAP. Some application
			 * processors with a System MMU trigger a kernel panic when
			 * they detect an access to DMA-unmapped memory from a device
			 * behind the System MMU; such an access is possible because
			 * the dongle can still touch DMA-unmapped memory after
			 * dhd_prot_reset() if bus mastering is not disabled first.
			 * For this reason, dhd_prot_reset() and dhd_clear() must be
			 * called after dhdpcie_bus_disable_device().
			 */
			dhd_prot_reset(dhdp);
			dhd_clear(dhdp);
#ifdef CONFIG_ARCH_MSM
			bcmerror = dhdpcie_bus_clock_stop(bus);
			if (bcmerror) {
				DHD_ERROR(("%s: host clock stop failed: %d\n",
					__FUNCTION__, bcmerror));
#ifdef DHD_PCIE_NATIVE_RUNTIMEPM
				atomic_set(&bus->dhd->block_bus, FALSE);
#endif /* DHD_PCIE_NATIVE_RUNTIMEPM */
				goto done;
			}
#endif /* CONFIG_ARCH_MSM */
			DHD_GENERAL_LOCK(bus->dhd, flags);
			DHD_ERROR(("%s: making DHD_BUS_DOWN\n", __FUNCTION__));
			bus->dhd->busstate = DHD_BUS_DOWN;
			DHD_GENERAL_UNLOCK(bus->dhd, flags);
#ifdef DHD_PCIE_NATIVE_RUNTIMEPM
			atomic_set(&bus->dhd->block_bus, FALSE);
#endif /* DHD_PCIE_NATIVE_RUNTIMEPM */
		} else {
			if (bus->intr) {
				dhdpcie_free_irq(bus);
			}
#ifdef BCMPCIE_OOB_HOST_WAKE
			/* Clean up any pending host wake IRQ */
			dhd_bus_oob_intr_set(bus->dhd, FALSE);
			dhd_bus_oob_intr_unregister(bus->dhd);
#endif /* BCMPCIE_OOB_HOST_WAKE */
			dhd_dpc_kill(bus->dhd);
			if (!bus->no_bus_init) {
				dhd_bus_release_dongle(bus);
				dhdpcie_bus_free_resource(bus);
				bcmerror = dhdpcie_bus_disable_device(bus);
				if (bcmerror) {
					DHD_ERROR(("%s: dhdpcie_bus_disable_device: %d\n",
						__FUNCTION__, bcmerror));
				}
				/* Clean up protocol data after the Bus Master Enable bit
				 * is cleared, so that the host can safely unmap DMA and
				 * remove the allocated buffers from the PKTID MAP. Some
				 * application processors with a System MMU trigger a
				 * kernel panic when they detect an access to DMA-unmapped
				 * memory from a device behind the System MMU; such an
				 * access is possible because the dongle can still touch
				 * DMA-unmapped memory after dhd_prot_reset() if bus
				 * mastering is not disabled first. For this reason,
				 * dhd_prot_reset() and dhd_clear() must be called after
				 * dhdpcie_bus_disable_device().
				 */
				dhd_prot_reset(dhdp);
				dhd_clear(dhdp);
			} else {
				bus->no_bus_init = FALSE;
			}
#ifdef CONFIG_ARCH_MSM
			bcmerror = dhdpcie_bus_clock_stop(bus);
			if (bcmerror) {
				DHD_ERROR(("%s: host clock stop failed: %d\n",
					__FUNCTION__, bcmerror));
				goto done;
			}
#endif /* CONFIG_ARCH_MSM */
		}

		bus->dhd->dongle_reset = TRUE;
		DHD_ERROR(("%s: WLAN OFF Done\n", __FUNCTION__));

	} else { /* Turn on WLAN */
		if (bus->dhd->busstate == DHD_BUS_DOWN) {
			/* Powering On */
			DHD_ERROR(("%s: == Power ON ==\n", __FUNCTION__));
#ifdef CONFIG_ARCH_MSM
			while (--retry) {
				bcmerror = dhdpcie_bus_clock_start(bus);
				if (!bcmerror) {
					DHD_ERROR(("%s: dhdpcie_bus_clock_start OK\n",
						__FUNCTION__));
					break;
				} else {
					OSL_SLEEP(10);
				}
			}

			if (bcmerror && !retry) {
				DHD_ERROR(("%s: host pcie clock enable failed: %d\n",
					__FUNCTION__, bcmerror));
				goto done;
			}
#if defined(DHD_CONTROL_PCIE_ASPM_WIFI_TURNON)
			dhd_bus_aspm_enable_rc_ep(bus, FALSE);
#endif /* DHD_CONTROL_PCIE_ASPM_WIFI_TURNON */
#endif /* CONFIG_ARCH_MSM */
			bus->is_linkdown = 0;
			bus->cto_triggered = 0;
#ifdef SUPPORT_LINKDOWN_RECOVERY
			bus->read_shm_fail = FALSE;
#endif /* SUPPORT_LINKDOWN_RECOVERY */
			bcmerror = dhdpcie_bus_enable_device(bus);
			if (bcmerror) {
				DHD_ERROR(("%s: host configuration restore failed: %d\n",
					__FUNCTION__, bcmerror));
				goto done;
			}

			bcmerror = dhdpcie_bus_alloc_resource(bus);
			if (bcmerror) {
				DHD_ERROR(("%s: dhdpcie_bus_resource_alloc failed: %d\n",
					__FUNCTION__, bcmerror));
				goto done;
			}

			bcmerror = dhdpcie_bus_dongle_attach(bus);
			if (bcmerror) {
				DHD_ERROR(("%s: dhdpcie_bus_dongle_attach failed: %d\n",
					__FUNCTION__, bcmerror));
				goto done;
			}

			bcmerror = dhd_bus_request_irq(bus);
			if (bcmerror) {
				DHD_ERROR(("%s: dhd_bus_request_irq failed: %d\n",
					__FUNCTION__, bcmerror));
				goto done;
			}

			bus->dhd->dongle_reset = FALSE;

#if defined(DHD_CONTROL_PCIE_CPUCORE_WIFI_TURNON)
			dhd_irq_set_affinity(bus->dhd, cpumask_of(1));
#endif /* DHD_CONTROL_PCIE_CPUCORE_WIFI_TURNON */

			bcmerror = dhd_bus_start(dhdp);
			if (bcmerror) {
				DHD_ERROR(("%s: dhd_bus_start: %d\n",
					__FUNCTION__, bcmerror));
				goto done;
			}

			bus->dhd->up = TRUE;
			/* Re-enable the watchdog which was disabled in
			 * dhdpcie_advertise_bus_cleanup()
			 */
			if (bus->dhd->dhd_watchdog_ms_backup) {
				DHD_ERROR(("%s: Enabling wdtick after dhd init\n",
					__FUNCTION__));
				dhd_os_wd_timer(bus->dhd, bus->dhd->dhd_watchdog_ms_backup);
			}
			DHD_ERROR(("%s: WLAN Power On Done\n", __FUNCTION__));
		} else {
			DHD_ERROR(("%s: Bus is not in DOWN state, nothing to do\n",
				__FUNCTION__));
			goto done;
		}
	}

done:
	if (bcmerror) {
		DHD_GENERAL_LOCK(bus->dhd, flags);
		DHD_ERROR(("%s: making DHD_BUS_DOWN\n", __FUNCTION__));
		bus->dhd->busstate = DHD_BUS_DOWN;
		DHD_GENERAL_UNLOCK(bus->dhd, flags);
	}
	return bcmerror;
}
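
/* Usage note (descriptive): dhd_bus_devreset() backs the "devreset" IOVAR
 * handled further below. flag == TRUE powers WLAN off (bus torn down,
 * busstate -> DHD_BUS_DOWN, dongle_reset set); flag == FALSE powers it back
 * on (host clock, device enable, resource alloc, dongle attach, IRQ request,
 * then dhd_bus_start()). On any power-on failure the function falls through
 * to 'done' and marks the bus DHD_BUS_DOWN again.
 */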
#ifdef DHD_PCIE_REG_ACCESS
static int
pcie2_mdioop(dhd_bus_t *bus, uint physmedia, uint regaddr, bool write, uint *val,
	bool slave_bypass)
{
	uint pcie_serdes_spinwait = 200, i = 0, mdio_ctrl;
	uint32 reg32;

	pcie2_mdiosetblock(bus, physmedia);

	/* enable mdio access to SERDES */
	mdio_ctrl = MDIOCTL2_DIVISOR_VAL;
	mdio_ctrl |= (regaddr << MDIOCTL2_REGADDR_SHF);

	if (slave_bypass)
		mdio_ctrl |= MDIOCTL2_SLAVE_BYPASS;

	if (!write)
		mdio_ctrl |= MDIOCTL2_READ;

	si_corereg(bus->sih, bus->sih->buscoreidx, PCIE2_MDIO_CONTROL, ~0, mdio_ctrl);

	if (write) {
		reg32 = PCIE2_MDIO_WR_DATA;
		si_corereg(bus->sih, bus->sih->buscoreidx, PCIE2_MDIO_WR_DATA, ~0,
			*val | MDIODATA2_DONE);
	} else
		reg32 = PCIE2_MDIO_RD_DATA;

	/* retry till the transaction is complete */
	while (i < pcie_serdes_spinwait) {
		uint done_val = si_corereg(bus->sih, bus->sih->buscoreidx, reg32, 0, 0);
		if (!(done_val & MDIODATA2_DONE)) {
			if (!write) {
				*val = si_corereg(bus->sih, bus->sih->buscoreidx,
					PCIE2_MDIO_RD_DATA, 0, 0);
				*val = *val & MDIODATA2_MASK;
			}
			return 0;
		}
		OSL_DELAY(1000);
		i++;
	}
	return -1;
}
#endif /* DHD_PCIE_REG_ACCESS */
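
/* MDIO transaction flow (descriptive note): pcie2_mdioop() first selects the
 * SERDES block via pcie2_mdiosetblock(), programs PCIE2_MDIO_CONTROL with the
 * clock divisor, register address, and the READ/SLAVE_BYPASS flags, and for
 * writes posts the data with MDIODATA2_DONE set. It then polls the data
 * register for up to 200 iterations with a 1000 usec delay each (~200 ms
 * total) until the hardware clears MDIODATA2_DONE; a read result is masked
 * with MDIODATA2_MASK. Return is 0 on completion and -1 on timeout, which the
 * IOV_PCIESERDESREG handlers below map to BCME_ERROR.
 */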
/* si_backplane_access() manages a shared resource - the BAR0 mapping - hence
 * calls to it shall be serialized. This wrapper function provides such
 * serialization and shall be used everywhere instead of direct calls to
 * si_backplane_access().
 *
 * The Linux DHD driver calls si_backplane_access() from three contexts:
 * tasklet (which may invoke dhdpcie_sssr_dump()), iovar ("sbreg",
 * "membytes", etc.) and procfs (used by GDB proxy). To avoid race conditions,
 * calls to si_backplane_access() shall be serialized. The presence of the
 * tasklet context implies that serialization shall be based on a spinlock.
 * Hence the Linux implementation of dhd_pcie_backplane_access_[un]lock() is
 * spinlock-based.
 *
 * Other platforms may add their own implementations of
 * dhd_pcie_backplane_access_[un]lock() as needed (e.g. if serialization is
 * not needed the implementation might be empty).
 */
static uint
serialized_backplane_access(dhd_bus_t *bus, uint addr, uint size, uint *val, bool read)
{
	uint ret;
	unsigned long flags;

	DHD_BACKPLANE_ACCESS_LOCK(bus->backplane_access_lock, flags);
	ret = si_backplane_access(bus->sih, addr, size, val, read);
	DHD_BACKPLANE_ACCESS_UNLOCK(bus->backplane_access_lock, flags);
	return ret;
}
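
/* Usage sketch (descriptive): a 32-bit backplane read through the serialized
 * wrapper, as done by the "sbreg"/"bar0secwinreg" IOVAR handlers further
 * below. 'addr' and 'val32' here are illustrative locals, not driver fields:
 *
 *   uint val32 = 0;
 *   if (serialized_backplane_access(bus, addr, sizeof(val32),
 *           &val32, TRUE) != BCME_OK) {
 *       // invalid size/addr combination
 *   }
 */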
static int
dhdpcie_get_dma_ring_indices(dhd_pub_t *dhd)
{
	int h2d_support, d2h_support;

	d2h_support = dhd->dma_d2h_ring_upd_support ? 1 : 0;
	h2d_support = dhd->dma_h2d_ring_upd_support ? 1 : 0;
	return (d2h_support | (h2d_support << 1));
}
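
/* DMA ring-index encoding used by the get/set pair (derived from the code):
 * bit 0 = D2H ring update support, bit 1 = H2D ring update support, so
 * 0 = neither, 1 = D2H only, 2 = H2D only, 3 = both. This matches the
 * (int_val & 1) / (int_val & 2) decode in dhdpcie_set_dma_ring_indices().
 */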
int
dhdpcie_set_dma_ring_indices(dhd_pub_t *dhd, int32 int_val)
{
	int bcmerror = 0;

	/* Can change it only during initialization/FW download */
	if (dhd->busstate == DHD_BUS_DOWN) {
		if ((int_val > 3) || (int_val < 0)) {
			DHD_ERROR(("Bad argument. Possible values: 0, 1, 2 & 3\n"));
			bcmerror = BCME_BADARG;
		} else {
			dhd->dma_d2h_ring_upd_support = (int_val & 1) ? TRUE : FALSE;
			dhd->dma_h2d_ring_upd_support = (int_val & 2) ? TRUE : FALSE;
			dhd->dma_ring_upd_overwrite = TRUE;
		}
	} else {
		DHD_ERROR(("%s: Can change only when bus down (before FW download)\n",
			__FUNCTION__));
		bcmerror = BCME_NOTDOWN;
	}
	return bcmerror;
}
/**
 * IOVAR handler of the DHD bus layer (in this case, the PCIe bus).
 *
 * @param actionid  e.g. IOV_SVAL(IOV_PCIEREG)
 * @param params    input buffer
 * @param plen      length in [bytes] of input buffer 'params'
 * @param arg       output buffer
 * @param len       length in [bytes] of output buffer 'arg'
 */
static int
dhdpcie_bus_doiovar(dhd_bus_t *bus, const bcm_iovar_t *vi, uint32 actionid, const char *name,
	void *params, int plen, void *arg, int len, int val_size)
{
	int bcmerror = 0;
	int32 int_val = 0;
	int32 int_val2 = 0;
	int32 int_val3 = 0;
	bool bool_val = 0;

	DHD_TRACE(("%s: Enter, action %d name %s params %p plen %d arg %p len %d val_size %d\n",
		__FUNCTION__, actionid, name, params, plen, arg, len, val_size));

	if ((bcmerror = bcm_iovar_lencheck(vi, arg, len, IOV_ISSET(actionid))) != 0)
		goto exit;

	if (plen >= (int)sizeof(int_val))
		bcopy(params, &int_val, sizeof(int_val));
	if (plen >= (int)sizeof(int_val) * 2)
		bcopy((void*)((uintptr)params + sizeof(int_val)), &int_val2, sizeof(int_val2));
	if (plen >= (int)sizeof(int_val) * 3)
		bcopy((void*)((uintptr)params + 2 * sizeof(int_val)), &int_val3, sizeof(int_val3));

	bool_val = (int_val != 0) ? TRUE : FALSE;
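	/* Note (descriptive): up to three 32-bit values are pre-extracted from
	 * 'params' above, and handlers interpret them positionally. For
	 * example, IOV_SVAL(IOV_PCIEREG) below uses int_val as the config
	 * address and int_val2 as the value to write, while
	 * IOV_SVAL(IOV_PCIESERDESREG) uses int_val/int_val2/int_val3 as
	 * physmedia/regaddr/write-data.
	 */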
	/* Check if dongle is in reset. If so, only allow DEVRESET iovars */
	if (bus->dhd->dongle_reset && !(actionid == IOV_SVAL(IOV_DEVRESET) ||
		actionid == IOV_GVAL(IOV_DEVRESET))) {
		bcmerror = BCME_NOTREADY;
		goto exit;
	}

	switch (actionid) {

	case IOV_SVAL(IOV_VARS):
		bcmerror = dhdpcie_downloadvars(bus, arg, len);
		break;
#ifdef DHD_PCIE_REG_ACCESS
	case IOV_SVAL(IOV_PCIEREG):
		si_corereg(bus->sih, bus->sih->buscoreidx, OFFSETOF(sbpcieregs_t, configaddr), ~0,
			int_val);
		si_corereg(bus->sih, bus->sih->buscoreidx, OFFSETOF(sbpcieregs_t, configdata), ~0,
			int_val2);
		break;

	case IOV_GVAL(IOV_PCIEREG):
		si_corereg(bus->sih, bus->sih->buscoreidx, OFFSETOF(sbpcieregs_t, configaddr), ~0,
			int_val);
		int_val = si_corereg(bus->sih, bus->sih->buscoreidx,
			OFFSETOF(sbpcieregs_t, configdata), 0, 0);
		bcopy(&int_val, arg, sizeof(int_val));
		break;

	case IOV_SVAL(IOV_PCIECOREREG):
		si_corereg(bus->sih, bus->sih->buscoreidx, int_val, ~0, int_val2);
		break;

	case IOV_GVAL(IOV_BAR0_SECWIN_REG):
	{
		sdreg_t sdreg;
		uint32 addr, size;

		bcopy(params, &sdreg, sizeof(sdreg));
		addr = sdreg.offset;
		size = sdreg.func;

		if (serialized_backplane_access(bus, addr, size,
			(uint *)&int_val, TRUE) != BCME_OK) {
			DHD_ERROR(("Invalid size/addr combination\n"));
			bcmerror = BCME_ERROR;
			break;
		}
		bcopy(&int_val, arg, sizeof(int32));
		break;
	}

	case IOV_SVAL(IOV_BAR0_SECWIN_REG):
	{
		sdreg_t sdreg;
		uint32 addr, size;

		bcopy(params, &sdreg, sizeof(sdreg));
		addr = sdreg.offset;
		size = sdreg.func;

		if (serialized_backplane_access(bus, addr, size,
			(uint *)(&sdreg.value), FALSE) != BCME_OK) {
			DHD_ERROR(("Invalid size/addr combination\n"));
			bcmerror = BCME_ERROR;
		}
		break;
	}

	case IOV_GVAL(IOV_SBREG):
	{
		sdreg_t sdreg;
		uint32 addr, size;

		bcopy(params, &sdreg, sizeof(sdreg));
		addr = sdreg.offset | SI_ENUM_BASE(bus->sih);
		size = sdreg.func;

		if (serialized_backplane_access(bus, addr, size,
			(uint *)&int_val, TRUE) != BCME_OK) {
			DHD_ERROR(("Invalid size/addr combination\n"));
			bcmerror = BCME_ERROR;
			break;
		}
		bcopy(&int_val, arg, size);
		break;
	}

	case IOV_SVAL(IOV_SBREG):
	{
		sdreg_t sdreg;
		uint32 addr, size;

		bcopy(params, &sdreg, sizeof(sdreg));
		addr = sdreg.offset | SI_ENUM_BASE(bus->sih);
		size = sdreg.func;

		if (serialized_backplane_access(bus, addr, size,
			(uint *)(&sdreg.value), FALSE) != BCME_OK) {
			DHD_ERROR(("Invalid size/addr combination\n"));
			bcmerror = BCME_ERROR;
		}
		break;
	}

	case IOV_GVAL(IOV_PCIESERDESREG):
	{
		uint val;

		if (!PCIE_GEN2(bus->sih)) {
			DHD_ERROR(("supported only in pcie gen2\n"));
			bcmerror = BCME_ERROR;
			break;
		}

		if (!pcie2_mdioop(bus, int_val, int_val2, FALSE, &val, FALSE)) {
			bcopy(&val, arg, sizeof(int32));
		} else {
			DHD_ERROR(("pcie2_mdioop failed.\n"));
			bcmerror = BCME_ERROR;
		}
		break;
	}

	case IOV_SVAL(IOV_PCIESERDESREG):
		if (!PCIE_GEN2(bus->sih)) {
			DHD_ERROR(("supported only in pcie gen2\n"));
			bcmerror = BCME_ERROR;
			break;
		}
		if (pcie2_mdioop(bus, int_val, int_val2, TRUE, (uint *)&int_val3, FALSE)) {
			DHD_ERROR(("pcie2_mdioop failed.\n"));
			bcmerror = BCME_ERROR;
		}
		break;

	case IOV_GVAL(IOV_PCIECOREREG):
		int_val = si_corereg(bus->sih, bus->sih->buscoreidx, int_val, 0, 0);
		bcopy(&int_val, arg, sizeof(int_val));
		break;

	case IOV_SVAL(IOV_PCIECFGREG):
		OSL_PCI_WRITE_CONFIG(bus->osh, int_val, 4, int_val2);
		break;

	case IOV_GVAL(IOV_PCIECFGREG):
		int_val = OSL_PCI_READ_CONFIG(bus->osh, int_val, 4);
		bcopy(&int_val, arg, sizeof(int_val));
		break;
#endif /* DHD_PCIE_REG_ACCESS */

	case IOV_SVAL(IOV_PCIE_LPBK):
		bcmerror = dhdpcie_bus_lpback_req(bus, int_val);
		break;

	case IOV_SVAL(IOV_PCIE_DMAXFER): {
		dma_xfer_info_t *dmaxfer = (dma_xfer_info_t *)arg;

		if (!dmaxfer)
			return BCME_BADARG;
		if (dmaxfer->version != DHD_DMAXFER_VERSION)
			return BCME_VERSION;
		if (dmaxfer->length != sizeof(dma_xfer_info_t)) {
			return BCME_BADLEN;
		}

		bcmerror = dhdpcie_bus_dmaxfer_req(bus, dmaxfer->num_bytes,
			dmaxfer->src_delay, dmaxfer->dest_delay,
			dmaxfer->type, dmaxfer->core_num,
			dmaxfer->should_wait);
		if (dmaxfer->should_wait && bcmerror >= 0) {
			bcmerror = dhdmsgbuf_dmaxfer_status(bus->dhd, dmaxfer);
		}
		break;
	}

	case IOV_GVAL(IOV_PCIE_DMAXFER): {
		dma_xfer_info_t *dmaxfer = (dma_xfer_info_t *)params;

		if (!dmaxfer)
			return BCME_BADARG;
		if (dmaxfer->version != DHD_DMAXFER_VERSION)
			return BCME_VERSION;
		if (dmaxfer->length != sizeof(dma_xfer_info_t)) {
			return BCME_BADLEN;
		}

		bcmerror = dhdmsgbuf_dmaxfer_status(bus->dhd, dmaxfer);
		break;
	}

	case IOV_GVAL(IOV_PCIE_SUSPEND):
		int_val = (bus->dhd->busstate == DHD_BUS_SUSPEND) ? 1 : 0;
		bcopy(&int_val, arg, val_size);
		break;

	case IOV_SVAL(IOV_PCIE_SUSPEND):
		if (bool_val) { /* Suspend */
			int ret;
			unsigned long flags;

			/*
			 * If some other context is busy, wait until they are done,
			 * before starting suspend
			 */
			ret = dhd_os_busbusy_wait_condition(bus->dhd,
				&bus->dhd->dhd_bus_busy_state, DHD_BUS_BUSY_IN_DHD_IOVAR);
			if (ret == 0) {
				DHD_ERROR(("%s: Wait timed out, dhd_bus_busy_state = 0x%x\n",
					__FUNCTION__, bus->dhd->dhd_bus_busy_state));
				return BCME_BUSY;
			}
			DHD_GENERAL_LOCK(bus->dhd, flags);
			DHD_BUS_BUSY_SET_SUSPEND_IN_PROGRESS(bus->dhd);
			DHD_GENERAL_UNLOCK(bus->dhd, flags);
#ifdef DHD_PCIE_NATIVE_RUNTIMEPM
			dhdpcie_bus_suspend(bus, TRUE, TRUE);
#else
			dhdpcie_bus_suspend(bus, TRUE);
#endif /* DHD_PCIE_NATIVE_RUNTIMEPM */
			DHD_GENERAL_LOCK(bus->dhd, flags);
			DHD_BUS_BUSY_CLEAR_SUSPEND_IN_PROGRESS(bus->dhd);
			dhd_os_busbusy_wake(bus->dhd);
			DHD_GENERAL_UNLOCK(bus->dhd, flags);
		} else { /* Resume */
			unsigned long flags;

			DHD_GENERAL_LOCK(bus->dhd, flags);
			DHD_BUS_BUSY_SET_RESUME_IN_PROGRESS(bus->dhd);
			DHD_GENERAL_UNLOCK(bus->dhd, flags);

			dhdpcie_bus_suspend(bus, FALSE);

			DHD_GENERAL_LOCK(bus->dhd, flags);
			DHD_BUS_BUSY_CLEAR_RESUME_IN_PROGRESS(bus->dhd);
			dhd_os_busbusy_wake(bus->dhd);
			DHD_GENERAL_UNLOCK(bus->dhd, flags);
		}
		break;

	case IOV_GVAL(IOV_MEMSIZE):
		int_val = (int32)bus->ramsize;
		bcopy(&int_val, arg, val_size);
		break;

#ifdef DHD_BUS_MEM_ACCESS
	case IOV_SVAL(IOV_MEMBYTES):
	case IOV_GVAL(IOV_MEMBYTES):
	{
		uint32 address;		/* absolute backplane address */
		uint size, dsize;
		uint8 *data;

		bool set = (actionid == IOV_SVAL(IOV_MEMBYTES));

		ASSERT(plen >= 2*sizeof(int));

		address = (uint32)int_val;
		bcopy((char *)params + sizeof(int_val), &int_val, sizeof(int_val));
		size = (uint)int_val;

		/* Do some validation */
		dsize = set ? plen - (2 * sizeof(int)) : len;
		if (dsize < size) {
			DHD_ERROR(("%s: error on %s membytes, addr 0x%08x size %d dsize %d\n",
				__FUNCTION__, (set ? "set" : "get"), address, size, dsize));
			bcmerror = BCME_BADARG;
			break;
		}

		DHD_INFO(("%s: Request to %s %d bytes at address 0x%08x\n dsize %d ", __FUNCTION__,
			(set ? "write" : "read"), size, address, dsize));

		/* check if CR4 */
		if (si_setcore(bus->sih, ARMCR4_CORE_ID, 0) ||
			si_setcore(bus->sih, SYSMEM_CORE_ID, 0)) {
			/* if address is 0, store the reset instruction to be written in 0 */
			if (set && address == bus->dongle_ram_base) {
				bus->resetinstr = *(((uint32*)params) + 2);
			}
		} else {
			/* If we know about SOCRAM, check for a fit */
			if ((bus->orig_ramsize) &&
				((address > bus->orig_ramsize) ||
				(address + size > bus->orig_ramsize))) {
				uint8 enable, protect, remap;

				si_socdevram(bus->sih, FALSE, &enable, &protect, &remap);
				if (!enable || protect) {
					DHD_ERROR(("%s: ramsize 0x%08x doesn't have %d bytes at 0x%08x\n",
						__FUNCTION__, bus->orig_ramsize, size, address));
					DHD_ERROR(("%s: socram enable %d, protect %d\n",
						__FUNCTION__, enable, protect));
					bcmerror = BCME_BADARG;
					break;
				}

				if (!REMAP_ENAB(bus) && (address >= SOCDEVRAM_ARM_ADDR)) {
					uint32 devramsize = si_socdevram_size(bus->sih);

					if ((address < SOCDEVRAM_ARM_ADDR) ||
						(address + size > (SOCDEVRAM_ARM_ADDR + devramsize))) {
						DHD_ERROR(("%s: bad address 0x%08x, size 0x%08x\n",
							__FUNCTION__, address, size));
						DHD_ERROR(("%s: socram range 0x%08x,size 0x%08x\n",
							__FUNCTION__, SOCDEVRAM_ARM_ADDR, devramsize));
						bcmerror = BCME_BADARG;
						break;
					}
					/* move it such that address is real now */
					address -= SOCDEVRAM_ARM_ADDR;
					address += SOCDEVRAM_BP_ADDR;
					DHD_INFO(("%s: Request to %s %d bytes @ Mapped address 0x%08x\n",
						__FUNCTION__, (set ? "write" : "read"), size, address));
				} else if (REMAP_ENAB(bus) && REMAP_ISADDR(bus, address) && remap) {
					/* Can not access the remap region while the devram remap
					 * bit is set; ROM content would be returned in this case.
					 */
					DHD_ERROR(("%s: Need to disable remap for address 0x%08x\n",
						__FUNCTION__, address));
					bcmerror = BCME_ERROR;
					break;
				}
			}
		}

		/* Generate the actual data pointer */
		data = set ? (uint8*)params + 2 * sizeof(int): (uint8*)arg;

		/* Call to do the transfer */
		bcmerror = dhdpcie_bus_membytes(bus, set, address, data, size);
		break;
	}
#endif /* DHD_BUS_MEM_ACCESS */

	/* Debug related. Dumps core registers or one of the dongle memories */
	case IOV_GVAL(IOV_DUMP_DONGLE):
	{
		dump_dongle_in_t ddi = *(dump_dongle_in_t*)params;
		dump_dongle_out_t *ddo = (dump_dongle_out_t*)arg;
		uint32 *p = ddo->val;
		const uint max_offset = 4096 - 1; /* one core contains max 4096/4 registers */

		if (plen < sizeof(ddi) || len < sizeof(ddo)) {
			bcmerror = BCME_BADARG;
			break;
		}

		switch (ddi.type) {
		case DUMP_DONGLE_COREREG:
			ddo->n_bytes = 0;

			if (si_setcoreidx(bus->sih, ddi.index) == NULL) {
				break; // beyond last core: core enumeration ended
			}

			ddo->address = si_addrspace(bus->sih, CORE_SLAVE_PORT_0, CORE_BASE_ADDR_0);
			ddo->address += ddi.offset; // BP address at which this dump starts
			ddo->id = si_coreid(bus->sih);
			ddo->rev = si_corerev(bus->sih);

			while (ddi.offset < max_offset &&
				sizeof(dump_dongle_out_t) + ddo->n_bytes < (uint)len) {
				*p++ = si_corereg(bus->sih, ddi.index, ddi.offset, 0, 0);
				ddi.offset += sizeof(uint32);
				ddo->n_bytes += sizeof(uint32);
			}
			break;
		default:
			// TODO: implement d11 SHM/TPL dumping
			bcmerror = BCME_BADARG;
			break;
		}
		break;
	}
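	/* Output layout note (descriptive): for DUMP_DONGLE_COREREG the handler
	 * fills a dump_dongle_out_t header (backplane address, core id, core
	 * rev) followed by ddo->n_bytes of 32-bit register values, reading one
	 * register per 4-byte offset until either the 4 KB core register window
	 * (max_offset) or the caller's output buffer 'len' is exhausted.
	 */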
	/* Debug related. Returns a string with dongle capabilities */
	case IOV_GVAL(IOV_DNGL_CAPS):
	{
		strncpy(arg, bus->dhd->fw_capabilities,
			MIN(strlen(bus->dhd->fw_capabilities), (size_t)len));
		((char*)arg)[len - 1] = '\0';
		break;
	}

#if defined(DEBUGGER) || defined(DHD_DSCOPE)
	case IOV_SVAL(IOV_GDB_SERVER):
		/* debugger_*() functions may sleep, so cannot hold spinlock */
		DHD_PERIM_UNLOCK(bus->dhd);
		if (int_val > 0) {
			debugger_init((void *) bus, &bus_ops, int_val, SI_ENUM_BASE(bus->sih));
		} else {
			debugger_close();
		}
		DHD_PERIM_LOCK(bus->dhd);
		break;
#endif /* DEBUGGER || DHD_DSCOPE */

#ifdef BCM_BUZZZ
	/* Dump dongle side buzzz trace to console */
	case IOV_GVAL(IOV_BUZZZ_DUMP):
		bcmerror = dhd_buzzz_dump_dngl(bus);
		break;
#endif /* BCM_BUZZZ */

	case IOV_SVAL(IOV_SET_DOWNLOAD_STATE):
		bcmerror = dhdpcie_bus_download_state(bus, bool_val);
		break;

	case IOV_GVAL(IOV_RAMSIZE):
		int_val = (int32)bus->ramsize;
		bcopy(&int_val, arg, val_size);
		break;

	case IOV_SVAL(IOV_RAMSIZE):
		bus->ramsize = int_val;
		bus->orig_ramsize = int_val;
		break;

	case IOV_GVAL(IOV_RAMSTART):
		int_val = (int32)bus->dongle_ram_base;
		bcopy(&int_val, arg, val_size);
		break;

	case IOV_GVAL(IOV_CC_NVMSHADOW):
	{
		struct bcmstrbuf dump_b;

		bcm_binit(&dump_b, arg, len);
		bcmerror = dhdpcie_cc_nvmshadow(bus, &dump_b);
		break;
	}

	case IOV_GVAL(IOV_SLEEP_ALLOWED):
		bool_val = bus->sleep_allowed;
		bcopy(&bool_val, arg, val_size);
		break;

	case IOV_SVAL(IOV_SLEEP_ALLOWED):
		bus->sleep_allowed = bool_val;
		break;

	case IOV_GVAL(IOV_DONGLEISOLATION):
		int_val = bus->dhd->dongle_isolation;
		bcopy(&int_val, arg, val_size);
		break;

	case IOV_SVAL(IOV_DONGLEISOLATION):
		bus->dhd->dongle_isolation = bool_val;
		break;

	case IOV_GVAL(IOV_LTRSLEEPON_UNLOOAD):
		int_val = bus->ltrsleep_on_unload;
		bcopy(&int_val, arg, val_size);
		break;

	case IOV_SVAL(IOV_LTRSLEEPON_UNLOOAD):
		bus->ltrsleep_on_unload = bool_val;
		break;

	case IOV_GVAL(IOV_DUMP_RINGUPD_BLOCK):
	{
		struct bcmstrbuf dump_b;

		bcm_binit(&dump_b, arg, len);
		bcmerror = dhd_prot_ringupd_dump(bus->dhd, &dump_b);
		break;
	}

	case IOV_GVAL(IOV_DMA_RINGINDICES):
	{
		int_val = dhdpcie_get_dma_ring_indices(bus->dhd);
		bcopy(&int_val, arg, sizeof(int_val));
		break;
	}
	case IOV_SVAL(IOV_DMA_RINGINDICES):
		bcmerror = dhdpcie_set_dma_ring_indices(bus->dhd, int_val);
		break;

	case IOV_GVAL(IOV_METADATA_DBG):
		int_val = dhd_prot_metadata_dbg_get(bus->dhd);
		bcopy(&int_val, arg, val_size);
		break;

	case IOV_SVAL(IOV_METADATA_DBG):
		dhd_prot_metadata_dbg_set(bus->dhd, (int_val != 0));
		break;

	case IOV_GVAL(IOV_RX_METADATALEN):
		int_val = dhd_prot_metadatalen_get(bus->dhd, TRUE);
		bcopy(&int_val, arg, val_size);
		break;

	case IOV_SVAL(IOV_RX_METADATALEN):
		if (int_val > 64) {
			bcmerror = BCME_BUFTOOLONG;
			break;
		}
		dhd_prot_metadatalen_set(bus->dhd, int_val, TRUE);
		break;

	case IOV_SVAL(IOV_TXP_THRESHOLD):
		dhd_prot_txp_threshold(bus->dhd, TRUE, int_val);
		break;

	case IOV_GVAL(IOV_TXP_THRESHOLD):
		int_val = dhd_prot_txp_threshold(bus->dhd, FALSE, int_val);
		bcopy(&int_val, arg, val_size);
		break;

	case IOV_SVAL(IOV_DB1_FOR_MB):
		if (int_val)
			bus->db1_for_mb = TRUE;
		else
			bus->db1_for_mb = FALSE;
		break;

	case IOV_GVAL(IOV_DB1_FOR_MB):
		if (bus->db1_for_mb)
			int_val = 1;
		else
			int_val = 0;
		bcopy(&int_val, arg, val_size);
		break;

	case IOV_GVAL(IOV_TX_METADATALEN):
		int_val = dhd_prot_metadatalen_get(bus->dhd, FALSE);
		bcopy(&int_val, arg, val_size);
		break;

	case IOV_SVAL(IOV_TX_METADATALEN):
		if (int_val > 64) {
			bcmerror = BCME_BUFTOOLONG;
			break;
		}
		dhd_prot_metadatalen_set(bus->dhd, int_val, FALSE);
		break;

	case IOV_SVAL(IOV_DEVRESET):
		switch (int_val) {
		case DHD_BUS_DEVRESET_ON:
			bcmerror = dhd_bus_devreset(bus->dhd, (uint8)int_val);
			break;
		case DHD_BUS_DEVRESET_OFF:
			bcmerror = dhd_bus_devreset(bus->dhd, (uint8)int_val);
			break;
		case DHD_BUS_DEVRESET_FLR:
			bcmerror = dhd_bus_perform_flr(bus, bus->flr_force_fail);
			break;
		case DHD_BUS_DEVRESET_FLR_FORCE_FAIL:
			bus->flr_force_fail = TRUE;
			break;
		default:
			DHD_ERROR(("%s: invalid argument for devreset\n", __FUNCTION__));
			break;
		}
		break;

	case IOV_SVAL(IOV_FORCE_FW_TRAP):
		if (bus->dhd->busstate == DHD_BUS_DATA)
			dhdpcie_fw_trap(bus);
		else {
			DHD_ERROR(("%s: Bus is NOT up\n", __FUNCTION__));
			bcmerror = BCME_NOTUP;
		}
		break;

	case IOV_GVAL(IOV_FLOW_PRIO_MAP):
		int_val = bus->dhd->flow_prio_map_type;
		bcopy(&int_val, arg, val_size);
		break;

	case IOV_SVAL(IOV_FLOW_PRIO_MAP):
		int_val = (int32)dhd_update_flow_prio_map(bus->dhd, (uint8)int_val);
		bcopy(&int_val, arg, val_size);
		break;

#ifdef DHD_PCIE_RUNTIMEPM
	case IOV_GVAL(IOV_IDLETIME):
		if (!(bus->dhd->op_mode & DHD_FLAG_MFG_MODE)) {
			int_val = bus->idletime;
		} else {
			int_val = 0;
		}
		bcopy(&int_val, arg, val_size);
		break;

	case IOV_SVAL(IOV_IDLETIME):
		if (int_val < 0) {
			bcmerror = BCME_BADARG;
		} else {
			bus->idletime = int_val;
			if (bus->idletime) {
				DHD_ENABLE_RUNTIME_PM(bus->dhd);
			} else {
				DHD_DISABLE_RUNTIME_PM(bus->dhd);
			}
		}
		break;
#endif /* DHD_PCIE_RUNTIMEPM */

	case IOV_GVAL(IOV_TXBOUND):
		int_val = (int32)dhd_txbound;
		bcopy(&int_val, arg, val_size);
		break;

	case IOV_SVAL(IOV_TXBOUND):
		dhd_txbound = (uint)int_val;
		break;

	case IOV_SVAL(IOV_H2D_MAILBOXDATA):
		dhdpcie_send_mb_data(bus, (uint)int_val);
		break;

	case IOV_SVAL(IOV_INFORINGS):
		dhd_prot_init_info_rings(bus->dhd);
		break;

	case IOV_SVAL(IOV_H2D_PHASE):
		if (bus->dhd->busstate != DHD_BUS_DOWN) {
			DHD_ERROR(("%s: Can change only when bus down (before FW download)\n",
				__FUNCTION__));
			bcmerror = BCME_NOTDOWN;
			break;
		}
		if (int_val)
			bus->dhd->h2d_phase_supported = TRUE;
		else
			bus->dhd->h2d_phase_supported = FALSE;
		break;

	case IOV_GVAL(IOV_H2D_PHASE):
		int_val = (int32) bus->dhd->h2d_phase_supported;
		bcopy(&int_val, arg, val_size);
		break;

	case IOV_SVAL(IOV_H2D_ENABLE_TRAP_BADPHASE):
		if (bus->dhd->busstate != DHD_BUS_DOWN) {
			DHD_ERROR(("%s: Can change only when bus down (before FW download)\n",
				__FUNCTION__));
			bcmerror = BCME_NOTDOWN;
			break;
		}
		if (int_val)
			bus->dhd->force_dongletrap_on_bad_h2d_phase = TRUE;
		else
			bus->dhd->force_dongletrap_on_bad_h2d_phase = FALSE;
		break;

	case IOV_GVAL(IOV_H2D_ENABLE_TRAP_BADPHASE):
		int_val = (int32) bus->dhd->force_dongletrap_on_bad_h2d_phase;
		bcopy(&int_val, arg, val_size);
		break;

	case IOV_SVAL(IOV_H2D_TXPOST_MAX_ITEM):
		if (bus->dhd->busstate != DHD_BUS_DOWN) {
			DHD_ERROR(("%s: Can change only when bus down (before FW download)\n",
				__FUNCTION__));
			bcmerror = BCME_NOTDOWN;
			break;
		}
		dhd_prot_set_h2d_max_txpost(bus->dhd, (uint16)int_val);
		break;

	case IOV_GVAL(IOV_H2D_TXPOST_MAX_ITEM):
		int_val = dhd_prot_get_h2d_max_txpost(bus->dhd);
		bcopy(&int_val, arg, val_size);
		break;

	case IOV_GVAL(IOV_RXBOUND):
		int_val = (int32)dhd_rxbound;
		bcopy(&int_val, arg, val_size);
		break;

	case IOV_SVAL(IOV_RXBOUND):
		dhd_rxbound = (uint)int_val;
		break;

	case IOV_GVAL(IOV_TRAPDATA):
	{
		struct bcmstrbuf dump_b;

		bcm_binit(&dump_b, arg, len);
		bcmerror = dhd_prot_dump_extended_trap(bus->dhd, &dump_b, FALSE);
		break;
	}

	case IOV_GVAL(IOV_TRAPDATA_RAW):
	{
		struct bcmstrbuf dump_b;

		bcm_binit(&dump_b, arg, len);
		bcmerror = dhd_prot_dump_extended_trap(bus->dhd, &dump_b, TRUE);
		break;
	}

#ifdef DHD_PCIE_REG_ACCESS
	case IOV_GVAL(IOV_PCIEASPM): {
		uint8 clkreq = 0;
		uint32 aspm = 0;

		/* this command is to hide the details, but match the lcreg
		#define PCIE_CLKREQ_ENAB	0x100
		#define PCIE_ASPM_L1_ENAB	2
		#define PCIE_ASPM_L0s_ENAB	1
		*/

		clkreq = dhdpcie_clkreq(bus->dhd->osh, 0, 0);
		aspm = dhdpcie_lcreg(bus->dhd->osh, 0, 0);
		int_val = ((clkreq & 0x1) << 8) | (aspm & PCIE_ASPM_ENAB);
		bcopy(&int_val, arg, val_size);
		break;
	}

	case IOV_SVAL(IOV_PCIEASPM): {
		uint32 tmp;

		tmp = dhdpcie_lcreg(bus->dhd->osh, 0, 0);
		dhdpcie_lcreg(bus->dhd->osh, PCIE_ASPM_ENAB,
			(tmp & ~PCIE_ASPM_ENAB) | (int_val & PCIE_ASPM_ENAB));

		dhdpcie_clkreq(bus->dhd->osh, 1, ((int_val & 0x100) >> 8));
		break;
	}
#endif /* DHD_PCIE_REG_ACCESS */
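	/* Encoding note (descriptive) for the pcieaspm IOVAR above: the value
	 * packs CLKREQ into bit 8 and the ASPM enables into the low bits,
	 * i.e. int_val = ((clkreq & 0x1) << 8) | (aspm & PCIE_ASPM_ENAB).
	 * Per the #defines quoted in the comment above, a value of 0x102 would
	 * therefore mean CLKREQ enabled with ASPM L1 only (assuming
	 * PCIE_ASPM_L1_ENAB is bit 1, as that comment suggests).
	 */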
	case IOV_SVAL(IOV_HANGREPORT):
		bus->dhd->hang_report = bool_val;
		DHD_ERROR(("%s: Set hang_report as %d\n",
			__FUNCTION__, bus->dhd->hang_report));
		break;

	case IOV_GVAL(IOV_HANGREPORT):
		int_val = (int32)bus->dhd->hang_report;
		bcopy(&int_val, arg, val_size);
		break;

	case IOV_SVAL(IOV_CTO_PREVENTION):
		bcmerror = dhdpcie_cto_init(bus, bool_val);
		break;

	case IOV_GVAL(IOV_CTO_PREVENTION):
		if (bus->sih->buscorerev < 19) {
			bcmerror = BCME_UNSUPPORTED;
			break;
		}
		int_val = (int32)bus->cto_enable;
		bcopy(&int_val, arg, val_size);
		break;

	case IOV_SVAL(IOV_CTO_THRESHOLD):
	{
		if (bus->sih->buscorerev < 19) {
			bcmerror = BCME_UNSUPPORTED;
			break;
		}
		bus->cto_threshold = (uint32)int_val;
	}
		break;

	case IOV_GVAL(IOV_CTO_THRESHOLD):
		if (bus->sih->buscorerev < 19) {
			bcmerror = BCME_UNSUPPORTED;
			break;
		}
		if (bus->cto_threshold)
			int_val = (int32)bus->cto_threshold;
		else
			int_val = (int32)PCIE_CTO_TO_THRESH_DEFAULT;

		bcopy(&int_val, arg, val_size);
		break;

	case IOV_SVAL(IOV_PCIE_WD_RESET):
		if (bool_val) {
			/* Legacy chipcommon watchdog reset */
			dhdpcie_cc_watchdog_reset(bus);
		}
		break;

	case IOV_GVAL(IOV_HWA_ENAB_BMAP):
		int_val = bus->hwa_enab_bmap;
		bcopy(&int_val, arg, val_size);
		break;

	case IOV_SVAL(IOV_HWA_ENAB_BMAP):
		bus->hwa_enab_bmap = (uint8)int_val;
		break;

	case IOV_GVAL(IOV_IDMA_ENABLE):
		int_val = bus->idma_enabled;
		bcopy(&int_val, arg, val_size);
		break;

	case IOV_SVAL(IOV_IDMA_ENABLE):
		bus->idma_enabled = (bool)int_val;
		break;

	case IOV_GVAL(IOV_IFRM_ENABLE):
		int_val = bus->ifrm_enabled;
		bcopy(&int_val, arg, val_size);
		break;

	case IOV_SVAL(IOV_IFRM_ENABLE):
		bus->ifrm_enabled = (bool)int_val;
		break;

	case IOV_GVAL(IOV_CLEAR_RING):
		bcopy(&int_val, arg, val_size);
		dhd_flow_rings_flush(bus->dhd, 0);
		break;

	case IOV_GVAL(IOV_DAR_ENABLE):
		int_val = bus->dar_enabled;
		bcopy(&int_val, arg, val_size);
		break;

	case IOV_SVAL(IOV_DAR_ENABLE):
		bus->dar_enabled = (bool)int_val;
		break;

	case IOV_GVAL(IOV_HSCBSIZE):
		bcmerror = dhd_get_hscb_info(bus->dhd, NULL, (uint32 *)arg);
		break;

#ifdef DHD_BUS_MEM_ACCESS
	case IOV_GVAL(IOV_HSCBBYTES):
		bcmerror = dhd_get_hscb_buff(bus->dhd, int_val, int_val2, (void*)arg);
		break;
#endif // endif

#ifdef DHD_HP2P
	case IOV_SVAL(IOV_HP2P_ENABLE):
		dhd_prot_hp2p_enable(bus->dhd, TRUE, int_val);
		break;

	case IOV_GVAL(IOV_HP2P_ENABLE):
		int_val = dhd_prot_hp2p_enable(bus->dhd, FALSE, int_val);
		bcopy(&int_val, arg, val_size);
		break;

	case IOV_SVAL(IOV_HP2P_PKT_THRESHOLD):
		dhd_prot_pkt_threshold(bus->dhd, TRUE, int_val);
		break;

	case IOV_GVAL(IOV_HP2P_PKT_THRESHOLD):
		int_val = dhd_prot_pkt_threshold(bus->dhd, FALSE, int_val);
		bcopy(&int_val, arg, val_size);
		break;

	case IOV_SVAL(IOV_HP2P_TIME_THRESHOLD):
		dhd_prot_time_threshold(bus->dhd, TRUE, int_val);
		break;

	case IOV_GVAL(IOV_HP2P_TIME_THRESHOLD):
		int_val = dhd_prot_time_threshold(bus->dhd, FALSE, int_val);
		bcopy(&int_val, arg, val_size);
		break;

	case IOV_SVAL(IOV_HP2P_PKT_EXPIRY):
		dhd_prot_pkt_expiry(bus->dhd, TRUE, int_val);
		break;

	case IOV_GVAL(IOV_HP2P_PKT_EXPIRY):
		int_val = dhd_prot_pkt_expiry(bus->dhd, FALSE, int_val);
		bcopy(&int_val, arg, val_size);
		break;

	case IOV_SVAL(IOV_HP2P_TXCPL_MAXITEMS):
		if (bus->dhd->busstate != DHD_BUS_DOWN) {
			return BCME_NOTDOWN;
		}
		dhd_bus_set_hp2p_ring_max_size(bus, TRUE, int_val);
		break;

	case IOV_GVAL(IOV_HP2P_TXCPL_MAXITEMS):
		int_val = dhd_bus_get_hp2p_ring_max_size(bus, TRUE);
		bcopy(&int_val, arg, val_size);
		break;

	case IOV_SVAL(IOV_HP2P_RXCPL_MAXITEMS):
		if (bus->dhd->busstate != DHD_BUS_DOWN) {
			return BCME_NOTDOWN;
		}
		dhd_bus_set_hp2p_ring_max_size(bus, FALSE, int_val);
		break;

	case IOV_GVAL(IOV_HP2P_RXCPL_MAXITEMS):
		int_val = dhd_bus_get_hp2p_ring_max_size(bus, FALSE);
		bcopy(&int_val, arg, val_size);
		break;
#endif /* DHD_HP2P */

	case IOV_SVAL(IOV_EXTDTXS_IN_TXCPL):
		if (bus->dhd->busstate != DHD_BUS_DOWN) {
			return BCME_NOTDOWN;
		}
		if (int_val)
			bus->dhd->extdtxs_in_txcpl = TRUE;
		else
			bus->dhd->extdtxs_in_txcpl = FALSE;
		break;

	case IOV_GVAL(IOV_EXTDTXS_IN_TXCPL):
		int_val = bus->dhd->extdtxs_in_txcpl;
		bcopy(&int_val, arg, val_size);
		break;

	case IOV_SVAL(IOV_HOSTRDY_AFTER_INIT):
		if (bus->dhd->busstate != DHD_BUS_DOWN) {
			return BCME_NOTDOWN;
		}
		if (int_val)
			bus->dhd->hostrdy_after_init = TRUE;
		else
			bus->dhd->hostrdy_after_init = FALSE;
		break;

	case IOV_GVAL(IOV_HOSTRDY_AFTER_INIT):
		int_val = bus->dhd->hostrdy_after_init;
		bcopy(&int_val, arg, val_size);
		break;

	default:
		bcmerror = BCME_UNSUPPORTED;
		break;
	}

exit:
	return bcmerror;
} /* dhdpcie_bus_doiovar */
/** Transfers bytes from host to dongle using pio mode */
static int
dhdpcie_bus_lpback_req(struct dhd_bus *bus, uint32 len)
{
	if (bus->dhd == NULL) {
		DHD_ERROR(("bus not inited\n"));
		return 0;
	}
	if (bus->dhd->prot == NULL) {
		DHD_ERROR(("prot is not inited\n"));
		return 0;
	}
	if (bus->dhd->busstate != DHD_BUS_DATA) {
		DHD_ERROR(("bus is not in a ready state for LPBK\n"));
		return 0;
	}
	dhdmsgbuf_lpbk_req(bus->dhd, len);
	return 0;
}
void
dhd_bus_dump_dar_registers(struct dhd_bus *bus)
{
	uint32 dar_clk_ctrl_val, dar_pwr_ctrl_val, dar_intstat_val,
		dar_errlog_val, dar_erraddr_val, dar_pcie_mbint_val;
	uint32 dar_clk_ctrl_reg, dar_pwr_ctrl_reg, dar_intstat_reg,
		dar_errlog_reg, dar_erraddr_reg, dar_pcie_mbint_reg;

	if (bus->is_linkdown && !bus->cto_triggered) {
		DHD_ERROR(("%s: link is down\n", __FUNCTION__));
		return;
	}

	dar_clk_ctrl_reg = (uint32)DAR_CLK_CTRL(bus->sih->buscorerev);
	dar_pwr_ctrl_reg = (uint32)DAR_PCIE_PWR_CTRL(bus->sih->buscorerev);
	dar_intstat_reg = (uint32)DAR_INTSTAT(bus->sih->buscorerev);
	dar_errlog_reg = (uint32)DAR_ERRLOG(bus->sih->buscorerev);
	dar_erraddr_reg = (uint32)DAR_ERRADDR(bus->sih->buscorerev);
	dar_pcie_mbint_reg = (uint32)DAR_PCIMailBoxInt(bus->sih->buscorerev);

	if (bus->sih->buscorerev < 24) {
		DHD_ERROR(("%s: DAR not supported for corerev(%d) < 24\n",
			__FUNCTION__, bus->sih->buscorerev));
		return;
	}

	dar_clk_ctrl_val = si_corereg(bus->sih, bus->sih->buscoreidx, dar_clk_ctrl_reg, 0, 0);
	dar_pwr_ctrl_val = si_corereg(bus->sih, bus->sih->buscoreidx, dar_pwr_ctrl_reg, 0, 0);
	dar_intstat_val = si_corereg(bus->sih, bus->sih->buscoreidx, dar_intstat_reg, 0, 0);
	dar_errlog_val = si_corereg(bus->sih, bus->sih->buscoreidx, dar_errlog_reg, 0, 0);
	dar_erraddr_val = si_corereg(bus->sih, bus->sih->buscoreidx, dar_erraddr_reg, 0, 0);
	dar_pcie_mbint_val = si_corereg(bus->sih, bus->sih->buscoreidx, dar_pcie_mbint_reg, 0, 0);

	DHD_ERROR(("%s: dar_clk_ctrl(0x%x:0x%x) dar_pwr_ctrl(0x%x:0x%x) dar_intstat(0x%x:0x%x)\n",
		__FUNCTION__, dar_clk_ctrl_reg, dar_clk_ctrl_val,
		dar_pwr_ctrl_reg, dar_pwr_ctrl_val, dar_intstat_reg, dar_intstat_val));

	DHD_ERROR(("%s: dar_errlog(0x%x:0x%x) dar_erraddr(0x%x:0x%x) dar_pcie_mbint(0x%x:0x%x)\n",
		__FUNCTION__, dar_errlog_reg, dar_errlog_val,
		dar_erraddr_reg, dar_erraddr_val, dar_pcie_mbint_reg, dar_pcie_mbint_val));
}
/* Ring DoorBell1 to indicate Hostready i.e. D3 Exit */
void
dhd_bus_hostready(struct dhd_bus *bus)
{
	if (!bus->dhd->d2h_hostrdy_supported) {
		return;
	}

	if (bus->is_linkdown) {
		DHD_ERROR(("%s: PCIe link was down\n", __FUNCTION__));
		return;
	}

	DHD_ERROR(("%s : Read PCICMD Reg: 0x%08X\n", __FUNCTION__,
		dhd_pcie_config_read(bus->osh, PCI_CFG_CMD, sizeof(uint32))));

	if (DAR_PWRREQ(bus)) {
		dhd_bus_pcie_pwr_req(bus);
	}

	dhd_bus_dump_dar_registers(bus);

	si_corereg(bus->sih, bus->sih->buscoreidx, dhd_bus_db1_addr_get(bus), ~0, 0x12345678);
	bus->hostready_count++;
	DHD_ERROR(("%s: Ring Hostready:%d\n", __FUNCTION__, bus->hostready_count));
}
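
/* Descriptive note: hostready is signalled by writing 0x12345678 to
 * doorbell 1 (address obtained from dhd_bus_db1_addr_get()); the write
 * itself raises the doorbell interrupt, and the 0x12345678 payload appears
 * to be just a recognizable marker. The PCICMD config read above doubles as
 * a cheap check that config space is accessible before ringing the doorbell.
 */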
/* Clear INTSTATUS */
void
dhdpcie_bus_clear_intstatus(struct dhd_bus *bus)
{
	uint32 intstatus = 0;

	if ((bus->sih->buscorerev == 6) || (bus->sih->buscorerev == 4) ||
		(bus->sih->buscorerev == 2)) {
		intstatus = dhdpcie_bus_cfg_read_dword(bus, PCIIntstatus, 4);
		dhdpcie_bus_cfg_write_dword(bus, PCIIntstatus, 4, intstatus);
	} else {
		/* this is a PCIe core register, not a config register */
		intstatus = si_corereg(bus->sih, bus->sih->buscoreidx, bus->pcie_mailbox_int, 0, 0);
		si_corereg(bus->sih, bus->sih->buscoreidx, bus->pcie_mailbox_int, bus->def_intmask,
			intstatus);
	}
}
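
/* Descriptive note: interrupts are acknowledged by echoing the read-back
 * status (write-1-to-clear). Legacy bus core revs 2/4/6 expose INTSTATUS in
 * PCI config space (PCIIntstatus); newer revs use the PCIe mailbox interrupt
 * core register, where the write is applied under def_intmask so only the
 * bits the driver owns are written back.
 */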
int
#ifdef DHD_PCIE_NATIVE_RUNTIMEPM
dhdpcie_bus_suspend(struct dhd_bus *bus, bool state, bool byint)
#else
dhdpcie_bus_suspend(struct dhd_bus *bus, bool state)
#endif /* DHD_PCIE_NATIVE_RUNTIMEPM */
{
	int timeleft;
	int rc = 0;
	unsigned long flags, flags_bus;
#ifdef DHD_PCIE_NATIVE_RUNTIMEPM
	int d3_read_retry = 0;
	uint32 d2h_mb_data = 0;
	uint32 zero = 0;
#endif /* DHD_PCIE_NATIVE_RUNTIMEPM */

	if (bus->dhd == NULL) {
		DHD_ERROR(("bus not inited\n"));
		return BCME_ERROR;
	}
	if (bus->dhd->prot == NULL) {
		DHD_ERROR(("prot is not inited\n"));
		return BCME_ERROR;
	}

	if (dhd_query_bus_erros(bus->dhd)) {
		return BCME_ERROR;
	}

	DHD_GENERAL_LOCK(bus->dhd, flags);
	if (!(bus->dhd->busstate == DHD_BUS_DATA || bus->dhd->busstate == DHD_BUS_SUSPEND)) {
		DHD_ERROR(("bus is not in a ready state\n"));
		DHD_GENERAL_UNLOCK(bus->dhd, flags);
		return BCME_ERROR;
	}
	DHD_GENERAL_UNLOCK(bus->dhd, flags);

	if (bus->dhd->dongle_reset) {
		DHD_ERROR(("Dongle is in reset state.\n"));
		return -EIO;
	}
	/* Check whether we are already in the requested state.
	 * state == TRUE means Suspend
	 * state == FALSE means Resume
	 */
	if (state == TRUE && bus->dhd->busstate == DHD_BUS_SUSPEND) {
		DHD_ERROR(("Bus is already in SUSPEND state.\n"));
		return BCME_OK;
	} else if (state == FALSE && bus->dhd->busstate == DHD_BUS_DATA) {
		DHD_ERROR(("Bus is already in RESUME state.\n"));
		return BCME_OK;
	}
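
	/* Overview (descriptive) of the D3 handshake implemented below.
	 * Suspend: stop the watchdog and network queues, send
	 * H2D_HOST_D3_INFORM to the dongle, wait for the D3 ACK via
	 * dhd_os_d3ack_wait(), then either complete host-side suspend
	 * (dhdpcie_pci_suspend_resume()) or, on wakelock contention or ACK
	 * timeout, roll the dongle back to D0 and return an error.
	 * Resume: run host-side resume, invalidate the cached BAR0 window,
	 * optionally send D0_INFORM, ring hostready, and restart the queues.
	 */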
	if (state) {
#ifdef OEM_ANDROID
		int idle_retry = 0;
		int active;
#endif /* OEM_ANDROID */

		if (bus->is_linkdown) {
			DHD_ERROR(("%s: PCIe link was down, state=%d\n",
				__FUNCTION__, state));
			return BCME_ERROR;
		}

		/* Suspend */
		DHD_ERROR(("%s: Entering suspend state\n", __FUNCTION__));

		bus->dhd->dhd_watchdog_ms_backup = dhd_watchdog_ms;
		if (bus->dhd->dhd_watchdog_ms_backup) {
			DHD_ERROR(("%s: Disabling wdtick before going to suspend\n",
				__FUNCTION__));
			dhd_os_wd_timer(bus->dhd, 0);
		}

		DHD_GENERAL_LOCK(bus->dhd, flags);
		if (DHD_BUS_BUSY_CHECK_IN_TX(bus->dhd)) {
			DHD_ERROR(("Tx Request is not ended\n"));
			bus->dhd->busstate = DHD_BUS_DATA;
			DHD_GENERAL_UNLOCK(bus->dhd, flags);
			return -EBUSY;
		}

		bus->last_suspend_start_time = OSL_LOCALTIME_NS();

		/* stop all interface network queue. */
		dhd_bus_stop_queue(bus);
		DHD_GENERAL_UNLOCK(bus->dhd, flags);

#ifdef DHD_PCIE_NATIVE_RUNTIMEPM
		if (byint) {
			DHD_OS_WAKE_LOCK_WAIVE(bus->dhd);
			/* Clear wait_for_d3_ack before sending D3_INFORM */
			bus->wait_for_d3_ack = 0;
			dhdpcie_send_mb_data(bus, H2D_HOST_D3_INFORM);
			timeleft = dhd_os_d3ack_wait(bus->dhd, &bus->wait_for_d3_ack);
			DHD_OS_WAKE_LOCK_RESTORE(bus->dhd);
		} else {
			/* Clear wait_for_d3_ack before sending D3_INFORM */
			bus->wait_for_d3_ack = 0;
			dhdpcie_send_mb_data(bus, H2D_HOST_D3_INFORM | H2D_HOST_ACK_NOINT);
			while (!bus->wait_for_d3_ack && d3_read_retry < MAX_D3_ACK_TIMEOUT) {
				dhdpcie_handle_mb_data(bus);
				usleep_range(1000, 1500);
				d3_read_retry++;
			}
		}
#else
		DHD_OS_WAKE_LOCK_WAIVE(bus->dhd);
		/* Clear wait_for_d3_ack before sending D3_INFORM */
		bus->wait_for_d3_ack = 0;
		/*
		 * Send H2D_HOST_D3_INFORM to dongle and mark bus->bus_low_power_state
		 * to DHD_BUS_D3_INFORM_SENT in dhd_prot_ring_write_complete_mbdata
		 * inside atomic context, so that no more DBs will be
		 * rung after sending D3_INFORM
		 */
		dhdpcie_send_mb_data(bus, H2D_HOST_D3_INFORM);

		/* Wait for D3 ACK for D3_ACK_RESP_TIMEOUT seconds */
		timeleft = dhd_os_d3ack_wait(bus->dhd, &bus->wait_for_d3_ack);

#ifdef DHD_RECOVER_TIMEOUT
		if (bus->wait_for_d3_ack == 0) {
			/* If wait_for_d3_ack was not updated because D2H MB was not received */
			uint32 intstatus = si_corereg(bus->sih, bus->sih->buscoreidx,
				bus->pcie_mailbox_int, 0, 0);
			int host_irq_disabled = dhdpcie_irq_disabled(bus);

			if ((intstatus) && (intstatus != (uint32)-1) &&
				(timeleft == 0) && (!dhd_query_bus_erros(bus->dhd))) {
				DHD_ERROR(("%s: D3 ACK trying again intstatus=%x"
					" host_irq_disabled=%d\n",
					__FUNCTION__, intstatus, host_irq_disabled));
				dhd_pcie_intr_count_dump(bus->dhd);
				dhd_print_tasklet_status(bus->dhd);
				if (bus->api.fw_rev >= PCIE_SHARED_VERSION_6 &&
					!bus->use_mailbox) {
					dhd_prot_process_ctrlbuf(bus->dhd);
				} else {
					dhdpcie_handle_mb_data(bus);
				}
				timeleft = dhd_os_d3ack_wait(bus->dhd, &bus->wait_for_d3_ack);
				/* Clear Interrupts */
				dhdpcie_bus_clear_intstatus(bus);
			}
		} /* bus->wait_for_d3_ack was 0 */
#endif /* DHD_RECOVER_TIMEOUT */

		DHD_OS_WAKE_LOCK_RESTORE(bus->dhd);
#endif /* DHD_PCIE_NATIVE_RUNTIMEPM */

#ifdef OEM_ANDROID
		/* To allow threads that got pre-empted to complete.
		 */
		while ((active = dhd_os_check_wakelock_all(bus->dhd)) &&
			(idle_retry < MAX_WKLK_IDLE_CHECK)) {
			OSL_SLEEP(1);
			idle_retry++;
		}
#endif /* OEM_ANDROID */

		if (bus->wait_for_d3_ack) {
			DHD_ERROR(("%s: Got D3 Ack \n", __FUNCTION__));
			/* Got D3 Ack. Suspend the bus */
#ifdef OEM_ANDROID
			if (active) {
				DHD_ERROR(("%s(): Suspend failed because of wakelock, "
					"restoring Dongle to D0\n", __FUNCTION__));

				if (bus->dhd->dhd_watchdog_ms_backup) {
					DHD_ERROR(("%s: Enabling wdtick due to wakelock active\n",
						__FUNCTION__));
					dhd_os_wd_timer(bus->dhd,
						bus->dhd->dhd_watchdog_ms_backup);
				}
				/*
				 * The dongle still thinks it has to stay in D3 until it
				 * gets a D0 Inform, but we are backing off from suspend.
				 * Ensure that the dongle is brought back to D0.
				 *
				 * Bringing the dongle back from the D3 Ack state to D0 is
				 * a 2-step process: the dongle expects the D0 Inform to be
				 * sent as a MB interrupt, and hostready must also be rung,
				 * so both messages are sent below.
				 */
				/* Clear wait_for_d3_ack to send D0_INFORM or host_ready */
				bus->wait_for_d3_ack = 0;

				DHD_BUS_LOCK(bus->bus_lock, flags_bus);
				bus->bus_low_power_state = DHD_BUS_NO_LOW_POWER_STATE;
				/* Enable back the intmask which was cleared in DPC
				 * after getting D3_ACK.
				 */
				bus->resume_intr_enable_count++;

				/* For Linux, MacOS etc (other than NDIS), enable back the
				 * dongle interrupts using intmask and the host interrupts,
				 * which were disabled in dhdpcie_bus_isr()->
				 * dhd_bus_handle_d3_ack().
				 */
				/* Enable back interrupt using Intmask!! */
				dhdpcie_bus_intr_enable(bus);
				/* Enable back interrupt from Host side!! */
				dhdpcie_enable_irq(bus);

				DHD_BUS_UNLOCK(bus->bus_lock, flags_bus);

				if (bus->use_d0_inform) {
					DHD_OS_WAKE_LOCK_WAIVE(bus->dhd);
					dhdpcie_send_mb_data(bus,
						(H2D_HOST_D0_INFORM_IN_USE | H2D_HOST_D0_INFORM));
					DHD_OS_WAKE_LOCK_RESTORE(bus->dhd);
				}
				/* ring doorbell 1 (hostready) */
				dhd_bus_hostready(bus);

				DHD_GENERAL_LOCK(bus->dhd, flags);
				bus->dhd->busstate = DHD_BUS_DATA;
				/* resume all interface network queue. */
				dhd_bus_start_queue(bus);
				DHD_GENERAL_UNLOCK(bus->dhd, flags);
				rc = BCME_ERROR;
			} else {
				/* Actual Suspend after no wakelock */
#endif /* OEM_ANDROID */
			/* At this time bus->bus_low_power_state will be
			 * set to DHD_BUS_D3_ACK_RECIEVED after receiving D3_ACK
			 * in dhd_bus_handle_d3_ack()
			 */
			if (bus->use_d0_inform &&
				(bus->api.fw_rev < PCIE_SHARED_VERSION_6)) {
				DHD_OS_WAKE_LOCK_WAIVE(bus->dhd);
				dhdpcie_send_mb_data(bus, (H2D_HOST_D0_INFORM_IN_USE));
				DHD_OS_WAKE_LOCK_RESTORE(bus->dhd);
			}
#if defined(BCMPCIE_OOB_HOST_WAKE)
			if (bus->dhd->dhd_induce_error == DHD_INDUCE_DROP_OOB_IRQ) {
				DHD_ERROR(("%s: Inducing DROP OOB IRQ\n", __FUNCTION__));
			} else {
				dhdpcie_oob_intr_set(bus, TRUE);
			}
#endif /* BCMPCIE_OOB_HOST_WAKE */

			DHD_GENERAL_LOCK(bus->dhd, flags);
			/* The host cannot process interrupts now, so disable them.
			 * There is no need to disable the dongle INTR using intmask,
			 * as we already disable INTRs from DPC context after getting
			 * D3_ACK in dhd_bus_handle_d3_ack().
			 * The code may not look symmetric between the Suspend and
			 * Resume paths, but this is done to close the timing window
			 * between the DPC and suspend contexts; bus->bus_low_power_state
			 * will be set to DHD_BUS_D3_ACK_RECIEVED in the DPC.
			 */
			bus->dhd->d3ackcnt_timeout = 0;
			bus->dhd->busstate = DHD_BUS_SUSPEND;
			DHD_GENERAL_UNLOCK(bus->dhd, flags);
			dhdpcie_dump_resource(bus);
			/* Handle Host Suspend */
			rc = dhdpcie_pci_suspend_resume(bus, state);
			if (!rc) {
				bus->last_suspend_end_time = OSL_LOCALTIME_NS();
			}
#ifdef OEM_ANDROID
			}
#endif /* OEM_ANDROID */
		} else if (timeleft == 0) { /* D3 ACK Timeout */
#ifdef DHD_FW_COREDUMP
			uint32 cur_memdump_mode = bus->dhd->memdump_enabled;
#endif /* DHD_FW_COREDUMP */

			/* check if the D3 ACK timeout is due to a scheduling issue */
			bus->dhd->is_sched_error = !dhd_query_bus_erros(bus->dhd) &&
				bus->isr_entry_time > bus->last_d3_inform_time &&
				dhd_bus_query_dpc_sched_errors(bus->dhd);
			bus->dhd->d3ack_timeout_occured = TRUE;
			/* If the D3 Ack has timed out */
			bus->dhd->d3ackcnt_timeout++;
			DHD_ERROR(("%s: resumed on timeout for D3 ACK%s d3_inform_cnt %d\n",
				__FUNCTION__, bus->dhd->is_sched_error ?
				" due to scheduling problem" : "", bus->dhd->d3ackcnt_timeout));
#if defined(DHD_KERNEL_SCHED_DEBUG) && defined(DHD_FW_COREDUMP)
			if (bus->dhd->is_sched_error && cur_memdump_mode == DUMP_MEMFILE_BUGON) {
				/* change g_assert_type to trigger Kernel panic */
				g_assert_type = 2;
				/* use ASSERT() to trigger panic */
				ASSERT(0);
			}
#endif /* DHD_KERNEL_SCHED_DEBUG && DHD_FW_COREDUMP */

			DHD_BUS_LOCK(bus->bus_lock, flags_bus);
			bus->bus_low_power_state = DHD_BUS_NO_LOW_POWER_STATE;
			DHD_BUS_UNLOCK(bus->bus_lock, flags_bus);

			DHD_GENERAL_LOCK(bus->dhd, flags);
			bus->dhd->busstate = DHD_BUS_DATA;
			/* resume all interface network queue. */
			dhd_bus_start_queue(bus);
			DHD_GENERAL_UNLOCK(bus->dhd, flags);

			if (!bus->dhd->dongle_trap_occured &&
				!bus->is_linkdown &&
				!bus->cto_triggered) {
				uint32 intstatus = 0;

				/* Check if PCIe bus status is valid */
				intstatus = si_corereg(bus->sih, bus->sih->buscoreidx,
					bus->pcie_mailbox_int, 0, 0);
				if (intstatus == (uint32)-1) {
					/* Invalidate PCIe bus status */
					bus->is_linkdown = 1;
				}

				dhd_bus_dump_console_buffer(bus);
				dhd_prot_debug_info_print(bus->dhd);
#ifdef DHD_FW_COREDUMP
				if (cur_memdump_mode) {
					/* write core dump to file */
					bus->dhd->memdump_type = DUMP_TYPE_D3_ACK_TIMEOUT;
					dhdpcie_mem_dump(bus);
				}
#endif /* DHD_FW_COREDUMP */
#ifdef OEM_ANDROID
				DHD_ERROR(("%s: Event HANG send up due to D3_ACK timeout\n",
					__FUNCTION__));
#ifdef SUPPORT_LINKDOWN_RECOVERY
#ifdef CONFIG_ARCH_MSM
				bus->no_cfg_restore = 1;
#endif /* CONFIG_ARCH_MSM */
#endif /* SUPPORT_LINKDOWN_RECOVERY */
				dhd_os_check_hang(bus->dhd, 0, -ETIMEDOUT);
#endif /* OEM_ANDROID */
			}
#if defined(DHD_ERPOM)
			dhd_schedule_reset(bus->dhd);
#endif // endif
			rc = -ETIMEDOUT;
		}
	} else {
		/* Resume */
		DHD_ERROR(("%s: Entering resume state\n", __FUNCTION__));
		bus->last_resume_start_time = OSL_LOCALTIME_NS();

		/**
		 * PCIE2_BAR0_CORE2_WIN gets reset after D3 cold.
		 * si_backplane_access() (the function used to read/write the backplane)
		 * updates the window (PCIE2_BAR0_CORE2_WIN) only if the
		 * window being accessed is different from the window
		 * pointed to by second_bar0win.
		 * Since PCIE2_BAR0_CORE2_WIN was reset by D3 cold,
		 * invalidating second_bar0win after resume updates
		 * PCIE2_BAR0_CORE2_WIN with the right window.
		 */
		si_invalidate_second_bar0win(bus->sih);
#if defined(OEM_ANDROID)
#if defined(BCMPCIE_OOB_HOST_WAKE)
		DHD_OS_OOB_IRQ_WAKE_UNLOCK(bus->dhd);
#endif /* BCMPCIE_OOB_HOST_WAKE */
#endif /* OEM_ANDROID */
		rc = dhdpcie_pci_suspend_resume(bus, state);
		dhdpcie_dump_resource(bus);

		DHD_BUS_LOCK(bus->bus_lock, flags_bus);
		/* set bus_low_power_state to DHD_BUS_NO_LOW_POWER_STATE */
		bus->bus_low_power_state = DHD_BUS_NO_LOW_POWER_STATE;
		DHD_BUS_UNLOCK(bus->bus_lock, flags_bus);

		if (!rc && bus->dhd->busstate == DHD_BUS_SUSPEND) {
			if (bus->use_d0_inform) {
				DHD_OS_WAKE_LOCK_WAIVE(bus->dhd);
				dhdpcie_send_mb_data(bus, (H2D_HOST_D0_INFORM));
				DHD_OS_WAKE_LOCK_RESTORE(bus->dhd);
			}
			/* ring doorbell 1 (hostready) */
			dhd_bus_hostready(bus);
		}

		DHD_GENERAL_LOCK(bus->dhd, flags);
		bus->dhd->busstate = DHD_BUS_DATA;
#ifdef DHD_PCIE_RUNTIMEPM
		if (DHD_BUS_BUSY_CHECK_RPM_SUSPEND_DONE(bus->dhd)) {
			bus->bus_wake = 1;
			OSL_SMP_WMB();
			wake_up_interruptible(&bus->rpm_queue);
		}
#endif /* DHD_PCIE_RUNTIMEPM */
		/* resume all interface network queues */
		dhd_bus_start_queue(bus);

		/* TODO: for NDIS we also need to use enable_irq in future */
		bus->resume_intr_enable_count++;

		/* For Linux, MacOS etc. (other than NDIS), enable back the dongle
		 * interrupts using intmask and the host interrupts,
		 * which were disabled in dhdpcie_bus_isr()->dhd_bus_handle_d3_ack().
		 */
		dhdpcie_bus_intr_enable(bus);	/* Enable back interrupt using Intmask!! */
		dhdpcie_enable_irq(bus);	/* Enable back interrupt from Host side!! */
		DHD_GENERAL_UNLOCK(bus->dhd, flags);

		if (bus->dhd->dhd_watchdog_ms_backup) {
			DHD_ERROR(("%s: Enabling wdtick after resume\n",
				__FUNCTION__));
			dhd_os_wd_timer(bus->dhd, bus->dhd->dhd_watchdog_ms_backup);
		}

		bus->last_resume_end_time = OSL_LOCALTIME_NS();

		/* Update the TCM rd index for the EDL ring */
		DHD_EDL_RING_TCM_RD_UPDATE(bus->dhd);
	}
	return rc;
}
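
/*
 * Note (summary, added for clarity): the suspend path above implements the
 * H2D_HOST_D3_INFORM -> D2H_DEV_D3_ACK handshake. The host sends D3_INFORM,
 * waits for the D3 ACK (wait_for_d3_ack), and only then lets the PCIe host
 * controller move the link to D3. On timeout the bus state is restored to
 * DHD_BUS_DATA and, depending on build options, a memdump and/or HANG event
 * is raised. The resume path reverses this: restore the BAR0 window, send
 * D0_INFORM (if used), ring hostready, and re-enable interrupts.
 */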

uint32
dhdpcie_force_alp(struct dhd_bus *bus, bool enable)
{
	ASSERT(bus && bus->sih);
	if (enable) {
		si_corereg(bus->sih, bus->sih->buscoreidx,
			OFFSETOF(sbpcieregs_t, u.pcie2.clk_ctl_st), CCS_FORCEALP, CCS_FORCEALP);
	} else {
		si_corereg(bus->sih, bus->sih->buscoreidx,
			OFFSETOF(sbpcieregs_t, u.pcie2.clk_ctl_st), CCS_FORCEALP, 0);
	}
	return 0;
}

/* set pcie l1 entry time: dhd pciereg 0x1004[22:16] */
uint32
dhdpcie_set_l1_entry_time(struct dhd_bus *bus, int l1_entry_time)
{
	uint reg_val;

	ASSERT(bus && bus->sih);

	si_corereg(bus->sih, bus->sih->buscoreidx, OFFSETOF(sbpcieregs_t, configaddr), ~0,
		0x1004);
	reg_val = si_corereg(bus->sih, bus->sih->buscoreidx,
		OFFSETOF(sbpcieregs_t, configdata), 0, 0);
	reg_val = (reg_val & ~(0x7f << 16)) | ((l1_entry_time & 0x7f) << 16);
	si_corereg(bus->sih, bus->sih->buscoreidx, OFFSETOF(sbpcieregs_t, configdata), ~0,
		reg_val);

	return 0;
}
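
/*
 * Example (hypothetical values, added for clarity): with l1_entry_time = 0x20
 * and a previous configdata value of 0x081F1004, the read-modify-write above
 * first clears bits [22:16] (0x081F1004 & ~0x007F0000 = 0x08001004) and then
 * ORs in (0x20 << 16), giving 0x08201004. Only the 7-bit field at [22:16]
 * changes; all other bits of pciereg 0x1004 are preserved.
 */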

static uint32
dhd_apply_d11_war_length(struct dhd_bus *bus, uint32 len, uint32 d11_lpbk)
{
	uint16 chipid = si_chipid(bus->sih);

	if ((chipid == BCM4375_CHIP_ID ||
		chipid == BCM4362_CHIP_ID ||
		chipid == BCM43751_CHIP_ID ||
		chipid == BCM4377_CHIP_ID) &&
		(d11_lpbk != M2M_DMA_LPBK && d11_lpbk != M2M_NON_DMA_LPBK)) {
		len += 8;
	}
	DHD_ERROR(("%s: len %d\n", __FUNCTION__, len));
	return len;
}

/** Transfers bytes from host to dongle and back to host again using DMA */
static int
dhdpcie_bus_dmaxfer_req(struct dhd_bus *bus,
	uint32 len, uint32 srcdelay, uint32 destdelay,
	uint32 d11_lpbk, uint32 core_num, uint32 wait)
{
	int ret = 0;

	if (bus->dhd == NULL) {
		DHD_ERROR(("bus not inited\n"));
		return BCME_ERROR;
	}
	if (bus->dhd->prot == NULL) {
		DHD_ERROR(("prot is not inited\n"));
		return BCME_ERROR;
	}
	if (bus->dhd->busstate != DHD_BUS_DATA) {
		DHD_ERROR(("bus is not in a ready state for LPBK\n"));
		return BCME_ERROR;
	}
	if (len < 5 || len > 4194296) {
		DHD_ERROR(("len is too small or too large\n"));
		return BCME_ERROR;
	}

	len = dhd_apply_d11_war_length(bus, len, d11_lpbk);

	bus->dmaxfer_complete = FALSE;
	ret = dhdmsgbuf_dmaxfer_req(bus->dhd, len, srcdelay, destdelay,
		d11_lpbk, core_num);
	if (ret != BCME_OK || !wait) {
		DHD_INFO(("%s: dmaxfer req returns status %u; wait = %u\n", __FUNCTION__,
			ret, wait));
	} else {
		ret = dhd_os_dmaxfer_wait(bus->dhd, &bus->dmaxfer_complete);
		if (ret < 0)
			ret = BCME_NOTREADY;
	}

	return ret;
}
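
/*
 * Usage sketch (hypothetical, added for clarity): a synchronous 1 KB
 * memory-to-memory loopback with no artificial src/dest delays would be
 * requested as:
 *
 *	int err = dhdpcie_bus_dmaxfer_req(bus, 1024, 0, 0,
 *		M2M_DMA_LPBK, 0, TRUE);
 *
 * With wait = TRUE the call blocks in dhd_os_dmaxfer_wait() until the
 * completion message flips bus->dmaxfer_complete; with wait = FALSE it
 * returns as soon as the request has been posted to the dongle.
 */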

bool
dhd_bus_is_multibp_capable(struct dhd_bus *bus)
{
	return MULTIBP_CAP(bus->sih);
}

#define PCIE_REV_FOR_4378A0	66	/* dhd_bus_perform_flr_with_quiesce() causes problems */
#define PCIE_REV_FOR_4378B0	68

static int
dhdpcie_bus_download_state(dhd_bus_t *bus, bool enter)
{
	int bcmerror = 0;
	volatile uint32 *cr4_regs;
	bool do_flr;

	if (!bus->sih) {
		DHD_ERROR(("%s: NULL sih!!\n", __FUNCTION__));
		return BCME_ERROR;
	}

	do_flr = ((bus->sih->buscorerev != PCIE_REV_FOR_4378A0) &&
		(bus->sih->buscorerev != PCIE_REV_FOR_4378B0));

	if (MULTIBP_ENAB(bus->sih) && !do_flr) {
		dhd_bus_pcie_pwr_req(bus);
	}

	/* To enter download state, disable ARM and reset SOCRAM.
	 * To exit download state, simply reset ARM (default is RAM boot).
	 */
	if (enter) {
		/* Make sure BAR1 maps to backplane address 0 */
		dhdpcie_setbar1win(bus, 0x00000000);
		bus->alp_only = TRUE;

		/* some chips (e.g. 43602) have two ARM cores; the CR4 receives the firmware */
		cr4_regs = si_setcore(bus->sih, ARMCR4_CORE_ID, 0);

		if (cr4_regs == NULL && !(si_setcore(bus->sih, ARM7S_CORE_ID, 0)) &&
		    !(si_setcore(bus->sih, ARMCM3_CORE_ID, 0)) &&
		    !(si_setcore(bus->sih, ARMCA7_CORE_ID, 0))) {
			DHD_ERROR(("%s: Failed to find ARM core!\n", __FUNCTION__));
			bcmerror = BCME_ERROR;
			goto fail;
		}

		if (si_setcore(bus->sih, ARMCA7_CORE_ID, 0)) {
			/* Halt ARM & remove reset */
			si_core_reset(bus->sih, SICF_CPUHALT, SICF_CPUHALT);
			if (!(si_setcore(bus->sih, SYSMEM_CORE_ID, 0))) {
				DHD_ERROR(("%s: Failed to find SYSMEM core!\n", __FUNCTION__));
				bcmerror = BCME_ERROR;
				goto fail;
			}
			si_core_reset(bus->sih, 0, 0);
			/* Clear the last 4 bytes of RAM; they are used for the shared area */
			dhdpcie_init_shared_addr(bus);
		} else if (cr4_regs == NULL) { /* no CR4 present on chip */
			si_core_disable(bus->sih, 0);
			if (!(si_setcore(bus->sih, SOCRAM_CORE_ID, 0))) {
				DHD_ERROR(("%s: Failed to find SOCRAM core!\n", __FUNCTION__));
				bcmerror = BCME_ERROR;
				goto fail;
			}
			si_core_reset(bus->sih, 0, 0);
			/* Clear the top bit of memory */
			if (bus->ramsize) {
				uint32 zeros = 0;
				if (dhdpcie_bus_membytes(bus, TRUE, bus->ramsize - 4,
					(uint8*)&zeros, 4) < 0) {
					bcmerror = BCME_ERROR;
					goto fail;
				}
			}
		} else {
			/* For CR4:
			 *  Halt ARM
			 *  Remove ARM reset
			 *  Read RAM base address [0x18_0000]
			 *  [next] Download firmware
			 *  [done at else] Populate the reset vector
			 *  [done at else] Remove ARM halt
			 */
			/* Halt ARM & remove reset */
			si_core_reset(bus->sih, SICF_CPUHALT, SICF_CPUHALT);
			if (BCM43602_CHIP(bus->sih->chip)) {
				W_REG(bus->pcie_mb_intr_osh, cr4_regs + ARMCR4REG_BANKIDX, 5);
				W_REG(bus->pcie_mb_intr_osh, cr4_regs + ARMCR4REG_BANKPDA, 0);
				W_REG(bus->pcie_mb_intr_osh, cr4_regs + ARMCR4REG_BANKIDX, 7);
				W_REG(bus->pcie_mb_intr_osh, cr4_regs + ARMCR4REG_BANKPDA, 0);
			}
			/* Clear the last 4 bytes of RAM; they are used for the shared area */
			dhdpcie_init_shared_addr(bus);
		}
	} else {
		if (si_setcore(bus->sih, ARMCA7_CORE_ID, 0)) {
			/* write vars */
			if ((bcmerror = dhdpcie_bus_write_vars(bus))) {
				DHD_ERROR(("%s: could not write vars to RAM\n", __FUNCTION__));
				goto fail;
			}
			/* write random numbers to sysmem for the purpose of
			 * randomizing heap address space.
			 */
			if ((bcmerror = dhdpcie_wrt_rnd(bus)) != BCME_OK) {
				DHD_ERROR(("%s: Failed to get random seed to write to TCM !\n",
					__FUNCTION__));
				goto fail;
			}
			/* switch back to arm core again */
			if (!(si_setcore(bus->sih, ARMCA7_CORE_ID, 0))) {
				DHD_ERROR(("%s: Failed to find ARM CA7 core!\n", __FUNCTION__));
				bcmerror = BCME_ERROR;
				goto fail;
			}
			/* write address 0 with reset instruction */
			bcmerror = dhdpcie_bus_membytes(bus, TRUE, 0,
				(uint8 *)&bus->resetinstr, sizeof(bus->resetinstr));
			/* now remove reset and halt and continue to run CA7 */
		} else if (!si_setcore(bus->sih, ARMCR4_CORE_ID, 0)) {
			if (!(si_setcore(bus->sih, SOCRAM_CORE_ID, 0))) {
				DHD_ERROR(("%s: Failed to find SOCRAM core!\n", __FUNCTION__));
				bcmerror = BCME_ERROR;
				goto fail;
			}

			if (!si_iscoreup(bus->sih)) {
				DHD_ERROR(("%s: SOCRAM core is down after reset?\n", __FUNCTION__));
				bcmerror = BCME_ERROR;
				goto fail;
			}

			/* Enable remap before ARM reset but after vars.
			 * No backplane access in remap mode.
			 */
			if (!si_setcore(bus->sih, PCMCIA_CORE_ID, 0) &&
			    !si_setcore(bus->sih, SDIOD_CORE_ID, 0)) {
				DHD_ERROR(("%s: Can't change back to SDIO core?\n", __FUNCTION__));
				bcmerror = BCME_ERROR;
				goto fail;
			}

			if (!(si_setcore(bus->sih, ARM7S_CORE_ID, 0)) &&
			    !(si_setcore(bus->sih, ARMCM3_CORE_ID, 0))) {
				DHD_ERROR(("%s: Failed to find ARM core!\n", __FUNCTION__));
				bcmerror = BCME_ERROR;
				goto fail;
			}
		} else {
			if (BCM43602_CHIP(bus->sih->chip)) {
				/* Firmware crashes on SOCSRAM access when core is in reset */
				if (!(si_setcore(bus->sih, SOCRAM_CORE_ID, 0))) {
					DHD_ERROR(("%s: Failed to find SOCRAM core!\n",
						__FUNCTION__));
					bcmerror = BCME_ERROR;
					goto fail;
				}
				si_core_reset(bus->sih, 0, 0);
				si_setcore(bus->sih, ARMCR4_CORE_ID, 0);
			}

			/* write vars */
			if ((bcmerror = dhdpcie_bus_write_vars(bus))) {
				DHD_ERROR(("%s: could not write vars to RAM\n", __FUNCTION__));
				goto fail;
			}

			/* write a random number to TCM for the purpose of
			 * randomizing heap address space.
			 */
			if ((bcmerror = dhdpcie_wrt_rnd(bus)) != BCME_OK) {
				DHD_ERROR(("%s: Failed to get random seed to write to TCM !\n",
					__FUNCTION__));
				goto fail;
			}

			/* switch back to arm core again */
			if (!(si_setcore(bus->sih, ARMCR4_CORE_ID, 0))) {
				DHD_ERROR(("%s: Failed to find ARM CR4 core!\n", __FUNCTION__));
				bcmerror = BCME_ERROR;
				goto fail;
			}

			/* write address 0 with reset instruction */
			bcmerror = dhdpcie_bus_membytes(bus, TRUE, 0,
				(uint8 *)&bus->resetinstr, sizeof(bus->resetinstr));
			if (bcmerror == BCME_OK) {
				uint32 tmp;

				bcmerror = dhdpcie_bus_membytes(bus, FALSE, 0,
					(uint8 *)&tmp, sizeof(tmp));
				if (bcmerror == BCME_OK && tmp != bus->resetinstr) {
					DHD_ERROR(("%s: Failed to write 0x%08x to addr 0\n",
						__FUNCTION__, bus->resetinstr));
					DHD_ERROR(("%s: contents of addr 0 is 0x%08x\n",
						__FUNCTION__, tmp));
					bcmerror = BCME_ERROR;
					goto fail;
				}
			}
			/* now remove reset and halt and continue to run CR4 */
		}

		si_core_reset(bus->sih, 0, 0);

		/* Allow HT Clock now that the ARM is running */
		bus->alp_only = FALSE;

		bus->dhd->busstate = DHD_BUS_LOAD;
	}

fail:
	/* Always return to the PCIe core */
	si_setcore(bus->sih, PCIE2_CORE_ID, 0);

	if (MULTIBP_ENAB(bus->sih) && !do_flr) {
		dhd_bus_pcie_pwr_req_clear(bus);
	}

	return bcmerror;
} /* dhdpcie_bus_download_state */
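
/*
 * Note (summary, added for clarity): download state handling above is
 * two-phase. enter == TRUE halts the ARM (CA7/CR4) or disables it and resets
 * SOCRAM (older chips) so the firmware image can be written into RAM.
 * enter == FALSE writes the NVRAM vars and a random seed, plants the reset
 * instruction at address 0 (CA7/CR4 paths), releases the ARM from reset, and
 * moves busstate to DHD_BUS_LOAD.
 */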

static int
dhdpcie_bus_write_vars(dhd_bus_t *bus)
{
	int bcmerror = 0;
	uint32 varsize, phys_size;
	uint32 varaddr;
	uint8 *vbuffer;
	uint32 varsizew;
#ifdef DHD_DEBUG
	uint8 *nvram_ularray;
#endif /* DHD_DEBUG */

	/* Even if there are no vars to be written, we still need to set the ramsize. */
	varsize = bus->varsz ? ROUNDUP(bus->varsz, 4) : 0;
	varaddr = (bus->ramsize - 4) - varsize;
	varaddr += bus->dongle_ram_base;

	if (bus->vars) {
		vbuffer = (uint8 *)MALLOC(bus->dhd->osh, varsize);
		if (!vbuffer)
			return BCME_NOMEM;

		bzero(vbuffer, varsize);
		bcopy(bus->vars, vbuffer, bus->varsz);
		/* Write the vars list */
		bcmerror = dhdpcie_bus_membytes(bus, TRUE, varaddr, vbuffer, varsize);

		/* Implement read back and verify later */
#ifdef DHD_DEBUG
		/* Verify NVRAM bytes */
		DHD_INFO(("Compare NVRAM dl & ul; varsize=%d\n", varsize));
		nvram_ularray = (uint8*)MALLOC(bus->dhd->osh, varsize);
		if (!nvram_ularray) {
			MFREE(bus->dhd->osh, vbuffer, varsize);
			return BCME_NOMEM;
		}

		/* Upload image to verify downloaded contents. */
		memset(nvram_ularray, 0xaa, varsize);

		/* Read the vars list to a temp buffer for comparison */
		bcmerror = dhdpcie_bus_membytes(bus, FALSE, varaddr, nvram_ularray, varsize);
		if (bcmerror) {
			DHD_ERROR(("%s: error %d on reading %d nvram bytes at 0x%08x\n",
				__FUNCTION__, bcmerror, varsize, varaddr));
		}

		/* Compare the org NVRAM with the one read from RAM */
		if (memcmp(vbuffer, nvram_ularray, varsize)) {
			DHD_ERROR(("%s: Downloaded NVRAM image is corrupted.\n", __FUNCTION__));
		} else {
			DHD_ERROR(("%s: Download, Upload and compare of NVRAM succeeded.\n",
				__FUNCTION__));
		}

		MFREE(bus->dhd->osh, nvram_ularray, varsize);
#endif /* DHD_DEBUG */

		MFREE(bus->dhd->osh, vbuffer, varsize);
	}

	phys_size = REMAP_ENAB(bus) ? bus->ramsize : bus->orig_ramsize;
	phys_size += bus->dongle_ram_base;

	/* adjust to the user-specified RAM */
	DHD_INFO(("Physical memory size: %d, usable memory size: %d\n",
		phys_size, bus->ramsize));
	DHD_INFO(("Vars are at %d, orig varsize is %d\n",
		varaddr, varsize));
	varsize = ((phys_size - 4) - varaddr);

	/*
	 * Determine the length token:
	 * Varsize, converted to words, in the lower 16 bits; checksum in the upper 16 bits.
	 */
	if (bcmerror) {
		varsizew = 0;
		bus->nvram_csm = varsizew;
	} else {
		varsizew = varsize / 4;
		varsizew = (~varsizew << 16) | (varsizew & 0x0000FFFF);
		bus->nvram_csm = varsizew;
		varsizew = htol32(varsizew);
	}

	DHD_INFO(("New varsize is %d, length token=0x%08x\n", varsize, varsizew));

	/* Write the length token to the last word */
	bcmerror = dhdpcie_bus_membytes(bus, TRUE, (phys_size - 4),
		(uint8*)&varsizew, 4);

	return bcmerror;
} /* dhdpcie_bus_write_vars */
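
/*
 * Example (added for clarity): for a 512-byte vars region, varsize/4 = 0x80
 * words, so the length token written above is
 *	(~0x80 << 16) | 0x80 = 0xFF7F0080
 * i.e. the upper 16 bits are the one's complement of the lower 16 bits,
 * which lets the dongle sanity-check the token before parsing the vars.
 */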

int
dhdpcie_downloadvars(dhd_bus_t *bus, void *arg, int len)
{
	int bcmerror = BCME_OK;

	DHD_TRACE(("%s: Enter\n", __FUNCTION__));

	/* Basic sanity checks */
	if (bus->dhd->up) {
		bcmerror = BCME_NOTDOWN;
		goto err;
	}
	if (!len) {
		bcmerror = BCME_BUFTOOSHORT;
		goto err;
	}

	/* Free the old ones and replace with the passed variables */
	if (bus->vars)
		MFREE(bus->dhd->osh, bus->vars, bus->varsz);

	bus->vars = MALLOC(bus->dhd->osh, len);
	bus->varsz = bus->vars ? len : 0;
	if (bus->vars == NULL) {
		bcmerror = BCME_NOMEM;
		goto err;
	}

	/* Copy the passed variables, which should include the terminating double-null */
	bcopy(arg, bus->vars, bus->varsz);

#ifdef DHD_USE_SINGLE_NVRAM_FILE
	if (dhd_bus_get_fw_mode(bus->dhd) == DHD_FLAG_MFG_MODE) {
		char *sp = NULL;
		char *ep = NULL;
		int i;
		char tag[2][8] = {"ccode=", "regrev="};

		/* Find ccode and regrev info */
		for (i = 0; i < 2; i++) {
			sp = strnstr(bus->vars, tag[i], bus->varsz);
			if (!sp) {
				DHD_ERROR(("%s: Could not find ccode info from the nvram %s\n",
					__FUNCTION__, bus->nv_path));
				bcmerror = BCME_ERROR;
				goto err;
			}
			sp = strchr(sp, '=');
			ep = strchr(sp, '\0');
			/* We assume that the string lengths of both the ccode and
			 * regrev values do not exceed WLC_CNTRY_BUF_SZ
			 */
			if (ep && ((ep - sp) <= WLC_CNTRY_BUF_SZ)) {
				sp++;
				while (*sp != '\0') {
					DHD_INFO(("%s: parse '%s', current sp = '%c'\n",
						__FUNCTION__, tag[i], *sp));
					*sp++ = '0';
				}
			} else {
				DHD_ERROR(("%s: Invalid parameter format when parsing for %s\n",
					__FUNCTION__, tag[i]));
				bcmerror = BCME_ERROR;
				goto err;
			}
		}
	}
#endif /* DHD_USE_SINGLE_NVRAM_FILE */

err:
	return bcmerror;
}

/* Walk the PCI capability list and check whether the requested capability exists */
uint8
dhdpcie_find_pci_capability(osl_t *osh, uint8 req_cap_id)
{
	uint8 cap_id;
	uint8 cap_ptr = 0;
	uint8 byte_val;

	/* check for Header type 0 */
	byte_val = read_pci_cfg_byte(PCI_CFG_HDR);
	if ((byte_val & 0x7f) != PCI_HEADER_NORMAL) {
		DHD_ERROR(("%s : PCI config header not normal.\n", __FUNCTION__));
		goto end;
	}

	/* check if the capability pointer field exists */
	byte_val = read_pci_cfg_byte(PCI_CFG_STAT);
	if (!(byte_val & PCI_CAPPTR_PRESENT)) {
		DHD_ERROR(("%s : PCI CAP pointer not present.\n", __FUNCTION__));
		goto end;
	}

	cap_ptr = read_pci_cfg_byte(PCI_CFG_CAPPTR);
	/* check if the capability pointer is 0x00 */
	if (cap_ptr == 0x00) {
		DHD_ERROR(("%s : PCI CAP pointer is 0x00.\n", __FUNCTION__));
		goto end;
	}

	/* loop through the capability list and check whether the requested capability exists */
	cap_id = read_pci_cfg_byte(cap_ptr);
	while (cap_id != req_cap_id) {
		cap_ptr = read_pci_cfg_byte((cap_ptr + 1));
		if (cap_ptr == 0x00)
			break;
		cap_id = read_pci_cfg_byte(cap_ptr);
	}

end:
	return cap_ptr;
}
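
/*
 * Note (added for clarity): the return value doubles as the result code.
 * 0x00 means the capability was not found (or the config header was not a
 * normal type-0 header); any non-zero value is the config-space offset of the
 * matching capability structure, suitable for OSL_PCI_READ_CONFIG() below.
 */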

void
dhdpcie_pme_active(osl_t *osh, bool enable)
{
	uint8 cap_ptr;
	uint32 pme_csr;

	cap_ptr = dhdpcie_find_pci_capability(osh, PCI_CAP_POWERMGMTCAP_ID);
	if (!cap_ptr) {
		DHD_ERROR(("%s : Power Management Capability not present\n", __FUNCTION__));
		return;
	}

	pme_csr = OSL_PCI_READ_CONFIG(osh, cap_ptr + PME_CSR_OFFSET, sizeof(uint32));
	DHD_ERROR(("%s : pme_sts_ctrl 0x%x\n", __FUNCTION__, pme_csr));

	pme_csr |= PME_CSR_PME_STAT;
	if (enable) {
		pme_csr |= PME_CSR_PME_EN;
	} else {
		pme_csr &= ~PME_CSR_PME_EN;
	}

	OSL_PCI_WRITE_CONFIG(osh, cap_ptr + PME_CSR_OFFSET, sizeof(uint32), pme_csr);
}
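
/*
 * Note (added for clarity): PME_Status in the PM Control/Status register is
 * a write-one-to-clear bit per the PCI Power Management spec, so OR-ing in
 * PME_CSR_PME_STAT before the write above clears any stale PME event while
 * PME_CSR_PME_EN is being toggled.
 */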

bool
dhdpcie_pme_cap(osl_t *osh)
{
	uint8 cap_ptr;
	uint32 pme_cap;

	cap_ptr = dhdpcie_find_pci_capability(osh, PCI_CAP_POWERMGMTCAP_ID);
	if (!cap_ptr) {
		DHD_ERROR(("%s : Power Management Capability not present\n", __FUNCTION__));
		return FALSE;
	}

	pme_cap = OSL_PCI_READ_CONFIG(osh, cap_ptr, sizeof(uint32));
	DHD_ERROR(("%s : pme_cap 0x%x\n", __FUNCTION__, pme_cap));

	return ((pme_cap & PME_CAP_PM_STATES) != 0);
}

uint32
dhdpcie_lcreg(osl_t *osh, uint32 mask, uint32 val)
{
	uint8 pcie_cap;
	uint8 lcreg_offset;	/* PCIe capability LCreg offset in the config space */
	uint32 reg_val;

	pcie_cap = dhdpcie_find_pci_capability(osh, PCI_CAP_PCIECAP_ID);
	if (!pcie_cap) {
		DHD_ERROR(("%s : PCIe Capability not present\n", __FUNCTION__));
		return 0;
	}

	lcreg_offset = pcie_cap + PCIE_CAP_LINKCTRL_OFFSET;

	/* set operation */
	if (mask) {
		/* read */
		reg_val = OSL_PCI_READ_CONFIG(osh, lcreg_offset, sizeof(uint32));
		/* modify */
		reg_val &= ~mask;
		reg_val |= (mask & val);
		/* write */
		OSL_PCI_WRITE_CONFIG(osh, lcreg_offset, sizeof(uint32), reg_val);
	}
	return OSL_PCI_READ_CONFIG(osh, lcreg_offset, sizeof(uint32));
}
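
/*
 * Example (added for clarity): the Link Control register's low two bits are
 * the ASPM control field (bit 0 = L0s, bit 1 = L1), so
 *	dhdpcie_lcreg(osh, 0x3, 0x0);	// disable ASPM entirely
 *	dhdpcie_lcreg(osh, 0x3, 0x2);	// allow L1 only
 *	dhdpcie_lcreg(osh, 0, 0);	// mask 0: read back LCreg unchanged
 * The read-modify-write above touches only the bits selected by mask.
 */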

uint8
dhdpcie_clkreq(osl_t *osh, uint32 mask, uint32 val)
{
	uint8 pcie_cap;
	uint32 reg_val;
	uint8 lcreg_offset;	/* PCIe capability LCreg offset in the config space */

	pcie_cap = dhdpcie_find_pci_capability(osh, PCI_CAP_PCIECAP_ID);
	if (!pcie_cap) {
		DHD_ERROR(("%s : PCIe Capability not present\n", __FUNCTION__));
		return 0;
	}

	lcreg_offset = pcie_cap + PCIE_CAP_LINKCTRL_OFFSET;

	reg_val = OSL_PCI_READ_CONFIG(osh, lcreg_offset, sizeof(uint32));
	/* set operation */
	if (mask) {
		if (val)
			reg_val |= PCIE_CLKREQ_ENAB;
		else
			reg_val &= ~PCIE_CLKREQ_ENAB;
		OSL_PCI_WRITE_CONFIG(osh, lcreg_offset, sizeof(uint32), reg_val);
		reg_val = OSL_PCI_READ_CONFIG(osh, lcreg_offset, sizeof(uint32));
	}
	if (reg_val & PCIE_CLKREQ_ENAB)
		return 1;
	else
		return 0;
}

void dhd_dump_intr_counters(dhd_pub_t *dhd, struct bcmstrbuf *strbuf)
{
	dhd_bus_t *bus;
	uint64 current_time = OSL_LOCALTIME_NS();

	if (!dhd) {
		DHD_ERROR(("%s: dhd is NULL\n", __FUNCTION__));
		return;
	}

	bus = dhd->bus;
	if (!bus) {
		DHD_ERROR(("%s: bus is NULL\n", __FUNCTION__));
		return;
	}

	bcm_bprintf(strbuf, "\n ------- DUMPING INTR enable/disable counters-------\n");
	bcm_bprintf(strbuf, "resume_intr_enable_count=%lu dpc_intr_enable_count=%lu\n"
		"isr_intr_disable_count=%lu suspend_intr_disable_count=%lu\n"
		"dpc_return_busdown_count=%lu non_ours_irq_count=%lu\n",
		bus->resume_intr_enable_count, bus->dpc_intr_enable_count,
		bus->isr_intr_disable_count, bus->suspend_intr_disable_count,
		bus->dpc_return_busdown_count, bus->non_ours_irq_count);
#ifdef BCMPCIE_OOB_HOST_WAKE
	bcm_bprintf(strbuf, "oob_intr_count=%lu oob_intr_enable_count=%lu"
		" oob_intr_disable_count=%lu\noob_irq_num=%d last_oob_irq_time="SEC_USEC_FMT
		" last_oob_irq_enable_time="SEC_USEC_FMT"\nlast_oob_irq_disable_time="SEC_USEC_FMT
		" oob_irq_enabled=%d oob_gpio_level=%d\n",
		bus->oob_intr_count, bus->oob_intr_enable_count,
		bus->oob_intr_disable_count, dhdpcie_get_oob_irq_num(bus),
		GET_SEC_USEC(bus->last_oob_irq_time), GET_SEC_USEC(bus->last_oob_irq_enable_time),
		GET_SEC_USEC(bus->last_oob_irq_disable_time), dhdpcie_get_oob_irq_status(bus),
		dhdpcie_get_oob_irq_level());
#endif /* BCMPCIE_OOB_HOST_WAKE */
	bcm_bprintf(strbuf, "\ncurrent_time="SEC_USEC_FMT" isr_entry_time="SEC_USEC_FMT
		" isr_exit_time="SEC_USEC_FMT"\ndpc_sched_time="SEC_USEC_FMT
		" last_non_ours_irq_time="SEC_USEC_FMT" dpc_entry_time="SEC_USEC_FMT"\n"
		"last_process_ctrlbuf_time="SEC_USEC_FMT " last_process_flowring_time="SEC_USEC_FMT
		" last_process_txcpl_time="SEC_USEC_FMT"\nlast_process_rxcpl_time="SEC_USEC_FMT
		" last_process_infocpl_time="SEC_USEC_FMT" last_process_edl_time="SEC_USEC_FMT
		"\ndpc_exit_time="SEC_USEC_FMT" resched_dpc_time="SEC_USEC_FMT"\n"
		"last_d3_inform_time="SEC_USEC_FMT"\n",
		GET_SEC_USEC(current_time), GET_SEC_USEC(bus->isr_entry_time),
		GET_SEC_USEC(bus->isr_exit_time), GET_SEC_USEC(bus->dpc_sched_time),
		GET_SEC_USEC(bus->last_non_ours_irq_time), GET_SEC_USEC(bus->dpc_entry_time),
		GET_SEC_USEC(bus->last_process_ctrlbuf_time),
		GET_SEC_USEC(bus->last_process_flowring_time),
		GET_SEC_USEC(bus->last_process_txcpl_time),
		GET_SEC_USEC(bus->last_process_rxcpl_time),
		GET_SEC_USEC(bus->last_process_infocpl_time),
		GET_SEC_USEC(bus->last_process_edl_time),
		GET_SEC_USEC(bus->dpc_exit_time), GET_SEC_USEC(bus->resched_dpc_time),
		GET_SEC_USEC(bus->last_d3_inform_time));

	bcm_bprintf(strbuf, "\nlast_suspend_start_time="SEC_USEC_FMT" last_suspend_end_time="
		SEC_USEC_FMT" last_resume_start_time="SEC_USEC_FMT" last_resume_end_time="
		SEC_USEC_FMT"\n", GET_SEC_USEC(bus->last_suspend_start_time),
		GET_SEC_USEC(bus->last_suspend_end_time),
		GET_SEC_USEC(bus->last_resume_start_time),
		GET_SEC_USEC(bus->last_resume_end_time));

#if defined(SHOW_LOGTRACE) && defined(DHD_USE_KTHREAD_FOR_LOGTRACE)
	bcm_bprintf(strbuf, "logtrace_thread_entry_time="SEC_USEC_FMT
		" logtrace_thread_sem_down_time="SEC_USEC_FMT
		"\nlogtrace_thread_flush_time="SEC_USEC_FMT
		" logtrace_thread_unexpected_break_time="SEC_USEC_FMT
		"\nlogtrace_thread_complete_time="SEC_USEC_FMT"\n",
		GET_SEC_USEC(dhd->logtrace_thr_ts.entry_time),
		GET_SEC_USEC(dhd->logtrace_thr_ts.sem_down_time),
		GET_SEC_USEC(dhd->logtrace_thr_ts.flush_time),
		GET_SEC_USEC(dhd->logtrace_thr_ts.unexpected_break_time),
		GET_SEC_USEC(dhd->logtrace_thr_ts.complete_time));
#endif /* SHOW_LOGTRACE && DHD_USE_KTHREAD_FOR_LOGTRACE */
}

void dhd_dump_intr_registers(dhd_pub_t *dhd, struct bcmstrbuf *strbuf)
{
	uint32 intstatus = 0;
	uint32 intmask = 0;
	uint32 d2h_db0 = 0;
	uint32 d2h_mb_data = 0;

	intstatus = si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx,
		dhd->bus->pcie_mailbox_int, 0, 0);
	intmask = si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx,
		dhd->bus->pcie_mailbox_mask, 0, 0);
	d2h_db0 = si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx, PCID2H_MailBox, 0, 0);
	dhd_bus_cmn_readshared(dhd->bus, &d2h_mb_data, D2H_MB_DATA, 0);

	bcm_bprintf(strbuf, "intstatus=0x%x intmask=0x%x d2h_db0=0x%x\n",
		intstatus, intmask, d2h_db0);
	bcm_bprintf(strbuf, "d2h_mb_data=0x%x def_intmask=0x%x\n",
		d2h_mb_data, dhd->bus->def_intmask);
}

/** Add bus dump output to a buffer */
void dhd_bus_dump(dhd_pub_t *dhdp, struct bcmstrbuf *strbuf)
{
	uint16 flowid;
	int ix = 0;
	flow_ring_node_t *flow_ring_node;
	flow_info_t *flow_info;
#ifdef TX_STATUS_LATENCY_STATS
	uint8 ifindex;
	if_flow_lkup_t *if_flow_lkup;
	dhd_if_tx_status_latency_t if_tx_status_latency[DHD_MAX_IFS];
#endif /* TX_STATUS_LATENCY_STATS */

	if (dhdp->busstate != DHD_BUS_DATA)
		return;

#ifdef TX_STATUS_LATENCY_STATS
	memset(if_tx_status_latency, 0, sizeof(if_tx_status_latency));
#endif /* TX_STATUS_LATENCY_STATS */

#ifdef DHD_WAKE_STATUS
	bcm_bprintf(strbuf, "wake %u rxwake %u readctrlwake %u\n",
		bcmpcie_get_total_wake(dhdp->bus), dhdp->bus->wake_counts.rxwake,
		dhdp->bus->wake_counts.rcwake);
#ifdef DHD_WAKE_RX_STATUS
	bcm_bprintf(strbuf, " unicast %u multicast %u broadcast %u arp %u\n",
		dhdp->bus->wake_counts.rx_ucast, dhdp->bus->wake_counts.rx_mcast,
		dhdp->bus->wake_counts.rx_bcast, dhdp->bus->wake_counts.rx_arp);
	bcm_bprintf(strbuf, " multi4 %u multi6 %u icmp6 %u multiother %u\n",
		dhdp->bus->wake_counts.rx_multi_ipv4, dhdp->bus->wake_counts.rx_multi_ipv6,
		dhdp->bus->wake_counts.rx_icmpv6, dhdp->bus->wake_counts.rx_multi_other);
	bcm_bprintf(strbuf, " icmp6_ra %u, icmp6_na %u, icmp6_ns %u\n",
		dhdp->bus->wake_counts.rx_icmpv6_ra, dhdp->bus->wake_counts.rx_icmpv6_na,
		dhdp->bus->wake_counts.rx_icmpv6_ns);
#endif /* DHD_WAKE_RX_STATUS */
#ifdef DHD_WAKE_EVENT_STATUS
	for (flowid = 0; flowid < WLC_E_LAST; flowid++) {
		if (dhdp->bus->wake_counts.rc_event[flowid] != 0) {
			bcm_bprintf(strbuf, " %s = %u\n", bcmevent_get_name(flowid),
				dhdp->bus->wake_counts.rc_event[flowid]);
		}
	}
	bcm_bprintf(strbuf, "\n");
#endif /* DHD_WAKE_EVENT_STATUS */
#endif /* DHD_WAKE_STATUS */

	dhd_prot_print_info(dhdp, strbuf);
	dhd_dump_intr_registers(dhdp, strbuf);
	dhd_dump_intr_counters(dhdp, strbuf);
	bcm_bprintf(strbuf, "h2d_mb_data_ptr_addr 0x%x, d2h_mb_data_ptr_addr 0x%x\n",
		dhdp->bus->h2d_mb_data_ptr_addr, dhdp->bus->d2h_mb_data_ptr_addr);
	bcm_bprintf(strbuf, "dhd cumm_ctr %d\n", DHD_CUMM_CTR_READ(&dhdp->cumm_ctr));
#ifdef DHD_LIMIT_MULTI_CLIENT_FLOWRINGS
	bcm_bprintf(strbuf, "multi_client_flow_rings:%d max_multi_client_flow_rings:%d\n",
		dhdp->multi_client_flow_rings, dhdp->max_multi_client_flow_rings);
#endif /* DHD_LIMIT_MULTI_CLIENT_FLOWRINGS */
	bcm_bprintf(strbuf,
		"%4s %4s %2s %4s %17s %4s %4s %6s %10s %4s %4s ",
		"Num:", "Flow", "If", "Prio", ":Dest_MacAddress:", "Qlen", "CLen", "L2CLen",
		" Overflows", " RD", " WR");

#ifdef TX_STATUS_LATENCY_STATS
	/* Average Tx status/completion latency in microseconds */
	bcm_bprintf(strbuf, "%16s %16s ", " NumTxPkts", " AvgTxCmpL_Us");
#endif /* TX_STATUS_LATENCY_STATS */

	bcm_bprintf(strbuf, "\n");
	for (flowid = 0; flowid < dhdp->num_flow_rings; flowid++) {
		flow_ring_node = DHD_FLOW_RING(dhdp, flowid);
		if (!flow_ring_node->active)
			continue;

		flow_info = &flow_ring_node->flow_info;
		bcm_bprintf(strbuf,
			"%4d %4d %2d %4d "MACDBG" %4d %4d %6d %10u ", ix++,
			flow_ring_node->flowid, flow_info->ifindex, flow_info->tid,
			MAC2STRDBG(flow_info->da),
			DHD_FLOW_QUEUE_LEN(&flow_ring_node->queue),
			DHD_CUMM_CTR_READ(DHD_FLOW_QUEUE_CLEN_PTR(&flow_ring_node->queue)),
			DHD_CUMM_CTR_READ(DHD_FLOW_QUEUE_L2CLEN_PTR(&flow_ring_node->queue)),
			DHD_FLOW_QUEUE_FAILURES(&flow_ring_node->queue));
		dhd_prot_print_flow_ring(dhdp, flow_ring_node->prot_info, strbuf,
			"%4d %4d ");

#ifdef TX_STATUS_LATENCY_STATS
		bcm_bprintf(strbuf, "%16d %16d ",
			flow_info->num_tx_pkts,
			flow_info->num_tx_status ?
			DIV_U64_BY_U64(flow_info->cum_tx_status_latency,
				flow_info->num_tx_status) : 0);

		ifindex = flow_info->ifindex;
		ASSERT(ifindex < DHD_MAX_IFS);
		if (ifindex < DHD_MAX_IFS) {
			if_tx_status_latency[ifindex].num_tx_status += flow_info->num_tx_status;
			if_tx_status_latency[ifindex].cum_tx_status_latency +=
				flow_info->cum_tx_status_latency;
		} else {
			DHD_ERROR(("%s: Bad IF index: %d associated with flowid: %d\n",
				__FUNCTION__, ifindex, flowid));
		}
#endif /* TX_STATUS_LATENCY_STATS */
		bcm_bprintf(strbuf, "\n");
	}

#ifdef TX_STATUS_LATENCY_STATS
	bcm_bprintf(strbuf, "\n%s %16s %16s\n", "If", "AvgTxCmpL_Us", "NumTxStatus");
	if_flow_lkup = (if_flow_lkup_t *)dhdp->if_flow_lkup;
	for (ix = 0; ix < DHD_MAX_IFS; ix++) {
		if (!if_flow_lkup[ix].status) {
			continue;
		}
		bcm_bprintf(strbuf, "%2d %16d %16d\n",
			ix,
			if_tx_status_latency[ix].num_tx_status ?
			DIV_U64_BY_U64(if_tx_status_latency[ix].cum_tx_status_latency,
				if_tx_status_latency[ix].num_tx_status) : 0,
			if_tx_status_latency[ix].num_tx_status);
	}
#endif /* TX_STATUS_LATENCY_STATS */

#ifdef DHD_HP2P
	if (dhdp->hp2p_capable) {
		bcm_bprintf(strbuf, "\n%s %16s %16s", "Flowid", "Tx_t0", "Tx_t1");

		for (flowid = 0; flowid < MAX_HP2P_FLOWS; flowid++) {
			hp2p_info_t *hp2p_info;
			int bin;

			hp2p_info = &dhdp->hp2p_info[flowid];
			if (hp2p_info->num_timer_start == 0)
				continue;

			bcm_bprintf(strbuf, "\n%d", hp2p_info->flowid);
			bcm_bprintf(strbuf, "\n%s", "Bin");

			for (bin = 0; bin < MAX_TX_HIST_BIN; bin++) {
				bcm_bprintf(strbuf, "\n%2d %20d %16d", bin,
					hp2p_info->tx_t0[bin], hp2p_info->tx_t1[bin]);
			}

			bcm_bprintf(strbuf, "\n%s %16s", "Flowid", "Rx_t0");
			bcm_bprintf(strbuf, "\n%d", hp2p_info->flowid);
			bcm_bprintf(strbuf, "\n%s", "Bin");

			for (bin = 0; bin < MAX_RX_HIST_BIN; bin++) {
				bcm_bprintf(strbuf, "\n%d %20d", bin,
					hp2p_info->rx_t0[bin]);
			}

			bcm_bprintf(strbuf, "\n%s %16s %16s",
				"Packet limit", "Timer limit", "Timer start");
			bcm_bprintf(strbuf, "\n%d %24d %16d", hp2p_info->num_pkt_limit,
				hp2p_info->num_timer_limit, hp2p_info->num_timer_start);
		}

		bcm_bprintf(strbuf, "\n");
	}
#endif /* DHD_HP2P */

	bcm_bprintf(strbuf, "D3 inform cnt %d\n", dhdp->bus->d3_inform_cnt);
	bcm_bprintf(strbuf, "D0 inform cnt %d\n", dhdp->bus->d0_inform_cnt);
	bcm_bprintf(strbuf, "D0 inform in use cnt %d\n", dhdp->bus->d0_inform_in_use_cnt);
	if (dhdp->d2h_hostrdy_supported) {
		bcm_bprintf(strbuf, "hostready count:%d\n", dhdp->bus->hostready_count);
	}
	bcm_bprintf(strbuf, "d2h_intr_method -> %s\n",
		dhdp->bus->d2h_intr_method ? "PCIE_MSI" : "PCIE_INTX");
}

#ifdef DNGL_AXI_ERROR_LOGGING
bool
dhd_axi_sig_match(dhd_pub_t *dhdp)
{
	uint32 axi_tcm_addr = dhdpcie_bus_rtcm32(dhdp->bus, dhdp->axierror_logbuf_addr);

	if (dhdp->dhd_induce_error == DHD_INDUCE_DROP_AXI_SIG) {
		DHD_ERROR(("%s: Induce AXI signature drop\n", __FUNCTION__));
		return FALSE;
	}

	DHD_ERROR(("%s: axi_tcm_addr: 0x%x, tcm range: 0x%x ~ 0x%x\n",
		__FUNCTION__, axi_tcm_addr, dhdp->bus->dongle_ram_base,
		dhdp->bus->dongle_ram_base + dhdp->bus->ramsize));
	if (axi_tcm_addr >= dhdp->bus->dongle_ram_base &&
	    axi_tcm_addr < dhdp->bus->dongle_ram_base + dhdp->bus->ramsize) {
		uint32 axi_signature = dhdpcie_bus_rtcm32(dhdp->bus, (axi_tcm_addr +
			OFFSETOF(hnd_ext_trap_axi_error_v1_t, signature)));
		if (axi_signature == HND_EXT_TRAP_AXIERROR_SIGNATURE) {
			return TRUE;
		} else {
			DHD_ERROR(("%s: No AXI signature: 0x%x\n",
				__FUNCTION__, axi_signature));
			return FALSE;
		}
	} else {
		DHD_ERROR(("%s: No AXI shared tcm address debug info.\n", __FUNCTION__));
		return FALSE;
	}
}

void
dhd_axi_error(dhd_pub_t *dhdp)
{
	dhd_axi_error_dump_t *axi_err_dump;
	uint8 *axi_err_buf = NULL;
	uint8 *p_axi_err = NULL;
	uint32 axi_logbuf_addr;
	uint32 axi_tcm_addr;
	int err, size;

	OSL_DELAY(75000);

	axi_logbuf_addr = dhdp->axierror_logbuf_addr;
	if (!axi_logbuf_addr) {
		DHD_ERROR(("%s: No AXI TCM address debug info.\n", __FUNCTION__));
		goto sched_axi;
	}

	axi_err_dump = dhdp->axi_err_dump;
	if (!axi_err_dump) {
		goto sched_axi;
	}

	if (!dhd_axi_sig_match(dhdp)) {
		goto sched_axi;
	}

	/* Reading AXI error data for SMMU fault */
	DHD_ERROR(("%s: Read AXI data from TCM address\n", __FUNCTION__));
	axi_tcm_addr = dhdpcie_bus_rtcm32(dhdp->bus, axi_logbuf_addr);
	size = sizeof(hnd_ext_trap_axi_error_v1_t);
	axi_err_buf = MALLOCZ(dhdp->osh, size);
	if (axi_err_buf == NULL) {
		DHD_ERROR(("%s: out of memory !\n", __FUNCTION__));
		goto sched_axi;
	}

	p_axi_err = axi_err_buf;
	err = dhdpcie_bus_membytes(dhdp->bus, FALSE, axi_tcm_addr, p_axi_err, size);
	if (err) {
		DHD_ERROR(("%s: error %d on reading %d membytes at 0x%08x\n",
			__FUNCTION__, err, size, axi_tcm_addr));
		goto sched_axi;
	}

	/* Dump data to dmesg */
	dhd_log_dump_axi_error(axi_err_buf);
	err = memcpy_s(&axi_err_dump->etd_axi_error_v1, size, axi_err_buf, size);
	if (err) {
		DHD_ERROR(("%s: failed to copy etd axi error info, err=%d\n",
			__FUNCTION__, err));
	}

sched_axi:
	if (axi_err_buf) {
		MFREE(dhdp->osh, axi_err_buf, size);
	}
	dhd_schedule_axi_error_dump(dhdp, NULL);
}

static void
dhd_log_dump_axi_error(uint8 *axi_err)
{
	dma_dentry_v1_t dma_dentry;
	dma_fifo_v1_t dma_fifo;
	int i = 0, j = 0;

	if (*(uint8 *)axi_err == HND_EXT_TRAP_AXIERROR_VERSION_1) {
		hnd_ext_trap_axi_error_v1_t *axi_err_v1 = (hnd_ext_trap_axi_error_v1_t *)axi_err;
		DHD_ERROR(("%s: signature : 0x%x\n", __FUNCTION__, axi_err_v1->signature));
		DHD_ERROR(("%s: version : 0x%x\n", __FUNCTION__, axi_err_v1->version));
		DHD_ERROR(("%s: length : 0x%x\n", __FUNCTION__, axi_err_v1->length));
		DHD_ERROR(("%s: dma_fifo_valid_count : 0x%x\n",
			__FUNCTION__, axi_err_v1->dma_fifo_valid_count));
		DHD_ERROR(("%s: axi_errorlog_status : 0x%x\n",
			__FUNCTION__, axi_err_v1->axi_errorlog_status));
		DHD_ERROR(("%s: axi_errorlog_core : 0x%x\n",
			__FUNCTION__, axi_err_v1->axi_errorlog_core));
		DHD_ERROR(("%s: axi_errorlog_hi : 0x%x\n",
			__FUNCTION__, axi_err_v1->axi_errorlog_hi));
		DHD_ERROR(("%s: axi_errorlog_lo : 0x%x\n",
			__FUNCTION__, axi_err_v1->axi_errorlog_lo));
		DHD_ERROR(("%s: axi_errorlog_id : 0x%x\n",
			__FUNCTION__, axi_err_v1->axi_errorlog_id));

		for (i = 0; i < MAX_DMAFIFO_ENTRIES_V1; i++) {
			dma_fifo = axi_err_v1->dma_fifo[i];
			DHD_ERROR(("%s: valid:%d : 0x%x\n", __FUNCTION__, i, dma_fifo.valid));
			DHD_ERROR(("%s: direction:%d : 0x%x\n",
				__FUNCTION__, i, dma_fifo.direction));
			DHD_ERROR(("%s: index:%d : 0x%x\n",
				__FUNCTION__, i, dma_fifo.index));
			DHD_ERROR(("%s: dpa:%d : 0x%x\n",
				__FUNCTION__, i, dma_fifo.dpa));
			DHD_ERROR(("%s: desc_lo:%d : 0x%x\n",
				__FUNCTION__, i, dma_fifo.desc_lo));
			DHD_ERROR(("%s: desc_hi:%d : 0x%x\n",
				__FUNCTION__, i, dma_fifo.desc_hi));
			DHD_ERROR(("%s: din:%d : 0x%x\n",
				__FUNCTION__, i, dma_fifo.din));
			DHD_ERROR(("%s: dout:%d : 0x%x\n",
				__FUNCTION__, i, dma_fifo.dout));
			for (j = 0; j < MAX_DMAFIFO_DESC_ENTRIES_V1; j++) {
				dma_dentry = axi_err_v1->dma_fifo[i].dentry[j];
				DHD_ERROR(("%s: ctrl1:%d : 0x%x\n",
					__FUNCTION__, i, dma_dentry.ctrl1));
				DHD_ERROR(("%s: ctrl2:%d : 0x%x\n",
					__FUNCTION__, i, dma_dentry.ctrl2));
				DHD_ERROR(("%s: addrlo:%d : 0x%x\n",
					__FUNCTION__, i, dma_dentry.addrlo));
				DHD_ERROR(("%s: addrhi:%d : 0x%x\n",
					__FUNCTION__, i, dma_dentry.addrhi));
			}
		}
	} else {
		DHD_ERROR(("%s: Invalid AXI version: 0x%x\n", __FUNCTION__, (*(uint8 *)axi_err)));
	}
}
#endif /* DNGL_AXI_ERROR_LOGGING */

/**
 * Brings transmit packets on all flow rings closer to the dongle, by moving (a subset) from their
 * flow queue to their flow ring.
 */
static void
dhd_update_txflowrings(dhd_pub_t *dhd)
{
	unsigned long flags;
	dll_t *item, *next;
	flow_ring_node_t *flow_ring_node;
	struct dhd_bus *bus = dhd->bus;

	if (dhd_query_bus_erros(dhd)) {
		return;
	}

	/* Hold flowring_list_lock to ensure no race condition while accessing the list */
	DHD_FLOWRING_LIST_LOCK(bus->dhd->flowring_list_lock, flags);
	for (item = dll_head_p(&bus->flowring_active_list);
		(!dhd_is_device_removed(dhd) && !dll_end(&bus->flowring_active_list, item));
		item = next) {
		if (dhd->hang_was_sent) {
			break;
		}

		next = dll_next_p(item);
		flow_ring_node = dhd_constlist_to_flowring(item);

		/* Ensure that the flow_ring_node in the list is not NULL */
		ASSERT(flow_ring_node != NULL);

		/* Ensure that the flowring node has valid contents */
		ASSERT(flow_ring_node->prot_info != NULL);

		dhd_prot_update_txflowring(dhd, flow_ring_node->flowid, flow_ring_node->prot_info);
	}
	DHD_FLOWRING_LIST_UNLOCK(bus->dhd->flowring_list_lock, flags);
}

/** Mailbox ringbell Function */
static void
dhd_bus_gen_devmb_intr(struct dhd_bus *bus)
{
	if ((bus->sih->buscorerev == 2) || (bus->sih->buscorerev == 6) ||
	    (bus->sih->buscorerev == 4)) {
		DHD_ERROR(("mailbox communication not supported\n"));
		return;
	}

	if (bus->db1_for_mb) {
		/* this is a pcie core register, not the config register */
		DHD_INFO(("writing a mail box interrupt to the device, through doorbell 1\n"));
		if (DAR_PWRREQ(bus)) {
			dhd_bus_pcie_pwr_req(bus);
		}
		si_corereg(bus->sih, bus->sih->buscoreidx, dhd_bus_db1_addr_get(bus),
			~0, 0x12345678);
	} else {
		DHD_INFO(("writing a mail box interrupt to the device, through config space\n"));
		dhdpcie_bus_cfg_write_dword(bus, PCISBMbx, 4, (1 << 0));
		dhdpcie_bus_cfg_write_dword(bus, PCISBMbx, 4, (1 << 0));
	}
}
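
/*
 * Note (added for clarity): the 0x12345678 written to doorbell 1 above (and
 * to doorbell 0 in dhd_bus_ringbell() below) appears to be a dummy payload;
 * the write access itself is what raises the interrupt on the dongle side.
 */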

/* Upon receiving a mailbox interrupt,
 * if the H2D_FW_TRAP bit is set in the mailbox location,
 * the device traps.
 */
static void
dhdpcie_fw_trap(dhd_bus_t *bus)
{
	/* Send the mailbox data and generate the mailbox intr. */
	dhdpcie_send_mb_data(bus, H2D_FW_TRAP);
	/* For FWs that cannot interpret H2D_FW_TRAP */
	(void)dhd_wl_ioctl_set_intiovar(bus->dhd, "bus:disconnect", 99, WLC_SET_VAR, TRUE, 0);
}

/** mailbox doorbell ring function */
void
dhd_bus_ringbell(struct dhd_bus *bus, uint32 value)
{
	/* Skip after sending D3_INFORM */
	if (bus->bus_low_power_state != DHD_BUS_NO_LOW_POWER_STATE) {
		DHD_ERROR(("%s: trying to ring the doorbell after D3 inform %d\n",
			__FUNCTION__, bus->bus_low_power_state));
		return;
	}

	/* Skip in the case of link down */
	if (bus->is_linkdown) {
		DHD_ERROR(("%s: PCIe link was down\n", __FUNCTION__));
		return;
	}

	if ((bus->sih->buscorerev == 2) || (bus->sih->buscorerev == 6) ||
	    (bus->sih->buscorerev == 4)) {
		si_corereg(bus->sih, bus->sih->buscoreidx, bus->pcie_mailbox_int,
			PCIE_INTB, PCIE_INTB);
	} else {
		/* this is a pcie core register, not the config register */
		DHD_INFO(("writing a door bell to the device\n"));
		if (IDMA_ACTIVE(bus->dhd)) {
			if (DAR_PWRREQ(bus)) {
				dhd_bus_pcie_pwr_req(bus);
			}
			si_corereg(bus->sih, bus->sih->buscoreidx, dhd_bus_db0_addr_2_get(bus),
				~0, value);
		} else {
			if (DAR_PWRREQ(bus)) {
				dhd_bus_pcie_pwr_req(bus);
			}
			si_corereg(bus->sih, bus->sih->buscoreidx,
				dhd_bus_db0_addr_get(bus), ~0, 0x12345678);
		}
	}
}

/** mailbox doorbell ring function for IDMA/IFRM using dma channel2 */
void
dhd_bus_ringbell_2(struct dhd_bus *bus, uint32 value, bool devwake)
{
	/* this is a pcie core register, not the config register */
	/* Skip after sending D3_INFORM */
	if (bus->bus_low_power_state != DHD_BUS_NO_LOW_POWER_STATE) {
		DHD_ERROR(("%s: trying to ring the doorbell after D3 inform %d\n",
			__FUNCTION__, bus->bus_low_power_state));
		return;
	}

	/* Skip in the case of link down */
	if (bus->is_linkdown) {
		DHD_ERROR(("%s: PCIe link was down\n", __FUNCTION__));
		return;
	}

	DHD_INFO(("writing a door bell 2 to the device\n"));
	if (DAR_PWRREQ(bus)) {
		dhd_bus_pcie_pwr_req(bus);
	}
	si_corereg(bus->sih, bus->sih->buscoreidx, dhd_bus_db0_addr_2_get(bus),
		~0, value);
}

void
dhdpcie_bus_ringbell_fast(struct dhd_bus *bus, uint32 value)
{
	/* Skip after sending D3_INFORM */
	if (bus->bus_low_power_state != DHD_BUS_NO_LOW_POWER_STATE) {
		DHD_ERROR(("%s: trying to ring the doorbell after D3 inform %d\n",
			__FUNCTION__, bus->bus_low_power_state));
		return;
	}

	/* Skip in the case of link down */
	if (bus->is_linkdown) {
		DHD_ERROR(("%s: PCIe link was down\n", __FUNCTION__));
		return;
	}

	if (DAR_PWRREQ(bus)) {
		dhd_bus_pcie_pwr_req(bus);
	}

#ifdef DHD_DB0TS
	if (bus->dhd->db0ts_capable) {
		uint64 ts;

		ts = local_clock();
		do_div(ts, 1000);

		value = htol32(ts & 0xFFFFFFFF);
		DHD_INFO(("%s: usec timer = 0x%x\n", __FUNCTION__, value));
	}
#endif /* DHD_DB0TS */
	W_REG(bus->pcie_mb_intr_osh, bus->pcie_mb_intr_addr, value);
}

void
dhdpcie_bus_ringbell_2_fast(struct dhd_bus *bus, uint32 value, bool devwake)
{
	/* Skip after sending D3_INFORM */
	if (bus->bus_low_power_state != DHD_BUS_NO_LOW_POWER_STATE) {
		DHD_ERROR(("%s: trying to ring the doorbell after D3 inform %d\n",
			__FUNCTION__, bus->bus_low_power_state));
		return;
	}

	/* Skip in the case of link down */
	if (bus->is_linkdown) {
		DHD_ERROR(("%s: PCIe link was down\n", __FUNCTION__));
		return;
	}

	if (DAR_PWRREQ(bus)) {
		dhd_bus_pcie_pwr_req(bus);
	}
	W_REG(bus->pcie_mb_intr_osh, bus->pcie_mb_intr_2_addr, value);
}

static void
dhd_bus_ringbell_oldpcie(struct dhd_bus *bus, uint32 value)
{
	uint32 w;

	/* Skip after sending D3_INFORM */
	if (bus->bus_low_power_state != DHD_BUS_NO_LOW_POWER_STATE) {
		DHD_ERROR(("%s: trying to ring the doorbell after D3 inform %d\n",
			__FUNCTION__, bus->bus_low_power_state));
		return;
	}

	/* Skip in the case of link down */
	if (bus->is_linkdown) {
		DHD_ERROR(("%s: PCIe link was down\n", __FUNCTION__));
		return;
	}

	w = (R_REG(bus->pcie_mb_intr_osh, bus->pcie_mb_intr_addr) & ~PCIE_INTB) | PCIE_INTB;
	W_REG(bus->pcie_mb_intr_osh, bus->pcie_mb_intr_addr, w);
}

dhd_mb_ring_t
dhd_bus_get_mbintr_fn(struct dhd_bus *bus)
{
	if ((bus->sih->buscorerev == 2) || (bus->sih->buscorerev == 6) ||
	    (bus->sih->buscorerev == 4)) {
		bus->pcie_mb_intr_addr = si_corereg_addr(bus->sih, bus->sih->buscoreidx,
			bus->pcie_mailbox_int);
		if (bus->pcie_mb_intr_addr) {
			bus->pcie_mb_intr_osh = si_osh(bus->sih);
			return dhd_bus_ringbell_oldpcie;
		}
	} else {
		bus->pcie_mb_intr_addr = si_corereg_addr(bus->sih, bus->sih->buscoreidx,
			dhd_bus_db0_addr_get(bus));
		if (bus->pcie_mb_intr_addr) {
			bus->pcie_mb_intr_osh = si_osh(bus->sih);
			return dhdpcie_bus_ringbell_fast;
		}
	}
	return dhd_bus_ringbell;
}

dhd_mb_ring_2_t
dhd_bus_get_mbintr_2_fn(struct dhd_bus *bus)
{
	bus->pcie_mb_intr_2_addr = si_corereg_addr(bus->sih, bus->sih->buscoreidx,
		dhd_bus_db0_addr_2_get(bus));
	if (bus->pcie_mb_intr_2_addr) {
		bus->pcie_mb_intr_osh = si_osh(bus->sih);
		return dhdpcie_bus_ringbell_2_fast;
	}

	return dhd_bus_ringbell_2;
}
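
/*
 * Note (added for clarity): the two selectors above prefer the "fast" ring
 * functions, which write the doorbell through a cached backplane register
 * address (pcie_mb_intr_addr / pcie_mb_intr_2_addr) with W_REG(). Old PCIe
 * core revs (2/4/6) get the PCIE_INTB-based ringer instead, and if no
 * register address can be resolved, the generic si_corereg()-based
 * dhd_bus_ringbell / dhd_bus_ringbell_2 is returned as the fallback.
 */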

bool BCMFASTPATH
dhd_bus_dpc(struct dhd_bus *bus)
{
	bool resched = FALSE;	/* Flag indicating resched wanted */
	unsigned long flags;

	DHD_TRACE(("%s: Enter\n", __FUNCTION__));

	bus->dpc_entry_time = OSL_LOCALTIME_NS();

	DHD_GENERAL_LOCK(bus->dhd, flags);
	/* Check for only DHD_BUS_DOWN and not for DHD_BUS_DOWN_IN_PROGRESS,
	 * to avoid an "IOCTL Resumed On timeout" when an ioctl is waiting for
	 * a response and rmmod is fired in parallel, which sets
	 * DHD_BUS_DOWN_IN_PROGRESS; if we returned from here in that case,
	 * the IOCTL response would never be handled.
	 */
	if (bus->dhd->busstate == DHD_BUS_DOWN) {
		DHD_ERROR(("%s: Bus down, ret\n", __FUNCTION__));
		bus->intstatus = 0;
		DHD_GENERAL_UNLOCK(bus->dhd, flags);
		bus->dpc_return_busdown_count++;
		return 0;
	}
#ifdef DHD_PCIE_RUNTIMEPM
	bus->idlecount = 0;
#endif /* DHD_PCIE_RUNTIMEPM */
	DHD_BUS_BUSY_SET_IN_DPC(bus->dhd);
	DHD_GENERAL_UNLOCK(bus->dhd, flags);

	resched = dhdpcie_bus_process_mailbox_intr(bus, bus->intstatus);
	if (!resched) {
		bus->intstatus = 0;
		bus->dpc_intr_enable_count++;
		/* For Linux, MacOS etc. (other than NDIS), enable back the host
		 * interrupts which were disabled in dhdpcie_bus_isr()
		 */
		dhdpcie_enable_irq(bus);	/* Enable back interrupt!! */
		bus->dpc_exit_time = OSL_LOCALTIME_NS();
	} else {
		bus->resched_dpc_time = OSL_LOCALTIME_NS();
	}

	bus->dpc_sched = resched;

	DHD_GENERAL_LOCK(bus->dhd, flags);
	DHD_BUS_BUSY_CLEAR_IN_DPC(bus->dhd);
	dhd_os_busbusy_wake(bus->dhd);
	DHD_GENERAL_UNLOCK(bus->dhd, flags);

	return resched;
}

int
dhdpcie_send_mb_data(dhd_bus_t *bus, uint32 h2d_mb_data)
{
	uint32 cur_h2d_mb_data = 0;

	DHD_INFO_HW4(("%s: H2D_MB_DATA: 0x%08X\n", __FUNCTION__, h2d_mb_data));

	if (bus->is_linkdown) {
		DHD_ERROR(("%s: PCIe link was down\n", __FUNCTION__));
		return BCME_ERROR;
	}

	if (bus->api.fw_rev >= PCIE_SHARED_VERSION_6 && !bus->use_mailbox) {
		DHD_INFO(("API rev >= 6, sending mb data as H2D Ctrl message to dongle, 0x%04x\n",
			h2d_mb_data));
		/* Prevent asserting device_wake during doorbell ring for mb data to avoid loop. */
		{
			if (dhd_prot_h2d_mbdata_send_ctrlmsg(bus->dhd, h2d_mb_data)) {
				DHD_ERROR(("failure sending the H2D Mailbox message "
					"to firmware\n"));
				goto fail;
			}
		}
		goto done;
	}

	dhd_bus_cmn_readshared(bus, &cur_h2d_mb_data, H2D_MB_DATA, 0);

	if (cur_h2d_mb_data != 0) {
		uint32 i = 0;
		DHD_INFO(("GRRRRRRR: MB transaction is already pending 0x%04x\n", cur_h2d_mb_data));
		while ((i++ < 100) && cur_h2d_mb_data) {
			OSL_DELAY(10);
			dhd_bus_cmn_readshared(bus, &cur_h2d_mb_data, H2D_MB_DATA, 0);
		}
		if (i >= 100) {
			DHD_ERROR(("%s : waited 1ms for the dngl "
				"to ack the previous mb transaction\n", __FUNCTION__));
			DHD_ERROR(("%s : MB transaction is still pending 0x%04x\n",
				__FUNCTION__, cur_h2d_mb_data));
		}
	}

	dhd_bus_cmn_writeshared(bus, &h2d_mb_data, sizeof(uint32), H2D_MB_DATA, 0);
	dhd_bus_gen_devmb_intr(bus);

done:
	if (h2d_mb_data == H2D_HOST_D3_INFORM) {
		DHD_INFO_HW4(("%s: send H2D_HOST_D3_INFORM to dongle\n", __FUNCTION__));
		bus->last_d3_inform_time = OSL_LOCALTIME_NS();
		bus->d3_inform_cnt++;
	}
	if (h2d_mb_data == H2D_HOST_D0_INFORM_IN_USE) {
		DHD_INFO_HW4(("%s: send H2D_HOST_D0_INFORM_IN_USE to dongle\n", __FUNCTION__));
		bus->d0_inform_in_use_cnt++;
	}
	if (h2d_mb_data == H2D_HOST_D0_INFORM) {
		DHD_INFO_HW4(("%s: send H2D_HOST_D0_INFORM to dongle\n", __FUNCTION__));
		bus->d0_inform_cnt++;
	}
	return BCME_OK;

fail:
	return BCME_ERROR;
}
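
/*
 * Note (added for clarity): H2D mailbox data can reach the dongle on two
 * paths. With PCIE_SHARED_VERSION_6+ firmware that does not force the legacy
 * mailbox (use_mailbox == FALSE), the data rides the control submission ring
 * via dhd_prot_h2d_mbdata_send_ctrlmsg(). Otherwise it is written into the
 * shared-memory H2D_MB_DATA word, after polling up to ~1 ms for the dongle
 * to consume any previous value, and a device mailbox interrupt is generated.
 */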

static void
dhd_bus_handle_d3_ack(dhd_bus_t *bus)
{
	unsigned long flags_bus;

	DHD_BUS_LOCK(bus->bus_lock, flags_bus);

	bus->suspend_intr_disable_count++;
	/* Disable dongle interrupts immediately after D3.
	 * For Linux, MacOS etc. (other than NDIS), along with disabling
	 * the dongle interrupt by clearing the IntMask, disable the
	 * interrupt from the host side as well. Also clear the intstatus
	 * if it is set, to avoid unnecessary interrupts after D3 ACK.
	 */
	dhdpcie_bus_intr_disable(bus);	/* Disable interrupt using IntMask!! */
	dhdpcie_bus_clear_intstatus(bus);
	dhdpcie_disable_irq_nosync(bus);	/* Disable host interrupt!! */

	if (bus->dhd->dhd_induce_error != DHD_INDUCE_D3_ACK_TIMEOUT) {
		/* Set bus_low_power_state to DHD_BUS_D3_ACK_RECIEVED */
		bus->bus_low_power_state = DHD_BUS_D3_ACK_RECIEVED;
		DHD_ERROR(("%s: D3_ACK Received\n", __FUNCTION__));
	}
	DHD_BUS_UNLOCK(bus->bus_lock, flags_bus);

	/* Check for the D3 ACK induce flag, which is set by firing a dhd iovar to
	 * induce a D3 Ack timeout. If the flag is set, the D3 wake is skipped,
	 * which results in a D3 Ack timeout.
	 */
	if (bus->dhd->dhd_induce_error != DHD_INDUCE_D3_ACK_TIMEOUT) {
		bus->wait_for_d3_ack = 1;
		dhd_os_d3ack_wake(bus->dhd);
	} else {
		DHD_ERROR(("%s: Inducing D3 ACK timeout\n", __FUNCTION__));
	}
}
void
dhd_bus_handle_mb_data(dhd_bus_t *bus, uint32 d2h_mb_data)
{
    if (MULTIBP_ENAB(bus->sih)) {
        dhd_bus_pcie_pwr_req(bus);
    }

    DHD_INFO(("D2H_MB_DATA: 0x%04x\n", d2h_mb_data));

    if (d2h_mb_data & D2H_DEV_FWHALT) {
        DHD_ERROR(("FW trap has happened\n"));
        dhdpcie_checkdied(bus, NULL, 0);
#ifdef OEM_ANDROID
#ifdef SUPPORT_LINKDOWN_RECOVERY
#ifdef CONFIG_ARCH_MSM
        bus->no_cfg_restore = 1;
#endif /* CONFIG_ARCH_MSM */
#endif /* SUPPORT_LINKDOWN_RECOVERY */
        dhd_os_check_hang(bus->dhd, 0, -EREMOTEIO);
#endif /* OEM_ANDROID */
        goto exit;
    }
    if (d2h_mb_data & D2H_DEV_DS_ENTER_REQ) {
        bool ds_acked = FALSE;
        BCM_REFERENCE(ds_acked);
        if (bus->bus_low_power_state == DHD_BUS_D3_ACK_RECIEVED) {
            DHD_ERROR(("DS-ENTRY AFTER D3-ACK!!!!! QUITTING\n"));
            DHD_ERROR(("%s: making DHD_BUS_DOWN\n", __FUNCTION__));
            bus->dhd->busstate = DHD_BUS_DOWN;
            goto exit;
        }
        /* what should we do */
        DHD_INFO(("D2H_MB_DATA: DEEP SLEEP REQ\n"));
        {
            dhdpcie_send_mb_data(bus, H2D_HOST_DS_ACK);
            DHD_INFO(("D2H_MB_DATA: sent DEEP SLEEP ACK\n"));
        }
    }
    if (d2h_mb_data & D2H_DEV_DS_EXIT_NOTE) {
        /* what should we do */
        DHD_INFO(("D2H_MB_DATA: DEEP SLEEP EXIT\n"));
    }
    if (d2h_mb_data & D2HMB_DS_HOST_SLEEP_EXIT_ACK) {
        /* what should we do */
        DHD_INFO(("D2H_MB_DATA: D0 ACK\n"));
    }
    if (d2h_mb_data & D2H_DEV_D3_ACK) {
        /* what should we do */
        DHD_INFO_HW4(("D2H_MB_DATA: D3 ACK\n"));
        if (!bus->wait_for_d3_ack) {
#if defined(DHD_HANG_SEND_UP_TEST)
            if (bus->dhd->req_hang_type == HANG_REASON_D3_ACK_TIMEOUT) {
                DHD_ERROR(("TEST HANG: Skip to process D3 ACK\n"));
            } else {
                dhd_bus_handle_d3_ack(bus);
            }
#else /* DHD_HANG_SEND_UP_TEST */
            dhd_bus_handle_d3_ack(bus);
#endif /* DHD_HANG_SEND_UP_TEST */
        }
    }

exit:
    if (MULTIBP_ENAB(bus->sih)) {
        dhd_bus_pcie_pwr_req_clear(bus);
    }
}
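
/* Illustrative sketch (not part of the original source): D2H_MB_DATA is a
 * bitmask, so one mailbox word can carry several events and the handler above
 * tests each bit independently rather than switching on the whole value.
 */
#if 0 /* example only */
    /* e.g. a word carrying both a deep-sleep exit note and a D3 ACK */
    dhd_bus_handle_mb_data(bus, D2H_DEV_DS_EXIT_NOTE | D2H_DEV_D3_ACK);
#endif /* example only */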
static void
dhdpcie_handle_mb_data(dhd_bus_t *bus)
{
    uint32 d2h_mb_data = 0;
    uint32 zero = 0;

    if (MULTIBP_ENAB(bus->sih)) {
        dhd_bus_pcie_pwr_req(bus);
    }

    dhd_bus_cmn_readshared(bus, &d2h_mb_data, D2H_MB_DATA, 0);
    if (D2H_DEV_MB_INVALIDATED(d2h_mb_data)) {
        DHD_ERROR(("%s: Invalid D2H_MB_DATA: 0x%08x\n",
            __FUNCTION__, d2h_mb_data));
        goto exit;
    }

    dhd_bus_cmn_writeshared(bus, &zero, sizeof(uint32), D2H_MB_DATA, 0);

    DHD_INFO_HW4(("D2H_MB_DATA: 0x%04x\n", d2h_mb_data));
    if (d2h_mb_data & D2H_DEV_FWHALT) {
        DHD_ERROR(("FW trap has happened\n"));
        dhdpcie_checkdied(bus, NULL, 0);
        /* not ready yet dhd_os_ind_firmware_stall(bus->dhd); */
        goto exit;
    }
    if (d2h_mb_data & D2H_DEV_DS_ENTER_REQ) {
        /* what should we do */
        DHD_INFO(("D2H_MB_DATA: DEEP SLEEP REQ\n"));
        dhdpcie_send_mb_data(bus, H2D_HOST_DS_ACK);
        DHD_INFO(("D2H_MB_DATA: sent DEEP SLEEP ACK\n"));
    }
    if (d2h_mb_data & D2H_DEV_DS_EXIT_NOTE) {
        /* what should we do */
        DHD_INFO(("D2H_MB_DATA: DEEP SLEEP EXIT\n"));
    }
    if (d2h_mb_data & D2H_DEV_D3_ACK) {
        /* what should we do */
        DHD_INFO_HW4(("D2H_MB_DATA: D3 ACK\n"));
        if (!bus->wait_for_d3_ack) {
#if defined(DHD_HANG_SEND_UP_TEST)
            if (bus->dhd->req_hang_type == HANG_REASON_D3_ACK_TIMEOUT) {
                DHD_ERROR(("TEST HANG: Skip to process D3 ACK\n"));
            } else {
                dhd_bus_handle_d3_ack(bus);
            }
#else /* DHD_HANG_SEND_UP_TEST */
            dhd_bus_handle_d3_ack(bus);
#endif /* DHD_HANG_SEND_UP_TEST */
        }
    }

exit:
    if (MULTIBP_ENAB(bus->sih)) {
        dhd_bus_pcie_pwr_req_clear(bus);
    }
}
static void
dhdpcie_read_handle_mb_data(dhd_bus_t *bus)
{
    uint32 d2h_mb_data = 0;
    uint32 zero = 0;

    if (bus->is_linkdown) {
        DHD_ERROR(("%s: PCIe link is down\n", __FUNCTION__));
        return;
    }

    if (MULTIBP_ENAB(bus->sih)) {
        dhd_bus_pcie_pwr_req(bus);
    }

    dhd_bus_cmn_readshared(bus, &d2h_mb_data, D2H_MB_DATA, 0);
    if (!d2h_mb_data) {
        goto exit;
    }

    dhd_bus_cmn_writeshared(bus, &zero, sizeof(uint32), D2H_MB_DATA, 0);

    dhd_bus_handle_mb_data(bus, d2h_mb_data);

exit:
    if (MULTIBP_ENAB(bus->sih)) {
        dhd_bus_pcie_pwr_req_clear(bus);
    }
}
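
/* Illustrative sketch (not part of the original source): both mailbox readers
 * above follow the same read-then-clear protocol. The host reads D2H_MB_DATA
 * from the shared area, writes zero back to acknowledge, and only then
 * dispatches the bits, so a repeated interrupt cannot re-deliver the event.
 */
#if 0 /* example only */
static void
example_read_and_clear_mb(dhd_bus_t *bus)
{
    uint32 d2h_mb_data = 0;
    uint32 zero = 0;

    dhd_bus_cmn_readshared(bus, &d2h_mb_data, D2H_MB_DATA, 0);
    if (d2h_mb_data == 0)
        return; /* nothing pending */
    /* ack before dispatch */
    dhd_bus_cmn_writeshared(bus, &zero, sizeof(uint32), D2H_MB_DATA, 0);
    dhd_bus_handle_mb_data(bus, d2h_mb_data);
}
#endif /* example only */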
static bool
dhdpcie_bus_process_mailbox_intr(dhd_bus_t *bus, uint32 intstatus)
{
    bool resched = FALSE;
    unsigned long flags_bus;

    if (MULTIBP_ENAB(bus->sih)) {
        dhd_bus_pcie_pwr_req(bus);
    }

    if ((bus->sih->buscorerev == 2) || (bus->sih->buscorerev == 6) ||
        (bus->sih->buscorerev == 4)) {
        /* Msg stream interrupt */
        if (intstatus & I_BIT1) {
            resched = dhdpci_bus_read_frames(bus);
        } else if (intstatus & I_BIT0) {
            /* do nothing for now */
        }
    } else {
        if (intstatus & (PCIE_MB_TOPCIE_FN0_0 | PCIE_MB_TOPCIE_FN0_1))
            bus->api.handle_mb_data(bus);

        /* Do not process any rings after receiving D3_ACK */
        DHD_BUS_LOCK(bus->bus_lock, flags_bus);
        if (bus->bus_low_power_state == DHD_BUS_D3_ACK_RECIEVED) {
            DHD_ERROR(("%s: D3 Ack Received. "
                "Skip processing rest of ring buffers.\n", __FUNCTION__));
            DHD_BUS_UNLOCK(bus->bus_lock, flags_bus);
            goto exit;
        }
        DHD_BUS_UNLOCK(bus->bus_lock, flags_bus);

        /* Validate intstatus only for the INTX case */
        if ((bus->d2h_intr_method == PCIE_MSI) ||
            ((bus->d2h_intr_method == PCIE_INTX) && (intstatus & bus->d2h_mb_mask))) {
#ifdef DHD_PCIE_NATIVE_RUNTIMEPM
            if (pm_runtime_get(dhd_bus_to_dev(bus)) >= 0) {
                resched = dhdpci_bus_read_frames(bus);
                pm_runtime_mark_last_busy(dhd_bus_to_dev(bus));
                pm_runtime_put_autosuspend(dhd_bus_to_dev(bus));
            }
#else
            resched = dhdpci_bus_read_frames(bus);
#endif /* DHD_PCIE_NATIVE_RUNTIMEPM */
        }
    }

exit:
    if (MULTIBP_ENAB(bus->sih)) {
        dhd_bus_pcie_pwr_req_clear(bus);
    }
    return resched;
}
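
/* Illustrative sketch (not part of the original source): why intstatus is
 * validated only for INTx above. With MSI the message itself is the event, so
 * frames are always read; with legacy INTx the line may be shared, so frames
 * are read only when a D2H mailbox bit is actually set.
 */
#if 0 /* example only */
static bool
example_should_read_frames(dhd_bus_t *bus, uint32 intstatus)
{
    if (bus->d2h_intr_method == PCIE_MSI)
        return TRUE;
    return (intstatus & bus->d2h_mb_mask) != 0;
}
#endif /* example only */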
#if defined(DHD_H2D_LOG_TIME_SYNC)
static void
dhdpci_bus_rte_log_time_sync_poll(dhd_bus_t *bus)
{
    unsigned long time_elapsed;

    /* Poll for the timeout value periodically */
    if ((bus->dhd->busstate == DHD_BUS_DATA) &&
        (bus->dhd->dhd_rte_time_sync_ms != 0) &&
        (bus->bus_low_power_state == DHD_BUS_NO_LOW_POWER_STATE)) {
        time_elapsed = OSL_SYSUPTIME_US() - bus->dhd_rte_time_sync_count;
        /* Comparison time is in milliseconds */
        if ((time_elapsed / 1000) >= bus->dhd->dhd_rte_time_sync_ms) {
            /* It's fine if it has crossed the timeout value;
             * no need to adjust the elapsed time.
             */
            bus->dhd_rte_time_sync_count += time_elapsed;

            /* Schedule deferred work. The work function will send the IOVAR. */
            dhd_h2d_log_time_sync_deferred_wq_schedule(bus->dhd);
        }
    }
}
#endif /* DHD_H2D_LOG_TIME_SYNC */
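
/* Illustrative sketch (not part of the original source): the poll above
 * converts the microsecond uptime delta to milliseconds before comparing it
 * against the configured dhd_rte_time_sync_ms period.
 */
#if 0 /* example only */
static bool
example_time_sync_due(uint64 start_us, uint32 period_ms)
{
    uint64 elapsed_us = OSL_SYSUPTIME_US() - start_us;
    return (elapsed_us / 1000) >= period_ms;
}
#endif /* example only */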
static bool
dhdpci_bus_read_frames(dhd_bus_t *bus)
{
    bool more = FALSE;
    unsigned long flags_bus;

    /* First check if there is a FW trap */
    if ((bus->api.fw_rev >= PCIE_SHARED_VERSION_6) &&
        (bus->dhd->dongle_trap_data = dhd_prot_process_trapbuf(bus->dhd))) {
#ifdef DNGL_AXI_ERROR_LOGGING
        if (bus->dhd->axi_error) {
            DHD_ERROR(("AXI Error happened\n"));
            return FALSE;
        }
#endif /* DNGL_AXI_ERROR_LOGGING */
        dhd_bus_handle_mb_data(bus, D2H_DEV_FWHALT);
        return FALSE;
    }

    /* There may be frames in both ctrl buf and data buf; check ctrl buf first */
    DHD_PERIM_LOCK_ALL((bus->dhd->fwder_unit % FWDER_MAX_UNIT));
    dhd_prot_process_ctrlbuf(bus->dhd);
    bus->last_process_ctrlbuf_time = OSL_LOCALTIME_NS();
    /* Unlock to give the response a chance to be handled */
    DHD_PERIM_UNLOCK_ALL((bus->dhd->fwder_unit % FWDER_MAX_UNIT));

    /* Do not process the rest of the ring buffers once the bus enters a
     * low power state (D3_INFORM/D3_ACK)
     */
    DHD_BUS_LOCK(bus->bus_lock, flags_bus);
    if (bus->bus_low_power_state != DHD_BUS_NO_LOW_POWER_STATE) {
        DHD_ERROR(("%s: Bus is in power save state (%d). "
            "Skip processing rest of ring buffers.\n",
            __FUNCTION__, bus->bus_low_power_state));
        DHD_BUS_UNLOCK(bus->bus_lock, flags_bus);
        return FALSE;
    }
    DHD_BUS_UNLOCK(bus->bus_lock, flags_bus);

    DHD_PERIM_LOCK_ALL((bus->dhd->fwder_unit % FWDER_MAX_UNIT));
    /* update the flow ring cpls */
    dhd_update_txflowrings(bus->dhd);
    bus->last_process_flowring_time = OSL_LOCALTIME_NS();

    /* With heavy TX traffic, we could get a lot of TxStatus,
     * so add a bound
     */
#ifdef DHD_HP2P
    more |= dhd_prot_process_msgbuf_txcpl(bus->dhd, dhd_txbound, DHD_HP2P_RING);
#endif /* DHD_HP2P */
    more |= dhd_prot_process_msgbuf_txcpl(bus->dhd, dhd_txbound, DHD_REGULAR_RING);
    bus->last_process_txcpl_time = OSL_LOCALTIME_NS();

    /* With heavy RX traffic, this routine could potentially spend some time
     * processing RX frames without an RX bound
     */
#ifdef DHD_HP2P
    more |= dhd_prot_process_msgbuf_rxcpl(bus->dhd, dhd_rxbound, DHD_HP2P_RING);
#endif /* DHD_HP2P */
    more |= dhd_prot_process_msgbuf_rxcpl(bus->dhd, dhd_rxbound, DHD_REGULAR_RING);
    bus->last_process_rxcpl_time = OSL_LOCALTIME_NS();

    /* Process info ring completion messages */
#ifdef EWP_EDL
    if (!bus->dhd->dongle_edl_support)
#endif /* EWP_EDL */
    {
        more |= dhd_prot_process_msgbuf_infocpl(bus->dhd, DHD_INFORING_BOUND);
        bus->last_process_infocpl_time = OSL_LOCALTIME_NS();
    }
#ifdef EWP_EDL
    else {
        more |= dhd_prot_process_msgbuf_edl(bus->dhd);
        bus->last_process_edl_time = OSL_LOCALTIME_NS();
    }
#endif /* EWP_EDL */

#ifdef IDLE_TX_FLOW_MGMT
    if (bus->enable_idle_flowring_mgmt) {
        /* Look for idle flow rings */
        dhd_bus_check_idle_scan(bus);
    }
#endif /* IDLE_TX_FLOW_MGMT */

    /* don't talk to the dongle if fw is about to be reloaded */
    if (bus->dhd->hang_was_sent) {
        more = FALSE;
    }
    DHD_PERIM_UNLOCK_ALL((bus->dhd->fwder_unit % FWDER_MAX_UNIT));

#ifdef SUPPORT_LINKDOWN_RECOVERY
    if (bus->read_shm_fail) {
        /* Read the interrupt state once again to confirm linkdown */
        int intstatus = si_corereg(bus->sih, bus->sih->buscoreidx,
            bus->pcie_mailbox_int, 0, 0);
        if (intstatus != (uint32)-1) {
            DHD_ERROR(("%s: read SHM failed but intstatus is valid\n", __FUNCTION__));
#ifdef DHD_FW_COREDUMP
            if (bus->dhd->memdump_enabled) {
                DHD_OS_WAKE_LOCK(bus->dhd);
                bus->dhd->memdump_type = DUMP_TYPE_READ_SHM_FAIL;
                dhd_bus_mem_dump(bus->dhd);
                DHD_OS_WAKE_UNLOCK(bus->dhd);
            }
#endif /* DHD_FW_COREDUMP */
        } else {
            DHD_ERROR(("%s: Link is Down.\n", __FUNCTION__));
#ifdef CONFIG_ARCH_MSM
            bus->no_cfg_restore = 1;
#endif /* CONFIG_ARCH_MSM */
            bus->is_linkdown = 1;
        }

        dhd_prot_debug_info_print(bus->dhd);
        bus->dhd->hang_reason = HANG_REASON_PCIE_LINK_DOWN_EP_DETECT;
        dhd_os_send_hang_message(bus->dhd);
        more = FALSE;
    }
#endif /* SUPPORT_LINKDOWN_RECOVERY */

#if defined(DHD_H2D_LOG_TIME_SYNC)
    dhdpci_bus_rte_log_time_sync_poll(bus);
#endif /* DHD_H2D_LOG_TIME_SYNC */
    return more;
}
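
/* Illustrative sketch (not part of the original source): the "more" flag
 * pattern used above. Each completion processor drains at most a bound of
 * messages and reports whether work remains, so the caller can reschedule the
 * DPC instead of monopolizing the CPU under heavy traffic.
 */
#if 0 /* example only */
static bool
example_bounded_drain(dhd_pub_t *dhdp)
{
    bool more = FALSE;
    more |= dhd_prot_process_msgbuf_txcpl(dhdp, dhd_txbound, DHD_REGULAR_RING);
    more |= dhd_prot_process_msgbuf_rxcpl(dhdp, dhd_rxbound, DHD_REGULAR_RING);
    return more; /* TRUE => caller should reschedule */
}
#endif /* example only */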
bool
dhdpcie_tcm_valid(dhd_bus_t *bus)
{
    uint32 addr = 0;
    int rv;
    uint32 shaddr = 0;
    pciedev_shared_t sh;

    shaddr = bus->dongle_ram_base + bus->ramsize - 4;

    /* Read last word in memory to determine address of pciedev_shared structure */
    addr = LTOH32(dhdpcie_bus_rtcm32(bus, shaddr));

    if ((addr == 0) || (addr == bus->nvram_csm) || (addr < bus->dongle_ram_base) ||
        (addr > shaddr)) {
        DHD_ERROR(("%s: address (0x%08x) of pciedev_shared is invalid\n",
            __FUNCTION__, addr));
        return FALSE;
    }

    /* Read hndrte_shared structure */
    if ((rv = dhdpcie_bus_membytes(bus, FALSE, addr, (uint8 *)&sh,
        sizeof(pciedev_shared_t))) < 0) {
        DHD_ERROR(("Failed to read PCIe shared struct with %d\n", rv));
        return FALSE;
    }

    /* Compare any field in pciedev_shared_t */
    if (sh.console_addr != bus->pcie_sh->console_addr) {
        DHD_ERROR(("Contents of pciedev_shared_t structure are not matching.\n"));
        return FALSE;
    }

    return TRUE;
}
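
/* Illustrative sketch (not part of the original source): a caller can use
 * dhdpcie_tcm_valid() as a cheap sanity check that dongle RAM still holds the
 * shared structure (e.g. after resume) before trusting the rings again.
 */
#if 0 /* example only */
    if (!dhdpcie_tcm_valid(bus)) {
        DHD_ERROR(("TCM contents look invalid; reinit required\n"));
        return BCME_ERROR;
    }
#endif /* example only */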
static void
dhdpcie_update_bus_api_revisions(uint32 firmware_api_version, uint32 host_api_version)
{
    snprintf(bus_api_revision, BUS_API_REV_STR_LEN, "\nBus API revisions:(FW rev%d)(DHD rev%d)",
        firmware_api_version, host_api_version);
    return;
}

static bool
dhdpcie_check_firmware_compatible(uint32 firmware_api_version, uint32 host_api_version)
{
    bool retcode = FALSE;

    DHD_INFO(("firmware api revision %d, host api revision %d\n",
        firmware_api_version, host_api_version));

    switch (firmware_api_version) {
    case PCIE_SHARED_VERSION_7:
    case PCIE_SHARED_VERSION_6:
    case PCIE_SHARED_VERSION_5:
        retcode = TRUE;
        break;
    default:
        if (firmware_api_version <= host_api_version)
            retcode = TRUE;
    }
    return retcode;
}
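
/* Illustrative note (not part of the original source): revisions 5/6/7 are
 * accepted outright; any other firmware revision is accepted only when it
 * does not exceed the revision this host was built against, e.g. a rev-4
 * image passes with PCIE_SHARED_VERSION >= 4 but a rev-9 image is rejected.
 */
#if 0 /* example only */
    if (!dhdpcie_check_firmware_compatible(fw_rev, PCIE_SHARED_VERSION))
        return BCME_ERROR; /* mismatched image */
#endif /* example only */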
static int
dhdpcie_readshared(dhd_bus_t *bus)
{
    uint32 addr = 0;
    int rv, dma_indx_wr_buf, dma_indx_rd_buf;
    uint32 shaddr = 0;
    pciedev_shared_t *sh = bus->pcie_sh;
    dhd_timeout_t tmo;
    bool idma_en = FALSE;

    if (MULTIBP_ENAB(bus->sih)) {
        dhd_bus_pcie_pwr_req(bus);
    }

    shaddr = bus->dongle_ram_base + bus->ramsize - 4;
    /* start a timer for 5 seconds */
    dhd_timeout_start(&tmo, MAX_READ_TIMEOUT);

    while (((addr == 0) || (addr == bus->nvram_csm)) && !dhd_timeout_expired(&tmo)) {
        /* Read last word in memory to determine address of pciedev_shared structure */
        addr = LTOH32(dhdpcie_bus_rtcm32(bus, shaddr));
    }

    if (addr == (uint32)-1) {
        DHD_ERROR(("%s: PCIe link might be down\n", __FUNCTION__));
#ifdef SUPPORT_LINKDOWN_RECOVERY
#ifdef CONFIG_ARCH_MSM
        bus->no_cfg_restore = 1;
#endif /* CONFIG_ARCH_MSM */
#endif /* SUPPORT_LINKDOWN_RECOVERY */
        bus->is_linkdown = 1;
        return BCME_ERROR;
    }

    if ((addr == 0) || (addr == bus->nvram_csm) || (addr < bus->dongle_ram_base) ||
        (addr > shaddr)) {
        DHD_ERROR(("%s: address (0x%08x) of pciedev_shared is invalid\n",
            __FUNCTION__, addr));
        DHD_ERROR(("Waited %u usec, dongle is not ready\n", tmo.elapsed));
#ifdef DEBUG_DNGL_INIT_FAIL
        if (addr != (uint32)-1) { /* skip further PCIE reads if read this addr */
#ifdef CUSTOMER_HW4_DEBUG
            bus->dhd->memdump_enabled = DUMP_MEMFILE_BUGON;
#endif /* CUSTOMER_HW4_DEBUG */
            if (bus->dhd->memdump_enabled) {
                bus->dhd->memdump_type = DUMP_TYPE_DONGLE_INIT_FAILURE;
                dhdpcie_mem_dump(bus);
            }
        }
#endif /* DEBUG_DNGL_INIT_FAIL */
        return BCME_ERROR;
    } else {
        bus->shared_addr = (ulong)addr;
        DHD_ERROR(("PCIe shared addr (0x%08x) read took %u usec "
            "before dongle is ready\n", addr, tmo.elapsed));
    }

    /* Read hndrte_shared structure */
    if ((rv = dhdpcie_bus_membytes(bus, FALSE, addr, (uint8 *)sh,
        sizeof(pciedev_shared_t))) < 0) {
        DHD_ERROR(("Failed to read PCIe shared struct with %d\n", rv));
        return rv;
    }

    /* Endianness */
    sh->flags = ltoh32(sh->flags);
    sh->trap_addr = ltoh32(sh->trap_addr);
    sh->assert_exp_addr = ltoh32(sh->assert_exp_addr);
    sh->assert_file_addr = ltoh32(sh->assert_file_addr);
    sh->assert_line = ltoh32(sh->assert_line);
    sh->console_addr = ltoh32(sh->console_addr);
    sh->msgtrace_addr = ltoh32(sh->msgtrace_addr);
    sh->dma_rxoffset = ltoh32(sh->dma_rxoffset);
    sh->rings_info_ptr = ltoh32(sh->rings_info_ptr);
    sh->flags2 = ltoh32(sh->flags2);

    /* load bus console address */
    bus->console_addr = sh->console_addr;

    /* Read the dma rx offset */
    bus->dma_rxoffset = bus->pcie_sh->dma_rxoffset;
    dhd_prot_rx_dataoffset(bus->dhd, bus->dma_rxoffset);

    DHD_INFO(("DMA RX offset from shared area %d\n", bus->dma_rxoffset));

    bus->api.fw_rev = sh->flags & PCIE_SHARED_VERSION_MASK;
    if (!(dhdpcie_check_firmware_compatible(bus->api.fw_rev, PCIE_SHARED_VERSION)))
    {
        DHD_ERROR(("%s: pcie_shared version %d in dhd "
            "is older than pciedev_shared version %d in dongle\n",
            __FUNCTION__, PCIE_SHARED_VERSION,
            bus->api.fw_rev));
        return BCME_ERROR;
    }
    dhdpcie_update_bus_api_revisions(bus->api.fw_rev, PCIE_SHARED_VERSION);

    bus->rw_index_sz = (sh->flags & PCIE_SHARED_2BYTE_INDICES) ?
        sizeof(uint16) : sizeof(uint32);
    DHD_INFO(("%s: Dongle advertises %d-byte indices\n",
        __FUNCTION__, bus->rw_index_sz));

#ifdef IDLE_TX_FLOW_MGMT
    if (sh->flags & PCIE_SHARED_IDLE_FLOW_RING) {
        DHD_ERROR(("%s: FW supports idle flow ring management!\n",
            __FUNCTION__));
        bus->enable_idle_flowring_mgmt = TRUE;
    }
#endif /* IDLE_TX_FLOW_MGMT */

    if (IDMA_CAPABLE(bus)) {
        if (bus->sih->buscorerev != 23) {
            idma_en = TRUE;
        }
    }

    /* TODO: This needs to be selected based on IPC instead of at compile time */
    bus->dhd->hwa_enable = TRUE;

    if (idma_en) {
        bus->dhd->idma_enable = (sh->flags & PCIE_SHARED_IDMA) ? TRUE : FALSE;
        bus->dhd->ifrm_enable = (sh->flags & PCIE_SHARED_IFRM) ? TRUE : FALSE;
    }

    bus->dhd->d2h_sync_mode = sh->flags & PCIE_SHARED_D2H_SYNC_MODE_MASK;

    bus->dhd->dar_enable = (sh->flags & PCIE_SHARED_DAR) ? TRUE : FALSE;

    /* Does the FW support DMA'ing r/w indices */
    if (sh->flags & PCIE_SHARED_DMA_INDEX) {
        if (!bus->dhd->dma_ring_upd_overwrite) {
            {
                if (!IFRM_ENAB(bus->dhd)) {
                    bus->dhd->dma_h2d_ring_upd_support = TRUE;
                }
                bus->dhd->dma_d2h_ring_upd_support = TRUE;
            }
        }

        if (bus->dhd->dma_d2h_ring_upd_support)
            bus->dhd->d2h_sync_mode = 0;

        DHD_INFO(("%s: Host supports DMAing indices: H2D:%d - D2H:%d. FW supports it\n",
            __FUNCTION__,
            (bus->dhd->dma_h2d_ring_upd_support ? 1 : 0),
            (bus->dhd->dma_d2h_ring_upd_support ? 1 : 0)));
    } else if (!(sh->flags & PCIE_SHARED_D2H_SYNC_MODE_MASK)) {
        DHD_ERROR(("%s: FW has to support either dma indices or d2h sync\n",
            __FUNCTION__));
        return BCME_UNSUPPORTED;
    } else {
        bus->dhd->dma_h2d_ring_upd_support = FALSE;
        bus->dhd->dma_d2h_ring_upd_support = FALSE;
    }

    /* Does the firmware support fast delete ring? */
    if (sh->flags2 & PCIE_SHARED2_FAST_DELETE_RING) {
        DHD_INFO(("%s: Firmware supports fast delete ring\n",
            __FUNCTION__));
        bus->dhd->fast_delete_ring_support = TRUE;
    } else {
        DHD_INFO(("%s: Firmware does not support fast delete ring\n",
            __FUNCTION__));
        bus->dhd->fast_delete_ring_support = FALSE;
    }

    /* get ring_info, ring_state and mb data ptrs and store the addresses in bus structure */
    {
        ring_info_t ring_info;

        /* boundary check */
        if (sh->rings_info_ptr > shaddr) {
            DHD_ERROR(("%s: rings_info_ptr is invalid 0x%x, skip reading ring info\n",
                __FUNCTION__, sh->rings_info_ptr));
            return BCME_ERROR;
        }

        if ((rv = dhdpcie_bus_membytes(bus, FALSE, sh->rings_info_ptr,
            (uint8 *)&ring_info, sizeof(ring_info_t))) < 0)
            return rv;

        bus->h2d_mb_data_ptr_addr = ltoh32(sh->h2d_mb_data_ptr);
        bus->d2h_mb_data_ptr_addr = ltoh32(sh->d2h_mb_data_ptr);

        if (bus->api.fw_rev >= PCIE_SHARED_VERSION_6) {
            bus->max_tx_flowrings = ltoh16(ring_info.max_tx_flowrings);
            bus->max_submission_rings = ltoh16(ring_info.max_submission_queues);
            bus->max_completion_rings = ltoh16(ring_info.max_completion_rings);
            bus->max_cmn_rings = bus->max_submission_rings - bus->max_tx_flowrings;
            bus->api.handle_mb_data = dhdpcie_read_handle_mb_data;
            bus->use_mailbox = sh->flags & PCIE_SHARED_USE_MAILBOX;
        }
        else {
            bus->max_tx_flowrings = ltoh16(ring_info.max_tx_flowrings);
            bus->max_submission_rings = bus->max_tx_flowrings;
            bus->max_completion_rings = BCMPCIE_D2H_COMMON_MSGRINGS;
            bus->max_cmn_rings = BCMPCIE_H2D_COMMON_MSGRINGS;
            bus->api.handle_mb_data = dhdpcie_handle_mb_data;
            bus->use_mailbox = TRUE;
        }
        if (bus->max_completion_rings == 0) {
            DHD_ERROR(("dongle completion rings are invalid %d\n",
                bus->max_completion_rings));
            return BCME_ERROR;
        }
        if (bus->max_submission_rings == 0) {
            DHD_ERROR(("dongle submission rings are invalid %d\n",
                bus->max_submission_rings));
            return BCME_ERROR;
        }
        if (bus->max_tx_flowrings == 0) {
            DHD_ERROR(("dongle txflow rings are invalid %d\n", bus->max_tx_flowrings));
            return BCME_ERROR;
        }

        /* If both FW and host support DMA'ing indices, allocate memory and notify FW.
         * The max_sub_queues is read from the FW-initialized ring_info
         */
        if (bus->dhd->dma_h2d_ring_upd_support || IDMA_ENAB(bus->dhd)) {
            dma_indx_wr_buf = dhd_prot_dma_indx_init(bus->dhd, bus->rw_index_sz,
                H2D_DMA_INDX_WR_BUF, bus->max_submission_rings);
            dma_indx_rd_buf = dhd_prot_dma_indx_init(bus->dhd, bus->rw_index_sz,
                D2H_DMA_INDX_RD_BUF, bus->max_completion_rings);

            if ((dma_indx_wr_buf != BCME_OK) || (dma_indx_rd_buf != BCME_OK)) {
                DHD_ERROR(("%s: Failed to allocate memory for dma'ing h2d indices. "
                    "Host will use w/r indices in TCM\n",
                    __FUNCTION__));
                bus->dhd->dma_h2d_ring_upd_support = FALSE;
                bus->dhd->idma_enable = FALSE;
            }
        }

        if (bus->dhd->dma_d2h_ring_upd_support) {
            dma_indx_wr_buf = dhd_prot_dma_indx_init(bus->dhd, bus->rw_index_sz,
                D2H_DMA_INDX_WR_BUF, bus->max_completion_rings);
            dma_indx_rd_buf = dhd_prot_dma_indx_init(bus->dhd, bus->rw_index_sz,
                H2D_DMA_INDX_RD_BUF, bus->max_submission_rings);

            if ((dma_indx_wr_buf != BCME_OK) || (dma_indx_rd_buf != BCME_OK)) {
                DHD_ERROR(("%s: Failed to allocate memory for dma'ing d2h indices. "
                    "Host will use w/r indices in TCM\n",
                    __FUNCTION__));
                bus->dhd->dma_d2h_ring_upd_support = FALSE;
            }
        }

        if (IFRM_ENAB(bus->dhd)) {
            dma_indx_wr_buf = dhd_prot_dma_indx_init(bus->dhd, bus->rw_index_sz,
                H2D_IFRM_INDX_WR_BUF, bus->max_tx_flowrings);

            if (dma_indx_wr_buf != BCME_OK) {
                DHD_ERROR(("%s: Failed to alloc memory for Implicit DMA\n",
                    __FUNCTION__));
                bus->dhd->ifrm_enable = FALSE;
            }
        }

        /* read ringmem and ringstate ptrs from shared area and store in host variables */
        dhd_fillup_ring_sharedptr_info(bus, &ring_info);
        if (dhd_msg_level & DHD_INFO_VAL) {
            bcm_print_bytes("ring_info_raw", (uchar *)&ring_info, sizeof(ring_info_t));
        }
        DHD_INFO(("ring_info\n"));

        DHD_ERROR(("%s: max H2D queues %d\n",
            __FUNCTION__, ltoh16(ring_info.max_tx_flowrings)));

        DHD_INFO(("mail box address\n"));
        DHD_INFO(("%s: h2d_mb_data_ptr_addr 0x%04x\n",
            __FUNCTION__, bus->h2d_mb_data_ptr_addr));
        DHD_INFO(("%s: d2h_mb_data_ptr_addr 0x%04x\n",
            __FUNCTION__, bus->d2h_mb_data_ptr_addr));
    }

    DHD_INFO(("%s: d2h_sync_mode 0x%08x\n",
        __FUNCTION__, bus->dhd->d2h_sync_mode));

    bus->dhd->d2h_hostrdy_supported =
        ((sh->flags & PCIE_SHARED_HOSTRDY_SUPPORT) == PCIE_SHARED_HOSTRDY_SUPPORT);

    bus->dhd->ext_trap_data_supported =
        ((sh->flags2 & PCIE_SHARED2_EXTENDED_TRAP_DATA) == PCIE_SHARED2_EXTENDED_TRAP_DATA);

    if ((sh->flags2 & PCIE_SHARED2_TXSTATUS_METADATA) == 0)
        bus->dhd->pcie_txs_metadata_enable = 0;

    bus->dhd->hscb_enable =
        (sh->flags2 & PCIE_SHARED2_HSCB) == PCIE_SHARED2_HSCB;

#ifdef EWP_EDL
    if (host_edl_support) {
        bus->dhd->dongle_edl_support = (sh->flags2 & PCIE_SHARED2_EDL_RING) ? TRUE : FALSE;
        DHD_ERROR(("Dongle EDL support: %u\n", bus->dhd->dongle_edl_support));
    }
#endif /* EWP_EDL */

    bus->dhd->debug_buf_dest_support =
        (sh->flags2 & PCIE_SHARED2_DEBUG_BUF_DEST) ? TRUE : FALSE;
    DHD_ERROR(("FW supports debug buf dest ? %s \n",
        bus->dhd->debug_buf_dest_support ? "Y" : "N"));

#ifdef DHD_HP2P
    if (bus->dhd->hp2p_enable) {
        bus->dhd->hp2p_ts_capable =
            (sh->flags2 & PCIE_SHARED2_PKT_TIMESTAMP) == PCIE_SHARED2_PKT_TIMESTAMP;
        bus->dhd->hp2p_capable =
            (sh->flags2 & PCIE_SHARED2_HP2P) == PCIE_SHARED2_HP2P;
        bus->dhd->hp2p_capable &= bus->dhd->hp2p_ts_capable;
        DHD_ERROR(("FW supports HP2P ? %s \n",
            bus->dhd->hp2p_capable ? "Y" : "N"));
        if (bus->dhd->hp2p_capable) {
            bus->dhd->pkt_thresh = HP2P_PKT_THRESH;
            bus->dhd->pkt_expiry = HP2P_PKT_EXPIRY;
            bus->dhd->time_thresh = HP2P_TIME_THRESH;
            for (addr = 0; addr < MAX_HP2P_FLOWS; addr++) {
                hp2p_info_t *hp2p_info = &bus->dhd->hp2p_info[addr];
                hp2p_info->hrtimer_init = FALSE;
                hp2p_info->timer.function = &dhd_hp2p_write;
#if (LINUX_VERSION_CODE <= KERNEL_VERSION(5, 1, 21))
                tasklet_hrtimer_init(&hp2p_info->timer,
                    dhd_hp2p_write, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
#else
                hrtimer_init(&hp2p_info->timer, CLOCK_MONOTONIC,
                    HRTIMER_MODE_REL_SOFT);
#endif /* LINUX_VERSION_CODE <= KERNEL_VERSION(5, 1, 21) */
            }
        }
    }
#endif /* DHD_HP2P */

#ifdef DHD_DB0TS
    bus->dhd->db0ts_capable =
        (sh->flags & PCIE_SHARED_TIMESTAMP_DB0) == PCIE_SHARED_TIMESTAMP_DB0;
#endif /* DHD_DB0TS */

    if (MULTIBP_ENAB(bus->sih)) {
        dhd_bus_pcie_pwr_req_clear(bus);

        /*
         * WAR to fix ARM cold boot;
         * De-assert WL domain in DAR
         */
        if (bus->sih->buscorerev >= 68) {
            dhd_bus_pcie_pwr_req_wl_domain(bus, FALSE);
        }
    }
    return BCME_OK;
} /* dhdpcie_readshared */
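
/* Illustrative sketch (not part of the original source): the dongle publishes
 * the address of pciedev_shared_t in the last 32-bit word of its RAM, which is
 * what the polling loop at the top of dhdpcie_readshared() waits on.
 */
#if 0 /* example only */
static uint32
example_read_shared_addr(dhd_bus_t *bus)
{
    uint32 shaddr = bus->dongle_ram_base + bus->ramsize - 4;
    return LTOH32(dhdpcie_bus_rtcm32(bus, shaddr));
}
#endif /* example only */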
/** Read ring mem and ring state ptr info from shared memory area in device memory */
static void
dhd_fillup_ring_sharedptr_info(dhd_bus_t *bus, ring_info_t *ring_info)
{
    uint16 i = 0;
    uint16 j = 0;
    uint32 tcm_memloc;
    uint32 d2h_w_idx_ptr, d2h_r_idx_ptr, h2d_w_idx_ptr, h2d_r_idx_ptr;
    uint16 max_tx_flowrings = bus->max_tx_flowrings;

    /* Ring mem ptr info */
    /* Allocated in the order
        H2D_MSGRING_CONTROL_SUBMIT      0
        H2D_MSGRING_RXPOST_SUBMIT       1
        D2H_MSGRING_CONTROL_COMPLETE    2
        D2H_MSGRING_TX_COMPLETE         3
        D2H_MSGRING_RX_COMPLETE         4
    */
    {
        /* ringmemptr holds start of the mem block address space */
        tcm_memloc = ltoh32(ring_info->ringmem_ptr);

        /* Find out the ringmem ptr for each common ring */
        for (i = 0; i <= BCMPCIE_COMMON_MSGRING_MAX_ID; i++) {
            bus->ring_sh[i].ring_mem_addr = tcm_memloc;
            /* Update mem block */
            tcm_memloc = tcm_memloc + sizeof(ring_mem_t);
            DHD_INFO(("ring id %d ring mem addr 0x%04x \n",
                i, bus->ring_sh[i].ring_mem_addr));
        }
    }

    /* Ring state mem ptr info */
    {
        d2h_w_idx_ptr = ltoh32(ring_info->d2h_w_idx_ptr);
        d2h_r_idx_ptr = ltoh32(ring_info->d2h_r_idx_ptr);
        h2d_w_idx_ptr = ltoh32(ring_info->h2d_w_idx_ptr);
        h2d_r_idx_ptr = ltoh32(ring_info->h2d_r_idx_ptr);

        /* Store h2d common ring write/read pointers */
        for (i = 0; i < BCMPCIE_H2D_COMMON_MSGRINGS; i++) {
            bus->ring_sh[i].ring_state_w = h2d_w_idx_ptr;
            bus->ring_sh[i].ring_state_r = h2d_r_idx_ptr;

            /* update mem block */
            h2d_w_idx_ptr = h2d_w_idx_ptr + bus->rw_index_sz;
            h2d_r_idx_ptr = h2d_r_idx_ptr + bus->rw_index_sz;

            DHD_INFO(("h2d w/r : idx %d write %x read %x \n", i,
                bus->ring_sh[i].ring_state_w, bus->ring_sh[i].ring_state_r));
        }

        /* Store d2h common ring write/read pointers */
        for (j = 0; j < BCMPCIE_D2H_COMMON_MSGRINGS; j++, i++) {
            bus->ring_sh[i].ring_state_w = d2h_w_idx_ptr;
            bus->ring_sh[i].ring_state_r = d2h_r_idx_ptr;

            /* update mem block */
            d2h_w_idx_ptr = d2h_w_idx_ptr + bus->rw_index_sz;
            d2h_r_idx_ptr = d2h_r_idx_ptr + bus->rw_index_sz;

            DHD_INFO(("d2h w/r : idx %d write %x read %x \n", i,
                bus->ring_sh[i].ring_state_w, bus->ring_sh[i].ring_state_r));
        }

        /* Store txflow ring write/read pointers */
        if (bus->api.fw_rev < PCIE_SHARED_VERSION_6) {
            max_tx_flowrings -= BCMPCIE_H2D_COMMON_MSGRINGS;
        } else {
            /* Account for the debug info h2d ring located after the last tx flow ring */
            max_tx_flowrings = max_tx_flowrings + 1;
        }
        for (j = 0; j < max_tx_flowrings; i++, j++)
        {
            bus->ring_sh[i].ring_state_w = h2d_w_idx_ptr;
            bus->ring_sh[i].ring_state_r = h2d_r_idx_ptr;

            /* update mem block */
            h2d_w_idx_ptr = h2d_w_idx_ptr + bus->rw_index_sz;
            h2d_r_idx_ptr = h2d_r_idx_ptr + bus->rw_index_sz;

            DHD_INFO(("FLOW Rings h2d w/r : idx %d write %x read %x \n", i,
                bus->ring_sh[i].ring_state_w,
                bus->ring_sh[i].ring_state_r));
        }
        /* store wr/rd pointers for debug info completion ring */
        bus->ring_sh[i].ring_state_w = d2h_w_idx_ptr;
        bus->ring_sh[i].ring_state_r = d2h_r_idx_ptr;
        d2h_w_idx_ptr = d2h_w_idx_ptr + bus->rw_index_sz;
        d2h_r_idx_ptr = d2h_r_idx_ptr + bus->rw_index_sz;
        DHD_INFO(("d2h w/r : idx %d write %x read %x \n", i,
            bus->ring_sh[i].ring_state_w, bus->ring_sh[i].ring_state_r));
    }
} /* dhd_fillup_ring_sharedptr_info */
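
/* Illustrative sketch (not part of the original source): the read/write index
 * pointers consumed above are laid out as one contiguous array in dongle
 * memory, so the state address of ring i is simply base + i * rw_index_sz.
 */
#if 0 /* example only */
static uint32
example_ring_state_addr(uint32 base, uint16 ring_idx, uint8 rw_index_sz)
{
    return base + (uint32)ring_idx * rw_index_sz;
}
#endif /* example only */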
/**
 * Initialize bus module: prepare for communication with the dongle. Called after downloading
 * firmware into the dongle.
 */
int dhd_bus_init(dhd_pub_t *dhdp, bool enforce_mutex)
{
    dhd_bus_t *bus = dhdp->bus;
    int ret = 0;

    DHD_TRACE(("%s: Enter\n", __FUNCTION__));

    ASSERT(bus->dhd);
    if (!bus->dhd)
        return 0;

    if (PCIE_RELOAD_WAR_ENAB(bus->sih->buscorerev)) {
        dhd_bus_pcie_pwr_req_clear_reload_war(bus);
    }

    if (MULTIBP_ENAB(bus->sih)) {
        dhd_bus_pcie_pwr_req(bus);
    }

    /* Configure AER registers to log the TLP header */
    dhd_bus_aer_config(bus);

    /* Make sure we're talking to the core. */
    bus->reg = si_setcore(bus->sih, PCIE2_CORE_ID, 0);
    ASSERT(bus->reg != NULL);

    /* Before opening up the bus for data transfer, check if the shared area is intact */
    ret = dhdpcie_readshared(bus);
    if (ret < 0) {
        DHD_ERROR(("%s: Shared area read failed\n", __FUNCTION__));
        goto exit;
    }

    /* Make sure we're talking to the core. */
    bus->reg = si_setcore(bus->sih, PCIE2_CORE_ID, 0);
    ASSERT(bus->reg != NULL);

    dhd_init_bus_lock(bus);

    dhd_init_backplane_access_lock(bus);

    /* Set bus state according to enable result */
    dhdp->busstate = DHD_BUS_DATA;
    bus->bus_low_power_state = DHD_BUS_NO_LOW_POWER_STATE;
    dhdp->dhd_bus_busy_state = 0;

    /* D11 status via PCIe completion header */
    if ((ret = dhdpcie_init_d11status(bus)) < 0) {
        goto exit;
    }

    if (!dhd_download_fw_on_driverload)
        dhd_dpc_enable(bus->dhd);
    /* Enable the interrupt after device is up */
    dhdpcie_bus_intr_enable(bus);

    bus->intr_enabled = TRUE;

    /* bcmsdh_intr_unmask(bus->sdh); */
#ifdef DHD_PCIE_RUNTIMEPM
    bus->idlecount = 0;
    bus->idletime = (int32)MAX_IDLE_COUNT;
    init_waitqueue_head(&bus->rpm_queue);
    mutex_init(&bus->pm_lock);
#else
    bus->idletime = 0;
#endif /* DHD_PCIE_RUNTIMEPM */

    /* Make use_d0_inform TRUE for Rev 5 for backward compatibility */
    if (bus->api.fw_rev < PCIE_SHARED_VERSION_6) {
        bus->use_d0_inform = TRUE;
    } else {
        bus->use_d0_inform = FALSE;
    }

exit:
    if (MULTIBP_ENAB(bus->sih)) {
        dhd_bus_pcie_pwr_req_clear(bus);
    }
    return ret;
}
static void
dhdpcie_init_shared_addr(dhd_bus_t *bus)
{
    uint32 addr = 0;
    uint32 val = 0;

    addr = bus->dongle_ram_base + bus->ramsize - 4;
#ifdef DHD_PCIE_RUNTIMEPM
    dhdpcie_runtime_bus_wake(bus->dhd, TRUE, __builtin_return_address(0));
#endif /* DHD_PCIE_RUNTIMEPM */
    dhdpcie_bus_membytes(bus, TRUE, addr, (uint8 *)&val, sizeof(val));
}
int
dhdpcie_chipmatch(uint16 vendor, uint16 device)
{
    if (vendor == PCI_VENDOR_ID_BROADCOM) {
        DHD_ERROR(("%s: Supporting vendor %x device %x\n", __FUNCTION__,
            vendor, device));
    } else {
        DHD_ERROR(("%s: Unsupported vendor %x device %x\n", __FUNCTION__,
            vendor, device));
        return (-ENODEV);
    }

    if ((device == BCM4350_D11AC_ID) || (device == BCM4350_D11AC2G_ID) ||
        (device == BCM4350_D11AC5G_ID) || (device == BCM4350_CHIP_ID) ||
        (device == BCM43569_CHIP_ID)) {
        return 0;
    }
    if ((device == BCM4354_D11AC_ID) || (device == BCM4354_D11AC2G_ID) ||
        (device == BCM4354_D11AC5G_ID) || (device == BCM4354_CHIP_ID)) {
        return 0;
    }
    if ((device == BCM4356_D11AC_ID) || (device == BCM4356_D11AC2G_ID) ||
        (device == BCM4356_D11AC5G_ID) || (device == BCM4356_CHIP_ID)) {
        return 0;
    }
    if ((device == BCM4371_D11AC_ID) || (device == BCM4371_D11AC2G_ID) ||
        (device == BCM4371_D11AC5G_ID) || (device == BCM4371_CHIP_ID)) {
        return 0;
    }
    if ((device == BCM4345_D11AC_ID) || (device == BCM4345_D11AC2G_ID) ||
        (device == BCM4345_D11AC5G_ID) || BCM4345_CHIP(device)) {
        return 0;
    }
    if ((device == BCM43452_D11AC_ID) || (device == BCM43452_D11AC2G_ID) ||
        (device == BCM43452_D11AC5G_ID)) {
        return 0;
    }
    if ((device == BCM4335_D11AC_ID) || (device == BCM4335_D11AC2G_ID) ||
        (device == BCM4335_D11AC5G_ID) || (device == BCM4335_CHIP_ID)) {
        return 0;
    }
    if ((device == BCM43602_D11AC_ID) || (device == BCM43602_D11AC2G_ID) ||
        (device == BCM43602_D11AC5G_ID) || (device == BCM43602_CHIP_ID)) {
        return 0;
    }
    if ((device == BCM43569_D11AC_ID) || (device == BCM43569_D11AC2G_ID) ||
        (device == BCM43569_D11AC5G_ID) || (device == BCM43569_CHIP_ID)) {
        return 0;
    }
    if ((device == BCM4358_D11AC_ID) || (device == BCM4358_D11AC2G_ID) ||
        (device == BCM4358_D11AC5G_ID)) {
        return 0;
    }
    if ((device == BCM4349_D11AC_ID) || (device == BCM4349_D11AC2G_ID) ||
        (device == BCM4349_D11AC5G_ID) || (device == BCM4349_CHIP_ID)) {
        return 0;
    }
    if ((device == BCM4355_D11AC_ID) || (device == BCM4355_D11AC2G_ID) ||
        (device == BCM4355_D11AC5G_ID) || (device == BCM4355_CHIP_ID)) {
        return 0;
    }
    if ((device == BCM4359_D11AC_ID) || (device == BCM4359_D11AC2G_ID) ||
        (device == BCM4359_D11AC5G_ID)) {
        return 0;
    }
    if ((device == BCM43596_D11AC_ID) || (device == BCM43596_D11AC2G_ID) ||
        (device == BCM43596_D11AC5G_ID)) {
        return 0;
    }
    if ((device == BCM43597_D11AC_ID) || (device == BCM43597_D11AC2G_ID) ||
        (device == BCM43597_D11AC5G_ID)) {
        return 0;
    }
    if ((device == BCM4364_D11AC_ID) || (device == BCM4364_D11AC2G_ID) ||
        (device == BCM4364_D11AC5G_ID) || (device == BCM4364_CHIP_ID)) {
        return 0;
    }
    if ((device == BCM4361_D11AC_ID) || (device == BCM4361_D11AC2G_ID) ||
        (device == BCM4361_D11AC5G_ID) || (device == BCM4361_CHIP_ID)) {
        return 0;
    }
    if ((device == BCM4362_D11AX_ID) || (device == BCM4362_D11AX2G_ID) ||
        (device == BCM4362_D11AX5G_ID) || (device == BCM4362_CHIP_ID)) {
        return 0;
    }
    if ((device == BCM43751_D11AX_ID) || (device == BCM43751_D11AX2G_ID) ||
        (device == BCM43751_D11AX5G_ID) || (device == BCM43751_CHIP_ID)) {
        return 0;
    }
    if ((device == BCM4347_D11AC_ID) || (device == BCM4347_D11AC2G_ID) ||
        (device == BCM4347_D11AC5G_ID) || (device == BCM4347_CHIP_ID)) {
        return 0;
    }
    if ((device == BCM4365_D11AC_ID) || (device == BCM4365_D11AC2G_ID) ||
        (device == BCM4365_D11AC5G_ID) || (device == BCM4365_CHIP_ID)) {
        return 0;
    }
    if ((device == BCM4366_D11AC_ID) || (device == BCM4366_D11AC2G_ID) ||
        (device == BCM4366_D11AC5G_ID) || (device == BCM4366_CHIP_ID) ||
        (device == BCM43664_CHIP_ID) || (device == BCM43666_CHIP_ID)) {
        return 0;
    }
    if ((device == BCM4369_D11AX_ID) || (device == BCM4369_D11AX2G_ID) ||
        (device == BCM4369_D11AX5G_ID) || (device == BCM4369_CHIP_ID)) {
        return 0;
    }
    if ((device == BCM4373_D11AC_ID) || (device == BCM4373_D11AC2G_ID) ||
        (device == BCM4373_D11AC5G_ID) || (device == BCM4373_CHIP_ID)) {
        return 0;
    }
    if ((device == BCM4375_D11AX_ID) || (device == BCM4375_D11AX2G_ID) ||
        (device == BCM4375_D11AX5G_ID) || (device == BCM4375_CHIP_ID)) {
        return 0;
    }
#ifdef CHIPS_CUSTOMER_HW6
    if ((device == BCM4376_D11AC_ID) || (device == BCM4376_D11AC2G_ID) ||
        (device == BCM4376_D11AC5G_ID) || (device == BCM4376_CHIP_ID)) {
        return 0;
    }
    if ((device == BCM4377_M_D11AX_ID) || (device == BCM4377_D11AX_ID) ||
        (device == BCM4377_D11AX2G_ID) || (device == BCM4377_D11AX5G_ID) ||
        (device == BCM4377_CHIP_ID)) {
        return 0;
    }
    if ((device == BCM4378_D11AC_ID) || (device == BCM4378_D11AC2G_ID) ||
        (device == BCM4378_D11AC5G_ID) || (device == BCM4378_CHIP_ID)) {
        return 0;
    }
#endif /* CHIPS_CUSTOMER_HW6 */
#ifdef CHIPS_CUSTOMER_HW6
    if ((device == BCM4368_D11AC_ID) || (device == BCM4368_D11AC2G_ID) ||
        (device == BCM4368_D11AC5G_ID) || (device == BCM4368_CHIP_ID)) {
        return 0;
    }
    if ((device == BCM4367_D11AC_ID) || (device == BCM4367_D11AC2G_ID) ||
        (device == BCM4367_D11AC5G_ID) || (device == BCM4367_CHIP_ID)) {
        return 0;
    }
#endif /* CHIPS_CUSTOMER_HW6 */

    DHD_ERROR(("%s: Unsupported vendor %x device %x\n", __FUNCTION__, vendor, device));
    return (-ENODEV);
} /* dhdpcie_chipmatch */
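
/* Illustrative sketch (not part of the original source): dhdpcie_chipmatch()
 * acts as the PCI probe filter; 0 means "claim this device" and -ENODEV means
 * "not ours". pdev here stands for the hypothetical struct pci_dev handed to
 * the probe callback.
 */
#if 0 /* example only */
    if (dhdpcie_chipmatch(pdev->vendor, pdev->device) != 0)
        return -ENODEV; /* let other drivers probe it */
#endif /* example only */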
/*
 * Name: dhdpcie_sromotp_customvar
 * Description:
 * Read OTP/SPROM and parse & store customvar.
 * A shadow of OTP/SPROM exists in ChipCommon Region
 * betw. 0x800 and 0xBFF (Backplane Addr. 0x1800_0800 and 0x1800_0BFF).
 * Strapping option (SPROM vs. OTP), presence of OTP/SPROM and its size
 * can also be read from ChipCommon Registers.
 */
static int
dhdpcie_sromotp_customvar(dhd_bus_t *bus, uint32 *customvar1, uint32 *customvar2)
{
    uint16 dump_offset = 0;
    uint32 dump_size = 0, otp_size = 0, sprom_size = 0;
    /* Table for 65nm OTP Size (in bits) */
    int otp_size_65nm[8] = {0, 2048, 4096, 8192, 4096, 6144, 512, 1024};
    volatile uint16 *nvm_shadow;
    uint cur_coreid;
    uint chipc_corerev;
    chipcregs_t *chipcregs;
    uint16 *otp_dump;
    uint8 *cis;
    uint8 tup, tlen;
    int i = 0;

    /* Save the current core */
    cur_coreid = si_coreid(bus->sih);
    /* Switch to ChipC */
    chipcregs = (chipcregs_t *)si_setcore(bus->sih, CC_CORE_ID, 0);
    ASSERT(chipcregs != NULL);
    chipc_corerev = si_corerev(bus->sih);

    /* Check ChipcommonCore Rev */
    if (chipc_corerev < 44) {
        DHD_ERROR(("%s: ChipcommonCore Rev %d < 44\n", __FUNCTION__, chipc_corerev));
        return BCME_UNSUPPORTED;
    }
    /* Check ChipID */
    if (((uint16)bus->sih->chip != BCM4350_CHIP_ID) && !BCM4345_CHIP((uint16)bus->sih->chip) &&
        ((uint16)bus->sih->chip != BCM4355_CHIP_ID) &&
        ((uint16)bus->sih->chip != BCM4359_CHIP_ID) &&
        ((uint16)bus->sih->chip != BCM4349_CHIP_ID)) {
        DHD_ERROR(("%s: supported for chips "
            "4350/4345/4355/4364/4349/4359 only\n", __FUNCTION__));
        return BCME_UNSUPPORTED;
    }
    /* Check if SRC_PRESENT in SpromCtrl(0x190 in ChipCommon Regs) is set */
    if (chipcregs->sromcontrol & SRC_PRESENT) {
        /* SPROM Size: 1Kbits (0x0), 4Kbits (0x1), 16Kbits(0x2) */
        sprom_size = (1 << (2 * ((chipcregs->sromcontrol & SRC_SIZE_MASK)
            >> SRC_SIZE_SHIFT))) * 1024;
        DHD_TRACE(("\nSPROM Present (Size %d bits)\n", sprom_size));
    }
    if (chipcregs->sromcontrol & SRC_OTPPRESENT) {
        DHD_TRACE(("\nOTP Present"));
        if (((chipcregs->otplayout & OTPL_WRAP_TYPE_MASK) >> OTPL_WRAP_TYPE_SHIFT)
            == OTPL_WRAP_TYPE_40NM) {
            /* 40nm OTP: Size = (OtpSize + 1) * 1024 bits */
            /* Chipcommon rev51 is a variation on rev45 and does not support
             * the latest OTP configuration.
             */
            if (chipc_corerev != 51 && chipc_corerev >= 49) {
                otp_size = (((chipcregs->otplayout & OTPL_ROW_SIZE_MASK)
                    >> OTPL_ROW_SIZE_SHIFT) + 1) * 1024;
                DHD_TRACE(("(Size %d bits)\n", otp_size));
            } else {
                otp_size = (((chipcregs->capabilities & CC_CAP_OTPSIZE)
                    >> CC_CAP_OTPSIZE_SHIFT) + 1) * 1024;
                DHD_TRACE(("(Size %d bits)\n", otp_size));
            }
        } else {
            /* This part is untested since newer chips have 40nm OTP */
            /* Chipcommon rev51 is a variation on rev45 and does not support
             * the latest OTP configuration.
             */
            if (chipc_corerev != 51 && chipc_corerev >= 49) {
                otp_size = otp_size_65nm[(chipcregs->otplayout &
                    OTPL_ROW_SIZE_MASK) >> OTPL_ROW_SIZE_SHIFT];
                DHD_TRACE(("(Size %d bits)\n", otp_size));
            } else {
                otp_size = otp_size_65nm[(chipcregs->capabilities &
                    CC_CAP_OTPSIZE) >> CC_CAP_OTPSIZE_SHIFT];
                DHD_TRACE(("(Size %d bits)\n", otp_size));
                DHD_TRACE(("%s: 65nm/130nm OTP size not tested.\n",
                    __FUNCTION__));
            }
        }
    }
    /* Chipcommon rev51 is a variation on rev45 and does not support
     * the latest OTP configuration.
     */
    if (chipc_corerev != 51 && chipc_corerev >= 49) {
        if (((chipcregs->sromcontrol & SRC_PRESENT) == 0) &&
            ((chipcregs->otplayout & OTPL_ROW_SIZE_MASK) == 0)) {
            DHD_ERROR(("%s: SPROM and OTP could not be found: "
                "sromcontrol = %x, otplayout = %x\n",
                __FUNCTION__, chipcregs->sromcontrol,
                chipcregs->otplayout));
            return BCME_NOTFOUND;
        }
    } else {
        if (((chipcregs->sromcontrol & SRC_PRESENT) == 0) &&
            ((chipcregs->capabilities & CC_CAP_OTPSIZE) == 0)) {
            DHD_ERROR(("%s: SPROM and OTP could not be found: "
                "sromcontrol = %x, capabilities = %x\n",
                __FUNCTION__, chipcregs->sromcontrol,
                chipcregs->capabilities));
            return BCME_NOTFOUND;
        }
    }
    /* Check the strapping option in SpromCtrl: Set = OTP, otherwise SPROM */
    if ((!(chipcregs->sromcontrol & SRC_PRESENT) || (chipcregs->sromcontrol & SRC_OTPSEL)) &&
        (chipcregs->sromcontrol & SRC_OTPPRESENT)) {
        DHD_TRACE(("OTP Strap selected.\n"
            "\nOTP Shadow in ChipCommon:\n"));
        dump_size = otp_size / 16; /* 16-bit words */
    } else if (((chipcregs->sromcontrol & SRC_OTPSEL) == 0) &&
        (chipcregs->sromcontrol & SRC_PRESENT)) {
        DHD_TRACE(("SPROM Strap selected\n"
            "\nSPROM Shadow in ChipCommon:\n"));
        /* If SPROM > 8K only 8Kbits is mapped to ChipCommon (0x800 - 0xBFF) */
        /* dump_size in 16-bit words */
        dump_size = sprom_size > 8 ? (8 * 1024) / 16 : sprom_size / 16;
    } else {
        DHD_ERROR(("%s: NVM Shadow does not exist in ChipCommon\n",
            __FUNCTION__));
        return BCME_NOTFOUND;
    }
    if (bus->regs == NULL) {
        DHD_ERROR(("ChipCommon Regs. not initialized\n"));
        return BCME_NOTREADY;
    } else {
        /* Chipcommon rev51 is a variation on rev45 and does not support
         * the latest OTP configuration.
         */
        if (chipc_corerev != 51 && chipc_corerev >= 49) {
            /* Chip common can read only 8kbits;
             * for ccrev >= 49 the otp size is around 12 kbits, so use the GCI core
             */
            nvm_shadow = (volatile uint16 *)si_setcore(bus->sih, GCI_CORE_ID, 0);
        } else {
            /* Point to the SPROM/OTP shadow in ChipCommon */
            nvm_shadow = chipcregs->sromotp;
        }
        if (nvm_shadow == NULL) {
            DHD_ERROR(("%s: NVM Shadow is not initialized\n", __FUNCTION__));
            return BCME_NOTFOUND;
        }
        otp_dump = kzalloc(dump_size * 2, GFP_KERNEL);
        if (otp_dump == NULL) {
            DHD_ERROR(("%s: Insufficient system memory of size %d\n",
                __FUNCTION__, dump_size));
            return BCME_NOMEM;
        }
        /*
         * Read 16 bits / iteration.
         * dump_size & dump_offset in 16-bit words
         */
        while (dump_offset < dump_size) {
            *(otp_dump + dump_offset) = *(nvm_shadow + dump_offset);
            dump_offset += 0x1;
        }
        /* Read from the CIS tuple start address */
        cis = (uint8 *)otp_dump + CISTPL_OFFSET;
        /* Parse the values of the customvar tuples */
        do {
            tup = cis[i++];
            if (tup == CISTPL_NULL || tup == CISTPL_END)
                tlen = 0;
            else
                tlen = cis[i++];
            if ((i + tlen) >= dump_size * 2)
                break;
            switch (tup) {
            case CISTPL_BRCM_HNBU:
                switch (cis[i]) {
                case HNBU_CUSTOM1:
                    *customvar1 = ((cis[i + 4] << 24) +
                        (cis[i + 3] << 16) +
                        (cis[i + 2] << 8) +
                        cis[i + 1]);
                    DHD_TRACE(("%s: customvar1 [%x]\n",
                        __FUNCTION__, *customvar1));
                    break;
                case HNBU_CUSTOM2:
                    *customvar2 = ((cis[i + 4] << 24) +
                        (cis[i + 3] << 16) +
                        (cis[i + 2] << 8) +
                        cis[i + 1]);
                    DHD_TRACE(("%s: customvar2 [%x]\n",
                        __FUNCTION__, *customvar2));
                    break;
                default:
                    break;
                }
                break;
            default:
                break;
            }
            i += tlen;
        } while (tup != 0xff);

        if (otp_dump) {
            kfree(otp_dump);
            otp_dump = NULL;
        }
    }
    /* Switch back to the original core */
    si_setcore(bus->sih, cur_coreid, 0);
    return BCME_OK;
} /* dhdpcie_sromotp_customvar */
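
/* Illustrative sketch (not part of the original source): CIS tuples are
 * TLV-encoded as <type><len><value...> up to the 0xff terminator, which is
 * the walk dhdpcie_sromotp_customvar() performs to locate HNBU_CUSTOM1/2.
 */
#if 0 /* example only */
static void
example_walk_cis(const uint8 *cis, uint32 len)
{
    uint32 i = 0;
    while (i < len) {
        uint8 tup = cis[i++];
        uint8 tlen = 0;
        if (tup != CISTPL_NULL && tup != CISTPL_END)
            tlen = cis[i++];
        if (tup == CISTPL_END || (i + tlen) > len)
            break;
        /* inspect cis[i] .. cis[i + tlen - 1] here */
        i += tlen;
    }
}
#endif /* example only */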
/**
 * Name: dhdpcie_cc_nvmshadow
 *
 * Description:
 * A shadow of OTP/SPROM exists in ChipCommon Region
 * betw. 0x800 and 0xBFF (Backplane Addr. 0x1800_0800 and 0x1800_0BFF).
 * Strapping option (SPROM vs. OTP), presence of OTP/SPROM and its size
 * can also be read from ChipCommon Registers.
 */
static int
dhdpcie_cc_nvmshadow(dhd_bus_t *bus, struct bcmstrbuf *b)
{
    uint16 dump_offset = 0;
    uint32 dump_size = 0, otp_size = 0, sprom_size = 0;
    /* Table for 65nm OTP Size (in bits) */
    int otp_size_65nm[8] = {0, 2048, 4096, 8192, 4096, 6144, 512, 1024};
    volatile uint16 *nvm_shadow;
    uint cur_coreid;
    uint chipc_corerev;
    chipcregs_t *chipcregs;

    /* Save the current core */
    cur_coreid = si_coreid(bus->sih);
    /* Switch to ChipC */
    chipcregs = (chipcregs_t *)si_setcore(bus->sih, CC_CORE_ID, 0);
    ASSERT(chipcregs != NULL);

    chipc_corerev = si_corerev(bus->sih);

    /* Check ChipcommonCore Rev */
    if (chipc_corerev < 44) {
        DHD_ERROR(("%s: ChipcommonCore Rev %d < 44\n", __FUNCTION__, chipc_corerev));
        return BCME_UNSUPPORTED;
    }

    /* Check ChipID */
    if (((uint16)bus->sih->chip != BCM4350_CHIP_ID) && !BCM4345_CHIP((uint16)bus->sih->chip) &&
        ((uint16)bus->sih->chip != BCM4355_CHIP_ID) &&
        ((uint16)bus->sih->chip != BCM4364_CHIP_ID)) {
        DHD_ERROR(("%s: cc_nvmdump cmd. supported for Olympic chips "
            "4350/4345/4355/4364 only\n", __FUNCTION__));
        return BCME_UNSUPPORTED;
    }

    /* Check if SRC_PRESENT in SpromCtrl(0x190 in ChipCommon Regs) is set */
    if (chipcregs->sromcontrol & SRC_PRESENT) {
        /* SPROM Size: 1Kbits (0x0), 4Kbits (0x1), 16Kbits(0x2) */
        sprom_size = (1 << (2 * ((chipcregs->sromcontrol & SRC_SIZE_MASK)
            >> SRC_SIZE_SHIFT))) * 1024;
        bcm_bprintf(b, "\nSPROM Present (Size %d bits)\n", sprom_size);
    }

    if (chipcregs->sromcontrol & SRC_OTPPRESENT) {
        bcm_bprintf(b, "\nOTP Present");

        if (((chipcregs->otplayout & OTPL_WRAP_TYPE_MASK) >> OTPL_WRAP_TYPE_SHIFT)
            == OTPL_WRAP_TYPE_40NM) {
            /* 40nm OTP: Size = (OtpSize + 1) * 1024 bits */
            /* Chipcommon rev51 is a variation on rev45 and does not support
             * the latest OTP configuration.
             */
            if (chipc_corerev != 51 && chipc_corerev >= 49) {
                otp_size = (((chipcregs->otplayout & OTPL_ROW_SIZE_MASK)
                    >> OTPL_ROW_SIZE_SHIFT) + 1) * 1024;
                bcm_bprintf(b, "(Size %d bits)\n", otp_size);
            } else {
                otp_size = (((chipcregs->capabilities & CC_CAP_OTPSIZE)
                    >> CC_CAP_OTPSIZE_SHIFT) + 1) * 1024;
                bcm_bprintf(b, "(Size %d bits)\n", otp_size);
            }
        } else {
            /* This part is untested since newer chips have 40nm OTP */
            /* Chipcommon rev51 is a variation on rev45 and does not support
             * the latest OTP configuration.
             */
            if (chipc_corerev != 51 && chipc_corerev >= 49) {
                otp_size = otp_size_65nm[(chipcregs->otplayout & OTPL_ROW_SIZE_MASK)
                    >> OTPL_ROW_SIZE_SHIFT];
                bcm_bprintf(b, "(Size %d bits)\n", otp_size);
            } else {
                otp_size = otp_size_65nm[(chipcregs->capabilities & CC_CAP_OTPSIZE)
                    >> CC_CAP_OTPSIZE_SHIFT];
                bcm_bprintf(b, "(Size %d bits)\n", otp_size);
                DHD_INFO(("%s: 65nm/130nm OTP size not tested.\n",
                    __FUNCTION__));
            }
        }
    }

    /* Chipcommon rev51 is a variation on rev45 and does not support
     * the latest OTP configuration.
     */
    if (chipc_corerev != 51 && chipc_corerev >= 49) {
        if (((chipcregs->sromcontrol & SRC_PRESENT) == 0) &&
            ((chipcregs->otplayout & OTPL_ROW_SIZE_MASK) == 0)) {
            DHD_ERROR(("%s: SPROM and OTP could not be found: "
                "sromcontrol = %x, otplayout = %x\n",
                __FUNCTION__, chipcregs->sromcontrol, chipcregs->otplayout));
            return BCME_NOTFOUND;
        }
    } else {
        if (((chipcregs->sromcontrol & SRC_PRESENT) == 0) &&
            ((chipcregs->capabilities & CC_CAP_OTPSIZE) == 0)) {
            DHD_ERROR(("%s: SPROM and OTP could not be found: "
                "sromcontrol = %x, capabilities = %x\n",
                __FUNCTION__, chipcregs->sromcontrol, chipcregs->capabilities));
            return BCME_NOTFOUND;
        }
    }

    /* Check the strapping option in SpromCtrl: Set = OTP, otherwise SPROM */
    if ((!(chipcregs->sromcontrol & SRC_PRESENT) || (chipcregs->sromcontrol & SRC_OTPSEL)) &&
        (chipcregs->sromcontrol & SRC_OTPPRESENT)) {
        bcm_bprintf(b, "OTP Strap selected.\n"
            "\nOTP Shadow in ChipCommon:\n");

        dump_size = otp_size / 16; /* 16-bit words */
    } else if (((chipcregs->sromcontrol & SRC_OTPSEL) == 0) &&
        (chipcregs->sromcontrol & SRC_PRESENT)) {
        bcm_bprintf(b, "SPROM Strap selected\n"
            "\nSPROM Shadow in ChipCommon:\n");

        /* If SPROM > 8K only 8Kbits is mapped to ChipCommon (0x800 - 0xBFF) */
        /* dump_size in 16-bit words */
        dump_size = sprom_size > 8 ? (8 * 1024) / 16 : sprom_size / 16;
    } else {
        DHD_ERROR(("%s: NVM Shadow does not exist in ChipCommon\n",
            __FUNCTION__));
        return BCME_NOTFOUND;
    }

    if (bus->regs == NULL) {
        DHD_ERROR(("ChipCommon Regs. not initialized\n"));
        return BCME_NOTREADY;
    } else {
        bcm_bprintf(b, "\n OffSet:");

        /* Chipcommon rev51 is a variation on rev45 and does not support
         * the latest OTP configuration.
         */
        if (chipc_corerev != 51 && chipc_corerev >= 49) {
            /* Chip common can read only 8kbits;
             * for ccrev >= 49 the otp size is around 12 kbits, so use the GCI core
             */
            nvm_shadow = (volatile uint16 *)si_setcore(bus->sih, GCI_CORE_ID, 0);
        } else {
            /* Point to the SPROM/OTP shadow in ChipCommon */
            nvm_shadow = chipcregs->sromotp;
        }

        if (nvm_shadow == NULL) {
            DHD_ERROR(("%s: NVM Shadow is not initialized\n", __FUNCTION__));
            return BCME_NOTFOUND;
        }

        /*
         * Read 16 bits / iteration.
         * dump_size & dump_offset in 16-bit words
         */
        while (dump_offset < dump_size) {
            if (dump_offset % 2 == 0)
                /* Print the offset in the shadow space in Bytes */
                bcm_bprintf(b, "\n 0x%04x", dump_offset * 2);

            bcm_bprintf(b, "\t0x%04x", *(nvm_shadow + dump_offset));
            dump_offset += 0x1;
        }
    }

    /* Switch back to the original core */
    si_setcore(bus->sih, cur_coreid, 0);

    return BCME_OK;
} /* dhdpcie_cc_nvmshadow */
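
/* Illustrative sketch (not part of the original source): dumping the NVM
 * shadow through a bcmstrbuf. bcm_binit() is the standard bcmutils helper for
 * wrapping a flat character buffer; the buffer size here is arbitrary.
 */
#if 0 /* example only */
    char buf[4096];
    struct bcmstrbuf strbuf;

    bcm_binit(&strbuf, buf, sizeof(buf));
    if (dhdpcie_cc_nvmshadow(bus, &strbuf) == BCME_OK)
        DHD_ERROR(("%s\n", buf));
#endif /* example only */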
/** Flow rings are dynamically created and destroyed */
void dhd_bus_clean_flow_ring(dhd_bus_t *bus, void *node)
{
    void *pkt;
    flow_queue_t *queue;
    flow_ring_node_t *flow_ring_node = (flow_ring_node_t *)node;
    unsigned long flags;

    queue = &flow_ring_node->queue;

#ifdef DHDTCPACK_SUPPRESS
    /* Clean tcp_ack_info_tbl in order to prevent access to a flushed pkt
     * when a new packet comes in from the network stack.
     */
    dhd_tcpack_info_tbl_clean(bus->dhd);
#endif /* DHDTCPACK_SUPPRESS */

#ifdef DHD_HP2P
    if (flow_ring_node->hp2p_ring) {
        bus->dhd->hp2p_ring_active = FALSE;
        flow_ring_node->hp2p_ring = FALSE;
    }
#endif /* DHD_HP2P */

    /* clean up BUS level info */
    DHD_FLOWRING_LOCK(flow_ring_node->lock, flags);

    /* Flush all pending packets in the queue, if any */
    while ((pkt = dhd_flow_queue_dequeue(bus->dhd, queue)) != NULL) {
        PKTFREE(bus->dhd->osh, pkt, TRUE);
    }
    ASSERT(DHD_FLOW_QUEUE_EMPTY(queue));

    /* Reinitialise flowring's queue */
    dhd_flow_queue_reinit(bus->dhd, queue, FLOW_RING_QUEUE_THRESHOLD);
    flow_ring_node->status = FLOW_RING_STATUS_CLOSED;
    flow_ring_node->active = FALSE;

    DHD_FLOWRING_UNLOCK(flow_ring_node->lock, flags);

    /* Hold flowring_list_lock to ensure no race condition while accessing the list */
    DHD_FLOWRING_LIST_LOCK(bus->dhd->flowring_list_lock, flags);
    dll_delete(&flow_ring_node->list);
    DHD_FLOWRING_LIST_UNLOCK(bus->dhd->flowring_list_lock, flags);

    /* Release the flowring object back into the pool */
    dhd_prot_flowrings_pool_release(bus->dhd,
        flow_ring_node->flowid, flow_ring_node->prot_info);

    /* Free the flowid back to the flowid allocator */
    dhd_flowid_free(bus->dhd, flow_ring_node->flow_info.ifindex,
        flow_ring_node->flowid);
}
  8792. /**
  8793. * Allocate a Flow ring buffer,
  8794. * Init Ring buffer, send Msg to device about flow ring creation
  8795. */
  8796. int
  8797. dhd_bus_flow_ring_create_request(dhd_bus_t *bus, void *arg)
  8798. {
  8799. flow_ring_node_t *flow_ring_node = (flow_ring_node_t *)arg;
  8800. DHD_INFO(("%s :Flow create\n", __FUNCTION__));
  8801. /* Send Msg to device about flow ring creation */
  8802. if (dhd_prot_flow_ring_create(bus->dhd, flow_ring_node) != BCME_OK)
  8803. return BCME_NOMEM;
  8804. return BCME_OK;
  8805. }
  8806. /** Handle response from dongle on a 'flow ring create' request */
  8807. void
  8808. dhd_bus_flow_ring_create_response(dhd_bus_t *bus, uint16 flowid, int32 status)
  8809. {
  8810. flow_ring_node_t *flow_ring_node;
  8811. unsigned long flags;
  8812. DHD_INFO(("%s :Flow Response %d \n", __FUNCTION__, flowid));
  8813. /* Boundary check of the flowid */
  8814. if (flowid >= bus->dhd->num_flow_rings) {
  8815. DHD_ERROR(("%s: flowid is invalid %d, max %d\n", __FUNCTION__,
  8816. flowid, bus->dhd->num_flow_rings));
  8817. return;
  8818. }
  8819. flow_ring_node = DHD_FLOW_RING(bus->dhd, flowid);
  8820. if (!flow_ring_node) {
  8821. DHD_ERROR(("%s: flow_ring_node is NULL\n", __FUNCTION__));
  8822. return;
  8823. }
  8824. ASSERT(flow_ring_node->flowid == flowid);
  8825. if (flow_ring_node->flowid != flowid) {
  8826. DHD_ERROR(("%s: flowid %d is different from the flowid "
  8827. "of the flow_ring_node %d\n", __FUNCTION__, flowid,
  8828. flow_ring_node->flowid));
  8829. return;
  8830. }
  8831. if (status != BCME_OK) {
  8832. DHD_ERROR(("%s Flow create Response failure error status = %d \n",
  8833. __FUNCTION__, status));
  8834. /* Call Flow clean up */
  8835. dhd_bus_clean_flow_ring(bus, flow_ring_node);
  8836. return;
  8837. }
  8838. DHD_FLOWRING_LOCK(flow_ring_node->lock, flags);
  8839. flow_ring_node->status = FLOW_RING_STATUS_OPEN;
  8840. DHD_FLOWRING_UNLOCK(flow_ring_node->lock, flags);
/* Now add the flow ring node to the active list.
 * This code used to live in dhd_flowid_lookup(), which added the node
 * to the active list before dhd_prot_flow_ring_create() had finished
 * populating it. If a D2H interrupt arrived in that window, the bottom
 * half would call dhd_update_txflowrings(), walk the active flow ring
 * list and operate on a node whose contents could still be NULL,
 * leading to crashes. Hence the node is added to the active list only
 * after it is truly created, i.e. after the create response has been
 * received from the dongle.
 */
  8857. DHD_FLOWRING_LIST_LOCK(bus->dhd->flowring_list_lock, flags);
  8858. dll_prepend(&bus->flowring_active_list, &flow_ring_node->list);
  8859. DHD_FLOWRING_LIST_UNLOCK(bus->dhd->flowring_list_lock, flags);
  8860. dhd_bus_schedule_queue(bus, flowid, FALSE); /* from queue to flowring */
  8861. return;
  8862. }
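/*
 * Illustrative sequence (sketch, not compiled) of the create handshake
 * described above: the node becomes visible to the active-list walkers
 * only from the response path, never from the request path.
 * example_flow_create_order is a hypothetical name.
 */
#if 0
static void
example_flow_create_order(dhd_bus_t *bus, flow_ring_node_t *node)
{
	/* 1. Host sends the create request; node is NOT on the active list yet */
	(void)dhd_bus_flow_ring_create_request(bus, node);
	/* 2. Dongle processes the request and raises a D2H interrupt */
	/* 3. The DPC invokes dhd_bus_flow_ring_create_response(bus,
	 *    node->flowid, status); only there is the node prepended to
	 *    bus->flowring_active_list, so dhd_update_txflowrings() can
	 *    never see a half-initialized node.
	 */
}
#endif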
  8863. int
  8864. dhd_bus_flow_ring_delete_request(dhd_bus_t *bus, void *arg)
  8865. {
  8866. void * pkt;
  8867. flow_queue_t *queue;
  8868. flow_ring_node_t *flow_ring_node;
  8869. unsigned long flags;
  8870. DHD_INFO(("%s :Flow Delete\n", __FUNCTION__));
  8871. flow_ring_node = (flow_ring_node_t *)arg;
  8872. #ifdef DHDTCPACK_SUPPRESS
/* Clean tcp_ack_info_tbl so that a packet newly arriving from the
 * network stack cannot reference a flushed packet.
 */
  8876. dhd_tcpack_info_tbl_clean(bus->dhd);
  8877. #endif /* DHDTCPACK_SUPPRESS */
  8878. DHD_FLOWRING_LOCK(flow_ring_node->lock, flags);
  8879. if (flow_ring_node->status == FLOW_RING_STATUS_DELETE_PENDING) {
  8880. DHD_FLOWRING_UNLOCK(flow_ring_node->lock, flags);
  8881. DHD_ERROR(("%s :Delete Pending flowid %u\n", __FUNCTION__, flow_ring_node->flowid));
  8882. return BCME_ERROR;
  8883. }
  8884. flow_ring_node->status = FLOW_RING_STATUS_DELETE_PENDING;
  8885. queue = &flow_ring_node->queue; /* queue associated with flow ring */
  8886. /* Flush all pending packets in the queue, if any */
  8887. while ((pkt = dhd_flow_queue_dequeue(bus->dhd, queue)) != NULL) {
  8888. PKTFREE(bus->dhd->osh, pkt, TRUE);
  8889. }
  8890. ASSERT(DHD_FLOW_QUEUE_EMPTY(queue));
  8891. DHD_FLOWRING_UNLOCK(flow_ring_node->lock, flags);
  8892. /* Send Msg to device about flow ring deletion */
  8893. dhd_prot_flow_ring_delete(bus->dhd, flow_ring_node);
  8894. return BCME_OK;
  8895. }
  8896. void
  8897. dhd_bus_flow_ring_delete_response(dhd_bus_t *bus, uint16 flowid, uint32 status)
  8898. {
  8899. flow_ring_node_t *flow_ring_node;
  8900. DHD_INFO(("%s :Flow Delete Response %d \n", __FUNCTION__, flowid));
  8901. /* Boundary check of the flowid */
  8902. if (flowid >= bus->dhd->num_flow_rings) {
  8903. DHD_ERROR(("%s: flowid is invalid %d, max %d\n", __FUNCTION__,
  8904. flowid, bus->dhd->num_flow_rings));
  8905. return;
  8906. }
  8907. flow_ring_node = DHD_FLOW_RING(bus->dhd, flowid);
  8908. if (!flow_ring_node) {
  8909. DHD_ERROR(("%s: flow_ring_node is NULL\n", __FUNCTION__));
  8910. return;
  8911. }
  8912. ASSERT(flow_ring_node->flowid == flowid);
  8913. if (flow_ring_node->flowid != flowid) {
  8914. DHD_ERROR(("%s: flowid %d is different from the flowid "
  8915. "of the flow_ring_node %d\n", __FUNCTION__, flowid,
  8916. flow_ring_node->flowid));
  8917. return;
  8918. }
  8919. if (status != BCME_OK) {
  8920. DHD_ERROR(("%s Flow Delete Response failure error status = %d \n",
  8921. __FUNCTION__, status));
  8922. return;
  8923. }
  8924. /* Call Flow clean up */
  8925. dhd_bus_clean_flow_ring(bus, flow_ring_node);
  8926. return;
  8927. }
  8928. int dhd_bus_flow_ring_flush_request(dhd_bus_t *bus, void *arg)
  8929. {
  8930. void *pkt;
  8931. flow_queue_t *queue;
  8932. flow_ring_node_t *flow_ring_node;
  8933. unsigned long flags;
  8934. DHD_INFO(("%s :Flow Flush\n", __FUNCTION__));
  8935. flow_ring_node = (flow_ring_node_t *)arg;
  8936. DHD_FLOWRING_LOCK(flow_ring_node->lock, flags);
  8937. queue = &flow_ring_node->queue; /* queue associated with flow ring */
  8938. /* Flow ring status will be set back to FLOW_RING_STATUS_OPEN
  8939. * once flow ring flush response is received for this flowring node.
  8940. */
  8941. flow_ring_node->status = FLOW_RING_STATUS_FLUSH_PENDING;
  8942. #ifdef DHDTCPACK_SUPPRESS
/* Clean tcp_ack_info_tbl so that a packet newly arriving from the
 * network stack cannot reference a flushed packet.
 */
  8946. dhd_tcpack_info_tbl_clean(bus->dhd);
  8947. #endif /* DHDTCPACK_SUPPRESS */
  8948. /* Flush all pending packets in the queue, if any */
  8949. while ((pkt = dhd_flow_queue_dequeue(bus->dhd, queue)) != NULL) {
  8950. PKTFREE(bus->dhd->osh, pkt, TRUE);
  8951. }
  8952. ASSERT(DHD_FLOW_QUEUE_EMPTY(queue));
  8953. DHD_FLOWRING_UNLOCK(flow_ring_node->lock, flags);
  8954. /* Send Msg to device about flow ring flush */
  8955. dhd_prot_flow_ring_flush(bus->dhd, flow_ring_node);
  8956. return BCME_OK;
  8957. }
  8958. void
  8959. dhd_bus_flow_ring_flush_response(dhd_bus_t *bus, uint16 flowid, uint32 status)
  8960. {
  8961. flow_ring_node_t *flow_ring_node;
  8962. if (status != BCME_OK) {
  8963. DHD_ERROR(("%s Flow flush Response failure error status = %d \n",
  8964. __FUNCTION__, status));
  8965. return;
  8966. }
  8967. /* Boundary check of the flowid */
  8968. if (flowid >= bus->dhd->num_flow_rings) {
  8969. DHD_ERROR(("%s: flowid is invalid %d, max %d\n", __FUNCTION__,
  8970. flowid, bus->dhd->num_flow_rings));
  8971. return;
  8972. }
  8973. flow_ring_node = DHD_FLOW_RING(bus->dhd, flowid);
  8974. if (!flow_ring_node) {
  8975. DHD_ERROR(("%s: flow_ring_node is NULL\n", __FUNCTION__));
  8976. return;
  8977. }
  8978. ASSERT(flow_ring_node->flowid == flowid);
  8979. if (flow_ring_node->flowid != flowid) {
  8980. DHD_ERROR(("%s: flowid %d is different from the flowid "
  8981. "of the flow_ring_node %d\n", __FUNCTION__, flowid,
  8982. flow_ring_node->flowid));
  8983. return;
  8984. }
  8985. flow_ring_node->status = FLOW_RING_STATUS_OPEN;
  8986. return;
  8987. }
  8988. uint32
  8989. dhd_bus_max_h2d_queues(struct dhd_bus *bus)
  8990. {
  8991. return bus->max_submission_rings;
  8992. }
  8993. /* To be symmetric with SDIO */
  8994. void
  8995. dhd_bus_pktq_flush(dhd_pub_t *dhdp)
  8996. {
  8997. return;
  8998. }
  8999. void
  9000. dhd_bus_set_linkdown(dhd_pub_t *dhdp, bool val)
  9001. {
  9002. dhdp->bus->is_linkdown = val;
  9003. }
  9004. int
  9005. dhd_bus_get_linkdown(dhd_pub_t *dhdp)
  9006. {
  9007. return dhdp->bus->is_linkdown;
  9008. }
  9009. int
  9010. dhd_bus_get_cto(dhd_pub_t *dhdp)
  9011. {
  9012. return dhdp->bus->cto_triggered;
  9013. }
  9014. #ifdef IDLE_TX_FLOW_MGMT
  9015. /* resume request */
  9016. int
  9017. dhd_bus_flow_ring_resume_request(dhd_bus_t *bus, void *arg)
  9018. {
  9019. flow_ring_node_t *flow_ring_node = (flow_ring_node_t *)arg;
  9020. DHD_ERROR(("%s :Flow Resume Request flow id %u\n", __FUNCTION__, flow_ring_node->flowid));
  9021. flow_ring_node->status = FLOW_RING_STATUS_RESUME_PENDING;
  9022. /* Send Msg to device about flow ring resume */
  9023. dhd_prot_flow_ring_resume(bus->dhd, flow_ring_node);
  9024. return BCME_OK;
  9025. }
  9026. /* add the node back to active flowring */
  9027. void
  9028. dhd_bus_flow_ring_resume_response(dhd_bus_t *bus, uint16 flowid, int32 status)
  9029. {
  9030. flow_ring_node_t *flow_ring_node;
  9031. DHD_TRACE(("%s :flowid %d \n", __FUNCTION__, flowid));
  9032. flow_ring_node = DHD_FLOW_RING(bus->dhd, flowid);
  9033. ASSERT(flow_ring_node->flowid == flowid);
  9034. if (status != BCME_OK) {
  9035. DHD_ERROR(("%s Error Status = %d \n",
  9036. __FUNCTION__, status));
  9037. return;
  9038. }
  9039. DHD_TRACE(("%s :Number of pkts queued in FlowId:%d is -> %u!!\n",
  9040. __FUNCTION__, flow_ring_node->flowid, flow_ring_node->queue.len));
  9041. flow_ring_node->status = FLOW_RING_STATUS_OPEN;
  9042. dhd_bus_schedule_queue(bus, flowid, FALSE);
  9043. return;
  9044. }
  9045. /* scan the flow rings in active list for idle time out */
  9046. void
  9047. dhd_bus_check_idle_scan(dhd_bus_t *bus)
  9048. {
  9049. uint64 time_stamp; /* in millisec */
  9050. uint64 diff;
  9051. time_stamp = OSL_SYSUPTIME();
  9052. diff = time_stamp - bus->active_list_last_process_ts;
  9053. if (diff > IDLE_FLOW_LIST_TIMEOUT) {
  9054. dhd_bus_idle_scan(bus);
  9055. bus->active_list_last_process_ts = OSL_SYSUPTIME();
  9056. }
  9057. return;
  9058. }
  9059. /* scan the nodes in active list till it finds a non idle node */
  9060. void
  9061. dhd_bus_idle_scan(dhd_bus_t *bus)
  9062. {
  9063. dll_t *item, *prev;
  9064. flow_ring_node_t *flow_ring_node;
  9065. uint64 time_stamp, diff;
  9066. unsigned long flags;
  9067. uint16 ringid[MAX_SUSPEND_REQ];
  9068. uint16 count = 0;
  9069. time_stamp = OSL_SYSUPTIME();
  9070. DHD_FLOWRING_LIST_LOCK(bus->dhd->flowring_list_lock, flags);
  9071. for (item = dll_tail_p(&bus->flowring_active_list);
  9072. !dll_end(&bus->flowring_active_list, item); item = prev) {
  9073. prev = dll_prev_p(item);
  9074. flow_ring_node = dhd_constlist_to_flowring(item);
  9075. if (flow_ring_node->flowid == (bus->max_submission_rings - 1))
  9076. continue;
  9077. if (flow_ring_node->status != FLOW_RING_STATUS_OPEN) {
  9078. /* Takes care of deleting zombie rings */
  9079. /* delete from the active list */
  9080. DHD_INFO(("deleting flow id %u from active list\n",
  9081. flow_ring_node->flowid));
  9082. __dhd_flow_ring_delete_from_active_list(bus, flow_ring_node);
  9083. continue;
  9084. }
  9085. diff = time_stamp - flow_ring_node->last_active_ts;
  9086. if ((diff > IDLE_FLOW_RING_TIMEOUT) && !(flow_ring_node->queue.len)) {
  9087. DHD_ERROR(("\nSuspending flowid %d\n", flow_ring_node->flowid));
  9088. /* delete from the active list */
  9089. __dhd_flow_ring_delete_from_active_list(bus, flow_ring_node);
  9090. flow_ring_node->status = FLOW_RING_STATUS_SUSPENDED;
  9091. ringid[count] = flow_ring_node->flowid;
  9092. count++;
  9093. if (count == MAX_SUSPEND_REQ) {
  9094. /* create a batch message now!! */
  9095. dhd_prot_flow_ring_batch_suspend_request(bus->dhd, ringid, count);
  9096. count = 0;
  9097. }
  9098. } else {
  9099. /* No more scanning, break from here! */
  9100. break;
  9101. }
  9102. }
  9103. if (count) {
  9104. dhd_prot_flow_ring_batch_suspend_request(bus->dhd, ringid, count);
  9105. }
  9106. DHD_FLOWRING_LIST_UNLOCK(bus->dhd->flowring_list_lock, flags);
  9107. return;
  9108. }
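/*
 * Sketch (not compiled) of the batching rule used by the idle scan above:
 * ring IDs accumulate in a fixed-size array, a full batch is sent
 * immediately, and any partial batch is flushed once the scan ends.
 * example_batch_suspend is a hypothetical name.
 */
#if 0
static void
example_batch_suspend(dhd_bus_t *bus, uint16 *ids, uint16 nids)
{
	uint16 ringid[MAX_SUSPEND_REQ];
	uint16 count = 0, i;

	for (i = 0; i < nids; i++) {
		ringid[count++] = ids[i];
		if (count == MAX_SUSPEND_REQ) {	/* full batch: send now */
			dhd_prot_flow_ring_batch_suspend_request(bus->dhd, ringid, count);
			count = 0;
		}
	}
	if (count)				/* flush the remainder */
		dhd_prot_flow_ring_batch_suspend_request(bus->dhd, ringid, count);
}
#endif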
  9109. void dhd_flow_ring_move_to_active_list_head(struct dhd_bus *bus, flow_ring_node_t *flow_ring_node)
  9110. {
  9111. unsigned long flags;
  9112. dll_t* list;
  9113. DHD_FLOWRING_LIST_LOCK(bus->dhd->flowring_list_lock, flags);
  9114. /* check if the node is already at head, otherwise delete it and prepend */
  9115. list = dll_head_p(&bus->flowring_active_list);
  9116. if (&flow_ring_node->list != list) {
  9117. dll_delete(&flow_ring_node->list);
  9118. dll_prepend(&bus->flowring_active_list, &flow_ring_node->list);
  9119. }
  9120. /* update flow ring timestamp */
  9121. flow_ring_node->last_active_ts = OSL_SYSUPTIME();
  9122. DHD_FLOWRING_LIST_UNLOCK(bus->dhd->flowring_list_lock, flags);
  9123. return;
  9124. }
  9125. void dhd_flow_ring_add_to_active_list(struct dhd_bus *bus, flow_ring_node_t *flow_ring_node)
  9126. {
  9127. unsigned long flags;
  9128. DHD_FLOWRING_LIST_LOCK(bus->dhd->flowring_list_lock, flags);
  9129. dll_prepend(&bus->flowring_active_list, &flow_ring_node->list);
  9130. /* update flow ring timestamp */
  9131. flow_ring_node->last_active_ts = OSL_SYSUPTIME();
  9132. DHD_FLOWRING_LIST_UNLOCK(bus->dhd->flowring_list_lock, flags);
  9133. return;
  9134. }
  9135. void __dhd_flow_ring_delete_from_active_list(struct dhd_bus *bus, flow_ring_node_t *flow_ring_node)
  9136. {
  9137. dll_delete(&flow_ring_node->list);
  9138. }
  9139. void dhd_flow_ring_delete_from_active_list(struct dhd_bus *bus, flow_ring_node_t *flow_ring_node)
  9140. {
  9141. unsigned long flags;
  9142. DHD_FLOWRING_LIST_LOCK(bus->dhd->flowring_list_lock, flags);
  9143. __dhd_flow_ring_delete_from_active_list(bus, flow_ring_node);
  9144. DHD_FLOWRING_LIST_UNLOCK(bus->dhd->flowring_list_lock, flags);
  9145. return;
  9146. }
  9147. #endif /* IDLE_TX_FLOW_MGMT */
  9148. int
  9149. dhdpcie_bus_clock_start(struct dhd_bus *bus)
  9150. {
  9151. return dhdpcie_start_host_pcieclock(bus);
  9152. }
  9153. int
  9154. dhdpcie_bus_clock_stop(struct dhd_bus *bus)
  9155. {
  9156. return dhdpcie_stop_host_pcieclock(bus);
  9157. }
  9158. int
  9159. dhdpcie_bus_disable_device(struct dhd_bus *bus)
  9160. {
  9161. return dhdpcie_disable_device(bus);
  9162. }
  9163. int
  9164. dhdpcie_bus_enable_device(struct dhd_bus *bus)
  9165. {
  9166. return dhdpcie_enable_device(bus);
  9167. }
  9168. int
  9169. dhdpcie_bus_alloc_resource(struct dhd_bus *bus)
  9170. {
  9171. return dhdpcie_alloc_resource(bus);
  9172. }
  9173. void
  9174. dhdpcie_bus_free_resource(struct dhd_bus *bus)
  9175. {
  9176. dhdpcie_free_resource(bus);
  9177. }
  9178. int
  9179. dhd_bus_request_irq(struct dhd_bus *bus)
  9180. {
  9181. return dhdpcie_bus_request_irq(bus);
  9182. }
  9183. bool
  9184. dhdpcie_bus_dongle_attach(struct dhd_bus *bus)
  9185. {
  9186. return dhdpcie_dongle_attach(bus);
  9187. }
  9188. int
  9189. dhd_bus_release_dongle(struct dhd_bus *bus)
  9190. {
  9191. bool dongle_isolation;
  9192. osl_t *osh;
  9193. DHD_TRACE(("%s: Enter\n", __FUNCTION__));
  9194. if (bus) {
  9195. osh = bus->osh;
  9196. ASSERT(osh);
  9197. if (bus->dhd) {
  9198. #if defined(DEBUGGER) || defined(DHD_DSCOPE)
  9199. debugger_close();
  9200. #endif /* DEBUGGER || DHD_DSCOPE */
  9201. dongle_isolation = bus->dhd->dongle_isolation;
  9202. dhdpcie_bus_release_dongle(bus, osh, dongle_isolation, TRUE);
  9203. }
  9204. }
  9205. return 0;
  9206. }
  9207. int
  9208. dhdpcie_cto_cfg_init(struct dhd_bus *bus, bool enable)
  9209. {
  9210. uint32 val;
  9211. if (enable) {
  9212. dhdpcie_bus_cfg_write_dword(bus, PCI_INT_MASK, 4,
  9213. PCI_CTO_INT_MASK | PCI_SBIM_MASK_SERR);
  9214. val = dhdpcie_bus_cfg_read_dword(bus, PCI_SPROM_CONTROL, 4);
  9215. dhdpcie_bus_cfg_write_dword(bus, PCI_SPROM_CONTROL, 4, val | SPROM_BACKPLANE_EN);
  9216. } else {
  9217. dhdpcie_bus_cfg_write_dword(bus, PCI_INT_MASK, 4, 0);
  9218. val = dhdpcie_bus_cfg_read_dword(bus, PCI_SPROM_CONTROL, 4);
  9219. dhdpcie_bus_cfg_write_dword(bus, PCI_SPROM_CONTROL, 4, val & ~SPROM_BACKPLANE_EN);
  9220. }
  9221. return 0;
  9222. }
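/*
 * Sketch (not compiled) of the read-modify-write idiom used above for PCI
 * config bits; 'set' chooses between OR-ing the bits in and masking them
 * out. example_cfg_rmw is a hypothetical helper name.
 */
#if 0
static void
example_cfg_rmw(dhd_bus_t *bus, uint offset, uint32 bits, bool set)
{
	uint32 val = dhdpcie_bus_cfg_read_dword(bus, offset, 4);

	val = set ? (val | bits) : (val & ~bits);
	dhdpcie_bus_cfg_write_dword(bus, offset, 4, val);
}

/* e.g. enabling backplane access via SPROM control:
 * example_cfg_rmw(bus, PCI_SPROM_CONTROL, SPROM_BACKPLANE_EN, TRUE);
 */
#endif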
  9223. int
  9224. dhdpcie_cto_init(struct dhd_bus *bus, bool enable)
  9225. {
  9226. if (bus->sih->buscorerev < 19) {
  9227. DHD_INFO(("%s: Unsupported CTO, buscorerev=%d\n",
  9228. __FUNCTION__, bus->sih->buscorerev));
  9229. return BCME_UNSUPPORTED;
  9230. }
  9231. if (bus->sih->buscorerev == 19) {
  9232. uint32 pcie_lnkst;
  9233. si_corereg(bus->sih, bus->sih->buscoreidx,
  9234. OFFSETOF(sbpcieregs_t, configaddr), ~0, PCI_LINK_STATUS);
  9235. pcie_lnkst = si_corereg(bus->sih, bus->sih->buscoreidx,
  9236. OFFSETOF(sbpcieregs_t, configdata), 0, 0);
  9237. if (((pcie_lnkst >> PCI_LINK_SPEED_SHIFT) &
  9238. PCI_LINK_SPEED_MASK) == PCIE_LNK_SPEED_GEN1) {
  9239. return BCME_UNSUPPORTED;
  9240. }
  9241. }
  9242. bus->cto_enable = enable;
  9243. dhdpcie_cto_cfg_init(bus, enable);
  9244. if (enable) {
  9245. if (bus->cto_threshold == 0) {
  9246. bus->cto_threshold = PCIE_CTO_TO_THRESH_DEFAULT;
  9247. }
  9248. si_corereg(bus->sih, bus->sih->buscoreidx,
  9249. OFFSETOF(sbpcieregs_t, ctoctrl), ~0,
  9250. ((bus->cto_threshold << PCIE_CTO_TO_THRESHOLD_SHIFT) &
  9251. PCIE_CTO_TO_THRESHHOLD_MASK) |
  9252. ((PCIE_CTO_CLKCHKCNT_VAL << PCIE_CTO_CLKCHKCNT_SHIFT) &
  9253. PCIE_CTO_CLKCHKCNT_MASK) |
  9254. PCIE_CTO_ENAB_MASK);
  9255. } else {
  9256. si_corereg(bus->sih, bus->sih->buscoreidx,
  9257. OFFSETOF(sbpcieregs_t, ctoctrl), ~0, 0);
  9258. }
  9259. DHD_ERROR(("%s: set CTO prevention and recovery enable/disable %d\n",
  9260. __FUNCTION__, bus->cto_enable));
  9261. return 0;
  9262. }
  9263. static int
  9264. dhdpcie_cto_error_recovery(struct dhd_bus *bus)
  9265. {
  9266. uint32 pci_intmask, err_status;
  9267. uint8 i = 0;
  9268. uint32 val;
  9269. pci_intmask = dhdpcie_bus_cfg_read_dword(bus, PCI_INT_MASK, 4);
  9270. dhdpcie_bus_cfg_write_dword(bus, PCI_INT_MASK, 4, pci_intmask & ~PCI_CTO_INT_MASK);
  9271. DHD_OS_WAKE_LOCK(bus->dhd);
  9272. DHD_ERROR(("--- CTO Triggered --- %d\n", bus->pwr_req_ref));
  9273. /*
  9274. * DAR still accessible
  9275. */
  9276. dhd_bus_dump_dar_registers(bus);
  9277. /* reset backplane */
  9278. val = dhdpcie_bus_cfg_read_dword(bus, PCI_SPROM_CONTROL, 4);
  9279. dhdpcie_bus_cfg_write_dword(bus, PCI_SPROM_CONTROL, 4, val | SPROM_CFG_TO_SB_RST);
  9280. /* clear timeout error */
  9281. while (1) {
  9282. err_status = si_corereg(bus->sih, bus->sih->buscoreidx,
  9283. DAR_ERRLOG(bus->sih->buscorerev),
  9284. 0, 0);
  9285. if (err_status & PCIE_CTO_ERR_MASK) {
  9286. si_corereg(bus->sih, bus->sih->buscoreidx,
  9287. DAR_ERRLOG(bus->sih->buscorerev),
  9288. ~0, PCIE_CTO_ERR_MASK);
  9289. } else {
  9290. break;
  9291. }
  9292. OSL_DELAY(CTO_TO_CLEAR_WAIT_MS * 1000);
  9293. i++;
  9294. if (i > CTO_TO_CLEAR_WAIT_MAX_CNT) {
  9295. DHD_ERROR(("cto recovery fail\n"));
  9296. DHD_OS_WAKE_UNLOCK(bus->dhd);
  9297. return BCME_ERROR;
  9298. }
  9299. }
  9300. /* clear interrupt status */
  9301. dhdpcie_bus_cfg_write_dword(bus, PCI_INT_STATUS, 4, PCI_CTO_INT_MASK);
  9302. /* Halt ARM & remove reset */
/* TBD: an ARM halt could be added here if needed */
  9304. /* reset SPROM_CFG_TO_SB_RST */
  9305. val = dhdpcie_bus_cfg_read_dword(bus, PCI_SPROM_CONTROL, 4);
  9306. DHD_ERROR(("cto recovery reset 0x%x:SPROM_CFG_TO_SB_RST(0x%x) 0x%x\n",
  9307. PCI_SPROM_CONTROL, SPROM_CFG_TO_SB_RST, val));
  9308. dhdpcie_bus_cfg_write_dword(bus, PCI_SPROM_CONTROL, 4, val & ~SPROM_CFG_TO_SB_RST);
  9309. val = dhdpcie_bus_cfg_read_dword(bus, PCI_SPROM_CONTROL, 4);
  9310. DHD_ERROR(("cto recovery success 0x%x:SPROM_CFG_TO_SB_RST(0x%x) 0x%x\n",
  9311. PCI_SPROM_CONTROL, SPROM_CFG_TO_SB_RST, val));
  9312. DHD_OS_WAKE_UNLOCK(bus->dhd);
  9313. return BCME_OK;
  9314. }
  9315. void
  9316. dhdpcie_ssreset_dis_enum_rst(struct dhd_bus *bus)
  9317. {
  9318. uint32 val;
  9319. val = dhdpcie_bus_cfg_read_dword(bus, PCIE_CFG_SUBSYSTEM_CONTROL, 4);
  9320. dhdpcie_bus_cfg_write_dword(bus, PCIE_CFG_SUBSYSTEM_CONTROL, 4,
  9321. val | (0x1 << PCIE_SSRESET_DIS_ENUM_RST_BIT));
  9322. }
  9323. #if defined(DBG_PKT_MON) || defined(DHD_PKT_LOGGING)
  9324. static int
  9325. dhdpcie_init_d11status(struct dhd_bus *bus)
  9326. {
  9327. uint32 addr;
  9328. uint32 flags2;
  9329. int ret = 0;
  9330. if (bus->pcie_sh->flags2 & PCIE_SHARED2_D2H_D11_TX_STATUS) {
  9331. flags2 = bus->pcie_sh->flags2;
  9332. addr = bus->shared_addr + OFFSETOF(pciedev_shared_t, flags2);
  9333. flags2 |= PCIE_SHARED2_H2D_D11_TX_STATUS;
  9334. ret = dhdpcie_bus_membytes(bus, TRUE, addr,
  9335. (uint8 *)&flags2, sizeof(flags2));
  9336. if (ret < 0) {
  9337. DHD_ERROR(("%s: update flag bit (H2D_D11_TX_STATUS) failed\n",
  9338. __FUNCTION__));
  9339. return ret;
  9340. }
  9341. bus->pcie_sh->flags2 = flags2;
  9342. bus->dhd->d11_tx_status = TRUE;
  9343. }
  9344. return ret;
  9345. }
  9346. #else
  9347. static int
  9348. dhdpcie_init_d11status(struct dhd_bus *bus)
  9349. {
  9350. return 0;
  9351. }
  9352. #endif /* DBG_PKT_MON || DHD_PKT_LOGGING */
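/*
 * Sketch (not compiled) of the shared-flags update pattern above: a host
 * capability bit is OR-ed into the dongle's flags2 word and written back
 * over the bus; the cached copy is updated only on success.
 * example_set_shared_flag2 and 'flagbit' are illustrative names.
 */
#if 0
static int
example_set_shared_flag2(dhd_bus_t *bus, uint32 flagbit)
{
	uint32 flags2 = bus->pcie_sh->flags2 | flagbit;
	uint32 addr = bus->shared_addr + OFFSETOF(pciedev_shared_t, flags2);
	int ret = dhdpcie_bus_membytes(bus, TRUE, addr,
		(uint8 *)&flags2, sizeof(flags2));

	if (ret >= 0)
		bus->pcie_sh->flags2 = flags2;	/* keep the host cache in sync */
	return ret;
}
#endif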
  9353. #ifdef BCMPCIE_OOB_HOST_WAKE
  9354. int
  9355. dhd_bus_oob_intr_register(dhd_pub_t *dhdp)
  9356. {
  9357. return dhdpcie_oob_intr_register(dhdp->bus);
  9358. }
  9359. void
  9360. dhd_bus_oob_intr_unregister(dhd_pub_t *dhdp)
  9361. {
  9362. dhdpcie_oob_intr_unregister(dhdp->bus);
  9363. }
  9364. void
  9365. dhd_bus_oob_intr_set(dhd_pub_t *dhdp, bool enable)
  9366. {
  9367. dhdpcie_oob_intr_set(dhdp->bus, enable);
  9368. }
  9369. #endif /* BCMPCIE_OOB_HOST_WAKE */
  9370. bool
  9371. dhdpcie_bus_get_pcie_hostready_supported(dhd_bus_t *bus)
  9372. {
  9373. return bus->dhd->d2h_hostrdy_supported;
  9374. }
  9375. void
  9376. dhd_pcie_dump_core_regs(dhd_pub_t * pub, uint32 index, uint32 first_addr, uint32 last_addr)
  9377. {
  9378. dhd_bus_t *bus = pub->bus;
  9379. uint32 coreoffset = index << 12;
  9380. uint32 core_addr = SI_ENUM_BASE(bus->sih) + coreoffset;
  9381. uint32 value;
  9382. while (first_addr <= last_addr) {
  9383. core_addr = SI_ENUM_BASE(bus->sih) + coreoffset + first_addr;
  9384. if (serialized_backplane_access(bus, core_addr, 4, &value, TRUE) != BCME_OK) {
  9385. DHD_ERROR(("Invalid size/addr combination \n"));
  9386. }
  9387. DHD_ERROR(("[0x%08x]: 0x%08x\n", core_addr, value));
  9388. first_addr = first_addr + 4;
  9389. }
  9390. }
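/*
 * Address math used above (assuming each core occupies a 4KB window in the
 * enumeration space, hence index << 12): e.g. index 2 and first_addr 0x408
 * yield SI_ENUM_BASE(sih) + 0x2000 + 0x408.
 */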
  9391. bool
  9392. dhdpcie_bus_get_pcie_hwa_supported(dhd_bus_t *bus)
  9393. {
  9394. if (!bus->dhd)
  9395. return FALSE;
  9396. else if (bus->hwa_enab_bmap) {
  9397. return bus->dhd->hwa_enable;
  9398. } else {
  9399. return FALSE;
  9400. }
  9401. }
  9402. bool
  9403. dhdpcie_bus_get_pcie_idma_supported(dhd_bus_t *bus)
  9404. {
  9405. if (!bus->dhd)
  9406. return FALSE;
  9407. else if (bus->idma_enabled) {
  9408. return bus->dhd->idma_enable;
  9409. } else {
  9410. return FALSE;
  9411. }
  9412. }
  9413. bool
  9414. dhdpcie_bus_get_pcie_ifrm_supported(dhd_bus_t *bus)
  9415. {
  9416. if (!bus->dhd)
  9417. return FALSE;
  9418. else if (bus->ifrm_enabled) {
  9419. return bus->dhd->ifrm_enable;
  9420. } else {
  9421. return FALSE;
  9422. }
  9423. }
  9424. bool
  9425. dhdpcie_bus_get_pcie_dar_supported(dhd_bus_t *bus)
  9426. {
  9427. if (!bus->dhd) {
  9428. return FALSE;
  9429. } else if (bus->dar_enabled) {
  9430. return bus->dhd->dar_enable;
  9431. } else {
  9432. return FALSE;
  9433. }
  9434. }
  9435. void
  9436. dhdpcie_bus_enab_pcie_dw(dhd_bus_t *bus, uint8 dw_option)
  9437. {
  9438. DHD_ERROR(("ENABLING DW:%d\n", dw_option));
  9439. bus->dw_option = dw_option;
  9440. }
  9441. void
  9442. dhd_bus_dump_trap_info(dhd_bus_t *bus, struct bcmstrbuf *strbuf)
  9443. {
  9444. trap_t *tr = &bus->dhd->last_trap_info;
  9445. bcm_bprintf(strbuf,
  9446. "\nTRAP type 0x%x @ epc 0x%x, cpsr 0x%x, spsr 0x%x, sp 0x%x,"
  9447. " lp 0x%x, rpc 0x%x"
  9448. "\nTrap offset 0x%x, r0 0x%x, r1 0x%x, r2 0x%x, r3 0x%x, "
  9449. "r4 0x%x, r5 0x%x, r6 0x%x, r7 0x%x, r8 0x%x, r9 0x%x, "
  9450. "r10 0x%x, r11 0x%x, r12 0x%x\n\n",
  9451. ltoh32(tr->type), ltoh32(tr->epc), ltoh32(tr->cpsr), ltoh32(tr->spsr),
  9452. ltoh32(tr->r13), ltoh32(tr->r14), ltoh32(tr->pc),
  9453. ltoh32(bus->pcie_sh->trap_addr),
  9454. ltoh32(tr->r0), ltoh32(tr->r1), ltoh32(tr->r2), ltoh32(tr->r3),
  9455. ltoh32(tr->r4), ltoh32(tr->r5), ltoh32(tr->r6), ltoh32(tr->r7),
  9456. ltoh32(tr->r8), ltoh32(tr->r9), ltoh32(tr->r10),
  9457. ltoh32(tr->r11), ltoh32(tr->r12));
  9458. }
  9459. int
  9460. dhd_bus_readwrite_bp_addr(dhd_pub_t *dhdp, uint addr, uint size, uint* data, bool read)
  9461. {
  9462. int bcmerror = 0;
  9463. struct dhd_bus *bus = dhdp->bus;
  9464. if (serialized_backplane_access(bus, addr, size, data, read) != BCME_OK) {
  9465. DHD_ERROR(("Invalid size/addr combination \n"));
  9466. bcmerror = BCME_ERROR;
  9467. }
  9468. return bcmerror;
  9469. }
  9470. int
  9471. dhd_get_idletime(dhd_pub_t *dhd)
  9472. {
  9473. return dhd->bus->idletime;
  9474. }
  9475. static INLINE void
  9476. dhd_sbreg_op(dhd_pub_t *dhd, uint addr, uint *val, bool read)
  9477. {
  9478. OSL_DELAY(1);
  9479. if (serialized_backplane_access(dhd->bus, addr, sizeof(uint), val, read) != BCME_OK) {
  9480. DHD_ERROR(("sbreg: Invalid uint addr: 0x%x \n", addr));
  9481. } else {
  9482. DHD_ERROR(("sbreg: addr:0x%x val:0x%x read:%d\n", addr, *val, read));
  9483. }
  9484. return;
  9485. }
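/*
 * Typical usage of dhd_sbreg_op() (sketch, not compiled): read a backplane
 * register, set a bit and write it back through the serialized accessor.
 * example_sbreg_set_bit is an illustrative name.
 */
#if 0
static void
example_sbreg_set_bit(dhd_pub_t *dhd, uint addr, uint bit)
{
	uint val = 0;

	dhd_sbreg_op(dhd, addr, &val, TRUE);	/* read */
	val |= bit;
	dhd_sbreg_op(dhd, addr, &val, FALSE);	/* write back */
}
#endif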
  9486. #ifdef DHD_SSSR_DUMP
  9487. static int
  9488. dhdpcie_get_sssr_fifo_dump(dhd_pub_t *dhd, uint *buf, uint fifo_size,
  9489. uint addr_reg, uint data_reg)
  9490. {
  9491. uint addr;
  9492. uint val = 0;
  9493. int i;
  9494. DHD_ERROR(("%s\n", __FUNCTION__));
  9495. if (!buf) {
  9496. DHD_ERROR(("%s: buf is NULL\n", __FUNCTION__));
  9497. return BCME_ERROR;
  9498. }
  9499. if (!fifo_size) {
  9500. DHD_ERROR(("%s: fifo_size is 0\n", __FUNCTION__));
  9501. return BCME_ERROR;
  9502. }
  9503. /* Set the base address offset to 0 */
  9504. addr = addr_reg;
  9505. val = 0;
  9506. dhd_sbreg_op(dhd, addr, &val, FALSE);
  9507. addr = data_reg;
  9508. /* Read 4 bytes at once and loop for fifo_size / 4 */
  9509. for (i = 0; i < fifo_size / 4; i++) {
  9510. if (serialized_backplane_access(dhd->bus, addr,
  9511. sizeof(uint), &val, TRUE) != BCME_OK) {
  9512. DHD_ERROR(("%s: error in serialized_backplane_access\n", __FUNCTION__));
  9513. return BCME_ERROR;
  9514. }
  9515. buf[i] = val;
  9516. OSL_DELAY(1);
  9517. }
  9518. return BCME_OK;
  9519. }
  9520. static int
  9521. dhdpcie_get_sssr_dig_dump(dhd_pub_t *dhd, uint *buf, uint fifo_size,
  9522. uint addr_reg)
  9523. {
  9524. uint addr;
  9525. uint val = 0;
  9526. int i;
  9527. si_t *sih = dhd->bus->sih;
  9528. DHD_ERROR(("%s\n", __FUNCTION__));
  9529. if (!buf) {
  9530. DHD_ERROR(("%s: buf is NULL\n", __FUNCTION__));
  9531. return BCME_ERROR;
  9532. }
  9533. if (!fifo_size) {
  9534. DHD_ERROR(("%s: fifo_size is 0\n", __FUNCTION__));
  9535. return BCME_ERROR;
  9536. }
  9537. if (addr_reg) {
  9538. if ((!dhd->sssr_reg_info.vasip_regs.vasip_sr_size) &&
  9539. dhd->sssr_reg_info.length > OFFSETOF(sssr_reg_info_v1_t, dig_mem_info)) {
  9540. int err = dhdpcie_bus_membytes(dhd->bus, FALSE, addr_reg, (uint8 *)buf,
  9541. fifo_size);
  9542. if (err != BCME_OK) {
  9543. DHD_ERROR(("%s: Error reading dig dump from dongle !\n",
  9544. __FUNCTION__));
  9545. }
  9546. } else {
/* If the VASIP clock is disabled, enable it */
  9548. addr = dhd->sssr_reg_info.vasip_regs.wrapper_regs.ioctrl;
  9549. dhd_sbreg_op(dhd, addr, &val, TRUE);
  9550. if (!val) {
  9551. val = 1;
  9552. dhd_sbreg_op(dhd, addr, &val, FALSE);
  9553. }
  9554. addr = addr_reg;
  9555. /* Read 4 bytes at once and loop for fifo_size / 4 */
  9556. for (i = 0; i < fifo_size / 4; i++, addr += 4) {
  9557. if (serialized_backplane_access(dhd->bus, addr, sizeof(uint),
  9558. &val, TRUE) != BCME_OK) {
  9559. DHD_ERROR(("%s: Invalid uint addr: 0x%x \n", __FUNCTION__,
  9560. addr));
  9561. return BCME_ERROR;
  9562. }
  9563. buf[i] = val;
  9564. OSL_DELAY(1);
  9565. }
  9566. }
  9567. } else {
  9568. uint cur_coreid;
  9569. uint chipc_corerev;
  9570. chipcregs_t *chipcregs;
  9571. /* Save the current core */
  9572. cur_coreid = si_coreid(sih);
  9573. /* Switch to ChipC */
  9574. chipcregs = (chipcregs_t *)si_setcore(sih, CC_CORE_ID, 0);
  9575. chipc_corerev = si_corerev(sih);
  9576. if ((chipc_corerev == 64) || (chipc_corerev == 65)) {
  9577. W_REG(si_osh(sih), &chipcregs->sr_memrw_addr, 0);
  9578. /* Read 4 bytes at once and loop for fifo_size / 4 */
  9579. for (i = 0; i < fifo_size / 4; i++) {
  9580. buf[i] = R_REG(si_osh(sih), &chipcregs->sr_memrw_data);
  9581. OSL_DELAY(1);
  9582. }
  9583. }
  9584. /* Switch back to the original core */
  9585. si_setcore(sih, cur_coreid, 0);
  9586. }
  9587. return BCME_OK;
  9588. }
  9589. #if defined(EWP_ETD_PRSRV_LOGS)
  9590. void
  9591. dhdpcie_get_etd_preserve_logs(dhd_pub_t *dhd,
  9592. uint8 *ext_trap_data, void *event_decode_data)
  9593. {
  9594. hnd_ext_trap_hdr_t *hdr = NULL;
  9595. bcm_tlv_t *tlv;
  9596. eventlog_trapdata_info_t *etd_evtlog = NULL;
  9597. eventlog_trap_buf_info_t *evtlog_buf_arr = NULL;
  9598. uint arr_size = 0;
  9599. int i = 0;
  9600. int err = 0;
  9601. uint32 seqnum = 0;
  9602. if (!ext_trap_data || !event_decode_data || !dhd)
  9603. return;
  9604. if (!dhd->concise_dbg_buf)
  9605. return;
  9606. /* First word is original trap_data, skip */
  9607. ext_trap_data += sizeof(uint32);
  9608. hdr = (hnd_ext_trap_hdr_t *)ext_trap_data;
  9609. tlv = bcm_parse_tlvs(hdr->data, hdr->len, TAG_TRAP_LOG_DATA);
  9610. if (tlv) {
  9611. uint32 baseaddr = 0;
  9612. uint32 endaddr = dhd->bus->dongle_ram_base + dhd->bus->ramsize - 4;
  9613. etd_evtlog = (eventlog_trapdata_info_t *)tlv->data;
  9614. DHD_ERROR(("%s: etd_evtlog tlv found, num_elements=%x; "
  9615. "seq_num=%x; log_arr_addr=%x\n", __FUNCTION__,
  9616. (etd_evtlog->num_elements),
  9617. ntoh32(etd_evtlog->seq_num), (etd_evtlog->log_arr_addr)));
  9618. arr_size = (uint32)sizeof(*evtlog_buf_arr) * (etd_evtlog->num_elements);
  9619. if (!arr_size) {
  9620. DHD_ERROR(("%s: num event logs is zero! \n", __FUNCTION__));
  9621. return;
  9622. }
  9623. evtlog_buf_arr = MALLOCZ(dhd->osh, arr_size);
  9624. if (!evtlog_buf_arr) {
  9625. DHD_ERROR(("%s: out of memory !\n", __FUNCTION__));
  9626. return;
  9627. }
  9628. /* boundary check */
  9629. baseaddr = etd_evtlog->log_arr_addr;
  9630. if ((baseaddr < dhd->bus->dongle_ram_base) ||
  9631. ((baseaddr + arr_size) > endaddr)) {
  9632. DHD_ERROR(("%s: Error reading invalid address\n",
  9633. __FUNCTION__));
  9634. goto err;
  9635. }
  9636. /* read the eventlog_trap_buf_info_t array from dongle memory */
  9637. err = dhdpcie_bus_membytes(dhd->bus, FALSE,
  9638. (ulong)(etd_evtlog->log_arr_addr),
  9639. (uint8 *)evtlog_buf_arr, arr_size);
  9640. if (err != BCME_OK) {
  9641. DHD_ERROR(("%s: Error reading event log array from dongle !\n",
  9642. __FUNCTION__));
  9643. goto err;
  9644. }
/* Only seq_num needs ntoh conversion: event logs from the info ring are
 * sent by the dongle in network byte order, and ETD follows the same
 * convention.
 */
  9649. seqnum = ntoh32(etd_evtlog->seq_num);
  9650. memset(dhd->concise_dbg_buf, 0, CONCISE_DUMP_BUFLEN);
  9651. for (i = 0; i < (etd_evtlog->num_elements); ++i) {
  9652. /* boundary check */
  9653. baseaddr = evtlog_buf_arr[i].buf_addr;
  9654. if ((baseaddr < dhd->bus->dongle_ram_base) ||
  9655. ((baseaddr + evtlog_buf_arr[i].len) > endaddr)) {
  9656. DHD_ERROR(("%s: Error reading invalid address\n",
  9657. __FUNCTION__));
  9658. goto err;
  9659. }
  9660. /* read each individual event log buf from dongle memory */
  9661. err = dhdpcie_bus_membytes(dhd->bus, FALSE,
  9662. ((ulong)evtlog_buf_arr[i].buf_addr),
  9663. dhd->concise_dbg_buf, (evtlog_buf_arr[i].len));
  9664. if (err != BCME_OK) {
  9665. DHD_ERROR(("%s: Error reading event log buffer from dongle !\n",
  9666. __FUNCTION__));
  9667. goto err;
  9668. }
  9669. dhd_dbg_msgtrace_log_parser(dhd, dhd->concise_dbg_buf,
  9670. event_decode_data, (evtlog_buf_arr[i].len),
  9671. FALSE, hton32(seqnum));
  9672. ++seqnum;
  9673. }
  9674. err:
  9675. MFREE(dhd->osh, evtlog_buf_arr, arr_size);
  9676. } else {
  9677. DHD_ERROR(("%s: Error getting trap log data in ETD !\n", __FUNCTION__));
  9678. }
  9679. }
#endif /* EWP_ETD_PRSRV_LOGS */
  9681. static uint32
  9682. dhdpcie_resume_chipcommon_powerctrl(dhd_pub_t *dhd, uint32 reg_val)
  9683. {
  9684. uint addr;
  9685. uint val = 0;
  9686. DHD_ERROR(("%s\n", __FUNCTION__));
  9687. /* conditionally clear bits [11:8] of PowerCtrl */
  9688. addr = dhd->sssr_reg_info.chipcommon_regs.base_regs.powerctrl;
  9689. dhd_sbreg_op(dhd, addr, &val, TRUE);
  9690. if (!(val & dhd->sssr_reg_info.chipcommon_regs.base_regs.powerctrl_mask)) {
  9691. addr = dhd->sssr_reg_info.chipcommon_regs.base_regs.powerctrl;
  9692. dhd_sbreg_op(dhd, addr, &reg_val, FALSE);
  9693. }
  9694. return BCME_OK;
  9695. }
  9696. static uint32
  9697. dhdpcie_suspend_chipcommon_powerctrl(dhd_pub_t *dhd)
  9698. {
  9699. uint addr;
  9700. uint val = 0, reg_val = 0;
  9701. DHD_ERROR(("%s\n", __FUNCTION__));
  9702. /* conditionally clear bits [11:8] of PowerCtrl */
  9703. addr = dhd->sssr_reg_info.chipcommon_regs.base_regs.powerctrl;
  9704. dhd_sbreg_op(dhd, addr, &reg_val, TRUE);
  9705. if (reg_val & dhd->sssr_reg_info.chipcommon_regs.base_regs.powerctrl_mask) {
  9706. addr = dhd->sssr_reg_info.chipcommon_regs.base_regs.powerctrl;
  9707. val = 0;
  9708. dhd_sbreg_op(dhd, addr, &val, FALSE);
  9709. }
  9710. return reg_val;
  9711. }
  9712. static int
  9713. dhdpcie_clear_intmask_and_timer(dhd_pub_t *dhd)
  9714. {
  9715. uint addr;
  9716. uint val;
  9717. DHD_ERROR(("%s\n", __FUNCTION__));
  9718. /* clear chipcommon intmask */
  9719. addr = dhd->sssr_reg_info.chipcommon_regs.base_regs.intmask;
  9720. val = 0x0;
  9721. dhd_sbreg_op(dhd, addr, &val, FALSE);
  9722. /* clear PMUIntMask0 */
  9723. addr = dhd->sssr_reg_info.pmu_regs.base_regs.pmuintmask0;
  9724. val = 0x0;
  9725. dhd_sbreg_op(dhd, addr, &val, FALSE);
  9726. /* clear PMUIntMask1 */
  9727. addr = dhd->sssr_reg_info.pmu_regs.base_regs.pmuintmask1;
  9728. val = 0x0;
  9729. dhd_sbreg_op(dhd, addr, &val, FALSE);
  9730. /* clear res_req_timer */
  9731. addr = dhd->sssr_reg_info.pmu_regs.base_regs.resreqtimer;
  9732. val = 0x0;
  9733. dhd_sbreg_op(dhd, addr, &val, FALSE);
  9734. /* clear macresreqtimer */
  9735. addr = dhd->sssr_reg_info.pmu_regs.base_regs.macresreqtimer;
  9736. val = 0x0;
  9737. dhd_sbreg_op(dhd, addr, &val, FALSE);
  9738. /* clear macresreqtimer1 */
  9739. addr = dhd->sssr_reg_info.pmu_regs.base_regs.macresreqtimer1;
  9740. val = 0x0;
  9741. dhd_sbreg_op(dhd, addr, &val, FALSE);
  9742. /* clear VasipClkEn */
  9743. if (dhd->sssr_reg_info.vasip_regs.vasip_sr_size) {
  9744. addr = dhd->sssr_reg_info.vasip_regs.wrapper_regs.ioctrl;
  9745. val = 0x0;
  9746. dhd_sbreg_op(dhd, addr, &val, FALSE);
  9747. }
  9748. return BCME_OK;
  9749. }
  9750. static void
  9751. dhdpcie_update_d11_status_from_trapdata(dhd_pub_t *dhd)
  9752. {
  9753. #define TRAP_DATA_MAIN_CORE_BIT_MASK (1 << 1)
  9754. #define TRAP_DATA_AUX_CORE_BIT_MASK (1 << 4)
  9755. uint trap_data_mask[MAX_NUM_D11CORES] =
  9756. {TRAP_DATA_MAIN_CORE_BIT_MASK, TRAP_DATA_AUX_CORE_BIT_MASK};
  9757. int i;
  9758. /* Apply only for 4375 chip */
  9759. if (dhd_bus_chip_id(dhd) == BCM4375_CHIP_ID) {
  9760. for (i = 0; i < MAX_NUM_D11CORES; i++) {
  9761. if (dhd->sssr_d11_outofreset[i] &&
  9762. (dhd->dongle_trap_data & trap_data_mask[i])) {
  9763. dhd->sssr_d11_outofreset[i] = TRUE;
  9764. } else {
  9765. dhd->sssr_d11_outofreset[i] = FALSE;
  9766. }
  9767. DHD_ERROR(("%s: sssr_d11_outofreset[%d] : %d after AND with "
  9768. "trap_data:0x%x-0x%x\n",
  9769. __FUNCTION__, i, dhd->sssr_d11_outofreset[i],
  9770. dhd->dongle_trap_data, trap_data_mask[i]));
  9771. }
  9772. }
  9773. }
  9774. static int
  9775. dhdpcie_d11_check_outofreset(dhd_pub_t *dhd)
  9776. {
  9777. int i;
  9778. uint addr;
  9779. uint val = 0;
  9780. DHD_ERROR(("%s\n", __FUNCTION__));
  9781. for (i = 0; i < MAX_NUM_D11CORES; i++) {
  9782. /* Check if bit 0 of resetctrl is cleared */
  9783. addr = dhd->sssr_reg_info.mac_regs[i].wrapper_regs.resetctrl;
  9784. if (!addr) {
  9785. DHD_ERROR(("%s: skipping for core[%d] as 'addr' is NULL\n",
  9786. __FUNCTION__, i));
  9787. continue;
  9788. }
  9789. dhd_sbreg_op(dhd, addr, &val, TRUE);
  9790. if (!(val & 1)) {
  9791. dhd->sssr_d11_outofreset[i] = TRUE;
  9792. } else {
  9793. dhd->sssr_d11_outofreset[i] = FALSE;
  9794. }
  9795. DHD_ERROR(("%s: sssr_d11_outofreset[%d] : %d\n",
  9796. __FUNCTION__, i, dhd->sssr_d11_outofreset[i]));
  9797. }
  9798. dhdpcie_update_d11_status_from_trapdata(dhd);
  9799. return BCME_OK;
  9800. }
  9801. static int
  9802. dhdpcie_d11_clear_clk_req(dhd_pub_t *dhd)
  9803. {
  9804. int i;
  9805. uint addr;
  9806. uint val = 0;
  9807. DHD_ERROR(("%s\n", __FUNCTION__));
  9808. for (i = 0; i < MAX_NUM_D11CORES; i++) {
  9809. if (dhd->sssr_d11_outofreset[i]) {
/* clear the clock request only if itopoobb is non-zero */
  9811. addr = dhd->sssr_reg_info.mac_regs[i].wrapper_regs.itopoobb;
  9812. dhd_sbreg_op(dhd, addr, &val, TRUE);
  9813. if (val != 0) {
  9814. /* clear clockcontrolstatus */
  9815. addr = dhd->sssr_reg_info.mac_regs[i].base_regs.clockcontrolstatus;
  9816. val =
  9817. dhd->sssr_reg_info.mac_regs[i].base_regs.clockcontrolstatus_val;
  9818. dhd_sbreg_op(dhd, addr, &val, FALSE);
  9819. }
  9820. }
  9821. }
  9822. return BCME_OK;
  9823. }
  9824. static int
  9825. dhdpcie_arm_clear_clk_req(dhd_pub_t *dhd)
  9826. {
  9827. uint addr;
  9828. uint val = 0;
  9829. DHD_ERROR(("%s\n", __FUNCTION__));
  9830. /* Check if bit 0 of resetctrl is cleared */
  9831. addr = dhd->sssr_reg_info.arm_regs.wrapper_regs.resetctrl;
  9832. dhd_sbreg_op(dhd, addr, &val, TRUE);
  9833. if (!(val & 1)) {
/* clear the clock request only if itopoobb is non-zero */
  9835. addr = dhd->sssr_reg_info.arm_regs.wrapper_regs.itopoobb;
  9836. dhd_sbreg_op(dhd, addr, &val, TRUE);
  9837. if (val != 0) {
  9838. /* clear clockcontrolstatus */
  9839. addr = dhd->sssr_reg_info.arm_regs.base_regs.clockcontrolstatus;
  9840. val = dhd->sssr_reg_info.arm_regs.base_regs.clockcontrolstatus_val;
  9841. dhd_sbreg_op(dhd, addr, &val, FALSE);
  9842. }
  9843. }
  9844. return BCME_OK;
  9845. }
  9846. static int
  9847. dhdpcie_pcie_clear_clk_req(dhd_pub_t *dhd)
  9848. {
  9849. uint addr;
  9850. uint val = 0;
  9851. DHD_ERROR(("%s\n", __FUNCTION__));
/* clear the clock request only if itopoobb is non-zero */
  9853. addr = dhd->sssr_reg_info.pcie_regs.wrapper_regs.itopoobb;
  9854. dhd_sbreg_op(dhd, addr, &val, TRUE);
  9855. if (val) {
  9856. /* clear clockcontrolstatus */
  9857. addr = dhd->sssr_reg_info.pcie_regs.base_regs.clockcontrolstatus;
  9858. val = dhd->sssr_reg_info.pcie_regs.base_regs.clockcontrolstatus_val;
  9859. dhd_sbreg_op(dhd, addr, &val, FALSE);
  9860. }
  9861. return BCME_OK;
  9862. }
  9863. static int
  9864. dhdpcie_pcie_send_ltrsleep(dhd_pub_t *dhd)
  9865. {
  9866. uint addr;
  9867. uint val = 0;
  9868. DHD_ERROR(("%s\n", __FUNCTION__));
  9869. addr = dhd->sssr_reg_info.pcie_regs.base_regs.ltrstate;
  9870. val = LTR_ACTIVE;
  9871. dhd_sbreg_op(dhd, addr, &val, FALSE);
  9872. val = LTR_SLEEP;
  9873. dhd_sbreg_op(dhd, addr, &val, FALSE);
  9874. return BCME_OK;
  9875. }
  9876. static int
  9877. dhdpcie_clear_clk_req(dhd_pub_t *dhd)
  9878. {
  9879. DHD_ERROR(("%s\n", __FUNCTION__));
  9880. dhdpcie_arm_clear_clk_req(dhd);
  9881. dhdpcie_d11_clear_clk_req(dhd);
  9882. dhdpcie_pcie_clear_clk_req(dhd);
  9883. return BCME_OK;
  9884. }
  9885. static int
  9886. dhdpcie_bring_d11_outofreset(dhd_pub_t *dhd)
  9887. {
  9888. int i;
  9889. uint addr;
  9890. uint val = 0;
  9891. DHD_ERROR(("%s\n", __FUNCTION__));
  9892. for (i = 0; i < MAX_NUM_D11CORES; i++) {
  9893. if (dhd->sssr_d11_outofreset[i]) {
  9894. /* disable core by setting bit 0 */
  9895. addr = dhd->sssr_reg_info.mac_regs[i].wrapper_regs.resetctrl;
  9896. val = 1;
  9897. dhd_sbreg_op(dhd, addr, &val, FALSE);
  9898. OSL_DELAY(6000);
  9899. addr = dhd->sssr_reg_info.mac_regs[i].wrapper_regs.ioctrl;
  9900. val = dhd->sssr_reg_info.mac_regs[0].wrapper_regs.ioctrl_resetseq_val[0];
  9901. dhd_sbreg_op(dhd, addr, &val, FALSE);
  9902. val = dhd->sssr_reg_info.mac_regs[0].wrapper_regs.ioctrl_resetseq_val[1];
  9903. dhd_sbreg_op(dhd, addr, &val, FALSE);
  9904. /* enable core by clearing bit 0 */
  9905. addr = dhd->sssr_reg_info.mac_regs[i].wrapper_regs.resetctrl;
  9906. val = 0;
  9907. dhd_sbreg_op(dhd, addr, &val, FALSE);
  9908. addr = dhd->sssr_reg_info.mac_regs[i].wrapper_regs.ioctrl;
  9909. val = dhd->sssr_reg_info.mac_regs[0].wrapper_regs.ioctrl_resetseq_val[2];
  9910. dhd_sbreg_op(dhd, addr, &val, FALSE);
  9911. val = dhd->sssr_reg_info.mac_regs[0].wrapper_regs.ioctrl_resetseq_val[3];
  9912. dhd_sbreg_op(dhd, addr, &val, FALSE);
  9913. val = dhd->sssr_reg_info.mac_regs[0].wrapper_regs.ioctrl_resetseq_val[4];
  9914. dhd_sbreg_op(dhd, addr, &val, FALSE);
  9915. }
  9916. }
  9917. return BCME_OK;
  9918. }
  9919. static int
  9920. dhdpcie_sssr_dump_get_before_sr(dhd_pub_t *dhd)
  9921. {
  9922. int i;
  9923. DHD_ERROR(("%s\n", __FUNCTION__));
  9924. for (i = 0; i < MAX_NUM_D11CORES; i++) {
  9925. if (dhd->sssr_d11_outofreset[i]) {
  9926. dhdpcie_get_sssr_fifo_dump(dhd, dhd->sssr_d11_before[i],
  9927. dhd->sssr_reg_info.mac_regs[i].sr_size,
  9928. dhd->sssr_reg_info.mac_regs[i].base_regs.xmtaddress,
  9929. dhd->sssr_reg_info.mac_regs[i].base_regs.xmtdata);
  9930. }
  9931. }
  9932. if (dhd->sssr_reg_info.vasip_regs.vasip_sr_size) {
  9933. dhdpcie_get_sssr_dig_dump(dhd, dhd->sssr_dig_buf_before,
  9934. dhd->sssr_reg_info.vasip_regs.vasip_sr_size,
  9935. dhd->sssr_reg_info.vasip_regs.vasip_sr_addr);
  9936. } else if ((dhd->sssr_reg_info.length > OFFSETOF(sssr_reg_info_v1_t, dig_mem_info)) &&
  9937. dhd->sssr_reg_info.dig_mem_info.dig_sr_addr) {
  9938. dhdpcie_get_sssr_dig_dump(dhd, dhd->sssr_dig_buf_before,
  9939. dhd->sssr_reg_info.dig_mem_info.dig_sr_size,
  9940. dhd->sssr_reg_info.dig_mem_info.dig_sr_addr);
  9941. }
  9942. return BCME_OK;
  9943. }
  9944. static int
  9945. dhdpcie_sssr_dump_get_after_sr(dhd_pub_t *dhd)
  9946. {
  9947. int i;
  9948. DHD_ERROR(("%s\n", __FUNCTION__));
  9949. for (i = 0; i < MAX_NUM_D11CORES; i++) {
  9950. if (dhd->sssr_d11_outofreset[i]) {
  9951. dhdpcie_get_sssr_fifo_dump(dhd, dhd->sssr_d11_after[i],
  9952. dhd->sssr_reg_info.mac_regs[i].sr_size,
  9953. dhd->sssr_reg_info.mac_regs[i].base_regs.xmtaddress,
  9954. dhd->sssr_reg_info.mac_regs[i].base_regs.xmtdata);
  9955. }
  9956. }
  9957. if (dhd->sssr_reg_info.vasip_regs.vasip_sr_size) {
  9958. dhdpcie_get_sssr_dig_dump(dhd, dhd->sssr_dig_buf_after,
  9959. dhd->sssr_reg_info.vasip_regs.vasip_sr_size,
  9960. dhd->sssr_reg_info.vasip_regs.vasip_sr_addr);
  9961. } else if ((dhd->sssr_reg_info.length > OFFSETOF(sssr_reg_info_v1_t, dig_mem_info)) &&
  9962. dhd->sssr_reg_info.dig_mem_info.dig_sr_addr) {
  9963. dhdpcie_get_sssr_dig_dump(dhd, dhd->sssr_dig_buf_after,
  9964. dhd->sssr_reg_info.dig_mem_info.dig_sr_size,
  9965. dhd->sssr_reg_info.dig_mem_info.dig_sr_addr);
  9966. }
  9967. return BCME_OK;
  9968. }
  9969. int
  9970. dhdpcie_sssr_dump(dhd_pub_t *dhd)
  9971. {
  9972. uint32 powerctrl_val;
  9973. if (!dhd->sssr_inited) {
  9974. DHD_ERROR(("%s: SSSR not inited\n", __FUNCTION__));
  9975. return BCME_ERROR;
  9976. }
  9977. if (dhd->bus->is_linkdown) {
  9978. DHD_ERROR(("%s: PCIe link is down\n", __FUNCTION__));
  9979. return BCME_ERROR;
  9980. }
  9981. dhdpcie_d11_check_outofreset(dhd);
  9982. DHD_ERROR(("%s: Collecting Dump before SR\n", __FUNCTION__));
  9983. if (dhdpcie_sssr_dump_get_before_sr(dhd) != BCME_OK) {
  9984. DHD_ERROR(("%s: dhdpcie_sssr_dump_get_before_sr failed\n", __FUNCTION__));
  9985. return BCME_ERROR;
  9986. }
  9987. dhdpcie_clear_intmask_and_timer(dhd);
  9988. powerctrl_val = dhdpcie_suspend_chipcommon_powerctrl(dhd);
  9989. dhdpcie_clear_clk_req(dhd);
  9990. dhdpcie_pcie_send_ltrsleep(dhd);
  9991. /* Wait for some time before Restore */
  9992. OSL_DELAY(6000);
  9993. dhdpcie_resume_chipcommon_powerctrl(dhd, powerctrl_val);
  9994. dhdpcie_bring_d11_outofreset(dhd);
  9995. DHD_ERROR(("%s: Collecting Dump after SR\n", __FUNCTION__));
  9996. if (dhdpcie_sssr_dump_get_after_sr(dhd) != BCME_OK) {
  9997. DHD_ERROR(("%s: dhdpcie_sssr_dump_get_after_sr failed\n", __FUNCTION__));
  9998. return BCME_ERROR;
  9999. }
  10000. dhd->sssr_dump_collected = TRUE;
  10001. dhd_write_sssr_dump(dhd, SSSR_DUMP_MODE_SSSR);
  10002. return BCME_OK;
  10003. }
  10004. static int
  10005. dhdpcie_fis_trigger(dhd_pub_t *dhd)
  10006. {
  10007. if (!dhd->sssr_inited) {
  10008. DHD_ERROR(("%s: SSSR not inited\n", __FUNCTION__));
  10009. return BCME_ERROR;
  10010. }
  10011. if (dhd->bus->is_linkdown) {
  10012. DHD_ERROR(("%s: PCIe link is down\n", __FUNCTION__));
  10013. return BCME_ERROR;
  10014. }
  10015. /* Trigger FIS */
  10016. si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx,
  10017. DAR_FIS_CTRL(dhd->bus->sih->buscorerev), ~0, DAR_FIS_START_MASK);
  10018. OSL_DELAY(100 * 1000);
  10019. return BCME_OK;
  10020. }
  10021. int
  10022. dhd_bus_fis_trigger(dhd_pub_t *dhd)
  10023. {
  10024. return dhdpcie_fis_trigger(dhd);
  10025. }
  10026. static int
  10027. dhdpcie_fis_dump(dhd_pub_t *dhd)
  10028. {
  10029. int i;
  10030. if (!dhd->sssr_inited) {
  10031. DHD_ERROR(("%s: SSSR not inited\n", __FUNCTION__));
  10032. return BCME_ERROR;
  10033. }
  10034. if (dhd->bus->is_linkdown) {
  10035. DHD_ERROR(("%s: PCIe link is down\n", __FUNCTION__));
  10036. return BCME_ERROR;
  10037. }
  10038. /* bring up all pmu resources */
  10039. PMU_REG(dhd->bus->sih, min_res_mask, ~0,
  10040. PMU_REG(dhd->bus->sih, max_res_mask, 0, 0));
  10041. OSL_DELAY(10 * 1000);
  10042. for (i = 0; i < MAX_NUM_D11CORES; i++) {
  10043. dhd->sssr_d11_outofreset[i] = TRUE;
  10044. }
  10045. dhdpcie_bring_d11_outofreset(dhd);
  10046. OSL_DELAY(6000);
  10047. /* clear FIS Done */
  10048. PMU_REG(dhd->bus->sih, fis_ctrl_status, PMU_CLEAR_FIS_DONE_MASK, PMU_CLEAR_FIS_DONE_MASK);
  10049. dhdpcie_d11_check_outofreset(dhd);
  10050. DHD_ERROR(("%s: Collecting Dump after SR\n", __FUNCTION__));
  10051. if (dhdpcie_sssr_dump_get_after_sr(dhd) != BCME_OK) {
  10052. DHD_ERROR(("%s: dhdpcie_sssr_dump_get_after_sr failed\n", __FUNCTION__));
  10053. return BCME_ERROR;
  10054. }
  10055. dhd_write_sssr_dump(dhd, SSSR_DUMP_MODE_FIS);
  10056. return BCME_OK;
  10057. }
  10058. int
  10059. dhd_bus_fis_dump(dhd_pub_t *dhd)
  10060. {
  10061. return dhdpcie_fis_dump(dhd);
  10062. }
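/*
 * Illustrative FIS flow (sketch, not compiled): trigger the FIS save
 * through the DAR register, then harvest the post-SR state.
 * example_fis_collect is a hypothetical name.
 */
#if 0
static void
example_fis_collect(dhd_pub_t *dhd)
{
	if (dhd_bus_fis_trigger(dhd) == BCME_OK)
		(void)dhd_bus_fis_dump(dhd);
}
#endif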
  10063. #endif /* DHD_SSSR_DUMP */
  10064. #ifdef DHD_WAKE_STATUS
  10065. wake_counts_t*
  10066. dhd_bus_get_wakecount(dhd_pub_t *dhd)
  10067. {
  10068. return &dhd->bus->wake_counts;
  10069. }
  10070. int
  10071. dhd_bus_get_bus_wake(dhd_pub_t *dhd)
  10072. {
  10073. return bcmpcie_set_get_wake(dhd->bus, 0);
  10074. }
  10075. #endif /* DHD_WAKE_STATUS */
/* Writes random number(s) to the TCM. On initialization the firmware reads
 * this TCM location to fetch the random number and uses it to randomize its
 * heap address space layout.
 */
  10079. static int
  10080. dhdpcie_wrt_rnd(struct dhd_bus *bus)
  10081. {
  10082. bcm_rand_metadata_t rnd_data;
  10083. uint8 rand_buf[BCM_ENTROPY_HOST_NBYTES];
  10084. uint32 count = BCM_ENTROPY_HOST_NBYTES;
  10085. int ret = 0;
uint32 addr = bus->dongle_ram_base + (bus->ramsize - BCM_NVRAM_OFFSET_TCM) -
((bus->nvram_csm & 0xffff) * BCM_NVRAM_IMG_COMPRS_FACTOR + sizeof(rnd_data));
  10088. memset(rand_buf, 0, BCM_ENTROPY_HOST_NBYTES);
  10089. rnd_data.signature = htol32(BCM_NVRAM_RNG_SIGNATURE);
  10090. rnd_data.count = htol32(count);
  10091. /* write the metadata about random number */
  10092. dhdpcie_bus_membytes(bus, TRUE, addr, (uint8 *)&rnd_data, sizeof(rnd_data));
  10093. /* scale back by number of random number counts */
  10094. addr -= count;
  10095. #ifdef DHD_RND_DEBUG
  10096. bus->dhd->rnd_buf = NULL;
  10097. /* get random contents from file */
  10098. ret = dhd_get_rnd_info(bus->dhd);
  10099. if (bus->dhd->rnd_buf) {
  10100. /* write file contents to TCM */
  10101. DHD_ERROR(("%s: use stored .rnd.in content\n", __FUNCTION__));
  10102. dhdpcie_bus_membytes(bus, TRUE, addr, bus->dhd->rnd_buf, bus->dhd->rnd_len);
  10103. /* Dump random content to out file */
  10104. dhd_dump_rnd_info(bus->dhd, bus->dhd->rnd_buf, bus->dhd->rnd_len);
  10105. /* bus->dhd->rnd_buf is allocated in dhd_get_rnd_info, free here */
  10106. MFREE(bus->dhd->osh, bus->dhd->rnd_buf, bus->dhd->rnd_len);
  10107. bus->dhd->rnd_buf = NULL;
  10108. return BCME_OK;
  10109. }
  10110. #endif /* DHD_RND_DEBUG */
  10111. /* Now get & write the random number(s) */
  10112. ret = dhd_get_random_bytes(rand_buf, count);
  10113. if (ret != BCME_OK) {
  10114. return ret;
  10115. }
  10116. dhdpcie_bus_membytes(bus, TRUE, addr, rand_buf, count);
  10117. #ifdef DHD_RND_DEBUG
  10118. /* Dump random content to out file */
  10119. dhd_dump_rnd_info(bus->dhd, rand_buf, count);
  10120. #endif /* DHD_RND_DEBUG */
  10121. return BCME_OK;
  10122. }
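/*
 * Resulting TCM layout (illustrative; the exact offsets depend on
 * BCM_NVRAM_OFFSET_TCM and the compressed NVRAM size):
 *
 *   addr - count .. addr - 1 : 'count' random bytes (rand_buf)
 *   addr ..                  : bcm_rand_metadata_t { signature, count }
 */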
  10123. void
  10124. dhd_pcie_intr_count_dump(dhd_pub_t *dhd)
  10125. {
  10126. struct dhd_bus *bus = dhd->bus;
  10127. uint64 current_time;
  10128. DHD_ERROR(("\n ------- DUMPING INTR enable/disable counters ------- \r\n"));
  10129. DHD_ERROR(("resume_intr_enable_count=%lu dpc_intr_enable_count=%lu\n",
  10130. bus->resume_intr_enable_count, bus->dpc_intr_enable_count));
  10131. DHD_ERROR(("isr_intr_disable_count=%lu suspend_intr_disable_count=%lu\n",
  10132. bus->isr_intr_disable_count, bus->suspend_intr_disable_count));
  10133. #ifdef BCMPCIE_OOB_HOST_WAKE
  10134. DHD_ERROR(("oob_intr_count=%lu oob_intr_enable_count=%lu oob_intr_disable_count=%lu\n",
  10135. bus->oob_intr_count, bus->oob_intr_enable_count,
  10136. bus->oob_intr_disable_count));
  10137. DHD_ERROR(("oob_irq_num=%d last_oob_irq_time="SEC_USEC_FMT"\n",
  10138. dhdpcie_get_oob_irq_num(bus),
  10139. GET_SEC_USEC(bus->last_oob_irq_time)));
  10140. DHD_ERROR(("last_oob_irq_enable_time="SEC_USEC_FMT
  10141. " last_oob_irq_disable_time="SEC_USEC_FMT"\n",
  10142. GET_SEC_USEC(bus->last_oob_irq_enable_time),
  10143. GET_SEC_USEC(bus->last_oob_irq_disable_time)));
  10144. DHD_ERROR(("oob_irq_enabled=%d oob_gpio_level=%d\n",
  10145. dhdpcie_get_oob_irq_status(bus),
  10146. dhdpcie_get_oob_irq_level()));
  10147. #endif /* BCMPCIE_OOB_HOST_WAKE */
  10148. DHD_ERROR(("dpc_return_busdown_count=%lu non_ours_irq_count=%lu\n",
  10149. bus->dpc_return_busdown_count, bus->non_ours_irq_count));
  10150. current_time = OSL_LOCALTIME_NS();
  10151. DHD_ERROR(("\ncurrent_time="SEC_USEC_FMT"\n",
  10152. GET_SEC_USEC(current_time)));
  10153. DHD_ERROR(("isr_entry_time="SEC_USEC_FMT
  10154. " isr_exit_time="SEC_USEC_FMT"\n",
  10155. GET_SEC_USEC(bus->isr_entry_time),
  10156. GET_SEC_USEC(bus->isr_exit_time)));
  10157. DHD_ERROR(("dpc_sched_time="SEC_USEC_FMT
  10158. " last_non_ours_irq_time="SEC_USEC_FMT"\n",
  10159. GET_SEC_USEC(bus->dpc_sched_time),
  10160. GET_SEC_USEC(bus->last_non_ours_irq_time)));
  10161. DHD_ERROR(("dpc_entry_time="SEC_USEC_FMT
  10162. " last_process_ctrlbuf_time="SEC_USEC_FMT"\n",
  10163. GET_SEC_USEC(bus->dpc_entry_time),
  10164. GET_SEC_USEC(bus->last_process_ctrlbuf_time)));
  10165. DHD_ERROR(("last_process_flowring_time="SEC_USEC_FMT
  10166. " last_process_txcpl_time="SEC_USEC_FMT"\n",
  10167. GET_SEC_USEC(bus->last_process_flowring_time),
  10168. GET_SEC_USEC(bus->last_process_txcpl_time)));
  10169. DHD_ERROR(("last_process_rxcpl_time="SEC_USEC_FMT
  10170. " last_process_infocpl_time="SEC_USEC_FMT
  10171. " last_process_edl_time="SEC_USEC_FMT"\n",
  10172. GET_SEC_USEC(bus->last_process_rxcpl_time),
  10173. GET_SEC_USEC(bus->last_process_infocpl_time),
  10174. GET_SEC_USEC(bus->last_process_edl_time)));
  10175. DHD_ERROR(("dpc_exit_time="SEC_USEC_FMT
  10176. " resched_dpc_time="SEC_USEC_FMT"\n",
  10177. GET_SEC_USEC(bus->dpc_exit_time),
  10178. GET_SEC_USEC(bus->resched_dpc_time)));
  10179. DHD_ERROR(("last_d3_inform_time="SEC_USEC_FMT"\n",
  10180. GET_SEC_USEC(bus->last_d3_inform_time)));
  10181. DHD_ERROR(("\nlast_suspend_start_time="SEC_USEC_FMT
  10182. " last_suspend_end_time="SEC_USEC_FMT"\n",
  10183. GET_SEC_USEC(bus->last_suspend_start_time),
  10184. GET_SEC_USEC(bus->last_suspend_end_time)));
  10185. DHD_ERROR(("last_resume_start_time="SEC_USEC_FMT
  10186. " last_resume_end_time="SEC_USEC_FMT"\n",
  10187. GET_SEC_USEC(bus->last_resume_start_time),
  10188. GET_SEC_USEC(bus->last_resume_end_time)));
  10189. #if defined(SHOW_LOGTRACE) && defined(DHD_USE_KTHREAD_FOR_LOGTRACE)
  10190. DHD_ERROR(("logtrace_thread_entry_time="SEC_USEC_FMT
  10191. " logtrace_thread_sem_down_time="SEC_USEC_FMT
  10192. "\nlogtrace_thread_flush_time="SEC_USEC_FMT
  10193. " logtrace_thread_unexpected_break_time="SEC_USEC_FMT
  10194. "\nlogtrace_thread_complete_time="SEC_USEC_FMT"\n",
  10195. GET_SEC_USEC(dhd->logtrace_thr_ts.entry_time),
  10196. GET_SEC_USEC(dhd->logtrace_thr_ts.sem_down_time),
  10197. GET_SEC_USEC(dhd->logtrace_thr_ts.flush_time),
  10198. GET_SEC_USEC(dhd->logtrace_thr_ts.unexpected_break_time),
  10199. GET_SEC_USEC(dhd->logtrace_thr_ts.complete_time)));
  10200. #endif /* SHOW_LOGTRACE && DHD_USE_KTHREAD_FOR_LOGTRACE */
  10201. }
  10202. void
  10203. dhd_bus_intr_count_dump(dhd_pub_t *dhd)
  10204. {
  10205. dhd_pcie_intr_count_dump(dhd);
  10206. }
int
dhd_pcie_dump_wrapper_regs(dhd_pub_t *dhd)
{
	uint32 save_idx, val;
	si_t *sih = dhd->bus->sih;
	uint32 oob_base, oob_base1;
	uint32 wrapper_dump_list[] = {
		AI_OOBSELOUTA30, AI_OOBSELOUTA74, AI_OOBSELOUTB30, AI_OOBSELOUTB74,
		AI_OOBSELOUTC30, AI_OOBSELOUTC74, AI_OOBSELOUTD30, AI_OOBSELOUTD74,
		AI_RESETSTATUS, AI_RESETCTRL,
		AI_ITIPOOBA, AI_ITIPOOBB, AI_ITIPOOBC, AI_ITIPOOBD,
		AI_ITIPOOBAOUT, AI_ITIPOOBBOUT, AI_ITIPOOBCOUT, AI_ITIPOOBDOUT
	};
	uint8 i;
	hndoobr_reg_t *reg;
	cr4regs_t *cr4regs;
	ca7regs_t *ca7regs;

	save_idx = si_coreidx(sih);

	DHD_ERROR(("%s: Master wrapper Reg\n", __FUNCTION__));
	if (si_setcore(sih, PCIE2_CORE_ID, 0) != NULL) {
		for (i = 0; i < sizeof(wrapper_dump_list) / 4; i++) {
			val = si_wrapperreg(sih, wrapper_dump_list[i], 0, 0);
			DHD_ERROR(("sbreg: addr:0x%x val:0x%x\n", wrapper_dump_list[i], val));
		}
	}

	if ((cr4regs = si_setcore(sih, ARMCR4_CORE_ID, 0)) != NULL) {
		DHD_ERROR(("%s: ARM CR4 wrapper Reg\n", __FUNCTION__));
		for (i = 0; i < sizeof(wrapper_dump_list) / 4; i++) {
			val = si_wrapperreg(sih, wrapper_dump_list[i], 0, 0);
			DHD_ERROR(("sbreg: addr:0x%x val:0x%x\n", wrapper_dump_list[i], val));
		}
		DHD_ERROR(("%s: ARM CR4 core Reg\n", __FUNCTION__));
		val = R_REG(dhd->osh, ARM_CR4_REG(cr4regs, corecontrol));
		DHD_ERROR(("reg:0x%x val:0x%x\n", (uint)OFFSETOF(cr4regs_t, corecontrol), val));
		val = R_REG(dhd->osh, ARM_CR4_REG(cr4regs, corecapabilities));
		DHD_ERROR(("reg:0x%x val:0x%x\n",
			(uint)OFFSETOF(cr4regs_t, corecapabilities), val));
		val = R_REG(dhd->osh, ARM_CR4_REG(cr4regs, corestatus));
		DHD_ERROR(("reg:0x%x val:0x%x\n", (uint)OFFSETOF(cr4regs_t, corestatus), val));
		val = R_REG(dhd->osh, ARM_CR4_REG(cr4regs, nmiisrst));
		DHD_ERROR(("reg:0x%x val:0x%x\n", (uint)OFFSETOF(cr4regs_t, nmiisrst), val));
		val = R_REG(dhd->osh, ARM_CR4_REG(cr4regs, nmimask));
		DHD_ERROR(("reg:0x%x val:0x%x\n", (uint)OFFSETOF(cr4regs_t, nmimask), val));
		val = R_REG(dhd->osh, ARM_CR4_REG(cr4regs, isrmask));
		DHD_ERROR(("reg:0x%x val:0x%x\n", (uint)OFFSETOF(cr4regs_t, isrmask), val));
		val = R_REG(dhd->osh, ARM_CR4_REG(cr4regs, swintreg));
		DHD_ERROR(("reg:0x%x val:0x%x\n", (uint)OFFSETOF(cr4regs_t, swintreg), val));
		val = R_REG(dhd->osh, ARM_CR4_REG(cr4regs, intstatus));
		DHD_ERROR(("reg:0x%x val:0x%x\n", (uint)OFFSETOF(cr4regs_t, intstatus), val));
		val = R_REG(dhd->osh, ARM_CR4_REG(cr4regs, cyclecnt));
		DHD_ERROR(("reg:0x%x val:0x%x\n", (uint)OFFSETOF(cr4regs_t, cyclecnt), val));
		val = R_REG(dhd->osh, ARM_CR4_REG(cr4regs, inttimer));
		DHD_ERROR(("reg:0x%x val:0x%x\n", (uint)OFFSETOF(cr4regs_t, inttimer), val));
		val = R_REG(dhd->osh, ARM_CR4_REG(cr4regs, clk_ctl_st));
		DHD_ERROR(("reg:0x%x val:0x%x\n", (uint)OFFSETOF(cr4regs_t, clk_ctl_st), val));
		val = R_REG(dhd->osh, ARM_CR4_REG(cr4regs, powerctl));
		DHD_ERROR(("reg:0x%x val:0x%x\n", (uint)OFFSETOF(cr4regs_t, powerctl), val));
	}

	if ((ca7regs = si_setcore(sih, ARMCA7_CORE_ID, 0)) != NULL) {
		DHD_ERROR(("%s: ARM CA7 core Reg\n", __FUNCTION__));
		val = R_REG(dhd->osh, ARM_CA7_REG(ca7regs, corecontrol));
		DHD_ERROR(("reg:0x%x val:0x%x\n", (uint)OFFSETOF(ca7regs_t, corecontrol), val));
		val = R_REG(dhd->osh, ARM_CA7_REG(ca7regs, corecapabilities));
		DHD_ERROR(("reg:0x%x val:0x%x\n",
			(uint)OFFSETOF(ca7regs_t, corecapabilities), val));
		val = R_REG(dhd->osh, ARM_CA7_REG(ca7regs, corestatus));
		DHD_ERROR(("reg:0x%x val:0x%x\n", (uint)OFFSETOF(ca7regs_t, corestatus), val));
		val = R_REG(dhd->osh, ARM_CA7_REG(ca7regs, tracecontrol));
		DHD_ERROR(("reg:0x%x val:0x%x\n", (uint)OFFSETOF(ca7regs_t, tracecontrol), val));
		val = R_REG(dhd->osh, ARM_CA7_REG(ca7regs, clk_ctl_st));
		DHD_ERROR(("reg:0x%x val:0x%x\n", (uint)OFFSETOF(ca7regs_t, clk_ctl_st), val));
		val = R_REG(dhd->osh, ARM_CA7_REG(ca7regs, powerctl));
		DHD_ERROR(("reg:0x%x val:0x%x\n", (uint)OFFSETOF(ca7regs_t, powerctl), val));
	}

	DHD_ERROR(("%s: OOBR Reg\n", __FUNCTION__));

	oob_base = si_oobr_baseaddr(sih, FALSE);
	oob_base1 = si_oobr_baseaddr(sih, TRUE);
	if (oob_base) {
		dhd_sbreg_op(dhd, oob_base + OOB_STATUSA, &val, TRUE);
		dhd_sbreg_op(dhd, oob_base + OOB_STATUSB, &val, TRUE);
		dhd_sbreg_op(dhd, oob_base + OOB_STATUSC, &val, TRUE);
		dhd_sbreg_op(dhd, oob_base + OOB_STATUSD, &val, TRUE);
	} else if ((reg = si_setcore(sih, HND_OOBR_CORE_ID, 0)) != NULL) {
		val = R_REG(dhd->osh, &reg->intstatus[0]);
		DHD_ERROR(("reg: addr:%p val:0x%x\n", &reg->intstatus[0], val));
		val = R_REG(dhd->osh, &reg->intstatus[1]);
		DHD_ERROR(("reg: addr:%p val:0x%x\n", &reg->intstatus[1], val));
		val = R_REG(dhd->osh, &reg->intstatus[2]);
		DHD_ERROR(("reg: addr:%p val:0x%x\n", &reg->intstatus[2], val));
		val = R_REG(dhd->osh, &reg->intstatus[3]);
		DHD_ERROR(("reg: addr:%p val:0x%x\n", &reg->intstatus[3], val));
	}

	if (oob_base1) {
		DHD_ERROR(("%s: Second OOBR Reg\n", __FUNCTION__));

		dhd_sbreg_op(dhd, oob_base1 + OOB_STATUSA, &val, TRUE);
		dhd_sbreg_op(dhd, oob_base1 + OOB_STATUSB, &val, TRUE);
		dhd_sbreg_op(dhd, oob_base1 + OOB_STATUSC, &val, TRUE);
		dhd_sbreg_op(dhd, oob_base1 + OOB_STATUSD, &val, TRUE);
	}

	si_setcoreidx(dhd->bus->sih, save_idx);

	return 0;
}
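
/*
 * Dump the PCIe core's DMA engine registers. The raw offsets below
 * (0x200../0x220.. for the host-to-device pair, 0x240../0x260.. for
 * device-to-host) appear to address the per-engine transmit/receive
 * register blocks within the core; the labels in the log strings
 * (XmtCtrl, RcvPtr, ...) follow that reading. Skipped entirely once the
 * link is flagged down, since reads over a dead link return junk
 * (typically all-ones).
 */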
int
dhd_pcie_dma_info_dump(dhd_pub_t *dhd)
{
	if (dhd->bus->is_linkdown) {
		DHD_ERROR(("\n ------- SKIP DUMPING DMA Registers "
			"due to PCIe link down ------- \r\n"));
		return 0;
	}

	DHD_ERROR(("\n ------- DUMPING DMA Registers ------- \r\n"));

	/* HostToDev */
	DHD_ERROR(("HostToDev TX: XmtCtrl=0x%08x XmtPtr=0x%08x\n",
		si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx, 0x200, 0, 0),
		si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx, 0x204, 0, 0)));
	DHD_ERROR(("            : XmtAddrLow=0x%08x XmtAddrHigh=0x%08x\n",
		si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx, 0x208, 0, 0),
		si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx, 0x20C, 0, 0)));
	DHD_ERROR(("            : XmtStatus0=0x%08x XmtStatus1=0x%08x\n",
		si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx, 0x210, 0, 0),
		si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx, 0x214, 0, 0)));
	DHD_ERROR(("HostToDev RX: RcvCtrl=0x%08x RcvPtr=0x%08x\n",
		si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx, 0x220, 0, 0),
		si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx, 0x224, 0, 0)));
	DHD_ERROR(("            : RcvAddrLow=0x%08x RcvAddrHigh=0x%08x\n",
		si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx, 0x228, 0, 0),
		si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx, 0x22C, 0, 0)));
	DHD_ERROR(("            : RcvStatus0=0x%08x RcvStatus1=0x%08x\n",
		si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx, 0x230, 0, 0),
		si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx, 0x234, 0, 0)));

	/* DevToHost */
	DHD_ERROR(("DevToHost TX: XmtCtrl=0x%08x XmtPtr=0x%08x\n",
		si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx, 0x240, 0, 0),
		si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx, 0x244, 0, 0)));
	DHD_ERROR(("            : XmtAddrLow=0x%08x XmtAddrHigh=0x%08x\n",
		si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx, 0x248, 0, 0),
		si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx, 0x24C, 0, 0)));
	DHD_ERROR(("            : XmtStatus0=0x%08x XmtStatus1=0x%08x\n",
		si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx, 0x250, 0, 0),
		si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx, 0x254, 0, 0)));
	DHD_ERROR(("DevToHost RX: RcvCtrl=0x%08x RcvPtr=0x%08x\n",
		si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx, 0x260, 0, 0),
		si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx, 0x264, 0, 0)));
	DHD_ERROR(("            : RcvAddrLow=0x%08x RcvAddrHigh=0x%08x\n",
		si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx, 0x268, 0, 0),
		si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx, 0x26C, 0, 0)));
	DHD_ERROR(("            : RcvStatus0=0x%08x RcvStatus1=0x%08x\n",
		si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx, 0x270, 0, 0),
		si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx, 0x274, 0, 0)));

	return 0;
}
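
/*
 * Dump the PCIe mailbox interrupt status/mask, the D2H doorbell and the
 * shared-memory D2H mailbox data. Each register read is checked against
 * 0xFFFFFFFF: an all-ones value is taken as evidence that the device has
 * dropped off the bus, and FALSE is returned so the caller can stop
 * dumping further registers.
 */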
bool
dhd_pcie_dump_int_regs(dhd_pub_t *dhd)
{
	uint32 intstatus = 0;
	uint32 intmask = 0;
	uint32 d2h_db0 = 0;
	uint32 d2h_mb_data = 0;

	DHD_ERROR(("\n ------- DUMPING INTR Status and Masks ------- \r\n"));
	intstatus = si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx,
		dhd->bus->pcie_mailbox_int, 0, 0);
	if (intstatus == (uint32)-1) {
		DHD_ERROR(("intstatus=0x%x \n", intstatus));
		return FALSE;
	}

	intmask = si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx,
		dhd->bus->pcie_mailbox_mask, 0, 0);
	if (intmask == (uint32)-1) {
		DHD_ERROR(("intstatus=0x%x intmask=0x%x \n", intstatus, intmask));
		return FALSE;
	}

	d2h_db0 = si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx,
		PCID2H_MailBox, 0, 0);
	if (d2h_db0 == (uint32)-1) {
		DHD_ERROR(("intstatus=0x%x intmask=0x%x d2h_db0=0x%x\n",
			intstatus, intmask, d2h_db0));
		return FALSE;
	}

	DHD_ERROR(("intstatus=0x%x intmask=0x%x d2h_db0=0x%x\n",
		intstatus, intmask, d2h_db0));
	dhd_bus_cmn_readshared(dhd->bus, &d2h_mb_data, D2H_MB_DATA, 0);
	DHD_ERROR(("d2h_mb_data=0x%x def_intmask=0x%x \r\n", d2h_mb_data,
		dhd->bus->def_intmask));

	return TRUE;
}
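
/*
 * Dump the AER Uncorrectable Error Status register (and, when
 * EXTENDED_PCIE_DEBUG_DUMP is defined, the AER header logs) from the root
 * complex's extended capability space.
 */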
void
dhd_pcie_dump_rc_conf_space_cap(dhd_pub_t *dhd)
{
	DHD_ERROR(("\n ------- DUMPING PCIE RC config space Registers ------- \r\n"));
	DHD_ERROR(("Pcie RC Uncorrectable Error Status Val=0x%x\n",
		dhdpcie_rc_access_cap(dhd->bus, PCIE_EXTCAP_ID_ERR,
		PCIE_EXTCAP_AER_UCERR_OFFSET, TRUE, FALSE, 0)));
#ifdef EXTENDED_PCIE_DEBUG_DUMP
	DHD_ERROR(("hdrlog0 =0x%08x hdrlog1 =0x%08x hdrlog2 =0x%08x hdrlog3 =0x%08x\n",
		dhdpcie_rc_access_cap(dhd->bus, PCIE_EXTCAP_ID_ERR,
		PCIE_EXTCAP_ERR_HEADER_LOG_0, TRUE, FALSE, 0),
		dhdpcie_rc_access_cap(dhd->bus, PCIE_EXTCAP_ID_ERR,
		PCIE_EXTCAP_ERR_HEADER_LOG_1, TRUE, FALSE, 0),
		dhdpcie_rc_access_cap(dhd->bus, PCIE_EXTCAP_ID_ERR,
		PCIE_EXTCAP_ERR_HEADER_LOG_2, TRUE, FALSE, 0),
		dhdpcie_rc_access_cap(dhd->bus, PCIE_EXTCAP_ID_ERR,
		PCIE_EXTCAP_ERR_HEADER_LOG_3, TRUE, FALSE, 0)));
#endif /* EXTENDED_PCIE_DEBUG_DUMP */
}
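
/*
 * Top-level PCIe debug dump: host IRQ/tasklet state and interrupt counters
 * first (always safe), then EP/RC config space, then PCIe core and DMA
 * registers. Config and core accesses are skipped once the link is flagged
 * down, except when a CTO has been triggered (cto_triggered), in which case
 * the dump proceeds anyway.
 */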
int
dhd_pcie_debug_info_dump(dhd_pub_t *dhd)
{
	int host_irq_disabled;

	DHD_ERROR(("bus->bus_low_power_state = %d\n", dhd->bus->bus_low_power_state));
	host_irq_disabled = dhdpcie_irq_disabled(dhd->bus);
	DHD_ERROR(("host pcie_irq disabled = %d\n", host_irq_disabled));
	dhd_print_tasklet_status(dhd);
	dhd_pcie_intr_count_dump(dhd);
	DHD_ERROR(("\n ------- DUMPING PCIE EP Resource Info ------- \r\n"));
	dhdpcie_dump_resource(dhd->bus);

	dhd_pcie_dump_rc_conf_space_cap(dhd);

	DHD_ERROR(("RootPort PCIe linkcap=0x%08x\n",
		dhd_debug_get_rc_linkcap(dhd->bus)));

	if (dhd->bus->is_linkdown && !dhd->bus->cto_triggered) {
		DHD_ERROR(("Skip dumping the PCIe Config and Core registers. "
			"link may be DOWN\n"));
		return 0;
	}

	DHD_ERROR(("\n ------- DUMPING PCIE EP config space Registers ------- \r\n"));
	DHD_ERROR(("Status Command(0x%x)=0x%x, BaseAddress0(0x%x)=0x%x BaseAddress1(0x%x)=0x%x "
		"PCIE_CFG_PMCSR(0x%x)=0x%x\n",
		PCIECFGREG_STATUS_CMD,
		dhd_pcie_config_read(dhd->bus->osh, PCIECFGREG_STATUS_CMD, sizeof(uint32)),
		PCIECFGREG_BASEADDR0,
		dhd_pcie_config_read(dhd->bus->osh, PCIECFGREG_BASEADDR0, sizeof(uint32)),
		PCIECFGREG_BASEADDR1,
		dhd_pcie_config_read(dhd->bus->osh, PCIECFGREG_BASEADDR1, sizeof(uint32)),
		PCIE_CFG_PMCSR,
		dhd_pcie_config_read(dhd->bus->osh, PCIE_CFG_PMCSR, sizeof(uint32))));
	DHD_ERROR(("LinkCtl(0x%x)=0x%x DeviceStatusControl2(0x%x)=0x%x "
		"L1SSControl(0x%x)=0x%x\n", PCIECFGREG_LINK_STATUS_CTRL,
		dhd_pcie_config_read(dhd->bus->osh, PCIECFGREG_LINK_STATUS_CTRL,
		sizeof(uint32)), PCIECFGGEN_DEV_STATUS_CTRL2,
		dhd_pcie_config_read(dhd->bus->osh, PCIECFGGEN_DEV_STATUS_CTRL2,
		sizeof(uint32)), PCIECFGREG_PML1_SUB_CTRL1,
		dhd_pcie_config_read(dhd->bus->osh, PCIECFGREG_PML1_SUB_CTRL1,
		sizeof(uint32))));

#ifdef EXTENDED_PCIE_DEBUG_DUMP
	DHD_ERROR(("Pcie EP Uncorrectable Error Status Val=0x%x\n",
		dhdpcie_ep_access_cap(dhd->bus, PCIE_EXTCAP_ID_ERR,
		PCIE_EXTCAP_AER_UCERR_OFFSET, TRUE, FALSE, 0)));
	DHD_ERROR(("hdrlog0(0x%x)=0x%08x hdrlog1(0x%x)=0x%08x hdrlog2(0x%x)=0x%08x "
		"hdrlog3(0x%x)=0x%08x\n", PCI_TLP_HDR_LOG1,
		dhd_pcie_config_read(dhd->bus->osh, PCI_TLP_HDR_LOG1, sizeof(uint32)),
		PCI_TLP_HDR_LOG2,
		dhd_pcie_config_read(dhd->bus->osh, PCI_TLP_HDR_LOG2, sizeof(uint32)),
		PCI_TLP_HDR_LOG3,
		dhd_pcie_config_read(dhd->bus->osh, PCI_TLP_HDR_LOG3, sizeof(uint32)),
		PCI_TLP_HDR_LOG4,
		dhd_pcie_config_read(dhd->bus->osh, PCI_TLP_HDR_LOG4, sizeof(uint32))));
	if (dhd->bus->sih->buscorerev >= 24) {
		DHD_ERROR(("DeviceStatusControl(0x%x)=0x%x SubsystemControl(0x%x)=0x%x "
			"L1SSControl2(0x%x)=0x%x\n", PCIECFGREG_DEV_STATUS_CTRL,
			dhd_pcie_config_read(dhd->bus->osh, PCIECFGREG_DEV_STATUS_CTRL,
			sizeof(uint32)), PCIE_CFG_SUBSYSTEM_CONTROL,
			dhd_pcie_config_read(dhd->bus->osh, PCIE_CFG_SUBSYSTEM_CONTROL,
			sizeof(uint32)), PCIECFGREG_PML1_SUB_CTRL2,
			dhd_pcie_config_read(dhd->bus->osh, PCIECFGREG_PML1_SUB_CTRL2,
			sizeof(uint32))));
		dhd_bus_dump_dar_registers(dhd->bus);
	}
#endif /* EXTENDED_PCIE_DEBUG_DUMP */

	if (dhd->bus->is_linkdown) {
		DHD_ERROR(("Skip dumping the PCIe Core registers. link may be DOWN\n"));
		return 0;
	}

	DHD_ERROR(("\n ------- DUMPING PCIE core Registers ------- \r\n"));

	DHD_ERROR(("ClkReq0(0x%x)=0x%x ClkReq1(0x%x)=0x%x ClkReq2(0x%x)=0x%x "
		"ClkReq3(0x%x)=0x%x\n", PCIECFGREG_PHY_DBG_CLKREQ0,
		dhd_pcie_corereg_read(dhd->bus->sih, PCIECFGREG_PHY_DBG_CLKREQ0),
		PCIECFGREG_PHY_DBG_CLKREQ1,
		dhd_pcie_corereg_read(dhd->bus->sih, PCIECFGREG_PHY_DBG_CLKREQ1),
		PCIECFGREG_PHY_DBG_CLKREQ2,
		dhd_pcie_corereg_read(dhd->bus->sih, PCIECFGREG_PHY_DBG_CLKREQ2),
		PCIECFGREG_PHY_DBG_CLKREQ3,
		dhd_pcie_corereg_read(dhd->bus->sih, PCIECFGREG_PHY_DBG_CLKREQ3)));

#ifdef EXTENDED_PCIE_DEBUG_DUMP
	if (dhd->bus->sih->buscorerev >= 24) {
		DHD_ERROR(("ltssm_hist_0(0x%x)=0x%x ltssm_hist_1(0x%x)=0x%x "
			"ltssm_hist_2(0x%x)=0x%x "
			"ltssm_hist_3(0x%x)=0x%x\n", PCIECFGREG_PHY_LTSSM_HIST_0,
			dhd_pcie_corereg_read(dhd->bus->sih, PCIECFGREG_PHY_LTSSM_HIST_0),
			PCIECFGREG_PHY_LTSSM_HIST_1,
			dhd_pcie_corereg_read(dhd->bus->sih, PCIECFGREG_PHY_LTSSM_HIST_1),
			PCIECFGREG_PHY_LTSSM_HIST_2,
			dhd_pcie_corereg_read(dhd->bus->sih, PCIECFGREG_PHY_LTSSM_HIST_2),
			PCIECFGREG_PHY_LTSSM_HIST_3,
			dhd_pcie_corereg_read(dhd->bus->sih, PCIECFGREG_PHY_LTSSM_HIST_3)));

		DHD_ERROR(("trefup(0x%x)=0x%x trefup_ext(0x%x)=0x%x\n",
			PCIECFGREG_TREFUP,
			dhd_pcie_corereg_read(dhd->bus->sih, PCIECFGREG_TREFUP),
			PCIECFGREG_TREFUP_EXT,
			dhd_pcie_corereg_read(dhd->bus->sih, PCIECFGREG_TREFUP_EXT)));
		DHD_ERROR(("errlog(0x%x)=0x%x errlog_addr(0x%x)=0x%x "
			"Function_Intstatus(0x%x)=0x%x "
			"Function_Intmask(0x%x)=0x%x Power_Intstatus(0x%x)=0x%x "
			"Power_Intmask(0x%x)=0x%x\n",
			PCIE_CORE_REG_ERRLOG,
			si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx,
			PCIE_CORE_REG_ERRLOG, 0, 0),
			PCIE_CORE_REG_ERR_ADDR,
			si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx,
			PCIE_CORE_REG_ERR_ADDR, 0, 0),
			PCIFunctionIntstatus(dhd->bus->sih->buscorerev),
			si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx,
			PCIFunctionIntstatus(dhd->bus->sih->buscorerev), 0, 0),
			PCIFunctionIntmask(dhd->bus->sih->buscorerev),
			si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx,
			PCIFunctionIntmask(dhd->bus->sih->buscorerev), 0, 0),
			PCIPowerIntstatus(dhd->bus->sih->buscorerev),
			si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx,
			PCIPowerIntstatus(dhd->bus->sih->buscorerev), 0, 0),
			PCIPowerIntmask(dhd->bus->sih->buscorerev),
			si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx,
			PCIPowerIntmask(dhd->bus->sih->buscorerev), 0, 0)));
		DHD_ERROR(("err_hdrlog1(0x%x)=0x%x err_hdrlog2(0x%x)=0x%x "
			"err_hdrlog3(0x%x)=0x%x err_hdrlog4(0x%x)=0x%x\n",
			(uint)OFFSETOF(sbpcieregs_t, u.pcie2.err_hdr_logreg1),
			si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx,
			OFFSETOF(sbpcieregs_t, u.pcie2.err_hdr_logreg1), 0, 0),
			(uint)OFFSETOF(sbpcieregs_t, u.pcie2.err_hdr_logreg2),
			si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx,
			OFFSETOF(sbpcieregs_t, u.pcie2.err_hdr_logreg2), 0, 0),
			(uint)OFFSETOF(sbpcieregs_t, u.pcie2.err_hdr_logreg3),
			si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx,
			OFFSETOF(sbpcieregs_t, u.pcie2.err_hdr_logreg3), 0, 0),
			(uint)OFFSETOF(sbpcieregs_t, u.pcie2.err_hdr_logreg4),
			si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx,
			OFFSETOF(sbpcieregs_t, u.pcie2.err_hdr_logreg4), 0, 0)));
		DHD_ERROR(("err_code(0x%x)=0x%x\n",
			(uint)OFFSETOF(sbpcieregs_t, u.pcie2.err_code_logreg),
			si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx,
			OFFSETOF(sbpcieregs_t, u.pcie2.err_code_logreg), 0, 0)));

		dhd_pcie_dump_wrapper_regs(dhd);
	}
#endif /* EXTENDED_PCIE_DEBUG_DUMP */

	dhd_pcie_dma_info_dump(dhd);

	return 0;
}
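
/* Accessor for the bus force_bt_quiesce flag. */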
bool
dhd_bus_force_bt_quiesce_enabled(struct dhd_bus *bus)
{
	return bus->force_bt_quiesce;
}
#ifdef DHD_HP2P
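/*
 * HP2P completion-ring sizing helpers: report and set the maximum number of
 * TX/RX completion items, presumably consulted when the HP2P rings are
 * created.
 */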
uint16
dhd_bus_get_hp2p_ring_max_size(struct dhd_bus *bus, bool tx)
{
	if (tx)
		return bus->hp2p_txcpl_max_items;
	else
		return bus->hp2p_rxcpl_max_items;
}

static uint16
dhd_bus_set_hp2p_ring_max_size(struct dhd_bus *bus, bool tx, uint16 val)
{
	if (tx)
		bus->hp2p_txcpl_max_items = val;
	else
		bus->hp2p_rxcpl_max_items = val;
	return val;
}
#endif /* DHD_HP2P */
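
/*
 * Sanity-test dongle TCM: for each pattern in init_val (all-ones, then
 * all-zeros), write the pattern across the full RAM range in MEMBLOCK-sized
 * chunks, read it back and compare. On the first mismatch the offending
 * chunk is hex-dumped and FALSE is returned; TRUE means every chunk matched
 * for every pattern.
 */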
static bool
dhd_bus_tcm_test(struct dhd_bus *bus)
{
	int ret = 0;
	int size; /* Full mem size */
	int start; /* Start address */
	int read_size = 0; /* Read size of each iteration */
	int num = 0;
	uint8 *read_buf, *write_buf;
	uint8 init_val[NUM_PATTERNS] = {
		0xFFu, /* 11111111 */
		0x00u, /* 00000000 */
	};

	if (!bus) {
		DHD_ERROR(("%s: bus is NULL !\n", __FUNCTION__));
		return FALSE;
	}

	read_buf = MALLOCZ(bus->dhd->osh, MEMBLOCK);
	if (!read_buf) {
		DHD_ERROR(("%s: MALLOC of read_buf failed\n", __FUNCTION__));
		return FALSE;
	}

	write_buf = MALLOCZ(bus->dhd->osh, MEMBLOCK);
	if (!write_buf) {
		MFREE(bus->dhd->osh, read_buf, MEMBLOCK);
		DHD_ERROR(("%s: MALLOC of write_buf failed\n", __FUNCTION__));
		return FALSE;
	}

	DHD_ERROR(("%s: start %x, size: %x\n", __FUNCTION__, bus->dongle_ram_base, bus->ramsize));
	DHD_ERROR(("%s: memblock size %d, #pattern %d\n", __FUNCTION__, MEMBLOCK, NUM_PATTERNS));

	while (num < NUM_PATTERNS) {
		start = bus->dongle_ram_base;
		/* Get full mem size */
		size = bus->ramsize;
		memset(write_buf, init_val[num], MEMBLOCK);
		while (size > 0) {
			read_size = MIN(MEMBLOCK, size);
			memset(read_buf, 0, read_size);

			/* Write */
			if ((ret = dhdpcie_bus_membytes(bus, TRUE, start, write_buf, read_size))) {
				DHD_ERROR(("%s: Write Error membytes %d\n", __FUNCTION__, ret));
				MFREE(bus->dhd->osh, read_buf, MEMBLOCK);
				MFREE(bus->dhd->osh, write_buf, MEMBLOCK);
				return FALSE;
			}

			/* Read */
			if ((ret = dhdpcie_bus_membytes(bus, FALSE, start, read_buf, read_size))) {
				DHD_ERROR(("%s: Read Error membytes %d\n", __FUNCTION__, ret));
				MFREE(bus->dhd->osh, read_buf, MEMBLOCK);
				MFREE(bus->dhd->osh, write_buf, MEMBLOCK);
				return FALSE;
			}

			/* Compare */
			if (memcmp(read_buf, write_buf, read_size)) {
				DHD_ERROR(("%s: Mismatch at %x, iter : %d\n",
					__FUNCTION__, start, num));
				prhex("Readbuf", read_buf, read_size);
				prhex("Writebuf", write_buf, read_size);
				MFREE(bus->dhd->osh, read_buf, MEMBLOCK);
				MFREE(bus->dhd->osh, write_buf, MEMBLOCK);
				return FALSE;
			}

			/* Decrement size and increment start address */
			size -= read_size;
			start += read_size;
		}
		num++;
	}

	MFREE(bus->dhd->osh, read_buf, MEMBLOCK);
	MFREE(bus->dhd->osh, write_buf, MEMBLOCK);

	DHD_ERROR(("%s: Success iter : %d\n", __FUNCTION__, num));
	return TRUE;
}