dhd_linux.c (623 KB, 14,308 lines)

430914310143111431214313143141431514316143171431814319143201432114322143231432414325143261432714328143291433014331143321433314334143351433614337143381433914340143411434214343143441434514346143471434814349143501435114352143531435414355143561435714358143591436014361143621436314364143651436614367143681436914370143711437214373143741437514376143771437814379143801438114382143831438414385143861438714388143891439014391143921439314394143951439614397143981439914400144011440214403144041440514406144071440814409144101441114412144131441414415144161441714418144191442014421144221442314424144251442614427144281442914430144311443214433144341443514436144371443814439144401444114442144431444414445144461444714448144491445014451144521445314454144551445614457144581445914460144611446214463144641446514466144671446814469144701447114472144731447414475144761447714478144791448014481144821448314484144851448614487144881448914490144911449214493144941449514496144971449814499145001450114502145031450414505145061450714508145091451014511145121451314514145151451614517145181451914520145211452214523145241452514526145271452814529145301453114532145331453414535145361453714538145391454014541145421454314544145451454614547145481454914550145511455214553145541455514556145571455814559145601456114562145631456414565145661456714568145691457014571145721457314574145751457614577145781457914580145811458214583145841458514586145871458814589145901459114592145931459414595145961459714598145991460014601146021460314604146051460614607146081460914610146111461214613146141461514616146171461814619146201462114622146231462414625146261462714628146291463014631146321463314634146351463614637146381463914640146411464214643146441464514646146471464814649146501465114652146531465414655146561465714658146591466014661146621466314664146651466614667146681466914670146711467214673146741467514676146771467814679146801468114682146831468414685146861468714688146891469014691146921469314694146951469614697146981469914700147011470214703147041470514706147071470814709147101471114712147131471414715147161471714718147191472014721147221472314724147251472614727147281472914730147311473214733147341473514736147371473814739147401474114742147431474414745147461474714748147491475014751147521475314754147551475614757147581475914760147611476214763147641476514766147671476814769147701477114772147731477414775147761477714778147791478014781147821478314784147851478614787147881478914790147911479214793147941479514796147971479814799148001480114802148031480414805148061480714808148091481014811148121481314814148151481614817148181481914820148211482214823148241482514826148271482814829148301483114832148331483414835148361483714838148391484014841148421484314844148451484614847148481484914850148511485214853148541485514856148571485814859148601486114862148631486414865148661486714868148691487014871148721487314874148751487614877148781487914880148811488214883148841488514886148871488814889148901489114892148931489414895148961489714898148991490014901149021490314904149051490614907149081490914910149111491214913149141491514916149171491814919149201492114922149231492414925149261492714928149291493014931149321493314934149351493614937149381493914940149411494214943149441494514946149471494814949149501495114952149531495414955149561495714958149591496014961149621496314964149651496614967149681496914970149711497214973149741497514976149771497814979149801498114982149831498414985149861498714988149891499014991149921499314994149951499614997149981499915000150011500215003150041500515006150071500815009150101501115012150131501415015150161501715018150191
502015021150221502315024150251502615027150281502915030150311503215033150341503515036150371503815039150401504115042150431504415045150461504715048150491505015051150521505315054150551505615057150581505915060150611506215063150641506515066150671506815069150701507115072150731507415075150761507715078150791508015081150821508315084150851508615087150881508915090150911509215093150941509515096150971509815099151001510115102151031510415105151061510715108151091511015111151121511315114151151511615117151181511915120151211512215123151241512515126151271512815129151301513115132151331513415135151361513715138151391514015141151421514315144151451514615147151481514915150151511515215153151541515515156151571515815159151601516115162151631516415165151661516715168151691517015171151721517315174151751517615177151781517915180151811518215183151841518515186151871518815189151901519115192151931519415195151961519715198151991520015201152021520315204152051520615207152081520915210152111521215213152141521515216152171521815219152201522115222152231522415225152261522715228152291523015231152321523315234152351523615237152381523915240152411524215243152441524515246152471524815249152501525115252152531525415255152561525715258152591526015261152621526315264152651526615267152681526915270152711527215273152741527515276152771527815279152801528115282152831528415285152861528715288152891529015291152921529315294152951529615297152981529915300153011530215303153041530515306153071530815309153101531115312153131531415315153161531715318153191532015321153221532315324153251532615327153281532915330153311533215333153341533515336153371533815339153401534115342153431534415345153461534715348153491535015351153521535315354153551535615357153581535915360153611536215363153641536515366153671536815369153701537115372153731537415375153761537715378153791538015381153821538315384153851538615387153881538915390153911539215393153941539515396153971539815399154001540115402154031540415405154061540715408154091541015411154121541315414154151541615417154181541915420154211542215423154241542515426154271542815429154301543115432154331543415435154361543715438154391544015441154421544315444154451544615447154481544915450154511545215453154541545515456154571545815459154601546115462154631546415465154661546715468154691547015471154721547315474154751547615477154781547915480154811548215483154841548515486154871548815489154901549115492154931549415495154961549715498154991550015501155021550315504155051550615507155081550915510155111551215513155141551515516155171551815519155201552115522155231552415525155261552715528155291553015531155321553315534155351553615537155381553915540155411554215543155441554515546155471554815549155501555115552155531555415555155561555715558155591556015561155621556315564155651556615567155681556915570155711557215573155741557515576155771557815579155801558115582155831558415585155861558715588155891559015591155921559315594155951559615597155981559915600156011560215603156041560515606156071560815609156101561115612156131561415615156161561715618156191562015621156221562315624156251562615627156281562915630156311563215633156341563515636156371563815639156401564115642156431564415645156461564715648156491565015651156521565315654156551565615657156581565915660156611566215663156641566515666156671566815669156701567115672156731567415675156761567715678156791568015681156821568315684156851568615687156881568915690156911569215693156941569515696156971569815699157001570115702157031570415705157061570715708157091571015711157121571315714157151571615717157181571915720157211572215723157241572515726157271572815729157301
573115732157331573415735157361573715738157391574015741157421574315744157451574615747157481574915750157511575215753157541575515756157571575815759157601576115762157631576415765157661576715768157691577015771157721577315774157751577615777157781577915780157811578215783157841578515786157871578815789157901579115792157931579415795157961579715798157991580015801158021580315804158051580615807158081580915810158111581215813158141581515816158171581815819158201582115822158231582415825158261582715828158291583015831158321583315834158351583615837158381583915840158411584215843158441584515846158471584815849158501585115852158531585415855158561585715858158591586015861158621586315864158651586615867158681586915870158711587215873158741587515876158771587815879158801588115882158831588415885158861588715888158891589015891158921589315894158951589615897158981589915900159011590215903159041590515906159071590815909159101591115912159131591415915159161591715918159191592015921159221592315924159251592615927159281592915930159311593215933159341593515936159371593815939159401594115942159431594415945159461594715948159491595015951159521595315954159551595615957159581595915960159611596215963159641596515966159671596815969159701597115972159731597415975159761597715978159791598015981159821598315984159851598615987159881598915990159911599215993159941599515996159971599815999160001600116002160031600416005160061600716008160091601016011160121601316014160151601616017160181601916020160211602216023160241602516026160271602816029160301603116032160331603416035160361603716038160391604016041160421604316044160451604616047160481604916050160511605216053160541605516056160571605816059160601606116062160631606416065160661606716068160691607016071160721607316074160751607616077160781607916080160811608216083160841608516086160871608816089160901609116092160931609416095160961609716098160991610016101161021610316104161051610616107161081610916110161111611216113161141611516116161171611816119161201612116122161231612416125161261612716128161291613016131161321613316134161351613616137161381613916140161411614216143161441614516146161471614816149161501615116152161531615416155161561615716158161591616016161161621616316164161651616616167161681616916170161711617216173161741617516176161771617816179161801618116182161831618416185161861618716188161891619016191161921619316194161951619616197161981619916200162011620216203162041620516206162071620816209162101621116212162131621416215162161621716218162191622016221162221622316224162251622616227162281622916230162311623216233162341623516236162371623816239162401624116242162431624416245162461624716248162491625016251162521625316254162551625616257162581625916260162611626216263162641626516266162671626816269162701627116272162731627416275162761627716278162791628016281162821628316284162851628616287162881628916290162911629216293162941629516296162971629816299163001630116302163031630416305163061630716308163091631016311163121631316314163151631616317163181631916320163211632216323163241632516326163271632816329163301633116332163331633416335163361633716338163391634016341163421634316344163451634616347163481634916350163511635216353163541635516356163571635816359163601636116362163631636416365163661636716368163691637016371163721637316374163751637616377163781637916380163811638216383163841638516386163871638816389163901639116392163931639416395163961639716398163991640016401164021640316404164051640616407164081640916410164111641216413164141641516416164171641816419164201642116422164231642416425164261642716428164291643016431164321643316434164351643616437164381643916440164411
644216443164441644516446164471644816449164501645116452164531645416455164561645716458164591646016461164621646316464164651646616467164681646916470164711647216473164741647516476164771647816479164801648116482164831648416485164861648716488164891649016491164921649316494164951649616497164981649916500165011650216503165041650516506165071650816509165101651116512165131651416515165161651716518165191652016521165221652316524165251652616527165281652916530165311653216533165341653516536165371653816539165401654116542165431654416545165461654716548165491655016551165521655316554165551655616557165581655916560165611656216563165641656516566165671656816569165701657116572165731657416575165761657716578165791658016581165821658316584165851658616587165881658916590165911659216593165941659516596165971659816599166001660116602166031660416605166061660716608166091661016611166121661316614166151661616617166181661916620166211662216623166241662516626166271662816629166301663116632166331663416635166361663716638166391664016641166421664316644166451664616647166481664916650166511665216653166541665516656166571665816659166601666116662166631666416665166661666716668166691667016671166721667316674166751667616677166781667916680166811668216683166841668516686166871668816689166901669116692166931669416695166961669716698166991670016701167021670316704167051670616707167081670916710167111671216713167141671516716167171671816719167201672116722167231672416725167261672716728167291673016731167321673316734167351673616737167381673916740167411674216743167441674516746167471674816749167501675116752167531675416755167561675716758167591676016761167621676316764167651676616767167681676916770167711677216773167741677516776167771677816779167801678116782167831678416785167861678716788167891679016791167921679316794167951679616797167981679916800168011680216803168041680516806168071680816809168101681116812168131681416815168161681716818168191682016821168221682316824168251682616827168281682916830168311683216833168341683516836168371683816839168401684116842168431684416845168461684716848168491685016851168521685316854168551685616857168581685916860168611686216863168641686516866168671686816869168701687116872168731687416875168761687716878168791688016881168821688316884168851688616887168881688916890168911689216893168941689516896168971689816899169001690116902169031690416905169061690716908169091691016911169121691316914169151691616917169181691916920169211692216923169241692516926169271692816929169301693116932169331693416935169361693716938169391694016941169421694316944169451694616947169481694916950169511695216953169541695516956169571695816959169601696116962169631696416965169661696716968169691697016971169721697316974169751697616977169781697916980169811698216983169841698516986169871698816989169901699116992169931699416995169961699716998169991700017001170021700317004170051700617007170081700917010170111701217013170141701517016170171701817019170201702117022170231702417025170261702717028170291703017031170321703317034170351703617037170381703917040170411704217043170441704517046170471704817049170501705117052170531705417055170561705717058170591706017061170621706317064170651706617067170681706917070170711707217073170741707517076170771707817079170801708117082170831708417085170861708717088170891709017091170921709317094170951709617097170981709917100171011710217103171041710517106171071710817109171101711117112171131711417115171161711717118171191712017121171221712317124171251712617127171281712917130171311713217133171341713517136171371713817139171401714117142171431714417145171461714717148171491715017151171521
715317154171551715617157171581715917160171611716217163171641716517166171671716817169171701717117172171731717417175171761717717178171791718017181171821718317184171851718617187171881718917190171911719217193171941719517196171971719817199172001720117202172031720417205172061720717208172091721017211172121721317214172151721617217172181721917220172211722217223172241722517226172271722817229172301723117232172331723417235172361723717238172391724017241172421724317244172451724617247172481724917250172511725217253172541725517256172571725817259172601726117262172631726417265172661726717268172691727017271172721727317274172751727617277172781727917280172811728217283172841728517286172871728817289172901729117292172931729417295172961729717298172991730017301173021730317304173051730617307173081730917310173111731217313173141731517316173171731817319173201732117322173231732417325173261732717328173291733017331173321733317334173351733617337173381733917340173411734217343173441734517346173471734817349173501735117352173531735417355173561735717358173591736017361173621736317364173651736617367173681736917370173711737217373173741737517376173771737817379173801738117382173831738417385173861738717388173891739017391173921739317394173951739617397173981739917400174011740217403174041740517406174071740817409174101741117412174131741417415174161741717418174191742017421174221742317424174251742617427174281742917430174311743217433174341743517436174371743817439174401744117442174431744417445174461744717448174491745017451174521745317454174551745617457174581745917460174611746217463174641746517466174671746817469174701747117472174731747417475174761747717478174791748017481174821748317484174851748617487174881748917490174911749217493174941749517496174971749817499175001750117502175031750417505175061750717508175091751017511175121751317514175151751617517175181751917520175211752217523175241752517526175271752817529175301753117532175331753417535175361753717538175391754017541175421754317544175451754617547175481754917550175511755217553175541755517556175571755817559175601756117562175631756417565175661756717568175691757017571175721757317574175751757617577175781757917580175811758217583175841758517586175871758817589175901759117592175931759417595175961759717598175991760017601176021760317604176051760617607176081760917610176111761217613176141761517616176171761817619176201762117622176231762417625176261762717628176291763017631176321763317634176351763617637176381763917640176411764217643176441764517646176471764817649176501765117652176531765417655176561765717658176591766017661176621766317664176651766617667176681766917670176711767217673176741767517676176771767817679176801768117682176831768417685176861768717688176891769017691176921769317694176951769617697176981769917700177011770217703177041770517706177071770817709177101771117712177131771417715177161771717718177191772017721177221772317724177251772617727177281772917730177311773217733177341773517736177371773817739177401774117742177431774417745177461774717748177491775017751177521775317754177551775617757177581775917760177611776217763177641776517766177671776817769177701777117772177731777417775177761777717778177791778017781177821778317784177851778617787177881778917790177911779217793177941779517796177971779817799178001780117802178031780417805178061780717808178091781017811178121781317814178151781617817178181781917820178211782217823178241782517826178271782817829178301783117832178331783417835178361783717838178391784017841178421784317844178451784617847178481784917850178511785217853178541785517856178571785817859178601786117862178631
786417865178661786717868178691787017871178721787317874178751787617877178781787917880178811788217883178841788517886178871788817889178901789117892178931789417895178961789717898178991790017901179021790317904179051790617907179081790917910179111791217913179141791517916179171791817919179201792117922179231792417925179261792717928179291793017931179321793317934179351793617937179381793917940179411794217943179441794517946179471794817949179501795117952179531795417955179561795717958179591796017961179621796317964179651796617967179681796917970179711797217973179741797517976179771797817979179801798117982179831798417985179861798717988179891799017991179921799317994179951799617997179981799918000180011800218003180041800518006180071800818009180101801118012180131801418015180161801718018180191802018021180221802318024180251802618027180281802918030180311803218033180341803518036180371803818039180401804118042180431804418045180461804718048180491805018051180521805318054180551805618057180581805918060180611806218063180641806518066180671806818069180701807118072180731807418075180761807718078180791808018081180821808318084180851808618087180881808918090180911809218093180941809518096180971809818099181001810118102181031810418105181061810718108181091811018111181121811318114181151811618117181181811918120181211812218123181241812518126181271812818129181301813118132181331813418135181361813718138181391814018141181421814318144181451814618147181481814918150181511815218153181541815518156181571815818159181601816118162181631816418165181661816718168181691817018171181721817318174181751817618177181781817918180181811818218183181841818518186181871818818189181901819118192181931819418195181961819718198181991820018201182021820318204182051820618207182081820918210182111821218213182141821518216182171821818219182201822118222182231822418225182261822718228182291823018231182321823318234182351823618237182381823918240182411824218243182441824518246182471824818249182501825118252182531825418255182561825718258182591826018261182621826318264182651826618267182681826918270182711827218273182741827518276182771827818279182801828118282182831828418285182861828718288182891829018291182921829318294182951829618297182981829918300183011830218303183041830518306183071830818309183101831118312183131831418315183161831718318183191832018321183221832318324183251832618327183281832918330183311833218333183341833518336183371833818339183401834118342183431834418345183461834718348183491835018351183521835318354183551835618357183581835918360183611836218363183641836518366183671836818369183701837118372183731837418375183761837718378183791838018381183821838318384183851838618387183881838918390183911839218393183941839518396183971839818399184001840118402184031840418405184061840718408184091841018411184121841318414184151841618417184181841918420184211842218423184241842518426184271842818429184301843118432184331843418435184361843718438184391844018441184421844318444184451844618447184481844918450184511845218453184541845518456184571845818459184601846118462184631846418465184661846718468184691847018471184721847318474184751847618477184781847918480184811848218483184841848518486184871848818489184901849118492184931849418495184961849718498184991850018501185021850318504185051850618507185081850918510185111851218513185141851518516185171851818519185201852118522185231852418525185261852718528185291853018531185321853318534185351853618537185381853918540185411854218543185441854518546185471854818549185501855118552185531855418555185561855718558185591856018561185621856318564185651856618567185681856918570185711857218573185741
857518576185771857818579185801858118582185831858418585185861858718588185891859018591185921859318594185951859618597185981859918600186011860218603186041860518606186071860818609186101861118612186131861418615186161861718618186191862018621186221862318624186251862618627186281862918630186311863218633186341863518636186371863818639186401864118642186431864418645186461864718648186491865018651186521865318654186551865618657186581865918660186611866218663186641866518666186671866818669186701867118672186731867418675186761867718678186791868018681186821868318684186851868618687186881868918690186911869218693186941869518696186971869818699187001870118702187031870418705187061870718708187091871018711187121871318714187151871618717187181871918720187211872218723187241872518726187271872818729187301873118732187331873418735187361873718738187391874018741187421874318744187451874618747187481874918750187511875218753187541875518756187571875818759187601876118762187631876418765187661876718768187691877018771187721877318774187751877618777187781877918780187811878218783187841878518786187871878818789187901879118792187931879418795187961879718798187991880018801188021880318804188051880618807188081880918810188111881218813188141881518816188171881818819188201882118822188231882418825188261882718828188291883018831188321883318834188351883618837188381883918840188411884218843188441884518846188471884818849188501885118852188531885418855188561885718858188591886018861188621886318864188651886618867188681886918870188711887218873188741887518876188771887818879188801888118882188831888418885188861888718888188891889018891188921889318894188951889618897188981889918900189011890218903189041890518906189071890818909189101891118912189131891418915189161891718918189191892018921189221892318924189251892618927189281892918930189311893218933189341893518936189371893818939189401894118942189431894418945189461894718948189491895018951189521895318954189551895618957189581895918960189611896218963189641896518966189671896818969189701897118972189731897418975189761897718978189791898018981189821898318984189851898618987189881898918990189911899218993189941899518996189971899818999190001900119002190031900419005190061900719008190091901019011190121901319014190151901619017190181901919020190211902219023190241902519026190271902819029190301903119032190331903419035190361903719038190391904019041190421904319044190451904619047190481904919050190511905219053190541905519056190571905819059190601906119062190631906419065190661906719068190691907019071190721907319074190751907619077190781907919080190811908219083190841908519086190871908819089190901909119092190931909419095190961909719098190991910019101191021910319104191051910619107191081910919110191111911219113191141911519116191171911819119191201912119122191231912419125191261912719128191291913019131191321913319134191351913619137191381913919140191411914219143191441914519146191471914819149191501915119152191531915419155191561915719158191591916019161191621916319164191651916619167191681916919170191711917219173191741917519176191771917819179191801918119182191831918419185191861918719188191891919019191191921919319194191951919619197191981919919200192011920219203192041920519206192071920819209192101921119212192131921419215192161921719218192191922019221192221922319224192251922619227192281922919230192311923219233192341923519236192371923819239192401924119242192431924419245192461924719248192491925019251192521925319254192551925619257192581925919260192611926219263192641926519266192671926819269192701927119272192731927419275192761927719278192791928019281192821928319284192851
928619287192881928919290192911929219293192941929519296192971929819299193001930119302193031930419305193061930719308193091931019311193121931319314193151931619317193181931919320193211932219323193241932519326193271932819329193301933119332193331933419335193361933719338193391934019341193421934319344193451934619347193481934919350193511935219353193541935519356193571935819359193601936119362193631936419365193661936719368193691937019371193721937319374193751937619377193781937919380193811938219383193841938519386193871938819389193901939119392193931939419395193961939719398193991940019401194021940319404194051940619407194081940919410194111941219413194141941519416194171941819419194201942119422194231942419425194261942719428194291943019431194321943319434194351943619437194381943919440194411944219443194441944519446194471944819449194501945119452194531945419455194561945719458194591946019461194621946319464194651946619467194681946919470194711947219473194741947519476194771947819479194801948119482194831948419485194861948719488194891949019491194921949319494194951949619497194981949919500195011950219503195041950519506195071950819509195101951119512195131951419515195161951719518195191952019521195221952319524195251952619527195281952919530195311953219533195341953519536195371953819539195401954119542195431954419545195461954719548195491955019551195521955319554195551955619557195581955919560195611956219563195641956519566195671956819569195701957119572195731957419575195761957719578195791958019581195821958319584195851958619587195881958919590195911959219593195941959519596195971959819599196001960119602196031960419605196061960719608196091961019611196121961319614196151961619617196181961919620196211962219623196241962519626196271962819629196301963119632196331963419635196361963719638196391964019641196421964319644196451964619647196481964919650196511965219653196541965519656196571965819659196601966119662196631966419665196661966719668196691967019671196721967319674196751967619677196781967919680196811968219683196841968519686196871968819689196901969119692196931969419695196961969719698196991970019701197021970319704197051970619707197081970919710197111971219713197141971519716197171971819719197201972119722197231972419725197261972719728197291973019731197321973319734197351973619737197381973919740197411974219743197441974519746197471974819749197501975119752197531975419755197561975719758197591976019761197621976319764197651976619767197681976919770197711977219773197741977519776197771977819779197801978119782197831978419785197861978719788197891979019791197921979319794197951979619797197981979919800198011980219803198041980519806198071980819809198101981119812198131981419815198161981719818198191982019821198221982319824198251982619827198281982919830198311983219833198341983519836198371983819839198401984119842198431984419845198461984719848198491985019851198521985319854198551985619857198581985919860198611986219863198641986519866198671986819869198701987119872198731987419875198761987719878198791988019881198821988319884198851988619887198881988919890198911989219893198941989519896198971989819899199001990119902199031990419905199061990719908199091991019911199121991319914199151991619917199181991919920199211992219923199241992519926199271992819929199301993119932199331993419935199361993719938199391994019941199421994319944199451994619947199481994919950199511995219953199541995519956199571995819959199601996119962199631996419965199661996719968199691997019971199721997319974199751997619977199781997919980199811998219983199841998519986199871998819989199901999119992199931999419995199961
999719998199992000020001200022000320004200052000620007200082000920010200112001220013200142001520016200172001820019200202002120022200232002420025200262002720028200292003020031200322003320034200352003620037200382003920040200412004220043200442004520046200472004820049200502005120052200532005420055200562005720058200592006020061200622006320064200652006620067200682006920070200712007220073200742007520076200772007820079200802008120082200832008420085200862008720088200892009020091200922009320094200952009620097200982009920100201012010220103201042010520106201072010820109201102011120112201132011420115201162011720118201192012020121201222012320124201252012620127201282012920130201312013220133201342013520136201372013820139201402014120142201432014420145201462014720148201492015020151201522015320154201552015620157201582015920160201612016220163201642016520166201672016820169201702017120172201732017420175201762017720178201792018020181201822018320184201852018620187201882018920190201912019220193201942019520196201972019820199202002020120202202032020420205202062020720208202092021020211202122021320214202152021620217202182021920220202212022220223202242022520226202272022820229202302023120232202332023420235202362023720238202392024020241202422024320244202452024620247202482024920250202512025220253202542025520256202572025820259202602026120262202632026420265202662026720268202692027020271202722027320274202752027620277202782027920280202812028220283202842028520286202872028820289202902029120292202932029420295202962029720298202992030020301203022030320304203052030620307203082030920310203112031220313203142031520316203172031820319203202032120322203232032420325203262032720328203292033020331203322033320334203352033620337203382033920340203412034220343203442034520346203472034820349203502035120352203532035420355203562035720358203592036020361203622036320364203652036620367203682036920370203712037220373203742037520376203772037820379203802038120382203832038420385203862038720388203892039020391203922039320394203952039620397203982039920400204012040220403204042040520406204072040820409204102041120412204132041420415204162041720418204192042020421204222042320424204252042620427204282042920430204312043220433204342043520436204372043820439204402044120442204432044420445204462044720448204492045020451204522045320454204552045620457204582045920460204612046220463204642046520466204672046820469204702047120472204732047420475204762047720478204792048020481204822048320484204852048620487204882048920490204912049220493204942049520496204972049820499205002050120502205032050420505205062050720508205092051020511205122051320514205152051620517205182051920520205212052220523205242052520526205272052820529205302053120532205332053420535205362053720538205392054020541205422054320544205452054620547205482054920550205512055220553205542055520556205572055820559205602056120562205632056420565205662056720568205692057020571205722057320574205752057620577205782057920580205812058220583205842058520586205872058820589205902059120592205932059420595205962059720598205992060020601206022060320604206052060620607206082060920610206112061220613206142061520616206172061820619206202062120622206232062420625206262062720628206292063020631206322063320634206352063620637206382063920640206412064220643206442064520646206472064820649206502065120652206532065420655206562065720658206592066020661206622066320664206652066620667206682066920670206712067220673206742067520676206772067820679206802068120682206832068420685206862068720688206892069020691206922069320694206952069620697206982069920700207012070220703207042070520706207072
070820709207102071120712207132071420715207162071720718207192072020721207222072320724207252072620727207282072920730207312073220733207342073520736207372073820739207402074120742207432074420745207462074720748207492075020751207522075320754207552075620757207582075920760207612076220763207642076520766207672076820769207702077120772207732077420775207762077720778207792078020781207822078320784207852078620787207882078920790207912079220793207942079520796207972079820799208002080120802208032080420805208062080720808208092081020811208122081320814208152081620817208182081920820208212082220823208242082520826208272082820829208302083120832208332083420835208362083720838208392084020841208422084320844208452084620847208482084920850208512085220853208542085520856208572085820859208602086120862208632086420865208662086720868208692087020871208722087320874208752087620877208782087920880208812088220883208842088520886208872088820889208902089120892208932089420895208962089720898208992090020901209022090320904209052090620907209082090920910209112091220913209142091520916209172091820919209202092120922209232092420925209262092720928209292093020931209322093320934209352093620937209382093920940209412094220943209442094520946209472094820949209502095120952209532095420955209562095720958209592096020961209622096320964209652096620967209682096920970209712097220973209742097520976209772097820979209802098120982209832098420985209862098720988209892099020991209922099320994209952099620997209982099921000210012100221003210042100521006210072100821009210102101121012210132101421015210162101721018210192102021021210222102321024210252102621027210282102921030210312103221033210342103521036210372103821039210402104121042210432104421045210462104721048210492105021051210522105321054210552105621057210582105921060210612106221063210642106521066210672106821069210702107121072210732107421075210762107721078210792108021081210822108321084210852108621087210882108921090210912109221093210942109521096210972109821099211002110121102211032110421105211062110721108211092111021111211122111321114211152111621117211182111921120211212112221123211242112521126211272112821129211302113121132211332113421135211362113721138211392114021141211422114321144211452114621147211482114921150211512115221153211542115521156211572115821159211602116121162211632116421165211662116721168211692117021171211722117321174211752117621177211782117921180211812118221183211842118521186211872118821189211902119121192211932119421195211962119721198211992120021201212022120321204212052120621207212082120921210212112121221213212142121521216212172121821219212202122121222212232122421225212262122721228212292123021231212322123321234212352123621237212382123921240212412124221243212442124521246212472124821249212502125121252212532125421255212562125721258212592126021261212622126321264212652126621267212682126921270212712127221273212742127521276212772127821279212802128121282212832128421285212862128721288212892129021291212922129321294212952129621297212982129921300213012130221303213042130521306213072130821309213102131121312213132131421315213162131721318213192132021321213222132321324213252132621327213282132921330213312133221333213342133521336213372133821339213402134121342213432134421345213462134721348213492135021351213522135321354213552135621357213582135921360213612136221363213642136521366213672136821369213702137121372213732137421375213762137721378213792138021381213822138321384213852138621387213882138921390213912139221393213942139521396213972139821399214002140121402214032140421405214062140721408214092141021411214122141321414214152141621417214182
141921420214212142221423214242142521426214272142821429214302143121432214332143421435214362143721438214392144021441214422144321444214452144621447214482144921450214512145221453214542145521456214572145821459214602146121462214632146421465214662146721468214692147021471214722147321474214752147621477214782147921480214812148221483214842148521486214872148821489214902149121492214932149421495214962149721498214992150021501215022150321504215052150621507215082150921510215112151221513215142151521516215172151821519215202152121522215232152421525215262152721528215292153021531215322153321534215352153621537215382153921540215412154221543215442154521546215472154821549215502155121552215532155421555215562155721558215592156021561215622156321564215652156621567215682156921570215712157221573215742157521576215772157821579215802158121582215832158421585215862158721588215892159021591215922159321594215952159621597215982159921600216012160221603216042160521606216072160821609216102161121612216132161421615216162161721618216192162021621216222162321624216252162621627216282162921630216312163221633216342163521636216372163821639216402164121642216432164421645216462164721648216492165021651216522165321654216552165621657216582165921660216612166221663216642166521666216672166821669216702167121672216732167421675216762167721678216792168021681216822168321684216852168621687216882168921690216912169221693216942169521696216972169821699217002170121702217032170421705217062170721708217092171021711217122171321714217152171621717217182171921720217212172221723217242172521726217272172821729217302173121732217332173421735217362173721738217392174021741217422174321744217452174621747217482174921750217512175221753217542175521756217572175821759217602176121762217632176421765217662176721768217692177021771217722177321774217752177621777217782177921780217812178221783217842178521786217872178821789217902179121792217932179421795217962179721798217992180021801218022180321804218052180621807218082180921810218112181221813218142181521816218172181821819218202182121822218232182421825218262182721828218292183021831218322183321834218352183621837218382183921840218412184221843218442184521846218472184821849218502185121852218532185421855218562185721858218592186021861218622186321864218652186621867218682186921870218712187221873218742187521876218772187821879218802188121882218832188421885218862188721888218892189021891218922189321894218952189621897218982189921900219012190221903219042190521906219072190821909219102191121912219132191421915219162191721918219192192021921219222192321924219252192621927219282192921930219312193221933219342193521936219372193821939219402194121942219432194421945219462194721948219492195021951219522195321954219552195621957219582195921960219612196221963219642196521966219672196821969219702197121972219732197421975219762197721978219792198021981219822198321984219852198621987219882198921990219912199221993219942199521996219972199821999220002200122002220032200422005220062200722008220092201022011220122201322014220152201622017220182201922020220212202222023220242202522026220272202822029220302203122032220332203422035220362203722038220392204022041220422204322044220452204622047220482204922050220512205222053220542205522056220572205822059220602206122062220632206422065220662206722068220692207022071220722207322074220752207622077220782207922080220812208222083220842208522086220872208822089220902209122092220932209422095220962209722098220992210022101221022210322104221052210622107221082210922110221112211222113221142211522116221172211822119221202212122122221232212422125221262212722128221292
2130221312213222133221342213522136221372213822139221402214122142221432214422145221462214722148221492215022151221522215322154221552215622157221582215922160221612216222163221642216522166221672216822169221702217122172221732217422175221762217722178221792218022181221822218322184221852218622187221882218922190221912219222193221942219522196221972219822199222002220122202222032220422205222062220722208222092221022211222122221322214222152221622217222182221922220222212222222223222242222522226222272222822229222302223122232222332223422235222362223722238222392224022241222422224322244222452224622247222482224922250222512225222253222542225522256222572225822259222602226122262222632226422265222662226722268222692227022271222722227322274222752227622277222782227922280222812228222283222842228522286222872228822289222902229122292222932229422295222962229722298222992230022301223022230322304223052230622307223082230922310223112231222313223142231522316223172231822319223202232122322223232232422325223262232722328223292233022331223322233322334223352233622337223382233922340223412234222343223442234522346223472234822349223502235122352223532235422355223562235722358223592236022361223622236322364223652236622367223682236922370223712237222373223742237522376223772237822379223802238122382223832238422385223862238722388223892239022391223922239322394223952239622397223982239922400224012240222403224042240522406224072240822409224102241122412224132241422415224162241722418224192242022421224222242322424224252242622427224282242922430224312243222433224342243522436224372243822439224402244122442224432244422445224462244722448224492245022451224522245322454224552245622457224582245922460224612246222463224642246522466224672246822469224702247122472224732247422475224762247722478224792248022481224822248322484224852248622487224882248922490224912249222493224942249522496224972249822499225002250122502225032250422505225062250722508225092251022511225122251322514225152251622517225182251922520225212252222523225242252522526225272252822529225302253122532225332253422535225362253722538225392254022541225422254322544225452254622547225482254922550225512255222553225542255522556225572255822559225602256122562225632256422565225662256722568225692257022571225722257322574225752257622577225782257922580225812258222583225842258522586225872258822589225902259122592225932259422595225962259722598225992260022601226022260322604226052260622607226082260922610226112261222613226142261522616226172261822619226202262122622226232262422625226262262722628226292263022631226322263322634226352263622637226382263922640
/*
 * Broadcom Dongle Host Driver (DHD), Linux-specific network interface
 * Basically selected code segments from usb-cdc.c and usb-rndis.c
 *
 * Portions of this code are copyright (c) 2020 Cypress Semiconductor Corporation
 *
 * Copyright (C) 1999-2020, Broadcom Corporation
 *
 * Unless you and Broadcom execute a separate written software license
 * agreement governing use of this software, this software is licensed to you
 * under the terms of the GNU General Public License version 2 (the "GPL"),
 * available at http://www.broadcom.com/licenses/GPLv2.php, with the
 * following added to such license:
 *
 * As a special exception, the copyright holders of this software give you
 * permission to link this software with independent modules, and to copy and
 * distribute the resulting executable under terms of your choice, provided that
 * you also meet, for each linked independent module, the terms and conditions of
 * the license of that module. An independent module is a module which is not
 * derived from this software. The special exception does not apply to any
 * modifications of the software.
 *
 * Notwithstanding the above, under no circumstances may you combine this
 * software in any way with any other Broadcom software provided under a license
 * other than the GPL, without Broadcom's express prior written consent.
 *
 *
 * <<Broadcom-WL-IPTag/Open:>>
 *
 * $Id: dhd_linux.c 702611 2017-06-02 06:40:15Z $
 */
#include <typedefs.h>
#include <linuxver.h>
#include <osl.h>
#include <bcmstdlib_s.h>
#ifdef SHOW_LOGTRACE
#include <linux/syscalls.h>
#include <event_log.h>
#endif /* SHOW_LOGTRACE */
#ifdef PCIE_FULL_DONGLE
#include <bcmmsgbuf.h>
#endif /* PCIE_FULL_DONGLE */
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/skbuff.h>
#include <linux/netdevice.h>
#include <linux/inetdevice.h>
#include <linux/rtnetlink.h>
#include <linux/etherdevice.h>
#include <linux/random.h>
#include <linux/spinlock.h>
#include <linux/ethtool.h>
#include <linux/fcntl.h>
#include <linux/fs.h>
#include <linux/ip.h>
#include <linux/reboot.h>
#include <linux/notifier.h>
#include <linux/irq.h>
#include <net/addrconf.h>
#ifdef ENABLE_ADAPTIVE_SCHED
#include <linux/cpufreq.h>
#endif /* ENABLE_ADAPTIVE_SCHED */
#include <linux/rtc.h>
#include <linux/namei.h>
#include <asm/uaccess.h>
#include <asm/unaligned.h>
#include <dhd_linux_priv.h>
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 12, 0))
#include <uapi/linux/sched/types.h>
#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 12, 0) */
#include <epivers.h>
#include <bcmutils.h>
#include <bcmendian.h>
#include <bcmdevs.h>
#include <bcmiov.h>
#include <ethernet.h>
#include <bcmevent.h>
#include <vlan.h>
#include <802.3.h>
#include <dhd_linux_wq.h>
#include <dhd.h>
#include <dhd_linux.h>
#include <dhd_linux_pktdump.h>
#ifdef DHD_WET
#include <dhd_wet.h>
#endif /* DHD_WET */
#ifdef PCIE_FULL_DONGLE
#include <dhd_flowring.h>
#endif /* PCIE_FULL_DONGLE */
#include <dhd_bus.h>
#include <dhd_proto.h>
#include <dhd_dbg.h>
#include <dhd_dbg_ring.h>
#include <dhd_debug.h>
#if defined(CONFIG_PM_WAKELOCKS) || defined(CONFIG_HAS_WAKELOCK)
#include <linux/wakelock.h>
#endif /* CONFIG_PM_WAKELOCKS || CONFIG_HAS_WAKELOCK */
#if defined(WL_CFG80211)
#include <wl_cfg80211.h>
#ifdef WL_BAM
#include <wl_bam.h>
#endif /* WL_BAM */
#endif /* WL_CFG80211 */
#ifdef PNO_SUPPORT
#include <dhd_pno.h>
#endif /* PNO_SUPPORT */
#ifdef RTT_SUPPORT
#include <dhd_rtt.h>
#endif /* RTT_SUPPORT */
#if defined(CONFIG_SOC_EXYNOS8895) || defined(CONFIG_SOC_EXYNOS9810) || \
	defined(CONFIG_SOC_EXYNOS9820)
#include <linux/exynos-pci-ctrl.h>
#endif /* CONFIG_SOC_EXYNOS8895 || CONFIG_SOC_EXYNOS9810 || CONFIG_SOC_EXYNOS9820 */
#ifdef DHD_L2_FILTER
#include <bcmicmp.h>
#include <bcm_l2_filter.h>
#include <dhd_l2_filter.h>
#endif /* DHD_L2_FILTER */
#ifdef DHD_PSTA
#include <dhd_psta.h>
#endif /* DHD_PSTA */
#ifdef AMPDU_VO_ENABLE
#include <802.1d.h>
#endif /* AMPDU_VO_ENABLE */
#if defined(DHDTCPACK_SUPPRESS) || defined(DHDTCPSYNC_FLOOD_BLK)
#include <dhd_ip.h>
#endif /* DHDTCPACK_SUPPRESS || DHDTCPSYNC_FLOOD_BLK */
#include <dhd_daemon.h>
#ifdef DHD_PKT_LOGGING
#include <dhd_pktlog.h>
#endif /* DHD_PKT_LOGGING */
#ifdef DHD_4WAYM4_FAIL_DISCONNECT
#include <eapol.h>
#endif /* DHD_4WAYM4_FAIL_DISCONNECT */
#ifdef DHD_BANDSTEER
#include <dhd_bandsteer.h>
#endif /* DHD_BANDSTEER */
#ifdef DHD_DEBUG_PAGEALLOC
typedef void (*page_corrupt_cb_t)(void *handle, void *addr_corrupt, size_t len);
void dhd_page_corrupt_cb(void *handle, void *addr_corrupt, size_t len);
extern void register_page_corrupt_cb(page_corrupt_cb_t cb, void *handle);
#endif /* DHD_DEBUG_PAGEALLOC */
#define IP_PROT_RESERVED 0xFF
#ifdef DHD_4WAYM4_FAIL_DISCONNECT
static void dhd_m4_state_handler(struct work_struct *work);
#endif /* DHD_4WAYM4_FAIL_DISCONNECT */
#ifdef DHDTCPSYNC_FLOOD_BLK
static void dhd_blk_tsfl_handler(struct work_struct *work);
#endif /* DHDTCPSYNC_FLOOD_BLK */
#ifdef WL_NATOE
#include <dhd_linux_nfct.h>
#endif /* WL_NATOE */
#if defined(OEM_ANDROID) && defined(SOFTAP)
extern bool ap_cfg_running;
extern bool ap_fw_loaded;
#endif /* OEM_ANDROID && SOFTAP */
#ifdef FIX_CPU_MIN_CLOCK
#include <linux/pm_qos.h>
#endif /* FIX_CPU_MIN_CLOCK */
#ifdef SET_RANDOM_MAC_SOFTAP
#ifndef CONFIG_DHD_SET_RANDOM_MAC_VAL
#define CONFIG_DHD_SET_RANDOM_MAC_VAL 0x001A11
#endif /* CONFIG_DHD_SET_RANDOM_MAC_VAL */
static u32 vendor_oui = CONFIG_DHD_SET_RANDOM_MAC_VAL;
#endif /* SET_RANDOM_MAC_SOFTAP */
#ifdef ENABLE_ADAPTIVE_SCHED
#define DEFAULT_CPUFREQ_THRESH 1000000 /* threshold frequency in kHz: 1000000 kHz = 1 GHz */
#ifndef CUSTOM_CPUFREQ_THRESH
#define CUSTOM_CPUFREQ_THRESH DEFAULT_CPUFREQ_THRESH
#endif /* CUSTOM_CPUFREQ_THRESH */
#endif /* ENABLE_ADAPTIVE_SCHED */
/* enable HOSTIP cache update from the host side when an eth0:N alias interface is up */
#define AOE_IP_ALIAS_SUPPORT 1
#ifdef PROP_TXSTATUS
#include <wlfc_proto.h>
#include <dhd_wlfc.h>
#endif /* PROP_TXSTATUS */
#if defined(OEM_ANDROID)
#include <wl_android.h>
#endif /* OEM_ANDROID */
/* Maximum number of STAs per radio */
#define DHD_MAX_STA 32
#ifdef DHD_EVENT_LOG_FILTER
#include <dhd_event_log_filter.h>
#endif /* DHD_EVENT_LOG_FILTER */
const uint8 wme_fifo2ac[] = { 0, 1, 2, 3, 1, 1 };
const uint8 prio2fifo[8] = { 1, 0, 0, 1, 2, 2, 3, 3 };
#define WME_PRIO2AC(prio) wme_fifo2ac[prio2fifo[(prio)]]
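/*
 * A worked example of the two-step mapping above (illustrative only):
 * WME_PRIO2AC() first maps an 802.1D priority (0-7) to a FIFO via
 * prio2fifo[], then maps that FIFO to a WME access category via
 * wme_fifo2ac[]. E.g. priority 7: prio2fifo[7] == 3 and wme_fifo2ac[3] == 3,
 * so WME_PRIO2AC(7) == 3; priority 1: prio2fifo[1] == 0 and
 * wme_fifo2ac[0] == 0, so WME_PRIO2AC(1) == 0.
 */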
#ifdef ARP_OFFLOAD_SUPPORT
void aoe_update_host_ipv4_table(dhd_pub_t *dhd_pub, u32 ipa, bool add, int idx);
static int dhd_inetaddr_notifier_call(struct notifier_block *this,
	unsigned long event, void *ptr);
static struct notifier_block dhd_inetaddr_notifier = {
	.notifier_call = dhd_inetaddr_notifier_call
};
/* Make sure we do not register the same notifier twice; otherwise a loop is
 * likely to be created in the kernel notifier linked list (with 'next'
 * pointing to itself).
 */
static bool dhd_inetaddr_notifier_registered = FALSE;
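/*
 * A minimal sketch of the guarded registration this flag supports (the
 * actual call sites appear later in this file):
 *
 *	if (!dhd_inetaddr_notifier_registered) {
 *		dhd_inetaddr_notifier_registered = TRUE;
 *		register_inetaddr_notifier(&dhd_inetaddr_notifier);
 *	}
 */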
#endif /* ARP_OFFLOAD_SUPPORT */
#if defined(CONFIG_IPV6) && defined(IPV6_NDO_SUPPORT)
int dhd_inet6addr_notifier_call(struct notifier_block *this,
	unsigned long event, void *ptr);
static struct notifier_block dhd_inet6addr_notifier = {
	.notifier_call = dhd_inet6addr_notifier_call
};
/* Make sure we do not register the same notifier twice; otherwise a loop is
 * likely to be created in the kernel notifier linked list (with 'next'
 * pointing to itself).
 */
static bool dhd_inet6addr_notifier_registered = FALSE;
#endif /* CONFIG_IPV6 && IPV6_NDO_SUPPORT */
#if defined(CONFIG_PM_SLEEP)
#include <linux/suspend.h>
volatile bool dhd_mmc_suspend = FALSE;
DECLARE_WAIT_QUEUE_HEAD(dhd_dpc_wait);
#endif /* defined(CONFIG_PM_SLEEP) */
#if defined(OOB_INTR_ONLY) || defined(BCMSPI_ANDROID)
extern void dhd_enable_oob_intr(struct dhd_bus *bus, bool enable);
#endif /* defined(OOB_INTR_ONLY) || defined(BCMSPI_ANDROID) */
#if defined(OEM_ANDROID)
static void dhd_hang_process(struct work_struct *work_data);
#endif /* OEM_ANDROID */
MODULE_LICENSE("GPL and additional rights");
#ifdef CONFIG_BCM_DETECT_CONSECUTIVE_HANG
#define MAX_CONSECUTIVE_HANG_COUNTS 5
#endif /* CONFIG_BCM_DETECT_CONSECUTIVE_HANG */
#include <dhd_bus.h>
#ifdef DHD_ULP
#include <dhd_ulp.h>
#endif /* DHD_ULP */
#ifndef PROP_TXSTATUS
#define DBUS_RX_BUFFER_SIZE_DHD(net) (net->mtu + net->hard_header_len + dhd->pub.hdrlen)
#else
#define DBUS_RX_BUFFER_SIZE_DHD(net) (net->mtu + net->hard_header_len + dhd->pub.hdrlen + 128)
#endif /* PROP_TXSTATUS */
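/*
 * Note: the receive buffer is sized to hold an MTU-sized payload plus the
 * netdev hard header and the dongle protocol header (dhd->pub.hdrlen);
 * with PROP_TXSTATUS enabled, 128 extra bytes are reserved, presumably as
 * headroom for the wlfc signaling data carried with each frame.
 */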
#ifdef PROP_TXSTATUS
extern bool dhd_wlfc_skip_fc(void *dhdp, uint8 idx);
extern void dhd_wlfc_plat_init(void *dhd);
extern void dhd_wlfc_plat_deinit(void *dhd);
#endif /* PROP_TXSTATUS */
#ifdef USE_DYNAMIC_F2_BLKSIZE
uint sd_f2_blocksize; /* previously 'extern uint sd_f2_blocksize;'; defined here instead (added 2021-05-11) */
extern int dhdsdio_func_blocksize(dhd_pub_t *dhd, int function_num, int block_size);
#endif /* USE_DYNAMIC_F2_BLKSIZE */
/* Linux wireless extension support */
#if defined(WL_WIRELESS_EXT)
#include <wl_iw.h>
extern wl_iw_extra_params_t g_wl_iw_params;
#endif /* defined(WL_WIRELESS_EXT) */
#ifdef CONFIG_PARTIALSUSPEND_SLP
#include <linux/partialsuspend_slp.h>
#define CONFIG_HAS_EARLYSUSPEND
#define DHD_USE_EARLYSUSPEND
#define register_early_suspend register_pre_suspend
#define unregister_early_suspend unregister_pre_suspend
#define early_suspend pre_suspend
#define EARLY_SUSPEND_LEVEL_BLANK_SCREEN 50
#else
#if defined(CONFIG_HAS_EARLYSUSPEND) && defined(DHD_USE_EARLYSUSPEND)
#include <linux/earlysuspend.h>
#endif /* defined(CONFIG_HAS_EARLYSUSPEND) && defined(DHD_USE_EARLYSUSPEND) */
#endif /* CONFIG_PARTIALSUSPEND_SLP */
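/*
 * Note: with CONFIG_PARTIALSUSPEND_SLP, the defines above alias the
 * Android early-suspend API onto the SLP pre-suspend interface, so the
 * common DHD_USE_EARLYSUSPEND code paths compile unchanged on kernels
 * that provide <linux/partialsuspend_slp.h>.
 */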
#if defined(OEM_ANDROID) && (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 11, 0))
#include <linux/nl80211.h>
#endif /* OEM_ANDROID && (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 11, 0)) */
#if defined(PKT_FILTER_SUPPORT) && defined(APF)
static int __dhd_apf_add_filter(struct net_device *ndev, uint32 filter_id,
	u8 *program, uint32 program_len);
static int __dhd_apf_config_filter(struct net_device *ndev, uint32 filter_id,
	uint32 mode, uint32 enable);
static int __dhd_apf_delete_filter(struct net_device *ndev, uint32 filter_id);
#endif /* PKT_FILTER_SUPPORT && APF */
  275. #if defined(WL_CFG80211) && defined(DHD_FILE_DUMP_EVENT) && defined(DHD_FW_COREDUMP)
  276. static int dhd_wait_for_file_dump(dhd_pub_t *dhdp);
  277. #endif /* WL_CFG80211 && DHD_FILE_DUMP_EVENT && DHD_FW_COREDUMP */
  278. #if defined(ARGOS_NOTIFY_CB)
  279. /* ARGOS notifer data */
  280. static struct notifier_block argos_wifi; /* STA */
  281. static struct notifier_block argos_p2p; /* P2P */
  282. argos_rps_ctrl argos_rps_ctrl_data;
  283. #endif // endif
  284. #ifdef DHD_FW_COREDUMP
  285. static int dhd_mem_dump(void *dhd_info, void *event_info, u8 event);
  286. #endif /* DHD_FW_COREDUMP */
  287. #ifdef DHD_LOG_DUMP
  288. struct dhd_log_dump_buf g_dld_buf[DLD_BUFFER_NUM];
  289. /* Only header for log dump buffers is stored in array
  290. * header for sections like 'dhd dump', 'ext trap'
  291. * etc, is not in the array, because they are not log
  292. * ring buffers
  293. */
  294. dld_hdr_t dld_hdrs[DLD_BUFFER_NUM] = {
  295. {GENERAL_LOG_HDR, LOG_DUMP_SECTION_GENERAL},
  296. {PRESERVE_LOG_HDR, LOG_DUMP_SECTION_PRESERVE},
  297. {SPECIAL_LOG_HDR, LOG_DUMP_SECTION_SPECIAL}
  298. };
  299. static int dld_buf_size[DLD_BUFFER_NUM] = {
  300. LOG_DUMP_GENERAL_MAX_BUFSIZE, /* DLD_BUF_TYPE_GENERAL */
  301. LOG_DUMP_PRESERVE_MAX_BUFSIZE, /* DLD_BUF_TYPE_PRESERVE */
  302. LOG_DUMP_SPECIAL_MAX_BUFSIZE, /* DLD_BUF_TYPE_SPECIAL */
  303. };
  304. static void dhd_log_dump_init(dhd_pub_t *dhd);
  305. static void dhd_log_dump_deinit(dhd_pub_t *dhd);
  306. static void dhd_log_dump(void *handle, void *event_info, u8 event);
  307. static int do_dhd_log_dump(dhd_pub_t *dhdp, log_dump_type_t *type);
  308. static int dhd_log_flush(dhd_pub_t *dhdp, log_dump_type_t *type);
  309. static void dhd_get_time_str(dhd_pub_t *dhdp, char *time_str, int size);
  310. void dhd_get_debug_dump_len(void *handle, struct sk_buff *skb, void *event_info, u8 event);
  311. void cfgvendor_log_dump_len(dhd_pub_t *dhdp, log_dump_type_t *type, struct sk_buff *skb);
  312. static void dhd_print_buf_addr(dhd_pub_t *dhdp, char *name, void *buf, unsigned int size);
  313. static void dhd_log_dump_buf_addr(dhd_pub_t *dhdp, log_dump_type_t *type);
  314. #endif /* DHD_LOG_DUMP */
  315. #ifdef DHD_PCIE_NATIVE_RUNTIMEPM
  316. #include <linux/workqueue.h>
  317. #include <linux/pm_runtime.h>
  318. #endif /* DHD_PCIE_NATIVE_RUNTIMEPM */
  319. #ifdef DHD_DEBUG_UART
  320. #include <linux/kmod.h>
  321. #define DHD_DEBUG_UART_EXEC_PATH "/system/bin/wldu"
  322. static void dhd_debug_uart_exec_rd(void *handle, void *event_info, u8 event);
  323. static void dhd_debug_uart_exec(dhd_pub_t *dhdp, char *cmd);
  324. #endif /* DHD_DEBUG_UART */
  325. static int dhd_reboot_callback(struct notifier_block *this, unsigned long code, void *unused);
  326. static struct notifier_block dhd_reboot_notifier = {
  327. .notifier_call = dhd_reboot_callback,
  328. .priority = 1,
  329. };
  330. #ifdef OEM_ANDROID
  331. #ifdef BCMPCIE
  332. static int is_reboot = 0;
  333. #endif /* BCMPCIE */
  334. #endif /* OEM_ANDROID */
  335. dhd_pub_t *g_dhd_pub = NULL;
  336. #if defined(BT_OVER_SDIO)
  337. #include "dhd_bt_interface.h"
  338. #endif /* defined (BT_OVER_SDIO) */
  339. #ifdef WL_STATIC_IF
  340. bool dhd_is_static_ndev(dhd_pub_t *dhdp, struct net_device *ndev);
  341. #endif /* WL_STATIC_IF */
  342. atomic_t exit_in_progress = ATOMIC_INIT(0);
  343. static void dhd_process_daemon_msg(struct sk_buff *skb);
  344. static void dhd_destroy_to_notifier_skt(void);
  345. static int dhd_create_to_notifier_skt(void);
  346. static struct sock *nl_to_event_sk = NULL;
  347. int sender_pid = 0;
  348. #if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 6, 0))
  349. struct netlink_kernel_cfg dhd_netlink_cfg = {
  350. .groups = 1,
  351. .input = dhd_process_daemon_msg,
  352. };
  353. #endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 6, 0)) */
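/* Minimal sketch of how dhd_netlink_cfg is typically consumed on kernels >= 3.6
 * (the NETLINK_USERSOCK protocol id here is an assumption for illustration; the
 * driver's actual unit is defined elsewhere):
 *
 *   nl_to_event_sk = netlink_kernel_create(&init_net, NETLINK_USERSOCK,
 *           &dhd_netlink_cfg);
 *   if (nl_to_event_sk == NULL)
 *           return BCME_ERROR;
 *
 * Datagrams sent to the socket are then delivered to dhd_process_daemon_msg()
 * via the .input hook, and sender_pid can be recorded from the message header.
 */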
#if defined(BT_OVER_SDIO)
/* Flag to indicate if driver is initialized */
uint dhd_driver_init_done = TRUE;
#else
/* Flag to indicate if driver is initialized */
uint dhd_driver_init_done = FALSE;
#endif // endif
/* Flag to indicate if we should download firmware on driver load */
uint dhd_download_fw_on_driverload = TRUE;
/* Definitions to provide the paths to the firmware and nvram,
 * for example: nvram_path[MOD_PARAM_PATHLEN]="/projects/wlan/nvram.txt"
 */
char firmware_path[MOD_PARAM_PATHLEN];
char nvram_path[MOD_PARAM_PATHLEN];
char clm_path[MOD_PARAM_PATHLEN];
#ifdef DHD_UCODE_DOWNLOAD
char ucode_path[MOD_PARAM_PATHLEN];
#endif /* DHD_UCODE_DOWNLOAD */
module_param_string(clm_path, clm_path, MOD_PARAM_PATHLEN, 0660);
/* backup buffers for the firmware and nvram paths */
char fw_bak_path[MOD_PARAM_PATHLEN];
char nv_bak_path[MOD_PARAM_PATHLEN];
/* information string to keep firmware and chip version info visible in the log */
char info_string[MOD_PARAM_INFOLEN];
module_param_string(info_string, info_string, MOD_PARAM_INFOLEN, 0444);
int op_mode = 0;
int disable_proptx = 0;
module_param(op_mode, int, 0644);
#if defined(OEM_ANDROID)
extern int wl_control_wl_start(struct net_device *dev);
#if defined(BCMLXSDMMC)
struct semaphore dhd_registration_sem;
#endif /* BCMLXSDMMC */
#endif /* defined(OEM_ANDROID) */
#ifdef DHD_LOG_DUMP
int logdump_max_filesize = LOG_DUMP_MAX_FILESIZE;
module_param(logdump_max_filesize, int, 0644);
int logdump_max_bufsize = LOG_DUMP_GENERAL_MAX_BUFSIZE;
module_param(logdump_max_bufsize, int, 0644);
int logdump_prsrv_tailsize = DHD_LOG_DUMP_MAX_TAIL_FLUSH_SIZE;
int logdump_periodic_flush = FALSE;
module_param(logdump_periodic_flush, int, 0644);
#ifdef EWP_ECNTRS_LOGGING
int logdump_ecntr_enable = TRUE;
#else
int logdump_ecntr_enable = FALSE;
#endif /* EWP_ECNTRS_LOGGING */
module_param(logdump_ecntr_enable, int, 0644);
#ifdef EWP_RTT_LOGGING
int logdump_rtt_enable = TRUE;
#else
int logdump_rtt_enable = FALSE;
#endif /* EWP_RTT_LOGGING */
module_param(logdump_rtt_enable, int, 0644);
#endif /* DHD_LOG_DUMP */
#ifdef EWP_EDL
int host_edl_support = TRUE;
module_param(host_edl_support, int, 0644);
#endif // endif
/* deferred handlers */
static void dhd_ifadd_event_handler(void *handle, void *event_info, u8 event);
static void dhd_ifdel_event_handler(void *handle, void *event_info, u8 event);
static void dhd_set_mac_addr_handler(void *handle, void *event_info, u8 event);
static void dhd_set_mcast_list_handler(void *handle, void *event_info, u8 event);
#ifdef WL_NATOE
static void dhd_natoe_ct_event_hanlder(void *handle, void *event_info, u8 event);
static void dhd_natoe_ct_ioctl_handler(void *handle, void *event_info, uint8 event);
#endif /* WL_NATOE */
#if defined(CONFIG_IPV6) && defined(IPV6_NDO_SUPPORT)
static void dhd_inet6_work_handler(void *dhd_info, void *event_data, u8 event);
#endif /* CONFIG_IPV6 && IPV6_NDO_SUPPORT */
#ifdef WL_CFG80211
extern void dhd_netdev_free(struct net_device *ndev);
#endif /* WL_CFG80211 */
static dhd_if_t * dhd_get_ifp_by_ndev(dhd_pub_t *dhdp, struct net_device *ndev);
#if (defined(DHD_WET) || defined(DHD_MCAST_REGEN) || defined(DHD_L2_FILTER))
/* update rx_pkt_chainable state of dhd interface */
static void dhd_update_rx_pkt_chainable_state(dhd_pub_t* dhdp, uint32 idx);
#endif /* DHD_WET || DHD_MCAST_REGEN || DHD_L2_FILTER */
/* Error bits */
module_param(dhd_msg_level, int, 0);
#ifdef ARP_OFFLOAD_SUPPORT
/* ARP offload enable */
uint dhd_arp_enable = TRUE;
module_param(dhd_arp_enable, uint, 0);
/* ARP offload agent mode: enable ARP Host Auto-Reply and ARP Peer Auto-Reply */
#ifdef ENABLE_ARP_SNOOP_MODE
uint dhd_arp_mode = (ARP_OL_AGENT | ARP_OL_PEER_AUTO_REPLY | ARP_OL_SNOOP | ARP_OL_HOST_AUTO_REPLY |
	ARP_OL_UPDATE_HOST_CACHE);
#else
uint dhd_arp_mode = ARP_OL_AGENT | ARP_OL_PEER_AUTO_REPLY | ARP_OL_UPDATE_HOST_CACHE;
#endif /* ENABLE_ARP_SNOOP_MODE */
module_param(dhd_arp_mode, uint, 0);
#endif /* ARP_OFFLOAD_SUPPORT */
/* Disable Prop tx */
module_param(disable_proptx, int, 0644);
/* load firmware and/or nvram values from the filesystem */
module_param_string(firmware_path, firmware_path, MOD_PARAM_PATHLEN, 0660);
module_param_string(nvram_path, nvram_path, MOD_PARAM_PATHLEN, 0660);
#ifdef DHD_UCODE_DOWNLOAD
module_param_string(ucode_path, ucode_path, MOD_PARAM_PATHLEN, 0660);
#endif /* DHD_UCODE_DOWNLOAD */
/* wl event forwarding */
#ifdef WL_EVENT_ENAB
uint wl_event_enable = true;
#else
uint wl_event_enable = false;
#endif /* WL_EVENT_ENAB */
module_param(wl_event_enable, uint, 0660);
/* logtrace packet sendup */
#ifdef LOGTRACE_PKT_SENDUP
uint logtrace_pkt_sendup = true;
#else
uint logtrace_pkt_sendup = false;
#endif /* LOGTRACE_PKT_SENDUP */
module_param(logtrace_pkt_sendup, uint, 0660);
/* Watchdog interval */
/* extend watchdog expiration to 2 seconds when DPC is running */
#define WATCHDOG_EXTEND_INTERVAL (2000)
uint dhd_watchdog_ms = CUSTOM_DHD_WATCHDOG_MS;
module_param(dhd_watchdog_ms, uint, 0);
#ifdef DHD_PCIE_RUNTIMEPM
uint dhd_runtimepm_ms = CUSTOM_DHD_RUNTIME_MS;
#endif /* DHD_PCIE_RUNTIMEPM */
#if defined(DHD_DEBUG)
/* Console poll interval */
#if defined(OEM_ANDROID)
uint dhd_console_ms = 0;
#else
uint dhd_console_ms = 250;
#endif /* OEM_ANDROID */
module_param(dhd_console_ms, uint, 0644);
#else
uint dhd_console_ms = 0;
#endif /* DHD_DEBUG */
uint dhd_slpauto = TRUE;
module_param(dhd_slpauto, uint, 0);
#ifdef PKT_FILTER_SUPPORT
/* Global Pkt filter enable control */
uint dhd_pkt_filter_enable = TRUE;
module_param(dhd_pkt_filter_enable, uint, 0);
#endif // endif
/* Pkt filter init setup */
uint dhd_pkt_filter_init = 0;
module_param(dhd_pkt_filter_init, uint, 0);
/* Pkt filter mode control */
#ifdef GAN_LITE_NAT_KEEPALIVE_FILTER
uint dhd_master_mode = FALSE;
#else
uint dhd_master_mode = TRUE;
#endif /* GAN_LITE_NAT_KEEPALIVE_FILTER */
module_param(dhd_master_mode, uint, 0);
int dhd_watchdog_prio = 0;
module_param(dhd_watchdog_prio, int, 0);
/* DPC thread priority */
int dhd_dpc_prio = CUSTOM_DPC_PRIO_SETTING;
module_param(dhd_dpc_prio, int, 0);
/* RX frame thread priority */
int dhd_rxf_prio = CUSTOM_RXF_PRIO_SETTING;
module_param(dhd_rxf_prio, int, 0);
#if !defined(BCMDHDUSB)
extern int dhd_dongle_ramsize;
module_param(dhd_dongle_ramsize, int, 0);
#endif /* !BCMDHDUSB */
#ifdef WL_CFG80211
int passive_channel_skip = 0;
module_param(passive_channel_skip, int, (S_IRUSR|S_IWUSR));
#endif /* WL_CFG80211 */
#ifdef DHD_MSI_SUPPORT
uint enable_msi = TRUE;
module_param(enable_msi, uint, 0);
#endif /* DHD_MSI_SUPPORT */
#ifdef DHD_SSSR_DUMP
int dhdpcie_sssr_dump_get_before_after_len(dhd_pub_t *dhd, uint32 *arr_len);
extern uint support_sssr_dump;
module_param(support_sssr_dump, uint, 0);
#endif /* DHD_SSSR_DUMP */
/* Keep track of number of instances */
static int dhd_found = 0;
static int instance_base = 0; /* Starting instance number */
module_param(instance_base, int, 0644);
/* Takes the value of LL of the OTP param customvar2=0xKKLLMMNN.
 * LL is the module variant
 */
uint32 hw_module_variant = 0;
module_param(hw_module_variant, uint, 0644);
#if defined(DHD_LB_RXP)
static int dhd_napi_weight = 32;
module_param(dhd_napi_weight, int, 0644);
#endif /* DHD_LB_RXP */
#ifdef PCIE_FULL_DONGLE
extern int h2d_max_txpost;
module_param(h2d_max_txpost, int, 0644);
extern uint dma_ring_indices;
module_param(dma_ring_indices, uint, 0644);
extern bool h2d_phase;
module_param(h2d_phase, bool, 0644);
extern bool force_trap_bad_h2d_phase;
module_param(force_trap_bad_h2d_phase, bool, 0644);
#endif /* PCIE_FULL_DONGLE */
#ifdef FORCE_TPOWERON
/*
 * On Fire's reference platform, coming out of L1.2,
 * there is a constant delay of 45us between CLKREQ# and stable REFCLK.
 * Due to this delay, with tPowerOn < 50us,
 * there is a chance that the refclk sense triggers on noise.
 *
 * 0x29, when written to L1SSControl2, translates to 50us.
 */
#define FORCE_TPOWERON_50US 0x29
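/* Worked decode of the 0x29 encoding (PCIe L1 PM Substates Control 2 register,
 * T_PowerOn field): bits [1:0] select the scale (01b = 10us units) and bits
 * [7:3] the multiplier, so
 *
 *   0x29 = 0010 1001b -> multiplier 00101b (5) * 10us scale = 50us.
 */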
uint32 tpoweron_scale = FORCE_TPOWERON_50US; /* default 50us */
module_param(tpoweron_scale, uint, 0644);
#endif /* FORCE_TPOWERON */
#ifdef SHOW_LOGTRACE
#if defined(CUSTOMER_HW4_DEBUG)
static char *logstrs_path = PLATFORM_PATH"logstrs.bin";
char *st_str_file_path = PLATFORM_PATH"rtecdc.bin";
static char *map_file_path = PLATFORM_PATH"rtecdc.map";
static char *rom_st_str_file_path = PLATFORM_PATH"roml.bin";
static char *rom_map_file_path = PLATFORM_PATH"roml.map";
#elif defined(CUSTOMER_HW2) || defined(BOARD_HIKEY)
static char *logstrs_path = "/data/misc/wifi/logstrs.bin";
char *st_str_file_path = "/data/misc/wifi/rtecdc.bin";
static char *map_file_path = "/data/misc/wifi/rtecdc.map";
static char *rom_st_str_file_path = "/data/misc/wifi/roml.bin";
static char *rom_map_file_path = "/data/misc/wifi/roml.map";
#elif defined(OEM_ANDROID) /* For Brix KK Live Image */
static char *logstrs_path = "/installmedia/logstrs.bin";
char *st_str_file_path = "/installmedia/rtecdc.bin";
static char *map_file_path = "/installmedia/rtecdc.map";
static char *rom_st_str_file_path = "/installmedia/roml.bin";
static char *rom_map_file_path = "/installmedia/roml.map";
#else /* For Linux platforms */
static char *logstrs_path = "/root/logstrs.bin";
char *st_str_file_path = "/root/rtecdc.bin";
static char *map_file_path = "/root/rtecdc.map";
static char *rom_st_str_file_path = "/root/roml.bin";
static char *rom_map_file_path = "/root/roml.map";
#endif /* CUSTOMER_HW4_DEBUG || CUSTOMER_HW2 || BOARD_HIKEY */
static char *ram_file_str = "rtecdc";
static char *rom_file_str = "roml";
module_param(logstrs_path, charp, S_IRUGO);
module_param(st_str_file_path, charp, S_IRUGO);
module_param(map_file_path, charp, S_IRUGO);
module_param(rom_st_str_file_path, charp, S_IRUGO);
module_param(rom_map_file_path, charp, S_IRUGO);
static int dhd_init_logstrs_array(osl_t *osh, dhd_event_log_t *temp);
static int dhd_read_map(osl_t *osh, char *fname, uint32 *ramstart, uint32 *rodata_start,
	uint32 *rodata_end);
static int dhd_init_static_strs_array(osl_t *osh, dhd_event_log_t *temp, char *str_file,
	char *map_file);
#endif /* SHOW_LOGTRACE */
#ifdef BCMSDIO
#define DHD_IF_ROLE(pub, idx) ((pub)->info->iflist[idx]->role)
#define DHD_IF_ROLE_AP(pub, idx) (DHD_IF_ROLE(pub, idx) == WLC_E_IF_ROLE_AP)
#define DHD_IF_ROLE_STA(pub, idx) (DHD_IF_ROLE(pub, idx) == WLC_E_IF_ROLE_STA)
#define DHD_IF_ROLE_P2PGO(pub, idx) (DHD_IF_ROLE(pub, idx) == WLC_E_IF_ROLE_P2P_GO)
void dhd_set_role(dhd_pub_t *dhdp, int role, int bssidx)
{
	int ifidx = dhd_bssidx2idx(dhdp, bssidx);
	DHD_TRACE(("dhd_set_role ifidx %d role %d\n", ifidx, role));
	dhdp->info->iflist[ifidx]->role = role;
}
#endif /* BCMSDIO */
#ifdef USE_WFA_CERT_CONF
int g_frameburst = 1;
#endif /* USE_WFA_CERT_CONF */
static int dhd_get_pend_8021x_cnt(dhd_info_t *dhd);
/* The DHD perimeter lock is only used in routers with bypass forwarding. */
#define DHD_PERIM_RADIO_INIT() do { /* noop */ } while (0)
#define DHD_PERIM_LOCK_TRY(unit, flag) do { /* noop */ } while (0)
#define DHD_PERIM_UNLOCK_TRY(unit, flag) do { /* noop */ } while (0)
#define DHD_IF_STA_LIST_LOCK_INIT(ifp) spin_lock_init(&(ifp)->sta_list_lock)
#define DHD_IF_STA_LIST_LOCK(ifp, flags) \
	spin_lock_irqsave(&(ifp)->sta_list_lock, (flags))
#define DHD_IF_STA_LIST_UNLOCK(ifp, flags) \
	spin_unlock_irqrestore(&(ifp)->sta_list_lock, (flags))
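/* Typical usage of the STA list lock pair, mirroring the pattern in
 * dhd_find_sta()/dhd_add_sta() below (sketch only):
 *
 *   unsigned long flags;
 *   DHD_IF_STA_LIST_LOCK(ifp, flags);
 *   list_add_tail(&sta->list, &ifp->sta_list);
 *   DHD_IF_STA_LIST_UNLOCK(ifp, flags);
 *
 * The irqsave variant is used so the list can be manipulated safely from any
 * caller context.
 */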
#if defined(DHD_IGMP_UCQUERY) || defined(DHD_UCAST_UPNP)
static struct list_head * dhd_sta_list_snapshot(dhd_info_t *dhd, dhd_if_t *ifp,
	struct list_head *snapshot_list);
static void dhd_sta_list_snapshot_free(dhd_info_t *dhd, struct list_head *snapshot_list);
#define DHD_IF_WMF_UCFORWARD_LOCK(dhd, ifp, slist) ({ dhd_sta_list_snapshot(dhd, ifp, slist); })
#define DHD_IF_WMF_UCFORWARD_UNLOCK(dhd, slist) ({ dhd_sta_list_snapshot_free(dhd, slist); })
#endif /* DHD_IGMP_UCQUERY || DHD_UCAST_UPNP */
/* Control fw roaming */
#ifdef BCMCCX
uint dhd_roam_disable = 0;
#else
#ifdef OEM_ANDROID
uint dhd_roam_disable = 0;
#else
uint dhd_roam_disable = 1;
#endif // endif
#endif /* BCMCCX */
#ifdef BCMDBGFS
extern void dhd_dbgfs_init(dhd_pub_t *dhdp);
extern void dhd_dbgfs_remove(void);
#endif // endif
static uint pcie_txs_metadata_enable = 0; /* Enable TX status metadata report */
module_param(pcie_txs_metadata_enable, int, 0);
/* Control radio state */
uint dhd_radio_up = 1;
/* Network interface name */
char iface_name[IFNAMSIZ] = {'\0'};
module_param_string(iface_name, iface_name, IFNAMSIZ, 0);
#ifdef WL_VIF_SUPPORT
/* Virtual interface name */
char vif_name[IFNAMSIZ] = "wlan";
module_param_string(vif_name, vif_name, IFNAMSIZ, 0);
int vif_num = 0;
module_param(vif_num, int, 0);
#endif /* WL_VIF_SUPPORT */
/* The following are specific to the SDIO dongle */
/* IOCTL response timeout */
int dhd_ioctl_timeout_msec = IOCTL_RESP_TIMEOUT;
/* DS Exit response timeout */
int ds_exit_timeout_msec = DS_EXIT_TIMEOUT;
/* Idle timeout for backplane clock */
int dhd_idletime = DHD_IDLETIME_TICKS;
module_param(dhd_idletime, int, 0);
/* Use polling */
uint dhd_poll = FALSE;
module_param(dhd_poll, uint, 0);
/* Use interrupts */
uint dhd_intr = TRUE;
module_param(dhd_intr, uint, 0);
/* SDIO Drive Strength (in milliamps) */
uint dhd_sdiod_drive_strength = 6;
module_param(dhd_sdiod_drive_strength, uint, 0);
#ifdef BCMSDIO
/* Tx/Rx bounds */
extern uint dhd_txbound;
extern uint dhd_rxbound;
module_param(dhd_txbound, uint, 0);
module_param(dhd_rxbound, uint, 0);
/* Deferred transmits */
extern uint dhd_deferred_tx;
module_param(dhd_deferred_tx, uint, 0);
#endif /* BCMSDIO */
#ifdef SDTEST
/* Echo packet generator (pkts/s) */
uint dhd_pktgen = 0;
module_param(dhd_pktgen, uint, 0);
/* Echo packet len (0 => sawtooth, max 2040) */
uint dhd_pktgen_len = 0;
module_param(dhd_pktgen_len, uint, 0);
#endif /* SDTEST */
#if defined(BCMSUP_4WAY_HANDSHAKE)
/* Use the in-dongle supplicant for the 4-way handshake */
#if defined(WLFBT) || defined(WL_ENABLE_IDSUP)
/* Enable idsup by default (if supported in fw) */
uint dhd_use_idsup = 1;
#else
uint dhd_use_idsup = 0;
#endif /* WLFBT || WL_ENABLE_IDSUP */
module_param(dhd_use_idsup, uint, 0);
#endif /* BCMSUP_4WAY_HANDSHAKE */
#if defined(OEM_ANDROID)
/* Allow delayed firmware download for debug purposes */
int allow_delay_fwdl = FALSE;
#else
int allow_delay_fwdl = TRUE;
#endif // endif
module_param(allow_delay_fwdl, int, 0);
#ifdef ECOUNTER_PERIODIC_DISABLE
uint enable_ecounter = FALSE;
#else
uint enable_ecounter = TRUE;
#endif // endif
module_param(enable_ecounter, uint, 0);
/* TCM verification flag */
uint dhd_tcm_test_enable = FALSE;
module_param(dhd_tcm_test_enable, uint, 0644);
extern char dhd_version[];
extern char fw_version[];
extern char clm_version[];
int dhd_net_bus_devreset(struct net_device *dev, uint8 flag);
static void dhd_net_if_lock_local(dhd_info_t *dhd);
static void dhd_net_if_unlock_local(dhd_info_t *dhd);
static void dhd_suspend_lock(dhd_pub_t *dhdp);
static void dhd_suspend_unlock(dhd_pub_t *dhdp);
/* Monitor interface */
int dhd_monitor_init(void *dhd_pub);
int dhd_monitor_uninit(void);
#ifdef DHD_PM_CONTROL_FROM_FILE
bool g_pm_control;
#ifdef DHD_EXPORT_CNTL_FILE
int pmmode_val;
#endif /* DHD_EXPORT_CNTL_FILE */
void sec_control_pm(dhd_pub_t *dhd, uint *);
#endif /* DHD_PM_CONTROL_FROM_FILE */
#if defined(WL_WIRELESS_EXT)
struct iw_statistics *dhd_get_wireless_stats(struct net_device *dev);
#endif /* defined(WL_WIRELESS_EXT) */
static void dhd_dpc(ulong data);
/* forward decl */
extern int dhd_wait_pend8021x(struct net_device *dev);
void dhd_os_wd_timer_extend(void *bus, bool extend);
#ifdef TOE
#ifndef BDC
#error TOE requires BDC
#endif /* !BDC */
static int dhd_toe_get(dhd_info_t *dhd, int idx, uint32 *toe_ol);
static int dhd_toe_set(dhd_info_t *dhd, int idx, uint32 toe_ol);
#endif /* TOE */
static int dhd_wl_host_event(dhd_info_t *dhd, int ifidx, void *pktdata, uint16 pktlen,
	wl_event_msg_t *event_ptr, void **data_ptr);
#if defined(CONFIG_PM_SLEEP)
static int dhd_pm_callback(struct notifier_block *nfb, unsigned long action, void *ignored)
{
	int ret = NOTIFY_DONE;
	bool suspend = FALSE;
#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__)
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wcast-qual"
#endif // endif
	dhd_info_t *dhdinfo = (dhd_info_t*)container_of(nfb, struct dhd_info, pm_notifier);
#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__)
#pragma GCC diagnostic pop
#endif // endif
	BCM_REFERENCE(dhdinfo);
	BCM_REFERENCE(suspend);
	switch (action) {
	case PM_HIBERNATION_PREPARE:
	case PM_SUSPEND_PREPARE:
		suspend = TRUE;
		break;
	case PM_POST_HIBERNATION:
	case PM_POST_SUSPEND:
		suspend = FALSE;
		break;
	}
#if defined(SUPPORT_P2P_GO_PS) && defined(PROP_TXSTATUS)
	if (suspend) {
		DHD_OS_WAKE_LOCK_WAIVE(&dhdinfo->pub);
		dhd_wlfc_suspend(&dhdinfo->pub);
		DHD_OS_WAKE_LOCK_RESTORE(&dhdinfo->pub);
	} else {
		dhd_wlfc_resume(&dhdinfo->pub);
	}
#endif /* defined(SUPPORT_P2P_GO_PS) && defined(PROP_TXSTATUS) */
	dhd_mmc_suspend = suspend;
	smp_mb();
	return ret;
}
/* to make sure we won't register the same notifier twice, otherwise a loop is likely to be
 * created in the kernel notifier linked list (with 'next' pointing to itself)
 */
static bool dhd_pm_notifier_registered = FALSE;
extern int register_pm_notifier(struct notifier_block *nb);
extern int unregister_pm_notifier(struct notifier_block *nb);
#endif /* CONFIG_PM_SLEEP */
/* Request scheduling of the bus rx frame */
static void dhd_sched_rxf(dhd_pub_t *dhdp, void *skb);
static void dhd_os_rxflock(dhd_pub_t *pub);
static void dhd_os_rxfunlock(dhd_pub_t *pub);
#if defined(DHD_H2D_LOG_TIME_SYNC)
static void
dhd_deferred_work_rte_log_time_sync(void *handle, void *event_info, u8 event);
#endif /* DHD_H2D_LOG_TIME_SYNC */
/** priv_link is the link between the netdev and the dhd_if and dhd_info structs. */
typedef struct dhd_dev_priv {
	dhd_info_t * dhd;	/* cached pointer to dhd_info in netdevice priv */
	dhd_if_t * ifp;		/* cached pointer to dhd_if in netdevice priv */
	int ifidx;		/* interface index */
	void * lkup;
} dhd_dev_priv_t;
#define DHD_DEV_PRIV_SIZE (sizeof(dhd_dev_priv_t))
#define DHD_DEV_PRIV(dev) ((dhd_dev_priv_t *)DEV_PRIV(dev))
#define DHD_DEV_INFO(dev) (((dhd_dev_priv_t *)DEV_PRIV(dev))->dhd)
#define DHD_DEV_IFP(dev) (((dhd_dev_priv_t *)DEV_PRIV(dev))->ifp)
#define DHD_DEV_IFIDX(dev) (((dhd_dev_priv_t *)DEV_PRIV(dev))->ifidx)
#define DHD_DEV_LKUP(dev) (((dhd_dev_priv_t *)DEV_PRIV(dev))->lkup)
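/* Access-pattern sketch for the cached private fields (assuming DEV_PRIV()
 * resolves to the netdev private area, e.g. netdev_priv()):
 *
 *   dhd_info_t *dhd = DHD_DEV_INFO(net);   // driver instance backpointer
 *   dhd_if_t   *ifp = DHD_DEV_IFP(net);    // per-interface object
 *   int       ifidx = DHD_DEV_IFIDX(net);  // index, DHD_BAD_IF when cleared
 */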
#if defined(DHD_OF_SUPPORT)
extern int dhd_wlan_init(void);
#endif /* defined(DHD_OF_SUPPORT) */
/** Clear the dhd net_device's private structure. */
static inline void
dhd_dev_priv_clear(struct net_device * dev)
{
	dhd_dev_priv_t * dev_priv;
	ASSERT(dev != (struct net_device *)NULL);
	dev_priv = DHD_DEV_PRIV(dev);
	dev_priv->dhd = (dhd_info_t *)NULL;
	dev_priv->ifp = (dhd_if_t *)NULL;
	dev_priv->ifidx = DHD_BAD_IF;
	dev_priv->lkup = (void *)NULL;
}
/** Setup the dhd net_device's private structure. */
static inline void
dhd_dev_priv_save(struct net_device * dev, dhd_info_t * dhd, dhd_if_t * ifp,
	int ifidx)
{
	dhd_dev_priv_t * dev_priv;
	ASSERT(dev != (struct net_device *)NULL);
	dev_priv = DHD_DEV_PRIV(dev);
	dev_priv->dhd = dhd;
	dev_priv->ifp = ifp;
	dev_priv->ifidx = ifidx;
}
/* Return interface pointer */
struct dhd_if * dhd_get_ifp(dhd_pub_t *dhdp, uint32 ifidx)
{
	ASSERT(ifidx < DHD_MAX_IFS);
	if (!dhdp || !dhdp->info || ifidx >= DHD_MAX_IFS)
		return NULL;
	return dhdp->info->iflist[ifidx];
}
/** Dummy objects are defined with state representing bad|down.
 * Performance gains come from reducing branch conditionals, improving instruction
 * parallelism and dual issue, reducing load shadows, and making better use of
 * larger pipelines.
 * Use DHD_XXX_NULL instead of (dhd_xxx_t *)NULL whenever an object pointer
 * is accessed via the dhd_sta_t.
 */
/* Dummy dhd_info object */
dhd_info_t dhd_info_null = {
	.pub = {
		.info = &dhd_info_null,
#ifdef DHDTCPACK_SUPPRESS
		.tcpack_sup_mode = TCPACK_SUP_REPLACE,
#endif /* DHDTCPACK_SUPPRESS */
		.up = FALSE,
		.busstate = DHD_BUS_DOWN
	}
};
#define DHD_INFO_NULL (&dhd_info_null)
#define DHD_PUB_NULL (&dhd_info_null.pub)
/* Dummy netdevice object */
struct net_device dhd_net_dev_null = {
	.reg_state = NETREG_UNREGISTERED
};
#define DHD_NET_DEV_NULL (&dhd_net_dev_null)
/* Dummy dhd_if object */
dhd_if_t dhd_if_null = {
#ifdef WMF
	.wmf = { .wmf_enable = TRUE },
#endif // endif
	.info = DHD_INFO_NULL,
	.net = DHD_NET_DEV_NULL,
	.idx = DHD_BAD_IF
};
#define DHD_IF_NULL (&dhd_if_null)
#define DHD_STA_NULL ((dhd_sta_t *)NULL)
/** Interface STA list management. */
/** Alloc/Free a dhd_sta object from the dhd instance's sta_pool. */
static void dhd_sta_free(dhd_pub_t *pub, dhd_sta_t *sta);
static dhd_sta_t * dhd_sta_alloc(dhd_pub_t * dhdp);
/* Delete a dhd_sta or flush all dhd_sta in an interface's sta_list. */
static void dhd_if_del_sta_list(dhd_if_t * ifp);
static void dhd_if_flush_sta(dhd_if_t * ifp);
/* Construct/Destruct a sta pool. */
static int dhd_sta_pool_init(dhd_pub_t *dhdp, int max_sta);
static void dhd_sta_pool_fini(dhd_pub_t *dhdp, int max_sta);
/* Clear the pool of dhd_sta_t objects for built-in type driver */
static void dhd_sta_pool_clear(dhd_pub_t *dhdp, int max_sta);
/** Reset a dhd_sta object and free it into the dhd pool. */
static void
dhd_sta_free(dhd_pub_t * dhdp, dhd_sta_t * sta)
{
#ifdef PCIE_FULL_DONGLE
	int prio;
#endif // endif
	ASSERT((sta != DHD_STA_NULL) && (sta->idx != ID16_INVALID));
	ASSERT((dhdp->staid_allocator != NULL) && (dhdp->sta_pool != NULL));
#ifdef PCIE_FULL_DONGLE
	/*
	 * Flush and free all packets in all flowrings' queues belonging to sta.
	 * Packets in the flow ring will be flushed later.
	 */
	for (prio = 0; prio < (int)NUMPRIO; prio++) {
		uint16 flowid = sta->flowid[prio];
		if (flowid != FLOWID_INVALID) {
			unsigned long flags;
			flow_ring_node_t * flow_ring_node;
#ifdef DHDTCPACK_SUPPRESS
			/* Clean tcp_ack_info_tbl in order to prevent access to a flushed pkt
			 * when a new packet comes in from the network stack.
			 */
			dhd_tcpack_info_tbl_clean(dhdp);
#endif /* DHDTCPACK_SUPPRESS */
			flow_ring_node = dhd_flow_ring_node(dhdp, flowid);
			if (flow_ring_node) {
				flow_queue_t *queue = &flow_ring_node->queue;
				DHD_FLOWRING_LOCK(flow_ring_node->lock, flags);
				flow_ring_node->status = FLOW_RING_STATUS_STA_FREEING;
				if (!DHD_FLOW_QUEUE_EMPTY(queue)) {
					void * pkt;
					while ((pkt = dhd_flow_queue_dequeue(dhdp, queue)) !=
						NULL) {
						PKTFREE(dhdp->osh, pkt, TRUE);
					}
				}
				DHD_FLOWRING_UNLOCK(flow_ring_node->lock, flags);
				ASSERT(DHD_FLOW_QUEUE_EMPTY(queue));
			}
		}
		sta->flowid[prio] = FLOWID_INVALID;
	}
#endif /* PCIE_FULL_DONGLE */
	id16_map_free(dhdp->staid_allocator, sta->idx);
	DHD_CUMM_CTR_INIT(&sta->cumm_ctr);
	sta->ifp = DHD_IF_NULL; /* dummy dhd_if object */
	sta->ifidx = DHD_BAD_IF;
	bzero(sta->ea.octet, ETHER_ADDR_LEN);
	INIT_LIST_HEAD(&sta->list);
	sta->idx = ID16_INVALID; /* implying free */
}
/** Allocate a dhd_sta object from the dhd pool. */
static dhd_sta_t *
dhd_sta_alloc(dhd_pub_t * dhdp)
{
	uint16 idx;
	dhd_sta_t * sta;
	dhd_sta_pool_t * sta_pool;
	ASSERT((dhdp->staid_allocator != NULL) && (dhdp->sta_pool != NULL));
	idx = id16_map_alloc(dhdp->staid_allocator);
	if (idx == ID16_INVALID) {
		DHD_ERROR(("%s: cannot get free staid\n", __FUNCTION__));
		return DHD_STA_NULL;
	}
	sta_pool = (dhd_sta_pool_t *)(dhdp->sta_pool);
	sta = &sta_pool[idx];
	ASSERT((sta->idx == ID16_INVALID) &&
		(sta->ifp == DHD_IF_NULL) && (sta->ifidx == DHD_BAD_IF));
	DHD_CUMM_CTR_INIT(&sta->cumm_ctr);
	sta->idx = idx; /* implying allocated */
	return sta;
}
/** Delete all STAs in an interface's STA list. */
static void
dhd_if_del_sta_list(dhd_if_t *ifp)
{
	dhd_sta_t *sta, *next;
	unsigned long flags;
	DHD_IF_STA_LIST_LOCK(ifp, flags);
#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__)
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wcast-qual"
#endif // endif
	list_for_each_entry_safe(sta, next, &ifp->sta_list, list) {
		list_del(&sta->list);
		dhd_sta_free(&ifp->info->pub, sta);
	}
#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__)
#pragma GCC diagnostic pop
#endif // endif
	DHD_IF_STA_LIST_UNLOCK(ifp, flags);
	return;
}
/** Router/GMAC3: Flush all station entries in the forwarder's WOFA database. */
static void
dhd_if_flush_sta(dhd_if_t * ifp)
{
}
/** Construct a pool of dhd_sta_t objects to be used by interfaces. */
static int
dhd_sta_pool_init(dhd_pub_t *dhdp, int max_sta)
{
	int idx, sta_pool_memsz;
#ifdef PCIE_FULL_DONGLE
	int prio;
#endif /* PCIE_FULL_DONGLE */
	dhd_sta_t * sta;
	dhd_sta_pool_t * sta_pool;
	void * staid_allocator;
	ASSERT(dhdp != (dhd_pub_t *)NULL);
	ASSERT((dhdp->staid_allocator == NULL) && (dhdp->sta_pool == NULL));
	/* dhd_sta objects per radio are managed in a table. id#0 is reserved. */
	staid_allocator = id16_map_init(dhdp->osh, max_sta, 1);
	if (staid_allocator == NULL) {
		DHD_ERROR(("%s: sta id allocator init failure\n", __FUNCTION__));
		return BCME_ERROR;
	}
	/* Pre-allocate a pool of dhd_sta objects (one extra). */
	sta_pool_memsz = ((max_sta + 1) * sizeof(dhd_sta_t)); /* skip idx 0 */
	sta_pool = (dhd_sta_pool_t *)MALLOC(dhdp->osh, sta_pool_memsz);
	if (sta_pool == NULL) {
		DHD_ERROR(("%s: sta table alloc failure\n", __FUNCTION__));
		id16_map_fini(dhdp->osh, staid_allocator);
		return BCME_ERROR;
	}
	dhdp->sta_pool = sta_pool;
	dhdp->staid_allocator = staid_allocator;
	/* Initialize all sta(s) for the pre-allocated free pool. */
	bzero((uchar *)sta_pool, sta_pool_memsz);
	for (idx = max_sta; idx >= 1; idx--) { /* skip sta_pool[0] */
		sta = &sta_pool[idx];
		sta->idx = id16_map_alloc(staid_allocator);
		ASSERT(sta->idx <= max_sta);
	}
	/* Now place them into the pre-allocated free pool. */
	for (idx = 1; idx <= max_sta; idx++) {
		sta = &sta_pool[idx];
#ifdef PCIE_FULL_DONGLE
		for (prio = 0; prio < (int)NUMPRIO; prio++) {
			sta->flowid[prio] = FLOWID_INVALID; /* Flow rings do not exist */
		}
#endif /* PCIE_FULL_DONGLE */
		dhd_sta_free(dhdp, sta);
	}
	return BCME_OK;
}
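/* Lifecycle sketch (illustrative; MAX_STA_COUNT as the pool size is an
 * assumption, not a symbol defined here): the pool is built once per radio and
 * torn down symmetrically after all interfaces have released their STAs:
 *
 *   if (dhd_sta_pool_init(dhdp, MAX_STA_COUNT) != BCME_OK)
 *           goto fail;
 *   ...
 *   dhd_sta_pool_fini(dhdp, MAX_STA_COUNT);
 */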
/** Destruct the pool of dhd_sta_t objects.
 * Caller must ensure that no STA objects are currently associated with an if.
 */
static void
dhd_sta_pool_fini(dhd_pub_t *dhdp, int max_sta)
{
	dhd_sta_pool_t * sta_pool = (dhd_sta_pool_t *)dhdp->sta_pool;
	if (sta_pool) {
		int idx;
		int sta_pool_memsz = ((max_sta + 1) * sizeof(dhd_sta_t));
		for (idx = 1; idx <= max_sta; idx++) {
			ASSERT(sta_pool[idx].ifp == DHD_IF_NULL);
			ASSERT(sta_pool[idx].idx == ID16_INVALID);
		}
		MFREE(dhdp->osh, dhdp->sta_pool, sta_pool_memsz);
		dhdp->sta_pool = NULL;
	}
	id16_map_fini(dhdp->osh, dhdp->staid_allocator);
	dhdp->staid_allocator = NULL;
}
/* Clear the pool of dhd_sta_t objects for built-in type driver */
static void
dhd_sta_pool_clear(dhd_pub_t *dhdp, int max_sta)
{
	int idx, sta_pool_memsz;
#ifdef PCIE_FULL_DONGLE
	int prio;
#endif /* PCIE_FULL_DONGLE */
	dhd_sta_t * sta;
	dhd_sta_pool_t * sta_pool;
	void *staid_allocator;
	if (!dhdp) {
		DHD_ERROR(("%s: dhdp is NULL\n", __FUNCTION__));
		return;
	}
	sta_pool = (dhd_sta_pool_t *)dhdp->sta_pool;
	staid_allocator = dhdp->staid_allocator;
	if (!sta_pool) {
		DHD_ERROR(("%s: sta_pool is NULL\n", __FUNCTION__));
		return;
	}
	if (!staid_allocator) {
		DHD_ERROR(("%s: staid_allocator is NULL\n", __FUNCTION__));
		return;
	}
	/* clear free pool */
	sta_pool_memsz = ((max_sta + 1) * sizeof(dhd_sta_t));
	bzero((uchar *)sta_pool, sta_pool_memsz);
	/* dhd_sta objects per radio are managed in a table. id#0 is reserved. */
	id16_map_clear(staid_allocator, max_sta, 1);
	/* Initialize all sta(s) for the pre-allocated free pool. */
	for (idx = max_sta; idx >= 1; idx--) { /* skip sta_pool[0] */
		sta = &sta_pool[idx];
		sta->idx = id16_map_alloc(staid_allocator);
		ASSERT(sta->idx <= max_sta);
	}
	/* Now place them into the pre-allocated free pool. */
	for (idx = 1; idx <= max_sta; idx++) {
		sta = &sta_pool[idx];
#ifdef PCIE_FULL_DONGLE
		for (prio = 0; prio < (int)NUMPRIO; prio++) {
			sta->flowid[prio] = FLOWID_INVALID; /* Flow rings do not exist */
		}
#endif /* PCIE_FULL_DONGLE */
		dhd_sta_free(dhdp, sta);
	}
}
/** Find the STA with MAC address ea in an interface's STA list. */
dhd_sta_t *
dhd_find_sta(void *pub, int ifidx, void *ea)
{
	dhd_sta_t *sta;
	dhd_if_t *ifp;
	unsigned long flags;
	ASSERT(ea != NULL);
	ifp = dhd_get_ifp((dhd_pub_t *)pub, ifidx);
	if (ifp == NULL)
		return DHD_STA_NULL;
	DHD_IF_STA_LIST_LOCK(ifp, flags);
#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__)
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wcast-qual"
#endif // endif
	list_for_each_entry(sta, &ifp->sta_list, list) {
		if (!memcmp(sta->ea.octet, ea, ETHER_ADDR_LEN)) {
			DHD_INFO(("%s: Found STA " MACDBG "\n",
				__FUNCTION__, MAC2STRDBG((char *)ea)));
			DHD_IF_STA_LIST_UNLOCK(ifp, flags);
			return sta;
		}
	}
#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__)
#pragma GCC diagnostic pop
#endif // endif
	DHD_IF_STA_LIST_UNLOCK(ifp, flags);
	return DHD_STA_NULL;
}
/** Add a STA to the interface's STA list. */
dhd_sta_t *
dhd_add_sta(void *pub, int ifidx, void *ea)
{
	dhd_sta_t *sta;
	dhd_if_t *ifp;
	unsigned long flags;
	ASSERT(ea != NULL);
	ifp = dhd_get_ifp((dhd_pub_t *)pub, ifidx);
	if (ifp == NULL)
		return DHD_STA_NULL;
	if (!memcmp(ifp->net->dev_addr, ea, ETHER_ADDR_LEN)) {
		DHD_ERROR(("%s: Serious FAILURE, received own MAC %pM !!\n", __FUNCTION__, ea));
		return DHD_STA_NULL;
	}
	sta = dhd_sta_alloc((dhd_pub_t *)pub);
	if (sta == DHD_STA_NULL) {
		DHD_ERROR(("%s: Alloc failed\n", __FUNCTION__));
		return DHD_STA_NULL;
	}
	memcpy(sta->ea.octet, ea, ETHER_ADDR_LEN);
	/* link the sta and the dhd interface */
	sta->ifp = ifp;
	sta->ifidx = ifidx;
	INIT_LIST_HEAD(&sta->list);
	DHD_IF_STA_LIST_LOCK(ifp, flags);
	list_add_tail(&sta->list, &ifp->sta_list);
	DHD_ERROR(("%s: Adding STA " MACDBG "\n",
		__FUNCTION__, MAC2STRDBG((char *)ea)));
	DHD_IF_STA_LIST_UNLOCK(ifp, flags);
	return sta;
}
/** Delete all STAs from the interface's STA list. */
void
dhd_del_all_sta(void *pub, int ifidx)
{
	dhd_sta_t *sta, *next;
	dhd_if_t *ifp;
	unsigned long flags;
	ifp = dhd_get_ifp((dhd_pub_t *)pub, ifidx);
	if (ifp == NULL)
		return;
	DHD_IF_STA_LIST_LOCK(ifp, flags);
#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__)
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wcast-qual"
#endif // endif
	list_for_each_entry_safe(sta, next, &ifp->sta_list, list) {
		list_del(&sta->list);
		dhd_sta_free(&ifp->info->pub, sta);
#ifdef DHD_L2_FILTER
		if (ifp->parp_enable) {
			/* clear the Proxy ARP cache entry for this specific Ethernet address */
			bcm_l2_filter_arp_table_update(((dhd_pub_t*)pub)->osh,
				ifp->phnd_arp_table, FALSE,
				sta->ea.octet, FALSE, ((dhd_pub_t*)pub)->tickcnt);
		}
#endif /* DHD_L2_FILTER */
	}
#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__)
#pragma GCC diagnostic pop
#endif // endif
	DHD_IF_STA_LIST_UNLOCK(ifp, flags);
	return;
}
/** Delete a STA from the interface's STA list. */
void
dhd_del_sta(void *pub, int ifidx, void *ea)
{
	dhd_sta_t *sta, *next;
	dhd_if_t *ifp;
	unsigned long flags;
	ASSERT(ea != NULL);
	ifp = dhd_get_ifp((dhd_pub_t *)pub, ifidx);
	if (ifp == NULL)
		return;
	DHD_IF_STA_LIST_LOCK(ifp, flags);
#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__)
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wcast-qual"
#endif // endif
	list_for_each_entry_safe(sta, next, &ifp->sta_list, list) {
		if (!memcmp(sta->ea.octet, ea, ETHER_ADDR_LEN)) {
			DHD_ERROR(("%s: Deleting STA " MACDBG "\n",
				__FUNCTION__, MAC2STRDBG(sta->ea.octet)));
			list_del(&sta->list);
			dhd_sta_free(&ifp->info->pub, sta);
		}
	}
#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__)
#pragma GCC diagnostic pop
#endif // endif
	DHD_IF_STA_LIST_UNLOCK(ifp, flags);
#ifdef DHD_L2_FILTER
	if (ifp->parp_enable) {
		/* clear the Proxy ARP cache entry for this specific Ethernet address */
		bcm_l2_filter_arp_table_update(((dhd_pub_t*)pub)->osh, ifp->phnd_arp_table, FALSE,
			ea, FALSE, ((dhd_pub_t*)pub)->tickcnt);
	}
#endif /* DHD_L2_FILTER */
	return;
}
/** Add a STA if it doesn't exist. Not reentrant. */
dhd_sta_t*
dhd_findadd_sta(void *pub, int ifidx, void *ea)
{
	dhd_sta_t *sta;
	sta = dhd_find_sta(pub, ifidx, ea);
	if (!sta) {
		/* Add entry */
		sta = dhd_add_sta(pub, ifidx, ea);
	}
	return sta;
}
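/* Usage sketch: on the AP/P2P-GO rx path, a station entry is looked up and
 * created on demand ('ea' being any 6-byte MAC address buffer):
 *
 *   dhd_sta_t *sta = dhd_findadd_sta(dhdp, ifidx, ea);
 *   if (sta == DHD_STA_NULL)
 *           return;   // pool exhausted, bad ifidx, or own MAC seen
 *
 * Note the "Not reentrant" caveat above: two concurrent callers can both miss
 * in dhd_find_sta() and insert duplicate entries for the same MAC.
 */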
#if defined(DHD_IGMP_UCQUERY) || defined(DHD_UCAST_UPNP)
static struct list_head *
dhd_sta_list_snapshot(dhd_info_t *dhd, dhd_if_t *ifp, struct list_head *snapshot_list)
{
	unsigned long flags;
	dhd_sta_t *sta, *snapshot;
	INIT_LIST_HEAD(snapshot_list);
	DHD_IF_STA_LIST_LOCK(ifp, flags);
	list_for_each_entry(sta, &ifp->sta_list, list) {
		/* allocate one and add to snapshot */
		snapshot = (dhd_sta_t *)MALLOC(dhd->pub.osh, sizeof(dhd_sta_t));
		if (snapshot == NULL) {
			DHD_ERROR(("%s: Cannot allocate memory\n", __FUNCTION__));
			continue;
		}
		memcpy(snapshot->ea.octet, sta->ea.octet, ETHER_ADDR_LEN);
		INIT_LIST_HEAD(&snapshot->list);
		list_add_tail(&snapshot->list, snapshot_list);
	}
	DHD_IF_STA_LIST_UNLOCK(ifp, flags);
	return snapshot_list;
}
static void
dhd_sta_list_snapshot_free(dhd_info_t *dhd, struct list_head *snapshot_list)
{
	dhd_sta_t *sta, *next;
	list_for_each_entry_safe(sta, next, snapshot_list, list) {
		list_del(&sta->list);
		MFREE(dhd->pub.osh, sta, sizeof(dhd_sta_t));
	}
}
#endif /* DHD_IGMP_UCQUERY || DHD_UCAST_UPNP */
#if defined(DNGL_AXI_ERROR_LOGGING) && defined(DHD_USE_WQ_FOR_DNGL_AXI_ERROR)
void
dhd_axi_error_dispatch(dhd_pub_t *dhdp)
{
	dhd_info_t *dhd = dhdp->info;
	schedule_work(&dhd->axi_error_dispatcher_work);
}
static void dhd_axi_error_dispatcher_fn(struct work_struct * work)
{
	struct dhd_info *dhd =
		container_of(work, struct dhd_info, axi_error_dispatcher_work);
	dhd_axi_error(&dhd->pub);
}
#endif /* DNGL_AXI_ERROR_LOGGING && DHD_USE_WQ_FOR_DNGL_AXI_ERROR */
/** Returns the dhd iflist index corresponding to the bssidx provided by apps */
int dhd_bssidx2idx(dhd_pub_t *dhdp, uint32 bssidx)
{
	dhd_if_t *ifp;
	dhd_info_t *dhd = dhdp->info;
	int i;
	ASSERT(bssidx < DHD_MAX_IFS);
	ASSERT(dhdp);
	for (i = 0; i < DHD_MAX_IFS; i++) {
		ifp = dhd->iflist[i];
		if (ifp && (ifp->bssidx == bssidx)) {
			DHD_TRACE(("Index manipulated for %s from %d to %d\n",
				ifp->name, bssidx, i));
			break;
		}
	}
	return i;
}
static inline int dhd_rxf_enqueue(dhd_pub_t *dhdp, void* skb)
{
	uint32 store_idx;
	uint32 sent_idx;
	if (!skb) {
		DHD_ERROR(("dhd_rxf_enqueue: NULL skb!!!\n"));
		return BCME_ERROR;
	}
	dhd_os_rxflock(dhdp);
	store_idx = dhdp->store_idx;
	sent_idx = dhdp->sent_idx;
	if (dhdp->skbbuf[store_idx] != NULL) {
		/* Make sure the previous packets are processed */
		dhd_os_rxfunlock(dhdp);
		DHD_ERROR(("dhd_rxf_enqueue: pktbuf not consumed %p, store idx %d sent idx %d\n",
			skb, store_idx, sent_idx));
		/* msleep was removed here; wait_event_timeout should be used if we
		 * want to give the rx frame thread a chance to run
		 */
#if defined(WAIT_DEQUEUE)
		OSL_SLEEP(1);
#endif // endif
		return BCME_ERROR;
	}
	DHD_TRACE(("dhd_rxf_enqueue: Store SKB %p. idx %d -> %d\n",
		skb, store_idx, (store_idx + 1) & (MAXSKBPEND - 1)));
	dhdp->skbbuf[store_idx] = skb;
	dhdp->store_idx = (store_idx + 1) & (MAXSKBPEND - 1);
	dhd_os_rxfunlock(dhdp);
	return BCME_OK;
}
static inline void* dhd_rxf_dequeue(dhd_pub_t *dhdp)
{
	uint32 store_idx;
	uint32 sent_idx;
	void *skb;
	dhd_os_rxflock(dhdp);
	store_idx = dhdp->store_idx;
	sent_idx = dhdp->sent_idx;
	skb = dhdp->skbbuf[sent_idx];
	if (skb == NULL) {
		dhd_os_rxfunlock(dhdp);
		DHD_ERROR(("dhd_rxf_dequeue: Dequeued packet is NULL, store idx %d sent idx %d\n",
			store_idx, sent_idx));
		return NULL;
	}
	dhdp->skbbuf[sent_idx] = NULL;
	dhdp->sent_idx = (sent_idx + 1) & (MAXSKBPEND - 1);
	DHD_TRACE(("dhd_rxf_dequeue: netif_rx_ni(%p), sent idx %d\n",
		skb, sent_idx));
	dhd_os_rxfunlock(dhdp);
	return skb;
}
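/* Index arithmetic sketch: skbbuf[] is treated as a ring of MAXSKBPEND slots,
 * which therefore must be a power of two for the wrap mask to work:
 *
 *   next = (idx + 1) & (MAXSKBPEND - 1);  // e.g. MAXSKBPEND == 8: 7 -> 0
 *
 * A NULL slot means "free": the enqueue side refuses to overwrite a non-NULL
 * slot, which is how a producer overrun of the consumer is detected above.
 */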
int dhd_process_cid_mac(dhd_pub_t *dhdp, bool prepost)
{
	if (prepost) { /* pre process */
		dhd_read_cis(dhdp);
		dhd_check_module_cid(dhdp);
		dhd_check_module_mac(dhdp);
		dhd_set_macaddr_from_file(dhdp);
	} else { /* post process */
		dhd_write_macaddr(&dhdp->mac);
		dhd_clear_cis(dhdp);
	}
	return 0;
}
#if defined(WL_CFG80211) && defined(DHD_FILE_DUMP_EVENT) && defined(DHD_FW_COREDUMP)
static int dhd_wait_for_file_dump(dhd_pub_t *dhdp)
{
	struct net_device *primary_ndev;
	struct bcm_cfg80211 *cfg;
	unsigned long flags = 0;
	primary_ndev = dhd_linux_get_primary_netdev(dhdp);
	if (!primary_ndev) {
		DHD_ERROR(("%s: Cannot find primary netdev\n", __FUNCTION__));
		return BCME_ERROR;
	}
	cfg = wl_get_cfg(primary_ndev);
	if (!cfg) {
		DHD_ERROR(("%s: Cannot find cfg\n", __FUNCTION__));
		return BCME_ERROR;
	}
	DHD_GENERAL_LOCK(dhdp, flags);
	if (DHD_BUS_CHECK_DOWN_OR_DOWN_IN_PROGRESS(dhdp)) {
		DHD_BUS_BUSY_CLEAR_IN_HALDUMP(dhdp);
		dhd_os_busbusy_wake(dhdp);
		DHD_GENERAL_UNLOCK(dhdp, flags);
		DHD_ERROR(("%s: bus is down! can't collect log dump.\n", __FUNCTION__));
		return BCME_ERROR;
	}
	DHD_BUS_BUSY_SET_IN_HALDUMP(dhdp);
	DHD_GENERAL_UNLOCK(dhdp, flags);
	DHD_OS_WAKE_LOCK(dhdp);
	/* Send the urgent event only if the HAL has started; otherwise skip it
	 * and just clear the dump state here.
	 */
	if (wl_cfg80211_is_hal_started(cfg)) {
		int timeleft = 0;
		DHD_ERROR(("[DUMP] %s: HAL started. send urgent event\n", __FUNCTION__));
		dhd_dbg_send_urgent_evt(dhdp, NULL, 0);
		DHD_ERROR(("%s: wait to clear dhd_bus_busy_state: 0x%x\n",
			__FUNCTION__, dhdp->dhd_bus_busy_state));
		timeleft = dhd_os_busbusy_wait_bitmask(dhdp,
			&dhdp->dhd_bus_busy_state, DHD_BUS_BUSY_IN_HALDUMP, 0);
		if ((dhdp->dhd_bus_busy_state & DHD_BUS_BUSY_IN_HALDUMP) != 0) {
			DHD_ERROR(("%s: Timed out dhd_bus_busy_state=0x%x\n",
				__FUNCTION__, dhdp->dhd_bus_busy_state));
		}
	} else {
		DHD_ERROR(("[DUMP] %s: HAL Not started. skip urgent event\n", __FUNCTION__));
	}
	DHD_OS_WAKE_UNLOCK(dhdp);
	/* In case of a dhd_os_busbusy_wait_bitmask() timeout,
	 * the hal dump bit will not be cleared. Hence clear it here.
	 */
	DHD_GENERAL_LOCK(dhdp, flags);
	DHD_BUS_BUSY_CLEAR_IN_HALDUMP(dhdp);
	dhd_os_busbusy_wake(dhdp);
	DHD_GENERAL_UNLOCK(dhdp, flags);
	return BCME_OK;
}
#endif /* WL_CFG80211 && DHD_FILE_DUMP_EVENT && DHD_FW_COREDUMP */
#ifdef PKT_FILTER_SUPPORT
#ifndef GAN_LITE_NAT_KEEPALIVE_FILTER
static bool
_turn_on_arp_filter(dhd_pub_t *dhd, int op_mode_param)
{
	bool _apply = FALSE;
	/* In case of IBSS mode, apply the arp pkt filter */
	if (op_mode_param & DHD_FLAG_IBSS_MODE) {
		_apply = TRUE;
		goto exit;
	}
	/* In case of P2P GO or GC, apply the pkt filter to pass arp pkts to the host */
	if (op_mode_param & (DHD_FLAG_P2P_GC_MODE | DHD_FLAG_P2P_GO_MODE)) {
		_apply = TRUE;
		goto exit;
	}
exit:
	return _apply;
}
#endif /* !GAN_LITE_NAT_KEEPALIVE_FILTER */
void
dhd_set_packet_filter(dhd_pub_t *dhd)
{
	int i;
	DHD_TRACE(("%s: enter\n", __FUNCTION__));
	if (dhd_pkt_filter_enable) {
		for (i = 0; i < dhd->pktfilter_count; i++) {
			dhd_pktfilter_offload_set(dhd, dhd->pktfilter[i]);
		}
	}
}
void
dhd_enable_packet_filter(int value, dhd_pub_t *dhd)
{
	int i;
	DHD_ERROR(("%s: enter, value = %d\n", __FUNCTION__, value));
	if ((dhd->op_mode & DHD_FLAG_HOSTAP_MODE) && value) {
		DHD_ERROR(("%s: DHD_FLAG_HOSTAP_MODE\n", __FUNCTION__));
		return;
	}
	/* 1 - Enable packet filter, only allow unicast packets to be sent up */
	/* 0 - Disable packet filter */
	if (dhd_pkt_filter_enable && (!value ||
	    (dhd_support_sta_mode(dhd) && !dhd->dhcp_in_progress)))
	{
		for (i = 0; i < dhd->pktfilter_count; i++) {
#ifndef GAN_LITE_NAT_KEEPALIVE_FILTER
			if (value && (i == DHD_ARP_FILTER_NUM) &&
				!_turn_on_arp_filter(dhd, dhd->op_mode)) {
				DHD_TRACE(("Do not turn on ARP white list pkt filter:"
					"val %d, cnt %d, op_mode 0x%x\n",
					value, i, dhd->op_mode));
				continue;
			}
#endif /* !GAN_LITE_NAT_KEEPALIVE_FILTER */
			dhd_pktfilter_offload_enable(dhd, dhd->pktfilter[i],
				value, dhd_master_mode);
		}
	}
}
int
dhd_packet_filter_add_remove(dhd_pub_t *dhdp, int add_remove, int num)
{
	char *filterp = NULL;
	int filter_id = 0;
	switch (num) {
	case DHD_BROADCAST_FILTER_NUM:
		filterp = "101 0 0 0 0xFFFFFFFFFFFF 0xFFFFFFFFFFFF";
		filter_id = 101;
		break;
	case DHD_MULTICAST4_FILTER_NUM:
		filter_id = 102;
		if (FW_SUPPORTED((dhdp), pf6)) {
			if (dhdp->pktfilter[num] != NULL) {
				dhd_pktfilter_offload_delete(dhdp, filter_id);
				dhdp->pktfilter[num] = NULL;
			}
			if (!add_remove) {
				filterp = DISCARD_IPV4_MCAST;
				add_remove = 1;
				break;
			}
		}
		filterp = "102 0 0 0 0xFFFFFF 0x01005E";
		break;
	case DHD_MULTICAST6_FILTER_NUM:
		filter_id = 103;
		if (FW_SUPPORTED((dhdp), pf6)) {
			if (dhdp->pktfilter[num] != NULL) {
				dhd_pktfilter_offload_delete(dhdp, filter_id);
				dhdp->pktfilter[num] = NULL;
			}
			if (!add_remove) {
				filterp = DISCARD_IPV6_MCAST;
				add_remove = 1;
				break;
			}
		}
		filterp = "103 0 0 0 0xFFFF 0x3333";
		break;
	case DHD_MDNS_FILTER_NUM:
		filterp = "104 0 0 0 0xFFFFFFFFFFFF 0x01005E0000FB";
		filter_id = 104;
		break;
	case DHD_ARP_FILTER_NUM:
		filterp = "105 0 0 12 0xFFFF 0x0806";
		filter_id = 105;
		break;
	case DHD_BROADCAST_ARP_FILTER_NUM:
		filterp = "106 0 0 0 0xFFFFFFFFFFFF0000000000000806"
			" 0xFFFFFFFFFFFF0000000000000806";
		filter_id = 106;
		break;
	default:
		return -EINVAL;
	}
	/* Add filter */
	if (add_remove) {
		dhdp->pktfilter[num] = filterp;
		dhd_pktfilter_offload_set(dhdp, dhdp->pktfilter[num]);
	} else { /* Delete filter */
		if (dhdp->pktfilter[num]) {
			dhd_pktfilter_offload_delete(dhdp, filter_id);
			dhdp->pktfilter[num] = NULL;
		}
	}
	return 0;
}
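/* Decoding sketch for the filter strings above, assuming the conventional
 * pkt_filter layout "<id> <polarity> <type> <offset> <bitmask> <pattern>":
 *
 *   "105 0 0 12 0xFFFF 0x0806"
 *
 * reads as: filter id 105, polarity 0 (match), type 0 (pattern filter),
 * compare 2 bytes at offset 12 (the Ethertype field) against 0x0806 under
 * mask 0xFFFF, i.e. match ARP frames.
 */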
  1579. #endif /* PKT_FILTER_SUPPORT */
  1580. static int dhd_set_suspend(int value, dhd_pub_t *dhd)
  1581. {
  1582. #ifndef SUPPORT_PM2_ONLY
  1583. int power_mode = PM_MAX;
  1584. #endif /* SUPPORT_PM2_ONLY */
  1585. /* wl_pkt_filter_enable_t enable_parm; */
  1586. int bcn_li_dtim = 0; /* Default bcn_li_dtim in resume mode is 0 */
  1587. int ret = 0;
  1588. #ifdef DHD_USE_EARLYSUSPEND
  1589. #ifdef CUSTOM_BCN_TIMEOUT_IN_SUSPEND
  1590. int bcn_timeout = 0;
  1591. #endif /* CUSTOM_BCN_TIMEOUT_IN_SUSPEND */
  1592. #ifdef CUSTOM_ROAM_TIME_THRESH_IN_SUSPEND
  1593. int roam_time_thresh = 0; /* (ms) */
  1594. #endif /* CUSTOM_ROAM_TIME_THRESH_IN_SUSPEND */
  1595. #ifndef ENABLE_FW_ROAM_SUSPEND
  1596. uint roamvar = 1;
  1597. #endif /* ENABLE_FW_ROAM_SUSPEND */
  1598. #ifdef ENABLE_BCN_LI_BCN_WAKEUP
  1599. int bcn_li_bcn = 1;
  1600. #endif /* ENABLE_BCN_LI_BCN_WAKEUP */
  1601. uint nd_ra_filter = 0;
  1602. #ifdef ENABLE_IPMCAST_FILTER
  1603. int ipmcast_l2filter;
  1604. #endif /* ENABLE_IPMCAST_FILTER */
  1605. #ifdef CUSTOM_EVENT_PM_WAKE
  1606. uint32 pm_awake_thresh = CUSTOM_EVENT_PM_WAKE;
  1607. #endif /* CUSTOM_EVENT_PM_WAKE */
  1608. #endif /* DHD_USE_EARLYSUSPEND */
  1609. #ifdef PASS_ALL_MCAST_PKTS
  1610. struct dhd_info *dhdinfo;
  1611. uint32 allmulti;
  1612. uint i;
  1613. #endif /* PASS_ALL_MCAST_PKTS */
  1614. #ifdef DYNAMIC_SWOOB_DURATION
  1615. #ifndef CUSTOM_INTR_WIDTH
  1616. #define CUSTOM_INTR_WIDTH 100
  1617. int intr_width = 0;
  1618. #endif /* CUSTOM_INTR_WIDTH */
  1619. #endif /* DYNAMIC_SWOOB_DURATION */
  1620. #if defined(OEM_ANDROID) && defined(BCMPCIE)
  1621. int lpas = 0;
  1622. int dtim_period = 0;
  1623. int bcn_interval = 0;
  1624. int bcn_to_dly = 0;
  1625. #if defined(CUSTOM_BCN_TIMEOUT_IN_SUSPEND) && defined(DHD_USE_EARLYSUSPEND)
  1626. bcn_timeout = CUSTOM_BCN_TIMEOUT_SETTING;
  1627. #else
  1628. int bcn_timeout = CUSTOM_BCN_TIMEOUT_SETTING;
  1629. #endif /* CUSTOM_BCN_TIMEOUT_IN_SUSPEND && DHD_USE_EARLYSUSPEND */
  1630. #endif /* OEM_ANDROID && BCMPCIE */
  1631. if (!dhd)
  1632. return -ENODEV;
  1633. #ifdef PASS_ALL_MCAST_PKTS
  1634. dhdinfo = dhd->info;
  1635. #endif /* PASS_ALL_MCAST_PKTS */
  1636. DHD_TRACE(("%s: enter, value = %d in_suspend=%d\n",
  1637. __FUNCTION__, value, dhd->in_suspend));
  1638. dhd_suspend_lock(dhd);
  1639. #ifdef CUSTOM_SET_CPUCORE
  1640. DHD_TRACE(("%s set cpucore(suspend%d)\n", __FUNCTION__, value));
  1641. /* set specific cpucore */
  1642. dhd_set_cpucore(dhd, TRUE);
  1643. #endif /* CUSTOM_SET_CPUCORE */
  1644. if (dhd->up) {
  1645. if (value && dhd->in_suspend) {
  1646. #ifdef PKT_FILTER_SUPPORT
  1647. dhd->early_suspended = 1;
  1648. #endif // endif
  1649. /* Kernel suspended */
			DHD_ERROR(("%s: force extra Suspend setting \n", __FUNCTION__));

#ifndef SUPPORT_PM2_ONLY
			dhd_wl_ioctl_cmd(dhd, WLC_SET_PM, (char *)&power_mode,
				sizeof(power_mode), TRUE, 0);
#endif /* SUPPORT_PM2_ONLY */

#ifdef PKT_FILTER_SUPPORT
			/* Enable packet filter,
			 * only allow unicast packet to send up
			 */
			dhd_enable_packet_filter(1, dhd);
#ifdef APF
			dhd_dev_apf_enable_filter(dhd_linux_get_primary_netdev(dhd));
#endif /* APF */
#endif /* PKT_FILTER_SUPPORT */

#ifdef ARP_OFFLOAD_SUPPORT
			dhd_arp_offload_enable(dhd, TRUE);
#endif /* ARP_OFFLOAD_SUPPORT */

#ifdef PASS_ALL_MCAST_PKTS
			allmulti = 0;
			for (i = 0; i < DHD_MAX_IFS; i++) {
				if (dhdinfo->iflist[i] && dhdinfo->iflist[i]->net)
					ret = dhd_iovar(dhd, i, "allmulti",
						(char *)&allmulti,
						sizeof(allmulti),
						NULL, 0, TRUE);
				if (ret < 0) {
					DHD_ERROR(("%s allmulti failed %d\n",
						__FUNCTION__, ret));
				}
			}
#endif /* PASS_ALL_MCAST_PKTS */

			/* If DTIM skip is set up as default, force it to wake
			 * each third DTIM for better power savings. Note that
			 * one side effect is a chance to miss BC/MC packets.
			 */
#ifdef WLTDLS
			/* Do not set bcn_li_dtim in WFD mode */
			if (dhd->tdls_mode) {
				bcn_li_dtim = 0;
			} else
#endif /* WLTDLS */
#if defined(OEM_ANDROID) && defined(BCMPCIE)
			bcn_li_dtim = dhd_get_suspend_bcn_li_dtim(dhd, &dtim_period,
				&bcn_interval);
			ret = dhd_iovar(dhd, 0, "bcn_li_dtim", (char *)&bcn_li_dtim,
				sizeof(bcn_li_dtim), NULL, 0, TRUE);
			if (ret < 0) {
				DHD_ERROR(("%s bcn_li_dtim failed %d\n",
					__FUNCTION__, ret));
			}
			if ((bcn_li_dtim * dtim_period * bcn_interval) >=
				MIN_DTIM_FOR_ROAM_THRES_EXTEND) {
				/*
				 * Increase max roaming threshold from 2 secs to 8 secs;
				 * the real roam threshold is MIN(max_roam_threshold,
				 * bcn_timeout/2)
				 */
				lpas = 1;
				ret = dhd_iovar(dhd, 0, "lpas", (char *)&lpas, sizeof(lpas),
					NULL, 0, TRUE);
				if (ret < 0) {
					DHD_ERROR(("%s lpas failed %d\n", __FUNCTION__,
						ret));
				}
				bcn_to_dly = 1;
				/*
				 * if bcn_to_dly is 1, the real roam threshold is
				 * MIN(max_roam_threshold, bcn_timeout - 1);
				 * notify link down event after roaming procedure completes
				 * if we hit bcn_timeout while we are in roaming progress.
				 */
				ret = dhd_iovar(dhd, 0, "bcn_to_dly", (char *)&bcn_to_dly,
					sizeof(bcn_to_dly), NULL, 0, TRUE);
				if (ret < 0) {
					DHD_ERROR(("%s bcn_to_dly failed %d\n",
						__FUNCTION__, ret));
				}
				/* Increase beacon timeout to 6 secs or use the bigger one */
				bcn_timeout = max(bcn_timeout, BCN_TIMEOUT_IN_SUSPEND);
				ret = dhd_iovar(dhd, 0, "bcn_timeout", (char *)&bcn_timeout,
					sizeof(bcn_timeout), NULL, 0, TRUE);
				if (ret < 0) {
					DHD_ERROR(("%s bcn_timeout failed %d\n",
						__FUNCTION__, ret));
				}
			}
#else
			bcn_li_dtim = dhd_get_suspend_bcn_li_dtim(dhd);
			if (dhd_iovar(dhd, 0, "bcn_li_dtim", (char *)&bcn_li_dtim,
				sizeof(bcn_li_dtim), NULL, 0, TRUE) < 0)
				DHD_ERROR(("%s: set dtim failed\n", __FUNCTION__));
#endif /* OEM_ANDROID && BCMPCIE */

#ifdef WL_CFG80211
			/* Disable cfg80211 feature events during suspend */
			ret = wl_cfg80211_config_suspend_events(
				dhd_linux_get_primary_netdev(dhd), FALSE);
			if (ret < 0) {
				DHD_ERROR(("failed to disable events (%d)\n", ret));
			}
#endif /* WL_CFG80211 */

#ifdef DHD_USE_EARLYSUSPEND
#ifdef CUSTOM_BCN_TIMEOUT_IN_SUSPEND
			bcn_timeout = CUSTOM_BCN_TIMEOUT_IN_SUSPEND;
			ret = dhd_iovar(dhd, 0, "bcn_timeout", (char *)&bcn_timeout,
				sizeof(bcn_timeout), NULL, 0, TRUE);
			if (ret < 0) {
				DHD_ERROR(("%s bcn_timeout failed %d\n", __FUNCTION__,
					ret));
			}
#endif /* CUSTOM_BCN_TIMEOUT_IN_SUSPEND */
#ifdef CUSTOM_ROAM_TIME_THRESH_IN_SUSPEND
			roam_time_thresh = CUSTOM_ROAM_TIME_THRESH_IN_SUSPEND;
			ret = dhd_iovar(dhd, 0, "roam_time_thresh",
				(char *)&roam_time_thresh,
				sizeof(roam_time_thresh), NULL, 0, TRUE);
			if (ret < 0) {
				DHD_ERROR(("%s roam_time_thresh failed %d\n",
					__FUNCTION__, ret));
			}
#endif /* CUSTOM_ROAM_TIME_THRESH_IN_SUSPEND */
#ifndef ENABLE_FW_ROAM_SUSPEND
			/* Disable firmware roaming during suspend */
			ret = dhd_iovar(dhd, 0, "roam_off", (char *)&roamvar,
				sizeof(roamvar), NULL, 0, TRUE);
			if (ret < 0) {
				DHD_ERROR(("%s roam_off failed %d\n",
					__FUNCTION__, ret));
			}
#endif /* ENABLE_FW_ROAM_SUSPEND */
#ifdef ENABLE_BCN_LI_BCN_WAKEUP
			if (bcn_li_dtim) {
				bcn_li_bcn = 0;
			}
			ret = dhd_iovar(dhd, 0, "bcn_li_bcn", (char *)&bcn_li_bcn,
				sizeof(bcn_li_bcn), NULL, 0, TRUE);
			if (ret < 0) {
				DHD_ERROR(("%s bcn_li_bcn failed %d\n", __FUNCTION__, ret));
			}
#endif /* ENABLE_BCN_LI_BCN_WAKEUP */
#if defined(WL_CFG80211) && defined(WL_BCNRECV)
			ret = wl_android_bcnrecv_suspend(dhd_linux_get_primary_netdev(dhd));
			if (ret != BCME_OK) {
				DHD_ERROR(("failed to stop beacon recv event on"
					" suspend state (%d)\n", ret));
			}
#endif /* WL_CFG80211 && WL_BCNRECV */
#ifdef NDO_CONFIG_SUPPORT
			if (dhd->ndo_enable) {
				if (!dhd->ndo_host_ip_overflow) {
					/* enable ND offload on suspend */
					ret = dhd_ndo_enable(dhd, TRUE);
					if (ret < 0) {
						DHD_ERROR(("%s: failed to enable NDO\n",
							__FUNCTION__));
					}
				} else {
					DHD_INFO(("%s: NDO disabled on suspend due to "
						"HW capacity\n", __FUNCTION__));
				}
			}
#endif /* NDO_CONFIG_SUPPORT */
#ifndef APF
			if (FW_SUPPORTED(dhd, ndoe)) {
#else
			if (FW_SUPPORTED(dhd, ndoe) && !FW_SUPPORTED(dhd, apf)) {
#endif /* APF */
				/* enable IPv6 RA filter in firmware during suspend */
				nd_ra_filter = 1;
				ret = dhd_iovar(dhd, 0, "nd_ra_filter_enable",
					(char *)&nd_ra_filter, sizeof(nd_ra_filter),
					NULL, 0, TRUE);
				if (ret < 0)
					DHD_ERROR(("failed to set nd_ra_filter (%d)\n",
						ret));
			}
			dhd_os_suppress_logging(dhd, TRUE);
#ifdef ENABLE_IPMCAST_FILTER
			ipmcast_l2filter = 1;
			ret = dhd_iovar(dhd, 0, "ipmcast_l2filter",
				(char *)&ipmcast_l2filter, sizeof(ipmcast_l2filter),
				NULL, 0, TRUE);
			if (ret < 0) {
				DHD_ERROR(("failed to set ipmcast_l2filter (%d)\n", ret));
			}
#endif /* ENABLE_IPMCAST_FILTER */
#ifdef DYNAMIC_SWOOB_DURATION
			intr_width = CUSTOM_INTR_WIDTH;
			ret = dhd_iovar(dhd, 0, "bus:intr_width", (char *)&intr_width,
				sizeof(intr_width), NULL, 0, TRUE);
			if (ret < 0) {
				DHD_ERROR(("failed to set intr_width (%d)\n", ret));
			}
#endif /* DYNAMIC_SWOOB_DURATION */
#ifdef CUSTOM_EVENT_PM_WAKE
			pm_awake_thresh = CUSTOM_EVENT_PM_WAKE * 4;
			ret = dhd_iovar(dhd, 0, "const_awake_thresh",
				(char *)&pm_awake_thresh,
				sizeof(pm_awake_thresh), NULL, 0, TRUE);
			if (ret < 0) {
				DHD_ERROR(("%s set const_awake_thresh failed %d\n",
					__FUNCTION__, ret));
			}
#endif /* CUSTOM_EVENT_PM_WAKE */
#ifdef CONFIG_SILENT_ROAM
			if (!dhd->sroamed) {
				ret = dhd_sroam_set_mon(dhd, TRUE);
				if (ret < 0) {
					DHD_ERROR(("%s set sroam failed %d\n",
						__FUNCTION__, ret));
				}
			}
			dhd->sroamed = FALSE;
#endif /* CONFIG_SILENT_ROAM */
#endif /* DHD_USE_EARLYSUSPEND */
		} else {
#ifdef PKT_FILTER_SUPPORT
			dhd->early_suspended = 0;
#endif // endif
			/* Kernel resumed */
			DHD_ERROR(("%s: Remove extra suspend setting \n", __FUNCTION__));
#ifdef DYNAMIC_SWOOB_DURATION
			intr_width = 0;
			ret = dhd_iovar(dhd, 0, "bus:intr_width", (char *)&intr_width,
				sizeof(intr_width), NULL, 0, TRUE);
			if (ret < 0) {
				DHD_ERROR(("failed to set intr_width (%d)\n", ret));
			}
#endif /* DYNAMIC_SWOOB_DURATION */
#ifndef SUPPORT_PM2_ONLY
			power_mode = PM_FAST;
			dhd_wl_ioctl_cmd(dhd, WLC_SET_PM, (char *)&power_mode,
				sizeof(power_mode), TRUE, 0);
#endif /* SUPPORT_PM2_ONLY */
#if defined(WL_CFG80211) && defined(WL_BCNRECV)
			ret = wl_android_bcnrecv_resume(dhd_linux_get_primary_netdev(dhd));
			if (ret != BCME_OK) {
				DHD_ERROR(("failed to resume beacon recv state (%d)\n",
					ret));
			}
#endif /* WL_CFG80211 && WL_BCNRECV */
#ifdef ARP_OFFLOAD_SUPPORT
			dhd_arp_offload_enable(dhd, FALSE);
#endif /* ARP_OFFLOAD_SUPPORT */
#ifdef PKT_FILTER_SUPPORT
			/* disable pkt filter */
			dhd_enable_packet_filter(0, dhd);
#ifdef APF
			dhd_dev_apf_disable_filter(dhd_linux_get_primary_netdev(dhd));
#endif /* APF */
#endif /* PKT_FILTER_SUPPORT */
#ifdef PASS_ALL_MCAST_PKTS
			allmulti = 1;
			for (i = 0; i < DHD_MAX_IFS; i++) {
				if (dhdinfo->iflist[i] && dhdinfo->iflist[i]->net)
					ret = dhd_iovar(dhd, i, "allmulti",
						(char *)&allmulti,
						sizeof(allmulti), NULL,
						0, TRUE);
				if (ret < 0) {
					DHD_ERROR(("%s: allmulti failed:%d\n",
						__FUNCTION__, ret));
				}
			}
#endif /* PASS_ALL_MCAST_PKTS */
#if defined(OEM_ANDROID) && defined(BCMPCIE)
			/* restore pre-suspend setting */
			ret = dhd_iovar(dhd, 0, "bcn_li_dtim", (char *)&bcn_li_dtim,
				sizeof(bcn_li_dtim), NULL, 0, TRUE);
			if (ret < 0) {
				DHD_ERROR(("%s:bcn_li_dtim failed:%d\n",
					__FUNCTION__, ret));
			}
			ret = dhd_iovar(dhd, 0, "lpas", (char *)&lpas, sizeof(lpas), NULL,
				0, TRUE);
			if (ret < 0) {
				DHD_ERROR(("%s:lpas failed:%d\n", __FUNCTION__, ret));
			}
			ret = dhd_iovar(dhd, 0, "bcn_to_dly", (char *)&bcn_to_dly,
				sizeof(bcn_to_dly), NULL, 0, TRUE);
			if (ret < 0) {
				DHD_ERROR(("%s:bcn_to_dly failed:%d\n", __FUNCTION__, ret));
			}
			ret = dhd_iovar(dhd, 0, "bcn_timeout", (char *)&bcn_timeout,
				sizeof(bcn_timeout), NULL, 0, TRUE);
			if (ret < 0) {
				DHD_ERROR(("%s:bcn_timeout failed:%d\n",
					__FUNCTION__, ret));
			}
#else
			/* restore pre-suspend setting for dtim_skip */
			ret = dhd_iovar(dhd, 0, "bcn_li_dtim", (char *)&bcn_li_dtim,
				sizeof(bcn_li_dtim), NULL, 0, TRUE);
			if (ret < 0) {
				DHD_ERROR(("%s:bcn_li_dtim fail:%d\n", __FUNCTION__, ret));
			}
#endif /* OEM_ANDROID && BCMPCIE */
#ifdef DHD_USE_EARLYSUSPEND
#ifdef CUSTOM_BCN_TIMEOUT_IN_SUSPEND
			bcn_timeout = CUSTOM_BCN_TIMEOUT;
			ret = dhd_iovar(dhd, 0, "bcn_timeout", (char *)&bcn_timeout,
				sizeof(bcn_timeout), NULL, 0, TRUE);
			if (ret < 0) {
				DHD_ERROR(("%s:bcn_timeout failed:%d\n",
					__FUNCTION__, ret));
			}
#endif /* CUSTOM_BCN_TIMEOUT_IN_SUSPEND */
#ifdef CUSTOM_ROAM_TIME_THRESH_IN_SUSPEND
			roam_time_thresh = 2000;
			ret = dhd_iovar(dhd, 0, "roam_time_thresh",
				(char *)&roam_time_thresh,
				sizeof(roam_time_thresh), NULL, 0, TRUE);
			if (ret < 0) {
				DHD_ERROR(("%s:roam_time_thresh failed:%d\n",
					__FUNCTION__, ret));
			}
#endif /* CUSTOM_ROAM_TIME_THRESH_IN_SUSPEND */
#ifndef ENABLE_FW_ROAM_SUSPEND
			roamvar = dhd_roam_disable;
			ret = dhd_iovar(dhd, 0, "roam_off", (char *)&roamvar,
				sizeof(roamvar), NULL, 0, TRUE);
			if (ret < 0) {
				DHD_ERROR(("%s: roam_off fail:%d\n", __FUNCTION__, ret));
			}
#endif /* ENABLE_FW_ROAM_SUSPEND */
#ifdef ENABLE_BCN_LI_BCN_WAKEUP
			ret = dhd_iovar(dhd, 0, "bcn_li_bcn", (char *)&bcn_li_bcn,
				sizeof(bcn_li_bcn), NULL, 0, TRUE);
			if (ret < 0) {
				DHD_ERROR(("%s: bcn_li_bcn failed:%d\n",
					__FUNCTION__, ret));
			}
#endif /* ENABLE_BCN_LI_BCN_WAKEUP */
#ifdef NDO_CONFIG_SUPPORT
			if (dhd->ndo_enable) {
				/* Disable ND offload on resume */
				ret = dhd_ndo_enable(dhd, FALSE);
				if (ret < 0) {
					DHD_ERROR(("%s: failed to disable NDO\n",
						__FUNCTION__));
				}
			}
#endif /* NDO_CONFIG_SUPPORT */
#ifndef APF
			if (FW_SUPPORTED(dhd, ndoe)) {
#else
			if (FW_SUPPORTED(dhd, ndoe) && !FW_SUPPORTED(dhd, apf)) {
#endif /* APF */
				/* disable IPv6 RA filter in firmware during suspend */
				nd_ra_filter = 0;
				ret = dhd_iovar(dhd, 0, "nd_ra_filter_enable",
					(char *)&nd_ra_filter, sizeof(nd_ra_filter),
					NULL, 0, TRUE);
				if (ret < 0) {
					DHD_ERROR(("failed to set nd_ra_filter (%d)\n",
						ret));
				}
			}
			dhd_os_suppress_logging(dhd, FALSE);
#ifdef ENABLE_IPMCAST_FILTER
			ipmcast_l2filter = 0;
			ret = dhd_iovar(dhd, 0, "ipmcast_l2filter",
				(char *)&ipmcast_l2filter, sizeof(ipmcast_l2filter),
				NULL, 0, TRUE);
			if (ret < 0) {
				DHD_ERROR(("failed to clear ipmcast_l2filter ret:%d\n", ret));
			}
#endif /* ENABLE_IPMCAST_FILTER */
#ifdef CUSTOM_EVENT_PM_WAKE
			ret = dhd_iovar(dhd, 0, "const_awake_thresh",
				(char *)&pm_awake_thresh,
				sizeof(pm_awake_thresh), NULL, 0, TRUE);
			if (ret < 0) {
				DHD_ERROR(("%s set const_awake_thresh failed %d\n",
					__FUNCTION__, ret));
			}
#endif /* CUSTOM_EVENT_PM_WAKE */
#ifdef CONFIG_SILENT_ROAM
			ret = dhd_sroam_set_mon(dhd, FALSE);
			if (ret < 0) {
				DHD_ERROR(("%s set sroam failed %d\n", __FUNCTION__, ret));
			}
#endif /* CONFIG_SILENT_ROAM */
#endif /* DHD_USE_EARLYSUSPEND */
#ifdef WL_CFG80211
			/* Enable cfg80211 feature events during resume */
			ret = wl_cfg80211_config_suspend_events(
				dhd_linux_get_primary_netdev(dhd), TRUE);
			if (ret < 0) {
				DHD_ERROR(("failed to enable events (%d)\n", ret));
			}
#endif /* WL_CFG80211 */
#ifdef DHD_LB_IRQSET
			dhd_irq_set_affinity(dhd, dhd->info->cpumask_primary);
#endif /* DHD_LB_IRQSET */
		}
	}
	dhd_suspend_unlock(dhd);

	return 0;
}

static int dhd_suspend_resume_helper(struct dhd_info *dhd, int val, int force)
{
	dhd_pub_t *dhdp = &dhd->pub;
	int ret = 0;

	DHD_OS_WAKE_LOCK(dhdp);
	DHD_PERIM_LOCK(dhdp);

	/* Set flag when early suspend was called */
	dhdp->in_suspend = val;
	if ((force || !dhdp->suspend_disable_flag) &&
		dhd_support_sta_mode(dhdp))
	{
		ret = dhd_set_suspend(val, dhdp);
	}

	DHD_PERIM_UNLOCK(dhdp);
	DHD_OS_WAKE_UNLOCK(dhdp);
	return ret;
}

#if defined(CONFIG_HAS_EARLYSUSPEND) && defined(DHD_USE_EARLYSUSPEND)
static void dhd_early_suspend(struct early_suspend *h)
{
	struct dhd_info *dhd = container_of(h, struct dhd_info, early_suspend);
	DHD_TRACE_HW4(("%s: enter\n", __FUNCTION__));

	if (dhd)
		dhd_suspend_resume_helper(dhd, 1, 0);
}

static void dhd_late_resume(struct early_suspend *h)
{
	struct dhd_info *dhd = container_of(h, struct dhd_info, early_suspend);
	DHD_TRACE_HW4(("%s: enter\n", __FUNCTION__));

	if (dhd)
		dhd_suspend_resume_helper(dhd, 0, 0);
}
#endif /* CONFIG_HAS_EARLYSUSPEND && DHD_USE_EARLYSUSPEND */

/*
 * Generalized timeout mechanism. Uses spin sleep with exponential back-off until
 * the sleep time reaches one jiffy, then switches over to task delay. Usage:
 *
 *      dhd_timeout_start(&tmo, usec);
 *      while (!dhd_timeout_expired(&tmo))
 *              if (poll_something())
 *                      break;
 *      if (dhd_timeout_expired(&tmo))
 *              fatal();
 */
void
dhd_timeout_start(dhd_timeout_t *tmo, uint usec)
{
	tmo->limit = usec;
	tmo->increment = 0;
	tmo->elapsed = 0;
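	/* One jiffy expressed in microseconds; spin delays stop growing once they reach this */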
	tmo->tick = jiffies_to_usecs(1);
}

int
dhd_timeout_expired(dhd_timeout_t *tmo)
{
	/* Does nothing the first call */
	if (tmo->increment == 0) {
		tmo->increment = 1;
		return 0;
	}

	if (tmo->elapsed >= tmo->limit)
		return 1;

	/* Add the delay that's about to take place */
	tmo->elapsed += tmo->increment;
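	/* Spin with exponential back-off while atomic or while the delay is still
	 * sub-jiffy; otherwise yield the CPU for a jiffy at a time.
	 */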
	if ((!CAN_SLEEP()) || tmo->increment < tmo->tick) {
		OSL_DELAY(tmo->increment);
		tmo->increment *= 2;
		if (tmo->increment > tmo->tick)
			tmo->increment = tmo->tick;
	} else {
		/*
		 * OSL_SLEEP() corresponds to usleep_range(). In a non-atomic
		 * context where the exact wakeup time is flexible, usleep_range()
		 * is preferable to udelay(): it improves responsiveness and
		 * reduces power consumption.
		 */
		OSL_SLEEP(jiffies_to_msecs(1));
	}

	return 0;
}

int
dhd_net2idx(dhd_info_t *dhd, struct net_device *net)
{
	int i = 0;

	if (!dhd) {
		DHD_ERROR(("%s : DHD_BAD_IF return\n", __FUNCTION__));
		return DHD_BAD_IF;
	}

	while (i < DHD_MAX_IFS) {
		if (dhd->iflist[i] && dhd->iflist[i]->net && (dhd->iflist[i]->net == net))
			return i;
		i++;
	}

	return DHD_BAD_IF;
}

struct net_device * dhd_idx2net(void *pub, int ifidx)
{
	struct dhd_pub *dhd_pub = (struct dhd_pub *)pub;
	struct dhd_info *dhd_info;

	if (!dhd_pub || ifidx < 0 || ifidx >= DHD_MAX_IFS)
		return NULL;

	dhd_info = dhd_pub->info;
	if (dhd_info && dhd_info->iflist[ifidx])
		return dhd_info->iflist[ifidx]->net;

	return NULL;
}

int
dhd_ifname2idx(dhd_info_t *dhd, char *name)
{
	int i = DHD_MAX_IFS;

	ASSERT(dhd);

	if (name == NULL || *name == '\0')
		return 0;

	while (--i > 0)
		if (dhd->iflist[i] && !strncmp(dhd->iflist[i]->dngl_name, name, IFNAMSIZ))
			break;

	DHD_TRACE(("%s: return idx %d for \"%s\"\n", __FUNCTION__, i, name));

	return i; /* default - the primary interface */
}

char *
dhd_ifname(dhd_pub_t *dhdp, int ifidx)
{
	dhd_info_t *dhd = (dhd_info_t *)dhdp->info;

	ASSERT(dhd);

	if (ifidx < 0 || ifidx >= DHD_MAX_IFS) {
		DHD_ERROR(("%s: ifidx %d out of range\n", __FUNCTION__, ifidx));
		return "<if_bad>";
	}

	if (dhd->iflist[ifidx] == NULL) {
		DHD_ERROR(("%s: null i/f %d\n", __FUNCTION__, ifidx));
		return "<if_null>";
	}

	if (dhd->iflist[ifidx]->net)
		return dhd->iflist[ifidx]->net->name;

	return "<if_none>";
}

uint8 *
dhd_bssidx2bssid(dhd_pub_t *dhdp, int idx)
{
	int i;
	dhd_info_t *dhd = (dhd_info_t *)dhdp;

	ASSERT(dhd);
	for (i = 0; i < DHD_MAX_IFS; i++)
		if (dhd->iflist[i] && dhd->iflist[i]->bssidx == idx)
			return dhd->iflist[i]->mac_addr;

	return NULL;
}

static void
_dhd_set_multicast_list(dhd_info_t *dhd, int ifidx)
{
	struct net_device *dev;
	struct netdev_hw_addr *ha;
	uint32 allmulti, cnt;
	wl_ioctl_t ioc;
	char *buf, *bufp;
	uint buflen;
	int ret;

#ifdef MCAST_LIST_ACCUMULATION
	int i;
	uint32 cnt_iface[DHD_MAX_IFS];
	cnt = 0;
	allmulti = 0;

	for (i = 0; i < DHD_MAX_IFS; i++) {
		if (dhd->iflist[i]) {
			dev = dhd->iflist[i]->net;
			if (!dev)
				continue;
			netif_addr_lock_bh(dev);
			cnt_iface[i] = netdev_mc_count(dev);
			cnt += cnt_iface[i];
			netif_addr_unlock_bh(dev);

			/* Determine initial value of allmulti flag */
			allmulti |= (dev->flags & IFF_ALLMULTI) ? TRUE : FALSE;
		}
	}
#else /* !MCAST_LIST_ACCUMULATION */
	if (!dhd->iflist[ifidx]) {
		DHD_ERROR(("%s : dhd->iflist[%d] was NULL\n", __FUNCTION__, ifidx));
		return;
	}
	dev = dhd->iflist[ifidx]->net;
	if (!dev)
		return;
	netif_addr_lock_bh(dev);
	cnt = netdev_mc_count(dev);
	netif_addr_unlock_bh(dev);

	/* Determine initial value of allmulti flag */
	allmulti = (dev->flags & IFF_ALLMULTI) ? TRUE : FALSE;
#endif /* MCAST_LIST_ACCUMULATION */

#ifdef PASS_ALL_MCAST_PKTS
#ifdef PKT_FILTER_SUPPORT
	if (!dhd->pub.early_suspended)
#endif /* PKT_FILTER_SUPPORT */
		allmulti = TRUE;
#endif /* PASS_ALL_MCAST_PKTS */

	/* Send down the multicast list first. */
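	/* iovar buffer layout: "mcast_list" + NUL | cnt as LE32 | cnt 6-byte MAC addresses */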
	buflen = sizeof("mcast_list") + sizeof(cnt) + (cnt * ETHER_ADDR_LEN);
	if (!(bufp = buf = MALLOC(dhd->pub.osh, buflen))) {
		DHD_ERROR(("%s: out of memory for mcast_list, cnt %d\n",
			dhd_ifname(&dhd->pub, ifidx), cnt));
		return;
	}

	strncpy(bufp, "mcast_list", buflen - 1);
	bufp[buflen - 1] = '\0';
	bufp += strlen("mcast_list") + 1;

	cnt = htol32(cnt);
	memcpy(bufp, &cnt, sizeof(cnt));
	bufp += sizeof(cnt);

#ifdef MCAST_LIST_ACCUMULATION
	for (i = 0; i < DHD_MAX_IFS; i++) {
		if (dhd->iflist[i]) {
			DHD_TRACE(("_dhd_set_multicast_list: ifidx %d\n", i));
			dev = dhd->iflist[i]->net;

			netif_addr_lock_bh(dev);
#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__)
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wcast-qual"
#endif // endif
			netdev_for_each_mc_addr(ha, dev) {
#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__)
#pragma GCC diagnostic pop
#endif // endif
				if (!cnt_iface[i])
					break;
				memcpy(bufp, ha->addr, ETHER_ADDR_LEN);
				bufp += ETHER_ADDR_LEN;
				DHD_TRACE(("_dhd_set_multicast_list: cnt "
					"%d " MACDBG "\n",
					cnt_iface[i], MAC2STRDBG(ha->addr)));
				cnt_iface[i]--;
			}
			netif_addr_unlock_bh(dev);
		}
	}
#else /* !MCAST_LIST_ACCUMULATION */
	netif_addr_lock_bh(dev);
#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__)
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wcast-qual"
#endif // endif
	netdev_for_each_mc_addr(ha, dev) {
		if (!cnt)
			break;
		memcpy(bufp, ha->addr, ETHER_ADDR_LEN);
		bufp += ETHER_ADDR_LEN;
		cnt--;
	}
#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__)
#pragma GCC diagnostic pop
#endif // endif
	netif_addr_unlock_bh(dev);
#endif /* MCAST_LIST_ACCUMULATION */

	memset(&ioc, 0, sizeof(ioc));
	ioc.cmd = WLC_SET_VAR;
	ioc.buf = buf;
	ioc.len = buflen;
	ioc.set = TRUE;

	ret = dhd_wl_ioctl(&dhd->pub, ifidx, &ioc, ioc.buf, ioc.len);
	if (ret < 0) {
		DHD_ERROR(("%s: set mcast_list failed, cnt %d\n",
			dhd_ifname(&dhd->pub, ifidx), cnt));
		allmulti = cnt ? TRUE : allmulti;
	}

	MFREE(dhd->pub.osh, buf, buflen);

	/* Now send the allmulti setting. This is based on the setting in the
	 * net_device flags, but might be modified above to be turned on if we
	 * were trying to set some addresses and dongle rejected it...
	 */
	allmulti = htol32(allmulti);
	ret = dhd_iovar(&dhd->pub, ifidx, "allmulti", (char *)&allmulti,
		sizeof(allmulti), NULL, 0, TRUE);
	if (ret < 0) {
		DHD_ERROR(("%s: set allmulti %d failed\n",
			dhd_ifname(&dhd->pub, ifidx), ltoh32(allmulti)));
	}

	/* Finally, pick up the PROMISC flag as well, like the NIC driver does */
#ifdef MCAST_LIST_ACCUMULATION
	allmulti = 0;
	for (i = 0; i < DHD_MAX_IFS; i++) {
		if (dhd->iflist[i]) {
			dev = dhd->iflist[i]->net;
			allmulti |= (dev->flags & IFF_PROMISC) ? TRUE : FALSE;
		}
	}
#else
	allmulti = (dev->flags & IFF_PROMISC) ? TRUE : FALSE;
#endif /* MCAST_LIST_ACCUMULATION */

	allmulti = htol32(allmulti);

	memset(&ioc, 0, sizeof(ioc));
	ioc.cmd = WLC_SET_PROMISC;
	ioc.buf = &allmulti;
	ioc.len = sizeof(allmulti);
	ioc.set = TRUE;

	ret = dhd_wl_ioctl(&dhd->pub, ifidx, &ioc, ioc.buf, ioc.len);
	if (ret < 0) {
		DHD_ERROR(("%s: set promisc %d failed\n",
			dhd_ifname(&dhd->pub, ifidx), ltoh32(allmulti)));
	}
}

int
_dhd_set_mac_address(dhd_info_t *dhd, int ifidx, uint8 *addr)
{
	int ret;
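	/* Push the new MAC to the dongle via the "cur_etheraddr" iovar, then
	 * mirror it into the net_device (and, for ifidx 0, the cached primary MAC).
	 */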
	ret = dhd_iovar(&dhd->pub, ifidx, "cur_etheraddr", (char *)addr,
		ETHER_ADDR_LEN, NULL, 0, TRUE);
	if (ret < 0) {
		DHD_ERROR(("%s: set cur_etheraddr failed\n", dhd_ifname(&dhd->pub, ifidx)));
	} else {
		memcpy(dhd->iflist[ifidx]->net->dev_addr, addr, ETHER_ADDR_LEN);
		if (ifidx == 0)
			memcpy(dhd->pub.mac.octet, addr, ETHER_ADDR_LEN);
	}

	return ret;
}

#ifdef SOFTAP
extern struct net_device *ap_net_dev;
extern tsk_ctl_t ap_eth_ctl; /* ap netdev helper thread ctl */
#endif // endif

#ifdef DHD_PSTA
/* Get psta/psr configuration */
int dhd_get_psta_mode(dhd_pub_t *dhdp)
{
	dhd_info_t *dhd = dhdp->info;
	return (int)dhd->psta_mode;
}

/* Set psta/psr configuration */
int dhd_set_psta_mode(dhd_pub_t *dhdp, uint32 val)
{
	dhd_info_t *dhd = dhdp->info;
	dhd->psta_mode = val;
	return 0;
}
#endif /* DHD_PSTA */

#if (defined(DHD_WET) || defined(DHD_MCAST_REGEN) || defined(DHD_L2_FILTER))
static void
dhd_update_rx_pkt_chainable_state(dhd_pub_t* dhdp, uint32 idx)
{
	dhd_info_t *dhd = dhdp->info;
	dhd_if_t *ifp;

	ASSERT(idx < DHD_MAX_IFS);

	ifp = dhd->iflist[idx];

	if (
#ifdef DHD_L2_FILTER
		(ifp->block_ping) ||
#endif // endif
#ifdef DHD_WET
		(dhd->wet_mode) ||
#endif // endif
#ifdef DHD_MCAST_REGEN
		(ifp->mcast_regen_bss_enable) ||
#endif // endif
		FALSE) {
		ifp->rx_pkt_chainable = FALSE;
	}
}
#endif /* DHD_WET || DHD_MCAST_REGEN || DHD_L2_FILTER */

#ifdef DHD_WET
/* Get wet configuration */
int dhd_get_wet_mode(dhd_pub_t *dhdp)
{
	dhd_info_t *dhd = dhdp->info;
	return (int)dhd->wet_mode;
}

/* Set wet configuration */
int dhd_set_wet_mode(dhd_pub_t *dhdp, uint32 val)
{
	dhd_info_t *dhd = dhdp->info;
	dhd->wet_mode = val;
	dhd_update_rx_pkt_chainable_state(dhdp, 0);
	return 0;
}
#endif /* DHD_WET */

#if defined(WL_CFG80211) && (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 0, 0))
int32 dhd_role_to_nl80211_iftype(int32 role)
{
	switch (role) {
	case WLC_E_IF_ROLE_STA:
		return NL80211_IFTYPE_STATION;
	case WLC_E_IF_ROLE_AP:
		return NL80211_IFTYPE_AP;
	case WLC_E_IF_ROLE_WDS:
		return NL80211_IFTYPE_WDS;
	case WLC_E_IF_ROLE_P2P_GO:
		return NL80211_IFTYPE_P2P_GO;
	case WLC_E_IF_ROLE_P2P_CLIENT:
		return NL80211_IFTYPE_P2P_CLIENT;
	case WLC_E_IF_ROLE_IBSS:
	case WLC_E_IF_ROLE_NAN:
		return NL80211_IFTYPE_ADHOC;
	default:
		return NL80211_IFTYPE_UNSPECIFIED;
	}
}
#endif /* WL_CFG80211 && LINUX_VERSION_CODE >= KERNEL_VERSION(3, 0, 0) */

static void
dhd_ifadd_event_handler(void *handle, void *event_info, u8 event)
{
	dhd_info_t *dhd = handle;
	dhd_if_event_t *if_event = event_info;
	int ifidx, bssidx;
	int ret;
#if defined(WL_CFG80211) && (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 0, 0))
	struct wl_if_event_info info;
#else
	struct net_device *ndev;
#endif /* WL_CFG80211 && LINUX_VERSION_CODE >= KERNEL_VERSION(3, 0, 0) */

	BCM_REFERENCE(ret);
	if (event != DHD_WQ_WORK_IF_ADD) {
		DHD_ERROR(("%s: unexpected event \n", __FUNCTION__));
		return;
	}

	if (!dhd) {
		DHD_ERROR(("%s: dhd info not available \n", __FUNCTION__));
		return;
	}

	if (!if_event) {
		DHD_ERROR(("%s: event data is null \n", __FUNCTION__));
		return;
	}

	dhd_net_if_lock_local(dhd);
	DHD_OS_WAKE_LOCK(&dhd->pub);
	DHD_PERIM_LOCK(&dhd->pub);

	ifidx = if_event->event.ifidx;
	bssidx = if_event->event.bssidx;
	DHD_TRACE(("%s: registering if with ifidx %d\n", __FUNCTION__, ifidx));

#if defined(WL_CFG80211) && (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 0, 0))
	if (if_event->event.ifidx > 0) {
		u8 *mac_addr;
		bzero(&info, sizeof(info));
		info.ifidx = ifidx;
		info.bssidx = bssidx;
		info.role = if_event->event.role;
		strncpy(info.name, if_event->name, IFNAMSIZ);
		if (is_valid_ether_addr(if_event->mac)) {
			mac_addr = if_event->mac;
		} else {
			mac_addr = NULL;
		}

		if (wl_cfg80211_post_ifcreate(dhd->pub.info->iflist[0]->net,
			&info, mac_addr, NULL, true) == NULL) {
			/* Do the post interface create ops */
			DHD_ERROR(("Post ifcreate ops failed. Returning \n"));
			goto done;
		}
	}
#else
	/* This path is for the non-android case. */
	/* The interface name in the host and in the event msg are the same; */
	/* the if name in the event msg is used to create the dongle if list on the host. */
	ndev = dhd_allocate_if(&dhd->pub, ifidx, if_event->name,
		if_event->mac, bssidx, TRUE, if_event->name);
	if (!ndev) {
		DHD_ERROR(("%s: net device alloc failed \n", __FUNCTION__));
		goto done;
	}

	DHD_PERIM_UNLOCK(&dhd->pub);
	ret = dhd_register_if(&dhd->pub, ifidx, TRUE);
	DHD_PERIM_LOCK(&dhd->pub);
	if (ret != BCME_OK) {
		DHD_ERROR(("%s: dhd_register_if failed\n", __FUNCTION__));
		dhd_remove_if(&dhd->pub, ifidx, TRUE);
		goto done;
	}
#endif /* WL_CFG80211 && LINUX_VERSION_CODE >= KERNEL_VERSION(3, 0, 0) */

#ifndef PCIE_FULL_DONGLE
	/* Turn on AP isolation in the firmware for interfaces operating in AP mode */
	if (FW_SUPPORTED((&dhd->pub), ap) && (if_event->event.role != WLC_E_IF_ROLE_STA)) {
		uint32 var_int = 1;
		ret = dhd_iovar(&dhd->pub, ifidx, "ap_isolate", (char *)&var_int, sizeof(var_int),
			NULL, 0, TRUE);
		if (ret != BCME_OK) {
			DHD_ERROR(("%s: Failed to set ap_isolate to dongle\n", __FUNCTION__));
			dhd_remove_if(&dhd->pub, ifidx, TRUE);
		}
	}
#endif /* PCIE_FULL_DONGLE */

done:
	MFREE(dhd->pub.osh, if_event, sizeof(dhd_if_event_t));

	DHD_PERIM_UNLOCK(&dhd->pub);
	DHD_OS_WAKE_UNLOCK(&dhd->pub);
	dhd_net_if_unlock_local(dhd);
}

static void
dhd_ifdel_event_handler(void *handle, void *event_info, u8 event)
{
	dhd_info_t *dhd = handle;
	int ifidx;
	dhd_if_event_t *if_event = event_info;

	if (event != DHD_WQ_WORK_IF_DEL) {
		DHD_ERROR(("%s: unexpected event \n", __FUNCTION__));
		return;
	}

	if (!dhd) {
		DHD_ERROR(("%s: dhd info not available \n", __FUNCTION__));
		return;
	}

	if (!if_event) {
		DHD_ERROR(("%s: event data is null \n", __FUNCTION__));
		return;
	}

	dhd_net_if_lock_local(dhd);
	DHD_OS_WAKE_LOCK(&dhd->pub);
	DHD_PERIM_LOCK(&dhd->pub);

	ifidx = if_event->event.ifidx;
	DHD_TRACE(("Removing interface with idx %d\n", ifidx));

	DHD_PERIM_UNLOCK(&dhd->pub);
	if (!dhd->pub.info->iflist[ifidx]) {
		/* No matching netdev found */
		DHD_ERROR(("Netdev not found! Do nothing.\n"));
		goto done;
	}
#if defined(WL_CFG80211) && (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 0, 0))
	if (if_event->event.ifidx > 0) {
		/* Do the post interface del ops */
		if (wl_cfg80211_post_ifdel(dhd->pub.info->iflist[ifidx]->net,
			true, if_event->event.ifidx) != 0) {
			DHD_TRACE(("Post ifdel ops failed. Returning \n"));
			goto done;
		}
	}
#else
	/* For non-cfg80211 drivers */
	dhd_remove_if(&dhd->pub, ifidx, TRUE);
#endif /* WL_CFG80211 && LINUX_VERSION_CODE >= KERNEL_VERSION(3, 0, 0) */

done:
	DHD_PERIM_LOCK(&dhd->pub);
	MFREE(dhd->pub.osh, if_event, sizeof(dhd_if_event_t));
	DHD_PERIM_UNLOCK(&dhd->pub);
	DHD_OS_WAKE_UNLOCK(&dhd->pub);
	dhd_net_if_unlock_local(dhd);
}

static void
dhd_set_mac_addr_handler(void *handle, void *event_info, u8 event)
{
	dhd_info_t *dhd = handle;
	dhd_if_t *ifp = event_info;

	if (event != DHD_WQ_WORK_SET_MAC) {
		DHD_ERROR(("%s: unexpected event \n", __FUNCTION__));
	}

	if (!dhd) {
		DHD_ERROR(("%s: dhd info not available \n", __FUNCTION__));
		return;
	}

	dhd_net_if_lock_local(dhd);
	DHD_OS_WAKE_LOCK(&dhd->pub);
	DHD_PERIM_LOCK(&dhd->pub);

#ifdef SOFTAP
	{
		unsigned long flags;
		bool in_ap = FALSE;
		DHD_GENERAL_LOCK(&dhd->pub, flags);
		in_ap = (ap_net_dev != NULL);
		DHD_GENERAL_UNLOCK(&dhd->pub, flags);

		if (in_ap) {
			DHD_ERROR(("attempt to set MAC for %s in AP Mode, blocked. \n",
				ifp->net->name));
			goto done;
		}
	}
#endif /* SOFTAP */

	if (ifp == NULL || !dhd->pub.up) {
		DHD_ERROR(("%s: interface info not available/down \n", __FUNCTION__));
		goto done;
	}

	DHD_ERROR(("%s: MACID is overwritten\n", __FUNCTION__));
	ifp->set_macaddress = FALSE;
	if (_dhd_set_mac_address(dhd, ifp->idx, ifp->mac_addr) == 0)
		DHD_INFO(("%s: MACID is overwritten\n", __FUNCTION__));
	else
		DHD_ERROR(("%s: _dhd_set_mac_address() failed\n", __FUNCTION__));

done:
	DHD_PERIM_UNLOCK(&dhd->pub);
	DHD_OS_WAKE_UNLOCK(&dhd->pub);
	dhd_net_if_unlock_local(dhd);
}

static void
dhd_set_mcast_list_handler(void *handle, void *event_info, u8 event)
{
	dhd_info_t *dhd = handle;
	int ifidx = (int)((long int)event_info);
	dhd_if_t *ifp = NULL;

	if (event != DHD_WQ_WORK_SET_MCAST_LIST) {
		DHD_ERROR(("%s: unexpected event \n", __FUNCTION__));
		return;
	}

	if (!dhd) {
		DHD_ERROR(("%s: dhd info not available \n", __FUNCTION__));
		return;
	}

	dhd_net_if_lock_local(dhd);
	DHD_OS_WAKE_LOCK(&dhd->pub);
	DHD_PERIM_LOCK(&dhd->pub);

	ifp = dhd->iflist[ifidx];

	if (ifp == NULL || !dhd->pub.up) {
		DHD_ERROR(("%s: interface info not available/down \n", __FUNCTION__));
		goto done;
	}

#ifdef SOFTAP
	{
		bool in_ap = FALSE;
		unsigned long flags;
		DHD_GENERAL_LOCK(&dhd->pub, flags);
		in_ap = (ap_net_dev != NULL);
		DHD_GENERAL_UNLOCK(&dhd->pub, flags);

		if (in_ap) {
			DHD_ERROR(("set MULTICAST list for %s in AP Mode, blocked. \n",
				ifp->net->name));
			ifp->set_multicast = FALSE;
			goto done;
		}
	}
#endif /* SOFTAP */

	ifidx = ifp->idx;
#ifdef MCAST_LIST_ACCUMULATION
	ifidx = 0;
#endif /* MCAST_LIST_ACCUMULATION */

	_dhd_set_multicast_list(dhd, ifidx);
	DHD_INFO(("%s: set multicast list for if %d\n", __FUNCTION__, ifidx));

done:
	DHD_PERIM_UNLOCK(&dhd->pub);
	DHD_OS_WAKE_UNLOCK(&dhd->pub);
	dhd_net_if_unlock_local(dhd);
}

static int
dhd_set_mac_address(struct net_device *dev, void *addr)
{
	int ret = 0;

	dhd_info_t *dhd = DHD_DEV_INFO(dev);
	struct sockaddr *sa = (struct sockaddr *)addr;
	int ifidx;
	dhd_if_t *dhdif;

	ifidx = dhd_net2idx(dhd, dev);
	if (ifidx == DHD_BAD_IF)
		return -1;

	dhdif = dhd->iflist[ifidx];

	dhd_net_if_lock_local(dhd);
	memcpy(dhdif->mac_addr, sa->sa_data, ETHER_ADDR_LEN);
	dhdif->set_macaddress = TRUE;
	dhd_net_if_unlock_local(dhd);
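	/* The actual "cur_etheraddr" iovar is deferred to the DHD workqueue and
	 * issued from dhd_set_mac_addr_handler() above.
	 */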
	dhd_deferred_schedule_work(dhd->dhd_deferred_wq, (void *)dhdif, DHD_WQ_WORK_SET_MAC,
		dhd_set_mac_addr_handler, DHD_WQ_WORK_PRIORITY_LOW);
	return ret;
}

static void
dhd_set_multicast_list(struct net_device *dev)
{
	dhd_info_t *dhd = DHD_DEV_INFO(dev);
	int ifidx;

	ifidx = dhd_net2idx(dhd, dev);
	if (ifidx == DHD_BAD_IF)
		return;

	dhd->iflist[ifidx]->set_multicast = TRUE;
	dhd_deferred_schedule_work(dhd->dhd_deferred_wq, (void *)((long int)ifidx),
		DHD_WQ_WORK_SET_MCAST_LIST, dhd_set_mcast_list_handler, DHD_WQ_WORK_PRIORITY_LOW);
}

#ifdef DHD_UCODE_DOWNLOAD
/* Get ucode path */
char *
dhd_get_ucode_path(dhd_pub_t *dhdp)
{
	dhd_info_t *dhd = dhdp->info;
	return dhd->uc_path;
}
#endif /* DHD_UCODE_DOWNLOAD */

#ifdef PROP_TXSTATUS
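/* dhd_os_wlfc_block/unblock serialize proptxstatus (wlfc) state with a
 * bottom-half spinlock.
 */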
  2714. int
  2715. dhd_os_wlfc_block(dhd_pub_t *pub)
  2716. {
  2717. dhd_info_t *di = (dhd_info_t *)(pub->info);
  2718. ASSERT(di != NULL);
  2719. spin_lock_bh(&di->wlfc_spinlock);
  2720. return 1;
  2721. }
  2722. int
  2723. dhd_os_wlfc_unblock(dhd_pub_t *pub)
  2724. {
  2725. dhd_info_t *di = (dhd_info_t *)(pub->info);
  2726. ASSERT(di != NULL);
  2727. spin_unlock_bh(&di->wlfc_spinlock);
  2728. return 1;
  2729. }
  2730. #endif /* PROP_TXSTATUS */
  2731. /* This routine do not support Packet chain feature, Currently tested for
  2732. * proxy arp feature
  2733. */
  2734. int dhd_sendup(dhd_pub_t *dhdp, int ifidx, void *p)
  2735. {
  2736. struct sk_buff *skb;
  2737. void *skbhead = NULL;
  2738. void *skbprev = NULL;
  2739. dhd_if_t *ifp;
  2740. ASSERT(!PKTISCHAINED(p));
  2741. skb = PKTTONATIVE(dhdp->osh, p);
  2742. ifp = dhdp->info->iflist[ifidx];
  2743. skb->dev = ifp->net;
  2744. skb->protocol = eth_type_trans(skb, skb->dev);
  2745. if (in_interrupt()) {
  2746. bcm_object_trace_opr(skb, BCM_OBJDBG_REMOVE,
  2747. __FUNCTION__, __LINE__);
  2748. netif_rx(skb);
  2749. } else {
  2750. if (dhdp->info->rxthread_enabled) {
  2751. if (!skbhead) {
  2752. skbhead = skb;
  2753. } else {
  2754. PKTSETNEXT(dhdp->osh, skbprev, skb);
  2755. }
  2756. skbprev = skb;
  2757. } else {
  2758. /* If the receive is not processed inside an ISR,
  2759. * the softirqd must be woken explicitly to service
  2760. * the NET_RX_SOFTIRQ. In 2.6 kernels, this is handled
  2761. * by netif_rx_ni(), but in earlier kernels, we need
  2762. * to do it manually.
  2763. */
  2764. bcm_object_trace_opr(skb, BCM_OBJDBG_REMOVE,
  2765. __FUNCTION__, __LINE__);
  2766. netif_rx_ni(skb);
  2767. }
  2768. }
  2769. if (dhdp->info->rxthread_enabled && skbhead)
  2770. dhd_sched_rxf(dhdp, skbhead);
  2771. return BCME_OK;
  2772. }
  2773. int BCMFASTPATH
  2774. __dhd_sendpkt(dhd_pub_t *dhdp, int ifidx, void *pktbuf)
  2775. {
  2776. int ret = BCME_OK;
  2777. dhd_info_t *dhd = (dhd_info_t *)(dhdp->info);
  2778. struct ether_header *eh = NULL;
  2779. bool pkt_ether_type_802_1x = FALSE;
  2780. uint8 pkt_flow_prio;
  2781. #if defined(DHD_L2_FILTER)
  2782. dhd_if_t *ifp = dhd_get_ifp(dhdp, ifidx);
  2783. #endif // endif
  2784. /* Reject if down */
  2785. if (!dhdp->up || (dhdp->busstate == DHD_BUS_DOWN)) {
  2786. /* free the packet here since the caller won't */
  2787. PKTCFREE(dhdp->osh, pktbuf, TRUE);
  2788. return -ENODEV;
  2789. }
  2790. #ifdef PCIE_FULL_DONGLE
  2791. if (dhdp->busstate == DHD_BUS_SUSPEND) {
  2792. DHD_ERROR(("%s : pcie is still in suspend state!!\n", __FUNCTION__));
  2793. PKTCFREE(dhdp->osh, pktbuf, TRUE);
  2794. return NETDEV_TX_BUSY;
  2795. }
  2796. #endif /* PCIE_FULL_DONGLE */
  2797. /* Reject if pktlen > MAX_MTU_SZ */
  2798. if (PKTLEN(dhdp->osh, pktbuf) > MAX_MTU_SZ) {
  2799. /* free the packet here since the caller won't */
  2800. dhdp->tx_big_packets++;
  2801. PKTCFREE(dhdp->osh, pktbuf, TRUE);
  2802. return BCME_ERROR;
  2803. }
  2804. #ifdef DHD_L2_FILTER
  2805. /* if dhcp_unicast is enabled, we need to convert the */
  2806. /* broadcast DHCP ACK/REPLY packets to Unicast. */
  2807. if (ifp->dhcp_unicast) {
  2808. uint8* mac_addr;
  2809. uint8* ehptr = NULL;
  2810. int ret;
  2811. ret = bcm_l2_filter_get_mac_addr_dhcp_pkt(dhdp->osh, pktbuf, ifidx, &mac_addr);
  2812. if (ret == BCME_OK) {
  2813. /* if given mac address having valid entry in sta list
  2814. * copy the given mac address, and return with BCME_OK
  2815. */
  2816. if (dhd_find_sta(dhdp, ifidx, mac_addr)) {
  2817. ehptr = PKTDATA(dhdp->osh, pktbuf);
  2818. bcopy(mac_addr, ehptr + ETHER_DEST_OFFSET, ETHER_ADDR_LEN);
  2819. }
  2820. }
  2821. }
  2822. if (ifp->grat_arp && DHD_IF_ROLE_AP(dhdp, ifidx)) {
  2823. if (bcm_l2_filter_gratuitous_arp(dhdp->osh, pktbuf) == BCME_OK) {
  2824. PKTCFREE(dhdp->osh, pktbuf, TRUE);
  2825. return BCME_ERROR;
  2826. }
  2827. }
  2828. if (ifp->parp_enable && DHD_IF_ROLE_AP(dhdp, ifidx)) {
  2829. ret = dhd_l2_filter_pkt_handle(dhdp, ifidx, pktbuf, TRUE);
  2830. /* Drop the packets if l2 filter has processed it already
  2831. * otherwise continue with the normal path
  2832. */
  2833. if (ret == BCME_OK) {
  2834. PKTCFREE(dhdp->osh, pktbuf, TRUE);
  2835. return BCME_ERROR;
  2836. }
  2837. }
  2838. #endif /* DHD_L2_FILTER */
  2839. /* Update multicast statistic */
  2840. if (PKTLEN(dhdp->osh, pktbuf) >= ETHER_HDR_LEN) {
  2841. uint8 *pktdata = (uint8 *)PKTDATA(dhdp->osh, pktbuf);
  2842. eh = (struct ether_header *)pktdata;
  2843. if (ETHER_ISMULTI(eh->ether_dhost))
  2844. dhdp->tx_multicast++;
  2845. if (ntoh16(eh->ether_type) == ETHER_TYPE_802_1X) {
  2846. #ifdef DHD_LOSSLESS_ROAMING
  2847. uint8 prio = (uint8)PKTPRIO(pktbuf);
  2848. /* back up 802.1x's priority */
  2849. dhdp->prio_8021x = prio;
  2850. #endif /* DHD_LOSSLESS_ROAMING */
  2851. pkt_ether_type_802_1x = TRUE;
  2852. DBG_EVENT_LOG(dhdp, WIFI_EVENT_DRIVER_EAPOL_FRAME_TRANSMIT_REQUESTED);
  2853. atomic_inc(&dhd->pend_8021x_cnt);
  2854. #if defined(WL_CFG80211) && defined(WL_WPS_SYNC)
  2855. wl_handle_wps_states(dhd_idx2net(dhdp, ifidx),
  2856. pktdata, PKTLEN(dhdp->osh, pktbuf), TRUE);
  2857. #endif /* WL_CFG80211 && WL_WPS_SYNC */
  2858. }
  2859. dhd_dump_pkt(dhdp, ifidx, pktdata,
  2860. (uint32)PKTLEN(dhdp->osh, pktbuf), TRUE, NULL, NULL);
  2861. } else {
  2862. PKTCFREE(dhdp->osh, pktbuf, TRUE);
  2863. return BCME_ERROR;
  2864. }
  2865. {
  2866. /* Look into the packet and update the packet priority */
  2867. #ifndef PKTPRIO_OVERRIDE
  2868. if (PKTPRIO(pktbuf) == 0)
  2869. #endif /* !PKTPRIO_OVERRIDE */
  2870. {
  2871. #if defined(QOS_MAP_SET)
  2872. pktsetprio_qms(pktbuf, wl_get_up_table(dhdp, ifidx), FALSE);
  2873. #else
  2874. pktsetprio(pktbuf, FALSE);
  2875. #endif /* QOS_MAP_SET */
  2876. }
  2877. #ifndef PKTPRIO_OVERRIDE
  2878. else {
  2879. /* Some protocols like OZMO use priority values from 256..263.
  2880. * these are magic values to indicate a specific 802.1d priority.
  2881. * make sure that priority field is in range of 0..7
  2882. */
  2883. PKTSETPRIO(pktbuf, PKTPRIO(pktbuf) & 0x7);
  2884. }
  2885. #endif /* !PKTPRIO_OVERRIDE */
  2886. }
  2887. BCM_REFERENCE(pkt_ether_type_802_1x);
  2888. BCM_REFERENCE(pkt_flow_prio);
  2889. #ifdef SUPPORT_SET_TID
  2890. dhd_set_tid_based_on_uid(dhdp, pktbuf);
  2891. #endif /* SUPPORT_SET_TID */
  2892. #ifdef PCIE_FULL_DONGLE
  2893. /*
  2894. * Lkup the per interface hash table, for a matching flowring. If one is not
  2895. * available, allocate a unique flowid and add a flowring entry.
  2896. * The found or newly created flowid is placed into the pktbuf's tag.
  2897. */
  2898. #ifdef DHD_LOSSLESS_ROAMING
  2899. /* For LLR override and use flowring with prio 7 for 802.1x packets */
  2900. if (pkt_ether_type_802_1x) {
  2901. pkt_flow_prio = PRIO_8021D_NC;
  2902. } else
  2903. #endif /* DHD_LOSSLESS_ROAMING */
  2904. {
  2905. pkt_flow_prio = dhdp->flow_prio_map[(PKTPRIO(pktbuf))];
  2906. }
  2907. ret = dhd_flowid_update(dhdp, ifidx, pkt_flow_prio, pktbuf);
  2908. if (ret != BCME_OK) {
  2909. if (ntoh16(eh->ether_type) == ETHER_TYPE_802_1X) {
  2910. atomic_dec(&dhd->pend_8021x_cnt);
  2911. }
  2912. PKTCFREE(dhd->pub.osh, pktbuf, TRUE);
  2913. return ret;
  2914. }
  2915. #endif /* PCIE_FULL_DONGLE */
  2916. #ifdef PROP_TXSTATUS
  2917. if (dhd_wlfc_is_supported(dhdp)) {
  2918. /* store the interface ID */
  2919. DHD_PKTTAG_SETIF(PKTTAG(pktbuf), ifidx);
  2920. /* store destination MAC in the tag as well */
  2921. DHD_PKTTAG_SETDSTN(PKTTAG(pktbuf), eh->ether_dhost);
  2922. /* decide which FIFO this packet belongs to */
  2923. if (ETHER_ISMULTI(eh->ether_dhost))
  2924. /* one additional queue index (highest AC + 1) is used for bc/mc queue */
  2925. DHD_PKTTAG_SETFIFO(PKTTAG(pktbuf), AC_COUNT);
  2926. else
  2927. DHD_PKTTAG_SETFIFO(PKTTAG(pktbuf), WME_PRIO2AC(PKTPRIO(pktbuf)));
  2928. } else
  2929. #endif /* PROP_TXSTATUS */
  2930. {
  2931. /* If the protocol uses a data header, apply it */
  2932. dhd_prot_hdrpush(dhdp, ifidx, pktbuf);
  2933. }
  2934. /* Use bus module to send data frame */
  2935. #ifdef PROP_TXSTATUS
  2936. {
  2937. if (dhd_wlfc_commit_packets(dhdp, (f_commitpkt_t)dhd_bus_txdata,
  2938. dhdp->bus, pktbuf, TRUE) == WLFC_UNSUPPORTED) {
  2939. /* non-proptxstatus way */
  2940. #ifdef BCMPCIE
  2941. ret = dhd_bus_txdata(dhdp->bus, pktbuf, (uint8)ifidx);
  2942. #else
  2943. ret = dhd_bus_txdata(dhdp->bus, pktbuf);
  2944. #endif /* BCMPCIE */
  2945. }
  2946. }
  2947. #else
  2948. #ifdef BCMPCIE
  2949. ret = dhd_bus_txdata(dhdp->bus, pktbuf, (uint8)ifidx);
  2950. #else
  2951. ret = dhd_bus_txdata(dhdp->bus, pktbuf);
  2952. #endif /* BCMPCIE */
  2953. #endif /* PROP_TXSTATUS */
  2954. return ret;
  2955. }
  2956. int BCMFASTPATH
  2957. dhd_sendpkt(dhd_pub_t *dhdp, int ifidx, void *pktbuf)
  2958. {
  2959. int ret = 0;
  2960. unsigned long flags;
  2961. dhd_if_t *ifp;
  2962. DHD_GENERAL_LOCK(dhdp, flags);
  2963. ifp = dhd_get_ifp(dhdp, ifidx);
  2964. if (!ifp || ifp->del_in_progress) {
  2965. DHD_ERROR(("%s: ifp:%p del_in_progress:%d\n",
  2966. __FUNCTION__, ifp, ifp ? ifp->del_in_progress : 0));
  2967. DHD_GENERAL_UNLOCK(dhdp, flags);
  2968. PKTCFREE(dhdp->osh, pktbuf, TRUE);
  2969. return -ENODEV;
  2970. }
  2971. if (DHD_BUS_CHECK_DOWN_OR_DOWN_IN_PROGRESS(dhdp)) {
  2972. DHD_ERROR(("%s: returning as busstate=%d\n",
  2973. __FUNCTION__, dhdp->busstate));
  2974. DHD_GENERAL_UNLOCK(dhdp, flags);
  2975. PKTCFREE(dhdp->osh, pktbuf, TRUE);
  2976. return -ENODEV;
  2977. }
  2978. DHD_IF_SET_TX_ACTIVE(ifp, DHD_TX_SEND_PKT);
  2979. DHD_BUS_BUSY_SET_IN_SEND_PKT(dhdp);
  2980. DHD_GENERAL_UNLOCK(dhdp, flags);
  2981. #ifdef DHD_PCIE_RUNTIMEPM
  2982. if (dhdpcie_runtime_bus_wake(dhdp, FALSE, __builtin_return_address(0))) {
  2983. DHD_ERROR(("%s : pcie is still in suspend state!!\n", __FUNCTION__));
  2984. PKTCFREE(dhdp->osh, pktbuf, TRUE);
  2985. ret = -EBUSY;
  2986. goto exit;
  2987. }
  2988. #endif /* DHD_PCIE_RUNTIMEPM */
  2989. DHD_GENERAL_LOCK(dhdp, flags);
  2990. if (DHD_BUS_CHECK_SUSPEND_OR_SUSPEND_IN_PROGRESS(dhdp)) {
  2991. DHD_ERROR(("%s: bus is in suspend(%d) or suspending(0x%x) state!!\n",
  2992. __FUNCTION__, dhdp->busstate, dhdp->dhd_bus_busy_state));
  2993. DHD_BUS_BUSY_CLEAR_IN_SEND_PKT(dhdp);
  2994. DHD_IF_CLR_TX_ACTIVE(ifp, DHD_TX_SEND_PKT);
  2995. dhd_os_tx_completion_wake(dhdp);
  2996. dhd_os_busbusy_wake(dhdp);
  2997. DHD_GENERAL_UNLOCK(dhdp, flags);
  2998. PKTCFREE(dhdp->osh, pktbuf, TRUE);
  2999. return -ENODEV;
  3000. }
  3001. DHD_GENERAL_UNLOCK(dhdp, flags);
  3002. ret = __dhd_sendpkt(dhdp, ifidx, pktbuf);
  3003. #ifdef DHD_PCIE_RUNTIMEPM
  3004. exit:
  3005. #endif // endif
  3006. DHD_GENERAL_LOCK(dhdp, flags);
  3007. DHD_BUS_BUSY_CLEAR_IN_SEND_PKT(dhdp);
  3008. DHD_IF_CLR_TX_ACTIVE(ifp, DHD_TX_SEND_PKT);
  3009. dhd_os_tx_completion_wake(dhdp);
  3010. dhd_os_busbusy_wake(dhdp);
  3011. DHD_GENERAL_UNLOCK(dhdp, flags);
  3012. return ret;
  3013. }
  3014. int BCMFASTPATH
  3015. dhd_start_xmit(struct sk_buff *skb, struct net_device *net)
  3016. {
  3017. int ret;
  3018. uint datalen;
  3019. void *pktbuf;
  3020. dhd_info_t *dhd = DHD_DEV_INFO(net);
  3021. dhd_if_t *ifp = NULL;
  3022. int ifidx;
  3023. unsigned long flags;
  3024. uint8 htsfdlystat_sz = 0;
  3025. DHD_TRACE(("%s: Enter\n", __FUNCTION__));
  3026. if (dhd_query_bus_erros(&dhd->pub)) {
  3027. return -ENODEV;
  3028. }
  3029. DHD_GENERAL_LOCK(&dhd->pub, flags);
  3030. DHD_BUS_BUSY_SET_IN_TX(&dhd->pub);
  3031. DHD_GENERAL_UNLOCK(&dhd->pub, flags);
  3032. #ifdef DHD_PCIE_RUNTIMEPM
  3033. if (dhdpcie_runtime_bus_wake(&dhd->pub, FALSE, dhd_start_xmit)) {
  3034. /* In order to avoid pkt loss. Return NETDEV_TX_BUSY until run-time resumed. */
  3035. /* stop the network queue temporarily until resume done */
  3036. DHD_GENERAL_LOCK(&dhd->pub, flags);
  3037. if (!dhdpcie_is_resume_done(&dhd->pub)) {
  3038. dhd_bus_stop_queue(dhd->pub.bus);
  3039. }
  3040. DHD_BUS_BUSY_CLEAR_IN_TX(&dhd->pub);
  3041. dhd_os_busbusy_wake(&dhd->pub);
  3042. DHD_GENERAL_UNLOCK(&dhd->pub, flags);
  3043. return NETDEV_TX_BUSY;
  3044. }
  3045. #endif /* DHD_PCIE_RUNTIMEPM */
  3046. DHD_GENERAL_LOCK(&dhd->pub, flags);
  3047. if (DHD_BUS_CHECK_SUSPEND_OR_SUSPEND_IN_PROGRESS(&dhd->pub)) {
  3048. DHD_ERROR(("%s: bus is in suspend(%d) or suspending(0x%x) state!!\n",
  3049. __FUNCTION__, dhd->pub.busstate, dhd->pub.dhd_bus_busy_state));
  3050. DHD_BUS_BUSY_CLEAR_IN_TX(&dhd->pub);
  3051. #ifdef PCIE_FULL_DONGLE
  3052. /* Stop tx queues if suspend is in progress */
  3053. if (DHD_BUS_CHECK_ANY_SUSPEND_IN_PROGRESS(&dhd->pub)) {
  3054. dhd_bus_stop_queue(dhd->pub.bus);
  3055. }
  3056. #endif /* PCIE_FULL_DONGLE */
  3057. dhd_os_busbusy_wake(&dhd->pub);
  3058. DHD_GENERAL_UNLOCK(&dhd->pub, flags);
  3059. return NETDEV_TX_BUSY;
  3060. }
  3061. DHD_OS_WAKE_LOCK(&dhd->pub);
  3062. DHD_PERIM_LOCK_TRY(DHD_FWDER_UNIT(dhd), lock_taken);
  3063. #if defined(DHD_HANG_SEND_UP_TEST)
  3064. if (dhd->pub.req_hang_type == HANG_REASON_BUS_DOWN) {
  3065. DHD_ERROR(("%s: making DHD_BUS_DOWN\n", __FUNCTION__));
  3066. dhd->pub.busstate = DHD_BUS_DOWN;
  3067. }
  3068. #endif /* DHD_HANG_SEND_UP_TEST */
  3069. /* Reject if down */
  3070. if (dhd->pub.hang_was_sent || DHD_BUS_CHECK_DOWN_OR_DOWN_IN_PROGRESS(&dhd->pub)) {
  3071. DHD_ERROR(("%s: xmit rejected pub.up=%d busstate=%d \n",
  3072. __FUNCTION__, dhd->pub.up, dhd->pub.busstate));
  3073. netif_stop_queue(net);
  3074. #if defined(OEM_ANDROID)
  3075. /* Send Event when bus down detected during data session */
  3076. if (dhd->pub.up && !dhd->pub.hang_was_sent) {
  3077. DHD_ERROR(("%s: Event HANG sent up\n", __FUNCTION__));
  3078. dhd->pub.hang_reason = HANG_REASON_BUS_DOWN;
  3079. net_os_send_hang_message(net);
  3080. }
  3081. #endif /* OEM_ANDROID */
  3082. DHD_BUS_BUSY_CLEAR_IN_TX(&dhd->pub);
  3083. dhd_os_busbusy_wake(&dhd->pub);
  3084. DHD_GENERAL_UNLOCK(&dhd->pub, flags);
  3085. DHD_PERIM_UNLOCK_TRY(DHD_FWDER_UNIT(dhd), lock_taken);
  3086. DHD_OS_WAKE_UNLOCK(&dhd->pub);
  3087. return NETDEV_TX_BUSY;
  3088. }
  3089. ifp = DHD_DEV_IFP(net);
  3090. ifidx = DHD_DEV_IFIDX(net);
  3091. if (!ifp || (ifidx == DHD_BAD_IF) ||
  3092. ifp->del_in_progress) {
  3093. DHD_ERROR(("%s: ifidx %d ifp:%p del_in_progress:%d\n",
  3094. __FUNCTION__, ifidx, ifp, (ifp ? ifp->del_in_progress : 0)));
  3095. netif_stop_queue(net);
  3096. DHD_BUS_BUSY_CLEAR_IN_TX(&dhd->pub);
  3097. dhd_os_busbusy_wake(&dhd->pub);
  3098. DHD_GENERAL_UNLOCK(&dhd->pub, flags);
  3099. DHD_PERIM_UNLOCK_TRY(DHD_FWDER_UNIT(dhd), lock_taken);
  3100. DHD_OS_WAKE_UNLOCK(&dhd->pub);
  3101. return NETDEV_TX_BUSY;
  3102. }
  3103. DHD_IF_SET_TX_ACTIVE(ifp, DHD_TX_START_XMIT);
  3104. DHD_GENERAL_UNLOCK(&dhd->pub, flags);
  3105. ASSERT(ifidx == dhd_net2idx(dhd, net));
  3106. ASSERT((ifp != NULL) && ((ifidx < DHD_MAX_IFS) && (ifp == dhd->iflist[ifidx])));
  3107. bcm_object_trace_opr(skb, BCM_OBJDBG_ADD_PKT, __FUNCTION__, __LINE__);
  3108. /* re-align socket buffer if "skb->data" is odd address */
  3109. if (((unsigned long)(skb->data)) & 0x1) {
  3110. unsigned char *data = skb->data;
  3111. uint32 length = skb->len;
  3112. PKTPUSH(dhd->pub.osh, skb, 1);
  3113. memmove(skb->data, data, length);
  3114. PKTSETLEN(dhd->pub.osh, skb, length);
  3115. }
  3116. datalen = PKTLEN(dhd->pub.osh, skb);
  3117. /* Make sure there's enough room for any header */
  3118. if (skb_headroom(skb) < dhd->pub.hdrlen + htsfdlystat_sz) {
  3119. struct sk_buff *skb2;
  3120. DHD_INFO(("%s: insufficient headroom\n",
  3121. dhd_ifname(&dhd->pub, ifidx)));
  3122. dhd->pub.tx_realloc++;
  3123. bcm_object_trace_opr(skb, BCM_OBJDBG_REMOVE, __FUNCTION__, __LINE__);
  3124. skb2 = skb_realloc_headroom(skb, dhd->pub.hdrlen + htsfdlystat_sz);
  3125. dev_kfree_skb(skb);
  3126. if ((skb = skb2) == NULL) {
  3127. DHD_ERROR(("%s: skb_realloc_headroom failed\n",
  3128. dhd_ifname(&dhd->pub, ifidx)));
  3129. ret = -ENOMEM;
  3130. goto done;
  3131. }
  3132. bcm_object_trace_opr(skb, BCM_OBJDBG_ADD_PKT, __FUNCTION__, __LINE__);
  3133. }
  3134. /* Convert to packet */
  3135. if (!(pktbuf = PKTFRMNATIVE(dhd->pub.osh, skb))) {
  3136. DHD_ERROR(("%s: PKTFRMNATIVE failed\n",
  3137. dhd_ifname(&dhd->pub, ifidx)));
  3138. bcm_object_trace_opr(skb, BCM_OBJDBG_REMOVE, __FUNCTION__, __LINE__);
  3139. dev_kfree_skb_any(skb);
  3140. ret = -ENOMEM;
  3141. goto done;
  3142. }
  3143. #ifdef DHD_WET
  3144. /* wet related packet proto manipulation should be done in DHD
  3145. since dongle doesn't have complete payload
  3146. */
  3147. if (WET_ENABLED(&dhd->pub) &&
  3148. (dhd_wet_send_proc(dhd->pub.wet_info, pktbuf, &pktbuf) < 0)) {
  3149. DHD_INFO(("%s:%s: wet send proc failed\n",
  3150. __FUNCTION__, dhd_ifname(&dhd->pub, ifidx)));
  3151. PKTFREE(dhd->pub.osh, pktbuf, FALSE);
  3152. ret = -EFAULT;
  3153. goto done;
  3154. }
  3155. #endif /* DHD_WET */
  3156. #ifdef DHD_PSTA
  3157. /* PSR related packet proto manipulation should be done in DHD
  3158. * since dongle doesn't have complete payload
  3159. */
  3160. if (PSR_ENABLED(&dhd->pub) &&
  3161. (dhd_psta_proc(&dhd->pub, ifidx, &pktbuf, TRUE) < 0)) {
  3162. DHD_ERROR(("%s:%s: psta send proc failed\n", __FUNCTION__,
  3163. dhd_ifname(&dhd->pub, ifidx)));
  3164. }
  3165. #endif /* DHD_PSTA */
  3166. #ifdef DHDTCPSYNC_FLOOD_BLK
  3167. if (dhd_tcpdata_get_flag(&dhd->pub, pktbuf) == FLAG_SYNCACK) {
  3168. ifp->tsyncack_txed ++;
  3169. }
  3170. #endif /* DHDTCPSYNC_FLOOD_BLK */
  3171. #ifdef DHDTCPACK_SUPPRESS
  3172. if (dhd->pub.tcpack_sup_mode == TCPACK_SUP_HOLD) {
  3173. /* If this packet has been hold or got freed, just return */
  3174. if (dhd_tcpack_hold(&dhd->pub, pktbuf, ifidx)) {
  3175. ret = 0;
  3176. goto done;
  3177. }
  3178. } else {
  3179. /* If this packet has replaced another packet and got freed, just return */
  3180. if (dhd_tcpack_suppress(&dhd->pub, pktbuf)) {
  3181. ret = 0;
  3182. goto done;
  3183. }
  3184. }
  3185. #endif /* DHDTCPACK_SUPPRESS */
  3186. /*
  3187. * If Load Balance is enabled queue the packet
  3188. * else send directly from here.
  3189. */
  3190. #if defined(DHD_LB_TXP)
  3191. ret = dhd_lb_sendpkt(dhd, net, ifidx, pktbuf);
  3192. #else
  3193. ret = __dhd_sendpkt(&dhd->pub, ifidx, pktbuf);
  3194. #endif // endif
  3195. done:
  3196. if (ret) {
  3197. ifp->stats.tx_dropped++;
  3198. dhd->pub.tx_dropped++;
  3199. } else {
  3200. #ifdef PROP_TXSTATUS
  3201. /* tx_packets counter can counted only when wlfc is disabled */
  3202. if (!dhd_wlfc_is_supported(&dhd->pub))
  3203. #endif // endif
  3204. {
  3205. dhd->pub.tx_packets++;
  3206. ifp->stats.tx_packets++;
  3207. ifp->stats.tx_bytes += datalen;
  3208. }
  3209. }
  3210. DHD_GENERAL_LOCK(&dhd->pub, flags);
  3211. DHD_BUS_BUSY_CLEAR_IN_TX(&dhd->pub);
  3212. DHD_IF_CLR_TX_ACTIVE(ifp, DHD_TX_START_XMIT);
  3213. dhd_os_tx_completion_wake(&dhd->pub);
  3214. dhd_os_busbusy_wake(&dhd->pub);
  3215. DHD_GENERAL_UNLOCK(&dhd->pub, flags);
  3216. DHD_PERIM_UNLOCK_TRY(DHD_FWDER_UNIT(dhd), lock_taken);
  3217. DHD_OS_WAKE_UNLOCK(&dhd->pub);
  3218. /* Return ok: we always eat the packet */
  3219. return NETDEV_TX_OK;
  3220. }
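/* The function above always returns NETDEV_TX_OK: every path either queues
 * the packet or frees it, since the driver owns the skb once .ndo_start_xmit
 * is entered. A minimal sketch of that contract; my_xmit and my_try_to_queue
 * are hypothetical names, not part of this driver:
 */
#if 0
static netdev_tx_t my_xmit(struct sk_buff *skb, struct net_device *dev)
{
	if (my_try_to_queue(skb, dev) != 0)
		dev_kfree_skb_any(skb);	/* consume the skb on failure too */
	return NETDEV_TX_OK;		/* never leak it, never return it */
}
#endif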
  3221. #ifdef DHD_PCIE_NATIVE_RUNTIMEPM
  3222. void dhd_rx_wq_wakeup(struct work_struct *ptr)
  3223. {
  3224. struct dhd_rx_tx_work *work;
  3225. struct dhd_pub * pub;
  3226. work = container_of(ptr, struct dhd_rx_tx_work, work);
  3227. pub = work->pub;
  3228. DHD_RPM(("%s: ENTER. \n", __FUNCTION__));
3229. if (atomic_read(&pub->block_bus) || pub->busstate == DHD_BUS_DOWN) {
3230. kfree(work); return;	/* do not leak the work item on early exit */
3231. }
  3232. DHD_OS_WAKE_LOCK(pub);
  3233. if (pm_runtime_get_sync(dhd_bus_to_dev(pub->bus)) >= 0) {
  3234. // do nothing but wakeup the bus.
  3235. pm_runtime_mark_last_busy(dhd_bus_to_dev(pub->bus));
  3236. pm_runtime_put_autosuspend(dhd_bus_to_dev(pub->bus));
  3237. }
  3238. DHD_OS_WAKE_UNLOCK(pub);
  3239. kfree(work);
  3240. }
  3241. void dhd_start_xmit_wq_adapter(struct work_struct *ptr)
  3242. {
  3243. struct dhd_rx_tx_work *work;
3244. int ret = 0; /* initialized: the pm_runtime_get_sync() failure path leaves ret unset otherwise */
  3245. dhd_info_t *dhd;
  3246. struct dhd_bus * bus;
  3247. work = container_of(ptr, struct dhd_rx_tx_work, work);
  3248. dhd = DHD_DEV_INFO(work->net);
  3249. bus = dhd->pub.bus;
  3250. if (atomic_read(&dhd->pub.block_bus)) {
  3251. kfree_skb(work->skb);
  3252. kfree(work);
  3253. dhd_netif_start_queue(bus);
  3254. return;
  3255. }
  3256. if (pm_runtime_get_sync(dhd_bus_to_dev(bus)) >= 0) {
  3257. ret = dhd_start_xmit(work->skb, work->net);
  3258. pm_runtime_mark_last_busy(dhd_bus_to_dev(bus));
  3259. pm_runtime_put_autosuspend(dhd_bus_to_dev(bus));
  3260. }
3261. if (ret)
3262. netdev_err(work->net,
3263. "error: dhd_start_xmit():%d\n", ret);
3264. kfree(work); /* free only after the last use of work->net */
3265. dhd_netif_start_queue(bus);
3266. }
  3267. int BCMFASTPATH
  3268. dhd_start_xmit_wrapper(struct sk_buff *skb, struct net_device *net)
  3269. {
  3270. struct dhd_rx_tx_work *start_xmit_work;
  3271. int ret;
  3272. dhd_info_t *dhd = DHD_DEV_INFO(net);
  3273. if (dhd->pub.busstate == DHD_BUS_SUSPEND) {
  3274. DHD_RPM(("%s: wakeup the bus using workqueue.\n", __FUNCTION__));
  3275. dhd_netif_stop_queue(dhd->pub.bus);
  3276. start_xmit_work = (struct dhd_rx_tx_work*)
  3277. kmalloc(sizeof(*start_xmit_work), GFP_ATOMIC);
  3278. if (!start_xmit_work) {
  3279. netdev_err(net,
  3280. "error: failed to alloc start_xmit_work\n");
  3281. ret = -ENOMEM;
  3282. goto exit;
  3283. }
  3284. INIT_WORK(&start_xmit_work->work, dhd_start_xmit_wq_adapter);
  3285. start_xmit_work->skb = skb;
  3286. start_xmit_work->net = net;
  3287. queue_work(dhd->tx_wq, &start_xmit_work->work);
  3288. ret = NET_XMIT_SUCCESS;
  3289. } else if (dhd->pub.busstate == DHD_BUS_DATA) {
  3290. ret = dhd_start_xmit(skb, net);
  3291. } else {
  3292. /* when bus is down */
  3293. ret = -ENODEV;
  3294. }
  3295. exit:
  3296. return ret;
  3297. }
  3298. void
  3299. dhd_bus_wakeup_work(dhd_pub_t *dhdp)
  3300. {
  3301. struct dhd_rx_tx_work *rx_work;
  3302. dhd_info_t *dhd = (dhd_info_t *)dhdp->info;
  3303. rx_work = kmalloc(sizeof(*rx_work), GFP_ATOMIC);
  3304. if (!rx_work) {
  3305. DHD_ERROR(("%s: start_rx_work alloc error. \n", __FUNCTION__));
  3306. return;
  3307. }
  3308. INIT_WORK(&rx_work->work, dhd_rx_wq_wakeup);
  3309. rx_work->pub = dhdp;
  3310. queue_work(dhd->rx_wq, &rx_work->work);
  3311. }
  3312. #endif /* DHD_PCIE_NATIVE_RUNTIMEPM */
  3313. static void
  3314. __dhd_txflowcontrol(dhd_pub_t *dhdp, struct net_device *net, bool state)
  3315. {
  3316. if ((state == ON) && (dhdp->txoff == FALSE)) {
  3317. netif_stop_queue(net);
  3318. dhd_prot_update_pktid_txq_stop_cnt(dhdp);
  3319. } else if (state == ON) {
  3320. DHD_ERROR(("%s: Netif Queue has already stopped\n", __FUNCTION__));
  3321. }
  3322. if ((state == OFF) && (dhdp->txoff == TRUE)) {
  3323. netif_wake_queue(net);
  3324. dhd_prot_update_pktid_txq_start_cnt(dhdp);
  3325. } else if (state == OFF) {
  3326. DHD_ERROR(("%s: Netif Queue has already started\n", __FUNCTION__));
  3327. }
  3328. }
  3329. void
  3330. dhd_txflowcontrol(dhd_pub_t *dhdp, int ifidx, bool state)
  3331. {
  3332. struct net_device *net;
  3333. dhd_info_t *dhd = dhdp->info;
  3334. int i;
  3335. DHD_TRACE(("%s: Enter\n", __FUNCTION__));
  3336. ASSERT(dhd);
  3337. #ifdef DHD_LOSSLESS_ROAMING
  3338. /* block flowcontrol during roaming */
  3339. if ((dhdp->dequeue_prec_map == 1 << PRIO_8021D_NC) && state == ON) {
  3340. return;
  3341. }
  3342. #endif // endif
  3343. if (ifidx == ALL_INTERFACES) {
  3344. for (i = 0; i < DHD_MAX_IFS; i++) {
  3345. if (dhd->iflist[i]) {
  3346. net = dhd->iflist[i]->net;
  3347. __dhd_txflowcontrol(dhdp, net, state);
  3348. }
  3349. }
  3350. } else {
  3351. if (dhd->iflist[ifidx]) {
  3352. net = dhd->iflist[ifidx]->net;
  3353. __dhd_txflowcontrol(dhdp, net, state);
  3354. }
  3355. }
  3356. dhdp->txoff = state;
  3357. }
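/* A short usage sketch for the flow-control pair above: the protocol layer
 * is expected to assert ON when its rings fill and OFF when they drain, and
 * ALL_INTERFACES fans the state out to every registered netdev. Illustrative
 * only, assuming a valid dhdp:
 */
#if 0
	dhd_txflowcontrol(dhdp, ALL_INTERFACES, ON);	/* rings almost full */
	/* ... descriptors reclaimed ... */
	dhd_txflowcontrol(dhdp, ALL_INTERFACES, OFF);	/* resume transmit */
#endif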
  3358. #ifdef DHD_MCAST_REGEN
3359. /*
3360. * Description: Reverse-translate a unicast destination MAC back to the
3361. * multicast MAC derived from a multicast destination IP.
3362. * Input: eh - pointer to the Ethernet header
3363. */
  3364. int32
  3365. dhd_mcast_reverse_translation(struct ether_header *eh)
  3366. {
3367. uint8 *iph;
3368. uint32 dest_ip;
3369. iph = (uint8 *)eh + ETHER_HDR_LEN;
3370. /* Only IP packets are handled */
3371. if (eh->ether_type != hton16(ETHER_TYPE_IP))
3372. return BCME_ERROR;
3373. /* Non-IPv4 multicast packets are not handled */
3374. if (IP_VER(iph) != IP_VER_4)
3375. return BCME_ERROR;
3376. dest_ip = ntoh32(*((uint32 *)(iph + IPV4_DEST_IP_OFFSET))); /* read only after the checks above */
  3377. /*
  3378. * The packet has a multicast IP and unicast MAC. That means
  3379. * we have to do the reverse translation
  3380. */
  3381. if (IPV4_ISMULTI(dest_ip) && !ETHER_ISMULTI(&eh->ether_dhost)) {
  3382. ETHER_FILL_MCAST_ADDR_FROM_IP(eh->ether_dhost, dest_ip);
  3383. return BCME_OK;
  3384. }
  3385. return BCME_ERROR;
  3386. }
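/* ETHER_FILL_MCAST_ADDR_FROM_IP() applies the standard IANA mapping: the
 * 01:00:5e prefix plus the low 23 bits of the group address, so for example
 * 239.1.2.3 becomes 01:00:5e:01:02:03. A sketch of the mapping (illustrative,
 * not the macro's actual definition):
 */
#if 0
static void ip_mcast_to_ether(uint32 dest_ip /* host order */, uint8 mac[6])
{
	mac[0] = 0x01;
	mac[1] = 0x00;
	mac[2] = 0x5e;
	mac[3] = (dest_ip >> 16) & 0x7f;	/* only 23 low bits survive */
	mac[4] = (dest_ip >> 8) & 0xff;
	mac[5] = dest_ip & 0xff;
}
#endif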
3387. #endif /* DHD_MCAST_REGEN */
  3388. #ifdef SHOW_LOGTRACE
  3389. static void
  3390. dhd_netif_rx_ni(struct sk_buff * skb)
  3391. {
3392. /* Do not call netif_receive_skb, as this workqueue scheduler is
3393. * not driven from NAPI. Also, as we are not in interrupt context,
3394. * do not call netif_rx; instead call netif_rx_ni (kernel >= 2.6),
3395. * which does netif_rx, disables irqs, raises NET_RX_SOFTIRQ and
3396. * re-enables interrupts
3397. */
  3398. netif_rx_ni(skb);
  3399. }
  3400. static int
  3401. dhd_event_logtrace_pkt_process(dhd_pub_t *dhdp, struct sk_buff * skb)
  3402. {
  3403. dhd_info_t *dhd = (dhd_info_t *)dhdp->info;
  3404. int ret = BCME_OK;
  3405. uint datalen;
  3406. bcm_event_msg_u_t evu;
  3407. void *data = NULL;
  3408. void *pktdata = NULL;
  3409. bcm_event_t *pvt_data;
  3410. uint pktlen;
  3411. DHD_TRACE(("%s:Enter\n", __FUNCTION__));
  3412. /* In dhd_rx_frame, header is stripped using skb_pull
  3413. * of size ETH_HLEN, so adjust pktlen accordingly
  3414. */
  3415. pktlen = skb->len + ETH_HLEN;
  3416. pktdata = (void *)skb_mac_header(skb);
  3417. ret = wl_host_event_get_data(pktdata, pktlen, &evu);
  3418. if (ret != BCME_OK) {
  3419. DHD_ERROR(("%s: wl_host_event_get_data err = %d\n",
  3420. __FUNCTION__, ret));
  3421. goto exit;
  3422. }
  3423. datalen = ntoh32(evu.event.datalen);
  3424. pvt_data = (bcm_event_t *)pktdata;
  3425. data = &pvt_data[1];
  3426. dhd_dbg_trace_evnt_handler(dhdp, data, &dhd->event_data, datalen);
  3427. exit:
  3428. return ret;
  3429. }
3430. /*
3431. * dhd_event_logtrace_process_items processes
3432. * each skb from evt_trace_queue.
3433. * Returns TRUE if more packets remain to be processed,
3434. * else returns FALSE.
3435. */
  3436. static int
  3437. dhd_event_logtrace_process_items(dhd_info_t *dhd)
  3438. {
  3439. dhd_pub_t *dhdp;
  3440. struct sk_buff *skb;
  3441. uint32 qlen;
  3442. uint32 process_len;
  3443. if (!dhd) {
  3444. DHD_ERROR(("%s: dhd info not available \n", __FUNCTION__));
  3445. return 0;
  3446. }
  3447. dhdp = &dhd->pub;
  3448. if (!dhdp) {
  3449. DHD_ERROR(("%s: dhd pub is null \n", __FUNCTION__));
  3450. return 0;
  3451. }
  3452. qlen = skb_queue_len(&dhd->evt_trace_queue);
  3453. process_len = MIN(qlen, DHD_EVENT_LOGTRACE_BOUND);
  3454. /* Run while loop till bound is reached or skb queue is empty */
  3455. while (process_len--) {
  3456. int ifid = 0;
  3457. skb = skb_dequeue(&dhd->evt_trace_queue);
  3458. if (skb == NULL) {
  3459. DHD_ERROR(("%s: skb is NULL, which is not valid case\n",
  3460. __FUNCTION__));
  3461. break;
  3462. }
  3463. BCM_REFERENCE(ifid);
  3464. #ifdef PCIE_FULL_DONGLE
  3465. /* Check if pkt is from INFO ring or WLC_E_TRACE */
  3466. ifid = DHD_PKTTAG_IFID((dhd_pkttag_fr_t *)PKTTAG(skb));
  3467. if (ifid == DHD_DUMMY_INFO_IF) {
  3468. /* Process logtrace from info rings */
  3469. dhd_event_logtrace_infobuf_pkt_process(dhdp, skb, &dhd->event_data);
  3470. } else
  3471. #endif /* PCIE_FULL_DONGLE */
  3472. {
3473. /* Processing the WLC_E_TRACE case, or the non-PCIE_FULL_DONGLE case */
  3474. dhd_event_logtrace_pkt_process(dhdp, skb);
  3475. }
  3476. /* Dummy sleep so that scheduler kicks in after processing any logprints */
  3477. OSL_SLEEP(0);
  3478. /* Send packet up if logtrace_pkt_sendup is TRUE */
  3479. if (dhdp->logtrace_pkt_sendup) {
  3480. #ifdef DHD_USE_STATIC_CTRLBUF
  3481. /* If bufs are allocated via static buf pool
  3482. * and logtrace_pkt_sendup enabled, make a copy,
  3483. * free the local one and send the copy up.
  3484. */
  3485. void *npkt = PKTDUP(dhdp->osh, skb);
  3486. /* Clone event and send it up */
  3487. PKTFREE_STATIC(dhdp->osh, skb, FALSE);
  3488. if (npkt) {
  3489. skb = npkt;
  3490. } else {
  3491. DHD_ERROR(("skb clone failed. dropping logtrace pkt.\n"));
  3492. /* Packet is already freed, go to next packet */
  3493. continue;
  3494. }
  3495. #endif /* DHD_USE_STATIC_CTRLBUF */
  3496. #ifdef PCIE_FULL_DONGLE
3497. /* For infobuf packets, whose ifidx is DHD_DUMMY_INFO_IF,
3498. * assign skb->dev to the primary interface's net device
3499. * so the skb can be sent to the network layer
3500. */
  3501. if (ifid == DHD_DUMMY_INFO_IF) {
  3502. skb = PKTTONATIVE(dhdp->osh, skb);
  3503. skb->dev = dhd->iflist[0]->net;
  3504. }
  3505. #endif /* PCIE_FULL_DONGLE */
  3506. /* Send pkt UP */
  3507. dhd_netif_rx_ni(skb);
  3508. } else {
  3509. /* Don't send up. Free up the packet. */
  3510. #ifdef DHD_USE_STATIC_CTRLBUF
  3511. PKTFREE_STATIC(dhdp->osh, skb, FALSE);
  3512. #else
  3513. PKTFREE(dhdp->osh, skb, FALSE);
  3514. #endif /* DHD_USE_STATIC_CTRLBUF */
  3515. }
  3516. }
  3517. /* Reschedule if more packets to be processed */
  3518. return (qlen >= DHD_EVENT_LOGTRACE_BOUND);
  3519. }
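/* The function above is a bounded-batch consumer: at most
 * DHD_EVENT_LOGTRACE_BOUND skbs are handled per call, and a non-zero return
 * asks the caller to reschedule. Callers therefore loop like the sketch
 * below (this mirrors the kthread loop further down; shown in isolation):
 */
#if 0
	while (dhd_event_logtrace_process_items(dhd) > 0)
		cond_resched();	/* yield so a log flood cannot starve others */
#endif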
  3520. #ifdef DHD_USE_KTHREAD_FOR_LOGTRACE
  3521. static int
  3522. dhd_logtrace_thread(void *data)
  3523. {
  3524. tsk_ctl_t *tsk = (tsk_ctl_t *)data;
  3525. dhd_info_t *dhd = (dhd_info_t *)tsk->parent;
  3526. dhd_pub_t *dhdp = (dhd_pub_t *)&dhd->pub;
  3527. int ret;
  3528. while (1) {
  3529. dhdp->logtrace_thr_ts.entry_time = OSL_LOCALTIME_NS();
  3530. if (!binary_sema_down(tsk)) {
  3531. dhdp->logtrace_thr_ts.sem_down_time = OSL_LOCALTIME_NS();
  3532. SMP_RD_BARRIER_DEPENDS();
  3533. if (dhd->pub.dongle_reset == FALSE) {
  3534. do {
  3535. /* Check terminated before processing the items */
  3536. if (tsk->terminated) {
  3537. DHD_ERROR(("%s: task terminated\n", __FUNCTION__));
  3538. goto exit;
  3539. }
  3540. #ifdef EWP_EDL
  3541. /* check if EDL is being used */
  3542. if (dhd->pub.dongle_edl_support) {
  3543. ret = dhd_prot_process_edl_complete(&dhd->pub,
  3544. &dhd->event_data);
  3545. } else {
  3546. ret = dhd_event_logtrace_process_items(dhd);
  3547. }
  3548. #else
  3549. ret = dhd_event_logtrace_process_items(dhd);
  3550. #endif /* EWP_EDL */
3551. /* if ret > 0, the bound has been reached, so to be fair to other
3552. * processes we need to yield the scheduler.
  3553. * The comment above yield()'s definition says:
  3554. * If you want to use yield() to wait for something,
  3555. * use wait_event().
  3556. * If you want to use yield() to be 'nice' for others,
  3557. * use cond_resched().
  3558. * If you still want to use yield(), do not!
  3559. */
  3560. if (ret > 0) {
  3561. cond_resched();
  3562. OSL_SLEEP(DHD_EVENT_LOGTRACE_RESCHEDULE_DELAY_MS);
  3563. } else if (ret < 0) {
  3564. DHD_ERROR(("%s: ERROR should not reach here\n",
  3565. __FUNCTION__));
  3566. }
  3567. } while (ret > 0);
  3568. }
  3569. if (tsk->flush_ind) {
  3570. DHD_ERROR(("%s: flushed\n", __FUNCTION__));
  3571. dhdp->logtrace_thr_ts.flush_time = OSL_LOCALTIME_NS();
  3572. tsk->flush_ind = 0;
  3573. complete(&tsk->flushed);
  3574. }
  3575. } else {
3576. DHD_ERROR(("%s: unexpected break\n", __FUNCTION__));
  3577. dhdp->logtrace_thr_ts.unexpected_break_time = OSL_LOCALTIME_NS();
  3578. break;
  3579. }
  3580. }
3581. exit:
3582. dhdp->logtrace_thr_ts.complete_time = OSL_LOCALTIME_NS(); /* set before exiting */
3583. complete_and_exit(&tsk->completed, 0); /* never returns */
3584. }
  3585. #else
  3586. static void
  3587. dhd_event_logtrace_process(struct work_struct * work)
  3588. {
  3589. int ret = 0;
  3590. /* Ignore compiler warnings due to -Werror=cast-qual */
  3591. #if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__)
  3592. #pragma GCC diagnostic push
  3593. #pragma GCC diagnostic ignored "-Wcast-qual"
  3594. #endif // endif
  3595. struct delayed_work *dw = to_delayed_work(work);
  3596. struct dhd_info *dhd =
  3597. container_of(dw, struct dhd_info, event_log_dispatcher_work);
  3598. #if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__)
  3599. #pragma GCC diagnostic pop
  3600. #endif // endif
  3601. #ifdef EWP_EDL
  3602. if (dhd->pub.dongle_edl_support) {
  3603. ret = dhd_prot_process_edl_complete(&dhd->pub, &dhd->event_data);
  3604. } else {
  3605. ret = dhd_event_logtrace_process_items(dhd);
  3606. }
  3607. #else
  3608. ret = dhd_event_logtrace_process_items(dhd);
  3609. #endif /* EWP_EDL */
  3610. if (ret > 0) {
  3611. schedule_delayed_work(&(dhd)->event_log_dispatcher_work,
  3612. msecs_to_jiffies(DHD_EVENT_LOGTRACE_RESCHEDULE_DELAY_MS));
  3613. }
  3614. return;
  3615. }
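/* The workqueue variant above re-arms itself instead of looping, the usual
 * way to bound per-invocation latency in process context. A minimal
 * self-rescheduling delayed-work skeleton; my_work_fn and
 * more_items_pending are hypothetical:
 */
#if 0
static void my_work_fn(struct work_struct *work)
{
	struct delayed_work *dw = to_delayed_work(work);

	if (more_items_pending())
		schedule_delayed_work(dw, msecs_to_jiffies(100));
}
#endif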
  3616. #endif /* DHD_USE_KTHREAD_FOR_LOGTRACE */
  3617. void
  3618. dhd_schedule_logtrace(void *dhd_info)
  3619. {
  3620. dhd_info_t *dhd = (dhd_info_t *)dhd_info;
  3621. #ifdef DHD_USE_KTHREAD_FOR_LOGTRACE
  3622. if (dhd->thr_logtrace_ctl.thr_pid >= 0) {
  3623. binary_sema_up(&dhd->thr_logtrace_ctl);
  3624. } else {
  3625. DHD_ERROR(("%s: thr_logtrace_ctl(%ld) not inited\n", __FUNCTION__,
  3626. dhd->thr_logtrace_ctl.thr_pid));
  3627. }
  3628. #else
  3629. schedule_delayed_work(&dhd->event_log_dispatcher_work, 0);
  3630. #endif /* DHD_USE_KTHREAD_FOR_LOGTRACE */
  3631. return;
  3632. }
  3633. void
  3634. dhd_cancel_logtrace_process_sync(dhd_info_t *dhd)
  3635. {
  3636. #ifdef DHD_USE_KTHREAD_FOR_LOGTRACE
  3637. if (dhd->thr_logtrace_ctl.thr_pid >= 0) {
  3638. PROC_STOP_USING_BINARY_SEMA(&dhd->thr_logtrace_ctl);
  3639. } else {
  3640. DHD_ERROR(("%s: thr_logtrace_ctl(%ld) not inited\n", __FUNCTION__,
  3641. dhd->thr_logtrace_ctl.thr_pid));
  3642. }
  3643. #else
  3644. cancel_delayed_work_sync(&dhd->event_log_dispatcher_work);
  3645. #endif /* DHD_USE_KTHREAD_FOR_LOGTRACE */
  3646. }
  3647. void
  3648. dhd_flush_logtrace_process(dhd_info_t *dhd)
  3649. {
  3650. #ifdef DHD_USE_KTHREAD_FOR_LOGTRACE
  3651. if (dhd->thr_logtrace_ctl.thr_pid >= 0) {
  3652. PROC_FLUSH_USING_BINARY_SEMA(&dhd->thr_logtrace_ctl);
  3653. } else {
  3654. DHD_ERROR(("%s: thr_logtrace_ctl(%ld) not inited\n", __FUNCTION__,
  3655. dhd->thr_logtrace_ctl.thr_pid));
  3656. }
  3657. #else
  3658. flush_delayed_work(&dhd->event_log_dispatcher_work);
  3659. #endif /* DHD_USE_KTHREAD_FOR_LOGTRACE */
  3660. }
  3661. int
  3662. dhd_init_logtrace_process(dhd_info_t *dhd)
  3663. {
  3664. #ifdef DHD_USE_KTHREAD_FOR_LOGTRACE
  3665. dhd->thr_logtrace_ctl.thr_pid = DHD_PID_KT_INVALID;
  3666. PROC_START(dhd_logtrace_thread, dhd, &dhd->thr_logtrace_ctl, 0, "dhd_logtrace_thread");
  3667. if (dhd->thr_logtrace_ctl.thr_pid < 0) {
  3668. DHD_ERROR(("%s: init logtrace process failed\n", __FUNCTION__));
  3669. return BCME_ERROR;
  3670. } else {
3671. DHD_ERROR(("%s: thr_logtrace_ctl(%ld) succeeded\n", __FUNCTION__,
  3672. dhd->thr_logtrace_ctl.thr_pid));
  3673. }
  3674. #else
  3675. INIT_DELAYED_WORK(&dhd->event_log_dispatcher_work, dhd_event_logtrace_process);
  3676. #endif /* DHD_USE_KTHREAD_FOR_LOGTRACE */
  3677. return BCME_OK;
  3678. }
  3679. int
  3680. dhd_reinit_logtrace_process(dhd_info_t *dhd)
  3681. {
  3682. #ifdef DHD_USE_KTHREAD_FOR_LOGTRACE
  3683. /* Re-init only if PROC_STOP from dhd_stop was called
  3684. * which can be checked via thr_pid
  3685. */
  3686. if (dhd->thr_logtrace_ctl.thr_pid < 0) {
  3687. PROC_START(dhd_logtrace_thread, dhd, &dhd->thr_logtrace_ctl,
  3688. 0, "dhd_logtrace_thread");
  3689. if (dhd->thr_logtrace_ctl.thr_pid < 0) {
  3690. DHD_ERROR(("%s: reinit logtrace process failed\n", __FUNCTION__));
  3691. return BCME_ERROR;
  3692. } else {
3693. DHD_ERROR(("%s: thr_logtrace_ctl(%ld) succeeded\n", __FUNCTION__,
  3694. dhd->thr_logtrace_ctl.thr_pid));
  3695. }
  3696. }
  3697. #else
3698. /* No need to re-init for the WQ, as cancel_delayed_work_sync
3699. * will not delete the WQ
3700. */
  3701. #endif /* DHD_USE_KTHREAD_FOR_LOGTRACE */
  3702. return BCME_OK;
  3703. }
  3704. void
  3705. dhd_event_logtrace_enqueue(dhd_pub_t *dhdp, int ifidx, void *pktbuf)
  3706. {
  3707. dhd_info_t *dhd = (dhd_info_t *)dhdp->info;
  3708. #ifdef PCIE_FULL_DONGLE
  3709. /* Add ifidx in the PKTTAG */
  3710. DHD_PKTTAG_SET_IFID((dhd_pkttag_fr_t *)PKTTAG(pktbuf), ifidx);
  3711. #endif /* PCIE_FULL_DONGLE */
  3712. skb_queue_tail(&dhd->evt_trace_queue, pktbuf);
  3713. dhd_schedule_logtrace(dhd);
  3714. }
  3715. void
  3716. dhd_event_logtrace_flush_queue(dhd_pub_t *dhdp)
  3717. {
  3718. dhd_info_t *dhd = (dhd_info_t *)dhdp->info;
  3719. struct sk_buff *skb;
  3720. while ((skb = skb_dequeue(&dhd->evt_trace_queue)) != NULL) {
  3721. #ifdef DHD_USE_STATIC_CTRLBUF
  3722. PKTFREE_STATIC(dhdp->osh, skb, FALSE);
  3723. #else
  3724. PKTFREE(dhdp->osh, skb, FALSE);
  3725. #endif /* DHD_USE_STATIC_CTRLBUF */
  3726. }
  3727. }
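/* dhd_event_logtrace_enqueue()/dhd_event_logtrace_flush_queue() form a
 * standard sk_buff_head producer/consumer: skb_queue_tail() and
 * skb_dequeue() take the queue's internal spinlock, so no extra locking is
 * needed. The bare pattern, as a sketch with a hypothetical some_skb:
 */
#if 0
	struct sk_buff_head q;
	struct sk_buff *skb;

	skb_queue_head_init(&q);
	skb_queue_tail(&q, some_skb);			/* producer, any context */
	while ((skb = skb_dequeue(&q)) != NULL)		/* consumer */
		kfree_skb(skb);
#endif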
  3728. #ifdef BCMPCIE
  3729. void
  3730. dhd_sendup_info_buf(dhd_pub_t *dhdp, uint8 *msg)
  3731. {
  3732. struct sk_buff *skb = NULL;
  3733. uint32 pktsize = 0;
  3734. void *pkt = NULL;
  3735. info_buf_payload_hdr_t *infobuf = NULL;
  3736. dhd_info_t *dhd = dhdp->info;
  3737. uint8 *pktdata = NULL;
  3738. if (!msg)
  3739. return;
  3740. /* msg = |infobuf_ver(u32)|info_buf_payload_hdr_t|msgtrace_hdr_t|<var len data>| */
  3741. infobuf = (info_buf_payload_hdr_t *)(msg + sizeof(uint32));
  3742. pktsize = (uint32)(ltoh16(infobuf->length) + sizeof(info_buf_payload_hdr_t) +
  3743. sizeof(uint32));
  3744. pkt = PKTGET(dhdp->osh, pktsize, FALSE);
  3745. if (!pkt) {
3746. DHD_ERROR(("%s: skb alloc failed! Not sending event log up.\n", __FUNCTION__));
  3747. } else {
  3748. PKTSETLEN(dhdp->osh, pkt, pktsize);
  3749. pktdata = PKTDATA(dhdp->osh, pkt);
  3750. memcpy(pktdata, msg, pktsize);
  3751. /* For infobuf packets assign skb->dev with
  3752. * Primary interface n/w device
  3753. */
  3754. skb = PKTTONATIVE(dhdp->osh, pkt);
  3755. skb->dev = dhd->iflist[0]->net;
  3756. /* Send pkt UP */
  3757. dhd_netif_rx_ni(skb);
  3758. }
  3759. }
  3760. #endif /* BCMPCIE */
  3761. #endif /* SHOW_LOGTRACE */
  3762. /** Called when a frame is received by the dongle on interface 'ifidx' */
  3763. void
  3764. dhd_rx_frame(dhd_pub_t *dhdp, int ifidx, void *pktbuf, int numpkt, uint8 chan)
  3765. {
  3766. dhd_info_t *dhd = (dhd_info_t *)dhdp->info;
  3767. struct sk_buff *skb;
  3768. uchar *eth;
  3769. uint len;
  3770. void *data, *pnext = NULL;
  3771. int i;
  3772. dhd_if_t *ifp;
  3773. wl_event_msg_t event;
  3774. #if (defined(OEM_ANDROID) || defined(OEM_EMBEDDED_LINUX))
  3775. int tout_rx = 0;
  3776. int tout_ctrl = 0;
  3777. #endif /* OEM_ANDROID || OEM_EMBEDDED_LINUX */
  3778. void *skbhead = NULL;
  3779. void *skbprev = NULL;
  3780. uint16 protocol;
  3781. unsigned char *dump_data;
  3782. #ifdef DHD_MCAST_REGEN
  3783. uint8 interface_role;
  3784. if_flow_lkup_t *if_flow_lkup;
  3785. unsigned long flags;
  3786. #endif // endif
  3787. #ifdef DHD_WAKE_STATUS
  3788. int pkt_wake = 0;
  3789. wake_counts_t *wcp = NULL;
  3790. #endif /* DHD_WAKE_STATUS */
  3791. DHD_TRACE(("%s: Enter\n", __FUNCTION__));
  3792. BCM_REFERENCE(dump_data);
  3793. for (i = 0; pktbuf && i < numpkt; i++, pktbuf = pnext) {
  3794. struct ether_header *eh;
  3795. pnext = PKTNEXT(dhdp->osh, pktbuf);
  3796. PKTSETNEXT(dhdp->osh, pktbuf, NULL);
3797. /* info ring "debug" data, which is not an 802.3 frame, is sent/hacked with a
  3798. * special ifidx of DHD_DUMMY_INFO_IF. This is just internal to dhd to get the data
  3799. * from dhd_msgbuf.c:dhd_prot_infobuf_cmplt_process() to here (dhd_rx_frame).
  3800. */
  3801. if (ifidx == DHD_DUMMY_INFO_IF) {
3802. /* Event msg printing is called from dhd_rx_frame, which runs in tasklet
3803. * context for PCIe FD; for other buses it runs in DPC context. If we get
3804. * a bunch of events from the dongle, printing all of them from
3805. * tasklet/DPC context, in the data path no less, is costly.
3806. * Also, in newer dongle SW (4359, 4355 onwards) console prints come up as
3807. * events with type WLC_E_TRACE.
3808. * We print these console logs from workqueue context by enqueuing the SKB
3809. * here; dequeuing is done in the workqueue, and the SKB is sent up only
3810. * if logtrace_pkt_sendup is TRUE (otherwise it is freed there)
3811. */
  3812. #ifdef SHOW_LOGTRACE
  3813. dhd_event_logtrace_enqueue(dhdp, ifidx, pktbuf);
  3814. #else /* !SHOW_LOGTRACE */
  3815. /* If SHOW_LOGTRACE not defined and ifidx is DHD_DUMMY_INFO_IF,
  3816. * free the PKT here itself
  3817. */
  3818. #ifdef DHD_USE_STATIC_CTRLBUF
  3819. PKTFREE_STATIC(dhdp->osh, pktbuf, FALSE);
  3820. #else
  3821. PKTFREE(dhdp->osh, pktbuf, FALSE);
  3822. #endif /* DHD_USE_STATIC_CTRLBUF */
  3823. #endif /* SHOW_LOGTRACE */
  3824. continue;
  3825. }
  3826. #ifdef DHD_WAKE_STATUS
  3827. pkt_wake = dhd_bus_get_bus_wake(dhdp);
  3828. wcp = dhd_bus_get_wakecount(dhdp);
  3829. if (wcp == NULL) {
  3830. /* If wakeinfo count buffer is null do not update wake count values */
  3831. pkt_wake = 0;
  3832. }
  3833. #endif /* DHD_WAKE_STATUS */
  3834. eh = (struct ether_header *)PKTDATA(dhdp->osh, pktbuf);
  3835. if (ifidx >= DHD_MAX_IFS) {
  3836. DHD_ERROR(("%s: ifidx(%d) Out of bound. drop packet\n",
  3837. __FUNCTION__, ifidx));
  3838. if (ntoh16(eh->ether_type) == ETHER_TYPE_BRCM) {
  3839. #ifdef DHD_USE_STATIC_CTRLBUF
  3840. PKTFREE_STATIC(dhdp->osh, pktbuf, FALSE);
  3841. #else
  3842. PKTFREE(dhdp->osh, pktbuf, FALSE);
  3843. #endif /* DHD_USE_STATIC_CTRLBUF */
  3844. } else {
  3845. PKTCFREE(dhdp->osh, pktbuf, FALSE);
  3846. }
  3847. continue;
  3848. }
  3849. ifp = dhd->iflist[ifidx];
  3850. if (ifp == NULL) {
  3851. DHD_ERROR(("%s: ifp is NULL. drop packet\n",
  3852. __FUNCTION__));
  3853. if (ntoh16(eh->ether_type) == ETHER_TYPE_BRCM) {
  3854. #ifdef DHD_USE_STATIC_CTRLBUF
  3855. PKTFREE_STATIC(dhdp->osh, pktbuf, FALSE);
  3856. #else
  3857. PKTFREE(dhdp->osh, pktbuf, FALSE);
  3858. #endif /* DHD_USE_STATIC_CTRLBUF */
  3859. } else {
  3860. PKTCFREE(dhdp->osh, pktbuf, FALSE);
  3861. }
  3862. continue;
  3863. }
3864. /* Drop only data packets until the net device is registered, to avoid a kernel panic */
  3865. #ifndef PROP_TXSTATUS_VSDB
  3866. if ((!ifp->net || ifp->net->reg_state != NETREG_REGISTERED) &&
  3867. (ntoh16(eh->ether_type) != ETHER_TYPE_BRCM))
  3868. #else
  3869. if ((!ifp->net || ifp->net->reg_state != NETREG_REGISTERED || !dhd->pub.up) &&
  3870. (ntoh16(eh->ether_type) != ETHER_TYPE_BRCM))
  3871. #endif /* PROP_TXSTATUS_VSDB */
  3872. {
  3873. DHD_ERROR(("%s: net device is NOT registered yet. drop packet\n",
  3874. __FUNCTION__));
  3875. PKTCFREE(dhdp->osh, pktbuf, FALSE);
  3876. continue;
  3877. }
  3878. #ifdef PROP_TXSTATUS
  3879. if (dhd_wlfc_is_header_only_pkt(dhdp, pktbuf)) {
  3880. /* WLFC may send header only packet when
  3881. there is an urgent message but no packet to
  3882. piggy-back on
  3883. */
  3884. PKTCFREE(dhdp->osh, pktbuf, FALSE);
  3885. continue;
  3886. }
  3887. #endif // endif
  3888. #ifdef DHD_L2_FILTER
  3889. /* If block_ping is enabled drop the ping packet */
  3890. if (ifp->block_ping) {
  3891. if (bcm_l2_filter_block_ping(dhdp->osh, pktbuf) == BCME_OK) {
  3892. PKTCFREE(dhdp->osh, pktbuf, FALSE);
  3893. continue;
  3894. }
  3895. }
  3896. if (ifp->grat_arp && DHD_IF_ROLE_STA(dhdp, ifidx)) {
  3897. if (bcm_l2_filter_gratuitous_arp(dhdp->osh, pktbuf) == BCME_OK) {
  3898. PKTCFREE(dhdp->osh, pktbuf, FALSE);
  3899. continue;
  3900. }
  3901. }
  3902. if (ifp->parp_enable && DHD_IF_ROLE_AP(dhdp, ifidx)) {
  3903. int ret = dhd_l2_filter_pkt_handle(dhdp, ifidx, pktbuf, FALSE);
3904. /* Drop the packet if the L2 filter has already processed it,
3905. * otherwise continue with the normal path
3906. */
  3907. if (ret == BCME_OK) {
  3908. PKTCFREE(dhdp->osh, pktbuf, TRUE);
  3909. continue;
  3910. }
  3911. }
  3912. if (ifp->block_tdls) {
  3913. if (bcm_l2_filter_block_tdls(dhdp->osh, pktbuf) == BCME_OK) {
  3914. PKTCFREE(dhdp->osh, pktbuf, FALSE);
  3915. continue;
  3916. }
  3917. }
  3918. #endif /* DHD_L2_FILTER */
  3919. #ifdef DHD_MCAST_REGEN
  3920. DHD_FLOWID_LOCK(dhdp->flowid_lock, flags);
  3921. if_flow_lkup = (if_flow_lkup_t *)dhdp->if_flow_lkup;
  3922. ASSERT(if_flow_lkup);
  3923. interface_role = if_flow_lkup[ifidx].role;
  3924. DHD_FLOWID_UNLOCK(dhdp->flowid_lock, flags);
  3925. if (ifp->mcast_regen_bss_enable && (interface_role != WLC_E_IF_ROLE_WDS) &&
  3926. !DHD_IF_ROLE_AP(dhdp, ifidx) &&
  3927. ETHER_ISUCAST(eh->ether_dhost)) {
  3928. if (dhd_mcast_reverse_translation(eh) == BCME_OK) {
  3929. #ifdef DHD_PSTA
  3930. /* Change bsscfg to primary bsscfg for unicast-multicast packets */
  3931. if ((dhd_get_psta_mode(dhdp) == DHD_MODE_PSTA) ||
  3932. (dhd_get_psta_mode(dhdp) == DHD_MODE_PSR)) {
  3933. if (ifidx != 0) {
3934. /* Let the primary PSTA interface handle this
3935. * frame after the unicast-to-multicast conversion
3936. */
  3937. ifp = dhd_get_ifp(dhdp, 0);
  3938. ASSERT(ifp);
3939. }
3940. }
3941. #endif /* DHD_PSTA */
3942. }
3943. }
3944. #endif /* DHD_MCAST_REGEN */
  3945. #ifdef DHDTCPSYNC_FLOOD_BLK
  3946. if (dhd_tcpdata_get_flag(dhdp, pktbuf) == FLAG_SYNC) {
  3947. int delta_sec;
  3948. int delta_sync;
  3949. int sync_per_sec;
  3950. u64 curr_time = DIV_U64_BY_U32(OSL_LOCALTIME_NS(), NSEC_PER_SEC);
  3951. ifp->tsync_rcvd ++;
  3952. delta_sync = ifp->tsync_rcvd - ifp->tsyncack_txed;
  3953. delta_sec = curr_time - ifp->last_sync;
  3954. if (delta_sec > 1) {
  3955. sync_per_sec = delta_sync/delta_sec;
  3956. if (sync_per_sec > TCP_SYNC_FLOOD_LIMIT) {
  3957. schedule_work(&ifp->blk_tsfl_work);
3958. DHD_ERROR(("ifidx %d TCP SYNC flood attack suspected! "
3959. "sync received %d pkt/sec\n",
3960. ifidx, sync_per_sec));
  3961. }
  3962. dhd_reset_tcpsync_info_by_ifp(ifp);
  3963. }
  3964. }
  3965. #endif /* DHDTCPSYNC_FLOOD_BLK */
  3966. #ifdef DHDTCPACK_SUPPRESS
  3967. dhd_tcpdata_info_get(dhdp, pktbuf);
  3968. #endif // endif
  3969. skb = PKTTONATIVE(dhdp->osh, pktbuf);
  3970. ASSERT(ifp);
  3971. skb->dev = ifp->net;
  3972. #ifdef DHD_WET
  3973. /* wet related packet proto manipulation should be done in DHD
  3974. * since dongle doesn't have complete payload
  3975. */
  3976. if (WET_ENABLED(&dhd->pub) && (dhd_wet_recv_proc(dhd->pub.wet_info,
  3977. pktbuf) < 0)) {
  3978. DHD_INFO(("%s:%s: wet recv proc failed\n",
  3979. __FUNCTION__, dhd_ifname(dhdp, ifidx)));
  3980. }
  3981. #endif /* DHD_WET */
  3982. #ifdef DHD_PSTA
  3983. if (PSR_ENABLED(dhdp) &&
  3984. (dhd_psta_proc(dhdp, ifidx, &pktbuf, FALSE) < 0)) {
  3985. DHD_ERROR(("%s:%s: psta recv proc failed\n", __FUNCTION__,
  3986. dhd_ifname(dhdp, ifidx)));
  3987. }
  3988. #endif /* DHD_PSTA */
  3989. DHD_TRACE(("\nAp isolate in dhd is %d\n", ifp->ap_isolate));
  3990. if (ifidx >= 0 && dhdp != NULL && dhdp->info != NULL &&
  3991. dhdp->info->iflist[ifidx] != NULL) {
  3992. if ((DHD_IF_ROLE_AP(dhdp, ifidx) || DHD_IF_ROLE_P2PGO(dhdp, ifidx)) &&
  3993. (!ifp->ap_isolate)) {
  3994. DHD_TRACE(("%s: MACADDR: " MACDBG " ifidx %d\n",
  3995. __FUNCTION__,
  3996. MAC2STRDBG(dhdp->info->iflist[ifidx]->mac_addr),
  3997. ifidx));
  3998. DHD_TRACE(("%s: DEST: " MACDBG " ifidx %d\n",
  3999. __FUNCTION__, MAC2STRDBG(eh->ether_dhost), ifidx));
  4000. eh = (struct ether_header *)PKTDATA(dhdp->osh, pktbuf);
  4001. if (ETHER_ISUCAST(eh->ether_dhost)) {
  4002. if (dhd_find_sta(dhdp, ifidx, (void *)eh->ether_dhost)) {
4003. DHD_TRACE(("\nPacket not for us, send down\n"));
  4004. dhd_sendpkt(dhdp, ifidx, pktbuf);
  4005. continue;
  4006. }
  4007. } else {
  4008. void *npktbuf = PKTDUP(dhdp->osh, pktbuf);
  4009. if (npktbuf) {
4010. DHD_TRACE(("\ncalling bcmc dhd_sendpkt "
4011. "and send dup up\n"));
  4012. dhd_sendpkt(dhdp, ifidx, npktbuf);
  4013. }
  4014. }
  4015. }
  4016. }
  4017. #ifdef DHD_POST_EAPOL_M1_AFTER_ROAM_EVT
  4018. if (IS_STA_IFACE(ndev_to_wdev(ifp->net)) &&
  4019. (ifp->recv_reassoc_evt == TRUE) && (ifp->post_roam_evt == FALSE) &&
  4020. (dhd_is_4way_msg((char *)(skb->data)) == EAPOL_4WAY_M1)) {
  4021. DHD_ERROR(("%s: Reassoc is in progress. "
  4022. "Drop EAPOL M1 frame\n", __FUNCTION__));
  4023. PKTFREE(dhdp->osh, pktbuf, FALSE);
  4024. continue;
  4025. }
  4026. #endif /* DHD_POST_EAPOL_M1_AFTER_ROAM_EVT */
4027. /* Get the protocol, maintaining the skb around eth_type_trans().
4028. * The main reason for this hack is a limitation of Linux 2.4,
4029. * where 'eth_type_trans' uses 'net->hard_header_len' instead of
4030. * ETH_HLEN for its internal skb_pull. To avoid copying the packet
4031. * coming from the network stack when adding the BDC header,
4032. * hardware header, etc., during network interface registration
4033. * we set 'net->hard_header_len' to ETH_HLEN plus the extra space
4034. * required for the BDC and hardware headers, not just ETH_HLEN
4035. */
  4036. eth = skb->data;
  4037. len = skb->len;
  4038. dump_data = skb->data;
  4039. protocol = (skb->data[12] << 8) | skb->data[13];
  4040. if (protocol == ETHER_TYPE_802_1X) {
  4041. DBG_EVENT_LOG(dhdp, WIFI_EVENT_DRIVER_EAPOL_FRAME_RECEIVED);
  4042. #if defined(WL_CFG80211) && defined(WL_WPS_SYNC)
  4043. wl_handle_wps_states(ifp->net, dump_data, len, FALSE);
  4044. #endif /* WL_CFG80211 && WL_WPS_SYNC */
  4045. #ifdef DHD_4WAYM4_FAIL_DISCONNECT
  4046. if (dhd_is_4way_msg((uint8 *)(skb->data)) == EAPOL_4WAY_M3) {
  4047. OSL_ATOMIC_SET(dhdp->osh, &ifp->m4state, M3_RXED);
  4048. }
  4049. #endif /* DHD_4WAYM4_FAIL_DISCONNECT */
  4050. }
  4051. dhd_rx_pkt_dump(dhdp, ifidx, dump_data, len);
  4052. dhd_dump_pkt(dhdp, ifidx, dump_data, len, FALSE, NULL, NULL);
  4053. #if defined(DHD_WAKE_STATUS) && defined(DHD_WAKEPKT_DUMP)
  4054. if (pkt_wake) {
  4055. prhex("[wakepkt_dump]", (char*)dump_data, MIN(len, 32));
  4056. }
  4057. #endif /* DHD_WAKE_STATUS && DHD_WAKEPKT_DUMP */
  4058. skb->protocol = eth_type_trans(skb, skb->dev);
  4059. if (skb->pkt_type == PACKET_MULTICAST) {
  4060. dhd->pub.rx_multicast++;
  4061. ifp->stats.multicast++;
  4062. }
  4063. skb->data = eth;
  4064. skb->len = len;
  4065. DHD_DBG_PKT_MON_RX(dhdp, skb);
  4066. #ifdef DHD_PKT_LOGGING
  4067. DHD_PKTLOG_RX(dhdp, skb);
  4068. #endif /* DHD_PKT_LOGGING */
  4069. /* Strip header, count, deliver upward */
  4070. skb_pull(skb, ETH_HLEN);
  4071. /* Process special event packets and then discard them */
  4072. memset(&event, 0, sizeof(event));
  4073. if (ntoh16(skb->protocol) == ETHER_TYPE_BRCM) {
  4074. bcm_event_msg_u_t evu;
  4075. int ret_event, event_type;
  4076. void *pkt_data = skb_mac_header(skb);
  4077. ret_event = wl_host_event_get_data(pkt_data, len, &evu);
  4078. if (ret_event != BCME_OK) {
  4079. DHD_ERROR(("%s: wl_host_event_get_data err = %d\n",
  4080. __FUNCTION__, ret_event));
  4081. #ifdef DHD_USE_STATIC_CTRLBUF
  4082. PKTFREE_STATIC(dhdp->osh, pktbuf, FALSE);
  4083. #else
  4084. PKTFREE(dhdp->osh, pktbuf, FALSE);
  4085. #endif // endif
  4086. continue;
  4087. }
  4088. memcpy(&event, &evu.event, sizeof(wl_event_msg_t));
  4089. event_type = ntoh32_ua((void *)&event.event_type);
  4090. #ifdef SHOW_LOGTRACE
  4091. /* Event msg printing is called from dhd_rx_frame which is in Tasklet
  4092. * context in case of PCIe FD, in case of other bus this will be from
  4093. * DPC context. If we get bunch of events from Dongle then printing all
  4094. * of them from Tasklet/DPC context that too in data path is costly.
  4095. * Also in the new Dongle SW(4359, 4355 onwards) console prints too come as
  4096. * events with type WLC_E_TRACE.
  4097. * We'll print this console logs from the WorkQueue context by enqueing SKB
  4098. * here and Dequeuing will be done in WorkQueue and will be freed only if
  4099. * logtrace_pkt_sendup is true
  4100. */
  4101. if (event_type == WLC_E_TRACE) {
  4102. DHD_TRACE(("%s: WLC_E_TRACE\n", __FUNCTION__));
  4103. dhd_event_logtrace_enqueue(dhdp, ifidx, pktbuf);
  4104. continue;
  4105. }
  4106. #endif /* SHOW_LOGTRACE */
  4107. ret_event = dhd_wl_host_event(dhd, ifidx, pkt_data, len, &event, &data);
  4108. wl_event_to_host_order(&event);
  4109. #if (defined(OEM_ANDROID) || defined(OEM_EMBEDDED_LINUX))
  4110. if (!tout_ctrl)
  4111. tout_ctrl = DHD_PACKET_TIMEOUT_MS;
  4112. #endif /* (defined(OEM_ANDROID) || defined(OEM_EMBEDDED_LINUX)) */
  4113. #if (defined(OEM_ANDROID) && defined(PNO_SUPPORT))
  4114. if (event_type == WLC_E_PFN_NET_FOUND) {
4115. /* enforce a custom wake lock to guarantee that the kernel is not suspended */
  4116. tout_ctrl = CUSTOM_PNO_EVENT_LOCK_xTIME * DHD_PACKET_TIMEOUT_MS;
  4117. }
  4118. #endif /* PNO_SUPPORT */
  4119. if (numpkt != 1) {
  4120. DHD_TRACE(("%s: Got BRCM event packet in a chained packet.\n",
  4121. __FUNCTION__));
  4122. }
  4123. #ifdef DHD_WAKE_STATUS
  4124. if (unlikely(pkt_wake)) {
  4125. #ifdef DHD_WAKE_EVENT_STATUS
  4126. if (event.event_type < WLC_E_LAST) {
  4127. wcp->rc_event[event.event_type]++;
  4128. wcp->rcwake++;
  4129. pkt_wake = 0;
  4130. }
  4131. #endif /* DHD_WAKE_EVENT_STATUS */
  4132. }
  4133. #endif /* DHD_WAKE_STATUS */
4134. /* For a delete-virtual-interface event, wl_host_event returns a positive
4135. * i/f index; do not proceed, just free the pkt.
4136. */
  4137. if ((event_type == WLC_E_IF) && (ret_event > 0)) {
  4138. DHD_ERROR(("%s: interface is deleted. Free event packet\n",
  4139. __FUNCTION__));
  4140. #ifdef DHD_USE_STATIC_CTRLBUF
  4141. PKTFREE_STATIC(dhdp->osh, pktbuf, FALSE);
  4142. #else
  4143. PKTFREE(dhdp->osh, pktbuf, FALSE);
  4144. #endif // endif
  4145. continue;
  4146. }
4147. /*
4148. * For event packets, there is a possibility
4149. * of ifidx getting modified. Thus update the ifp
4150. * once again.
4151. */
  4152. ASSERT(ifidx < DHD_MAX_IFS && dhd->iflist[ifidx]);
  4153. ifp = dhd->iflist[ifidx];
  4154. #ifndef PROP_TXSTATUS_VSDB
  4155. if (!(ifp && ifp->net && (ifp->net->reg_state == NETREG_REGISTERED)))
  4156. #else
  4157. if (!(ifp && ifp->net && (ifp->net->reg_state == NETREG_REGISTERED) &&
  4158. dhd->pub.up))
  4159. #endif /* PROP_TXSTATUS_VSDB */
  4160. {
  4161. DHD_ERROR(("%s: net device is NOT registered. drop event packet\n",
  4162. __FUNCTION__));
  4163. #ifdef DHD_USE_STATIC_CTRLBUF
  4164. PKTFREE_STATIC(dhdp->osh, pktbuf, FALSE);
  4165. #else
  4166. PKTFREE(dhdp->osh, pktbuf, FALSE);
  4167. #endif // endif
  4168. continue;
  4169. }
  4170. if (dhdp->wl_event_enabled) {
  4171. #ifdef DHD_USE_STATIC_CTRLBUF
  4172. /* If event bufs are allocated via static buf pool
  4173. * and wl events are enabled, make a copy, free the
  4174. * local one and send the copy up.
  4175. */
  4176. void *npkt = PKTDUP(dhdp->osh, skb);
  4177. /* Clone event and send it up */
  4178. PKTFREE_STATIC(dhdp->osh, pktbuf, FALSE);
  4179. if (npkt) {
  4180. skb = npkt;
  4181. } else {
  4182. DHD_ERROR(("skb clone failed. dropping event.\n"));
  4183. continue;
  4184. }
  4185. #endif /* DHD_USE_STATIC_CTRLBUF */
  4186. } else {
4187. /* If event forwarding is not explicitly enabled, drop events */
  4188. #ifdef DHD_USE_STATIC_CTRLBUF
  4189. PKTFREE_STATIC(dhdp->osh, pktbuf, FALSE);
  4190. #else
  4191. PKTFREE(dhdp->osh, pktbuf, FALSE);
  4192. #endif /* DHD_USE_STATIC_CTRLBUF */
  4193. continue;
  4194. }
  4195. } else {
  4196. #if (defined(OEM_ANDROID) || defined(OEM_EMBEDDED_LINUX))
  4197. tout_rx = DHD_PACKET_TIMEOUT_MS;
  4198. #endif /* OEM_ANDROID || OEM_EMBEDDED_LINUX */
  4199. #ifdef PROP_TXSTATUS
  4200. dhd_wlfc_save_rxpath_ac_time(dhdp, (uint8)PKTPRIO(skb));
  4201. #endif /* PROP_TXSTATUS */
  4202. #ifdef DHD_WAKE_STATUS
  4203. if (unlikely(pkt_wake)) {
  4204. wcp->rxwake++;
  4205. #ifdef DHD_WAKE_RX_STATUS
  4206. #define ETHER_ICMP6_HEADER 20
  4207. #define ETHER_IPV6_SADDR (ETHER_ICMP6_HEADER + 2)
4208. #define ETHER_IPV6_DADDR (ETHER_IPV6_SADDR + IPV6_ADDR_LEN)
4209. #define ETHER_ICMPV6_TYPE (ETHER_IPV6_DADDR + IPV6_ADDR_LEN)
  4210. if (ntoh16(skb->protocol) == ETHER_TYPE_ARP) /* ARP */
  4211. wcp->rx_arp++;
  4212. if (dump_data[0] == 0xFF) { /* Broadcast */
  4213. wcp->rx_bcast++;
  4214. } else if (dump_data[0] & 0x01) { /* Multicast */
  4215. wcp->rx_mcast++;
  4216. if (ntoh16(skb->protocol) == ETHER_TYPE_IPV6) {
  4217. wcp->rx_multi_ipv6++;
  4218. if ((skb->len > ETHER_ICMP6_HEADER) &&
  4219. (dump_data[ETHER_ICMP6_HEADER] == IPPROTO_ICMPV6)) {
  4220. wcp->rx_icmpv6++;
  4221. if (skb->len > ETHER_ICMPV6_TYPE) {
  4222. switch (dump_data[ETHER_ICMPV6_TYPE]) {
  4223. case NDISC_ROUTER_ADVERTISEMENT:
  4224. wcp->rx_icmpv6_ra++;
  4225. break;
  4226. case NDISC_NEIGHBOUR_ADVERTISEMENT:
  4227. wcp->rx_icmpv6_na++;
  4228. break;
  4229. case NDISC_NEIGHBOUR_SOLICITATION:
  4230. wcp->rx_icmpv6_ns++;
  4231. break;
  4232. }
  4233. }
  4234. }
  4235. } else if (dump_data[2] == 0x5E) {
  4236. wcp->rx_multi_ipv4++;
  4237. } else {
  4238. wcp->rx_multi_other++;
  4239. }
  4240. } else { /* Unicast */
  4241. wcp->rx_ucast++;
  4242. }
  4243. #undef ETHER_ICMP6_HEADER
  4244. #undef ETHER_IPV6_SADDR
4245. #undef ETHER_IPV6_DADDR
  4246. #undef ETHER_ICMPV6_TYPE
  4247. #endif /* DHD_WAKE_RX_STATUS */
  4248. pkt_wake = 0;
  4249. }
  4250. #endif /* DHD_WAKE_STATUS */
  4251. }
  4252. #if LINUX_VERSION_CODE < KERNEL_VERSION(4, 11, 0)
  4253. ifp->net->last_rx = jiffies;
  4254. #endif /* LINUX_VERSION_CODE < KERNEL_VERSION(4, 11, 0) */
  4255. if (ntoh16(skb->protocol) != ETHER_TYPE_BRCM) {
  4256. dhdp->dstats.rx_bytes += skb->len;
  4257. dhdp->rx_packets++; /* Local count */
  4258. ifp->stats.rx_bytes += skb->len;
  4259. ifp->stats.rx_packets++;
  4260. }
  4261. if (in_interrupt()) {
  4262. bcm_object_trace_opr(skb, BCM_OBJDBG_REMOVE,
  4263. __FUNCTION__, __LINE__);
  4264. DHD_PERIM_UNLOCK_ALL((dhd->fwder_unit % FWDER_MAX_UNIT));
  4265. #if defined(DHD_LB_RXP)
  4266. netif_receive_skb(skb);
  4267. #else /* !defined(DHD_LB_RXP) */
  4268. netif_rx(skb);
  4269. #endif /* !defined(DHD_LB_RXP) */
  4270. DHD_PERIM_LOCK_ALL((dhd->fwder_unit % FWDER_MAX_UNIT));
  4271. } else {
  4272. if (dhd->rxthread_enabled) {
  4273. if (!skbhead)
  4274. skbhead = skb;
  4275. else
  4276. PKTSETNEXT(dhdp->osh, skbprev, skb);
  4277. skbprev = skb;
  4278. } else {
4279. /* If the receive is not processed inside an ISR,
4280. * ksoftirqd must be woken explicitly to service
4281. * NET_RX_SOFTIRQ. In 2.6 kernels this is handled
4282. * by netif_rx_ni(), but in earlier kernels we need
4283. * to do it manually.
4284. */
  4285. bcm_object_trace_opr(skb, BCM_OBJDBG_REMOVE,
  4286. __FUNCTION__, __LINE__);
  4287. #if defined(ARGOS_NOTIFY_CB)
  4288. argos_register_notifier_deinit();
  4289. #endif // endif
  4290. #if defined(BCMPCIE) && defined(DHDTCPACK_SUPPRESS)
  4291. dhd_tcpack_suppress_set(&dhd->pub, TCPACK_SUP_OFF);
  4292. #endif /* BCMPCIE && DHDTCPACK_SUPPRESS */
  4293. DHD_PERIM_UNLOCK_ALL((dhd->fwder_unit % FWDER_MAX_UNIT));
  4294. #if defined(DHD_LB_RXP)
  4295. netif_receive_skb(skb);
  4296. #else /* !defined(DHD_LB_RXP) */
  4297. netif_rx_ni(skb);
  4298. #endif /* defined(DHD_LB_RXP) */
  4299. DHD_PERIM_LOCK_ALL((dhd->fwder_unit % FWDER_MAX_UNIT));
  4300. }
  4301. }
  4302. }
  4303. if (dhd->rxthread_enabled && skbhead)
  4304. dhd_sched_rxf(dhdp, skbhead);
  4305. #if (defined(OEM_ANDROID) || defined(OEM_EMBEDDED_LINUX))
  4306. DHD_OS_WAKE_LOCK_RX_TIMEOUT_ENABLE(dhdp, tout_rx);
  4307. DHD_OS_WAKE_LOCK_CTRL_TIMEOUT_ENABLE(dhdp, tout_ctrl);
  4308. #endif /* OEM_ANDROID || OEM_EMBEDDED_LINUX */
  4309. }
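/* The receive loop above walks a chain of up to 'numpkt' packets, detaching
 * each element before handling it so that freeing one packet cannot take the
 * rest of the chain with it. The detach idiom in isolation (sketch):
 */
#if 0
	for (i = 0; pktbuf && i < numpkt; i++, pktbuf = pnext) {
		pnext = PKTNEXT(dhdp->osh, pktbuf);	/* remember the successor */
		PKTSETNEXT(dhdp->osh, pktbuf, NULL);	/* detach the current pkt */
		/* ... per-packet handling, which may free pktbuf ... */
	}
#endif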
  4310. void
  4311. dhd_event(struct dhd_info *dhd, char *evpkt, int evlen, int ifidx)
  4312. {
  4313. /* Linux version has nothing to do */
  4314. return;
  4315. }
  4316. void
  4317. dhd_txcomplete(dhd_pub_t *dhdp, void *txp, bool success)
  4318. {
  4319. dhd_info_t *dhd = (dhd_info_t *)(dhdp->info);
  4320. struct ether_header *eh;
  4321. uint16 type;
  4322. dhd_prot_hdrpull(dhdp, NULL, txp, NULL, NULL);
  4323. eh = (struct ether_header *)PKTDATA(dhdp->osh, txp);
  4324. type = ntoh16(eh->ether_type);
  4325. if (type == ETHER_TYPE_802_1X) {
  4326. atomic_dec(&dhd->pend_8021x_cnt);
  4327. }
  4328. #ifdef PROP_TXSTATUS
  4329. if (dhdp->wlfc_state && (dhdp->proptxstatus_mode != WLFC_FCMODE_NONE)) {
  4330. dhd_if_t *ifp = dhd->iflist[DHD_PKTTAG_IF(PKTTAG(txp))];
  4331. uint datalen = PKTLEN(dhd->pub.osh, txp);
  4332. if (ifp != NULL) {
  4333. if (success) {
  4334. dhd->pub.tx_packets++;
  4335. ifp->stats.tx_packets++;
  4336. ifp->stats.tx_bytes += datalen;
  4337. } else {
  4338. ifp->stats.tx_dropped++;
  4339. }
  4340. }
  4341. }
  4342. #endif // endif
  4343. }
  4344. static struct net_device_stats *
  4345. dhd_get_stats(struct net_device *net)
  4346. {
  4347. dhd_info_t *dhd = DHD_DEV_INFO(net);
  4348. dhd_if_t *ifp;
  4349. DHD_TRACE(("%s: Enter\n", __FUNCTION__));
  4350. if (!dhd) {
  4351. DHD_ERROR(("%s : dhd is NULL\n", __FUNCTION__));
  4352. goto error;
  4353. }
  4354. ifp = dhd_get_ifp_by_ndev(&dhd->pub, net);
  4355. if (!ifp) {
  4356. /* return empty stats */
  4357. DHD_ERROR(("%s: BAD_IF\n", __FUNCTION__));
  4358. goto error;
  4359. }
  4360. if (dhd->pub.up) {
  4361. /* Use the protocol to get dongle stats */
  4362. dhd_prot_dstats(&dhd->pub);
  4363. }
  4364. return &ifp->stats;
  4365. error:
  4366. memset(&net->stats, 0, sizeof(net->stats));
  4367. return &net->stats;
  4368. }
  4369. static int
  4370. dhd_watchdog_thread(void *data)
  4371. {
  4372. tsk_ctl_t *tsk = (tsk_ctl_t *)data;
  4373. dhd_info_t *dhd = (dhd_info_t *)tsk->parent;
  4374. /* This thread doesn't need any user-level access,
  4375. * so get rid of all our resources
  4376. */
  4377. if (dhd_watchdog_prio > 0) {
  4378. struct sched_param param;
  4379. param.sched_priority = (dhd_watchdog_prio < MAX_RT_PRIO)?
  4380. dhd_watchdog_prio:(MAX_RT_PRIO-1);
  4381. setScheduler(current, SCHED_FIFO, &param);
  4382. }
  4383. while (1) {
  4384. if (down_interruptible (&tsk->sema) == 0) {
  4385. unsigned long flags;
  4386. unsigned long jiffies_at_start = jiffies;
  4387. unsigned long time_lapse;
  4388. #ifdef BCMPCIE
  4389. DHD_OS_WD_WAKE_LOCK(&dhd->pub);
  4390. #endif /* BCMPCIE */
  4391. SMP_RD_BARRIER_DEPENDS();
  4392. if (tsk->terminated) {
  4393. #ifdef BCMPCIE
  4394. DHD_OS_WD_WAKE_UNLOCK(&dhd->pub);
  4395. #endif /* BCMPCIE */
  4396. break;
  4397. }
  4398. if (dhd->pub.dongle_reset == FALSE) {
  4399. DHD_TIMER(("%s:\n", __FUNCTION__));
  4400. dhd_bus_watchdog(&dhd->pub);
  4401. DHD_GENERAL_LOCK(&dhd->pub, flags);
  4402. /* Count the tick for reference */
  4403. dhd->pub.tickcnt++;
  4404. #ifdef DHD_L2_FILTER
  4405. dhd_l2_filter_watchdog(&dhd->pub);
  4406. #endif /* DHD_L2_FILTER */
  4407. time_lapse = jiffies - jiffies_at_start;
  4408. /* Reschedule the watchdog */
  4409. if (dhd->wd_timer_valid) {
  4410. mod_timer(&dhd->timer,
  4411. jiffies +
  4412. msecs_to_jiffies(dhd_watchdog_ms) -
  4413. min(msecs_to_jiffies(dhd_watchdog_ms), time_lapse));
  4414. }
  4415. DHD_GENERAL_UNLOCK(&dhd->pub, flags);
  4416. }
  4417. #ifdef BCMPCIE
  4418. DHD_OS_WD_WAKE_UNLOCK(&dhd->pub);
  4419. #endif /* BCMPCIE */
  4420. } else {
  4421. break;
  4422. }
  4423. }
  4424. complete_and_exit(&tsk->completed, 0);
  4425. }
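/* The mod_timer() arithmetic above compensates for time already spent in the
 * tick: e.g. with dhd_watchdog_ms = 10 and a tick that took 3 ms, the timer
 * is re-armed for jiffies + (10 ms - 3 ms) = 7 ms, and the min() clamp keeps
 * the delta from going negative, so an overrunning tick re-arms immediately
 * instead of in the past.
 */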
  4426. static void dhd_watchdog(ulong data)
  4427. {
  4428. dhd_info_t *dhd = (dhd_info_t *)data;
  4429. unsigned long flags;
  4430. if (dhd->pub.dongle_reset) {
  4431. return;
  4432. }
  4433. if (dhd->thr_wdt_ctl.thr_pid >= 0) {
  4434. up(&dhd->thr_wdt_ctl.sema);
  4435. return;
  4436. }
  4437. #ifdef BCMPCIE
  4438. DHD_OS_WD_WAKE_LOCK(&dhd->pub);
  4439. #endif /* BCMPCIE */
  4440. /* Call the bus module watchdog */
  4441. dhd_bus_watchdog(&dhd->pub);
  4442. DHD_GENERAL_LOCK(&dhd->pub, flags);
  4443. /* Count the tick for reference */
  4444. dhd->pub.tickcnt++;
  4445. #ifdef DHD_L2_FILTER
  4446. dhd_l2_filter_watchdog(&dhd->pub);
  4447. #endif /* DHD_L2_FILTER */
  4448. /* Reschedule the watchdog */
  4449. if (dhd->wd_timer_valid)
  4450. mod_timer(&dhd->timer, jiffies + msecs_to_jiffies(dhd_watchdog_ms));
  4451. DHD_GENERAL_UNLOCK(&dhd->pub, flags);
  4452. #ifdef BCMPCIE
  4453. DHD_OS_WD_WAKE_UNLOCK(&dhd->pub);
  4454. #endif /* BCMPCIE */
  4455. }
  4456. #ifdef DHD_PCIE_RUNTIMEPM
  4457. static int
  4458. dhd_rpm_state_thread(void *data)
  4459. {
  4460. tsk_ctl_t *tsk = (tsk_ctl_t *)data;
  4461. dhd_info_t *dhd = (dhd_info_t *)tsk->parent;
  4462. while (1) {
  4463. if (down_interruptible (&tsk->sema) == 0) {
  4464. unsigned long flags;
  4465. unsigned long jiffies_at_start = jiffies;
  4466. unsigned long time_lapse;
  4467. SMP_RD_BARRIER_DEPENDS();
  4468. if (tsk->terminated) {
  4469. break;
  4470. }
  4471. if (dhd->pub.dongle_reset == FALSE) {
  4472. DHD_TIMER(("%s:\n", __FUNCTION__));
  4473. if (dhd->pub.up) {
  4474. dhd_runtimepm_state(&dhd->pub);
  4475. }
  4476. DHD_GENERAL_LOCK(&dhd->pub, flags);
  4477. time_lapse = jiffies - jiffies_at_start;
4478. /* Reschedule the runtime PM timer */
  4479. if (dhd->rpm_timer_valid) {
  4480. mod_timer(&dhd->rpm_timer,
  4481. jiffies +
  4482. msecs_to_jiffies(dhd_runtimepm_ms) -
  4483. min(msecs_to_jiffies(dhd_runtimepm_ms),
  4484. time_lapse));
  4485. }
  4486. DHD_GENERAL_UNLOCK(&dhd->pub, flags);
  4487. }
  4488. } else {
  4489. break;
  4490. }
  4491. }
  4492. complete_and_exit(&tsk->completed, 0);
  4493. }
  4494. static void dhd_runtimepm(ulong data)
  4495. {
  4496. dhd_info_t *dhd = (dhd_info_t *)data;
  4497. if (dhd->pub.dongle_reset) {
  4498. return;
  4499. }
  4500. if (dhd->thr_rpm_ctl.thr_pid >= 0) {
  4501. up(&dhd->thr_rpm_ctl.sema);
  4502. return;
  4503. }
  4504. }
  4505. void dhd_runtime_pm_disable(dhd_pub_t *dhdp)
  4506. {
  4507. dhd_os_runtimepm_timer(dhdp, 0);
  4508. dhdpcie_runtime_bus_wake(dhdp, CAN_SLEEP(), __builtin_return_address(0));
  4509. }
  4510. void dhd_runtime_pm_enable(dhd_pub_t *dhdp)
  4511. {
  4512. /* Enable Runtime PM except for MFG Mode */
  4513. if (!(dhdp->op_mode & DHD_FLAG_MFG_MODE)) {
  4514. if (dhd_get_idletime(dhdp)) {
  4515. dhd_os_runtimepm_timer(dhdp, dhd_runtimepm_ms);
  4516. }
  4517. }
  4518. }
  4519. #endif /* DHD_PCIE_RUNTIMEPM */
  4520. #ifdef ENABLE_ADAPTIVE_SCHED
  4521. static void
  4522. dhd_sched_policy(int prio)
  4523. {
  4524. struct sched_param param;
  4525. if (cpufreq_quick_get(0) <= CUSTOM_CPUFREQ_THRESH) {
  4526. param.sched_priority = 0;
  4527. setScheduler(current, SCHED_NORMAL, &param);
  4528. } else {
  4529. if (get_scheduler_policy(current) != SCHED_FIFO) {
  4530. param.sched_priority = (prio < MAX_RT_PRIO)? prio : (MAX_RT_PRIO-1);
  4531. setScheduler(current, SCHED_FIFO, &param);
  4532. }
  4533. }
  4534. }
  4535. #endif /* ENABLE_ADAPTIVE_SCHED */
  4536. #ifdef DEBUG_CPU_FREQ
  4537. static int dhd_cpufreq_notifier(struct notifier_block *nb, unsigned long val, void *data)
  4538. {
  4539. dhd_info_t *dhd = container_of(nb, struct dhd_info, freq_trans);
  4540. struct cpufreq_freqs *freq = data;
  4541. if (dhd) {
  4542. if (!dhd->new_freq)
  4543. goto exit;
  4544. if (val == CPUFREQ_POSTCHANGE) {
  4545. DHD_ERROR(("cpu freq is changed to %u kHZ on CPU %d\n",
  4546. freq->new, freq->cpu));
  4547. *per_cpu_ptr(dhd->new_freq, freq->cpu) = freq->new;
  4548. }
  4549. }
  4550. exit:
  4551. return 0;
  4552. }
  4553. #endif /* DEBUG_CPU_FREQ */
  4554. static int
  4555. dhd_dpc_thread(void *data)
  4556. {
  4557. tsk_ctl_t *tsk = (tsk_ctl_t *)data;
  4558. dhd_info_t *dhd = (dhd_info_t *)tsk->parent;
  4559. /* This thread doesn't need any user-level access,
  4560. * so get rid of all our resources
  4561. */
  4562. if (dhd_dpc_prio > 0)
  4563. {
  4564. struct sched_param param;
  4565. param.sched_priority = (dhd_dpc_prio < MAX_RT_PRIO)?dhd_dpc_prio:(MAX_RT_PRIO-1);
  4566. setScheduler(current, SCHED_FIFO, &param);
  4567. }
  4568. #ifdef CUSTOM_DPC_CPUCORE
  4569. set_cpus_allowed_ptr(current, cpumask_of(CUSTOM_DPC_CPUCORE));
  4570. #endif // endif
  4571. #ifdef CUSTOM_SET_CPUCORE
  4572. dhd->pub.current_dpc = current;
  4573. #endif /* CUSTOM_SET_CPUCORE */
  4574. /* Run until signal received */
  4575. while (1) {
  4576. if (!binary_sema_down(tsk)) {
  4577. #ifdef ENABLE_ADAPTIVE_SCHED
  4578. dhd_sched_policy(dhd_dpc_prio);
  4579. #endif /* ENABLE_ADAPTIVE_SCHED */
  4580. SMP_RD_BARRIER_DEPENDS();
  4581. if (tsk->terminated) {
  4582. break;
  4583. }
  4584. /* Call bus dpc unless it indicated down (then clean stop) */
  4585. if (dhd->pub.busstate != DHD_BUS_DOWN) {
  4586. #ifdef DEBUG_DPC_THREAD_WATCHDOG
  4587. int resched_cnt = 0;
  4588. #endif /* DEBUG_DPC_THREAD_WATCHDOG */
  4589. dhd_os_wd_timer_extend(&dhd->pub, TRUE);
  4590. while (dhd_bus_dpc(dhd->pub.bus)) {
  4591. /* process all data */
  4592. #ifdef DEBUG_DPC_THREAD_WATCHDOG
  4593. resched_cnt++;
  4594. if (resched_cnt > MAX_RESCHED_CNT) {
4595. DHD_INFO(("%s Calling msleep to "
4596. "let other processes run.\n",
4597. __FUNCTION__));
  4598. dhd->pub.dhd_bug_on = true;
  4599. resched_cnt = 0;
  4600. OSL_SLEEP(1);
  4601. }
  4602. #endif /* DEBUG_DPC_THREAD_WATCHDOG */
  4603. }
  4604. dhd_os_wd_timer_extend(&dhd->pub, FALSE);
  4605. DHD_OS_WAKE_UNLOCK(&dhd->pub);
  4606. } else {
  4607. if (dhd->pub.up)
  4608. dhd_bus_stop(dhd->pub.bus, TRUE);
  4609. DHD_OS_WAKE_UNLOCK(&dhd->pub);
  4610. }
  4611. } else {
  4612. break;
  4613. }
  4614. }
  4615. complete_and_exit(&tsk->completed, 0);
  4616. }
  4617. static int
  4618. dhd_rxf_thread(void *data)
  4619. {
  4620. tsk_ctl_t *tsk = (tsk_ctl_t *)data;
  4621. dhd_info_t *dhd = (dhd_info_t *)tsk->parent;
  4622. #if defined(WAIT_DEQUEUE)
  4623. #define RXF_WATCHDOG_TIME 250 /* BARK_TIME(1000) / */
  4624. ulong watchdogTime = OSL_SYSUPTIME(); /* msec */
  4625. #endif // endif
  4626. dhd_pub_t *pub = &dhd->pub;
  4627. /* This thread doesn't need any user-level access,
  4628. * so get rid of all our resources
  4629. */
  4630. if (dhd_rxf_prio > 0)
  4631. {
  4632. struct sched_param param;
  4633. param.sched_priority = (dhd_rxf_prio < MAX_RT_PRIO)?dhd_rxf_prio:(MAX_RT_PRIO-1);
  4634. setScheduler(current, SCHED_FIFO, &param);
  4635. }
  4636. #ifdef CUSTOM_SET_CPUCORE
  4637. dhd->pub.current_rxf = current;
  4638. #endif /* CUSTOM_SET_CPUCORE */
  4639. /* Run until signal received */
  4640. while (1) {
  4641. if (down_interruptible(&tsk->sema) == 0) {
  4642. void *skb;
  4643. #ifdef ENABLE_ADAPTIVE_SCHED
  4644. dhd_sched_policy(dhd_rxf_prio);
  4645. #endif /* ENABLE_ADAPTIVE_SCHED */
  4646. SMP_RD_BARRIER_DEPENDS();
  4647. if (tsk->terminated) {
  4648. break;
  4649. }
  4650. skb = dhd_rxf_dequeue(pub);
  4651. if (skb == NULL) {
  4652. continue;
  4653. }
  4654. while (skb) {
  4655. void *skbnext = PKTNEXT(pub->osh, skb);
  4656. PKTSETNEXT(pub->osh, skb, NULL);
  4657. bcm_object_trace_opr(skb, BCM_OBJDBG_REMOVE,
  4658. __FUNCTION__, __LINE__);
  4659. netif_rx_ni(skb);
  4660. skb = skbnext;
  4661. }
  4662. #if defined(WAIT_DEQUEUE)
  4663. if (OSL_SYSUPTIME() - watchdogTime > RXF_WATCHDOG_TIME) {
  4664. OSL_SLEEP(1);
  4665. watchdogTime = OSL_SYSUPTIME();
  4666. }
  4667. #endif // endif
  4668. DHD_OS_WAKE_UNLOCK(pub);
  4669. } else {
  4670. break;
  4671. }
  4672. }
  4673. complete_and_exit(&tsk->completed, 0);
  4674. }
  4675. #ifdef BCMPCIE
  4676. void dhd_dpc_enable(dhd_pub_t *dhdp)
  4677. {
  4678. #if defined(DHD_LB_RXP) || defined(DHD_LB_TXP)
  4679. dhd_info_t *dhd;
  4680. if (!dhdp || !dhdp->info)
  4681. return;
  4682. dhd = dhdp->info;
  4683. #endif /* DHD_LB_RXP || DHD_LB_TXP */
  4684. #ifdef DHD_LB_RXP
  4685. __skb_queue_head_init(&dhd->rx_pend_queue);
  4686. #endif /* DHD_LB_RXP */
  4687. #ifdef DHD_LB_TXP
  4688. skb_queue_head_init(&dhd->tx_pend_queue);
  4689. #endif /* DHD_LB_TXP */
  4690. }
  4691. #endif /* BCMPCIE */
  4692. #ifdef BCMPCIE
  4693. void
  4694. dhd_dpc_kill(dhd_pub_t *dhdp)
  4695. {
  4696. dhd_info_t *dhd;
  4697. if (!dhdp) {
  4698. return;
  4699. }
  4700. dhd = dhdp->info;
  4701. if (!dhd) {
  4702. return;
  4703. }
  4704. if (dhd->thr_dpc_ctl.thr_pid < 0) {
  4705. tasklet_kill(&dhd->tasklet);
  4706. DHD_ERROR(("%s: tasklet disabled\n", __FUNCTION__));
  4707. }
  4708. #ifdef DHD_LB
  4709. #ifdef DHD_LB_RXP
  4710. cancel_work_sync(&dhd->rx_napi_dispatcher_work);
  4711. __skb_queue_purge(&dhd->rx_pend_queue);
  4712. #endif /* DHD_LB_RXP */
  4713. #ifdef DHD_LB_TXP
  4714. cancel_work_sync(&dhd->tx_dispatcher_work);
  4715. skb_queue_purge(&dhd->tx_pend_queue);
  4716. #endif /* DHD_LB_TXP */
  4717. /* Kill the Load Balancing Tasklets */
  4718. #if defined(DHD_LB_TXC)
  4719. tasklet_kill(&dhd->tx_compl_tasklet);
  4720. #endif /* DHD_LB_TXC */
  4721. #if defined(DHD_LB_RXC)
  4722. tasklet_kill(&dhd->rx_compl_tasklet);
  4723. #endif /* DHD_LB_RXC */
  4724. #if defined(DHD_LB_TXP)
  4725. tasklet_kill(&dhd->tx_tasklet);
  4726. #endif /* DHD_LB_TXP */
  4727. #endif /* DHD_LB */
  4728. }
  4729. void
  4730. dhd_dpc_tasklet_kill(dhd_pub_t *dhdp)
  4731. {
  4732. dhd_info_t *dhd;
  4733. if (!dhdp) {
  4734. return;
  4735. }
  4736. dhd = dhdp->info;
  4737. if (!dhd) {
  4738. return;
  4739. }
  4740. if (dhd->thr_dpc_ctl.thr_pid < 0) {
  4741. tasklet_kill(&dhd->tasklet);
  4742. }
  4743. }
  4744. #endif /* BCMPCIE */
  4745. static void
  4746. dhd_dpc(ulong data)
  4747. {
  4748. dhd_info_t *dhd;
  4749. dhd = (dhd_info_t *)data;
4750. /* This tasklet is scheduled by dhd_sched_dpc() [dhd_linux.c]
4751. * below, with the wake lock held;
4752. * the tasklet is initialized in dhd_attach()
4753. */
  4754. /* Call bus dpc unless it indicated down (then clean stop) */
  4755. if (dhd->pub.busstate != DHD_BUS_DOWN) {
  4756. #if defined(DHD_LB_STATS) && defined(PCIE_FULL_DONGLE)
  4757. DHD_LB_STATS_INCR(dhd->dhd_dpc_cnt);
  4758. #endif /* DHD_LB_STATS && PCIE_FULL_DONGLE */
  4759. if (dhd_bus_dpc(dhd->pub.bus)) {
  4760. tasklet_schedule(&dhd->tasklet);
  4761. }
  4762. } else {
  4763. dhd_bus_stop(dhd->pub.bus, TRUE);
  4764. }
  4765. }
  4766. void
  4767. dhd_sched_dpc(dhd_pub_t *dhdp)
  4768. {
  4769. dhd_info_t *dhd = (dhd_info_t *)dhdp->info;
  4770. if (dhd->thr_dpc_ctl.thr_pid >= 0) {
  4771. DHD_OS_WAKE_LOCK(dhdp);
  4772. /* If the semaphore does not get up,
  4773. * wake unlock should be done here
  4774. */
  4775. if (!binary_sema_up(&dhd->thr_dpc_ctl)) {
  4776. DHD_OS_WAKE_UNLOCK(dhdp);
  4777. }
  4778. return;
  4779. } else {
  4780. dhd_bus_set_dpc_sched_time(dhdp);
  4781. tasklet_schedule(&dhd->tasklet);
  4782. }
  4783. }
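/* Note the asymmetric wake-lock pairing in dhd_sched_dpc(): on the kthread
 * path the lock taken here is released by dhd_dpc_thread() after
 * dhd_bus_dpc() finishes (or released immediately above when the semaphore
 * was already up), while the tasklet path takes no wake lock at all. Keep
 * this pairing intact when adding early returns.
 */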
  4784. static void
  4785. dhd_sched_rxf(dhd_pub_t *dhdp, void *skb)
  4786. {
  4787. dhd_info_t *dhd = (dhd_info_t *)dhdp->info;
  4788. DHD_OS_WAKE_LOCK(dhdp);
  4789. DHD_TRACE(("dhd_sched_rxf: Enter\n"));
  4790. do {
  4791. if (dhd_rxf_enqueue(dhdp, skb) == BCME_OK)
  4792. break;
  4793. } while (1);
  4794. if (dhd->thr_rxf_ctl.thr_pid >= 0) {
  4795. up(&dhd->thr_rxf_ctl.sema);
  4796. }
  4797. return;
  4798. }
  4799. #if defined(BCM_DNGL_EMBEDIMAGE) || defined(BCM_REQUEST_FW)
  4800. #endif /* defined(BCM_DNGL_EMBEDIMAGE) || defined(BCM_REQUEST_FW) */

#ifdef TOE
/* Retrieve current toe component enables, which are kept as a bitmap in toe_ol iovar */
static int
dhd_toe_get(dhd_info_t *dhd, int ifidx, uint32 *toe_ol)
{
	char buf[32];
	int ret;

	ret = dhd_iovar(&dhd->pub, ifidx, "toe_ol", NULL, 0, (char *)&buf, sizeof(buf), FALSE);

	if (ret < 0) {
		if (ret == -EIO) {
			DHD_ERROR(("%s: toe not supported by device\n", dhd_ifname(&dhd->pub,
				ifidx)));
			return -EOPNOTSUPP;
		}

		DHD_INFO(("%s: could not get toe_ol: ret=%d\n", dhd_ifname(&dhd->pub, ifidx), ret));
		return ret;
	}

	memcpy(toe_ol, buf, sizeof(uint32));
	return 0;
}

/* Set current toe component enables in toe_ol iovar, and set toe global enable iovar */
static int
dhd_toe_set(dhd_info_t *dhd, int ifidx, uint32 toe_ol)
{
	int toe, ret;

	/* Set toe_ol as requested */
	ret = dhd_iovar(&dhd->pub, ifidx, "toe_ol", (char *)&toe_ol, sizeof(toe_ol),
		NULL, 0, TRUE);
	if (ret < 0) {
		DHD_ERROR(("%s: could not set toe_ol: ret=%d\n",
			dhd_ifname(&dhd->pub, ifidx), ret));
		return ret;
	}

	/* Enable toe globally only if any components are enabled. */
	toe = (toe_ol != 0);
	ret = dhd_iovar(&dhd->pub, ifidx, "toe", (char *)&toe, sizeof(toe), NULL, 0, TRUE);
	if (ret < 0) {
		DHD_ERROR(("%s: could not set toe: ret=%d\n", dhd_ifname(&dhd->pub, ifidx), ret));
		return ret;
	}

	return 0;
}
#endif /* TOE */
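
/*
 * Usage sketch for the TOE helpers above (illustrative only, not called from
 * this file): forcing TX checksum offload on for interface 0. "dhd" is
 * assumed to be a valid dhd_info_t pointer.
 */
#if 0
{
	uint32 toe_ol;

	if (dhd_toe_get(dhd, 0, &toe_ol) == 0) {
		toe_ol |= TOE_TX_CSUM_OL;
		if (dhd_toe_set(dhd, 0, toe_ol) == 0)
			dhd->iflist[0]->net->features |= NETIF_F_IP_CSUM;
	}
}
#endif /* 0: TOE usage sketch */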

#if defined(WL_CFG80211) && defined(NUM_SCB_MAX_PROBE)
void dhd_set_scb_probe(dhd_pub_t *dhd)
{
	wl_scb_probe_t scb_probe;
	char iovbuf[WL_EVENTING_MASK_LEN + sizeof(wl_scb_probe_t)];
	int ret;

	if (dhd->op_mode & DHD_FLAG_HOSTAP_MODE) {
		return;
	}

	ret = dhd_iovar(dhd, 0, "scb_probe", NULL, 0, iovbuf, sizeof(iovbuf), FALSE);
	if (ret < 0) {
		DHD_ERROR(("%s: GET max_scb_probe failed\n", __FUNCTION__));
	}

	memcpy(&scb_probe, iovbuf, sizeof(wl_scb_probe_t));

	scb_probe.scb_max_probe = NUM_SCB_MAX_PROBE;

	ret = dhd_iovar(dhd, 0, "scb_probe", (char *)&scb_probe, sizeof(wl_scb_probe_t), NULL, 0,
			TRUE);
	if (ret < 0) {
		DHD_ERROR(("%s: max_scb_probe setting failed\n", __FUNCTION__));
		return;
	}
}
#endif /* WL_CFG80211 && NUM_SCB_MAX_PROBE */

static void
dhd_ethtool_get_drvinfo(struct net_device *net, struct ethtool_drvinfo *info)
{
	dhd_info_t *dhd = DHD_DEV_INFO(net);

	snprintf(info->driver, sizeof(info->driver), "wl");
	snprintf(info->version, sizeof(info->version), "%lu", dhd->pub.drv_version);
}

struct ethtool_ops dhd_ethtool_ops = {
	.get_drvinfo = dhd_ethtool_get_drvinfo
};

static int
dhd_ethtool(dhd_info_t *dhd, void *uaddr)
{
	struct ethtool_drvinfo info;
	char drvname[sizeof(info.driver)];
	uint32 cmd;
#ifdef TOE
	struct ethtool_value edata;
	uint32 toe_cmpnt, csum_dir;
	int ret;
#endif /* TOE */

	DHD_TRACE(("%s: Enter\n", __FUNCTION__));

	/* all ethtool calls start with a cmd word */
	if (copy_from_user(&cmd, uaddr, sizeof(uint32)))
		return -EFAULT;

	switch (cmd) {
	case ETHTOOL_GDRVINFO:
		/* Copy out any request driver name */
		if (copy_from_user(&info, uaddr, sizeof(info)))
			return -EFAULT;
		strncpy(drvname, info.driver, sizeof(info.driver));
		drvname[sizeof(info.driver) - 1] = '\0';

		/* clear struct for return */
		memset(&info, 0, sizeof(info));
		info.cmd = cmd;

		/* if dhd requested, identify ourselves */
		if (strcmp(drvname, "?dhd") == 0) {
			snprintf(info.driver, sizeof(info.driver), "dhd");
			strncpy(info.version, EPI_VERSION_STR, sizeof(info.version) - 1);
			info.version[sizeof(info.version) - 1] = '\0';
		}
		/* otherwise, require dongle to be up */
		else if (!dhd->pub.up) {
			DHD_ERROR(("%s: dongle is not up\n", __FUNCTION__));
			return -ENODEV;
		}
		/* finally, report dongle driver type */
		else if (dhd->pub.iswl)
			snprintf(info.driver, sizeof(info.driver), "wl");
		else
			snprintf(info.driver, sizeof(info.driver), "xx");

		snprintf(info.version, sizeof(info.version), "%lu", dhd->pub.drv_version);
		if (copy_to_user(uaddr, &info, sizeof(info)))
			return -EFAULT;
		DHD_CTL(("%s: given %*s, returning %s\n", __FUNCTION__,
			(int)sizeof(drvname), drvname, info.driver));
		break;

#ifdef TOE
	/* Get toe offload components from dongle */
	case ETHTOOL_GRXCSUM:
	case ETHTOOL_GTXCSUM:
		if ((ret = dhd_toe_get(dhd, 0, &toe_cmpnt)) < 0)
			return ret;

		csum_dir = (cmd == ETHTOOL_GTXCSUM) ? TOE_TX_CSUM_OL : TOE_RX_CSUM_OL;

		edata.cmd = cmd;
		edata.data = (toe_cmpnt & csum_dir) ? 1 : 0;

		if (copy_to_user(uaddr, &edata, sizeof(edata)))
			return -EFAULT;
		break;

	/* Set toe offload components in dongle */
	case ETHTOOL_SRXCSUM:
	case ETHTOOL_STXCSUM:
		if (copy_from_user(&edata, uaddr, sizeof(edata)))
			return -EFAULT;

		/* Read the current settings, update and write back */
		if ((ret = dhd_toe_get(dhd, 0, &toe_cmpnt)) < 0)
			return ret;

		csum_dir = (cmd == ETHTOOL_STXCSUM) ? TOE_TX_CSUM_OL : TOE_RX_CSUM_OL;

		if (edata.data != 0)
			toe_cmpnt |= csum_dir;
		else
			toe_cmpnt &= ~csum_dir;

		if ((ret = dhd_toe_set(dhd, 0, toe_cmpnt)) < 0)
			return ret;

		/* If setting TX checksum mode, tell Linux the new mode */
		if (cmd == ETHTOOL_STXCSUM) {
			if (edata.data)
				dhd->iflist[0]->net->features |= NETIF_F_IP_CSUM;
			else
				dhd->iflist[0]->net->features &= ~NETIF_F_IP_CSUM;
		}
		break;
#endif /* TOE */

	default:
		return -EOPNOTSUPP;
	}

	return 0;
}
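
/*
 * A minimal user-space caller for the ETHTOOL_GDRVINFO path above (sketch;
 * "sock_fd" is assumed to be an open AF_INET datagram socket and "wlan0" the
 * DHD primary interface). Writing "?dhd" into .driver asks the host driver to
 * identify itself instead of the dongle:
 */
#if 0
	struct ethtool_drvinfo info;
	struct ifreq ifr;

	memset(&info, 0, sizeof(info));
	memset(&ifr, 0, sizeof(ifr));
	info.cmd = ETHTOOL_GDRVINFO;
	snprintf(info.driver, sizeof(info.driver), "?dhd");
	strncpy(ifr.ifr_name, "wlan0", IFNAMSIZ - 1);
	ifr.ifr_data = (void *)&info;
	if (ioctl(sock_fd, SIOCETHTOOL, &ifr) == 0)
		printf("driver %s version %s\n", info.driver, info.version);
#endif /* 0: user-space ETHTOOL_GDRVINFO sketch */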

static bool dhd_check_hang(struct net_device *net, dhd_pub_t *dhdp, int error)
{
#if defined(OEM_ANDROID)
	if (!dhdp) {
		DHD_ERROR(("%s: dhdp is NULL\n", __FUNCTION__));
		return FALSE;
	}

	if (!dhdp->up)
		return FALSE;

#if !defined(BCMPCIE)
	if (dhdp->info->thr_dpc_ctl.thr_pid < 0) {
		DHD_ERROR(("%s : skipped due to negative pid - unloading?\n", __FUNCTION__));
		return FALSE;
	}
#endif /* !BCMPCIE */

	if ((error == -ETIMEDOUT) || (error == -EREMOTEIO) ||
		((dhdp->busstate == DHD_BUS_DOWN) && (!dhdp->dongle_reset))) {
#ifdef BCMPCIE
		DHD_ERROR(("%s: Event HANG send up due to re=%d te=%d d3acke=%d e=%d s=%d\n",
			__FUNCTION__, dhdp->rxcnt_timeout, dhdp->txcnt_timeout,
			dhdp->d3ackcnt_timeout, error, dhdp->busstate));
#else
		DHD_ERROR(("%s: Event HANG send up due to re=%d te=%d e=%d s=%d\n", __FUNCTION__,
			dhdp->rxcnt_timeout, dhdp->txcnt_timeout, error, dhdp->busstate));
#endif /* BCMPCIE */
		if (dhdp->hang_reason == 0) {
			if (dhdp->dongle_trap_occured) {
				dhdp->hang_reason = HANG_REASON_DONGLE_TRAP;
#ifdef BCMPCIE
			} else if (dhdp->d3ackcnt_timeout) {
				dhdp->hang_reason = dhdp->is_sched_error ?
					HANG_REASON_D3_ACK_TIMEOUT_SCHED_ERROR :
					HANG_REASON_D3_ACK_TIMEOUT;
#endif /* BCMPCIE */
			} else {
				dhdp->hang_reason = dhdp->is_sched_error ?
					HANG_REASON_IOCTL_RESP_TIMEOUT_SCHED_ERROR :
					HANG_REASON_IOCTL_RESP_TIMEOUT;
			}
		}
		net_os_send_hang_message(net);
		return TRUE;
	}
#endif /* OEM_ANDROID */
	return FALSE;
}

#ifdef WL_MONITOR
bool
dhd_monitor_enabled(dhd_pub_t *dhd, int ifidx)
{
	return (dhd->info->monitor_type != 0);
}

void
dhd_rx_mon_pkt(dhd_pub_t *dhdp, host_rxbuf_cmpl_t *msg, void *pkt, int ifidx)
{
	dhd_info_t *dhd = (dhd_info_t *)dhdp->info;
	{
		uint8 amsdu_flag = (msg->flags & BCMPCIE_PKT_FLAGS_MONITOR_MASK) >>
			BCMPCIE_PKT_FLAGS_MONITOR_SHIFT;
		switch (amsdu_flag) {
			case BCMPCIE_PKT_FLAGS_MONITOR_NO_AMSDU:
			default:
				if (!dhd->monitor_skb) {
					if ((dhd->monitor_skb = PKTTONATIVE(dhdp->osh, pkt))
						== NULL)
						return;
				}
				if (dhd->monitor_type && dhd->monitor_dev)
					dhd->monitor_skb->dev = dhd->monitor_dev;
				else {
					PKTFREE(dhdp->osh, pkt, FALSE);
					dhd->monitor_skb = NULL;
					return;
				}
				dhd->monitor_skb->protocol =
					eth_type_trans(dhd->monitor_skb, dhd->monitor_skb->dev);
				dhd->monitor_len = 0;
				break;

			case BCMPCIE_PKT_FLAGS_MONITOR_FIRST_PKT:
				if (!dhd->monitor_skb) {
					if ((dhd->monitor_skb = dev_alloc_skb(MAX_MON_PKT_SIZE))
						== NULL)
						return;
					dhd->monitor_len = 0;
				}
				if (dhd->monitor_type && dhd->monitor_dev)
					dhd->monitor_skb->dev = dhd->monitor_dev;
				else {
					PKTFREE(dhdp->osh, pkt, FALSE);
					dev_kfree_skb(dhd->monitor_skb);
					return;
				}
				memcpy(PKTDATA(dhdp->osh, dhd->monitor_skb),
					PKTDATA(dhdp->osh, pkt), PKTLEN(dhdp->osh, pkt));
				dhd->monitor_len = PKTLEN(dhdp->osh, pkt);
				PKTFREE(dhdp->osh, pkt, FALSE);
				return;

			case BCMPCIE_PKT_FLAGS_MONITOR_INTER_PKT:
				memcpy(PKTDATA(dhdp->osh, dhd->monitor_skb) + dhd->monitor_len,
					PKTDATA(dhdp->osh, pkt), PKTLEN(dhdp->osh, pkt));
				dhd->monitor_len += PKTLEN(dhdp->osh, pkt);
				PKTFREE(dhdp->osh, pkt, FALSE);
				return;

			case BCMPCIE_PKT_FLAGS_MONITOR_LAST_PKT:
				memcpy(PKTDATA(dhdp->osh, dhd->monitor_skb) + dhd->monitor_len,
					PKTDATA(dhdp->osh, pkt), PKTLEN(dhdp->osh, pkt));
				dhd->monitor_len += PKTLEN(dhdp->osh, pkt);
				PKTFREE(dhdp->osh, pkt, FALSE);
				skb_put(dhd->monitor_skb, dhd->monitor_len);
				dhd->monitor_skb->protocol =
					eth_type_trans(dhd->monitor_skb, dhd->monitor_skb->dev);
				dhd->monitor_len = 0;
				break;
		}
	}

	if (in_interrupt()) {
		bcm_object_trace_opr(dhd->monitor_skb, BCM_OBJDBG_REMOVE,
			__FUNCTION__, __LINE__);
		DHD_PERIM_UNLOCK_ALL((dhd->fwder_unit % FWDER_MAX_UNIT));
		netif_rx(dhd->monitor_skb);
		DHD_PERIM_LOCK_ALL((dhd->fwder_unit % FWDER_MAX_UNIT));
	} else {
		/* If the receive is not processed inside an ISR,
		 * the softirqd must be woken explicitly to service
		 * the NET_RX_SOFTIRQ. In 2.6 kernels, this is handled
		 * by netif_rx_ni(), but in earlier kernels, we need
		 * to do it manually.
		 */
		bcm_object_trace_opr(dhd->monitor_skb, BCM_OBJDBG_REMOVE,
			__FUNCTION__, __LINE__);
		DHD_PERIM_UNLOCK_ALL((dhd->fwder_unit % FWDER_MAX_UNIT));
		netif_rx_ni(dhd->monitor_skb);
		DHD_PERIM_LOCK_ALL((dhd->fwder_unit % FWDER_MAX_UNIT));
	}

	dhd->monitor_skb = NULL;
}
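
/*
 * Reassembly summary for dhd_rx_mon_pkt() above: NO_AMSDU completions are
 * pushed up immediately, while the FIRST/INTER/LAST flags describe one
 * A-MSDU split across several rx completions. Fragments are accumulated
 * into dhd->monitor_skb and the assembled frame is handed to the stack via
 * netif_rx()/netif_rx_ni() only once the LAST fragment has arrived.
 */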

typedef struct dhd_mon_dev_priv {
	struct net_device_stats stats;
} dhd_mon_dev_priv_t;

#define DHD_MON_DEV_PRIV_SIZE	(sizeof(dhd_mon_dev_priv_t))
#define DHD_MON_DEV_PRIV(dev)	((dhd_mon_dev_priv_t *)DEV_PRIV(dev))
#define DHD_MON_DEV_STATS(dev)	(((dhd_mon_dev_priv_t *)DEV_PRIV(dev))->stats)

static int
dhd_monitor_start(struct sk_buff *skb, struct net_device *dev)
{
	/* Monitor interfaces are receive-only; drop any transmit attempt */
	PKTFREE(NULL, skb, FALSE);
	return 0;
}

#if defined(BT_OVER_SDIO)
void
dhdsdio_bus_usr_cnt_inc(dhd_pub_t *dhdp)
{
	dhdp->info->bus_user_count++;
}

void
dhdsdio_bus_usr_cnt_dec(dhd_pub_t *dhdp)
{
	dhdp->info->bus_user_count--;
}

/* Return values:
 * Success: Returns 0
 * Failure: Returns -1 or errno code
 */
int
dhd_bus_get(wlan_bt_handle_t handle, bus_owner_t owner)
{
	dhd_pub_t *dhdp = (dhd_pub_t *)handle;
	dhd_info_t *dhd = (dhd_info_t *)dhdp->info;
	int ret = 0;

	mutex_lock(&dhd->bus_user_lock);
	++dhd->bus_user_count;
	if (dhd->bus_user_count < 0) {
		DHD_ERROR(("%s(): bus_user_count is negative, which is invalid\n", __FUNCTION__));
		ret = -1;
		goto exit;
	}

	if (dhd->bus_user_count == 1) {
		dhd->pub.hang_was_sent = 0;

		/* First user: turn on WL_REG and start the bus */
		DHD_ERROR(("%s(): First user, turn on WL_REG & start the bus\n", __FUNCTION__));

		if (!wifi_platform_set_power(dhd->adapter, TRUE, WIFI_TURNON_DELAY)) {
			/* Enable F1 */
			ret = dhd_bus_resume(dhdp, 0);
			if (ret) {
				DHD_ERROR(("%s(): Failed to enable F1, err=%d\n",
					__FUNCTION__, ret));
				goto exit;
			}
		}

		dhd_update_fw_nv_path(dhd);
		/* update firmware and nvram path to sdio bus */
		dhd_bus_update_fw_nv_path(dhd->pub.bus,
			dhd->fw_path, dhd->nv_path);
		/* download the firmware, Enable F2 */
		/* TODO: Should be done only in case of FW switch */
		ret = dhd_bus_devreset(dhdp, FALSE);
		dhd_bus_resume(dhdp, 1);
		if (!ret) {
			if (dhd_sync_with_dongle(&dhd->pub) < 0) {
				DHD_ERROR(("%s(): Sync with dongle failed!!\n", __FUNCTION__));
				ret = -EFAULT;
			}
		} else {
			DHD_ERROR(("%s(): Failed to download, err=%d\n", __FUNCTION__, ret));
		}
	} else {
		DHD_ERROR(("%s(): Bus is already acquired, just increased the count to %d\n",
			__FUNCTION__, dhd->bus_user_count));
	}
exit:
	mutex_unlock(&dhd->bus_user_lock);
	return ret;
}
EXPORT_SYMBOL(dhd_bus_get);

/* Return values:
 * Success: Returns 0
 * Failure: Returns -1 or errno code
 */
int
dhd_bus_put(wlan_bt_handle_t handle, bus_owner_t owner)
{
	dhd_pub_t *dhdp = (dhd_pub_t *)handle;
	dhd_info_t *dhd = (dhd_info_t *)dhdp->info;
	int ret = 0;

	BCM_REFERENCE(owner);

	mutex_lock(&dhd->bus_user_lock);
	--dhd->bus_user_count;
	if (dhd->bus_user_count < 0) {
		DHD_ERROR(("%s(): bus_user_count is negative, which is invalid\n", __FUNCTION__));
		dhd->bus_user_count = 0;
		ret = -1;
		goto exit;
	}

	if (dhd->bus_user_count == 0) {
		/* Last user: stop the bus and turn off WL_REG */
		DHD_ERROR(("%s(): No owners left, turn off WL_REG & stop the bus\n",
			__FUNCTION__));
#ifdef PROP_TXSTATUS
		if (dhd->pub.wlfc_enabled) {
			dhd_wlfc_deinit(&dhd->pub);
		}
#endif /* PROP_TXSTATUS */
#ifdef PNO_SUPPORT
		if (dhd->pub.pno_state) {
			dhd_pno_deinit(&dhd->pub);
		}
#endif /* PNO_SUPPORT */
#ifdef RTT_SUPPORT
		if (dhd->pub.rtt_state) {
			dhd_rtt_deinit(&dhd->pub);
		}
#endif /* RTT_SUPPORT */
		ret = dhd_bus_devreset(dhdp, TRUE);
		if (!ret) {
			dhd_bus_suspend(dhdp);
			wifi_platform_set_power(dhd->adapter, FALSE, WIFI_TURNOFF_DELAY);
		}
	} else {
		DHD_ERROR(("%s(): Other owners still using the bus, decreased the count to %d\n",
			__FUNCTION__, dhd->bus_user_count));
	}
exit:
	mutex_unlock(&dhd->bus_user_lock);
	return ret;
}
EXPORT_SYMBOL(dhd_bus_put);
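
/*
 * Usage sketch for the refcounted bus ownership above (illustrative; the
 * real callers live in the WLAN/BT glue code). "handle" and "owner" stand
 * for the caller's wlan_bt_handle_t and bus_owner_t. Every successful
 * dhd_bus_get() must be balanced by a dhd_bus_put() for the same owner, as
 * dhd_net_bus_get()/dhd_net_bus_put() below do for WLAN_MODULE:
 */
#if 0
	if (dhd_bus_get(handle, owner) == 0) {
		/* ... bus is powered and firmware is in sync ... */
		dhd_bus_put(handle, owner);
	}
#endif /* 0: bus get/put pairing sketch */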

int
dhd_net_bus_get(struct net_device *dev)
{
	dhd_info_t *dhd = DHD_DEV_INFO(dev);
	return dhd_bus_get(&dhd->pub, WLAN_MODULE);
}

int
dhd_net_bus_put(struct net_device *dev)
{
	dhd_info_t *dhd = DHD_DEV_INFO(dev);
	return dhd_bus_put(&dhd->pub, WLAN_MODULE);
}

/*
 * Function to enable the Bus Clock
 * Returns BCME_OK on success and BCME_xxx on failure
 *
 * This function must be called from a sleepable context
 */
int dhd_bus_clk_enable(wlan_bt_handle_t handle, bus_owner_t owner)
{
	dhd_pub_t *dhdp = (dhd_pub_t *)handle;
	int ret;

	dhd_os_sdlock(dhdp);
	/*
	 * The second argument is TRUE, meaning the call is expected
	 * to block until the clocks are actually available
	 */
	ret = __dhdsdio_clk_enable(dhdp->bus, owner, TRUE);
	dhd_os_sdunlock(dhdp);

	return ret;
}
EXPORT_SYMBOL(dhd_bus_clk_enable);

/*
 * Function to disable the Bus Clock
 * Returns BCME_OK on success and BCME_xxx on failure
 *
 * This function must be called from a sleepable context
 */
int dhd_bus_clk_disable(wlan_bt_handle_t handle, bus_owner_t owner)
{
	dhd_pub_t *dhdp = (dhd_pub_t *)handle;
	int ret;

	dhd_os_sdlock(dhdp);
	/*
	 * The second argument is TRUE, meaning the call is expected
	 * to block until the clocks are actually disabled
	 */
	ret = __dhdsdio_clk_disable(dhdp->bus, owner, TRUE);
	dhd_os_sdunlock(dhdp);

	return ret;
}
EXPORT_SYMBOL(dhd_bus_clk_disable);
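
/*
 * As with dhd_bus_get()/dhd_bus_put() above, clock requests are expected to
 * be strictly paired: an owner that called dhd_bus_clk_enable() must later
 * call dhd_bus_clk_disable() with the same owner, otherwise the SDIO clock
 * is kept alive on that owner's behalf indefinitely.
 */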

/*
 * Function to reset the bt_use_count counter to zero.
 *
 * This function must be called from a sleepable context
 */
void dhd_bus_reset_bt_use_count(wlan_bt_handle_t handle)
{
	dhd_pub_t *dhdp = (dhd_pub_t *)handle;

	/* take the lock and reset bt use count */
	dhd_os_sdlock(dhdp);
	dhdsdio_reset_bt_use_count(dhdp->bus);
	dhd_os_sdunlock(dhdp);
}
EXPORT_SYMBOL(dhd_bus_reset_bt_use_count);

void dhd_bus_retry_hang_recovery(wlan_bt_handle_t handle)
{
	dhd_pub_t *dhdp = (dhd_pub_t *)handle;
	dhd_info_t *dhd = (dhd_info_t *)dhdp->info;

	dhdp->hang_was_sent = 0;

	dhd_os_send_hang_message(&dhd->pub);
}
EXPORT_SYMBOL(dhd_bus_retry_hang_recovery);
#endif /* BT_OVER_SDIO */

static int
dhd_monitor_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
{
	return 0;
}

static struct net_device_stats *
dhd_monitor_get_stats(struct net_device *dev)
{
	return &DHD_MON_DEV_STATS(dev);
}

static const struct net_device_ops netdev_monitor_ops =
{
	.ndo_start_xmit = dhd_monitor_start,
	.ndo_get_stats = dhd_monitor_get_stats,
	.ndo_do_ioctl = dhd_monitor_ioctl
};

static void
dhd_add_monitor_if(dhd_info_t *dhd)
{
	struct net_device *dev;
	char *devname;
	uint32 scan_suppress = FALSE;
	int ret = BCME_OK;

	if (!dhd) {
		DHD_ERROR(("%s: dhd info not available\n", __FUNCTION__));
		return;
	}

	if (dhd->monitor_dev) {
		DHD_ERROR(("%s: monitor i/f already exists\n", __FUNCTION__));
		return;
	}

	dev = alloc_etherdev(DHD_MON_DEV_PRIV_SIZE);
	if (!dev) {
		DHD_ERROR(("%s: alloc wlif failed\n", __FUNCTION__));
		return;
	}

	devname = "radiotap";
	snprintf(dev->name, sizeof(dev->name), "%s%u", devname, dhd->unit);

#ifndef ARPHRD_IEEE80211_PRISM /* From Linux 2.4.18 */
#define ARPHRD_IEEE80211_PRISM 802
#endif /* ARPHRD_IEEE80211_PRISM */

#ifndef ARPHRD_IEEE80211_RADIOTAP
#define ARPHRD_IEEE80211_RADIOTAP 803 /* IEEE 802.11 + radiotap header */
#endif /* ARPHRD_IEEE80211_RADIOTAP */

	dev->type = ARPHRD_IEEE80211_RADIOTAP;
	dev->netdev_ops = &netdev_monitor_ops;

	if (register_netdevice(dev)) {
		DHD_ERROR(("%s, register_netdev failed for %s\n",
			__FUNCTION__, dev->name));
		free_netdev(dev);
		return;
	}

	if (FW_SUPPORTED((&dhd->pub), monitor)) {
#ifdef DHD_PCIE_RUNTIMEPM
		/* Disable RuntimePM in monitor mode */
		DHD_DISABLE_RUNTIME_PM(&dhd->pub);
		DHD_ERROR(("%s : disable runtime PM in monitor mode\n", __FUNCTION__));
#endif /* DHD_PCIE_RUNTIMEPM */
		scan_suppress = TRUE;
		/* Set the SCAN SUPPRESS Flag in the firmware to disable scan in Monitor mode */
		ret = dhd_iovar(&dhd->pub, 0, "scansuppress", (char *)&scan_suppress,
			sizeof(scan_suppress), NULL, 0, TRUE);
		if (ret < 0) {
			DHD_ERROR(("%s: scansuppress set failed, ret=%d\n", __FUNCTION__, ret));
		}
	}

	dhd->monitor_dev = dev;
}

static void
dhd_del_monitor_if(dhd_info_t *dhd)
{
	int ret = BCME_OK;
	uint32 scan_suppress = FALSE;

	if (!dhd) {
		DHD_ERROR(("%s: dhd info not available\n", __FUNCTION__));
		return;
	}

	if (!dhd->monitor_dev) {
		DHD_ERROR(("%s: monitor i/f doesn't exist\n", __FUNCTION__));
		return;
	}

	if (FW_SUPPORTED((&dhd->pub), monitor)) {
#ifdef DHD_PCIE_RUNTIMEPM
		/* Enable RuntimePM */
		DHD_ENABLE_RUNTIME_PM(&dhd->pub);
		DHD_ERROR(("%s : enabled runtime PM\n", __FUNCTION__));
#endif /* DHD_PCIE_RUNTIMEPM */
		scan_suppress = FALSE;
		/* Unset the SCAN SUPPRESS Flag in the firmware to enable scan */
		ret = dhd_iovar(&dhd->pub, 0, "scansuppress", (char *)&scan_suppress,
			sizeof(scan_suppress), NULL, 0, TRUE);
		if (ret < 0) {
			DHD_ERROR(("%s: scansuppress set failed, ret=%d\n", __FUNCTION__, ret));
		}
	}

	if (dhd->monitor_dev) {
		if (dhd->monitor_dev->reg_state == NETREG_UNINITIALIZED) {
			free_netdev(dhd->monitor_dev);
		} else {
			unregister_netdevice(dhd->monitor_dev);
		}
		dhd->monitor_dev = NULL;
	}
}

static void
dhd_set_monitor(dhd_pub_t *pub, int ifidx, int val)
{
	dhd_info_t *dhd = pub->info;

	DHD_TRACE(("%s: val %d\n", __FUNCTION__, val));

	dhd_net_if_lock_local(dhd);
	if (!val) {
		/* Delete monitor */
		dhd_del_monitor_if(dhd);
	} else {
		/* Add monitor */
		dhd_add_monitor_if(dhd);
	}
	dhd->monitor_type = val;
	dhd_net_if_unlock_local(dhd);
}
#endif /* WL_MONITOR */
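
/*
 * dhd_set_monitor() above is not called directly by user space; it is driven
 * from the WLC_SET_MONITOR intercept in dhd_ioctl_process() below, after the
 * ioctl has been accepted by the firmware.
 */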

#if defined(DHD_H2D_LOG_TIME_SYNC)
/*
 * Helper function:
 * Used for syncing RTE console message timestamps with host printk timestamps
 */
void dhd_h2d_log_time_sync_deferred_wq_schedule(dhd_pub_t *dhdp)
{
	dhd_info_t *info = dhdp->info;

	/* Ideally the "state" should always be TRUE */
	dhd_deferred_schedule_work(info->dhd_deferred_wq, NULL,
		DHD_WQ_WORK_H2D_CONSOLE_TIME_STAMP_MATCH,
		dhd_deferred_work_rte_log_time_sync,
		DHD_WQ_WORK_PRIORITY_LOW);
}

void
dhd_deferred_work_rte_log_time_sync(void *handle, void *event_info, u8 event)
{
	dhd_info_t *dhd_info = handle;
	dhd_pub_t *dhd;

	if (event != DHD_WQ_WORK_H2D_CONSOLE_TIME_STAMP_MATCH) {
		DHD_ERROR(("%s: unexpected event\n", __FUNCTION__));
		return;
	}

	if (!dhd_info) {
		DHD_ERROR(("%s: dhd info not available\n", __FUNCTION__));
		return;
	}

	dhd = &dhd_info->pub;

	/*
	 * Send the IOVAR that syncs console time between host and dongle.
	 * If the IOVAR fails,
	 * 1. dhd_rte_time_sync_ms is set to 0, and
	 * 2. host/dongle console time sync will *not* happen.
	 */
	dhd_h2d_log_time_sync(dhd);
}
#endif /* DHD_H2D_LOG_TIME_SYNC */

int dhd_ioctl_process(dhd_pub_t *pub, int ifidx, dhd_ioctl_t *ioc, void *data_buf)
{
	int bcmerror = BCME_OK;
	int buflen = 0;
	struct net_device *net;

	net = dhd_idx2net(pub, ifidx);
	if (!net) {
		bcmerror = BCME_BADARG;
		/*
		 * A bad netdev pointer means the DHD can't communicate with
		 * higher layers, so just return from here
		 */
		return bcmerror;
	}

	/* check for local dhd ioctl and handle it */
	if (ioc->driver == DHD_IOCTL_MAGIC) {
		/* This is a DHD IOVAR, truncate buflen to DHD_IOCTL_MAXLEN */
		if (data_buf)
			buflen = MIN(ioc->len, DHD_IOCTL_MAXLEN);
		bcmerror = dhd_ioctl((void *)pub, ioc, data_buf, buflen);
		if (bcmerror)
			pub->bcmerror = bcmerror;
		goto done;
	}

	/* This is a WL IOVAR, truncate buflen to WLC_IOCTL_MAXLEN */
	if (data_buf)
		buflen = MIN(ioc->len, WLC_IOCTL_MAXLEN);

	/* send to dongle (must be up, and wl). */
	if (pub->busstate == DHD_BUS_DOWN || pub->busstate == DHD_BUS_LOAD) {
		if ((!pub->dongle_trap_occured) && allow_delay_fwdl) {
			int ret;

			if (atomic_read(&exit_in_progress)) {
				DHD_ERROR(("%s module exit in progress\n", __func__));
				bcmerror = BCME_DONGLE_DOWN;
				goto done;
			}

			ret = dhd_bus_start(pub);
			if (ret != 0) {
				DHD_ERROR(("%s: failed with code %d\n", __FUNCTION__, ret));
				bcmerror = BCME_DONGLE_DOWN;
				goto done;
			}
		} else {
			bcmerror = BCME_DONGLE_DOWN;
			goto done;
		}
	}

	if (!pub->iswl) {
		bcmerror = BCME_DONGLE_DOWN;
		goto done;
	}

	/*
	 * Flush the TX queue if required for proper message serialization:
	 * Intercept the WLC_SET_KEY IOCTL - serialize the M4 send and the set-key
	 * IOCTL to prevent M4 encryption, and
	 * intercept the WLC_DISASSOC IOCTL - serialize WPS-DONE and WLC_DISASSOC to
	 * prevent the disassoc frame being sent before the WPS-DONE frame.
	 */
	if (ioc->cmd == WLC_SET_KEY ||
	    (ioc->cmd == WLC_SET_VAR && data_buf != NULL &&
	     strncmp("wsec_key", data_buf, 9) == 0) ||
	    (ioc->cmd == WLC_SET_VAR && data_buf != NULL &&
	     strncmp("bsscfg:wsec_key", data_buf, 15) == 0) ||
	    ioc->cmd == WLC_DISASSOC)
		dhd_wait_pend8021x(net);

	if ((ioc->cmd == WLC_SET_VAR || ioc->cmd == WLC_GET_VAR) &&
		data_buf != NULL && strncmp("rpc_", data_buf, 4) == 0) {
		bcmerror = BCME_UNSUPPORTED;
		goto done;
	}

	bcmerror = dhd_wl_ioctl(pub, ifidx, (wl_ioctl_t *)ioc, data_buf, buflen);

#ifdef WL_MONITOR
	/* Intercept monitor ioctl here, add/del monitor if */
	if (bcmerror == BCME_OK && ioc->cmd == WLC_SET_MONITOR) {
		int val = 0;
		if (data_buf != NULL && buflen != 0) {
			if (buflen >= 4) {
				val = *(int *)data_buf;
			} else if (buflen >= 2) {
				val = *(short *)data_buf;
			} else {
				val = *(char *)data_buf;
			}
		}
		dhd_set_monitor(pub, ifidx, val);
	}
#endif /* WL_MONITOR */

done:
#if defined(OEM_ANDROID)
	dhd_check_hang(net, pub, bcmerror);
#endif /* OEM_ANDROID */

	return bcmerror;
}

/**
 * Called by the OS (optionally via a wrapper function).
 * @param net  Linux per dongle instance
 * @param ifr  Linux request structure
 * @param cmd  e.g. SIOCETHTOOL
 */
static int
dhd_ioctl_entry(struct net_device *net, struct ifreq *ifr, int cmd)
{
	dhd_info_t *dhd = DHD_DEV_INFO(net);
	dhd_ioctl_t ioc;
	int bcmerror = 0;
	int ifidx;
	int ret;
	void *local_buf = NULL;           /**< buffer in kernel space */
	void __user *ioc_buf_user = NULL; /**< buffer in user space */
	u16 buflen = 0;

#ifdef ENABLE_INSMOD_NO_FW_LOAD
	allow_delay_fwdl = 1;
#endif /* ENABLE_INSMOD_NO_FW_LOAD */

	if (atomic_read(&exit_in_progress)) {
		DHD_ERROR(("%s module exit in progress\n", __func__));
		bcmerror = BCME_DONGLE_DOWN;
		return OSL_ERROR(bcmerror);
	}

	DHD_OS_WAKE_LOCK(&dhd->pub);
	DHD_PERIM_LOCK(&dhd->pub);

#if defined(OEM_ANDROID)
#ifndef ENABLE_INSMOD_NO_FW_LOAD
	/* Interface up check for built-in type */
	if (!dhd_download_fw_on_driverload && dhd->pub.up == FALSE) {
		DHD_TRACE(("%s: Interface is down\n", __FUNCTION__));
		DHD_PERIM_UNLOCK(&dhd->pub);
		DHD_OS_WAKE_UNLOCK(&dhd->pub);
		return OSL_ERROR(BCME_NOTUP);
	}
#endif /* !ENABLE_INSMOD_NO_FW_LOAD */
#endif /* OEM_ANDROID */

	ifidx = dhd_net2idx(dhd, net);
	DHD_TRACE(("%s: ifidx %d, cmd 0x%04x\n", __FUNCTION__, ifidx, cmd));

#if defined(WL_STATIC_IF)
	/* skip for static ndev when it is down */
	if (dhd_is_static_ndev(&dhd->pub, net) && !(net->flags & IFF_UP)) {
		DHD_PERIM_UNLOCK(&dhd->pub);
		DHD_OS_WAKE_UNLOCK(&dhd->pub);
		return -1;
	}
#endif /* WL_STATIC_IF */

	if (ifidx == DHD_BAD_IF) {
		DHD_ERROR(("%s: BAD IF\n", __FUNCTION__));
		DHD_PERIM_UNLOCK(&dhd->pub);
		DHD_OS_WAKE_UNLOCK(&dhd->pub);
		return -1;
	}

#if defined(WL_WIRELESS_EXT)
	/* linux wireless extensions */
	if ((cmd >= SIOCIWFIRST) && (cmd <= SIOCIWLAST)) {
		/* may recurse, do NOT lock */
		ret = wl_iw_ioctl(net, ifr, cmd);
		DHD_PERIM_UNLOCK(&dhd->pub);
		DHD_OS_WAKE_UNLOCK(&dhd->pub);
		return ret;
	}
#endif /* defined(WL_WIRELESS_EXT) */

	if (cmd == SIOCETHTOOL) {
		ret = dhd_ethtool(dhd, (void *)ifr->ifr_data);
		DHD_PERIM_UNLOCK(&dhd->pub);
		DHD_OS_WAKE_UNLOCK(&dhd->pub);
		return ret;
	}

#if defined(OEM_ANDROID)
	if (cmd == SIOCDEVPRIVATE + 1) {
		ret = wl_android_priv_cmd(net, ifr);
		dhd_check_hang(net, &dhd->pub, ret);
		DHD_PERIM_UNLOCK(&dhd->pub);
		DHD_OS_WAKE_UNLOCK(&dhd->pub);
		return ret;
	}
#endif /* OEM_ANDROID */

	if (cmd != SIOCDEVPRIVATE) {
		DHD_PERIM_UNLOCK(&dhd->pub);
		DHD_OS_WAKE_UNLOCK(&dhd->pub);
		return -EOPNOTSUPP;
	}

	memset(&ioc, 0, sizeof(ioc));

	{
		/* Copy the ioc control structure part of the ioctl request */
		if (copy_from_user(&ioc, ifr->ifr_data, sizeof(wl_ioctl_t))) {
			bcmerror = BCME_BADADDR;
			goto done;
		}

		/* To differentiate between wl and dhd, read 4 more bytes */
		if ((copy_from_user(&ioc.driver, (char *)ifr->ifr_data + sizeof(wl_ioctl_t),
			sizeof(uint)) != 0)) {
			bcmerror = BCME_BADADDR;
			goto done;
		}
	}

	if (!capable(CAP_NET_ADMIN)) {
		bcmerror = BCME_EPERM;
		goto done;
	}

	/* Take a backup of ioc.buf and restore it later */
	ioc_buf_user = ioc.buf;

	if (ioc.len > 0) {
		buflen = MIN(ioc.len, DHD_IOCTL_MAXLEN);
		if (!(local_buf = MALLOC(dhd->pub.osh, buflen + 1))) {
			bcmerror = BCME_NOMEM;
			goto done;
		}

		DHD_PERIM_UNLOCK(&dhd->pub);
		if (copy_from_user(local_buf, ioc.buf, buflen)) {
			DHD_PERIM_LOCK(&dhd->pub);
			bcmerror = BCME_BADADDR;
			goto done;
		}
		DHD_PERIM_LOCK(&dhd->pub);

		*((char *)local_buf + buflen) = '\0';

		/* On some platforms, accessing userspace memory through
		 * ioc.buf can cause a kernel panic, so make ioc.buf point
		 * to the kernel-space copy in local_buf instead
		 */
		ioc.buf = local_buf;
	}

#if defined(OEM_ANDROID)
	/* Skip all the non-DHD iovars (wl iovars) after f/w hang */
	if (ioc.driver != DHD_IOCTL_MAGIC && dhd->pub.hang_was_sent) {
		DHD_TRACE(("%s: HANG was sent up earlier\n", __FUNCTION__));
		DHD_OS_WAKE_LOCK_CTRL_TIMEOUT_ENABLE(&dhd->pub, DHD_EVENT_TIMEOUT_MS);
		bcmerror = BCME_DONGLE_DOWN;
		goto done;
	}
#endif /* OEM_ANDROID */

	bcmerror = dhd_ioctl_process(&dhd->pub, ifidx, &ioc, local_buf);

	/* Restore the userspace pointer to ioc.buf */
	ioc.buf = ioc_buf_user;

	if (!bcmerror && buflen && local_buf && ioc.buf) {
		DHD_PERIM_UNLOCK(&dhd->pub);
		if (copy_to_user(ioc.buf, local_buf, buflen))
			bcmerror = -EFAULT;
		DHD_PERIM_LOCK(&dhd->pub);
	}

done:
	if (local_buf)
		MFREE(dhd->pub.osh, local_buf, buflen + 1);

	DHD_PERIM_UNLOCK(&dhd->pub);
	DHD_OS_WAKE_UNLOCK(&dhd->pub);

	return OSL_ERROR(bcmerror);
}
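
/*
 * For reference: user space reaches dhd_ioctl_entry() via SIOCDEVPRIVATE with
 * ifr_data pointing at a wl_ioctl_t that is immediately followed by one extra
 * "driver" word selecting DHD (DHD_IOCTL_MAGIC) versus WL handling; this is
 * why two copy_from_user() calls are needed above, and it is the layout used
 * by the wl/dhd command-line utilities.
 */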

#if defined(WL_CFG80211) && defined(SUPPORT_DEEP_SLEEP)
/* Flag to indicate whether we distinguish the power-off policy when the
 * user sets the menu "Keep Wi-Fi on during sleep" to "Never"
 */
int trigger_deep_sleep = 0;
#endif /* WL_CFG80211 && SUPPORT_DEEP_SLEEP */

#ifdef FIX_CPU_MIN_CLOCK
static int dhd_init_cpufreq_fix(dhd_info_t *dhd)
{
	if (dhd) {
#if defined(OEM_ANDROID)
		mutex_init(&dhd->cpufreq_fix);
#endif /* OEM_ANDROID */
		dhd->cpufreq_fix_status = FALSE;
	}
	return 0;
}

static void dhd_fix_cpu_freq(dhd_info_t *dhd)
{
#if defined(OEM_ANDROID)
	mutex_lock(&dhd->cpufreq_fix);
#endif /* OEM_ANDROID */
	if (dhd && !dhd->cpufreq_fix_status) {
		pm_qos_add_request(&dhd->dhd_cpu_qos, PM_QOS_CPU_FREQ_MIN, 300000);
#ifdef FIX_BUS_MIN_CLOCK
		pm_qos_add_request(&dhd->dhd_bus_qos, PM_QOS_BUS_THROUGHPUT, 400000);
#endif /* FIX_BUS_MIN_CLOCK */
		DHD_ERROR(("pm_qos_add_requests called\n"));

		dhd->cpufreq_fix_status = TRUE;
	}
#if defined(OEM_ANDROID)
	mutex_unlock(&dhd->cpufreq_fix);
#endif /* OEM_ANDROID */
}

static void dhd_rollback_cpu_freq(dhd_info_t *dhd)
{
#if defined(OEM_ANDROID)
	mutex_lock(&dhd->cpufreq_fix);
#endif /* OEM_ANDROID */
	if (dhd && dhd->cpufreq_fix_status != TRUE) {
#if defined(OEM_ANDROID)
		mutex_unlock(&dhd->cpufreq_fix);
#endif /* OEM_ANDROID */
		return;
	}

	pm_qos_remove_request(&dhd->dhd_cpu_qos);
#ifdef FIX_BUS_MIN_CLOCK
	pm_qos_remove_request(&dhd->dhd_bus_qos);
#endif /* FIX_BUS_MIN_CLOCK */
	DHD_ERROR(("pm_qos_remove_requests called\n"));

	dhd->cpufreq_fix_status = FALSE;
#if defined(OEM_ANDROID)
	mutex_unlock(&dhd->cpufreq_fix);
#endif /* OEM_ANDROID */
}
#endif /* FIX_CPU_MIN_CLOCK */
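
/*
 * The fix/rollback pair above must stay balanced around HOSTAP operation:
 * cpufreq_fix_status is the only guard against adding or removing the same
 * PM QoS requests twice, so callers are expected to invoke dhd_fix_cpu_freq()
 * and dhd_rollback_cpu_freq() strictly in pairs, as dhd_open() and dhd_stop()
 * below do.
 */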

#ifdef DHD_PCIE_NATIVE_RUNTIMEPM
static int
dhd_ioctl_entry_wrapper(struct net_device *net, struct ifreq *ifr, int cmd)
{
	int error;
	dhd_info_t *dhd = DHD_DEV_INFO(net);

	if (atomic_read(&dhd->pub.block_bus))
		return -EHOSTDOWN;

	if (pm_runtime_get_sync(dhd_bus_to_dev(dhd->pub.bus)) < 0)
		return BCME_ERROR;

	error = dhd_ioctl_entry(net, ifr, cmd);

	pm_runtime_mark_last_busy(dhd_bus_to_dev(dhd->pub.bus));
	pm_runtime_put_autosuspend(dhd_bus_to_dev(dhd->pub.bus));

	return error;
}
#endif /* DHD_PCIE_NATIVE_RUNTIMEPM */
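
/*
 * The wrapper above follows the standard Linux runtime-PM usage pattern:
 * pm_runtime_get_sync() resumes the PCIe device before the ioctl is handled,
 * and pm_runtime_mark_last_busy() plus pm_runtime_put_autosuspend() let the
 * device drop back to suspend once it has been idle for the autosuspend
 * delay.
 */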

static int
dhd_stop(struct net_device *net)
{
	int ifidx = 0;
	bool skip_reset = false;
#if defined(WL_CFG80211)
	unsigned long flags = 0;
#ifdef WL_STATIC_IF
	struct bcm_cfg80211 *cfg = wl_get_cfg(net);
#endif /* WL_STATIC_IF */
#endif /* WL_CFG80211 */
	dhd_info_t *dhd = DHD_DEV_INFO(net);

	DHD_OS_WAKE_LOCK(&dhd->pub);
	DHD_PERIM_LOCK(&dhd->pub);
	DHD_TRACE(("%s: Enter %p\n", __FUNCTION__, net));
	dhd->pub.rxcnt_timeout = 0;
	dhd->pub.txcnt_timeout = 0;

#ifdef BCMPCIE
	dhd->pub.d3ackcnt_timeout = 0;
#endif /* BCMPCIE */

	mutex_lock(&dhd->pub.ndev_op_sync);

	if (dhd->pub.up == 0) {
		goto exit;
	}
#if defined(DHD_HANG_SEND_UP_TEST)
	if (dhd->pub.req_hang_type) {
		DHD_ERROR(("%s, Clear HANG test request 0x%x\n",
			__FUNCTION__, dhd->pub.req_hang_type));
		dhd->pub.req_hang_type = 0;
	}
#endif /* DHD_HANG_SEND_UP_TEST */

	dhd_if_flush_sta(DHD_DEV_IFP(net));

#ifdef FIX_CPU_MIN_CLOCK
	if (dhd_get_fw_mode(dhd) == DHD_FLAG_HOSTAP_MODE)
		dhd_rollback_cpu_freq(dhd);
#endif /* FIX_CPU_MIN_CLOCK */

	ifidx = dhd_net2idx(dhd, net);
	BCM_REFERENCE(ifidx);

	DHD_ERROR(("%s: ######### dhd_stop called for ifidx=%d #########\n", __FUNCTION__, ifidx));

#if defined(WL_STATIC_IF) && defined(WL_CFG80211)
	/* If static if is operational, don't reset the chip */
	if (IS_CFG80211_STATIC_IF_ACTIVE(cfg)) {
		DHD_ERROR(("static if operational. skip chip reset.\n"));
		skip_reset = true;
		wl_cfg80211_sta_ifdown(net);
		goto exit;
	}
#endif /* WL_STATIC_IF && WL_CFG80211 */

#if defined(WL_VIF_SUPPORT)
	if (vif_num > 0) {
		DHD_ERROR(("virtual if operational. skip chip reset.\n"));
		skip_reset = true;
		wl_cfg80211_sta_ifdown(net);
		goto exit;
	}
#endif /* WL_VIF_SUPPORT */

	DHD_ERROR(("%s: making dhdpub up FALSE\n", __FUNCTION__));
#ifdef WL_CFG80211
	/* Disable Runtime PM before interface down */
	DHD_DISABLE_RUNTIME_PM(&dhd->pub);

	spin_lock_irqsave(&dhd->pub.up_lock, flags);
	dhd->pub.up = 0;
	spin_unlock_irqrestore(&dhd->pub.up_lock, flags);
#else
	dhd->pub.up = 0;
#endif /* WL_CFG80211 */

#ifdef WL_CFG80211
	if (ifidx == 0) {
		dhd_if_t *ifp;
		wl_cfg80211_down(net);

		ifp = dhd->iflist[0];
		/*
		 * For CFG80211: Clean up all the left over virtual interfaces
		 * when the primary Interface is brought down. [ifconfig wlan0 down]
		 */
		if (!dhd_download_fw_on_driverload) {
			DHD_STATLOG_CTRL(&dhd->pub, ST(WLAN_POWER_OFF), ifidx, 0);
			if ((dhd->dhd_state & DHD_ATTACH_STATE_ADD_IF) &&
				(dhd->dhd_state & DHD_ATTACH_STATE_CFG80211)) {
				int i;

#ifdef WL_CFG80211_P2P_DEV_IF
				wl_cfg80211_del_p2p_wdev(net);
#endif /* WL_CFG80211_P2P_DEV_IF */
#ifdef DHD_4WAYM4_FAIL_DISCONNECT
				dhd_cleanup_m4_state_work(&dhd->pub, ifidx);
#endif /* DHD_4WAYM4_FAIL_DISCONNECT */
#ifdef DHD_PKTDUMP_ROAM
				dhd_dump_pkt_clear(&dhd->pub);
#endif /* DHD_PKTDUMP_ROAM */

				dhd_net_if_lock_local(dhd);
				for (i = 1; i < DHD_MAX_IFS; i++)
					dhd_remove_if(&dhd->pub, i, FALSE);

				if (ifp && ifp->net) {
					dhd_if_del_sta_list(ifp);
				}
#ifdef ARP_OFFLOAD_SUPPORT
				if (dhd_inetaddr_notifier_registered) {
					dhd_inetaddr_notifier_registered = FALSE;
					unregister_inetaddr_notifier(&dhd_inetaddr_notifier);
				}
#endif /* ARP_OFFLOAD_SUPPORT */
#if defined(CONFIG_IPV6) && defined(IPV6_NDO_SUPPORT)
				if (dhd_inet6addr_notifier_registered) {
					dhd_inet6addr_notifier_registered = FALSE;
					unregister_inet6addr_notifier(&dhd_inet6addr_notifier);
				}
#endif /* CONFIG_IPV6 && IPV6_NDO_SUPPORT */
				dhd_net_if_unlock_local(dhd);
			}
			cancel_work_sync(dhd->dhd_deferred_wq);

#ifdef SHOW_LOGTRACE
			/* Wait till event logs work/kthread finishes */
			dhd_cancel_logtrace_process_sync(dhd);
#endif /* SHOW_LOGTRACE */

#if defined(DHD_LB_RXP)
			__skb_queue_purge(&dhd->rx_pend_queue);
#endif /* DHD_LB_RXP */

#if defined(DHD_LB_TXP)
			skb_queue_purge(&dhd->tx_pend_queue);
#endif /* DHD_LB_TXP */
		}

#if defined(ARGOS_NOTIFY_CB)
		argos_register_notifier_deinit();
#endif /* ARGOS_NOTIFY_CB */
#ifdef DHDTCPACK_SUPPRESS
		dhd_tcpack_suppress_set(&dhd->pub, TCPACK_SUP_OFF);
#endif /* DHDTCPACK_SUPPRESS */
#if defined(DHD_LB_RXP)
		if (ifp && ifp->net == dhd->rx_napi_netdev) {
			DHD_INFO(("%s napi<%p> disabled ifp->net<%p,%s>\n",
				__FUNCTION__, &dhd->rx_napi_struct, net, net->name));
			skb_queue_purge(&dhd->rx_napi_queue);
			napi_disable(&dhd->rx_napi_struct);
			netif_napi_del(&dhd->rx_napi_struct);
			dhd->rx_napi_netdev = NULL;
		}
#endif /* DHD_LB_RXP */
	}
#endif /* WL_CFG80211 */

	DHD_SSSR_DUMP_DEINIT(&dhd->pub);

#ifdef PROP_TXSTATUS
	dhd_wlfc_cleanup(&dhd->pub, NULL, 0);
#endif /* PROP_TXSTATUS */
#ifdef SHOW_LOGTRACE
	if (!dhd_download_fw_on_driverload) {
		/* Release the skbs from queue for WLC_E_TRACE event */
		dhd_event_logtrace_flush_queue(&dhd->pub);
		if (dhd->dhd_state & DHD_ATTACH_LOGTRACE_INIT) {
			if (dhd->event_data.fmts) {
				MFREE(dhd->pub.osh, dhd->event_data.fmts,
					dhd->event_data.fmts_size);
				dhd->event_data.fmts = NULL;
			}
			if (dhd->event_data.raw_fmts) {
				MFREE(dhd->pub.osh, dhd->event_data.raw_fmts,
					dhd->event_data.raw_fmts_size);
				dhd->event_data.raw_fmts = NULL;
			}
			if (dhd->event_data.raw_sstr) {
				MFREE(dhd->pub.osh, dhd->event_data.raw_sstr,
					dhd->event_data.raw_sstr_size);
				dhd->event_data.raw_sstr = NULL;
			}
			if (dhd->event_data.rom_raw_sstr) {
				MFREE(dhd->pub.osh, dhd->event_data.rom_raw_sstr,
					dhd->event_data.rom_raw_sstr_size);
				dhd->event_data.rom_raw_sstr = NULL;
			}
			dhd->dhd_state &= ~DHD_ATTACH_LOGTRACE_INIT;
		}
	}
#endif /* SHOW_LOGTRACE */
#ifdef APF
	dhd_dev_apf_delete_filter(net);
#endif /* APF */

	/* Stop the protocol module */
	dhd_prot_stop(&dhd->pub);

	OLD_MOD_DEC_USE_COUNT;
exit:
	if (skip_reset == false) {
#if defined(WL_CFG80211) && defined(OEM_ANDROID)
		if (ifidx == 0 && !dhd_download_fw_on_driverload) {
#if defined(BT_OVER_SDIO)
			dhd_bus_put(&dhd->pub, WLAN_MODULE);
			wl_android_set_wifi_on_flag(FALSE);
#else
			wl_android_wifi_off(net, TRUE);
#endif /* BT_OVER_SDIO */
		}
#ifdef SUPPORT_DEEP_SLEEP
		else {
			/* CSP#505233: Flag to indicate whether we distinguish
			 * the power-off policy when the user sets the menu
			 * "Keep Wi-Fi on during sleep" to "Never"
			 */
			if (trigger_deep_sleep) {
				dhd_deepsleep(net, 1);
				trigger_deep_sleep = 0;
			}
		}
#endif /* SUPPORT_DEEP_SLEEP */
#endif /* defined(WL_CFG80211) && defined(OEM_ANDROID) */
		dhd->pub.hang_was_sent = 0;
		dhd->pub.hang_was_pending = 0;

		/* Clear country spec for built-in type driver */
		if (!dhd_download_fw_on_driverload) {
			dhd->pub.dhd_cspec.country_abbrev[0] = 0x00;
			dhd->pub.dhd_cspec.rev = 0;
			dhd->pub.dhd_cspec.ccode[0] = 0x00;
		}

#ifdef BCMDBGFS
		dhd_dbgfs_remove();
#endif /* BCMDBGFS */
	}

	DHD_PERIM_UNLOCK(&dhd->pub);
	DHD_OS_WAKE_UNLOCK(&dhd->pub);

	/* Destroy wakelock */
	if (!dhd_download_fw_on_driverload &&
		(dhd->dhd_state & DHD_ATTACH_STATE_WAKELOCKS_INIT) &&
		(skip_reset == false)) {
		DHD_OS_WAKE_LOCK_DESTROY(dhd);
		dhd->dhd_state &= ~DHD_ATTACH_STATE_WAKELOCKS_INIT;
	}
	mutex_unlock(&dhd->pub.ndev_op_sync);
	return 0;
}

#if defined(OEM_ANDROID) && defined(WL_CFG80211) && (defined(USE_INITIAL_2G_SCAN) || \
	defined(USE_INITIAL_SHORT_DWELL_TIME))
extern bool g_first_broadcast_scan;
#endif /* OEM_ANDROID && WL_CFG80211 && (USE_INITIAL_2G_SCAN || USE_INITIAL_SHORT_DWELL_TIME) */

#ifdef WL11U
static int dhd_interworking_enable(dhd_pub_t *dhd)
{
	uint32 enable = true;
	int ret = BCME_OK;

	ret = dhd_iovar(dhd, 0, "interworking", (char *)&enable, sizeof(enable), NULL, 0, TRUE);
	if (ret < 0) {
		DHD_ERROR(("%s: enabling interworking failed, ret=%d\n", __FUNCTION__, ret));
	}

	return ret;
}
#endif /* WL11U */

static int
dhd_open(struct net_device *net)
{
	dhd_info_t *dhd = DHD_DEV_INFO(net);
#ifdef TOE
	uint32 toe_ol;
#endif /* TOE */
	int ifidx;
	int32 ret = 0;

#if defined(PREVENT_REOPEN_DURING_HANG)
	/* WAR: prevent dhd_open from being called abnormally in quick
	 * succession after a hang event
	 */
	if (dhd->pub.hang_was_sent == 1) {
		DHD_ERROR(("%s: HANG was sent up earlier\n", __FUNCTION__));
		/* Force the WLAN interface down in case dhd_stop() was not
		 * called from the upper layer when the HANG event was triggered.
		 */
		if (!dhd_download_fw_on_driverload && dhd->pub.up == 1) {
			DHD_ERROR(("%s: WLAN interface is not brought down\n", __FUNCTION__));
			dhd_stop(net);
		} else {
			return -1;
		}
	}
#endif /* PREVENT_REOPEN_DURING_HANG */

	mutex_lock(&dhd->pub.ndev_op_sync);

	if (dhd->pub.up == 1) {
		/* already up */
		DHD_ERROR(("Primary net_device is already up\n"));
		mutex_unlock(&dhd->pub.ndev_op_sync);
		return BCME_OK;
	}

	if (!dhd_download_fw_on_driverload) {
		if (!dhd_driver_init_done) {
			DHD_ERROR(("%s: WLAN driver is not initialized\n", __FUNCTION__));
			mutex_unlock(&dhd->pub.ndev_op_sync);
			return -1;
		}
		/* Init wakelock */
		if (!(dhd->dhd_state & DHD_ATTACH_STATE_WAKELOCKS_INIT)) {
			DHD_OS_WAKE_LOCK_INIT(dhd);
			dhd->dhd_state |= DHD_ATTACH_STATE_WAKELOCKS_INIT;
		}

#ifdef SHOW_LOGTRACE
		skb_queue_head_init(&dhd->evt_trace_queue);

		if (!(dhd->dhd_state & DHD_ATTACH_LOGTRACE_INIT)) {
			ret = dhd_init_logstrs_array(dhd->pub.osh, &dhd->event_data);
			if (ret == BCME_OK) {
				dhd_init_static_strs_array(dhd->pub.osh, &dhd->event_data,
					st_str_file_path, map_file_path);
				dhd_init_static_strs_array(dhd->pub.osh, &dhd->event_data,
					rom_st_str_file_path, rom_map_file_path);
				dhd->dhd_state |= DHD_ATTACH_LOGTRACE_INIT;
			}
		}
#endif /* SHOW_LOGTRACE */
	}

#if defined(MULTIPLE_SUPPLICANT)
#if defined(OEM_ANDROID) && defined(BCMSDIO)
	if (mutex_is_locked(&_dhd_sdio_mutex_lock_) != 0) {
		DHD_ERROR(("%s : dhd_open: call dev open before insmod complete!\n", __FUNCTION__));
	}
	mutex_lock(&_dhd_sdio_mutex_lock_);
#endif /* OEM_ANDROID && BCMSDIO */
#endif /* MULTIPLE_SUPPLICANT */

	DHD_OS_WAKE_LOCK(&dhd->pub);
	DHD_PERIM_LOCK(&dhd->pub);
	dhd->pub.dongle_trap_occured = 0;
	dhd->pub.hang_was_sent = 0;
	dhd->pub.hang_was_pending = 0;
	dhd->pub.hang_reason = 0;
	dhd->pub.iovar_timeout_occured = 0;
#ifdef PCIE_FULL_DONGLE
	dhd->pub.d3ack_timeout_occured = 0;
	dhd->pub.livelock_occured = 0;
	dhd->pub.pktid_audit_failed = 0;
#endif /* PCIE_FULL_DONGLE */
	dhd->pub.iface_op_failed = 0;
	dhd->pub.scan_timeout_occurred = 0;
	dhd->pub.scan_busy_occurred = 0;
	dhd->pub.smmu_fault_occurred = 0;

#ifdef DHD_LOSSLESS_ROAMING
	dhd->pub.dequeue_prec_map = ALLPRIO;
#endif /* DHD_LOSSLESS_ROAMING */

#if defined(OEM_ANDROID) && !defined(WL_CFG80211)
	/*
	 * Force start if ifconfig_up gets called before START command
	 * We keep WEXT's wl_control_wl_start to provide backward compatibility
	 * This should be removed in the future
	 */
	ret = wl_control_wl_start(net);
	if (ret != 0) {
		DHD_ERROR(("%s: failed with code %d\n", __FUNCTION__, ret));
		ret = -1;
		goto exit;
	}
#endif /* defined(OEM_ANDROID) && !defined(WL_CFG80211) */

	ifidx = dhd_net2idx(dhd, net);
	DHD_TRACE(("%s: ifidx %d\n", __FUNCTION__, ifidx));

	if (ifidx < 0) {
		DHD_ERROR(("%s: Error: called with invalid IF\n", __FUNCTION__));
		ret = -1;
		goto exit;
	}

	if (!dhd->iflist[ifidx]) {
		DHD_ERROR(("%s: Error: called when IF already deleted\n", __FUNCTION__));
		ret = -1;
		goto exit;
	}

	if (ifidx == 0) {
		atomic_set(&dhd->pend_8021x_cnt, 0);
#if defined(WL_CFG80211) && defined(OEM_ANDROID)
		if (!dhd_download_fw_on_driverload) {
			DHD_ERROR(("\n%s\n", dhd_version));
			DHD_STATLOG_CTRL(&dhd->pub, ST(WLAN_POWER_ON), ifidx, 0);
#if defined(USE_INITIAL_2G_SCAN) || defined(USE_INITIAL_SHORT_DWELL_TIME)
			g_first_broadcast_scan = TRUE;
#endif /* USE_INITIAL_2G_SCAN || USE_INITIAL_SHORT_DWELL_TIME */
#ifdef SHOW_LOGTRACE
			/* dhd_cancel_logtrace_process_sync is called in dhd_stop
			 * for built-in models. The logtrace kthread must be started
			 * again before calling wifi on, because once wifi is on,
			 * EDL can fire at any moment, and if the kthread is not
			 * active, FW event logs will not be available.
			 */
			if (dhd_reinit_logtrace_process(dhd) != BCME_OK) {
				goto exit;
			}
#endif /* SHOW_LOGTRACE */
#if defined(BT_OVER_SDIO)
			ret = dhd_bus_get(&dhd->pub, WLAN_MODULE);
			wl_android_set_wifi_on_flag(TRUE);
#else
			ret = wl_android_wifi_on(net);
#endif /* BT_OVER_SDIO */
			if (ret != 0) {
				DHD_ERROR(("%s : wl_android_wifi_on failed (%d)\n",
					__FUNCTION__, ret));
				ret = -1;
				goto exit;
			}
		}
#ifdef SUPPORT_DEEP_SLEEP
		else {
			/* Flag to indicate whether we distinguish the power-off
			 * policy when the user sets the menu "Keep Wi-Fi on
			 * during sleep" to "Never"
			 */
			if (trigger_deep_sleep) {
#if defined(USE_INITIAL_2G_SCAN) || defined(USE_INITIAL_SHORT_DWELL_TIME)
				g_first_broadcast_scan = TRUE;
#endif /* USE_INITIAL_2G_SCAN || USE_INITIAL_SHORT_DWELL_TIME */
				dhd_deepsleep(net, 0);
				trigger_deep_sleep = 0;
			}
		}
#endif /* SUPPORT_DEEP_SLEEP */
#ifdef FIX_CPU_MIN_CLOCK
		if (dhd_get_fw_mode(dhd) == DHD_FLAG_HOSTAP_MODE) {
			dhd_init_cpufreq_fix(dhd);
			dhd_fix_cpu_freq(dhd);
		}
#endif /* FIX_CPU_MIN_CLOCK */
#endif /* defined(WL_CFG80211) && defined(OEM_ANDROID) */

		if (dhd->pub.busstate != DHD_BUS_DATA) {
			/* try to bring up bus */
			DHD_PERIM_UNLOCK(&dhd->pub);

#ifdef DHD_PCIE_NATIVE_RUNTIMEPM
			if (pm_runtime_get_sync(dhd_bus_to_dev(dhd->pub.bus)) >= 0) {
				ret = dhd_bus_start(&dhd->pub);
				pm_runtime_mark_last_busy(dhd_bus_to_dev(dhd->pub.bus));
				pm_runtime_put_autosuspend(dhd_bus_to_dev(dhd->pub.bus));
			}
#else
			ret = dhd_bus_start(&dhd->pub);
#endif /* DHD_PCIE_NATIVE_RUNTIMEPM */

			DHD_PERIM_LOCK(&dhd->pub);
			if (ret) {
				DHD_ERROR(("%s: failed with code %d\n", __FUNCTION__, ret));
				ret = -1;
				goto exit;
			}
		}

#ifdef BT_OVER_SDIO
		if (dhd->pub.is_bt_recovery_required) {
			DHD_ERROR(("%s: Send Hang Notification 2 to BT\n", __FUNCTION__));
			bcmsdh_btsdio_process_dhd_hang_notification(TRUE);
		}
		dhd->pub.is_bt_recovery_required = FALSE;
#endif /* BT_OVER_SDIO */

		/* dhd_sync_with_dongle has been called in dhd_bus_start or wl_android_wifi_on */
		memcpy(net->dev_addr, dhd->pub.mac.octet, ETHER_ADDR_LEN);

#ifdef TOE
		/* Get current TOE mode from dongle */
		if (dhd_toe_get(dhd, ifidx, &toe_ol) >= 0 && (toe_ol & TOE_TX_CSUM_OL) != 0) {
			dhd->iflist[ifidx]->net->features |= NETIF_F_IP_CSUM;
		} else {
			dhd->iflist[ifidx]->net->features &= ~NETIF_F_IP_CSUM;
		}
#endif /* TOE */

#if defined(DHD_LB_RXP)
		__skb_queue_head_init(&dhd->rx_pend_queue);
		if (dhd->rx_napi_netdev == NULL) {
			dhd->rx_napi_netdev = dhd->iflist[ifidx]->net;
			memset(&dhd->rx_napi_struct, 0, sizeof(struct napi_struct));
			netif_napi_add(dhd->rx_napi_netdev, &dhd->rx_napi_struct,
				dhd_napi_poll, dhd_napi_weight);
			DHD_INFO(("%s napi<%p> enabled ifp->net<%p,%s>\n",
				__FUNCTION__, &dhd->rx_napi_struct, net, net->name));
			napi_enable(&dhd->rx_napi_struct);
			DHD_INFO(("%s load balance init rx_napi_struct\n", __FUNCTION__));
			skb_queue_head_init(&dhd->rx_napi_queue);
		} /* rx_napi_netdev == NULL */
#endif /* DHD_LB_RXP */

#if defined(DHD_LB_TXP)
		/* Use the variant that uses locks */
		skb_queue_head_init(&dhd->tx_pend_queue);
#endif /* DHD_LB_TXP */

#if defined(WL_CFG80211)
		if (unlikely(wl_cfg80211_up(net))) {
			DHD_ERROR(("%s: failed to bring up cfg80211\n", __FUNCTION__));
			ret = -1;
			goto exit;
		}
		if (!dhd_download_fw_on_driverload) {
#ifdef ARP_OFFLOAD_SUPPORT
			dhd->pend_ipaddr = 0;
			if (!dhd_inetaddr_notifier_registered) {
				dhd_inetaddr_notifier_registered = TRUE;
				register_inetaddr_notifier(&dhd_inetaddr_notifier);
			}
#endif /* ARP_OFFLOAD_SUPPORT */
#if defined(CONFIG_IPV6) && defined(IPV6_NDO_SUPPORT)
			if (!dhd_inet6addr_notifier_registered) {
				dhd_inet6addr_notifier_registered = TRUE;
				register_inet6addr_notifier(&dhd_inet6addr_notifier);
			}
#endif /* CONFIG_IPV6 && IPV6_NDO_SUPPORT */
		}

#if defined(DHD_CONTROL_PCIE_ASPM_WIFI_TURNON)
		dhd_bus_aspm_enable_rc_ep(dhd->pub.bus, TRUE);
#endif /* DHD_CONTROL_PCIE_ASPM_WIFI_TURNON */
#if defined(DHD_CONTROL_PCIE_CPUCORE_WIFI_TURNON)
		dhd_irq_set_affinity(&dhd->pub, cpumask_of(0));
#endif /* DHD_CONTROL_PCIE_CPUCORE_WIFI_TURNON */
#ifdef DHD_LB_IRQSET
		dhd_irq_set_affinity(&dhd->pub, dhd->cpumask_primary);
#endif /* DHD_LB_IRQSET */
#if defined(ARGOS_NOTIFY_CB)
		argos_register_notifier_init(net);
#endif /* ARGOS_NOTIFY_CB */
#if defined(BCMPCIE) && defined(DHDTCPACK_SUPPRESS)
		/* Both branches currently select TCPACK_SUP_OFF; the split on
		 * SET_RPS_CPUS is kept to preserve the original structure.
		 */
#if defined(SET_RPS_CPUS)
		dhd_tcpack_suppress_set(&dhd->pub, TCPACK_SUP_OFF);
#else
		dhd_tcpack_suppress_set(&dhd->pub, TCPACK_SUP_OFF);
#endif /* SET_RPS_CPUS */
#endif /* BCMPCIE && DHDTCPACK_SUPPRESS */
#if defined(NUM_SCB_MAX_PROBE)
		dhd_set_scb_probe(&dhd->pub);
#endif /* NUM_SCB_MAX_PROBE */
#endif /* WL_CFG80211 */
	}

	dhd->pub.up = 1;

	if (wl_event_enable) {
		/* For wl utility to receive events */
		dhd->pub.wl_event_enabled = true;
	} else {
		dhd->pub.wl_event_enabled = false;
	}

	if (logtrace_pkt_sendup) {
		/* For any daemon to receive logtrace */
		dhd->pub.logtrace_pkt_sendup = true;
	} else {
		dhd->pub.logtrace_pkt_sendup = false;
	}

	OLD_MOD_INC_USE_COUNT;

#ifdef BCMDBGFS
	dhd_dbgfs_init(&dhd->pub);
#endif /* BCMDBGFS */

exit:
	mutex_unlock(&dhd->pub.ndev_op_sync);
	if (ret) {
		dhd_stop(net);
	}

	DHD_PERIM_UNLOCK(&dhd->pub);
	DHD_OS_WAKE_UNLOCK(&dhd->pub);

#if defined(MULTIPLE_SUPPLICANT)
#if defined(OEM_ANDROID) && defined(BCMSDIO)
	mutex_unlock(&_dhd_sdio_mutex_lock_);
#endif /* OEM_ANDROID && BCMSDIO */
#endif /* MULTIPLE_SUPPLICANT */

	return ret;
}

/*
 * ndo_open handler for the primary ndev
 */
static int
dhd_pri_open(struct net_device *net)
{
	s32 ret;

	ret = dhd_open(net);
	if (unlikely(ret)) {
		DHD_ERROR(("Failed to open primary dev ret %d\n", ret));
		return ret;
	}

	/* Allow transmit calls */
	netif_start_queue(net);
	DHD_ERROR(("[%s] tx queue started\n", net->name));
	return ret;
}

/*
 * ndo_stop handler for the primary ndev
 */
static int
dhd_pri_stop(struct net_device *net)
{
	s32 ret;

	/* stop tx queue */
	netif_stop_queue(net);
	DHD_ERROR(("[%s] tx queue stopped\n", net->name));

	ret = dhd_stop(net);
	if (unlikely(ret)) {
		DHD_ERROR(("dhd_stop failed: %d\n", ret));
		return ret;
	}

	return ret;
}
  6349. #if defined(WL_STATIC_IF) && defined(WL_CFG80211)
  6350. /*
  6351. * For static I/Fs, the firmware interface init
  6352. * is done from the IFF_UP context.
  6353. */
  6354. static int
  6355. dhd_static_if_open(struct net_device *net)
  6356. {
  6357. s32 ret = 0;
  6358. struct bcm_cfg80211 *cfg;
  6359. struct net_device *primary_netdev = NULL;
  6360. cfg = wl_get_cfg(net);
  6361. primary_netdev = bcmcfg_to_prmry_ndev(cfg);
  6362. if (!IS_CFG80211_STATIC_IF(cfg, net)) {
  6363. DHD_TRACE(("non-static interface (%s)..do nothing \n", net->name));
  6364. ret = BCME_OK;
  6365. goto done;
  6366. }
  6367. DHD_INFO(("[%s][STATIC_IF] Enter \n", net->name));
  6368. /* Ensure fw is initialized. If it is already initialized,
  6369. * dhd_open will return success.
  6370. */
  6371. ret = dhd_open(primary_netdev);
  6372. if (unlikely(ret)) {
  6373. DHD_ERROR(("Failed to open primary dev ret %d\n", ret));
  6374. goto done;
  6375. }
  6376. ret = wl_cfg80211_static_if_open(net);
  6377. if (!ret) {
  6378. /* Allow transmit calls */
  6379. netif_start_queue(net);
  6380. }
  6381. done:
  6382. return ret;
  6383. }
  6384. static int
  6385. dhd_static_if_stop(struct net_device *net)
  6386. {
  6387. struct bcm_cfg80211 *cfg;
  6388. struct net_device *primary_netdev = NULL;
  6389. int ret = BCME_OK;
  6390. dhd_info_t *dhd = DHD_DEV_INFO(net);
  6391. DHD_INFO(("[%s][STATIC_IF] Enter \n", net->name));
  6392. /* Ensure queue is disabled */
  6393. netif_tx_disable(net);
  6394. cfg = wl_get_cfg(net);
  6395. if (!IS_CFG80211_STATIC_IF(cfg, net)) {
  6396. DHD_TRACE(("non-static interface (%s)..do nothing \n", net->name));
  6397. return BCME_OK;
  6398. }
  6399. ret = wl_cfg80211_static_if_close(net);
  6400. if (dhd->pub.up == 0) {
  6401. /* If fw is down, return */
  6402. DHD_ERROR(("fw down\n"));
  6403. return BCME_OK;
  6404. }
  6405. /* If STA iface is not in operational, invoke dhd_close from this
  6406. * context.
  6407. */
  6408. primary_netdev = bcmcfg_to_prmry_ndev(cfg);
  6409. if (!(primary_netdev->flags & IFF_UP)) {
  6410. ret = dhd_stop(primary_netdev);
  6411. } else {
  6412. DHD_ERROR(("Skipped dhd_stop, as sta is operational\n"));
  6413. }
  6414. return ret;
  6415. }
  6416. #endif /* WL_STATIC_IF && WL_CF80211 */

int dhd_do_driver_init(struct net_device *net)
{
	dhd_info_t *dhd = NULL;

	if (!net) {
		DHD_ERROR(("Primary Interface not initialized \n"));
		return -EINVAL;
	}
#ifdef MULTIPLE_SUPPLICANT
#if defined(OEM_ANDROID) && defined(BCMSDIO)
	if (mutex_is_locked(&_dhd_sdio_mutex_lock_) != 0) {
		DHD_ERROR(("%s : dhdsdio_probe is already running!\n", __FUNCTION__));
		return 0;
	}
#endif /* OEM_ANDROID && BCMSDIO */
#endif /* MULTIPLE_SUPPLICANT */
	dhd = DHD_DEV_INFO(net);
	/* If the driver is already initialized, there is nothing to do */
	if (dhd->pub.busstate == DHD_BUS_DATA) {
		DHD_TRACE(("Driver already initialized. Nothing to do\n"));
		return 0;
	}
	if (dhd_open(net) < 0) {
		DHD_ERROR(("Driver Init Failed \n"));
		return -1;
	}
	return 0;
}

int
dhd_event_ifadd(dhd_info_t *dhdinfo, wl_event_data_if_t *ifevent, char *name, uint8 *mac)
{
#ifdef WL_CFG80211
	if (wl_cfg80211_notify_ifadd(dhd_linux_get_primary_netdev(&dhdinfo->pub),
		ifevent->ifidx, name, mac, ifevent->bssidx, ifevent->role) == BCME_OK)
		return BCME_OK;
#endif // endif
	/* handle IF event caused by wl commands, SoftAP, WEXT and
	 * anything else. This has to be done asynchronously otherwise
	 * DPC will be blocked (and iovars will timeout as DPC has no chance
	 * to read the response back)
	 */
	if (ifevent->ifidx > 0) {
		dhd_if_event_t *if_event = MALLOC(dhdinfo->pub.osh, sizeof(dhd_if_event_t));
		if (if_event == NULL) {
			DHD_ERROR(("dhd_event_ifadd: Failed MALLOC, malloced %d bytes",
				MALLOCED(dhdinfo->pub.osh)));
			return BCME_NOMEM;
		}
		memcpy(&if_event->event, ifevent, sizeof(if_event->event));
		memcpy(if_event->mac, mac, ETHER_ADDR_LEN);
		strncpy(if_event->name, name, IFNAMSIZ);
		if_event->name[IFNAMSIZ - 1] = '\0';
		dhd_deferred_schedule_work(dhdinfo->dhd_deferred_wq, (void *)if_event,
			DHD_WQ_WORK_IF_ADD, dhd_ifadd_event_handler, DHD_WQ_WORK_PRIORITY_LOW);
	}
	return BCME_OK;
}

int
dhd_event_ifdel(dhd_info_t *dhdinfo, wl_event_data_if_t *ifevent, char *name, uint8 *mac)
{
	dhd_if_event_t *if_event;

#ifdef WL_CFG80211
	if (wl_cfg80211_notify_ifdel(dhd_linux_get_primary_netdev(&dhdinfo->pub),
		ifevent->ifidx, name, mac, ifevent->bssidx) == BCME_OK)
		return BCME_OK;
#endif /* WL_CFG80211 */
	/* handle IF event caused by wl commands, SoftAP, WEXT and
	 * anything else
	 */
	if_event = MALLOC(dhdinfo->pub.osh, sizeof(dhd_if_event_t));
	if (if_event == NULL) {
		DHD_ERROR(("dhd_event_ifdel: malloc failed for if_event, malloced %d bytes",
			MALLOCED(dhdinfo->pub.osh)));
		return BCME_NOMEM;
	}
	memcpy(&if_event->event, ifevent, sizeof(if_event->event));
	memcpy(if_event->mac, mac, ETHER_ADDR_LEN);
	strncpy(if_event->name, name, IFNAMSIZ);
	if_event->name[IFNAMSIZ - 1] = '\0';
	dhd_deferred_schedule_work(dhdinfo->dhd_deferred_wq, (void *)if_event, DHD_WQ_WORK_IF_DEL,
		dhd_ifdel_event_handler, DHD_WQ_WORK_PRIORITY_LOW);
	return BCME_OK;
}

int
dhd_event_ifchange(dhd_info_t *dhdinfo, wl_event_data_if_t *ifevent, char *name, uint8 *mac)
{
#ifdef WL_CFG80211
	wl_cfg80211_notify_ifchange(dhd_linux_get_primary_netdev(&dhdinfo->pub),
		ifevent->ifidx, name, mac, ifevent->bssidx);
#endif /* WL_CFG80211 */
	return BCME_OK;
}

#ifdef WL_NATOE
/* Handler to update natoe info and bind with new subscriptions if the config changed */
static void
dhd_natoe_ct_event_handler(void *handle, void *event_info, u8 event)
{
	dhd_info_t *dhd = handle;
	wl_event_data_natoe_t *natoe = event_info;
	dhd_nfct_info_t *nfct;

	if (event != DHD_WQ_WORK_NATOE_EVENT) {
		DHD_ERROR(("%s: unexpected event \n", __FUNCTION__));
		return;
	}
	if (!dhd) {
		DHD_ERROR(("%s: dhd info not available \n", __FUNCTION__));
		return;
	}
	/* Dereference dhd only after the NULL check above */
	nfct = dhd->pub.nfct;
	if (natoe->natoe_active && natoe->sta_ip && natoe->start_port && natoe->end_port &&
		(natoe->start_port < natoe->end_port)) {
		/* Rebind subscriptions to start receiving notifications from groups */
		if (dhd_ct_nl_bind(nfct, nfct->subscriptions) < 0) {
			dhd_ct_close(nfct);
		}
		dhd_ct_send_dump_req(nfct);
	} else if (!natoe->natoe_active) {
		/* Rebind subscriptions to stop receiving notifications from groups */
		if (dhd_ct_nl_bind(nfct, CT_NULL_SUBSCRIPTION) < 0) {
			dhd_ct_close(nfct);
		}
	}
}

/* When a NATOE enable/disable event is received, we have to bind to new NL
 * subscriptions. Schedule a workq to switch out of tasklet context, as the
 * bind call may sleep in the handler.
 */
int
dhd_natoe_ct_event(dhd_pub_t *dhd, char *data)
{
	wl_event_data_natoe_t *event_data = (wl_event_data_natoe_t *)data;

	if (dhd->nfct) {
		wl_event_data_natoe_t *natoe = dhd->nfct->natoe_info;
		uint8 prev_enable = natoe->natoe_active;
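		/* natoe_info is presumably shared with the netlink receive path,
		 * so take the snapshot under nfct_lock with bottom halves disabled.
		 */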
		spin_lock_bh(&dhd->nfct_lock);
		memcpy(natoe, event_data, sizeof(*event_data));
		spin_unlock_bh(&dhd->nfct_lock);
		if (prev_enable != event_data->natoe_active) {
			dhd_deferred_schedule_work(dhd->info->dhd_deferred_wq,
				(void *)natoe, DHD_WQ_WORK_NATOE_EVENT,
				dhd_natoe_ct_event_handler, DHD_WQ_WORK_PRIORITY_LOW);
		}
		return BCME_OK;
	}
	DHD_ERROR(("%s ERROR NFCT is not enabled \n", __FUNCTION__));
	return BCME_ERROR;
}

/* Handler to send natoe ioctl to dongle */
static void
dhd_natoe_ct_ioctl_handler(void *handle, void *event_info, uint8 event)
{
	dhd_info_t *dhd = handle;
	dhd_ct_ioc_t *ct_ioc = event_info;

	if (event != DHD_WQ_WORK_NATOE_IOCTL) {
		DHD_ERROR(("%s: unexpected event \n", __FUNCTION__));
		return;
	}
	if (!dhd) {
		DHD_ERROR(("%s: dhd info not available \n", __FUNCTION__));
		return;
	}
	if (dhd_natoe_prep_send_exception_port_ioctl(&dhd->pub, ct_ioc) < 0) {
		DHD_ERROR(("%s: Error in sending NATOE IOCTL \n", __FUNCTION__));
	}
}

/* When a Netlink message contains port collision info, the info must be sent
 * to the dongle FW. Schedule a workq for the natoe_ct ioctl to switch context
 * out of softirq/tasklet.
 */
void
dhd_natoe_ct_ioctl_schedule_work(dhd_pub_t *dhd, dhd_ct_ioc_t *ioc)
{
	dhd_deferred_schedule_work(dhd->info->dhd_deferred_wq, (void *)ioc,
		DHD_WQ_WORK_NATOE_IOCTL, dhd_natoe_ct_ioctl_handler,
		DHD_WQ_WORK_PRIORITY_HIGH);
}
#endif /* WL_NATOE */

/* This API maps ndev to ifp inclusive of static IFs */
static dhd_if_t *
dhd_get_ifp_by_ndev(dhd_pub_t *dhdp, struct net_device *ndev)
{
	dhd_if_t *ifp = NULL;
#ifdef WL_STATIC_IF
	u32 ifidx = (DHD_MAX_IFS + DHD_MAX_STATIC_IFS - 1);
#else
	u32 ifidx = (DHD_MAX_IFS - 1);
#endif /* WL_STATIC_IF */
	dhd_info_t *dhdinfo = (dhd_info_t *)dhdp->info;
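
	/* Scan from the highest slot down so that static interfaces, which
	 * live in slots above DHD_MAX_IFS, are matched as well.
	 */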
	do {
		ifp = dhdinfo->iflist[ifidx];
		if (ifp && (ifp->net == ndev)) {
			DHD_TRACE(("match found for %s. ifidx:%d\n",
				ndev->name, ifidx));
			return ifp;
		}
	} while (ifidx--);

	DHD_ERROR(("no entry found for %s\n", ndev->name));
	return NULL;
}

bool
dhd_is_static_ndev(dhd_pub_t *dhdp, struct net_device *ndev)
{
	dhd_if_t *ifp = NULL;

	if (!dhdp || !ndev) {
		DHD_ERROR(("wrong input\n"));
		ASSERT(0);
		return false;
	}
	ifp = dhd_get_ifp_by_ndev(dhdp, ndev);
	return (ifp && (ifp->static_if == true));
}

#ifdef WL_STATIC_IF
/* In some cases the actual ifidx, bssidx and dngl_name are not known while
 * registering an I/F (e.g. the static i/f case). This function allows
 * updating them once they are known.
 */
s32
dhd_update_iflist_info(dhd_pub_t *dhdp, struct net_device *ndev, int ifidx,
	uint8 *mac, uint8 bssidx, const char *dngl_name, int if_state)
{
	dhd_info_t *dhdinfo = (dhd_info_t *)dhdp->info;
	dhd_if_t *ifp, *ifp_new;
	s32 cur_idx;
	dhd_dev_priv_t *dev_priv;

	DHD_TRACE(("[STATIC_IF] update ifinfo for state:%d ifidx:%d\n",
		if_state, ifidx));
	ASSERT(dhdinfo && (ifidx < (DHD_MAX_IFS + DHD_MAX_STATIC_IFS)));

	if ((ifp = dhd_get_ifp_by_ndev(dhdp, ndev)) == NULL) {
		return -ENODEV;
	}
	cur_idx = ifp->idx;
	if (if_state == NDEV_STATE_OS_IF_CREATED) {
		/* mark static if */
		ifp->static_if = TRUE;
		return BCME_OK;
	}
	ifp_new = dhdinfo->iflist[ifidx];
	if (ifp_new && (ifp_new != ifp)) {
		/* There should be only one entry for a given ifidx. */
		DHD_ERROR(("ifp ptr already present for ifidx:%d\n", ifidx));
		ASSERT(0);
		dhdp->hang_reason = HANG_REASON_IFACE_ADD_FAILURE;
		net_os_send_hang_message(ifp->net);
		return -EINVAL;
	}
	/* For the static if delete case, clean up the if before the ifidx update */
	if ((if_state == NDEV_STATE_FW_IF_DELETED) ||
		(if_state == NDEV_STATE_FW_IF_FAILED)) {
		dhd_cleanup_if(ifp->net);
		dev_priv = DHD_DEV_PRIV(ndev);
		dev_priv->ifidx = ifidx;
	}
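	/* ifp currently sits at cur_idx (assigned when the OS interface was
	 * created); migrate it to the firmware-assigned ifidx slot.
	 */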
	/* update the iflist ifidx slot with cached info */
	dhdinfo->iflist[ifidx] = ifp;
	dhdinfo->iflist[cur_idx] = NULL;
	/* update the values */
	ifp->idx = ifidx;
	ifp->bssidx = bssidx;
	if (if_state == NDEV_STATE_FW_IF_CREATED) {
		dhd_dev_priv_save(ndev, dhdinfo, ifp, ifidx);
		/* initialize the dongle provided if name */
		if (dngl_name) {
			strlcpy(ifp->dngl_name, dngl_name, IFNAMSIZ);
		} else if (ndev->name[0] != '\0') {
			strlcpy(ifp->dngl_name, ndev->name, IFNAMSIZ);
		}
		if (mac != NULL) {
			(void)memcpy_s(&ifp->mac_addr, ETHER_ADDR_LEN, mac, ETHER_ADDR_LEN);
		}
	}
	DHD_INFO(("[STATIC_IF] ifp ptr updated for ifidx:%d curidx:%d if_state:%d\n",
		ifidx, cur_idx, if_state));
	return BCME_OK;
}
#endif /* WL_STATIC_IF */

/* unregister and free the existing net_device interface (if any) in iflist
 * and allocate a new one. the slot is reused. this function does NOT register
 * the new interface to the linux kernel. dhd_register_if does that job.
 */
struct net_device *
dhd_allocate_if(dhd_pub_t *dhdpub, int ifidx, const char *name,
	uint8 *mac, uint8 bssidx, bool need_rtnl_lock, const char *dngl_name)
{
	dhd_info_t *dhdinfo = (dhd_info_t *)dhdpub->info;
	dhd_if_t *ifp;

	ASSERT(dhdinfo && (ifidx < (DHD_MAX_IFS + DHD_MAX_STATIC_IFS)));

	ifp = dhdinfo->iflist[ifidx];
	if (ifp != NULL) {
		if (ifp->net != NULL) {
			DHD_ERROR(("%s: free existing IF %s ifidx:%d \n",
				__FUNCTION__, ifp->net->name, ifidx));
			if (ifidx == 0) {
				/* For primary ifidx (0), there shouldn't be
				 * any netdev present already.
				 */
				DHD_ERROR(("Primary ifidx populated already\n"));
				ASSERT(0);
				return NULL;
			}
			dhd_dev_priv_clear(ifp->net); /* clear net_device private */
			/* in unregister_netdev case, the interface gets freed by net->destructor
			 * (which is set to free_netdev)
			 */
			if (ifp->net->reg_state == NETREG_UNINITIALIZED) {
				free_netdev(ifp->net);
			} else {
				netif_stop_queue(ifp->net);
				if (need_rtnl_lock)
					unregister_netdev(ifp->net);
				else
					unregister_netdevice(ifp->net);
			}
			ifp->net = NULL;
		}
	} else {
		ifp = MALLOC(dhdinfo->pub.osh, sizeof(dhd_if_t));
		if (ifp == NULL) {
			DHD_ERROR(("%s: OOM - dhd_if_t(%zu)\n", __FUNCTION__, sizeof(dhd_if_t)));
			return NULL;
		}
	}
	memset(ifp, 0, sizeof(dhd_if_t));
	ifp->info = dhdinfo;
	ifp->idx = ifidx;
	ifp->bssidx = bssidx;
#ifdef DHD_MCAST_REGEN
	ifp->mcast_regen_bss_enable = FALSE;
#endif // endif
	/* set rx_pkt_chainable to TRUE at alloc time */
	ifp->rx_pkt_chainable = TRUE;
	if (mac != NULL)
		memcpy(&ifp->mac_addr, mac, ETHER_ADDR_LEN);
	/* Allocate etherdev, including space for private structure */
	ifp->net = alloc_etherdev(DHD_DEV_PRIV_SIZE);
	if (ifp->net == NULL) {
		DHD_ERROR(("%s: OOM - alloc_etherdev(%zu)\n", __FUNCTION__,
			(size_t)DHD_DEV_PRIV_SIZE));
		goto fail;
	}
	/* Setup the dhd interface's netdevice private structure. */
	dhd_dev_priv_save(ifp->net, dhdinfo, ifp, ifidx);
	if (name && name[0]) {
		strncpy(ifp->net->name, name, IFNAMSIZ);
		ifp->net->name[IFNAMSIZ - 1] = '\0';
	}
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 11, 9))
#define IFP_NET_DESTRUCTOR ifp->net->priv_destructor
#else
#define IFP_NET_DESTRUCTOR ifp->net->destructor
#endif // endif
#ifdef WL_CFG80211
	if (ifidx == 0) {
		IFP_NET_DESTRUCTOR = free_netdev;
	} else {
		IFP_NET_DESTRUCTOR = dhd_netdev_free;
	}
#else
	IFP_NET_DESTRUCTOR = free_netdev;
#endif /* WL_CFG80211 */
	strncpy(ifp->name, ifp->net->name, IFNAMSIZ);
	ifp->name[IFNAMSIZ - 1] = '\0';
	dhdinfo->iflist[ifidx] = ifp;
	/* initialize the dongle provided if name */
	if (dngl_name) {
		strncpy(ifp->dngl_name, dngl_name, IFNAMSIZ);
	} else if (name) {
		strncpy(ifp->dngl_name, name, IFNAMSIZ);
	}
	/* Initialize STA info list */
	INIT_LIST_HEAD(&ifp->sta_list);
	DHD_IF_STA_LIST_LOCK_INIT(ifp);
#ifdef DHD_L2_FILTER
	ifp->phnd_arp_table = init_l2_filter_arp_table(dhdpub->osh);
	ifp->parp_allnode = TRUE;
#endif /* DHD_L2_FILTER */
	DHD_CUMM_CTR_INIT(&ifp->cumm_ctr);
#ifdef DHD_4WAYM4_FAIL_DISCONNECT
	INIT_DELAYED_WORK(&ifp->m4state_work, dhd_m4_state_handler);
#endif /* DHD_4WAYM4_FAIL_DISCONNECT */
#ifdef DHD_POST_EAPOL_M1_AFTER_ROAM_EVT
	ifp->recv_reassoc_evt = FALSE;
	ifp->post_roam_evt = FALSE;
#endif /* DHD_POST_EAPOL_M1_AFTER_ROAM_EVT */
#ifdef DHDTCPSYNC_FLOOD_BLK
	INIT_WORK(&ifp->blk_tsfl_work, dhd_blk_tsfl_handler);
	dhd_reset_tcpsync_info_by_ifp(ifp);
#endif /* DHDTCPSYNC_FLOOD_BLK */

	return ifp->net;

fail:
	if (ifp != NULL) {
		if (ifp->net != NULL) {
#if defined(DHD_LB_RXP) && defined(PCIE_FULL_DONGLE)
			if (ifp->net == dhdinfo->rx_napi_netdev) {
				napi_disable(&dhdinfo->rx_napi_struct);
				netif_napi_del(&dhdinfo->rx_napi_struct);
				skb_queue_purge(&dhdinfo->rx_napi_queue);
				dhdinfo->rx_napi_netdev = NULL;
			}
#endif /* DHD_LB_RXP && PCIE_FULL_DONGLE */
			dhd_dev_priv_clear(ifp->net);
			free_netdev(ifp->net);
			ifp->net = NULL;
		}
		MFREE(dhdinfo->pub.osh, ifp, sizeof(*ifp));
		ifp = NULL;
	}
	dhdinfo->iflist[ifidx] = NULL;
	return NULL;
}

static void
dhd_cleanup_ifp(dhd_pub_t *dhdp, dhd_if_t *ifp)
{
#ifdef PCIE_FULL_DONGLE
	s32 ifidx = 0;
	if_flow_lkup_t *if_flow_lkup = (if_flow_lkup_t *)dhdp->if_flow_lkup;
#endif /* PCIE_FULL_DONGLE */

	if (ifp != NULL) {
		if ((ifp->idx < 0) || (ifp->idx >= DHD_MAX_IFS)) {
			DHD_ERROR(("Wrong idx:%d \n", ifp->idx));
			ASSERT(0);
			return;
		}
#ifdef DHD_L2_FILTER
		bcm_l2_filter_arp_table_update(dhdp->osh, ifp->phnd_arp_table, TRUE,
			NULL, FALSE, dhdp->tickcnt);
		deinit_l2_filter_arp_table(dhdp->osh, ifp->phnd_arp_table);
		ifp->phnd_arp_table = NULL;
#endif /* DHD_L2_FILTER */
		dhd_if_del_sta_list(ifp);
#ifdef PCIE_FULL_DONGLE
		/* Delete flowrings of virtual interface */
		ifidx = ifp->idx;
		if ((ifidx != 0) && (if_flow_lkup[ifidx].role != WLC_E_IF_ROLE_AP)) {
			dhd_flow_rings_delete(dhdp, ifidx);
		}
#endif /* PCIE_FULL_DONGLE */
	}
}

void
dhd_cleanup_if(struct net_device *net)
{
	dhd_info_t *dhdinfo = DHD_DEV_INFO(net);
	dhd_pub_t *dhdp = &dhdinfo->pub;
	dhd_if_t *ifp;

	if (!(ifp = dhd_get_ifp_by_ndev(dhdp, net)) ||
		(ifp->idx >= DHD_MAX_IFS)) {
		DHD_ERROR(("Wrong ifidx: %p, %d\n", ifp, ifp ? ifp->idx : -1));
		ASSERT(0);
		return;
	}
	dhd_cleanup_ifp(dhdp, ifp);
}

/* unregister and free the net_device interface associated with the indexed
 * slot, also free the slot memory and set the slot pointer to NULL
 */
#define DHD_TX_COMPLETION_TIMEOUT 5000
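/* DHD_TX_COMPLETION_TIMEOUT is in milliseconds; it is converted with
 * msecs_to_jiffies() before the wait below.
 */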
int
dhd_remove_if(dhd_pub_t *dhdpub, int ifidx, bool need_rtnl_lock)
{
	dhd_info_t *dhdinfo = (dhd_info_t *)dhdpub->info;
	dhd_if_t *ifp;
	unsigned long flags;
	long timeout;

	ifp = dhdinfo->iflist[ifidx];
	if (ifp != NULL) {
#ifdef DHD_4WAYM4_FAIL_DISCONNECT
		cancel_delayed_work_sync(&ifp->m4state_work);
#endif /* DHD_4WAYM4_FAIL_DISCONNECT */
#ifdef DHDTCPSYNC_FLOOD_BLK
		cancel_work_sync(&ifp->blk_tsfl_work);
#endif /* DHDTCPSYNC_FLOOD_BLK */
#ifdef WL_STATIC_IF
		/* static IF will be handled in detach */
		if (ifp->static_if) {
			DHD_TRACE(("Skip del iface for static interface\n"));
			return BCME_OK;
		}
#endif /* WL_STATIC_IF */
		if (ifp->net != NULL) {
			DHD_ERROR(("deleting interface '%s' idx %d\n", ifp->net->name, ifp->idx));
			DHD_GENERAL_LOCK(dhdpub, flags);
			ifp->del_in_progress = true;
			DHD_GENERAL_UNLOCK(dhdpub, flags);
			/* If TX is in progress, hold the if del */
			if (DHD_IF_IS_TX_ACTIVE(ifp)) {
				DHD_INFO(("TX in progress. Waiting for it to complete.\n"));
				timeout = wait_event_timeout(dhdpub->tx_completion_wait,
					((ifp->tx_paths_active & DHD_TX_CONTEXT_MASK) == 0),
					msecs_to_jiffies(DHD_TX_COMPLETION_TIMEOUT));
				if (!timeout) {
					/* Tx completion timeout. Attempt proceeding ahead */
					DHD_ERROR(("Tx completion timed out!\n"));
					ASSERT(0);
				}
			} else {
				DHD_TRACE(("No outstanding TX!\n"));
			}
			dhdinfo->iflist[ifidx] = NULL;
			/* in unregister_netdev case, the interface gets freed by net->destructor
			 * (which is set to free_netdev)
			 */
			if (ifp->net->reg_state == NETREG_UNINITIALIZED) {
				free_netdev(ifp->net);
			} else {
				netif_tx_disable(ifp->net);
#if defined(SET_RPS_CPUS)
				custom_rps_map_clear(ifp->net->_rx);
#if (defined(DHDTCPACK_SUPPRESS) && defined(BCMPCIE))
				dhd_tcpack_suppress_set(dhdpub, TCPACK_SUP_OFF);
#endif /* DHDTCPACK_SUPPRESS && BCMPCIE */
#endif /* SET_RPS_CPUS */
				if (need_rtnl_lock)
					unregister_netdev(ifp->net);
				else
					unregister_netdevice(ifp->net);
			}
			ifp->net = NULL;
			DHD_GENERAL_LOCK(dhdpub, flags);
			ifp->del_in_progress = false;
			DHD_GENERAL_UNLOCK(dhdpub, flags);
		}
		dhd_cleanup_ifp(dhdpub, ifp);
		DHD_CUMM_CTR_INIT(&ifp->cumm_ctr);
		MFREE(dhdinfo->pub.osh, ifp, sizeof(*ifp));
		ifp = NULL;
	}
	return BCME_OK;
}
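
/* Two net_device_ops tables are used: dhd_ops_pri serves the primary
 * interface (ifidx 0) while dhd_ops_virt serves virtual/static interfaces.
 */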
static struct net_device_ops dhd_ops_pri = {
	.ndo_open = dhd_pri_open,
	.ndo_stop = dhd_pri_stop,
	.ndo_get_stats = dhd_get_stats,
#ifdef DHD_PCIE_NATIVE_RUNTIMEPM
	.ndo_do_ioctl = dhd_ioctl_entry_wrapper,
	.ndo_start_xmit = dhd_start_xmit_wrapper,
#else
	.ndo_do_ioctl = dhd_ioctl_entry,
	.ndo_start_xmit = dhd_start_xmit,
#endif /* DHD_PCIE_NATIVE_RUNTIMEPM */
	.ndo_set_mac_address = dhd_set_mac_address,
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 2, 0))
	.ndo_set_rx_mode = dhd_set_multicast_list,
#else
	.ndo_set_multicast_list = dhd_set_multicast_list,
#endif // endif
};

static struct net_device_ops dhd_ops_virt = {
#if defined(WL_CFG80211) && defined(WL_STATIC_IF)
	.ndo_open = dhd_static_if_open,
	.ndo_stop = dhd_static_if_stop,
#endif // endif
	.ndo_get_stats = dhd_get_stats,
#ifdef DHD_PCIE_NATIVE_RUNTIMEPM
	.ndo_do_ioctl = dhd_ioctl_entry_wrapper,
	.ndo_start_xmit = dhd_start_xmit_wrapper,
#else
	.ndo_do_ioctl = dhd_ioctl_entry,
	.ndo_start_xmit = dhd_start_xmit,
#endif /* DHD_PCIE_NATIVE_RUNTIMEPM */
	.ndo_set_mac_address = dhd_set_mac_address,
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 2, 0))
	.ndo_set_rx_mode = dhd_set_multicast_list,
#else
	.ndo_set_multicast_list = dhd_set_multicast_list,
#endif // endif
};

int
dhd_os_write_file_posn(void *fp, unsigned long *posn, void *buf,
	unsigned long buflen)
{
	loff_t wr_posn = *posn;

	if (!fp || !buf || buflen == 0)
		return -1;
	if (vfs_write((struct file *)fp, buf, buflen, &wr_posn) < 0)
		return -1;
	*posn = wr_posn;
	return 0;
}

#ifdef SHOW_LOGTRACE
int
dhd_os_read_file(void *file, char *buf, uint32 size)
{
	struct file *filep = (struct file *)file;

	if (!file || !buf)
		return -1;
	return vfs_read(filep, buf, size, &filep->f_pos);
}

int
dhd_os_seek_file(void *file, int64 offset)
{
	struct file *filep = (struct file *)file;

	if (!file)
		return -1;
	/* offset can be -ve */
	filep->f_pos = filep->f_pos + offset;
	return 0;
}

static int
dhd_init_logstrs_array(osl_t *osh, dhd_event_log_t *temp)
{
	struct file *filep = NULL;
	struct kstat stat;
	mm_segment_t fs;
	char *raw_fmts = NULL;
	int logstrs_size = 0;
	int error = 0;

	fs = get_fs();
	set_fs(KERNEL_DS);
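	/* get_fs()/set_fs(KERNEL_DS) lets vfs_read()/vfs_stat() accept
	 * kernel-space buffers here; kernels that dropped set_fs() would
	 * need kernel_read() instead.
	 */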
	filep = filp_open(logstrs_path, O_RDONLY, 0);
	if (IS_ERR(filep)) {
		DHD_ERROR_NO_HW4(("%s: Failed to open the file %s \n", __FUNCTION__, logstrs_path));
		goto fail;
	}
	error = vfs_stat(logstrs_path, &stat);
	if (error) {
		DHD_ERROR_NO_HW4(("%s: Failed to stat file %s \n", __FUNCTION__, logstrs_path));
		goto fail;
	}
	logstrs_size = (int) stat.size;
	if (logstrs_size == 0) {
		DHD_ERROR(("%s: return as logstrs_size is 0\n", __FUNCTION__));
		goto fail1;
	}
	raw_fmts = MALLOC(osh, logstrs_size);
	if (raw_fmts == NULL) {
		DHD_ERROR(("%s: Failed to allocate memory \n", __FUNCTION__));
		goto fail;
	}
	if (vfs_read(filep, raw_fmts, logstrs_size, &filep->f_pos) != logstrs_size) {
		DHD_ERROR_NO_HW4(("%s: Failed to read file %s\n", __FUNCTION__, logstrs_path));
		goto fail;
	}
	if (dhd_parse_logstrs_file(osh, raw_fmts, logstrs_size, temp)
		== BCME_OK) {
		filp_close(filep, NULL);
		set_fs(fs);
		return BCME_OK;
	}

fail:
	if (raw_fmts) {
		MFREE(osh, raw_fmts, logstrs_size);
		raw_fmts = NULL;
	}
fail1:
	if (!IS_ERR(filep))
		filp_close(filep, NULL);
	set_fs(fs);
	temp->fmts = NULL;
	return BCME_ERROR;
}

static int
dhd_read_map(osl_t *osh, char *fname, uint32 *ramstart, uint32 *rodata_start,
	uint32 *rodata_end)
{
	struct file *filep = NULL;
	mm_segment_t fs;
	int err = BCME_ERROR;

	if (fname == NULL) {
		DHD_ERROR(("%s: ERROR fname is NULL \n", __FUNCTION__));
		return BCME_ERROR;
	}
	fs = get_fs();
	set_fs(KERNEL_DS);
	filep = filp_open(fname, O_RDONLY, 0);
	if (IS_ERR(filep)) {
		DHD_ERROR_NO_HW4(("%s: Failed to open %s \n", __FUNCTION__, fname));
		goto fail;
	}
	if ((err = dhd_parse_map_file(osh, filep, ramstart,
		rodata_start, rodata_end)) < 0)
		goto fail;

fail:
	if (!IS_ERR(filep))
		filp_close(filep, NULL);
	set_fs(fs);
	return err;
}

static int
dhd_init_static_strs_array(osl_t *osh, dhd_event_log_t *temp, char *str_file, char *map_file)
{
	struct file *filep = NULL;
	mm_segment_t fs;
	char *raw_fmts = NULL;
	uint32 logstrs_size = 0;
	int error = 0;
	uint32 ramstart = 0;
	uint32 rodata_start = 0;
	uint32 rodata_end = 0;
	uint32 logfilebase = 0;

	error = dhd_read_map(osh, map_file, &ramstart, &rodata_start, &rodata_end);
	if (error != BCME_OK) {
		DHD_ERROR(("readmap Error!! \n"));
		/* map read failed; do not attempt event log string parsing */
		if (strstr(str_file, ram_file_str) != NULL) {
			temp->raw_sstr = NULL;
		} else if (strstr(str_file, rom_file_str) != NULL) {
			temp->rom_raw_sstr = NULL;
		}
		return error;
	}
	DHD_ERROR(("ramstart: 0x%x, rodata_start: 0x%x, rodata_end:0x%x\n",
		ramstart, rodata_start, rodata_end));
	fs = get_fs();
	set_fs(KERNEL_DS);
	filep = filp_open(str_file, O_RDONLY, 0);
	if (IS_ERR(filep)) {
		DHD_ERROR(("%s: Failed to open the file %s \n", __FUNCTION__, str_file));
		goto fail;
	}
	/* Full file size is huge. Just read the required part */
	logstrs_size = rodata_end - rodata_start;
	logfilebase = rodata_start - ramstart;
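	/* The image is linked at ramstart, so the rodata section sits at file
	 * offset (rodata_start - ramstart).
	 */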
	if (logstrs_size == 0) {
		DHD_ERROR(("%s: return as logstrs_size is 0\n", __FUNCTION__));
		goto fail1;
	}
	raw_fmts = MALLOC(osh, logstrs_size);
	if (raw_fmts == NULL) {
		DHD_ERROR(("%s: Failed to allocate raw_fmts memory \n", __FUNCTION__));
		goto fail;
	}
	error = generic_file_llseek(filep, logfilebase, SEEK_SET);
	if (error < 0) {
		DHD_ERROR(("%s: %s llseek failed %d \n", __FUNCTION__, str_file, error));
		goto fail;
	}
	error = vfs_read(filep, raw_fmts, logstrs_size, (&filep->f_pos));
	if (error != logstrs_size) {
		DHD_ERROR(("%s: %s read failed %d \n", __FUNCTION__, str_file, error));
		goto fail;
	}
	if (strstr(str_file, ram_file_str) != NULL) {
		temp->raw_sstr = raw_fmts;
		temp->raw_sstr_size = logstrs_size;
		temp->rodata_start = rodata_start;
		temp->rodata_end = rodata_end;
	} else if (strstr(str_file, rom_file_str) != NULL) {
		temp->rom_raw_sstr = raw_fmts;
		temp->rom_raw_sstr_size = logstrs_size;
		temp->rom_rodata_start = rodata_start;
		temp->rom_rodata_end = rodata_end;
	}
	filp_close(filep, NULL);
	set_fs(fs);
	return BCME_OK;

fail:
	if (raw_fmts) {
		MFREE(osh, raw_fmts, logstrs_size);
		raw_fmts = NULL;
	}
fail1:
	if (!IS_ERR(filep))
		filp_close(filep, NULL);
	set_fs(fs);
	if (strstr(str_file, ram_file_str) != NULL) {
		temp->raw_sstr = NULL;
	} else if (strstr(str_file, rom_file_str) != NULL) {
		temp->rom_raw_sstr = NULL;
	}
	return error;
} /* dhd_init_static_strs_array */
#endif /* SHOW_LOGTRACE */

#ifdef DHD_ERPOM
uint enable_erpom = 0;
module_param(enable_erpom, uint, 0);

int
dhd_wlan_power_off_handler(void *handler, unsigned char reason)
{
	dhd_pub_t *dhdp = (dhd_pub_t *)handler;
	bool dongle_isolation = dhdp->dongle_isolation;

	DHD_ERROR(("%s: WLAN DHD cleanup reason: %d\n", __FUNCTION__, reason));
	if ((reason == BY_BT_DUE_TO_BT) || (reason == BY_BT_DUE_TO_WLAN)) {
#if defined(DHD_FW_COREDUMP)
		/* save core dump to a file */
		if (dhdp->memdump_enabled) {
#ifdef DHD_SSSR_DUMP
			dhdp->collect_sssr = TRUE;
#endif /* DHD_SSSR_DUMP */
			dhdp->memdump_type = DUMP_TYPE_DUE_TO_BT;
			dhd_bus_mem_dump(dhdp);
		}
#endif /* DHD_FW_COREDUMP */
	}
	/* pause data on all the interfaces */
	dhd_bus_stop_queue(dhdp->bus);
	/* The devreset below would perform FLR again; set dongle_isolation to avoid it */
	dhdp->dongle_isolation = TRUE;
	dhd_bus_devreset(dhdp, 1); /* DHD structure cleanup */
	dhdp->dongle_isolation = dongle_isolation; /* Restore the old value */
	return 0;
}

int
dhd_wlan_power_on_handler(void *handler, unsigned char reason)
{
	dhd_pub_t *dhdp = (dhd_pub_t *)handler;
	bool dongle_isolation = dhdp->dongle_isolation;

	DHD_ERROR(("%s: WLAN DHD re-init reason: %d\n", __FUNCTION__, reason));
	/* The devreset below would perform FLR again; set dongle_isolation to avoid it */
	dhdp->dongle_isolation = TRUE;
	dhd_bus_devreset(dhdp, 0); /* DHD structure re-init */
	dhdp->dongle_isolation = dongle_isolation; /* Restore the old value */
	/* resume data on all the interfaces */
	dhd_bus_start_queue(dhdp->bus);
	return 0;
}
#endif /* DHD_ERPOM */

/** Called once for each hardware (dongle) instance that this DHD manages */
dhd_pub_t *
dhd_attach(osl_t *osh, struct dhd_bus *bus, uint bus_hdrlen)
{
	dhd_info_t *dhd = NULL;
	struct net_device *net = NULL;
	char if_name[IFNAMSIZ] = {'\0'};
	uint32 bus_type = -1;
	uint32 bus_num = -1;
	uint32 slot_num = -1;
#ifdef SHOW_LOGTRACE
	int ret;
#endif /* SHOW_LOGTRACE */
#ifdef DHD_ERPOM
	pom_func_handler_t *pom_handler;
#endif /* DHD_ERPOM */
	wifi_adapter_info_t *adapter = NULL;
	dhd_attach_states_t dhd_state = DHD_ATTACH_STATE_INIT;

	DHD_TRACE(("%s: Enter\n", __FUNCTION__));

#ifdef PCIE_FULL_DONGLE
	ASSERT(sizeof(dhd_pkttag_fd_t) <= OSL_PKTTAG_SZ);
	ASSERT(sizeof(dhd_pkttag_fr_t) <= OSL_PKTTAG_SZ);
#endif /* PCIE_FULL_DONGLE */
	/* will implement get_ids for DBUS later */
#if defined(BCMSDIO)
	dhd_bus_get_ids(bus, &bus_type, &bus_num, &slot_num);
#endif // endif
	adapter = dhd_wifi_platform_get_adapter(bus_type, bus_num, slot_num);

	/* Allocate primary dhd_info */
	dhd = wifi_platform_prealloc(adapter, DHD_PREALLOC_DHD_INFO, sizeof(dhd_info_t));
	if (dhd == NULL) {
		dhd = MALLOC(osh, sizeof(dhd_info_t));
		if (dhd == NULL) {
			DHD_ERROR(("%s: OOM - alloc dhd_info\n", __FUNCTION__));
			goto dhd_null_flag;
		}
	}
	memset(dhd, 0, sizeof(dhd_info_t));
	dhd_state |= DHD_ATTACH_STATE_DHD_ALLOC;
	dhd->unit = dhd_found + instance_base; /* do not increment dhd_found, yet */
	dhd->pub.osh = osh;
#ifdef DUMP_IOCTL_IOV_LIST
	dll_init(&(dhd->pub.dump_iovlist_head));
#endif /* DUMP_IOCTL_IOV_LIST */
	dhd->adapter = adapter;
#ifdef BT_OVER_SDIO
	dhd->pub.is_bt_recovery_required = FALSE;
	mutex_init(&dhd->bus_user_lock);
#endif /* BT_OVER_SDIO */
	g_dhd_pub = &dhd->pub;
#ifdef DHD_DEBUG
	dll_init(&(dhd->pub.mw_list_head));
#endif /* DHD_DEBUG */
#ifdef GET_CUSTOM_MAC_ENABLE
	wifi_platform_get_mac_addr(dhd->adapter, dhd->pub.mac.octet);
#endif /* GET_CUSTOM_MAC_ENABLE */
#ifdef CUSTOM_FORCE_NODFS_FLAG
	dhd->pub.dhd_cflags |= WLAN_PLAT_NODFS_FLAG;
	dhd->pub.force_country_change = TRUE;
#endif /* CUSTOM_FORCE_NODFS_FLAG */
#ifdef CUSTOM_COUNTRY_CODE
	get_customized_country_code(dhd->adapter,
		dhd->pub.dhd_cspec.country_abbrev, &dhd->pub.dhd_cspec,
		dhd->pub.dhd_cflags);
#endif /* CUSTOM_COUNTRY_CODE */
	dhd->thr_dpc_ctl.thr_pid = DHD_PID_KT_TL_INVALID;
	dhd->thr_wdt_ctl.thr_pid = DHD_PID_KT_INVALID;
#ifdef DHD_WET
	dhd->pub.wet_info = dhd_get_wet_info(&dhd->pub);
#endif /* DHD_WET */
	/* Initialize thread based operation and lock */
	sema_init(&dhd->sdsem, 1);
	/* Some DHD modules (e.g. cfg80211) configure the operation mode based
	 * on the firmware name. This is indeed a hack but we have to make it
	 * work properly before we have a better solution.
	 */
	dhd_update_fw_nv_path(dhd);
	dhd->pub.pcie_txs_metadata_enable = pcie_txs_metadata_enable;
	/* Link to info module */
	dhd->pub.info = dhd;
	/* Link to bus module */
	dhd->pub.bus = bus;
	dhd->pub.hdrlen = bus_hdrlen;
	dhd->pub.txoff = FALSE;
	/* Set network interface name if it was provided as module parameter */
	if (iface_name[0]) {
		int len;
		char ch;
		strncpy(if_name, iface_name, IFNAMSIZ);
		if_name[IFNAMSIZ - 1] = 0;
		len = strlen(if_name);
		ch = if_name[len - 1];
		if ((ch > '9' || ch < '0') && (len < IFNAMSIZ - 2))
			strncat(if_name, "%d", 2);
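		/* the trailing "%d" lets the kernel pick a unique unit number
		 * when the netdev is registered
		 */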
	}
	/* Passing NULL to dngl_name to ensure host gets if_name in dngl_name member */
	net = dhd_allocate_if(&dhd->pub, 0, if_name, NULL, 0, TRUE, NULL);
	if (net == NULL) {
		goto fail;
	}
	mutex_init(&dhd->pub.ndev_op_sync);
	dhd_state |= DHD_ATTACH_STATE_ADD_IF;
#ifdef DHD_L2_FILTER
	/* initialize the l2_filter_cnt */
	dhd->pub.l2_filter_cnt = 0;
#endif // endif
	net->netdev_ops = NULL;
	mutex_init(&dhd->dhd_iovar_mutex);
	sema_init(&dhd->proto_sem, 1);
#ifdef DHD_ULP
	if (!(dhd_ulp_init(osh, &dhd->pub)))
		goto fail;
#endif /* DHD_ULP */
#if defined(DHD_HANG_SEND_UP_TEST)
	dhd->pub.req_hang_type = 0;
#endif /* DHD_HANG_SEND_UP_TEST */
#ifdef PROP_TXSTATUS
	spin_lock_init(&dhd->wlfc_spinlock);
	dhd->pub.skip_fc = dhd_wlfc_skip_fc;
	dhd->pub.plat_init = dhd_wlfc_plat_init;
	dhd->pub.plat_deinit = dhd_wlfc_plat_deinit;
#ifdef DHD_WLFC_THREAD
	init_waitqueue_head(&dhd->pub.wlfc_wqhead);
	dhd->pub.wlfc_thread = kthread_create(dhd_wlfc_transfer_packets, &dhd->pub, "wlfc-thread");
	if (IS_ERR(dhd->pub.wlfc_thread)) {
		DHD_ERROR(("create wlfc thread failed\n"));
		goto fail;
	} else {
		wake_up_process(dhd->pub.wlfc_thread);
	}
#endif /* DHD_WLFC_THREAD */
#endif /* PROP_TXSTATUS */

	/* Initialize other structure content */
	init_waitqueue_head(&dhd->ioctl_resp_wait);
	init_waitqueue_head(&dhd->d3ack_wait);
	init_waitqueue_head(&dhd->ctrl_wait);
	init_waitqueue_head(&dhd->dhd_bus_busy_state_wait);
	init_waitqueue_head(&dhd->dmaxfer_wait);
	init_waitqueue_head(&dhd->pub.tx_completion_wait);
	dhd->pub.dhd_bus_busy_state = 0;
	/* Initialize the spinlocks */
	spin_lock_init(&dhd->sdlock);
	spin_lock_init(&dhd->txqlock);
	spin_lock_init(&dhd->dhd_lock);
	spin_lock_init(&dhd->rxf_lock);
#ifdef WLTDLS
	spin_lock_init(&dhd->pub.tdls_lock);
#endif /* WLTDLS */
#if defined(RXFRAME_THREAD)
	dhd->rxthread_enabled = TRUE;
#endif /* defined(RXFRAME_THREAD) */
#ifdef DHDTCPACK_SUPPRESS
	spin_lock_init(&dhd->tcpack_lock);
#endif /* DHDTCPACK_SUPPRESS */
	/* Initialize Wakelock stuff */
	spin_lock_init(&dhd->wakelock_spinlock);
	spin_lock_init(&dhd->wakelock_evt_spinlock);
	DHD_OS_WAKE_LOCK_INIT(dhd);
	dhd->wakelock_counter = 0;
	/* wakelocks prevent a system from going into a low power state */
#if defined(CONFIG_PM_WAKELOCKS) || defined(CONFIG_HAS_WAKELOCK)
	wake_lock_init(&dhd->wl_wdwake, WAKE_LOCK_SUSPEND, "wlan_wd_wake");
#endif /* CONFIG_PM_WAKELOCKS || CONFIG_HAS_WAKELOCK */
#if defined(OEM_ANDROID)
	mutex_init(&dhd->dhd_net_if_mutex);
	mutex_init(&dhd->dhd_suspend_mutex);
#if defined(PKT_FILTER_SUPPORT) && defined(APF)
	mutex_init(&dhd->dhd_apf_mutex);
#endif /* PKT_FILTER_SUPPORT && APF */
#endif /* defined(OEM_ANDROID) */
	dhd_state |= DHD_ATTACH_STATE_WAKELOCKS_INIT;

	/* Attach and link in the protocol */
	if (dhd_prot_attach(&dhd->pub) != 0) {
		DHD_ERROR(("dhd_prot_attach failed\n"));
		goto fail;
	}
	dhd_state |= DHD_ATTACH_STATE_PROT_ATTACH;

#ifdef WL_CFG80211
	spin_lock_init(&dhd->pub.up_lock);
	/* Attach and link in the cfg80211 */
	if (unlikely(wl_cfg80211_attach(net, &dhd->pub))) {
		DHD_ERROR(("wl_cfg80211_attach failed\n"));
		goto fail;
	}
	dhd_monitor_init(&dhd->pub);
	dhd_state |= DHD_ATTACH_STATE_CFG80211;
#endif // endif
#if defined(WL_WIRELESS_EXT)
	/* Attach and link in the iw */
	if (!(dhd_state & DHD_ATTACH_STATE_CFG80211)) {
		if (wl_iw_attach(net, (void *)&dhd->pub) != 0) {
			DHD_ERROR(("wl_iw_attach failed\n"));
			goto fail;
		}
		dhd_state |= DHD_ATTACH_STATE_WL_ATTACH;
	}
#endif /* defined(WL_WIRELESS_EXT) */
#ifdef SHOW_LOGTRACE
	ret = dhd_init_logstrs_array(osh, &dhd->event_data);
	if (ret == BCME_OK) {
		dhd_init_static_strs_array(osh, &dhd->event_data, st_str_file_path, map_file_path);
		dhd_init_static_strs_array(osh, &dhd->event_data, rom_st_str_file_path,
			rom_map_file_path);
		dhd_state |= DHD_ATTACH_LOGTRACE_INIT;
	}
#endif /* SHOW_LOGTRACE */
	/* attach debug if support */
	if (dhd_os_dbg_attach(&dhd->pub)) {
		DHD_ERROR(("%s debug module attach failed\n", __FUNCTION__));
		goto fail;
	}
#ifdef DEBUGABILITY
#if defined(SHOW_LOGTRACE) && defined(DBG_RING_LOG_INIT_DEFAULT)
	/* enable verbose ring to support dump_trace_buf */
	dhd_os_start_logging(&dhd->pub, FW_VERBOSE_RING_NAME, 3, 0, 0, 0);
#endif /* SHOW_LOGTRACE && DBG_RING_LOG_INIT_DEFAULT */
#ifdef DBG_PKT_MON
	dhd->pub.dbg->pkt_mon_lock = dhd_os_spin_lock_init(dhd->pub.osh);
#ifdef DBG_PKT_MON_INIT_DEFAULT
	dhd_os_dbg_attach_pkt_monitor(&dhd->pub);
#endif /* DBG_PKT_MON_INIT_DEFAULT */
#endif /* DBG_PKT_MON */
#endif /* DEBUGABILITY */
#ifdef DHD_STATUS_LOGGING
	dhd->pub.statlog = dhd_attach_statlog(&dhd->pub, MAX_STATLOG_ITEM,
		MAX_STATLOG_REQ_ITEM, STATLOG_LOGBUF_LEN);
	if (dhd->pub.statlog == NULL) {
		DHD_ERROR(("%s: alloc statlog failed\n", __FUNCTION__));
	}
#endif /* DHD_STATUS_LOGGING */
#ifdef DHD_LOG_DUMP
	dhd_log_dump_init(&dhd->pub);
#endif /* DHD_LOG_DUMP */
#ifdef DHD_PKTDUMP_ROAM
	dhd_dump_pkt_init(&dhd->pub);
#endif /* DHD_PKTDUMP_ROAM */
#ifdef DHD_PKT_LOGGING
	dhd_os_attach_pktlog(&dhd->pub);
#endif /* DHD_PKT_LOGGING */
#ifdef WL_CFGVENDOR_SEND_HANG_EVENT
	dhd->pub.hang_info = MALLOCZ(osh, VENDOR_SEND_HANG_EXT_INFO_LEN);
	if (dhd->pub.hang_info == NULL) {
		DHD_ERROR(("%s: alloc hang_info failed\n", __FUNCTION__));
	}
#endif /* WL_CFGVENDOR_SEND_HANG_EVENT */
	if (dhd_sta_pool_init(&dhd->pub, DHD_MAX_STA) != BCME_OK) {
		DHD_ERROR(("%s: Initializing %u STAs failed\n", __FUNCTION__, DHD_MAX_STA));
		goto fail;
	}
#ifdef DHD_PCIE_NATIVE_RUNTIMEPM
	dhd->tx_wq = alloc_workqueue("bcmdhd-tx-wq", WQ_HIGHPRI | WQ_UNBOUND | WQ_MEM_RECLAIM, 1);
	if (!dhd->tx_wq) {
		DHD_ERROR(("%s: alloc_workqueue(bcmdhd-tx-wq) failed\n", __FUNCTION__));
		goto fail;
	}
	dhd->rx_wq = alloc_workqueue("bcmdhd-rx-wq", WQ_HIGHPRI | WQ_UNBOUND | WQ_MEM_RECLAIM, 1);
	if (!dhd->rx_wq) {
		DHD_ERROR(("%s: alloc_workqueue(bcmdhd-rx-wq) failed\n", __FUNCTION__));
		destroy_workqueue(dhd->tx_wq);
		dhd->tx_wq = NULL;
		goto fail;
	}
#endif /* DHD_PCIE_NATIVE_RUNTIMEPM */
	/* Set up the watchdog timer */
	init_timer_compat(&dhd->timer, dhd_watchdog, dhd);
	dhd->default_wd_interval = dhd_watchdog_ms;
	if (dhd_watchdog_prio >= 0) {
		/* Initialize watchdog thread */
		PROC_START(dhd_watchdog_thread, dhd, &dhd->thr_wdt_ctl, 0, "dhd_watchdog_thread");
		if (dhd->thr_wdt_ctl.thr_pid < 0) {
			goto fail;
		}
	} else {
		dhd->thr_wdt_ctl.thr_pid = -1;
	}
#ifdef DHD_PCIE_RUNTIMEPM
	/* Set up the runtime PM idle-count timer */
	init_timer_compat(&dhd->rpm_timer, dhd_runtimepm, dhd);
	dhd->rpm_timer_valid = FALSE;
	dhd->thr_rpm_ctl.thr_pid = DHD_PID_KT_INVALID;
	PROC_START(dhd_rpm_state_thread, dhd, &dhd->thr_rpm_ctl, 0, "dhd_rpm_state_thread");
	if (dhd->thr_rpm_ctl.thr_pid < 0) {
		goto fail;
	}
#endif /* DHD_PCIE_RUNTIMEPM */
#ifdef SHOW_LOGTRACE
	skb_queue_head_init(&dhd->evt_trace_queue);
	/* Create ring proc entries */
	dhd_dbg_ring_proc_create(&dhd->pub);
#endif /* SHOW_LOGTRACE */
	/* Set up the bottom half handler */
	if (dhd_dpc_prio >= 0) {
		/* Initialize DPC thread */
		PROC_START(dhd_dpc_thread, dhd, &dhd->thr_dpc_ctl, 0, "dhd_dpc");
		if (dhd->thr_dpc_ctl.thr_pid < 0) {
			goto fail;
		}
	} else {
		/* use tasklet for dpc */
		tasklet_init(&dhd->tasklet, dhd_dpc, (ulong)dhd);
		dhd->thr_dpc_ctl.thr_pid = -1;
	}
	if (dhd->rxthread_enabled) {
		bzero(&dhd->pub.skbbuf[0], sizeof(void *) * MAXSKBPEND);
		/* Initialize RXF thread */
		PROC_START(dhd_rxf_thread, dhd, &dhd->thr_rxf_ctl, 0, "dhd_rxf");
		if (dhd->thr_rxf_ctl.thr_pid < 0) {
			goto fail;
		}
	}
	dhd_state |= DHD_ATTACH_STATE_THREADS_CREATED;
#if defined(CONFIG_PM_SLEEP)
	if (!dhd_pm_notifier_registered) {
		dhd_pm_notifier_registered = TRUE;
		dhd->pm_notifier.notifier_call = dhd_pm_callback;
		dhd->pm_notifier.priority = 10;
		register_pm_notifier(&dhd->pm_notifier);
	}
#endif /* CONFIG_PM_SLEEP */
#if defined(CONFIG_HAS_EARLYSUSPEND) && defined(DHD_USE_EARLYSUSPEND)
	dhd->early_suspend.level = EARLY_SUSPEND_LEVEL_BLANK_SCREEN + 20;
	dhd->early_suspend.suspend = dhd_early_suspend;
	dhd->early_suspend.resume = dhd_late_resume;
	register_early_suspend(&dhd->early_suspend);
	dhd_state |= DHD_ATTACH_STATE_EARLYSUSPEND_DONE;
#endif /* CONFIG_HAS_EARLYSUSPEND && DHD_USE_EARLYSUSPEND */
#ifdef ARP_OFFLOAD_SUPPORT
	dhd->pend_ipaddr = 0;
	if (!dhd_inetaddr_notifier_registered) {
		dhd_inetaddr_notifier_registered = TRUE;
		register_inetaddr_notifier(&dhd_inetaddr_notifier);
	}
#endif /* ARP_OFFLOAD_SUPPORT */
#if defined(CONFIG_IPV6) && defined(IPV6_NDO_SUPPORT)
	if (!dhd_inet6addr_notifier_registered) {
		dhd_inet6addr_notifier_registered = TRUE;
		register_inet6addr_notifier(&dhd_inet6addr_notifier);
	}
#endif /* CONFIG_IPV6 && IPV6_NDO_SUPPORT */
	dhd->dhd_deferred_wq = dhd_deferred_work_init((void *)dhd);
#if defined(OEM_ANDROID)
	INIT_WORK(&dhd->dhd_hang_process_work, dhd_hang_process);
#endif /* OEM_ANDROID */
#ifdef DEBUG_CPU_FREQ
	dhd->new_freq = alloc_percpu(int);
	dhd->freq_trans.notifier_call = dhd_cpufreq_notifier;
	cpufreq_register_notifier(&dhd->freq_trans, CPUFREQ_TRANSITION_NOTIFIER);
#endif // endif
#ifdef DHDTCPACK_SUPPRESS
#ifdef BCMSDIO
	dhd_tcpack_suppress_set(&dhd->pub, TCPACK_SUP_DELAYTX);
#elif defined(BCMPCIE)
	dhd_tcpack_suppress_set(&dhd->pub, TCPACK_SUP_HOLD);
#else
	dhd_tcpack_suppress_set(&dhd->pub, TCPACK_SUP_OFF);
#endif /* BCMSDIO */
#endif /* DHDTCPACK_SUPPRESS */
#if defined(BCM_DNGL_EMBEDIMAGE) || defined(BCM_REQUEST_FW)
#endif /* defined(BCM_DNGL_EMBEDIMAGE) || defined(BCM_REQUEST_FW) */
#ifdef DHD_DEBUG_PAGEALLOC
	register_page_corrupt_cb(dhd_page_corrupt_cb, &dhd->pub);
#endif /* DHD_DEBUG_PAGEALLOC */

#if defined(DHD_LB)
	dhd_lb_set_default_cpus(dhd);
	DHD_LB_STATS_INIT(&dhd->pub);
	/* Initialize the CPU Masks */
	if (dhd_cpumasks_init(dhd) == 0) {
		/* Now we have the current CPU maps, run through candidacy */
		dhd_select_cpu_candidacy(dhd);
		/* Register the callbacks to the CPU Hotplug sub-system */
		dhd_register_cpuhp_callback(dhd);
	} else {
		/*
		 * We are unable to initialize CPU masks, so the candidacy
		 * algorithm won't run, but Load Balancing will still be
		 * honoured based on the CPUs allocated for a given job
		 * statically during init.
		 */
		dhd->cpu_notifier.notifier_call = NULL;
		DHD_ERROR(("%s():dhd_cpumasks_init failed CPUs for JOB would be static\n",
			__FUNCTION__));
	}
#ifdef DHD_LB_TXP
#ifdef DHD_LB_TXP_DEFAULT_ENAB
	/* Turn the feature ON by default */
	atomic_set(&dhd->lb_txp_active, 1);
#else
	/* Turn the feature OFF by default */
	atomic_set(&dhd->lb_txp_active, 0);
#endif /* DHD_LB_TXP_DEFAULT_ENAB */
#endif /* DHD_LB_TXP */
#ifdef DHD_LB_RXP
	/* Turn the feature ON by default */
	atomic_set(&dhd->lb_rxp_active, 1);
#endif /* DHD_LB_RXP */
	/* Initialize the Load Balancing Tasklets and Napi object */
#if defined(DHD_LB_TXC)
	tasklet_init(&dhd->tx_compl_tasklet,
		dhd_lb_tx_compl_handler, (ulong)(&dhd->pub));
	INIT_WORK(&dhd->tx_compl_dispatcher_work, dhd_tx_compl_dispatcher_fn);
	DHD_INFO(("%s load balance init tx_compl_tasklet\n", __FUNCTION__));
#endif /* DHD_LB_TXC */
#if defined(DHD_LB_RXC)
	tasklet_init(&dhd->rx_compl_tasklet,
		dhd_lb_rx_compl_handler, (ulong)(&dhd->pub));
	INIT_WORK(&dhd->rx_compl_dispatcher_work, dhd_rx_compl_dispatcher_fn);
	DHD_INFO(("%s load balance init rx_compl_tasklet\n", __FUNCTION__));
#endif /* DHD_LB_RXC */
#if defined(DHD_LB_RXP)
	__skb_queue_head_init(&dhd->rx_pend_queue);
	skb_queue_head_init(&dhd->rx_napi_queue);
	/* Initialize the work that dispatches NAPI job to a given core */
	INIT_WORK(&dhd->rx_napi_dispatcher_work, dhd_rx_napi_dispatcher_fn);
	DHD_INFO(("%s load balance init rx_napi_queue\n", __FUNCTION__));
#endif /* DHD_LB_RXP */
#if defined(DHD_LB_TXP)
	INIT_WORK(&dhd->tx_dispatcher_work, dhd_tx_dispatcher_work);
	skb_queue_head_init(&dhd->tx_pend_queue);
	/* Initialize the work that dispatches TX job to a given core */
	tasklet_init(&dhd->tx_tasklet,
		dhd_lb_tx_handler, (ulong)(dhd));
	DHD_INFO(("%s load balance init tx_pend_queue\n", __FUNCTION__));
#endif /* DHD_LB_TXP */
	dhd_state |= DHD_ATTACH_STATE_LB_ATTACH_DONE;
#endif /* DHD_LB */

#if defined(DNGL_AXI_ERROR_LOGGING) && defined(DHD_USE_WQ_FOR_DNGL_AXI_ERROR)
	INIT_WORK(&dhd->axi_error_dispatcher_work, dhd_axi_error_dispatcher_fn);
#endif /* DNGL_AXI_ERROR_LOGGING && DHD_USE_WQ_FOR_DNGL_AXI_ERROR */
#if defined(BCMPCIE)
	dhd->pub.extended_trap_data = MALLOCZ(osh, BCMPCIE_EXT_TRAP_DATA_MAXLEN);
	if (dhd->pub.extended_trap_data == NULL) {
		DHD_ERROR(("%s: Failed to alloc extended_trap_data\n", __FUNCTION__));
	}
#ifdef DNGL_AXI_ERROR_LOGGING
	dhd->pub.axi_err_dump = MALLOCZ(osh, sizeof(dhd_axi_error_dump_t));
	if (dhd->pub.axi_err_dump == NULL) {
		DHD_ERROR(("%s: Failed to alloc axi_err_dump\n", __FUNCTION__));
	}
#endif /* DNGL_AXI_ERROR_LOGGING */
#endif /* BCMPCIE */
#ifdef SHOW_LOGTRACE
	if (dhd_init_logtrace_process(dhd) != BCME_OK) {
		goto fail;
	}
#endif /* SHOW_LOGTRACE */
	DHD_SSSR_MEMPOOL_INIT(&dhd->pub);
#ifdef EWP_EDL
	if (host_edl_support) {
		if (DHD_EDL_MEM_INIT(&dhd->pub) != BCME_OK) {
			host_edl_support = FALSE;
		}
	}
#endif /* EWP_EDL */
	(void)dhd_sysfs_init(dhd);
#ifdef WL_NATOE
	/* Open Netlink socket for NF_CONNTRACK notifications */
	dhd->pub.nfct = dhd_ct_open(&dhd->pub, NFNL_SUBSYS_CTNETLINK | NFNL_SUBSYS_CTNETLINK_EXP,
		CT_ALL);
#endif /* WL_NATOE */
	dhd_state |= DHD_ATTACH_STATE_DONE;
	dhd->dhd_state = dhd_state;
	dhd_found++;
#ifdef DHD_DUMP_MNGR
	dhd->pub.dump_file_manage =
		(dhd_dump_file_manage_t *)MALLOCZ(dhd->pub.osh, sizeof(dhd_dump_file_manage_t));
	if (unlikely(!dhd->pub.dump_file_manage)) {
		DHD_ERROR(("%s(): could not allocate memory for - "
			"dhd_dump_file_manage_t\n", __FUNCTION__));
	}
#endif /* DHD_DUMP_MNGR */
#ifdef DHD_FW_COREDUMP
	/* Set memdump default values */
#ifdef CUSTOMER_HW4_DEBUG
	dhd->pub.memdump_enabled = DUMP_DISABLED;
#elif defined(OEM_ANDROID)
	dhd->pub.memdump_enabled = DUMP_MEMFILE_BUGON;
#else
	dhd->pub.memdump_enabled = DUMP_MEMFILE;
#endif /* CUSTOMER_HW4_DEBUG */
	/* Check the memdump capability */
	dhd_get_memdump_info(&dhd->pub);
#endif /* DHD_FW_COREDUMP */
#ifdef DHD_ERPOM
	if (enable_erpom) {
		pom_handler = &dhd->pub.pom_wlan_handler;
		pom_handler->func_id = WLAN_FUNC_ID;
		pom_handler->handler = (void *)g_dhd_pub;
		pom_handler->power_off = dhd_wlan_power_off_handler;
		pom_handler->power_on = dhd_wlan_power_on_handler;
		dhd->pub.pom_func_register = NULL;
		dhd->pub.pom_func_deregister = NULL;
		dhd->pub.pom_toggle_reg_on = NULL;
		dhd->pub.pom_func_register = symbol_get(pom_func_register);
		dhd->pub.pom_func_deregister = symbol_get(pom_func_deregister);
		dhd->pub.pom_toggle_reg_on = symbol_get(pom_toggle_reg_on);
		symbol_put(pom_func_register);
		symbol_put(pom_func_deregister);
		symbol_put(pom_toggle_reg_on);
		if (!dhd->pub.pom_func_register ||
			!dhd->pub.pom_func_deregister ||
			!dhd->pub.pom_toggle_reg_on) {
			DHD_ERROR(("%s, enable_erpom enabled through module parameter but "
				"POM is not loaded\n", __FUNCTION__));
			ASSERT(0);
			goto fail;
		}
		dhd->pub.pom_func_register(pom_handler);
		dhd->pub.enable_erpom = TRUE;
	}
#endif /* DHD_ERPOM */
	return &dhd->pub;

fail:
	if (dhd_state >= DHD_ATTACH_STATE_DHD_ALLOC) {
		DHD_TRACE(("%s: Calling dhd_detach dhd_state 0x%x &dhd->pub %p\n",
			__FUNCTION__, dhd_state, &dhd->pub));
		dhd->dhd_state = dhd_state;
		dhd_detach(&dhd->pub);
		dhd_free(&dhd->pub);
	}

dhd_null_flag:
	return NULL;
}
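
/* Infer the firmware operating mode from tags embedded in the firmware file
 * name (e.g. "_apsta", "_p2p", "_mfg"); untagged images default to STA mode.
 */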
  7747. int dhd_get_fw_mode(dhd_info_t *dhdinfo)
  7748. {
  7749. if (strstr(dhdinfo->fw_path, "_apsta") != NULL)
  7750. return DHD_FLAG_HOSTAP_MODE;
  7751. if (strstr(dhdinfo->fw_path, "_p2p") != NULL)
  7752. return DHD_FLAG_P2P_MODE;
  7753. if (strstr(dhdinfo->fw_path, "_ibss") != NULL)
  7754. return DHD_FLAG_IBSS_MODE;
  7755. if (strstr(dhdinfo->fw_path, "_mfg") != NULL)
  7756. return DHD_FLAG_MFG_MODE;
  7757. return DHD_FLAG_STA_MODE;
  7758. }
  7759. int dhd_bus_get_fw_mode(dhd_pub_t *dhdp)
  7760. {
  7761. return dhd_get_fw_mode(dhdp->info);
  7762. }
extern char *nvram_get(const char *name);
bool dhd_update_fw_nv_path(dhd_info_t *dhdinfo)
{
	int fw_len;
	int nv_len;
	const char *fw = NULL;
	const char *nv = NULL;
#ifdef DHD_UCODE_DOWNLOAD
	int uc_len;
	const char *uc = NULL;
#endif /* DHD_UCODE_DOWNLOAD */
	wifi_adapter_info_t *adapter = dhdinfo->adapter;
	int fw_path_len = sizeof(dhdinfo->fw_path);
	int nv_path_len = sizeof(dhdinfo->nv_path);

	/* Update firmware and nvram path. The path may be from adapter info or module parameter.
	 * The path from adapter info is used for initialization only (as it won't change).
	 *
	 * The firmware_path/nvram_path module parameter may be changed by the system at run
	 * time. When it changes we need to copy it to dhdinfo->fw_path. Also an Android private
	 * command may change dhdinfo->fw_path. As such we need to clear the path info in the
	 * module parameter after it is copied. We won't update the path until the module parameter
	 * is changed again (first character is not '\0')
	 */

	/* set default firmware and nvram path for built-in type driver */
	if (!dhd_download_fw_on_driverload) {
#ifdef CONFIG_BCMDHD_FW_PATH
		fw = VENDOR_PATH CONFIG_BCMDHD_FW_PATH;
#endif /* CONFIG_BCMDHD_FW_PATH */
#ifdef CONFIG_BCMDHD_NVRAM_PATH
		nv = VENDOR_PATH CONFIG_BCMDHD_NVRAM_PATH;
#endif /* CONFIG_BCMDHD_NVRAM_PATH */
	}

	/* check if we need to initialize the path */
	if (dhdinfo->fw_path[0] == '\0') {
		if (adapter && adapter->fw_path && adapter->fw_path[0] != '\0')
			fw = adapter->fw_path;
	}
	if (dhdinfo->nv_path[0] == '\0') {
		if (adapter && adapter->nv_path && adapter->nv_path[0] != '\0')
			nv = adapter->nv_path;
	}

	/* Use module parameter if it is valid, EVEN IF the path has not been initialized
	 *
	 * TODO: need a solution for multi-chip, can't use the same firmware for all chips
	 */
	if (firmware_path[0] != '\0')
		fw = firmware_path;
	if (nvram_path[0] != '\0')
		nv = nvram_path;
#ifdef DHD_UCODE_DOWNLOAD
	if (ucode_path[0] != '\0')
		uc = ucode_path;
#endif /* DHD_UCODE_DOWNLOAD */

	if (fw && fw[0] != '\0') {
		fw_len = strlen(fw);
		if (fw_len >= fw_path_len) {
			DHD_ERROR(("fw path len exceeds max len of dhdinfo->fw_path\n"));
			return FALSE;
		}
		strncpy(dhdinfo->fw_path, fw, fw_path_len);
		if (dhdinfo->fw_path[fw_len-1] == '\n')
			dhdinfo->fw_path[fw_len-1] = '\0';
	}
	if (nv && nv[0] != '\0') {
		nv_len = strlen(nv);
		if (nv_len >= nv_path_len) {
			DHD_ERROR(("nvram path len exceeds max len of dhdinfo->nv_path\n"));
			return FALSE;
		}
		memset(dhdinfo->nv_path, 0, nv_path_len);
		strncpy(dhdinfo->nv_path, nv, nv_path_len);
		dhdinfo->nv_path[nv_len] = '\0';
#ifdef DHD_USE_SINGLE_NVRAM_FILE
		/* Remove "_net" or "_mfg" tag from current nvram path */
		{
			char *nvram_tag = "nvram_";
			char *ext_tag = ".txt";
			char *sp_nvram = strnstr(dhdinfo->nv_path, nvram_tag, nv_path_len);
			bool valid_buf = sp_nvram && ((uint32)(sp_nvram + strlen(nvram_tag) +
				strlen(ext_tag) - dhdinfo->nv_path) <= nv_path_len);
			if (valid_buf) {
				char *sp = sp_nvram + strlen(nvram_tag) - 1;
				uint32 padding_size = (uint32)(dhdinfo->nv_path +
					nv_path_len - sp);
				memset(sp, 0, padding_size);
				strncat(dhdinfo->nv_path, ext_tag, strlen(ext_tag));
				nv_len = strlen(dhdinfo->nv_path);
				DHD_INFO(("%s: new nvram path = %s\n",
					__FUNCTION__, dhdinfo->nv_path));
			} else if (sp_nvram) {
				DHD_ERROR(("%s: buffer space for nvram path is not enough\n",
					__FUNCTION__));
				return FALSE;
			} else {
				DHD_ERROR(("%s: Couldn't find the nvram tag. current"
					" nvram path = %s\n", __FUNCTION__, dhdinfo->nv_path));
			}
		}
#endif /* DHD_USE_SINGLE_NVRAM_FILE */
		if (dhdinfo->nv_path[nv_len-1] == '\n')
			dhdinfo->nv_path[nv_len-1] = '\0';
	}
#ifdef DHD_UCODE_DOWNLOAD
	if (uc && uc[0] != '\0') {
		uc_len = strlen(uc);
		if (uc_len >= sizeof(dhdinfo->uc_path)) {
			DHD_ERROR(("uc path len exceeds max len of dhdinfo->uc_path\n"));
			return FALSE;
		}
		strncpy(dhdinfo->uc_path, uc, sizeof(dhdinfo->uc_path));
		if (dhdinfo->uc_path[uc_len-1] == '\n')
			dhdinfo->uc_path[uc_len-1] = '\0';
	}
#endif /* DHD_UCODE_DOWNLOAD */

	/* clear the path in module parameter */
	if (dhd_download_fw_on_driverload) {
		firmware_path[0] = '\0';
		nvram_path[0] = '\0';
	}
#ifdef DHD_UCODE_DOWNLOAD
	ucode_path[0] = '\0';
	DHD_ERROR(("ucode path: %s\n", dhdinfo->uc_path));
#endif /* DHD_UCODE_DOWNLOAD */

	/* fw_path and nv_path are not mandatory for BCMEMBEDIMAGE */
	if (dhdinfo->fw_path[0] == '\0') {
		DHD_ERROR(("firmware path not found\n"));
		return FALSE;
	}
	if (dhdinfo->nv_path[0] == '\0') {
		DHD_ERROR(("nvram path not found\n"));
		return FALSE;
	}

	return TRUE;
}
#if defined(BT_OVER_SDIO)
extern bool dhd_update_btfw_path(dhd_info_t *dhdinfo, char* btfw_path)
{
	int fw_len;
	const char *fw = NULL;
	wifi_adapter_info_t *adapter = dhdinfo->adapter;

	/* Update BT firmware path. The path may be from adapter info or module parameter.
	 * The path from adapter info is used for initialization only (as it won't change).
	 *
	 * The btfw_path module parameter may be changed by the system at run
	 * time. When it changes we need to copy it to dhdinfo->btfw_path. Also an Android private
	 * command may change dhdinfo->btfw_path. As such we need to clear the path info in the
	 * module parameter after it is copied. We won't update the path until the module parameter
	 * is changed again (first character is not '\0')
	 */

	/* set default BT firmware path for built-in type driver */
	if (!dhd_download_fw_on_driverload) {
#ifdef CONFIG_BCMDHD_BTFW_PATH
		fw = CONFIG_BCMDHD_BTFW_PATH;
#endif /* CONFIG_BCMDHD_BTFW_PATH */
	}

	/* check if we need to initialize the path */
	if (dhdinfo->btfw_path[0] == '\0') {
		if (adapter && adapter->btfw_path && adapter->btfw_path[0] != '\0')
			fw = adapter->btfw_path;
	}

	/* Use module parameter if it is valid, EVEN IF the path has not been initialized
	 */
	if (btfw_path[0] != '\0')
		fw = btfw_path;

	if (fw && fw[0] != '\0') {
		fw_len = strlen(fw);
		if (fw_len >= sizeof(dhdinfo->btfw_path)) {
			DHD_ERROR(("fw path len exceeds max len of dhdinfo->btfw_path\n"));
			return FALSE;
		}
		strncpy(dhdinfo->btfw_path, fw, sizeof(dhdinfo->btfw_path));
		if (dhdinfo->btfw_path[fw_len-1] == '\n')
			dhdinfo->btfw_path[fw_len-1] = '\0';
	}

	/* clear the path in module parameter */
	btfw_path[0] = '\0';

	if (dhdinfo->btfw_path[0] == '\0') {
		DHD_ERROR(("bt firmware path not found\n"));
		return FALSE;
	}

	return TRUE;
}
#endif /* defined (BT_OVER_SDIO) */
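
/*
 * Sanity-check that the chip probed on the bus matches the chip the driver
 * was configured for at build time. A few parts share silicon under a
 * differently numbered chip ID (4350/4354, 43569/4358, 4355/4359,
 * 4347/4361), so those pairs are accepted explicitly below.
 */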
#ifdef CUSTOMER_HW4_DEBUG
bool dhd_validate_chipid(dhd_pub_t *dhdp)
{
	uint chipid = dhd_bus_chip_id(dhdp);
	uint config_chipid;

#ifdef BCM4375_CHIP
	config_chipid = BCM4375_CHIP_ID;
#elif defined(BCM4361_CHIP)
	config_chipid = BCM4361_CHIP_ID;
#elif defined(BCM4359_CHIP)
	config_chipid = BCM4359_CHIP_ID;
#elif defined(BCM4358_CHIP)
	config_chipid = BCM4358_CHIP_ID;
#elif defined(BCM4354_CHIP)
	config_chipid = BCM4354_CHIP_ID;
#elif defined(BCM4339_CHIP)
	config_chipid = BCM4339_CHIP_ID;
#elif defined(BCM4335_CHIP)
	config_chipid = BCM4335_CHIP_ID;
#elif defined(BCM43430_CHIP)
	config_chipid = BCM43430_CHIP_ID;
#elif defined(BCM43018_CHIP)
	config_chipid = BCM43018_CHIP_ID;
#elif defined(BCM43455_CHIP)
	config_chipid = BCM4345_CHIP_ID;
#elif defined(BCM43454_CHIP)
	config_chipid = BCM43454_CHIP_ID;
#elif defined(BCM43012_CHIP_)
	config_chipid = BCM43012_CHIP_ID;
#else
	DHD_ERROR(("%s: Unknown chip id; if you use a new chipset,"
		" please add CONFIG_BCMXXXX into the Kernel and a"
		" BCMXXXX_CHIP definition into the DHD driver\n",
		__FUNCTION__));
	config_chipid = 0;
	return FALSE;
#endif /* BCM4375_CHIP */

#if defined(BCM4354_CHIP) && defined(SUPPORT_MULTIPLE_REVISION)
	if (chipid == BCM4350_CHIP_ID && config_chipid == BCM4354_CHIP_ID) {
		return TRUE;
	}
#endif /* BCM4354_CHIP && SUPPORT_MULTIPLE_REVISION */
#if defined(BCM4358_CHIP) && defined(SUPPORT_MULTIPLE_REVISION)
	if (chipid == BCM43569_CHIP_ID && config_chipid == BCM4358_CHIP_ID) {
		return TRUE;
	}
#endif /* BCM4358_CHIP && SUPPORT_MULTIPLE_REVISION */
#if defined(BCM4359_CHIP)
	if (chipid == BCM4355_CHIP_ID && config_chipid == BCM4359_CHIP_ID) {
		return TRUE;
	}
#endif /* BCM4359_CHIP */
#if defined(BCM4361_CHIP)
	if (chipid == BCM4347_CHIP_ID && config_chipid == BCM4361_CHIP_ID) {
		return TRUE;
	}
#endif /* BCM4361_CHIP */

	return config_chipid == chipid;
}
#endif /* CUSTOMER_HW4_DEBUG */
#if defined(BT_OVER_SDIO)
wlan_bt_handle_t dhd_bt_get_pub_hndl(void)
{
	DHD_ERROR(("%s: g_dhd_pub %p\n", __FUNCTION__, g_dhd_pub));
	/* assuming that dhd_pub_t type pointer is available from a global variable */
	return (wlan_bt_handle_t)g_dhd_pub;
}
EXPORT_SYMBOL(dhd_bt_get_pub_hndl);

int dhd_download_btfw(wlan_bt_handle_t handle, char* btfw_path)
{
	int ret = -1;
	dhd_pub_t *dhdp = (dhd_pub_t *)handle;
	dhd_info_t *dhd = (dhd_info_t *)dhdp->info;

	/* Download BT firmware image to the dongle */
	if (dhd->pub.busstate == DHD_BUS_DATA && dhd_update_btfw_path(dhd, btfw_path)) {
		DHD_INFO(("%s: download btfw from: %s\n", __FUNCTION__, dhd->btfw_path));
		ret = dhd_bus_download_btfw(dhd->pub.bus, dhd->pub.osh, dhd->btfw_path);
		if (ret < 0) {
			DHD_ERROR(("%s: failed to download btfw from: %s\n",
				__FUNCTION__, dhd->btfw_path));
			return ret;
		}
	}
	return ret;
}
EXPORT_SYMBOL(dhd_download_btfw);
#endif /* defined (BT_OVER_SDIO) */
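
/*
 * Bring the dongle from power-on to an IOCTL-ready state: download the
 * firmware and NVRAM if the bus is still down, start the watchdog timer,
 * initialize the bus, register the out-of-band host-wake interrupt when
 * configured, set up the PCIe H2D flow rings, initialize the messaging
 * protocol, and finally sync host and dongle state. Every error path
 * releases the perimeter lock before returning.
 */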
int
dhd_bus_start(dhd_pub_t *dhdp)
{
	int ret = -1;
	dhd_info_t *dhd = (dhd_info_t *)dhdp->info;
	unsigned long flags;
#if defined(DHD_DEBUG) && defined(BCMSDIO)
	int fw_download_start = 0, fw_download_end = 0, f2_sync_start = 0, f2_sync_end = 0;
#endif /* DHD_DEBUG && BCMSDIO */

	ASSERT(dhd);
	DHD_TRACE(("Enter %s:\n", __FUNCTION__));
	dhdp->dongle_trap_occured = 0;
#ifdef DHD_SSSR_DUMP
	/* Flag to indicate sssr dump is collected */
	dhdp->sssr_dump_collected = 0;
#endif /* DHD_SSSR_DUMP */
	dhdp->iovar_timeout_occured = 0;
#ifdef PCIE_FULL_DONGLE
	dhdp->d3ack_timeout_occured = 0;
	dhdp->livelock_occured = 0;
	dhdp->pktid_audit_failed = 0;
#endif /* PCIE_FULL_DONGLE */
	dhd->pub.iface_op_failed = 0;
	dhd->pub.scan_timeout_occurred = 0;
	dhd->pub.scan_busy_occurred = 0;
	/* Clear induced error during initialize */
	dhd->pub.dhd_induce_error = DHD_INDUCE_ERROR_CLEAR;

	/* set default value for now. Will be updated again in dhd_preinit_ioctls()
	 * after querying FW
	 */
	dhdp->event_log_max_sets = NUM_EVENT_LOG_SETS;
	dhdp->event_log_max_sets_queried = FALSE;
	dhdp->smmu_fault_occurred = 0;
#ifdef DNGL_AXI_ERROR_LOGGING
	dhdp->axi_error = FALSE;
#endif /* DNGL_AXI_ERROR_LOGGING */

	DHD_PERIM_LOCK(dhdp);
	/* try to download image and nvram to the dongle */
	if (dhd->pub.busstate == DHD_BUS_DOWN && dhd_update_fw_nv_path(dhd)) {
		/* Indicate FW Download has not yet done */
		dhd->pub.fw_download_status = FW_DOWNLOAD_IN_PROGRESS;
		DHD_INFO(("%s download fw %s, nv %s\n", __FUNCTION__, dhd->fw_path, dhd->nv_path));
#if defined(DHD_DEBUG) && defined(BCMSDIO)
		fw_download_start = OSL_SYSUPTIME();
#endif /* DHD_DEBUG && BCMSDIO */
		ret = dhd_bus_download_firmware(dhd->pub.bus, dhd->pub.osh,
			dhd->fw_path, dhd->nv_path);
#if defined(DHD_DEBUG) && defined(BCMSDIO)
		fw_download_end = OSL_SYSUPTIME();
#endif /* DHD_DEBUG && BCMSDIO */
		if (ret < 0) {
			DHD_ERROR(("%s: failed to download firmware %s\n",
				__FUNCTION__, dhd->fw_path));
			DHD_PERIM_UNLOCK(dhdp);
			return ret;
		}
		/* Indicate FW Download has succeeded */
		dhd->pub.fw_download_status = FW_DOWNLOAD_DONE;
	}
	if (dhd->pub.busstate != DHD_BUS_LOAD) {
		DHD_PERIM_UNLOCK(dhdp);
		return -ENETDOWN;
	}

#ifdef BCMSDIO
	dhd_os_sdlock(dhdp);
#endif /* BCMSDIO */

	/* Start the watchdog timer */
	dhd->pub.tickcnt = 0;
	dhd_os_wd_timer(&dhd->pub, dhd_watchdog_ms);

	/* Bring up the bus */
	if ((ret = dhd_bus_init(&dhd->pub, FALSE)) != 0) {
		DHD_ERROR(("%s, dhd_bus_init failed %d\n", __FUNCTION__, ret));
#ifdef BCMSDIO
		dhd_os_sdunlock(dhdp);
#endif /* BCMSDIO */
		DHD_PERIM_UNLOCK(dhdp);
		return ret;
	}

	DHD_ENABLE_RUNTIME_PM(&dhd->pub);

#ifdef DHD_ULP
	dhd_ulp_set_ulp_state(dhdp, DHD_ULP_DISABLED);
#endif /* DHD_ULP */
#if defined(OOB_INTR_ONLY) || defined(BCMSPI_ANDROID) || defined(BCMPCIE_OOB_HOST_WAKE)
	/* Host registration for OOB interrupt */
	if (dhd_bus_oob_intr_register(dhdp)) {
		/* deactivate timer and wait for the handler to finish */
#if !defined(BCMPCIE_OOB_HOST_WAKE)
		DHD_GENERAL_LOCK(&dhd->pub, flags);
		dhd->wd_timer_valid = FALSE;
		DHD_GENERAL_UNLOCK(&dhd->pub, flags);
		del_timer_sync(&dhd->timer);
#endif /* !BCMPCIE_OOB_HOST_WAKE */
		DHD_DISABLE_RUNTIME_PM(&dhd->pub);
		DHD_PERIM_UNLOCK(dhdp);
		DHD_ERROR(("%s Host failed to register for OOB\n", __FUNCTION__));
		DHD_OS_WD_WAKE_UNLOCK(&dhd->pub);
		return -ENODEV;
	}

#if defined(BCMPCIE_OOB_HOST_WAKE)
	dhd_bus_oob_intr_set(dhdp, TRUE);
#else
	/* Enable oob at firmware */
	dhd_enable_oob_intr(dhd->pub.bus, TRUE);
#endif /* BCMPCIE_OOB_HOST_WAKE */
#endif /* OOB_INTR_ONLY || BCMSPI_ANDROID || BCMPCIE_OOB_HOST_WAKE */
#ifdef PCIE_FULL_DONGLE
	{
		/* max_h2d_rings includes H2D common rings */
		uint32 max_h2d_rings = dhd_bus_max_h2d_queues(dhd->pub.bus);
		DHD_ERROR(("%s: Initializing %u h2drings\n", __FUNCTION__,
			max_h2d_rings));
		if ((ret = dhd_flow_rings_init(&dhd->pub, max_h2d_rings)) != BCME_OK) {
#ifdef BCMSDIO
			dhd_os_sdunlock(dhdp);
#endif /* BCMSDIO */
			DHD_PERIM_UNLOCK(dhdp);
			return ret;
		}
	}
#endif /* PCIE_FULL_DONGLE */

	/* Do protocol initialization necessary for IOCTL/IOVAR */
	ret = dhd_prot_init(&dhd->pub);
	if (unlikely(ret != BCME_OK)) {
		DHD_PERIM_UNLOCK(dhdp);
		DHD_OS_WD_WAKE_UNLOCK(&dhd->pub);
		return ret;
	}

	/* If bus is not ready, can't come up */
	if (dhd->pub.busstate != DHD_BUS_DATA) {
		DHD_GENERAL_LOCK(&dhd->pub, flags);
		dhd->wd_timer_valid = FALSE;
		DHD_GENERAL_UNLOCK(&dhd->pub, flags);
		del_timer_sync(&dhd->timer);
		DHD_ERROR(("%s failed, bus is not ready\n", __FUNCTION__));
		DHD_DISABLE_RUNTIME_PM(&dhd->pub);
#ifdef BCMSDIO
		dhd_os_sdunlock(dhdp);
#endif /* BCMSDIO */
		DHD_PERIM_UNLOCK(dhdp);
		DHD_OS_WD_WAKE_UNLOCK(&dhd->pub);
		return -ENODEV;
	}

#ifdef BCMSDIO
	dhd_os_sdunlock(dhdp);
#endif /* BCMSDIO */

	/* Bus is ready, query any dongle information */
#if defined(DHD_DEBUG) && defined(BCMSDIO)
	f2_sync_start = OSL_SYSUPTIME();
#endif /* DHD_DEBUG && BCMSDIO */
	if ((ret = dhd_sync_with_dongle(&dhd->pub)) < 0) {
		DHD_GENERAL_LOCK(&dhd->pub, flags);
		dhd->wd_timer_valid = FALSE;
		DHD_GENERAL_UNLOCK(&dhd->pub, flags);
		del_timer_sync(&dhd->timer);
		DHD_ERROR(("%s failed to sync with dongle\n", __FUNCTION__));
		DHD_OS_WD_WAKE_UNLOCK(&dhd->pub);
		DHD_PERIM_UNLOCK(dhdp);
		return ret;
	}

#if defined(CONFIG_SOC_EXYNOS8895) || defined(CONFIG_SOC_EXYNOS9810) || \
	defined(CONFIG_SOC_EXYNOS9820)
	DHD_ERROR(("%s: Enable L1ss EP side\n", __FUNCTION__));
	exynos_pcie_l1ss_ctrl(1, PCIE_L1SS_CTRL_WIFI);
#endif /* CONFIG_SOC_EXYNOS8895 || CONFIG_SOC_EXYNOS9810 || CONFIG_SOC_EXYNOS9820 */

#if defined(DHD_DEBUG) && defined(BCMSDIO)
	f2_sync_end = OSL_SYSUPTIME();
	DHD_ERROR(("Time taken for FW download and F2 ready is: %d msec\n",
		(fw_download_end - fw_download_start) + (f2_sync_end - f2_sync_start)));
#endif /* DHD_DEBUG && BCMSDIO */

#ifdef ARP_OFFLOAD_SUPPORT
	if (dhd->pend_ipaddr) {
#ifdef AOE_IP_ALIAS_SUPPORT
		aoe_update_host_ipv4_table(&dhd->pub, dhd->pend_ipaddr, TRUE, 0);
#endif /* AOE_IP_ALIAS_SUPPORT */
		dhd->pend_ipaddr = 0;
	}
#endif /* ARP_OFFLOAD_SUPPORT */

	DHD_PERIM_UNLOCK(dhdp);
	return 0;
}
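
/*
 * TDLS setup: enable the "tdls_enable" iovar itself, then optionally turn
 * on automatic link setup/teardown ("tdls_auto_op"), in which case the
 * idle-time, RSSI, and packet-count thresholds below control when the
 * firmware establishes or tears down a direct link.
 */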
#ifdef WLTDLS
int _dhd_tdls_enable(dhd_pub_t *dhd, bool tdls_on, bool auto_on, struct ether_addr *mac)
{
	uint32 tdls = tdls_on;
	int ret = 0;
	uint32 tdls_auto_op = 0;
	uint32 tdls_idle_time = CUSTOM_TDLS_IDLE_MODE_SETTING;
	int32 tdls_rssi_high = CUSTOM_TDLS_RSSI_THRESHOLD_HIGH;
	int32 tdls_rssi_low = CUSTOM_TDLS_RSSI_THRESHOLD_LOW;
	uint32 tdls_pktcnt_high = CUSTOM_TDLS_PCKTCNT_THRESHOLD_HIGH;
	uint32 tdls_pktcnt_low = CUSTOM_TDLS_PCKTCNT_THRESHOLD_LOW;

	BCM_REFERENCE(mac);
	if (!FW_SUPPORTED(dhd, tdls))
		return BCME_ERROR;

	if (dhd->tdls_enable == tdls_on)
		goto auto_mode;
	ret = dhd_iovar(dhd, 0, "tdls_enable", (char *)&tdls, sizeof(tdls), NULL, 0, TRUE);
	if (ret < 0) {
		DHD_ERROR(("%s: tdls %d failed %d\n", __FUNCTION__, tdls, ret));
		goto exit;
	}
	dhd->tdls_enable = tdls_on;

auto_mode:
	tdls_auto_op = auto_on;
	ret = dhd_iovar(dhd, 0, "tdls_auto_op", (char *)&tdls_auto_op, sizeof(tdls_auto_op), NULL,
		0, TRUE);
	if (ret < 0) {
		DHD_ERROR(("%s: tdls_auto_op failed %d\n", __FUNCTION__, ret));
		goto exit;
	}

	if (tdls_auto_op) {
		ret = dhd_iovar(dhd, 0, "tdls_idle_time", (char *)&tdls_idle_time,
			sizeof(tdls_idle_time), NULL, 0, TRUE);
		if (ret < 0) {
			DHD_ERROR(("%s: tdls_idle_time failed %d\n", __FUNCTION__, ret));
			goto exit;
		}
		ret = dhd_iovar(dhd, 0, "tdls_rssi_high", (char *)&tdls_rssi_high,
			sizeof(tdls_rssi_high), NULL, 0, TRUE);
		if (ret < 0) {
			DHD_ERROR(("%s: tdls_rssi_high failed %d\n", __FUNCTION__, ret));
			goto exit;
		}
		ret = dhd_iovar(dhd, 0, "tdls_rssi_low", (char *)&tdls_rssi_low,
			sizeof(tdls_rssi_low), NULL, 0, TRUE);
		if (ret < 0) {
			DHD_ERROR(("%s: tdls_rssi_low failed %d\n", __FUNCTION__, ret));
			goto exit;
		}
		ret = dhd_iovar(dhd, 0, "tdls_trigger_pktcnt_high", (char *)&tdls_pktcnt_high,
			sizeof(tdls_pktcnt_high), NULL, 0, TRUE);
		if (ret < 0) {
			DHD_ERROR(("%s: tdls_trigger_pktcnt_high failed %d\n", __FUNCTION__, ret));
			goto exit;
		}
		ret = dhd_iovar(dhd, 0, "tdls_trigger_pktcnt_low", (char *)&tdls_pktcnt_low,
			sizeof(tdls_pktcnt_low), NULL, 0, TRUE);
		if (ret < 0) {
			DHD_ERROR(("%s: tdls_trigger_pktcnt_low failed %d\n", __FUNCTION__, ret));
			goto exit;
		}
	}

exit:
	return ret;
}

int dhd_tdls_enable(struct net_device *dev, bool tdls_on, bool auto_on, struct ether_addr *mac)
{
	dhd_info_t *dhd = DHD_DEV_INFO(dev);
	int ret = 0;
	if (dhd)
		ret = _dhd_tdls_enable(&dhd->pub, tdls_on, auto_on, mac);
	else
		ret = BCME_ERROR;
	return ret;
}
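
/*
 * Switching between plain TDLS and Wi-Fi Display (WFD) mode: TDLS must be
 * disabled before "tdls_wfd_mode" can be changed, so the sequence below is
 * disable -> set mode -> re-enable, with auto-operation enabled only for
 * the non-WFD case when ENABLE_TDLS_AUTO_MODE is defined.
 */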
int
dhd_tdls_set_mode(dhd_pub_t *dhd, bool wfd_mode)
{
	int ret = 0;
	bool auto_on = false;
	uint32 mode = wfd_mode;

#ifdef ENABLE_TDLS_AUTO_MODE
	if (wfd_mode) {
		auto_on = false;
	} else {
		auto_on = true;
	}
#else
	auto_on = false;
#endif /* ENABLE_TDLS_AUTO_MODE */
	ret = _dhd_tdls_enable(dhd, false, auto_on, NULL);
	if (ret < 0) {
		DHD_ERROR(("Disable tdls_auto_op failed. %d\n", ret));
		return ret;
	}

	ret = dhd_iovar(dhd, 0, "tdls_wfd_mode", (char *)&mode, sizeof(mode), NULL, 0, TRUE);
	if ((ret < 0) && (ret != BCME_UNSUPPORTED)) {
		DHD_ERROR(("%s: tdls_wfd_mode failed %d\n", __FUNCTION__, ret));
		return ret;
	}

	ret = _dhd_tdls_enable(dhd, true, auto_on, NULL);
	if (ret < 0) {
		DHD_ERROR(("enable tdls_auto_op failed. %d\n", ret));
		return ret;
	}

	dhd->tdls_mode = mode;
	return ret;
}
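
/*
 * For PCIe full-dongle operation the host keeps a singly linked table of
 * connected TDLS peers: a WLC_E_TDLS_PEER_CONNECTED event prepends a node,
 * a DISCONNECTED event deletes the peer's flow rings and unlinks the node,
 * and PEER_DISCOVERED events are ignored.
 */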
#ifdef PCIE_FULL_DONGLE
int dhd_tdls_update_peer_info(dhd_pub_t *dhdp, wl_event_msg_t *event)
{
	dhd_pub_t *dhd_pub = dhdp;
	tdls_peer_node_t *cur = dhd_pub->peer_tbl.node;
	tdls_peer_node_t *new = NULL, *prev = NULL;
	int ifindex = dhd_ifname2idx(dhd_pub->info, event->ifname);
	uint8 *da = (uint8 *)&event->addr.octet[0];
	bool connect = FALSE;
	uint32 reason = ntoh32(event->reason);
	unsigned long flags;

	/* No handling needed for peer discovered reason */
	if (reason == WLC_E_TDLS_PEER_DISCOVERED) {
		return BCME_ERROR;
	}

	if (reason == WLC_E_TDLS_PEER_CONNECTED)
		connect = TRUE;
	else if (reason == WLC_E_TDLS_PEER_DISCONNECTED)
		connect = FALSE;
	else {
		DHD_ERROR(("%s: TDLS Event reason is unknown\n", __FUNCTION__));
		return BCME_ERROR;
	}

	if (ifindex == DHD_BAD_IF)
		return BCME_ERROR;

	if (connect) {
		while (cur != NULL) {
			if (!memcmp(da, cur->addr, ETHER_ADDR_LEN)) {
				DHD_ERROR(("%s: TDLS peer already exists, line %d\n",
					__FUNCTION__, __LINE__));
				return BCME_ERROR;
			}
			cur = cur->next;
		}

		new = MALLOC(dhd_pub->osh, sizeof(tdls_peer_node_t));
		if (new == NULL) {
			DHD_ERROR(("%s: Failed to allocate memory\n", __FUNCTION__));
			return BCME_ERROR;
		}
		memcpy(new->addr, da, ETHER_ADDR_LEN);
		DHD_TDLS_LOCK(&dhdp->tdls_lock, flags);
		new->next = dhd_pub->peer_tbl.node;
		dhd_pub->peer_tbl.node = new;
		dhd_pub->peer_tbl.tdls_peer_count++;
		DHD_TDLS_UNLOCK(&dhdp->tdls_lock, flags);
	} else {
		while (cur != NULL) {
			if (!memcmp(da, cur->addr, ETHER_ADDR_LEN)) {
				dhd_flow_rings_delete_for_peer(dhd_pub, (uint8)ifindex, da);
				DHD_TDLS_LOCK(&dhdp->tdls_lock, flags);
				if (prev)
					prev->next = cur->next;
				else
					dhd_pub->peer_tbl.node = cur->next;
				MFREE(dhd_pub->osh, cur, sizeof(tdls_peer_node_t));
				dhd_pub->peer_tbl.tdls_peer_count--;
				DHD_TDLS_UNLOCK(&dhdp->tdls_lock, flags);
				return BCME_OK;
			}
			prev = cur;
			cur = cur->next;
		}
		DHD_ERROR(("%s: TDLS Peer Entry Not found\n", __FUNCTION__));
	}
	return BCME_OK;
}
#endif /* PCIE_FULL_DONGLE */
#endif /* WLTDLS */
bool dhd_is_concurrent_mode(dhd_pub_t *dhd)
{
	if (!dhd)
		return FALSE;

	if (dhd->op_mode & DHD_FLAG_CONCURR_MULTI_CHAN_MODE)
		return TRUE;
	else if ((dhd->op_mode & DHD_FLAG_CONCURR_SINGLE_CHAN_MODE) ==
		DHD_FLAG_CONCURR_SINGLE_CHAN_MODE)
		return TRUE;
	else
		return FALSE;
}
#if defined(OEM_ANDROID) && !defined(AP) && defined(WLP2P)
/* From the Android Jelly Bean release, concurrent mode is enabled by default and the firmware
 * name would be fw_bcmdhd.bin. So we need to determine whether P2P is enabled in the STA
 * firmware and accordingly enable concurrent mode (apply P2P settings). SoftAP firmware
 * would still be named fw_bcmdhd_apsta.
 */
uint32
dhd_get_concurrent_capabilites(dhd_pub_t *dhd)
{
	int32 ret = 0;
	char buf[WLC_IOCTL_SMLEN];
	bool mchan_supported = FALSE;
	/* if dhd->op_mode is already set for HOSTAP or manufacturing
	 * test mode, we will only use the mode as it is
	 */
	if (dhd->op_mode & (DHD_FLAG_HOSTAP_MODE | DHD_FLAG_MFG_MODE))
		return 0;
	if (FW_SUPPORTED(dhd, vsdb)) {
		mchan_supported = TRUE;
	}
	if (!FW_SUPPORTED(dhd, p2p)) {
		DHD_TRACE(("Chip does not support p2p\n"));
		return 0;
	} else {
		/* Chip supports p2p, but verify that p2p is really implemented in the firmware */
		memset(buf, 0, sizeof(buf));
		ret = dhd_iovar(dhd, 0, "p2p", NULL, 0, (char *)&buf,
			sizeof(buf), FALSE);
		if (ret < 0) {
			DHD_ERROR(("%s: Get P2P failed (error=%d)\n", __FUNCTION__, ret));
			return 0;
		} else {
			if (buf[0] == 1) {
				/* By default, chip supports single-channel concurrency;
				 * now let's check for mchan
				 */
				ret = DHD_FLAG_CONCURR_SINGLE_CHAN_MODE;
				if (mchan_supported)
					ret |= DHD_FLAG_CONCURR_MULTI_CHAN_MODE;
				if (FW_SUPPORTED(dhd, rsdb)) {
					ret |= DHD_FLAG_RSDB_MODE;
				}
#ifdef WL_SUPPORT_MULTIP2P
				if (FW_SUPPORTED(dhd, mp2p)) {
					ret |= DHD_FLAG_MP2P_MODE;
				}
#endif /* WL_SUPPORT_MULTIP2P */
#if defined(WL_ENABLE_P2P_IF) || defined(WL_CFG80211_P2P_DEV_IF)
				return ret;
#else
				return 0;
#endif /* WL_ENABLE_P2P_IF || WL_CFG80211_P2P_DEV_IF */
			}
		}
	}
	return 0;
}
#endif /* defined(OEM_ANDROID) && !defined(AP) && defined(WLP2P) */
#ifdef WLAIBSS
int
dhd_preinit_aibss_ioctls(dhd_pub_t *dhd, char *iov_buf_smlen)
{
	int ret = BCME_OK;
	aibss_bcn_force_config_t bcn_config;
	uint32 aibss;
#ifdef WLAIBSS_PS
	uint32 aibss_ps;
	s32 atim;
#endif /* WLAIBSS_PS */
	int ibss_coalesce;

	aibss = 1;
	ret = dhd_iovar(dhd, 0, "aibss", (char *)&aibss, sizeof(aibss), NULL, 0, TRUE);
	if (ret < 0) {
		if (ret == BCME_UNSUPPORTED) {
			DHD_ERROR(("%s aibss is not supported\n",
				__FUNCTION__));
			return BCME_OK;
		} else {
			DHD_ERROR(("%s Set aibss to %d failed %d\n",
				__FUNCTION__, aibss, ret));
			return ret;
		}
	}

#ifdef WLAIBSS_PS
	aibss_ps = 1;
	ret = dhd_iovar(dhd, 0, "aibss_ps", (char *)&aibss_ps, sizeof(aibss_ps), NULL, 0, TRUE);
	if (ret < 0) {
		DHD_ERROR(("%s Set aibss PS to %d failed %d\n",
			__FUNCTION__, aibss_ps, ret));
		return ret;
	}

	atim = 10;
	if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_ATIM,
		(char *)&atim, sizeof(atim), TRUE, 0)) < 0) {
		DHD_ERROR(("%s Enable custom IBSS ATIM mode failed %d\n",
			__FUNCTION__, ret));
		return ret;
	}
#endif /* WLAIBSS_PS */

	memset(&bcn_config, 0, sizeof(bcn_config));
	bcn_config.initial_min_bcn_dur = AIBSS_INITIAL_MIN_BCN_DUR;
	bcn_config.min_bcn_dur = AIBSS_MIN_BCN_DUR;
	bcn_config.bcn_flood_dur = AIBSS_BCN_FLOOD_DUR;
	bcn_config.version = AIBSS_BCN_FORCE_CONFIG_VER_0;
	bcn_config.len = sizeof(bcn_config);

	ret = dhd_iovar(dhd, 0, "aibss_bcn_force_config", (char *)&bcn_config,
		sizeof(aibss_bcn_force_config_t), NULL, 0, TRUE);
	if (ret < 0) {
		DHD_ERROR(("%s Set aibss_bcn_force_config to %d, %d, %d failed %d\n",
			__FUNCTION__, AIBSS_INITIAL_MIN_BCN_DUR, AIBSS_MIN_BCN_DUR,
			AIBSS_BCN_FLOOD_DUR, ret));
		return ret;
	}

	ibss_coalesce = IBSS_COALESCE_DEFAULT;
	ret = dhd_iovar(dhd, 0, "ibss_coalesce_allowed", (char *)&ibss_coalesce,
		sizeof(ibss_coalesce), NULL, 0, TRUE);
	if (ret < 0) {
		DHD_ERROR(("%s Set ibss_coalesce_allowed failed %d\n",
			__FUNCTION__, ret));
		return ret;
	}

	dhd->op_mode |= DHD_FLAG_IBSS_MODE;
	return BCME_OK;
}
#endif /* WLAIBSS */
#if defined(WLADPS) || defined(WLADPS_PRIVATE_CMD)
#ifdef WL_BAM
static int
dhd_check_adps_bad_ap(dhd_pub_t *dhd)
{
	struct net_device *ndev;
	struct bcm_cfg80211 *cfg;
	struct wl_profile *profile;
	struct ether_addr bssid;

	if (!dhd_is_associated(dhd, 0, NULL)) {
		DHD_ERROR(("%s - not associated\n", __FUNCTION__));
		return BCME_OK;
	}

	ndev = dhd_linux_get_primary_netdev(dhd);
	if (!ndev) {
		DHD_ERROR(("%s: Cannot find primary netdev\n", __FUNCTION__));
		return -ENODEV;
	}
	cfg = wl_get_cfg(ndev);
	if (!cfg) {
		DHD_ERROR(("%s: Cannot find cfg\n", __FUNCTION__));
		return -EINVAL;
	}

	profile = wl_get_profile_by_netdev(cfg, ndev);
	memcpy(bssid.octet, profile->bssid, ETHER_ADDR_LEN);
	if (wl_adps_bad_ap_check(cfg, &bssid)) {
		if (wl_adps_enabled(cfg, ndev)) {
			wl_adps_set_suspend(cfg, ndev, ADPS_SUSPEND);
		}
	}

	return BCME_OK;
}
#endif /* WL_BAM */
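
/*
 * ADPS (adaptive power save) is toggled with a bcm_iov_buf_t: the buffer
 * carries the WL_ADPS_IOV_MODE sub-command plus a wl_adps_params_v1_t
 * payload, and the same "adps" iovar is issued once per band.
 */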
int
dhd_enable_adps(dhd_pub_t *dhd, uint8 on)
{
	int i;
	int len;
	int ret = BCME_OK;

	bcm_iov_buf_t *iov_buf = NULL;
	wl_adps_params_v1_t *data = NULL;

	len = OFFSETOF(bcm_iov_buf_t, data) + sizeof(*data);
	iov_buf = MALLOC(dhd->osh, len);
	if (iov_buf == NULL) {
		DHD_ERROR(("%s - failed to allocate %d bytes for iov_buf\n", __FUNCTION__, len));
		ret = BCME_NOMEM;
		goto exit;
	}

	iov_buf->version = WL_ADPS_IOV_VER;
	iov_buf->len = sizeof(*data);
	iov_buf->id = WL_ADPS_IOV_MODE;

	data = (wl_adps_params_v1_t *)iov_buf->data;
	data->version = ADPS_SUB_IOV_VERSION_1;
	data->length = sizeof(*data);
	data->mode = on;

	for (i = 1; i <= MAX_BANDS; i++) {
		data->band = i;
		ret = dhd_iovar(dhd, 0, "adps", (char *)iov_buf, len, NULL, 0, TRUE);
		if (ret < 0) {
			if (ret == BCME_UNSUPPORTED) {
				DHD_ERROR(("%s adps is not supported\n", __FUNCTION__));
				ret = BCME_OK;
				goto exit;
			} else {
				DHD_ERROR(("%s fail to set adps %s for band %d (%d)\n",
					__FUNCTION__, on ? "On" : "Off", i, ret));
				goto exit;
			}
		}
	}

#ifdef WL_BAM
	if (on) {
		dhd_check_adps_bad_ap(dhd);
	}
#endif /* WL_BAM */

exit:
	if (iov_buf) {
		MFREE(dhd->osh, iov_buf, len);
		iov_buf = NULL;
	}
	return ret;
}
#endif /* WLADPS || WLADPS_PRIVATE_CMD */
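
/*
 * One-time dongle configuration issued right after the bus comes up:
 * query the firmware version and capability string, program the MAC
 * address and CLM blob, derive dhd->op_mode from the firmware build and
 * the op_mode module parameter, then push the long tail of defaults
 * (country, listen interval, roam thresholds, power save, event masks,
 * and so on) via IOVARs/ioctls.
 */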
int
dhd_preinit_ioctls(dhd_pub_t *dhd)
{
	int ret = 0;
	char eventmask[WL_EVENTING_MASK_LEN];
	char iovbuf[WL_EVENTING_MASK_LEN + 12];	/* Room for "event_msgs" + '\0' + bitvec */
	uint32 buf_key_b4_m4 = 1;
	uint8 msglen;
	eventmsgs_ext_t *eventmask_msg = NULL;
	uint32 event_log_max_sets = 0;
	char *iov_buf = NULL;
	int ret2 = 0;
	uint32 wnm_cap = 0;
#if defined(BCMSUP_4WAY_HANDSHAKE)
	uint32 sup_wpa = 1;
#endif /* BCMSUP_4WAY_HANDSHAKE */
#if defined(CUSTOM_AMPDU_BA_WSIZE) || (defined(WLAIBSS) && \
	defined(CUSTOM_IBSS_AMPDU_BA_WSIZE))
	uint32 ampdu_ba_wsize = 0;
#endif /* CUSTOM_AMPDU_BA_WSIZE || (WLAIBSS && CUSTOM_IBSS_AMPDU_BA_WSIZE) */
#if defined(CUSTOM_AMPDU_MPDU)
	int32 ampdu_mpdu = 0;
#endif // endif
#if defined(CUSTOM_AMPDU_RELEASE)
	int32 ampdu_release = 0;
#endif // endif
#if defined(CUSTOM_AMSDU_AGGSF)
	int32 amsdu_aggsf = 0;
#endif // endif
#if defined(BCMSDIO)
#ifdef PROP_TXSTATUS
	int wlfc_enable = TRUE;
#ifndef DISABLE_11N
	uint32 hostreorder = 1;
	uint chipid = 0;
#endif /* DISABLE_11N */
#endif /* PROP_TXSTATUS */
#endif // endif
#ifndef PCIE_FULL_DONGLE
	uint32 wl_ap_isolate;
#endif /* PCIE_FULL_DONGLE */
	uint32 frameburst = CUSTOM_FRAMEBURST_SET;
	uint wnm_bsstrans_resp = 0;
#ifdef SUPPORT_SET_CAC
	uint32 cac = 1;
#endif /* SUPPORT_SET_CAC */
#ifdef DHD_BUS_MEM_ACCESS
	uint32 enable_memuse = 1;
#endif /* DHD_BUS_MEM_ACCESS */
#ifdef OEM_ANDROID
#ifdef DHD_ENABLE_LPC
	uint32 lpc = 1;
#endif /* DHD_ENABLE_LPC */
	uint power_mode = PM_FAST;
#if defined(BCMSDIO)
	uint32 dongle_align = DHD_SDALIGN;
	uint32 glom = CUSTOM_GLOM_SETTING;
#endif /* defined(BCMSDIO) */
#if (defined(CUSTOMER_HW2) || defined(BOARD_HIKEY)) && defined(USE_WL_CREDALL)
	uint32 credall = 1;
#endif // endif
	uint bcn_timeout = CUSTOM_BCN_TIMEOUT;
	uint scancache_enab = TRUE;
#ifdef ENABLE_BCN_LI_BCN_WAKEUP
	uint32 bcn_li_bcn = 1;
#endif /* ENABLE_BCN_LI_BCN_WAKEUP */
	uint retry_max = CUSTOM_ASSOC_RETRY_MAX;
#if defined(ARP_OFFLOAD_SUPPORT)
	int arpoe = 0;
#endif // endif
	int scan_assoc_time = DHD_SCAN_ASSOC_ACTIVE_TIME;
	int scan_unassoc_time = DHD_SCAN_UNASSOC_ACTIVE_TIME;
	int scan_passive_time = DHD_SCAN_PASSIVE_TIME;
	char buf[WLC_IOCTL_SMLEN];
	char *ptr;
	uint32 listen_interval = CUSTOM_LISTEN_INTERVAL; /* Default Listen Interval in Beacons */
#if defined(DHD_8021X_DUMP) && defined(SHOW_LOGTRACE)
	wl_el_tag_params_t *el_tag = NULL;
#endif /* DHD_8021X_DUMP && SHOW_LOGTRACE */
#ifdef ROAM_ENABLE
	uint roamvar = 0;
	int roam_trigger[2] = {CUSTOM_ROAM_TRIGGER_SETTING, WLC_BAND_ALL};
	int roam_scan_period[2] = {10, WLC_BAND_ALL};
	int roam_delta[2] = {CUSTOM_ROAM_DELTA_SETTING, WLC_BAND_ALL};
#ifdef ROAM_AP_ENV_DETECTION
	int roam_env_mode = AP_ENV_INDETERMINATE;
#endif /* ROAM_AP_ENV_DETECTION */
#ifdef FULL_ROAMING_SCAN_PERIOD_60_SEC
	int roam_fullscan_period = 60;
#else /* FULL_ROAMING_SCAN_PERIOD_60_SEC */
	int roam_fullscan_period = 120;
#endif /* FULL_ROAMING_SCAN_PERIOD_60_SEC */
#ifdef DISABLE_BCNLOSS_ROAM
	uint roam_bcnloss_off = 1;
#endif /* DISABLE_BCNLOSS_ROAM */
#else
#ifdef DISABLE_BUILTIN_ROAM
	uint roamvar = 1;
#endif /* DISABLE_BUILTIN_ROAM */
#endif /* ROAM_ENABLE */
#if defined(SOFTAP)
	uint dtim = 1;
#endif // endif
#if (defined(AP) && !defined(WLP2P)) || (!defined(AP) && defined(WL_CFG80211))
	struct ether_addr p2p_ea;
#endif // endif
#ifdef BCMCCX
	uint32 ccx = 1;
#endif // endif
#ifdef SOFTAP_UAPSD_OFF
	uint32 wme_apsd = 0;
#endif /* SOFTAP_UAPSD_OFF */
#if (defined(AP) || defined(WLP2P)) && !defined(SOFTAP_AND_GC)
	uint32 apsta = 1; /* Enable APSTA mode */
#elif defined(SOFTAP_AND_GC)
	uint32 apsta = 0;
	int ap_mode = 1;
#endif /* (defined(AP) || defined(WLP2P)) && !defined(SOFTAP_AND_GC) */
#ifdef GET_CUSTOM_MAC_ENABLE
	struct ether_addr ea_addr;
#endif /* GET_CUSTOM_MAC_ENABLE */
#ifdef OKC_SUPPORT
	uint32 okc = 1;
#endif // endif
#ifdef DISABLE_11N
	uint32 nmode = 0;
#endif /* DISABLE_11N */
#ifdef USE_WL_TXBF
	uint32 txbf = 1;
#endif /* USE_WL_TXBF */
#ifdef DISABLE_TXBFR
	uint32 txbf_bfr_cap = 0;
#endif /* DISABLE_TXBFR */
#ifdef AMPDU_VO_ENABLE
	struct ampdu_tid_control tid;
#endif // endif
#if defined(PROP_TXSTATUS)
#ifdef USE_WFA_CERT_CONF
	uint32 proptx = 0;
#endif /* USE_WFA_CERT_CONF */
#endif /* PROP_TXSTATUS */
#ifdef DHD_SET_FW_HIGHSPEED
	uint32 ack_ratio = 250;
	uint32 ack_ratio_depth = 64;
#endif /* DHD_SET_FW_HIGHSPEED */
#if defined(SUPPORT_2G_VHT) || defined(SUPPORT_5G_1024QAM_VHT)
	uint32 vht_features = 0; /* init to 0, will be set based on each support */
#endif /* SUPPORT_2G_VHT || SUPPORT_5G_1024QAM_VHT */
#ifdef DISABLE_11N_PROPRIETARY_RATES
	uint32 ht_features = 0;
#endif /* DISABLE_11N_PROPRIETARY_RATES */
#ifdef CUSTOM_PSPRETEND_THR
	uint32 pspretend_thr = CUSTOM_PSPRETEND_THR;
#endif // endif
#ifdef CUSTOM_EVENT_PM_WAKE
	uint32 pm_awake_thresh = CUSTOM_EVENT_PM_WAKE;
#endif /* CUSTOM_EVENT_PM_WAKE */
#ifdef DISABLE_PRUNED_SCAN
	uint32 scan_features = 0;
#endif /* DISABLE_PRUNED_SCAN */
#ifdef BCMPCIE_OOB_HOST_WAKE
	uint32 hostwake_oob = 0;
#endif /* BCMPCIE_OOB_HOST_WAKE */
#ifdef EVENT_LOG_RATE_HC
	/* threshold number of lines per second */
#define EVENT_LOG_RATE_HC_THRESHOLD	1000
	uint32 event_log_rate_hc = EVENT_LOG_RATE_HC_THRESHOLD;
#endif /* EVENT_LOG_RATE_HC */
#if defined(WBTEXT) && defined(WBTEXT_BTMDELTA)
	uint32 btmdelta = WBTEXT_BTMDELTA;
#endif /* WBTEXT && WBTEXT_BTMDELTA */
	wl_wlc_version_t wlc_ver;
#ifdef PKT_FILTER_SUPPORT
	dhd_pkt_filter_enable = TRUE;
#ifdef APF
	dhd->apf_set = FALSE;
#endif /* APF */
#endif /* PKT_FILTER_SUPPORT */
	dhd->suspend_bcn_li_dtim = CUSTOM_SUSPEND_BCN_LI_DTIM;
#ifdef ENABLE_MAX_DTIM_IN_SUSPEND
	dhd->max_dtim_enable = TRUE;
#else
	dhd->max_dtim_enable = FALSE;
#endif /* ENABLE_MAX_DTIM_IN_SUSPEND */
	dhd->disable_dtim_in_suspend = FALSE;
#ifdef CUSTOM_SET_OCLOFF
	dhd->ocl_off = FALSE;
#endif /* CUSTOM_SET_OCLOFF */
#ifdef SUPPORT_SET_TID
	dhd->tid_mode = SET_TID_OFF;
	dhd->target_uid = 0;
	dhd->target_tid = 0;
#endif /* SUPPORT_SET_TID */
	DHD_TRACE(("Enter %s\n", __FUNCTION__));
	dhd->op_mode = 0;

#if defined(CUSTOM_COUNTRY_CODE) && (defined(CUSTOMER_HW2) || defined(BOARD_HIKEY))
	/* clear AP flags */
	dhd->dhd_cflags &= ~WLAN_PLAT_AP_FLAG;
#endif /* CUSTOM_COUNTRY_CODE && (CUSTOMER_HW2 || BOARD_HIKEY) */
#ifdef CUSTOMER_HW4_DEBUG
	if (!dhd_validate_chipid(dhd)) {
		DHD_ERROR(("%s: CONFIG_BCMXXX and CHIP ID(%x) is mismatched\n",
			__FUNCTION__, dhd_bus_chip_id(dhd)));
#ifndef SUPPORT_MULTIPLE_CHIPS
		ret = BCME_BADARG;
		goto done;
#endif /* !SUPPORT_MULTIPLE_CHIPS */
	}
#endif /* CUSTOMER_HW4_DEBUG */

	/* query for 'ver' to get version info from firmware */
	memset(buf, 0, sizeof(buf));
	ptr = buf;
	ret = dhd_iovar(dhd, 0, "ver", NULL, 0, (char *)&buf, sizeof(buf), FALSE);
	if (ret < 0)
		DHD_ERROR(("%s failed %d\n", __FUNCTION__, ret));
	else {
		bcmstrtok(&ptr, "\n", 0);
		/* Print fw version info */
		DHD_ERROR(("Firmware version = %s\n", buf));
		strncpy(fw_version, buf, FW_VER_STR_LEN);
		fw_version[FW_VER_STR_LEN-1] = '\0';
#if defined(BCMSDIO) || defined(BCMPCIE)
		dhd_set_version_info(dhd, buf);
#endif /* BCMSDIO || BCMPCIE */
	}

#ifdef BOARD_HIKEY
	/* Set op_mode as MFG_MODE if WLTEST is present in "wl ver" */
	if (strstr(fw_version, "WLTEST") != NULL) {
		DHD_ERROR(("%s: wl ver has WLTEST, setting op_mode as DHD_FLAG_MFG_MODE\n",
			__FUNCTION__));
		op_mode = DHD_FLAG_MFG_MODE;
	}
#endif /* BOARD_HIKEY */

	if ((!op_mode && dhd_get_fw_mode(dhd->info) == DHD_FLAG_MFG_MODE) ||
		(op_mode == DHD_FLAG_MFG_MODE)) {
		dhd->op_mode = DHD_FLAG_MFG_MODE;
#ifdef DHD_PCIE_NATIVE_RUNTIMEPM
		/* disable runtimePM by default in MFG mode. */
		pm_runtime_disable(dhd_bus_to_dev(dhd->bus));
#endif /* DHD_PCIE_NATIVE_RUNTIMEPM */
#ifdef DHD_PCIE_RUNTIMEPM
		/* Disable RuntimePM in mfg mode */
		DHD_DISABLE_RUNTIME_PM(dhd);
		DHD_ERROR(("%s : Disable RuntimePM in Manufacturing Firmware\n", __FUNCTION__));
#endif /* DHD_PCIE_RUNTIMEPM */
		/* Check and adjust IOCTL response timeout for Manufacturing firmware */
		dhd_os_set_ioctl_resp_timeout(MFG_IOCTL_RESP_TIMEOUT);
		DHD_ERROR(("%s : Set IOCTL response time for Manufacturing Firmware\n",
			__FUNCTION__));
	} else {
		dhd_os_set_ioctl_resp_timeout(IOCTL_RESP_TIMEOUT);
		DHD_INFO(("%s : Set IOCTL response time.\n", __FUNCTION__));
	}
#ifdef BCMPCIE_OOB_HOST_WAKE
	ret = dhd_iovar(dhd, 0, "bus:hostwake_oob", NULL, 0, (char *)&hostwake_oob,
		sizeof(hostwake_oob), FALSE);
	if (ret < 0) {
		DHD_ERROR(("%s: hostwake_oob IOVAR not present, proceed\n", __FUNCTION__));
	} else {
		if (hostwake_oob == 0) {
			DHD_ERROR(("%s: hostwake_oob is not enabled in the NVRAM, STOP\n",
				__FUNCTION__));
			ret = BCME_UNSUPPORTED;
			goto done;
		} else {
			DHD_ERROR(("%s: hostwake_oob enabled\n", __FUNCTION__));
		}
	}
#endif /* BCMPCIE_OOB_HOST_WAKE */
#ifdef DNGL_AXI_ERROR_LOGGING
	ret = dhd_iovar(dhd, 0, "axierror_logbuf_addr", NULL, 0, (char *)&dhd->axierror_logbuf_addr,
		sizeof(dhd->axierror_logbuf_addr), FALSE);
	if (ret < 0) {
		DHD_ERROR(("%s: axierror_logbuf_addr IOVAR not present, proceed\n", __FUNCTION__));
		dhd->axierror_logbuf_addr = 0;
	} else {
		DHD_ERROR(("%s: axierror_logbuf_addr : 0x%x\n", __FUNCTION__,
			dhd->axierror_logbuf_addr));
	}
#endif /* DNGL_AXI_ERROR_LOGGING */

#ifdef EVENT_LOG_RATE_HC
	ret = dhd_iovar(dhd, 0, "event_log_rate_hc", (char *)&event_log_rate_hc,
		sizeof(event_log_rate_hc), NULL, 0, TRUE);
	if (ret < 0) {
		DHD_ERROR(("%s event_log_rate_hc set failed %d\n", __FUNCTION__, ret));
	} else {
		DHD_ERROR(("%s event_log_rate_hc set with threshold:%d\n", __FUNCTION__,
			event_log_rate_hc));
	}
#endif /* EVENT_LOG_RATE_HC */

#ifdef GET_CUSTOM_MAC_ENABLE
	ret = wifi_platform_get_mac_addr(dhd->info->adapter, ea_addr.octet);
	if (!ret) {
		ret = dhd_iovar(dhd, 0, "cur_etheraddr", (char *)&ea_addr, ETHER_ADDR_LEN, NULL, 0,
			TRUE);
		if (ret < 0) {
			DHD_ERROR(("%s: can't set MAC address, error=%d\n", __FUNCTION__, ret));
			ret = BCME_NOTUP;
			goto done;
		}
		memcpy(dhd->mac.octet, ea_addr.octet, ETHER_ADDR_LEN);
	} else {
#endif /* GET_CUSTOM_MAC_ENABLE */
		/* Get the default device MAC address directly from firmware */
		ret = dhd_iovar(dhd, 0, "cur_etheraddr", NULL, 0, (char *)&buf, sizeof(buf), FALSE);
		if (ret < 0) {
			DHD_ERROR(("%s: can't get MAC address, error=%d\n", __FUNCTION__, ret));
			ret = BCME_NOTUP;
			goto done;
		}
		/* Update public MAC address after reading from Firmware */
		memcpy(dhd->mac.octet, buf, ETHER_ADDR_LEN);
#ifdef GET_CUSTOM_MAC_ENABLE
	}
#endif /* GET_CUSTOM_MAC_ENABLE */

	if ((ret = dhd_apply_default_clm(dhd, clm_path)) < 0) {
		DHD_ERROR(("%s: CLM set failed. Abort initialization.\n", __FUNCTION__));
		goto done;
	}

	/* get capabilities from firmware */
	{
		uint32 cap_buf_size = sizeof(dhd->fw_capabilities);
		memset(dhd->fw_capabilities, 0, cap_buf_size);
		ret = dhd_iovar(dhd, 0, "cap", NULL, 0, dhd->fw_capabilities, (cap_buf_size - 1),
			FALSE);
		if (ret < 0) {
			DHD_ERROR(("%s: Get Capability failed (error=%d)\n",
				__FUNCTION__, ret));
			return 0;
		}

		memmove(&dhd->fw_capabilities[1], dhd->fw_capabilities, (cap_buf_size - 1));
		dhd->fw_capabilities[0] = ' ';
		dhd->fw_capabilities[cap_buf_size - 2] = ' ';
		dhd->fw_capabilities[cap_buf_size - 1] = '\0';
	}
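	/* The leading and trailing spaces added above let substring checks
	 * such as FW_SUPPORTED() match whole, space-delimited capability
	 * tokens without false hits on prefixes of other capability names.
	 */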
  8944. if ((!op_mode && dhd_get_fw_mode(dhd->info) == DHD_FLAG_HOSTAP_MODE) ||
  8945. (op_mode == DHD_FLAG_HOSTAP_MODE)) {
  8946. #ifdef SET_RANDOM_MAC_SOFTAP
  8947. uint rand_mac;
  8948. #endif /* SET_RANDOM_MAC_SOFTAP */
  8949. dhd->op_mode = DHD_FLAG_HOSTAP_MODE;
  8950. #if defined(ARP_OFFLOAD_SUPPORT)
  8951. arpoe = 0;
  8952. #endif // endif
  8953. #ifdef PKT_FILTER_SUPPORT
  8954. dhd_pkt_filter_enable = FALSE;
  8955. #endif // endif
  8956. #ifdef SET_RANDOM_MAC_SOFTAP
  8957. SRANDOM32((uint)jiffies);
  8958. rand_mac = RANDOM32();
  8959. iovbuf[0] = (unsigned char)(vendor_oui >> 16) | 0x02; /* local admin bit */
  8960. iovbuf[1] = (unsigned char)(vendor_oui >> 8);
  8961. iovbuf[2] = (unsigned char)vendor_oui;
  8962. iovbuf[3] = (unsigned char)(rand_mac & 0x0F) | 0xF0;
  8963. iovbuf[4] = (unsigned char)(rand_mac >> 8);
  8964. iovbuf[5] = (unsigned char)(rand_mac >> 16);
  8965. ret = dhd_iovar(dhd, 0, "cur_etheraddr", (char *)&iovbuf, ETHER_ADDR_LEN, NULL, 0,
  8966. TRUE);
  8967. if (ret < 0) {
  8968. DHD_ERROR(("%s: can't set MAC address , error=%d\n", __FUNCTION__, ret));
  8969. } else
  8970. memcpy(dhd->mac.octet, iovbuf, ETHER_ADDR_LEN);
  8971. #endif /* SET_RANDOM_MAC_SOFTAP */
  8972. #ifdef USE_DYNAMIC_F2_BLKSIZE
  8973. dhdsdio_func_blocksize(dhd, 2, DYNAMIC_F2_BLKSIZE_FOR_NONLEGACY);
  8974. #endif /* USE_DYNAMIC_F2_BLKSIZE */
  8975. #ifdef SOFTAP_UAPSD_OFF
  8976. ret = dhd_iovar(dhd, 0, "wme_apsd", (char *)&wme_apsd, sizeof(wme_apsd), NULL, 0,
  8977. TRUE);
  8978. if (ret < 0) {
  8979. DHD_ERROR(("%s: set wme_apsd 0 fail (error=%d)\n",
  8980. __FUNCTION__, ret));
  8981. }
  8982. #endif /* SOFTAP_UAPSD_OFF */
  8983. #if defined(CUSTOM_COUNTRY_CODE) && (defined(CUSTOMER_HW2) || defined(BOARD_HIKEY))
  8984. /* set AP flag for specific country code of SOFTAP */
  8985. dhd->dhd_cflags |= WLAN_PLAT_AP_FLAG | WLAN_PLAT_NODFS_FLAG;
  8986. #endif /* CUSTOM_COUNTRY_CODE && (CUSTOMER_HW2 || BOARD_HIKEY) */
  8987. } else if ((!op_mode && dhd_get_fw_mode(dhd->info) == DHD_FLAG_MFG_MODE) ||
  8988. (op_mode == DHD_FLAG_MFG_MODE)) {
  8989. #if defined(ARP_OFFLOAD_SUPPORT)
  8990. arpoe = 0;
  8991. #endif /* ARP_OFFLOAD_SUPPORT */
  8992. #ifdef PKT_FILTER_SUPPORT
  8993. dhd_pkt_filter_enable = FALSE;
  8994. #endif /* PKT_FILTER_SUPPORT */
  8995. dhd->op_mode = DHD_FLAG_MFG_MODE;
  8996. #ifdef USE_DYNAMIC_F2_BLKSIZE
  8997. dhdsdio_func_blocksize(dhd, 2, DYNAMIC_F2_BLKSIZE_FOR_NONLEGACY);
  8998. #endif /* USE_DYNAMIC_F2_BLKSIZE */
  8999. #ifndef CUSTOM_SET_ANTNPM
  9000. #ifndef IGUANA_LEGACY_CHIPS
  9001. if (FW_SUPPORTED(dhd, rsdb)) {
  9002. wl_config_t rsdb_mode;
  9003. memset(&rsdb_mode, 0, sizeof(rsdb_mode));
  9004. ret = dhd_iovar(dhd, 0, "rsdb_mode", (char *)&rsdb_mode, sizeof(rsdb_mode),
  9005. NULL, 0, TRUE);
  9006. if (ret < 0) {
  9007. DHD_ERROR(("%s Disable rsdb_mode is failed ret= %d\n",
  9008. __FUNCTION__, ret));
  9009. }
  9010. }
  9011. #endif /* IGUANA_LEGACY_CHIPS */
  9012. #endif /* !CUSTOM_SET_ANTNPM */
  9013. } else {
  9014. uint32 concurrent_mode = 0;
  9015. if ((!op_mode && dhd_get_fw_mode(dhd->info) == DHD_FLAG_P2P_MODE) ||
  9016. (op_mode == DHD_FLAG_P2P_MODE)) {
  9017. #if defined(ARP_OFFLOAD_SUPPORT)
  9018. arpoe = 0;
  9019. #endif // endif
  9020. #ifdef PKT_FILTER_SUPPORT
  9021. dhd_pkt_filter_enable = FALSE;
  9022. #endif // endif
  9023. dhd->op_mode = DHD_FLAG_P2P_MODE;
  9024. } else if ((!op_mode && dhd_get_fw_mode(dhd->info) == DHD_FLAG_IBSS_MODE) ||
  9025. (op_mode == DHD_FLAG_IBSS_MODE)) {
  9026. dhd->op_mode = DHD_FLAG_IBSS_MODE;
  9027. } else
  9028. dhd->op_mode = DHD_FLAG_STA_MODE;
  9029. #if defined(OEM_ANDROID) && !defined(AP) && defined(WLP2P)
  9030. if (dhd->op_mode != DHD_FLAG_IBSS_MODE &&
  9031. (concurrent_mode = dhd_get_concurrent_capabilites(dhd))) {
  9032. #if defined(ARP_OFFLOAD_SUPPORT)
  9033. arpoe = 1;
  9034. #endif // endif
  9035. dhd->op_mode |= concurrent_mode;
  9036. }
  9037. /* Check if we are enabling p2p */
  9038. if (dhd->op_mode & DHD_FLAG_P2P_MODE) {
  9039. ret = dhd_iovar(dhd, 0, "apsta", (char *)&apsta, sizeof(apsta), NULL, 0,
  9040. TRUE);
  9041. if (ret < 0)
  9042. DHD_ERROR(("%s APSTA for P2P failed ret= %d\n", __FUNCTION__, ret));
  9043. #if defined(SOFTAP_AND_GC)
  9044. if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_AP,
  9045. (char *)&ap_mode, sizeof(ap_mode), TRUE, 0)) < 0) {
  9046. DHD_ERROR(("%s WLC_SET_AP failed %d\n", __FUNCTION__, ret));
  9047. }
  9048. #endif // endif
  9049. memcpy(&p2p_ea, &dhd->mac, ETHER_ADDR_LEN);
  9050. ETHER_SET_LOCALADDR(&p2p_ea);
  9051. ret = dhd_iovar(dhd, 0, "p2p_da_override", (char *)&p2p_ea, sizeof(p2p_ea),
  9052. NULL, 0, TRUE);
  9053. if (ret < 0)
  9054. DHD_ERROR(("%s p2p_da_override ret= %d\n", __FUNCTION__, ret));
  9055. else
  9056. DHD_INFO(("dhd_preinit_ioctls: p2p_da_override succeeded\n"));
  9057. }
  9058. #else
  9059. (void)concurrent_mode;
  9060. #endif /* defined(OEM_ANDROID) && !defined(AP) && defined(WLP2P) */
  9061. }
  9062. #ifdef DISABLE_PRUNED_SCAN
  9063. if (FW_SUPPORTED(dhd, rsdb)) {
  9064. ret = dhd_iovar(dhd, 0, "scan_features", (char *)&scan_features,
  9065. sizeof(scan_features), iovbuf, sizeof(iovbuf), FALSE);
  9066. if (ret < 0) {
  9067. DHD_ERROR(("%s get scan_features is failed ret=%d\n",
  9068. __FUNCTION__, ret));
  9069. } else {
  9070. memcpy(&scan_features, iovbuf, 4);
  9071. scan_features &= ~RSDB_SCAN_DOWNGRADED_CH_PRUNE_ROAM;
  9072. ret = dhd_iovar(dhd, 0, "scan_features", (char *)&scan_features,
  9073. sizeof(scan_features), NULL, 0, TRUE);
  9074. if (ret < 0) {
  9075. DHD_ERROR(("%s set scan_features is failed ret=%d\n",
  9076. __FUNCTION__, ret));
  9077. }
  9078. }
  9079. }
  9080. #endif /* DISABLE_PRUNED_SCAN */
  9081. DHD_ERROR(("Firmware up: op_mode=0x%04x, MAC="MACDBG"\n",
  9082. dhd->op_mode, MAC2STRDBG(dhd->mac.octet)));
  9083. #if defined(CUSTOMER_HW2) || defined(BOARD_HIKEY)
  9084. #if defined(DHD_BLOB_EXISTENCE_CHECK)
  9085. if (!dhd->is_blob)
  9086. #endif /* DHD_BLOB_EXISTENCE_CHECK */
  9087. {
  9088. /* get a ccode and revision for the country code */
  9089. #if defined(CUSTOM_COUNTRY_CODE)
  9090. get_customized_country_code(dhd->info->adapter, dhd->dhd_cspec.country_abbrev,
  9091. &dhd->dhd_cspec, dhd->dhd_cflags);
  9092. #else
  9093. get_customized_country_code(dhd->info->adapter, dhd->dhd_cspec.country_abbrev,
  9094. &dhd->dhd_cspec);
  9095. #endif /* CUSTOM_COUNTRY_CODE */
  9096. }
  9097. #endif /* CUSTOMER_HW2 || BOARD_HIKEY */
  9098. #if defined(RXFRAME_THREAD) && defined(RXTHREAD_ONLYSTA)
  9099. if (dhd->op_mode == DHD_FLAG_HOSTAP_MODE)
  9100. dhd->info->rxthread_enabled = FALSE;
  9101. else
  9102. dhd->info->rxthread_enabled = TRUE;
  9103. #endif // endif
  9104. /* Set Country code */
  9105. if (dhd->dhd_cspec.ccode[0] != 0) {
  9106. ret = dhd_iovar(dhd, 0, "country", (char *)&dhd->dhd_cspec, sizeof(wl_country_t),
  9107. NULL, 0, TRUE);
  9108. if (ret < 0)
  9109. DHD_ERROR(("%s: country code setting failed\n", __FUNCTION__));
  9110. }
  9111. /* Set Listen Interval */
  9112. ret = dhd_iovar(dhd, 0, "assoc_listen", (char *)&listen_interval, sizeof(listen_interval),
  9113. NULL, 0, TRUE);
  9114. if (ret < 0)
  9115. DHD_ERROR(("%s assoc_listen failed %d\n", __FUNCTION__, ret));
  9116. #if defined(ROAM_ENABLE) || defined(DISABLE_BUILTIN_ROAM)
  9117. #ifdef USE_WFA_CERT_CONF
  9118. if (sec_get_param_wfa_cert(dhd, SET_PARAM_ROAMOFF, &roamvar) == BCME_OK) {
  9119. DHD_ERROR(("%s: read roam_off param =%d\n", __FUNCTION__, roamvar));
  9120. }
  9121. #endif /* USE_WFA_CERT_CONF */
  9122. /* Disable built-in roaming to allowed ext supplicant to take care of roaming */
  9123. ret = dhd_iovar(dhd, 0, "roam_off", (char *)&roamvar, sizeof(roamvar), NULL, 0, TRUE);
  9124. #endif /* ROAM_ENABLE || DISABLE_BUILTIN_ROAM */
  9125. #if defined(ROAM_ENABLE)
  9126. #ifdef DISABLE_BCNLOSS_ROAM
  9127. ret = dhd_iovar(dhd, 0, "roam_bcnloss_off", (char *)&roam_bcnloss_off,
  9128. sizeof(roam_bcnloss_off), NULL, 0, TRUE);
  9129. #endif /* DISABLE_BCNLOSS_ROAM */
  9130. if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_ROAM_TRIGGER, roam_trigger,
  9131. sizeof(roam_trigger), TRUE, 0)) < 0)
  9132. DHD_ERROR(("%s: roam trigger set failed %d\n", __FUNCTION__, ret));
  9133. if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_ROAM_SCAN_PERIOD, roam_scan_period,
  9134. sizeof(roam_scan_period), TRUE, 0)) < 0)
  9135. DHD_ERROR(("%s: roam scan period set failed %d\n", __FUNCTION__, ret));
  9136. if ((dhd_wl_ioctl_cmd(dhd, WLC_SET_ROAM_DELTA, roam_delta,
  9137. sizeof(roam_delta), TRUE, 0)) < 0)
  9138. DHD_ERROR(("%s: roam delta set failed %d\n", __FUNCTION__, ret));
  9139. ret = dhd_iovar(dhd, 0, "fullroamperiod", (char *)&roam_fullscan_period,
  9140. sizeof(roam_fullscan_period), NULL, 0, TRUE);
  9141. if (ret < 0)
  9142. DHD_ERROR(("%s: roam fullscan period set failed %d\n", __FUNCTION__, ret));
  9143. #ifdef ROAM_AP_ENV_DETECTION
  9144. if (roam_trigger[0] == WL_AUTO_ROAM_TRIGGER) {
  9145. if (dhd_iovar(dhd, 0, "roam_env_detection", (char *)&roam_env_mode,
  9146. sizeof(roam_env_mode), NULL, 0, TRUE) == BCME_OK)
  9147. dhd->roam_env_detection = TRUE;
  9148. else
  9149. dhd->roam_env_detection = FALSE;
  9150. }
  9151. #endif /* ROAM_AP_ENV_DETECTION */
  9152. #endif /* ROAM_ENABLE */
  9153. #ifdef CUSTOM_EVENT_PM_WAKE
  9154. ret = dhd_iovar(dhd, 0, "const_awake_thresh", (char *)&pm_awake_thresh,
  9155. sizeof(pm_awake_thresh), NULL, 0, TRUE);
  9156. if (ret < 0) {
  9157. DHD_ERROR(("%s set const_awake_thresh failed %d\n", __FUNCTION__, ret));
  9158. }
  9159. #endif /* CUSTOM_EVENT_PM_WAKE */
  9160. #ifdef OKC_SUPPORT
  9161. ret = dhd_iovar(dhd, 0, "okc_enable", (char *)&okc, sizeof(okc), NULL, 0, TRUE);
  9162. #endif // endif
  9163. #ifdef BCMCCX
  9164. ret = dhd_iovar(dhd, 0, "ccx_enable", (char *)&ccx, sizeof(ccx), NULL, 0, TRUE);
  9165. #endif /* BCMCCX */
  9166. #ifdef WLTDLS
  9167. dhd->tdls_enable = FALSE;
  9168. dhd_tdls_set_mode(dhd, false);
  9169. #endif /* WLTDLS */
  9170. #ifdef DHD_ENABLE_LPC
  9171. /* Set lpc 1 */
  9172. ret = dhd_iovar(dhd, 0, "lpc", (char *)&lpc, sizeof(lpc), NULL, 0, TRUE);
  9173. if (ret < 0) {
  9174. DHD_ERROR(("%s Set lpc failed %d\n", __FUNCTION__, ret));
  9175. if (ret == BCME_NOTDOWN) {
  9176. uint wl_down = 1;
  9177. ret = dhd_wl_ioctl_cmd(dhd, WLC_DOWN,
  9178. (char *)&wl_down, sizeof(wl_down), TRUE, 0);
  9179. DHD_ERROR(("%s lpc fail WL_DOWN : %d, lpc = %d\n", __FUNCTION__, ret, lpc));
  9180. ret = dhd_iovar(dhd, 0, "lpc", (char *)&lpc, sizeof(lpc), NULL, 0, TRUE);
  9181. DHD_ERROR(("%s Set lpc ret --> %d\n", __FUNCTION__, ret));
  9182. }
  9183. }
  9184. #endif /* DHD_ENABLE_LPC */
#ifdef WLADPS
    if (dhd->op_mode & DHD_FLAG_STA_MODE) {
        if ((ret = dhd_enable_adps(dhd, ADPS_ENABLE)) != BCME_OK) {
            DHD_ERROR(("%s dhd_enable_adps failed %d\n",
                __FUNCTION__, ret));
        }
    }
#endif /* WLADPS */
#ifdef DHD_PM_CONTROL_FROM_FILE
    sec_control_pm(dhd, &power_mode);
#else
    /* Set PowerSave mode */
    (void) dhd_wl_ioctl_cmd(dhd, WLC_SET_PM, (char *)&power_mode, sizeof(power_mode), TRUE, 0);
#endif /* DHD_PM_CONTROL_FROM_FILE */
#if defined(BCMSDIO)
    /* Match Host and Dongle rx alignment */
    ret = dhd_iovar(dhd, 0, "bus:txglomalign", (char *)&dongle_align, sizeof(dongle_align),
        NULL, 0, TRUE);
#if (defined(CUSTOMER_HW2) || defined(BOARD_HIKEY)) && defined(USE_WL_CREDALL)
    /* enable credall to reduce the chance of running out of bus credits */
    ret = dhd_iovar(dhd, 0, "bus:credall", (char *)&credall, sizeof(credall), NULL, 0, TRUE);
#endif // endif
#ifdef USE_WFA_CERT_CONF
    if (sec_get_param_wfa_cert(dhd, SET_PARAM_BUS_TXGLOM_MODE, &glom) == BCME_OK) {
        DHD_ERROR(("%s, read txglom param =%d\n", __FUNCTION__, glom));
    }
#endif /* USE_WFA_CERT_CONF */
    if (glom != DEFAULT_GLOM_VALUE) {
        DHD_INFO(("%s set glom=0x%X\n", __FUNCTION__, glom));
        ret = dhd_iovar(dhd, 0, "bus:txglom", (char *)&glom, sizeof(glom), NULL, 0, TRUE);
    }
#endif /* defined(BCMSDIO) */
    /* Setup timeout if Beacons are lost and roam is off to report link down */
    ret = dhd_iovar(dhd, 0, "bcn_timeout", (char *)&bcn_timeout, sizeof(bcn_timeout), NULL, 0,
        TRUE);
    /* Setup assoc_retry_max count to reconnect target AP in dongle */
    ret = dhd_iovar(dhd, 0, "assoc_retry_max", (char *)&retry_max, sizeof(retry_max), NULL, 0,
        TRUE);
#if defined(AP) && !defined(WLP2P)
    ret = dhd_iovar(dhd, 0, "apsta", (char *)&apsta, sizeof(apsta), NULL, 0, TRUE);
#endif /* defined(AP) && !defined(WLP2P) */
#ifdef MIMO_ANT_SETTING
    dhd_sel_ant_from_file(dhd);
#endif /* MIMO_ANT_SETTING */
#if defined(OEM_ANDROID) && defined(SOFTAP)
    if (ap_fw_loaded == TRUE) {
        dhd_wl_ioctl_cmd(dhd, WLC_SET_DTIMPRD, (char *)&dtim, sizeof(dtim), TRUE, 0);
    }
#endif /* defined(OEM_ANDROID) && defined(SOFTAP) */
#if defined(KEEP_ALIVE)
    {
        /* Set Keep Alive : be sure to use FW with -keepalive */
        int res;
#if defined(OEM_ANDROID) && defined(SOFTAP)
        if (ap_fw_loaded == FALSE)
#endif /* defined(OEM_ANDROID) && defined(SOFTAP) */
            if (!(dhd->op_mode &
                (DHD_FLAG_HOSTAP_MODE | DHD_FLAG_MFG_MODE))) {
                if ((res = dhd_keep_alive_onoff(dhd)) < 0)
                    DHD_ERROR(("%s set keepalive failed %d\n",
                        __FUNCTION__, res));
            }
    }
#endif /* defined(KEEP_ALIVE) */
#ifdef USE_WL_TXBF
    ret = dhd_iovar(dhd, 0, "txbf", (char *)&txbf, sizeof(txbf), NULL, 0, TRUE);
    if (ret < 0)
        DHD_ERROR(("%s Set txbf failed %d\n", __FUNCTION__, ret));
#endif /* USE_WL_TXBF */
    ret = dhd_iovar(dhd, 0, "scancache", (char *)&scancache_enab, sizeof(scancache_enab), NULL,
        0, TRUE);
    if (ret < 0) {
        DHD_ERROR(("%s Set scancache failed %d\n", __FUNCTION__, ret));
    }
#else /* OEM_ANDROID */
    if ((ret = dhd_apply_default_clm(dhd, clm_path)) < 0) {
        DHD_ERROR(("%s: CLM set failed. Abort initialization.\n", __FUNCTION__));
        goto done;
    }
#if defined(KEEP_ALIVE)
    if (!(dhd->op_mode &
        (DHD_FLAG_HOSTAP_MODE | DHD_FLAG_MFG_MODE))) {
        if ((ret = dhd_keep_alive_onoff(dhd)) < 0)
            DHD_ERROR(("%s set keepalive failed %d\n",
                __FUNCTION__, ret));
    }
#endif // endif
    /* Get capabilities from firmware */
    memset(dhd->fw_capabilities, 0, sizeof(dhd->fw_capabilities));
    ret = dhd_iovar(dhd, 0, "cap", NULL, 0, dhd->fw_capabilities, sizeof(dhd->fw_capabilities),
        FALSE);
    if (ret < 0) {
        DHD_ERROR(("%s: Get Capability failed (error=%d)\n",
            __FUNCTION__, ret));
        goto done;
    }
#endif /* OEM_ANDROID */
    ret = dhd_iovar(dhd, 0, "event_log_max_sets", NULL, 0, (char *)&event_log_max_sets,
        sizeof(event_log_max_sets), FALSE);
    if (ret == BCME_OK) {
        dhd->event_log_max_sets = event_log_max_sets;
    } else {
        dhd->event_log_max_sets = NUM_EVENT_LOG_SETS;
    }
    /* Make sure max_sets is set first with wmb and then sets_queried,
     * this will be used during parsing the logsets in the reverse order.
     */
    OSL_SMP_WMB();
    dhd->event_log_max_sets_queried = TRUE;
    DHD_ERROR(("%s: event_log_max_sets: %d ret: %d\n",
        __FUNCTION__, dhd->event_log_max_sets, ret));
#ifdef DHD_BUS_MEM_ACCESS
    ret = dhd_iovar(dhd, 0, "enable_memuse", (char *)&enable_memuse,
        sizeof(enable_memuse), iovbuf, sizeof(iovbuf), FALSE);
    if (ret < 0) {
        DHD_ERROR(("%s: enable_memuse failed ret=%d\n",
            __FUNCTION__, ret));
    } else {
        DHD_ERROR(("%s: enable_memuse = %d\n",
            __FUNCTION__, enable_memuse));
    }
#endif /* DHD_BUS_MEM_ACCESS */
#ifdef DISABLE_TXBFR
    ret = dhd_iovar(dhd, 0, "txbf_bfr_cap", (char *)&txbf_bfr_cap, sizeof(txbf_bfr_cap), NULL,
        0, TRUE);
    if (ret < 0) {
        DHD_ERROR(("%s Clear txbf_bfr_cap failed %d\n", __FUNCTION__, ret));
    }
#endif /* DISABLE_TXBFR */
#ifdef USE_WFA_CERT_CONF
#ifdef USE_WL_FRAMEBURST
    if (sec_get_param_wfa_cert(dhd, SET_PARAM_FRAMEBURST, &frameburst) == BCME_OK) {
        DHD_ERROR(("%s, read frameburst param=%d\n", __FUNCTION__, frameburst));
    }
#endif /* USE_WL_FRAMEBURST */
    g_frameburst = frameburst;
#endif /* USE_WFA_CERT_CONF */
#ifdef DISABLE_WL_FRAMEBURST_SOFTAP
    /* Disable Framebursting for SoftAP */
    if (dhd->op_mode & DHD_FLAG_HOSTAP_MODE) {
        frameburst = 0;
    }
#endif /* DISABLE_WL_FRAMEBURST_SOFTAP */
    /* Set frameburst to value */
    if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_FAKEFRAG, (char *)&frameburst,
        sizeof(frameburst), TRUE, 0)) < 0) {
        DHD_INFO(("%s frameburst not supported %d\n", __FUNCTION__, ret));
    }
#ifdef DHD_SET_FW_HIGHSPEED
    /* Set ack_ratio */
    ret = dhd_iovar(dhd, 0, "ack_ratio", (char *)&ack_ratio, sizeof(ack_ratio), NULL, 0, TRUE);
    if (ret < 0) {
        DHD_ERROR(("%s Set ack_ratio failed %d\n", __FUNCTION__, ret));
    }
    /* Set ack_ratio_depth */
    ret = dhd_iovar(dhd, 0, "ack_ratio_depth", (char *)&ack_ratio_depth,
        sizeof(ack_ratio_depth), NULL, 0, TRUE);
    if (ret < 0) {
        DHD_ERROR(("%s Set ack_ratio_depth failed %d\n", __FUNCTION__, ret));
    }
#endif /* DHD_SET_FW_HIGHSPEED */
    iov_buf = (char *)MALLOC(dhd->osh, WLC_IOCTL_SMLEN);
    if (iov_buf == NULL) {
        DHD_ERROR(("failed to allocate %d bytes for iov_buf\n", WLC_IOCTL_SMLEN));
        ret = BCME_NOMEM;
        goto done;
    }
#ifdef WLAIBSS
    /* Apply AIBSS configurations */
    if ((ret = dhd_preinit_aibss_ioctls(dhd, iov_buf)) != BCME_OK) {
        DHD_ERROR(("%s dhd_preinit_aibss_ioctls failed %d\n",
            __FUNCTION__, ret));
        goto done;
    }
#endif /* WLAIBSS */
#if defined(CUSTOM_AMPDU_BA_WSIZE) || (defined(WLAIBSS) && \
    defined(CUSTOM_IBSS_AMPDU_BA_WSIZE))
    /* Set ampdu ba wsize to 64 or 16 */
#ifdef CUSTOM_AMPDU_BA_WSIZE
    ampdu_ba_wsize = CUSTOM_AMPDU_BA_WSIZE;
#endif // endif
#if defined(WLAIBSS) && defined(CUSTOM_IBSS_AMPDU_BA_WSIZE)
    if (dhd->op_mode == DHD_FLAG_IBSS_MODE)
        ampdu_ba_wsize = CUSTOM_IBSS_AMPDU_BA_WSIZE;
#endif /* WLAIBSS && CUSTOM_IBSS_AMPDU_BA_WSIZE */
    if (ampdu_ba_wsize != 0) {
        ret = dhd_iovar(dhd, 0, "ampdu_ba_wsize", (char *)&ampdu_ba_wsize,
            sizeof(ampdu_ba_wsize), NULL, 0, TRUE);
        if (ret < 0) {
            DHD_ERROR(("%s Set ampdu_ba_wsize to %d failed %d\n",
                __FUNCTION__, ampdu_ba_wsize, ret));
        }
    }
#endif /* CUSTOM_AMPDU_BA_WSIZE || (WLAIBSS && CUSTOM_IBSS_AMPDU_BA_WSIZE) */
#if defined(CUSTOM_AMPDU_MPDU)
    ampdu_mpdu = CUSTOM_AMPDU_MPDU;
    if (ampdu_mpdu != 0 && (ampdu_mpdu <= ampdu_ba_wsize)) {
        ret = dhd_iovar(dhd, 0, "ampdu_mpdu", (char *)&ampdu_mpdu, sizeof(ampdu_mpdu),
            NULL, 0, TRUE);
        if (ret < 0) {
            DHD_ERROR(("%s Set ampdu_mpdu to %d failed %d\n",
                __FUNCTION__, CUSTOM_AMPDU_MPDU, ret));
        }
    }
#endif /* CUSTOM_AMPDU_MPDU */
#if defined(CUSTOM_AMPDU_RELEASE)
    ampdu_release = CUSTOM_AMPDU_RELEASE;
    if (ampdu_release != 0 && (ampdu_release <= ampdu_ba_wsize)) {
        ret = dhd_iovar(dhd, 0, "ampdu_release", (char *)&ampdu_release,
            sizeof(ampdu_release), NULL, 0, TRUE);
        if (ret < 0) {
            DHD_ERROR(("%s Set ampdu_release to %d failed %d\n",
                __FUNCTION__, CUSTOM_AMPDU_RELEASE, ret));
        }
    }
#endif /* CUSTOM_AMPDU_RELEASE */
#if defined(CUSTOM_AMSDU_AGGSF)
    amsdu_aggsf = CUSTOM_AMSDU_AGGSF;
    if (amsdu_aggsf != 0) {
        ret = dhd_iovar(dhd, 0, "amsdu_aggsf", (char *)&amsdu_aggsf, sizeof(amsdu_aggsf),
            NULL, 0, TRUE);
        if (ret < 0) {
            DHD_ERROR(("%s Set amsdu_aggsf to %d failed %d\n",
                __FUNCTION__, CUSTOM_AMSDU_AGGSF, ret));
        }
    }
#endif /* CUSTOM_AMSDU_AGGSF */
#if defined(BCMSUP_4WAY_HANDSHAKE)
    /* Read 4-way handshake requirements */
    if (dhd_use_idsup == 1) {
        ret = dhd_iovar(dhd, 0, "sup_wpa", (char *)&sup_wpa, sizeof(sup_wpa),
            (char *)&iovbuf, sizeof(iovbuf), FALSE);
        /* sup_wpa iovar returns NOTREADY status on some platforms using modularized
         * in-dongle supplicant.
         */
        if (ret >= 0 || ret == BCME_NOTREADY)
            dhd->fw_4way_handshake = TRUE;
        DHD_TRACE(("4-way handshake mode is: %d\n", dhd->fw_4way_handshake));
    }
#endif /* BCMSUP_4WAY_HANDSHAKE */
#if defined(SUPPORT_2G_VHT) || defined(SUPPORT_5G_1024QAM_VHT)
    ret = dhd_iovar(dhd, 0, "vht_features", (char *)&vht_features, sizeof(vht_features),
        NULL, 0, FALSE);
    if (ret < 0) {
        DHD_ERROR(("%s vht_features get failed %d\n", __FUNCTION__, ret));
        vht_features = 0;
    } else {
#ifdef SUPPORT_2G_VHT
        vht_features |= 0x3; /* 2G support */
#endif /* SUPPORT_2G_VHT */
#ifdef SUPPORT_5G_1024QAM_VHT
        vht_features |= 0x6; /* 5G 1024 QAM support */
#endif /* SUPPORT_5G_1024QAM_VHT */
    }
    if (vht_features) {
        ret = dhd_iovar(dhd, 0, "vht_features", (char *)&vht_features, sizeof(vht_features),
            NULL, 0, TRUE);
        if (ret < 0) {
            DHD_ERROR(("%s vht_features set failed %d\n", __FUNCTION__, ret));
            if (ret == BCME_NOTDOWN) {
                uint wl_down = 1;
                ret = dhd_wl_ioctl_cmd(dhd, WLC_DOWN,
                    (char *)&wl_down, sizeof(wl_down), TRUE, 0);
                DHD_ERROR(("%s vht_features fail WL_DOWN : %d,"
                    " vht_features = 0x%x\n",
                    __FUNCTION__, ret, vht_features));
                ret = dhd_iovar(dhd, 0, "vht_features", (char *)&vht_features,
                    sizeof(vht_features), NULL, 0, TRUE);
                DHD_ERROR(("%s vht_features set. ret --> %d\n", __FUNCTION__, ret));
            }
        }
    }
#endif /* SUPPORT_2G_VHT || SUPPORT_5G_1024QAM_VHT */
#ifdef DISABLE_11N_PROPRIETARY_RATES
    ret = dhd_iovar(dhd, 0, "ht_features", (char *)&ht_features, sizeof(ht_features), NULL, 0,
        TRUE);
    if (ret < 0) {
        DHD_ERROR(("%s ht_features set failed %d\n", __FUNCTION__, ret));
    }
#endif /* DISABLE_11N_PROPRIETARY_RATES */
#if defined(DISABLE_HE_ENAB) || defined(CUSTOM_CONTROL_HE_ENAB)
#if defined(DISABLE_HE_ENAB)
    control_he_enab = 0;
#endif /* DISABLE_HE_ENAB */
    dhd_control_he_enab(dhd, control_he_enab);
#endif /* DISABLE_HE_ENAB || CUSTOM_CONTROL_HE_ENAB */
#ifdef CUSTOM_PSPRETEND_THR
    /* Set pspretend threshold for HostAPD */
    ret = dhd_iovar(dhd, 0, "pspretend_threshold", (char *)&pspretend_thr,
        sizeof(pspretend_thr), NULL, 0, TRUE);
    if (ret < 0) {
        DHD_ERROR(("%s pspretend_threshold for HostAPD failed %d\n",
            __FUNCTION__, ret));
    }
#endif // endif
    ret = dhd_iovar(dhd, 0, "buf_key_b4_m4", (char *)&buf_key_b4_m4, sizeof(buf_key_b4_m4),
        NULL, 0, TRUE);
    if (ret < 0) {
        DHD_ERROR(("%s buf_key_b4_m4 set failed %d\n", __FUNCTION__, ret));
    }
#ifdef SUPPORT_SET_CAC
    ret = dhd_iovar(dhd, 0, "cac", (char *)&cac, sizeof(cac), NULL, 0, TRUE);
    if (ret < 0) {
        DHD_ERROR(("%s Failed to set cac to %d, %d\n", __FUNCTION__, cac, ret));
    }
#endif /* SUPPORT_SET_CAC */
#ifdef DHD_ULP
    /* Get the required details from dongle during preinit ioctl */
    dhd_ulp_preinit(dhd);
#endif /* DHD_ULP */
    /* Read event_msgs mask */
    ret = dhd_iovar(dhd, 0, "event_msgs", eventmask, WL_EVENTING_MASK_LEN, iovbuf,
        sizeof(iovbuf), FALSE);
    if (ret < 0) {
        DHD_ERROR(("%s read Event mask failed %d\n", __FUNCTION__, ret));
        goto done;
    }
    bcopy(iovbuf, eventmask, WL_EVENTING_MASK_LEN);
    /* Setup event_msgs */
    setbit(eventmask, WLC_E_SET_SSID);
    setbit(eventmask, WLC_E_PRUNE);
    setbit(eventmask, WLC_E_AUTH);
    setbit(eventmask, WLC_E_AUTH_IND);
    setbit(eventmask, WLC_E_ASSOC);
    setbit(eventmask, WLC_E_REASSOC);
    setbit(eventmask, WLC_E_REASSOC_IND);
    if (!(dhd->op_mode & DHD_FLAG_IBSS_MODE))
        setbit(eventmask, WLC_E_DEAUTH);
    setbit(eventmask, WLC_E_DEAUTH_IND);
    setbit(eventmask, WLC_E_DISASSOC_IND);
    setbit(eventmask, WLC_E_DISASSOC);
    setbit(eventmask, WLC_E_JOIN);
    setbit(eventmask, WLC_E_START);
    setbit(eventmask, WLC_E_ASSOC_IND);
    setbit(eventmask, WLC_E_PSK_SUP);
    setbit(eventmask, WLC_E_LINK);
    setbit(eventmask, WLC_E_MIC_ERROR);
    setbit(eventmask, WLC_E_ASSOC_REQ_IE);
    setbit(eventmask, WLC_E_ASSOC_RESP_IE);
#ifdef LIMIT_BORROW
    setbit(eventmask, WLC_E_ALLOW_CREDIT_BORROW);
#endif // endif
#ifndef WL_CFG80211
    setbit(eventmask, WLC_E_PMKID_CACHE);
    setbit(eventmask, WLC_E_TXFAIL);
#endif // endif
    setbit(eventmask, WLC_E_JOIN_START);
    setbit(eventmask, WLC_E_SCAN_COMPLETE);
    setbit(eventmask, WLC_E_IND_DOS_STATUS);
#ifdef DHD_DEBUG
    setbit(eventmask, WLC_E_SCAN_CONFIRM_IND);
#endif // endif
#ifdef PNO_SUPPORT
    setbit(eventmask, WLC_E_PFN_NET_FOUND);
    setbit(eventmask, WLC_E_PFN_BEST_BATCHING);
    setbit(eventmask, WLC_E_PFN_BSSID_NET_FOUND);
    setbit(eventmask, WLC_E_PFN_BSSID_NET_LOST);
#endif /* PNO_SUPPORT */
    /* enable dongle roaming event */
#ifdef WL_CFG80211
#if !defined(ROAM_EVT_DISABLE)
    setbit(eventmask, WLC_E_ROAM);
#endif /* !ROAM_EVT_DISABLE */
    setbit(eventmask, WLC_E_BSSID);
#endif /* WL_CFG80211 */
#ifdef BCMCCX
    setbit(eventmask, WLC_E_ADDTS_IND);
    setbit(eventmask, WLC_E_DELTS_IND);
#endif /* BCMCCX */
#ifdef WLTDLS
    setbit(eventmask, WLC_E_TDLS_PEER_EVENT);
#endif /* WLTDLS */
#ifdef RTT_SUPPORT
    setbit(eventmask, WLC_E_PROXD);
#endif /* RTT_SUPPORT */
#if !defined(WL_CFG80211) && !defined(OEM_ANDROID)
    setbit(eventmask, WLC_E_ESCAN_RESULT);
#endif // endif
#ifdef WL_CFG80211
    setbit(eventmask, WLC_E_ESCAN_RESULT);
    setbit(eventmask, WLC_E_AP_STARTED);
    setbit(eventmask, WLC_E_ACTION_FRAME_RX);
    if (dhd->op_mode & DHD_FLAG_P2P_MODE) {
        setbit(eventmask, WLC_E_P2P_DISC_LISTEN_COMPLETE);
    }
#endif /* WL_CFG80211 */
#ifdef WLAIBSS
    setbit(eventmask, WLC_E_AIBSS_TXFAIL);
#endif /* WLAIBSS */
#if defined(SHOW_LOGTRACE) && defined(LOGTRACE_FROM_FILE)
    if (dhd_logtrace_from_file(dhd)) {
        setbit(eventmask, WLC_E_TRACE);
    } else {
        clrbit(eventmask, WLC_E_TRACE);
    }
#elif defined(SHOW_LOGTRACE)
    setbit(eventmask, WLC_E_TRACE);
#else
    clrbit(eventmask, WLC_E_TRACE);
#endif /* defined(SHOW_LOGTRACE) && defined(LOGTRACE_FROM_FILE) */
    setbit(eventmask, WLC_E_CSA_COMPLETE_IND);
#ifdef CUSTOM_EVENT_PM_WAKE
    setbit(eventmask, WLC_E_EXCESS_PM_WAKE_EVENT);
#endif /* CUSTOM_EVENT_PM_WAKE */
#ifdef DHD_LOSSLESS_ROAMING
    setbit(eventmask, WLC_E_ROAM_PREP);
#endif // endif
    /* nan events */
    setbit(eventmask, WLC_E_NAN);
#if defined(PCIE_FULL_DONGLE) && defined(DHD_LOSSLESS_ROAMING)
    dhd_update_flow_prio_map(dhd, DHD_FLOW_PRIO_LLR_MAP);
#endif /* defined(PCIE_FULL_DONGLE) && defined(DHD_LOSSLESS_ROAMING) */
#if defined(BCMPCIE) && defined(EAPOL_PKT_PRIO)
    dhd_update_flow_prio_map(dhd, DHD_FLOW_PRIO_LLR_MAP);
#endif /* defined(BCMPCIE) && defined(EAPOL_PKT_PRIO) */
    /* Write updated Event mask */
    ret = dhd_iovar(dhd, 0, "event_msgs", eventmask, WL_EVENTING_MASK_LEN, NULL, 0, TRUE);
    if (ret < 0) {
        DHD_ERROR(("%s Set Event mask failed %d\n", __FUNCTION__, ret));
        goto done;
    }
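    /*
     * Sketch of the extended event mask exchange (descriptive note, not in the
     * original source): the classic "event_msgs" bitvector only covers event
     * numbers 0..127, so events with WLC_E_* numbers of 128 and above are
     * masked through the "event_msgs_ext" iovar instead. eventmask_msg below
     * is a variable-length eventmsgs_ext_t: a small header (ver/command/len,
     * as set in this code) followed by a bit array sized for all WLC_E_LAST
     * events, which is read, modified with setbit(), and written back just
     * like the legacy mask.
     */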
    /* make up event mask ext message iovar for events larger than 128 */
    msglen = ROUNDUP(WLC_E_LAST, NBBY)/NBBY + EVENTMSGS_EXT_STRUCT_SIZE;
    eventmask_msg = (eventmsgs_ext_t *)MALLOC(dhd->osh, msglen);
    if (eventmask_msg == NULL) {
        DHD_ERROR(("failed to allocate %d bytes for event_msg_ext\n", msglen));
        ret = BCME_NOMEM;
        goto done;
    }
    bzero(eventmask_msg, msglen);
    eventmask_msg->ver = EVENTMSGS_VER;
    eventmask_msg->len = ROUNDUP(WLC_E_LAST, NBBY)/NBBY;
    /* Read event_msgs_ext mask */
    ret2 = dhd_iovar(dhd, 0, "event_msgs_ext", (char *)eventmask_msg, msglen, iov_buf,
        WLC_IOCTL_SMLEN, FALSE);
    if (ret2 == 0) { /* event_msgs_ext must be supported */
        bcopy(iov_buf, eventmask_msg, msglen);
#ifdef RSSI_MONITOR_SUPPORT
        setbit(eventmask_msg->mask, WLC_E_RSSI_LQM);
#endif /* RSSI_MONITOR_SUPPORT */
#ifdef GSCAN_SUPPORT
        setbit(eventmask_msg->mask, WLC_E_PFN_GSCAN_FULL_RESULT);
        setbit(eventmask_msg->mask, WLC_E_PFN_SCAN_COMPLETE);
        setbit(eventmask_msg->mask, WLC_E_PFN_SSID_EXT);
        setbit(eventmask_msg->mask, WLC_E_ROAM_EXP_EVENT);
#endif /* GSCAN_SUPPORT */
        setbit(eventmask_msg->mask, WLC_E_RSSI_LQM);
#ifdef BT_WIFI_HANDOVER
        setbit(eventmask_msg->mask, WLC_E_BT_WIFI_HANDOVER_REQ);
#endif /* BT_WIFI_HANDOVER */
#ifdef DBG_PKT_MON
        setbit(eventmask_msg->mask, WLC_E_ROAM_PREP);
#endif /* DBG_PKT_MON */
#ifdef DHD_ULP
        setbit(eventmask_msg->mask, WLC_E_ULP);
#endif // endif
#ifdef WL_NATOE
        setbit(eventmask_msg->mask, WLC_E_NATOE_NFCT);
#endif /* WL_NATOE */
#ifdef WL_NAN
        setbit(eventmask_msg->mask, WLC_E_SLOTTED_BSS_PEER_OP);
#endif /* WL_NAN */
#ifdef WL_MBO
        setbit(eventmask_msg->mask, WLC_E_MBO);
#endif /* WL_MBO */
#ifdef WL_BCNRECV
        setbit(eventmask_msg->mask, WLC_E_BCNRECV_ABORTED);
#endif /* WL_BCNRECV */
#ifdef WL_CAC_TS
        setbit(eventmask_msg->mask, WLC_E_ADDTS_IND);
        setbit(eventmask_msg->mask, WLC_E_DELTS_IND);
#endif /* WL_CAC_TS */
#ifdef WL_CHAN_UTIL
        setbit(eventmask_msg->mask, WLC_E_BSS_LOAD);
#endif /* WL_CHAN_UTIL */
#ifdef WL_SAE
        setbit(eventmask_msg->mask, WLC_E_EXT_AUTH_REQ);
        setbit(eventmask_msg->mask, WLC_E_EXT_AUTH_FRAME_RX);
        setbit(eventmask_msg->mask, WLC_E_MGMT_FRAME_TXSTATUS);
        setbit(eventmask_msg->mask, WLC_E_MGMT_FRAME_OFF_CHAN_COMPLETE);
#endif /* WL_SAE */
        /* Write updated Event mask */
        eventmask_msg->ver = EVENTMSGS_VER;
        eventmask_msg->command = EVENTMSGS_SET_MASK;
        eventmask_msg->len = ROUNDUP(WLC_E_LAST, NBBY)/NBBY;
        ret = dhd_iovar(dhd, 0, "event_msgs_ext", (char *)eventmask_msg, msglen, NULL, 0,
            TRUE);
        if (ret < 0) {
            DHD_ERROR(("%s write event mask ext failed %d\n", __FUNCTION__, ret));
            goto done;
        }
    } else if (ret2 == BCME_UNSUPPORTED || ret2 == BCME_VERSION) {
        /* Skip for BCME_UNSUPPORTED or BCME_VERSION */
        DHD_ERROR(("%s event_msgs_ext not supported or version mismatch %d\n",
            __FUNCTION__, ret2));
    } else {
        DHD_ERROR(("%s read event mask ext failed %d\n", __FUNCTION__, ret2));
        ret = ret2;
        goto done;
    }
#if defined(DHD_8021X_DUMP) && defined(SHOW_LOGTRACE)
    /* Enabling event log trace for EAP events */
    el_tag = (wl_el_tag_params_t *)MALLOC(dhd->osh, sizeof(wl_el_tag_params_t));
    if (el_tag == NULL) {
        DHD_ERROR(("failed to allocate %d bytes for el_tag\n",
            (int)sizeof(wl_el_tag_params_t)));
        ret = BCME_NOMEM;
        goto done;
    }
    el_tag->tag = EVENT_LOG_TAG_4WAYHANDSHAKE;
    el_tag->set = 1;
    el_tag->flags = EVENT_LOG_TAG_FLAG_LOG;
    ret = dhd_iovar(dhd, 0, "event_log_tag_control", (char *)el_tag, sizeof(*el_tag), NULL, 0,
        TRUE);
#endif /* DHD_8021X_DUMP && SHOW_LOGTRACE */
#ifdef OEM_ANDROID
    dhd_wl_ioctl_cmd(dhd, WLC_SET_SCAN_CHANNEL_TIME, (char *)&scan_assoc_time,
        sizeof(scan_assoc_time), TRUE, 0);
    dhd_wl_ioctl_cmd(dhd, WLC_SET_SCAN_UNASSOC_TIME, (char *)&scan_unassoc_time,
        sizeof(scan_unassoc_time), TRUE, 0);
    dhd_wl_ioctl_cmd(dhd, WLC_SET_SCAN_PASSIVE_TIME, (char *)&scan_passive_time,
        sizeof(scan_passive_time), TRUE, 0);
#ifdef ARP_OFFLOAD_SUPPORT
    /* Set and enable ARP offload feature for STA only */
#if defined(OEM_ANDROID) && defined(SOFTAP)
    if (arpoe && !ap_fw_loaded) {
#else
    if (arpoe) {
#endif /* defined(OEM_ANDROID) && defined(SOFTAP) */
        dhd_arp_offload_enable(dhd, TRUE);
        dhd_arp_offload_set(dhd, dhd_arp_mode);
    } else {
        dhd_arp_offload_enable(dhd, FALSE);
        dhd_arp_offload_set(dhd, 0);
    }
    dhd_arp_enable = arpoe;
#endif /* ARP_OFFLOAD_SUPPORT */
#ifdef PKT_FILTER_SUPPORT
    /* Setup default definitions for pktfilter, enable in suspend */
    dhd->pktfilter_count = 6;
    dhd->pktfilter[DHD_BROADCAST_FILTER_NUM] = NULL;
    if (!FW_SUPPORTED(dhd, pf6)) {
        dhd->pktfilter[DHD_MULTICAST4_FILTER_NUM] = NULL;
        dhd->pktfilter[DHD_MULTICAST6_FILTER_NUM] = NULL;
    } else {
        /* Immediately pkt filter TYPE 6 Discard IPv4/IPv6 Multicast Packet */
        dhd->pktfilter[DHD_MULTICAST4_FILTER_NUM] = DISCARD_IPV4_MCAST;
        dhd->pktfilter[DHD_MULTICAST6_FILTER_NUM] = DISCARD_IPV6_MCAST;
    }
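    /*
     * Note on the filter strings used below (added commentary; the format is
     * inferred from the values in this file): each classic pktfilter rule is
     * a string of the form "<id> <polarity> <filter type> <offset> <bitmask>
     * <pattern>". For example "105 0 0 12 0xFFFF 0x0806" matches when the
     * 16-bit field at byte offset 12 (the Ethernet type) equals 0x0806 (ARP),
     * so the firmware can keep handling ARP frames while the filters are
     * engaged during suspend.
     */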
    /* apply APP pktfilter */
    dhd->pktfilter[DHD_ARP_FILTER_NUM] = "105 0 0 12 0xFFFF 0x0806";
#ifdef BLOCK_IPV6_PACKET
    /* Setup filter to allow only IPv4 unicast frames */
    dhd->pktfilter[DHD_UNICAST_FILTER_NUM] = "100 0 0 0 "
        HEX_PREF_STR UNI_FILTER_STR ZERO_ADDR_STR ETHER_TYPE_STR IPV6_FILTER_STR
        " "
        HEX_PREF_STR ZERO_ADDR_STR ZERO_ADDR_STR ETHER_TYPE_STR ZERO_TYPE_STR;
#else
    /* Setup filter to allow only unicast */
    dhd->pktfilter[DHD_UNICAST_FILTER_NUM] = "100 0 0 0 0x01 0x00";
#endif /* BLOCK_IPV6_PACKET */
#ifdef PASS_IPV4_SUSPEND
    dhd->pktfilter[DHD_MDNS_FILTER_NUM] = "104 0 0 0 0xFFFFFF 0x01005E";
#else
    /* Add filter to pass multicastDNS packet and NOT filter out as Broadcast */
    dhd->pktfilter[DHD_MDNS_FILTER_NUM] = NULL;
#endif /* PASS_IPV4_SUSPEND */
    if (FW_SUPPORTED(dhd, pf6)) {
        /* Immediately pkt filter TYPE 6 Discard Broadcast IP packet */
        dhd->pktfilter[DHD_IP4BCAST_DROP_FILTER_NUM] = DISCARD_IPV4_BCAST;
        /* Immediately pkt filter TYPE 6 Discard Cisco STP packet */
        dhd->pktfilter[DHD_LLC_STP_DROP_FILTER_NUM] = DISCARD_LLC_STP;
        /* Immediately pkt filter TYPE 6 Discard Cisco XID protocol */
        dhd->pktfilter[DHD_LLC_XID_DROP_FILTER_NUM] = DISCARD_LLC_XID;
        dhd->pktfilter_count = 10;
    }
#ifdef GAN_LITE_NAT_KEEPALIVE_FILTER
    dhd->pktfilter_count = 4;
    /* Setup filter to block broadcast and NAT Keepalive packets */
    /* discard all broadcast packets */
    dhd->pktfilter[DHD_UNICAST_FILTER_NUM] = "100 0 0 0 0xffffff 0xffffff";
    /* discard NAT Keepalive packets */
    dhd->pktfilter[DHD_BROADCAST_FILTER_NUM] = "102 0 0 36 0xffffffff 0x11940009";
    /* discard NAT Keepalive packets */
    dhd->pktfilter[DHD_MULTICAST4_FILTER_NUM] = "104 0 0 38 0xffffffff 0x11940009";
    dhd->pktfilter[DHD_MULTICAST6_FILTER_NUM] = NULL;
#endif /* GAN_LITE_NAT_KEEPALIVE_FILTER */
#if defined(SOFTAP)
    if (ap_fw_loaded) {
        dhd_enable_packet_filter(0, dhd);
    }
#endif /* defined(SOFTAP) */
    dhd_set_packet_filter(dhd);
#endif /* PKT_FILTER_SUPPORT */
#ifdef DISABLE_11N
    ret = dhd_iovar(dhd, 0, "nmode", (char *)&nmode, sizeof(nmode), NULL, 0, TRUE);
    if (ret < 0)
        DHD_ERROR(("%s wl nmode 0 failed %d\n", __FUNCTION__, ret));
#endif /* DISABLE_11N */
#ifdef ENABLE_BCN_LI_BCN_WAKEUP
    ret = dhd_iovar(dhd, 0, "bcn_li_bcn", (char *)&bcn_li_bcn, sizeof(bcn_li_bcn), NULL, 0,
        TRUE);
#endif /* ENABLE_BCN_LI_BCN_WAKEUP */
#ifdef AMPDU_VO_ENABLE
    tid.tid = PRIO_8021D_VO; /* Enable TID(6) for voice */
    tid.enable = TRUE;
    ret = dhd_iovar(dhd, 0, "ampdu_tid", (char *)&tid, sizeof(tid), NULL, 0, TRUE);
    tid.tid = PRIO_8021D_NC; /* Enable TID(7) for network control */
    tid.enable = TRUE;
    ret = dhd_iovar(dhd, 0, "ampdu_tid", (char *)&tid, sizeof(tid), NULL, 0, TRUE);
#endif // endif
    /* query for 'clmver' to get clm version info from firmware */
    memset(buf, 0, sizeof(buf));
    ret = dhd_iovar(dhd, 0, "clmver", NULL, 0, buf, sizeof(buf), FALSE);
    if (ret < 0)
        DHD_ERROR(("%s failed %d\n", __FUNCTION__, ret));
    else {
        char *ver_temp_buf = NULL;
        if ((ver_temp_buf = bcmstrstr(buf, "Data:")) == NULL) {
            DHD_ERROR(("Couldn't find \"Data:\"\n"));
        } else {
            ptr = (ver_temp_buf + strlen("Data:"));
            if ((ver_temp_buf = bcmstrtok(&ptr, "\n", 0)) == NULL) {
                DHD_ERROR(("Couldn't find New line character\n"));
            } else {
                memset(clm_version, 0, CLM_VER_STR_LEN);
                strncpy(clm_version, ver_temp_buf,
                    MIN(strlen(ver_temp_buf) + 1, CLM_VER_STR_LEN - 1));
                DHD_INFO(("CLM version = %s\n", clm_version));
            }
        }
#if defined(CUSTOMER_HW4_DEBUG)
        if ((ver_temp_buf = bcmstrstr(ptr, "Customization:")) == NULL) {
            DHD_ERROR(("Couldn't find \"Customization:\"\n"));
        } else {
            char tokenlim;
            ptr = (ver_temp_buf + strlen("Customization:"));
            if ((ver_temp_buf = bcmstrtok(&ptr, "(\n", &tokenlim)) == NULL) {
                DHD_ERROR(("Couldn't find project blob version"
                    " or New line character\n"));
            } else if (tokenlim == '(') {
                snprintf(clm_version,
                    CLM_VER_STR_LEN - 1, "%s, Blob ver = Major : %s minor : ",
                    clm_version, ver_temp_buf);
                DHD_INFO(("[INFO]CLM/Blob version = %s\n", clm_version));
                if ((ver_temp_buf = bcmstrtok(&ptr, "\n", &tokenlim)) == NULL) {
                    DHD_ERROR(("Couldn't find New line character\n"));
                } else {
                    snprintf(clm_version,
                        strlen(clm_version) + strlen(ver_temp_buf),
                        "%s%s", clm_version, ver_temp_buf);
                    DHD_INFO(("[INFO]CLM/Blob/project version = %s\n",
                        clm_version));
                }
            } else if (tokenlim == '\n') {
                snprintf(clm_version,
                    strlen(clm_version) + strlen(", Blob ver = Major : ") + 1,
                    "%s, Blob ver = Major : ", clm_version);
                snprintf(clm_version,
                    strlen(clm_version) + strlen(ver_temp_buf) + 1,
                    "%s%s", clm_version, ver_temp_buf);
                DHD_INFO(("[INFO]CLM/Blob/project version = %s\n", clm_version));
            }
        }
#endif /* CUSTOMER_HW4_DEBUG */
        if (strlen(clm_version)) {
            DHD_ERROR(("CLM version = %s\n", clm_version));
        } else {
            DHD_ERROR(("Couldn't find CLM version!\n"));
        }
    }
#ifdef WRITE_WLANINFO
    sec_save_wlinfo(fw_version, EPI_VERSION_STR, dhd->info->nv_path, clm_version);
#endif /* WRITE_WLANINFO */
    /* query for 'wlc_ver' to get version info from firmware */
    memset(&wlc_ver, 0, sizeof(wl_wlc_version_t));
    ret = dhd_iovar(dhd, 0, "wlc_ver", NULL, 0, (char *)&wlc_ver,
        sizeof(wl_wlc_version_t), FALSE);
    if (ret < 0)
        DHD_ERROR(("%s failed %d\n", __FUNCTION__, ret));
    else {
        dhd->wlc_ver_major = wlc_ver.wlc_ver_major;
        dhd->wlc_ver_minor = wlc_ver.wlc_ver_minor;
    }
#endif /* defined(OEM_ANDROID) */
#ifdef GEN_SOFTAP_INFO_FILE
    sec_save_softap_info();
#endif /* GEN_SOFTAP_INFO_FILE */
#if defined(BCMSDIO)
    dhd_txglom_enable(dhd, TRUE);
#endif /* defined(BCMSDIO) */
#if defined(BCMSDIO)
#ifdef PROP_TXSTATUS
    if (disable_proptx ||
#ifdef PROP_TXSTATUS_VSDB
        /* enable WLFC only if the firmware is VSDB when it is in STA mode */
        (dhd->op_mode != DHD_FLAG_HOSTAP_MODE &&
        dhd->op_mode != DHD_FLAG_IBSS_MODE) ||
#endif /* PROP_TXSTATUS_VSDB */
        FALSE) {
        wlfc_enable = FALSE;
    }
#if defined(PROP_TXSTATUS)
#ifdef USE_WFA_CERT_CONF
    if (sec_get_param_wfa_cert(dhd, SET_PARAM_PROPTX, &proptx) == BCME_OK) {
        DHD_ERROR(("%s, read proptx param=%d\n", __FUNCTION__, proptx));
        wlfc_enable = proptx;
    }
#endif /* USE_WFA_CERT_CONF */
#endif /* PROP_TXSTATUS */
#ifndef DISABLE_11N
    ret2 = dhd_iovar(dhd, 0, "ampdu_hostreorder", (char *)&hostreorder, sizeof(hostreorder),
        NULL, 0, TRUE);
    chipid = dhd_bus_chip_id(dhd);
    if (ret2 < 0) {
        DHD_ERROR(("%s wl ampdu_hostreorder failed %d\n", __FUNCTION__, ret2));
        if (ret2 != BCME_UNSUPPORTED && chipid != BCM4373_CHIP_ID)
            ret = ret2;
        if (ret == BCME_NOTDOWN) {
            uint wl_down = 1;
            ret2 = dhd_wl_ioctl_cmd(dhd, WLC_DOWN, (char *)&wl_down,
                sizeof(wl_down), TRUE, 0);
            DHD_ERROR(("%s ampdu_hostreorder fail WL_DOWN : %d, hostreorder :%d\n",
                __FUNCTION__, ret2, hostreorder));
            ret2 = dhd_iovar(dhd, 0, "ampdu_hostreorder", (char *)&hostreorder,
                sizeof(hostreorder), NULL, 0, TRUE);
            DHD_ERROR(("%s wl ampdu_hostreorder. ret --> %d\n", __FUNCTION__, ret2));
            if (ret2 != BCME_UNSUPPORTED && chipid != BCM4373_CHIP_ID)
                ret = ret2;
        }
        if (ret2 != BCME_OK)
            hostreorder = 0;
    }
#endif /* DISABLE_11N */
    if (wlfc_enable)
        dhd_wlfc_init(dhd);
#ifndef DISABLE_11N
    else if (hostreorder)
        dhd_wlfc_hostreorder_init(dhd);
#endif /* DISABLE_11N */
#endif /* PROP_TXSTATUS */
#endif /* BCMSDIO */
#ifndef PCIE_FULL_DONGLE
    /* Without PCIe full dongle, DHD needs to see all packets to handle
     * intra-BSS forwarding itself.
     */
    if (FW_SUPPORTED(dhd, ap)) {
        wl_ap_isolate = AP_ISOLATE_SENDUP_ALL;
        ret = dhd_iovar(dhd, 0, "ap_isolate", (char *)&wl_ap_isolate, sizeof(wl_ap_isolate),
            NULL, 0, TRUE);
        if (ret < 0)
            DHD_ERROR(("%s failed %d\n", __FUNCTION__, ret));
    }
#endif /* PCIE_FULL_DONGLE */
#ifdef PNO_SUPPORT
    if (!dhd->pno_state) {
        dhd_pno_init(dhd);
    }
#endif // endif
#ifdef RTT_SUPPORT
    if (!dhd->rtt_state) {
        ret = dhd_rtt_init(dhd);
        if (ret < 0) {
            DHD_ERROR(("%s failed to initialize RTT\n", __FUNCTION__));
        }
    }
#endif // endif
#ifdef FILTER_IE
    /* Failure to configure filter IE is not a fatal error, ignore it. */
    if (!(dhd->op_mode & (DHD_FLAG_HOSTAP_MODE | DHD_FLAG_MFG_MODE)))
        dhd_read_from_file(dhd);
#endif /* FILTER_IE */
#ifdef WL11U
    dhd_interworking_enable(dhd);
#endif /* WL11U */
#ifdef NDO_CONFIG_SUPPORT
    dhd->ndo_enable = FALSE;
    dhd->ndo_host_ip_overflow = FALSE;
    dhd->ndo_max_host_ip = NDO_MAX_HOST_IP_ENTRIES;
#endif /* NDO_CONFIG_SUPPORT */
    /* ND offload version supported */
    dhd->ndo_version = dhd_ndo_get_version(dhd);
    if (dhd->ndo_version > 0) {
        DHD_INFO(("%s: ndo version %d\n", __FUNCTION__, dhd->ndo_version));
#ifdef NDO_CONFIG_SUPPORT
        /* enable Unsolicited NA filter */
        ret = dhd_ndo_unsolicited_na_filter_enable(dhd, 1);
        if (ret < 0) {
            DHD_ERROR(("%s failed to enable Unsolicited NA filter\n", __FUNCTION__));
        }
#endif /* NDO_CONFIG_SUPPORT */
    }
    /* check dongle supports wbtext (product policy) or not */
    dhd->wbtext_support = FALSE;
    if (dhd_wl_ioctl_get_intiovar(dhd, "wnm_bsstrans_resp", &wnm_bsstrans_resp,
        WLC_GET_VAR, FALSE, 0) != BCME_OK) {
        DHD_ERROR(("failed to get wnm_bsstrans_resp\n"));
    }
    dhd->wbtext_policy = wnm_bsstrans_resp;
    if (dhd->wbtext_policy == WL_BSSTRANS_POLICY_PRODUCT_WBTEXT) {
        dhd->wbtext_support = TRUE;
    }
#ifndef WBTEXT
    /* driver can turn off wbtext feature through makefile */
    if (dhd->wbtext_support) {
        if (dhd_wl_ioctl_set_intiovar(dhd, "wnm_bsstrans_resp",
            WL_BSSTRANS_POLICY_ROAM_ALWAYS,
            WLC_SET_VAR, FALSE, 0) != BCME_OK) {
            DHD_ERROR(("failed to disable WBTEXT\n"));
        }
    }
#endif /* !WBTEXT */
#ifdef DHD_NON_DMA_M2M_CORRUPTION
    /* check pcie non dma loopback */
    if (dhd->op_mode == DHD_FLAG_MFG_MODE &&
        (dhd_bus_dmaxfer_lpbk(dhd, M2M_NON_DMA_LPBK) < 0)) {
        goto done;
    }
#endif /* DHD_NON_DMA_M2M_CORRUPTION */
    /* WNM capabilities */
    wnm_cap = 0
#ifdef WL11U
        | WL_WNM_BSSTRANS | WL_WNM_NOTIF
#endif // endif
#ifdef WBTEXT
        | WL_WNM_BSSTRANS | WL_WNM_MAXIDLE
#endif // endif
        ;
#if defined(WL_MBO) && defined(WL_OCE)
    if (FW_SUPPORTED(dhd, estm)) {
        wnm_cap |= WL_WNM_ESTM;
    }
#endif /* WL_MBO && WL_OCE */
    if (dhd_iovar(dhd, 0, "wnm", (char *)&wnm_cap, sizeof(wnm_cap), NULL, 0, TRUE) < 0) {
        DHD_ERROR(("failed to set WNM capabilities\n"));
    }
    if (FW_SUPPORTED(dhd, ecounters) && enable_ecounter) {
        dhd_ecounter_configure(dhd, TRUE);
    }
    /* store the preserve log set numbers */
    if (dhd_get_preserve_log_numbers(dhd, &dhd->logset_prsrv_mask)
        != BCME_OK) {
        DHD_ERROR(("%s: Failed to get preserve log # !\n", __FUNCTION__));
    }
#if defined(WBTEXT) && defined(WBTEXT_BTMDELTA)
    if (dhd_iovar(dhd, 0, "wnm_btmdelta", (char *)&btmdelta, sizeof(btmdelta),
        NULL, 0, TRUE) < 0) {
        DHD_ERROR(("failed to set BTM delta\n"));
    }
#endif /* WBTEXT && WBTEXT_BTMDELTA */
#ifdef WL_MONITOR
    if (FW_SUPPORTED(dhd, monitor)) {
        dhd->monitor_enable = TRUE;
        DHD_ERROR(("%s: Monitor mode is enabled in FW cap\n", __FUNCTION__));
    } else {
        dhd->monitor_enable = FALSE;
        DHD_ERROR(("%s: Monitor mode is not enabled in FW cap\n", __FUNCTION__));
    }
#endif /* WL_MONITOR */
#ifdef CONFIG_SILENT_ROAM
    dhd->sroam_turn_on = TRUE;
    dhd->sroamed = FALSE;
#endif /* CONFIG_SILENT_ROAM */

done:
    if (eventmask_msg) {
        MFREE(dhd->osh, eventmask_msg, msglen);
        eventmask_msg = NULL;
    }
    if (iov_buf) {
        MFREE(dhd->osh, iov_buf, WLC_IOCTL_SMLEN);
        iov_buf = NULL;
    }
#if defined(DHD_8021X_DUMP) && defined(SHOW_LOGTRACE)
    if (el_tag) {
        MFREE(dhd->osh, el_tag, sizeof(wl_el_tag_params_t));
        el_tag = NULL;
    }
#endif /* DHD_8021X_DUMP && SHOW_LOGTRACE */
    return ret;
}
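
/*
 * Generic iovar get/set wrapper used throughout this file (summary comment,
 * added for readability). For a SET (set=TRUE), param_buf/param_len carry the
 * value, res_buf must be NULL and res_len 0; the iovar name and parameter are
 * packed into a single buffer with bcm_mkiovar() and issued as WLC_SET_VAR.
 * For a GET (set=FALSE), the name is packed into res_buf (or into a temporary
 * buffer when res_buf is too small to hold name + params) and issued as
 * WLC_GET_VAR, with the result returned in res_buf. Typical calls, as seen
 * in dhd_preinit_ioctls() above:
 *
 *   ret = dhd_iovar(dhd, 0, "bcn_timeout", (char *)&bcn_timeout,
 *       sizeof(bcn_timeout), NULL, 0, TRUE);               // SET
 *   ret = dhd_iovar(dhd, 0, "cap", NULL, 0, dhd->fw_capabilities,
 *       sizeof(dhd->fw_capabilities), FALSE);              // GET
 */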
int
dhd_iovar(dhd_pub_t *pub, int ifidx, char *name, char *param_buf, uint param_len, char *res_buf,
    uint res_len, int set)
{
    char *buf = NULL;
    int input_len;
    wl_ioctl_t ioc;
    int ret;

    if (res_len > WLC_IOCTL_MAXLEN || param_len > WLC_IOCTL_MAXLEN)
        return BCME_BADARG;

    input_len = strlen(name) + 1 + param_len;
    if (input_len > WLC_IOCTL_MAXLEN)
        return BCME_BADARG;

    buf = NULL;
    if (set) {
        if (res_buf || res_len != 0) {
            DHD_ERROR(("%s: SET wrong argument\n", __FUNCTION__));
            ret = BCME_BADARG;
            goto exit;
        }
        buf = MALLOCZ(pub->osh, input_len);
        if (!buf) {
            DHD_ERROR(("%s: mem alloc failed\n", __FUNCTION__));
            ret = BCME_NOMEM;
            goto exit;
        }
        ret = bcm_mkiovar(name, param_buf, param_len, buf, input_len);
        if (!ret) {
            ret = BCME_NOMEM;
            goto exit;
        }
        ioc.cmd = WLC_SET_VAR;
        ioc.buf = buf;
        ioc.len = input_len;
        ioc.set = set;
        ret = dhd_wl_ioctl(pub, ifidx, &ioc, ioc.buf, ioc.len);
    } else {
        if (!res_buf || !res_len) {
            DHD_ERROR(("%s: GET failed. resp_buf NULL or length 0.\n", __FUNCTION__));
            ret = BCME_BADARG;
            goto exit;
        }
        if (res_len < input_len) {
            DHD_INFO(("%s: res_len(%d) < input_len(%d)\n", __FUNCTION__,
                res_len, input_len));
            buf = MALLOCZ(pub->osh, input_len);
            if (!buf) {
                DHD_ERROR(("%s: mem alloc failed\n", __FUNCTION__));
                ret = BCME_NOMEM;
                goto exit;
            }
            ret = bcm_mkiovar(name, param_buf, param_len, buf, input_len);
            if (!ret) {
                ret = BCME_NOMEM;
                goto exit;
            }
            ioc.cmd = WLC_GET_VAR;
            ioc.buf = buf;
            ioc.len = input_len;
            ioc.set = set;
            ret = dhd_wl_ioctl(pub, ifidx, &ioc, ioc.buf, ioc.len);
            if (ret == BCME_OK) {
                memcpy(res_buf, buf, res_len);
            }
        } else {
            memset(res_buf, 0, res_len);
            ret = bcm_mkiovar(name, param_buf, param_len, res_buf, res_len);
            if (!ret) {
                ret = BCME_NOMEM;
                goto exit;
            }
            ioc.cmd = WLC_GET_VAR;
            ioc.buf = res_buf;
            ioc.len = res_len;
            ioc.set = set;
            ret = dhd_wl_ioctl(pub, ifidx, &ioc, ioc.buf, ioc.len);
        }
    }
exit:
    if (buf) {
        MFREE(pub->osh, buf, input_len);
        buf = NULL;
    }
    return ret;
}
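
/*
 * Variant of the iovar GET path that reuses a caller-provided response buffer
 * (*resptr) of resp_len bytes (summary comment, added for readability): the
 * iovar name and optional cmd_buf parameters are packed in place with
 * bcm_mkiovar(), then a WLC_GET_VAR ioctl overwrites the buffer with the
 * result. Returns BCME_BADARG/BCME_BUFTOOSHORT on sizing errors, otherwise
 * the ioctl status.
 */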
int
dhd_getiovar(dhd_pub_t *pub, int ifidx, char *name, char *cmd_buf,
    uint cmd_len, char **resptr, uint resp_len)
{
    int len = resp_len;
    int ret;
    char *buf = *resptr;
    wl_ioctl_t ioc;

    if (resp_len > WLC_IOCTL_MAXLEN)
        return BCME_BADARG;

    memset(buf, 0, resp_len);
    ret = bcm_mkiovar(name, cmd_buf, cmd_len, buf, len);
    if (ret == 0) {
        return BCME_BUFTOOSHORT;
    }
    memset(&ioc, 0, sizeof(ioc));
    ioc.cmd = WLC_GET_VAR;
    ioc.buf = buf;
    ioc.len = len;
    ioc.set = 0;
    ret = dhd_wl_ioctl(pub, ifidx, &ioc, ioc.buf, ioc.len);
    return ret;
}
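
/*
 * MTU change hook for DHD net devices (summary comment, added for
 * readability): the interface must be administratively down, and the new MTU
 * must fall within [DHD_MIN_MTU, DHD_MAX_MTU] = [1500, 1752] as defined below.
 */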
int dhd_change_mtu(dhd_pub_t *dhdp, int new_mtu, int ifidx)
{
    struct dhd_info *dhd = dhdp->info;
    struct net_device *dev = NULL;

    ASSERT(dhd && dhd->iflist[ifidx]);
    dev = dhd->iflist[ifidx]->net;
    ASSERT(dev);

    if (netif_running(dev)) {
        DHD_ERROR(("%s: Must be down to change its MTU\n", dev->name));
        return BCME_NOTDOWN;
    }

#define DHD_MIN_MTU 1500
#define DHD_MAX_MTU 1752

    if ((new_mtu < DHD_MIN_MTU) || (new_mtu > DHD_MAX_MTU)) {
        DHD_ERROR(("%s: MTU size %d is invalid.\n", __FUNCTION__, new_mtu));
        return BCME_BADARG;
    }

    dev->mtu = new_mtu;
    return 0;
}
#ifdef ARP_OFFLOAD_SUPPORT
/* add or remove AOE host ip(s) (up to 8 IPs on the interface) */
void
aoe_update_host_ipv4_table(dhd_pub_t *dhd_pub, u32 ipa, bool add, int idx)
{
    u32 ipv4_buf[MAX_IPV4_ENTRIES]; /* temp save for AOE host_ip table */
    int i;
    int ret;

    bzero(ipv4_buf, sizeof(ipv4_buf));

    /* display what we've got */
    ret = dhd_arp_get_arp_hostip_table(dhd_pub, ipv4_buf, sizeof(ipv4_buf), idx);
    DHD_ARPOE(("%s: hostip table read from Dongle:\n", __FUNCTION__));
#ifdef AOE_DBG
    dhd_print_buf(ipv4_buf, 32, 4); /* max 8 IPs 4b each */
#endif // endif
    /* now we saved host_ip table, clr it in the dongle AOE */
    dhd_aoe_hostip_clr(dhd_pub, idx);

    if (ret) {
        DHD_ERROR(("%s failed\n", __FUNCTION__));
        return;
    }

    for (i = 0; i < MAX_IPV4_ENTRIES; i++) {
        if (add && (ipv4_buf[i] == 0)) {
            ipv4_buf[i] = ipa;
            add = FALSE; /* added ipa to local table */
            DHD_ARPOE(("%s: Saved new IP in temp arp_hostip[%d]\n",
                __FUNCTION__, i));
        } else if (ipv4_buf[i] == ipa) {
            ipv4_buf[i] = 0;
            DHD_ARPOE(("%s: removed IP:%x from temp table %d\n",
                __FUNCTION__, ipa, i));
        }

        if (ipv4_buf[i] != 0) {
            /* add back host_ip entries from our local cache */
            dhd_arp_offload_add_ip(dhd_pub, ipv4_buf[i], idx);
            DHD_ARPOE(("%s: added IP:%x to dongle arp_hostip[%d]\n\n",
                __FUNCTION__, ipv4_buf[i], i));
        }
    }
#ifdef AOE_DBG
    /* see the resulting hostip table */
    dhd_arp_get_arp_hostip_table(dhd_pub, ipv4_buf, sizeof(ipv4_buf), idx);
    DHD_ARPOE(("%s: read back arp_hostip table:\n", __FUNCTION__));
    dhd_print_buf(ipv4_buf, 32, 4); /* max 8 IPs 4b each */
#endif // endif
}
/*
 * Notification mechanism from kernel to our driver. This function is called by the Linux kernel
 * whenever there is an event related to an IP address.
 * ptr : kernel provided pointer to IP address that has changed
 */
static int dhd_inetaddr_notifier_call(struct notifier_block *this,
    unsigned long event,
    void *ptr)
{
    struct in_ifaddr *ifa = (struct in_ifaddr *)ptr;
    dhd_info_t *dhd;
    dhd_pub_t *dhd_pub;
    int idx;

    if (!dhd_arp_enable)
        return NOTIFY_DONE;
    if (!ifa || !(ifa->ifa_dev->dev))
        return NOTIFY_DONE;

    /* Filter notifications meant for non Broadcom devices */
    if ((ifa->ifa_dev->dev->netdev_ops != &dhd_ops_pri) &&
        (ifa->ifa_dev->dev->netdev_ops != &dhd_ops_virt)) {
#if defined(WL_ENABLE_P2P_IF)
        if (!wl_cfgp2p_is_ifops(ifa->ifa_dev->dev->netdev_ops))
#endif /* WL_ENABLE_P2P_IF */
            return NOTIFY_DONE;
    }

    dhd = DHD_DEV_INFO(ifa->ifa_dev->dev);
    if (!dhd)
        return NOTIFY_DONE;
    dhd_pub = &dhd->pub;

    if (dhd_pub->arp_version == 1) {
        idx = 0;
    } else {
        for (idx = 0; idx < DHD_MAX_IFS; idx++) {
            if (dhd->iflist[idx] && dhd->iflist[idx]->net == ifa->ifa_dev->dev)
                break;
        }
        if (idx < DHD_MAX_IFS)
            DHD_TRACE(("ifidx : %p %s %d\n", dhd->iflist[idx]->net,
                dhd->iflist[idx]->name, dhd->iflist[idx]->idx));
        else {
            DHD_ERROR(("Cannot find ifidx for(%s) set to 0\n", ifa->ifa_label));
            idx = 0;
        }
    }

    switch (event) {
    case NETDEV_UP:
        DHD_ARPOE(("%s: [%s] Up IP: 0x%x\n",
            __FUNCTION__, ifa->ifa_label, ifa->ifa_address));
        /*
         * Skip if Bus is not in a state to transport the IOVAR
         * (or) the Dongle is not ready.
         */
        if (DHD_BUS_CHECK_DOWN_OR_DOWN_IN_PROGRESS(&dhd->pub) ||
            dhd->pub.busstate == DHD_BUS_LOAD) {
            DHD_ERROR(("%s: bus not ready, exit NETDEV_UP : %d\n",
                __FUNCTION__, dhd->pub.busstate));
            if (dhd->pend_ipaddr) {
                DHD_ERROR(("%s: overwrite pending ipaddr: 0x%x\n",
                    __FUNCTION__, dhd->pend_ipaddr));
            }
            dhd->pend_ipaddr = ifa->ifa_address;
            break;
        }
#ifdef AOE_IP_ALIAS_SUPPORT
        DHD_ARPOE(("%s: add aliased IP to AOE hostip cache\n",
            __FUNCTION__));
        aoe_update_host_ipv4_table(dhd_pub, ifa->ifa_address, TRUE, idx);
#endif /* AOE_IP_ALIAS_SUPPORT */
        break;

    case NETDEV_DOWN:
        DHD_ARPOE(("%s: [%s] Down IP: 0x%x\n",
            __FUNCTION__, ifa->ifa_label, ifa->ifa_address));
        dhd->pend_ipaddr = 0;
#ifdef AOE_IP_ALIAS_SUPPORT
        DHD_ARPOE(("%s: interface is down, AOE clr all for this if\n",
            __FUNCTION__));
        if ((dhd_pub->op_mode & DHD_FLAG_HOSTAP_MODE) ||
            (ifa->ifa_dev->dev != dhd_linux_get_primary_netdev(dhd_pub))) {
            aoe_update_host_ipv4_table(dhd_pub, ifa->ifa_address, FALSE, idx);
        } else
#endif /* AOE_IP_ALIAS_SUPPORT */
        {
            dhd_aoe_hostip_clr(&dhd->pub, idx);
            dhd_aoe_arp_clr(&dhd->pub, idx);
        }
        break;

    default:
        DHD_ARPOE(("%s: do nothing for [%s] Event: %lu\n",
            __func__, ifa->ifa_label, event));
        break;
    }
    return NOTIFY_DONE;
}
#endif /* ARP_OFFLOAD_SUPPORT */
#if defined(CONFIG_IPV6) && defined(IPV6_NDO_SUPPORT)
/* Neighbor Discovery Offload: deferred handler */
static void
dhd_inet6_work_handler(void *dhd_info, void *event_data, u8 event)
{
    struct ipv6_work_info_t *ndo_work = (struct ipv6_work_info_t *)event_data;
    dhd_info_t *dhd = (dhd_info_t *)dhd_info;
    dhd_pub_t *dhdp;
    int ret;

    if (!dhd) {
        DHD_ERROR(("%s: invalid dhd_info\n", __FUNCTION__));
        goto done;
    }
    dhdp = &dhd->pub;

    if (event != DHD_WQ_WORK_IPV6_NDO) {
        DHD_ERROR(("%s: unexpected event\n", __FUNCTION__));
        goto done;
    }

    if (!ndo_work) {
        DHD_ERROR(("%s: ipv6 work info is not initialized\n", __FUNCTION__));
        return;
    }

    switch (ndo_work->event) {
    case NETDEV_UP:
#ifndef NDO_CONFIG_SUPPORT
        DHD_TRACE(("%s: Enable NDO\n", __FUNCTION__));
        ret = dhd_ndo_enable(dhdp, TRUE);
        if (ret < 0) {
            DHD_ERROR(("%s: Enabling NDO Failed %d\n", __FUNCTION__, ret));
        }
#endif /* !NDO_CONFIG_SUPPORT */
        DHD_TRACE(("%s: Add a host ip for NDO\n", __FUNCTION__));
        if (dhdp->ndo_version > 0) {
            /* inet6 addr notifier called only for unicast address */
            ret = dhd_ndo_add_ip_with_type(dhdp, &ndo_work->ipv6_addr[0],
                WL_ND_IPV6_ADDR_TYPE_UNICAST, ndo_work->if_idx);
        } else {
            ret = dhd_ndo_add_ip(dhdp, &ndo_work->ipv6_addr[0],
                ndo_work->if_idx);
        }
        if (ret < 0) {
            DHD_ERROR(("%s: Adding a host ip for NDO failed %d\n",
                __FUNCTION__, ret));
        }
        break;

    case NETDEV_DOWN:
        if (dhdp->ndo_version > 0) {
            DHD_TRACE(("%s: Remove a host ip for NDO\n", __FUNCTION__));
            ret = dhd_ndo_remove_ip_by_addr(dhdp,
                &ndo_work->ipv6_addr[0], ndo_work->if_idx);
        } else {
            DHD_TRACE(("%s: Clear host ip table for NDO\n", __FUNCTION__));
            ret = dhd_ndo_remove_ip(dhdp, ndo_work->if_idx);
        }
        if (ret < 0) {
            DHD_ERROR(("%s: Removing host ip for NDO failed %d\n",
                __FUNCTION__, ret));
            goto done;
        }
#ifdef NDO_CONFIG_SUPPORT
        if (dhdp->ndo_host_ip_overflow) {
            ret = dhd_dev_ndo_update_inet6addr(
                dhd_idx2net(dhdp, ndo_work->if_idx));
            if ((ret < 0) && (ret != BCME_NORESOURCE)) {
                DHD_ERROR(("%s: Updating host ip for NDO failed %d\n",
                    __FUNCTION__, ret));
                goto done;
            }
        }
#else /* !NDO_CONFIG_SUPPORT */
        DHD_TRACE(("%s: Disable NDO\n", __FUNCTION__));
        ret = dhd_ndo_enable(dhdp, FALSE);
        if (ret < 0) {
            DHD_ERROR(("%s: disabling NDO Failed %d\n", __FUNCTION__, ret));
            goto done;
        }
#endif /* NDO_CONFIG_SUPPORT */
        break;

    default:
        DHD_ERROR(("%s: unknown notifier event\n", __FUNCTION__));
        break;
    }
done:
    /* free ndo_work. alloced while scheduling the work */
    if (ndo_work) {
        kfree(ndo_work);
    }
    return;
} /* dhd_inet6_work_handler */
/*
 * Neighbor Discovery Offload: Called when an interface
 * is assigned with ipv6 address.
 * Handles only primary interface
 */
int dhd_inet6addr_notifier_call(struct notifier_block *this, unsigned long event, void *ptr)
{
    dhd_info_t *dhd;
    dhd_pub_t *dhdp;
    struct inet6_ifaddr *inet6_ifa = ptr;
    struct ipv6_work_info_t *ndo_info;
    int idx;

    /* Filter notifications meant for non Broadcom devices */
    if (inet6_ifa->idev->dev->netdev_ops != &dhd_ops_pri) {
        return NOTIFY_DONE;
    }

    dhd = DHD_DEV_INFO(inet6_ifa->idev->dev);
    if (!dhd) {
        return NOTIFY_DONE;
    }
    dhdp = &dhd->pub;

    /* Supports only primary interface */
    idx = dhd_net2idx(dhd, inet6_ifa->idev->dev);
    if (idx != 0) {
        return NOTIFY_DONE;
    }

    /* FW capability */
    if (!FW_SUPPORTED(dhdp, ndoe)) {
        return NOTIFY_DONE;
    }

    ndo_info = (struct ipv6_work_info_t *)kzalloc(sizeof(struct ipv6_work_info_t), GFP_ATOMIC);
    if (!ndo_info) {
        DHD_ERROR(("%s: ipv6 work alloc failed\n", __FUNCTION__));
        return NOTIFY_DONE;
    }

    /* fill up ndo_info */
    ndo_info->event = event;
    ndo_info->if_idx = idx;
    memcpy(ndo_info->ipv6_addr, &inet6_ifa->addr, IPV6_ADDR_LEN);

    /* defer the work to thread as it may block kernel */
    dhd_deferred_schedule_work(dhd->dhd_deferred_wq, (void *)ndo_info, DHD_WQ_WORK_IPV6_NDO,
        dhd_inet6_work_handler, DHD_WQ_WORK_PRIORITY_LOW);
    return NOTIFY_DONE;
}
#endif /* CONFIG_IPV6 && IPV6_NDO_SUPPORT */
/* Network attach to be invoked from the bus probe handlers */
int
dhd_attach_net(dhd_pub_t *dhdp, bool need_rtnl_lock)
{
    struct net_device *primary_ndev;
    BCM_REFERENCE(primary_ndev);

    /* Register primary net device */
    if (dhd_register_if(dhdp, 0, need_rtnl_lock) != 0) {
        return BCME_ERROR;
    }

#if defined(WL_CFG80211)
    primary_ndev = dhd_linux_get_primary_netdev(dhdp);
    if (wl_cfg80211_net_attach(primary_ndev) < 0) {
        /* fail the init */
        dhd_remove_if(dhdp, 0, TRUE);
        return BCME_ERROR;
    }
#endif /* WL_CFG80211 */
    return BCME_OK;
}
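
/*
 * Register the net_device for interface ifidx with the kernel (summary
 * comment, added for readability): ifidx 0 gets the primary netdev_ops and
 * the dongle MAC; virtual interfaces reuse the primary MAC, with Android
 * setting the locally administered bit when the two would otherwise collide.
 * On Android PCIe/SDIO builds with deferred firmware download, the dongle is
 * also reset and powered down again right after registration.
 */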
int
dhd_register_if(dhd_pub_t *dhdp, int ifidx, bool need_rtnl_lock)
{
    dhd_info_t *dhd = (dhd_info_t *)dhdp->info;
    dhd_if_t *ifp;
    struct net_device *net = NULL;
    int err = 0;
    uint8 temp_addr[ETHER_ADDR_LEN] = { 0x00, 0x90, 0x4c, 0x11, 0x22, 0x33 };

    DHD_TRACE(("%s: ifidx %d\n", __FUNCTION__, ifidx));

    if (dhd == NULL || dhd->iflist[ifidx] == NULL) {
        DHD_ERROR(("%s: Invalid Interface\n", __FUNCTION__));
        return BCME_ERROR;
    }

    ASSERT(dhd && dhd->iflist[ifidx]);
    ifp = dhd->iflist[ifidx];
    net = ifp->net;
    ASSERT(net && (ifp->idx == ifidx));

    ASSERT(!net->netdev_ops);
    net->netdev_ops = &dhd_ops_virt;

    /* Ok, link into the network layer... */
    if (ifidx == 0) {
        /*
         * device functions for the primary interface only
         */
        net->netdev_ops = &dhd_ops_pri;
        if (!ETHER_ISNULLADDR(dhd->pub.mac.octet))
            memcpy(temp_addr, dhd->pub.mac.octet, ETHER_ADDR_LEN);
        memcpy(dhd->iflist[0]->mac_addr, dhd->pub.mac.octet, ETHER_ADDR_LEN);
    } else {
        /*
         * We have to use the primary MAC for virtual interfaces
         */
        memcpy(temp_addr, ifp->mac_addr, ETHER_ADDR_LEN);
#if defined(OEM_ANDROID)
        /*
         * Android sets the locally administered bit to indicate that this is a
         * portable hotspot. This will not work in simultaneous AP/STA mode,
         * nor with P2P. Need to set the Dongle's MAC address, and then use that.
         */
        if (!memcmp(temp_addr, dhd->iflist[0]->mac_addr,
            ETHER_ADDR_LEN)) {
            DHD_ERROR(("%s interface [%s]: set locally administered bit in MAC\n",
                __func__, net->name));
            temp_addr[0] |= 0x02;
            memcpy(dhd->iflist[ifidx]->mac_addr, temp_addr, ETHER_ADDR_LEN);
        }
#endif /* defined(OEM_ANDROID) */
    }

    net->hard_header_len = ETH_HLEN + dhd->pub.hdrlen;
    net->ethtool_ops = &dhd_ethtool_ops;

#if defined(WL_WIRELESS_EXT)
#if WIRELESS_EXT < 19
    net->get_wireless_stats = dhd_get_wireless_stats;
#endif /* WIRELESS_EXT < 19 */
#if WIRELESS_EXT > 12
    net->wireless_handlers = &wl_iw_handler_def;
#endif /* WIRELESS_EXT > 12 */
#endif /* defined(WL_WIRELESS_EXT) */

    dhd->pub.rxsz = DBUS_RX_BUFFER_SIZE_DHD(net);

    memcpy(net->dev_addr, temp_addr, ETHER_ADDR_LEN);

    if (ifidx == 0)
        printf("%s\n", dhd_version);

    if (need_rtnl_lock)
        err = register_netdev(net);
    else
        err = register_netdevice(net);

    if (err != 0) {
        DHD_ERROR(("couldn't register the net device [%s], err %d\n", net->name, err));
        goto fail;
    }

    printf("Register interface [%s] MAC: "MACDBG"\n\n", net->name,
#if defined(CUSTOMER_HW4_DEBUG)
        MAC2STRDBG(dhd->pub.mac.octet));
#else
        MAC2STRDBG(net->dev_addr));
#endif /* CUSTOMER_HW4_DEBUG */

#if defined(OEM_ANDROID) && defined(SOFTAP) && defined(WL_WIRELESS_EXT) && \
    !defined(WL_CFG80211)
    wl_iw_iscan_set_scan_broadcast_prep(net, 1);
#endif // endif

#if defined(OEM_ANDROID) && (defined(BCMPCIE) || defined(BCMLXSDMMC))
    if (ifidx == 0) {
#ifdef BCMLXSDMMC
        up(&dhd_registration_sem);
#endif /* BCMLXSDMMC */
#ifndef ENABLE_INSMOD_NO_FW_LOAD
        if (!dhd_download_fw_on_driverload) {
#ifdef WL_CFG80211
            wl_terminate_event_handler(net);
#endif /* WL_CFG80211 */
#if defined(DHD_LB_RXP)
            __skb_queue_purge(&dhd->rx_pend_queue);
#endif /* DHD_LB_RXP */
#if defined(DHD_LB_TXP)
            skb_queue_purge(&dhd->tx_pend_queue);
#endif /* DHD_LB_TXP */
#ifdef SHOW_LOGTRACE
            /* Release the skbs from queue for WLC_E_TRACE event */
            dhd_event_logtrace_flush_queue(dhdp);
#endif /* SHOW_LOGTRACE */
#if defined(BCMPCIE) && defined(DHDTCPACK_SUPPRESS)
            dhd_tcpack_suppress_set(dhdp, TCPACK_SUP_OFF);
#endif /* BCMPCIE && DHDTCPACK_SUPPRESS */
            dhd_net_bus_devreset(net, TRUE);
#ifdef BCMLXSDMMC
            dhd_net_bus_suspend(net);
#endif /* BCMLXSDMMC */
            wifi_platform_set_power(dhdp->info->adapter, FALSE, WIFI_TURNOFF_DELAY);
#endif /* ENABLE_INSMOD_NO_FW_LOAD */
#if defined(BT_OVER_SDIO)
            dhd->bus_user_count--;
#endif /* BT_OVER_SDIO */
        }
    }
#endif /* OEM_ANDROID && (BCMPCIE || BCMLXSDMMC) */
    return 0;

fail:
    net->netdev_ops = NULL;
    return err;
}
#ifdef WL_VIF_SUPPORT
#define MAX_VIF_NUM 8
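/*
 * Create the module-parameter-driven virtual STA interfaces (summary comment,
 * added for readability): up to MAX_VIF_NUM interfaces named after the
 * vif_name module parameter (a "%d" suffix is appended when the name does not
 * already end in a digit), each added via wl_cfg80211_add_if() under
 * rtnl_lock.
 */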
  10607. int
  10608. dhd_register_vif(dhd_pub_t *dhdp)
  10609. {
  10610. dhd_info_t *dhd = (dhd_info_t *)dhdp->info;
  10611. dhd_if_t *ifp;
  10612. struct net_device *net;
  10613. int err = BCME_OK, i;
  10614. char viface_name[IFNAMSIZ] = {'\0'};
  10615. ifp = dhd->iflist[0];
  10616. net = ifp->net;
  10617. if (vif_num && vif_num > MAX_VIF_NUM)
  10618. vif_num = MAX_VIF_NUM;
  10619. /* Set virtual interface name if it was provided as module parameter */
  10620. if (vif_name[0]) {
  10621. int len;
  10622. char ch;
  10623. strncpy(viface_name, vif_name, IFNAMSIZ);
  10624. viface_name[IFNAMSIZ - 1] = 0;
  10625. len = strlen(viface_name);
  10626. ch = viface_name[len - 1];
  10627. if ((ch > '9' || ch < '0') && (len < IFNAMSIZ - 2))
  10628. strcat(viface_name, "%d");
  10629. } else {
  10630. DHD_ERROR(("%s check vif_name\n", __FUNCTION__));
  10631. return BCME_BADOPTION;
  10632. }
  10633. DHD_INFO(("%s Virtual interface [%s]:\n", __FUNCTION__, viface_name));
  10634. rtnl_lock();
  10635. for (i = 0; i < vif_num; i++) {
  10636. if (wl_cfg80211_add_if(wl_get_cfg(net), net, WL_IF_TYPE_STA, viface_name, NULL)
  10637. == NULL) {
  10638. DHD_ERROR(("%s error Virtual interface [%s], i:%d\n", __FUNCTION__,
  10639. viface_name, i));
  10640. break;
  10641. }
  10642. }
  10643. rtnl_unlock();
  10644. return err;
  10645. }
#endif /* WL_VIF_SUPPORT */

void
dhd_bus_detach(dhd_pub_t *dhdp)
{
	dhd_info_t *dhd;

	DHD_TRACE(("%s: Enter\n", __FUNCTION__));

	if (dhdp) {
		dhd = (dhd_info_t *)dhdp->info;
		if (dhd) {
			/*
			 * In case of the Android cfg80211 driver, the bus is already down
			 * in dhd_stop; calling stop again would cause SD read/write errors.
			 */
			if (dhd->pub.busstate != DHD_BUS_DOWN) {
				/* Stop the protocol module */
				dhd_prot_stop(&dhd->pub);
				/* Stop the bus module */
				dhd_bus_stop(dhd->pub.bus, TRUE);
			}
#if defined(OOB_INTR_ONLY) || defined(BCMSPI_ANDROID) || defined(BCMPCIE_OOB_HOST_WAKE)
			dhd_bus_oob_intr_unregister(dhdp);
#endif /* OOB_INTR_ONLY || BCMSPI_ANDROID || BCMPCIE_OOB_HOST_WAKE */
		}
	}
}
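
/*
 * dhd_detach() tears down everything dhd_attach() set up, in roughly reverse
 * order: close the primary netdev if it is still IFF_UP, stop the bus and
 * protocol layers, unregister notifiers, delete virtual interfaces and then
 * the primary one, stop the watchdog timer and worker threads, and finally
 * release debug/logging resources. The DHD_ATTACH_STATE_* bits recorded
 * during attach gate each step, so a partially attached instance can be
 * detached safely.
 */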
void dhd_detach(dhd_pub_t *dhdp)
{
	dhd_info_t *dhd;
	unsigned long flags;
	int timer_valid = FALSE;
	struct net_device *dev;
#ifdef WL_CFG80211
	struct bcm_cfg80211 *cfg = NULL;
#endif // endif

	if (!dhdp)
		return;

	dhd = (dhd_info_t *)dhdp->info;
	if (!dhd)
		return;

	dev = dhd->iflist[0]->net;
	if (dev) {
		rtnl_lock();
		if (dev->flags & IFF_UP) {
			/* If IFF_UP is still set, "ifconfig wlan0 down" has not
			 * been called, so invoke dev_close explicitly here to
			 * bring down the interface.
			 */
			DHD_TRACE(("IFF_UP flag is up. Enforcing dev_close from detach\n"));
			dev_close(dev);
		}
		rtnl_unlock();
	}

	DHD_TRACE(("%s: Enter state 0x%x\n", __FUNCTION__, dhd->dhd_state));

	DHD_ERROR(("%s: making dhdpub up FALSE\n", __FUNCTION__));
	dhd->pub.up = 0;
	if (!(dhd->dhd_state & DHD_ATTACH_STATE_DONE)) {
		/* Give sufficient time for threads to start running in case
		 * dhd_attach() has failed
		 */
		OSL_SLEEP(100);
	}
#ifdef DHD_WET
	dhd_free_wet_info(&dhd->pub, dhd->pub.wet_info);
#endif /* DHD_WET */
#if defined(BCM_DNGL_EMBEDIMAGE) || defined(BCM_REQUEST_FW)
#endif /* defined(BCM_DNGL_EMBEDIMAGE) || defined(BCM_REQUEST_FW) */
#ifdef PROP_TXSTATUS
#ifdef DHD_WLFC_THREAD
	if (dhd->pub.wlfc_thread) {
		kthread_stop(dhd->pub.wlfc_thread);
		dhdp->wlfc_thread_go = TRUE;
		wake_up_interruptible(&dhdp->wlfc_wqhead);
	}
	dhd->pub.wlfc_thread = NULL;
#endif /* DHD_WLFC_THREAD */
#endif /* PROP_TXSTATUS */

#ifdef WL_CFG80211
	if (dev)
		wl_cfg80211_down(dev);
#endif /* WL_CFG80211 */

	if (dhd->dhd_state & DHD_ATTACH_STATE_PROT_ATTACH) {
#if defined(OEM_ANDROID) || !defined(BCMSDIO)
		dhd_bus_detach(dhdp);
#endif /* OEM_ANDROID || !BCMSDIO */
#ifdef OEM_ANDROID
#ifdef BCMPCIE
		if (is_reboot == SYS_RESTART) {
			extern bcmdhd_wifi_platdata_t *dhd_wifi_platdata;
			if (dhd_wifi_platdata && !dhdp->dongle_reset) {
				dhdpcie_bus_clock_stop(dhdp->bus);
				wifi_platform_set_power(dhd_wifi_platdata->adapters,
					FALSE, WIFI_TURNOFF_DELAY);
			}
		}
#endif /* BCMPCIE */
#endif /* OEM_ANDROID */
#ifndef PCIE_FULL_DONGLE
#if defined(OEM_ANDROID) || !defined(BCMSDIO)
		if (dhdp->prot)
			dhd_prot_detach(dhdp);
#endif /* OEM_ANDROID || !BCMSDIO */
#endif /* !PCIE_FULL_DONGLE */
	}

#ifdef ARP_OFFLOAD_SUPPORT
	if (dhd_inetaddr_notifier_registered) {
		dhd_inetaddr_notifier_registered = FALSE;
		unregister_inetaddr_notifier(&dhd_inetaddr_notifier);
	}
#endif /* ARP_OFFLOAD_SUPPORT */
#if defined(CONFIG_IPV6) && defined(IPV6_NDO_SUPPORT)
	if (dhd_inet6addr_notifier_registered) {
		dhd_inet6addr_notifier_registered = FALSE;
		unregister_inet6addr_notifier(&dhd_inet6addr_notifier);
	}
#endif /* CONFIG_IPV6 && IPV6_NDO_SUPPORT */
#if defined(CONFIG_HAS_EARLYSUSPEND) && defined(DHD_USE_EARLYSUSPEND)
	if (dhd->dhd_state & DHD_ATTACH_STATE_EARLYSUSPEND_DONE) {
		if (dhd->early_suspend.suspend)
			unregister_early_suspend(&dhd->early_suspend);
	}
#endif /* CONFIG_HAS_EARLYSUSPEND && DHD_USE_EARLYSUSPEND */

#if defined(WL_WIRELESS_EXT)
	if (dhd->dhd_state & DHD_ATTACH_STATE_WL_ATTACH) {
		/* Detach and unlink in the iw */
		wl_iw_detach();
	}
#endif /* defined(WL_WIRELESS_EXT) */

#ifdef DHD_ULP
	dhd_ulp_deinit(dhd->pub.osh, dhdp);
#endif /* DHD_ULP */

	/* delete all interfaces, start with virtual */
	if (dhd->dhd_state & DHD_ATTACH_STATE_ADD_IF) {
		int i = 1;
		dhd_if_t *ifp;

		/* Cleanup virtual interfaces */
		dhd_net_if_lock_local(dhd);
		for (i = 1; i < DHD_MAX_IFS; i++) {
			if (dhd->iflist[i]) {
				dhd_remove_if(&dhd->pub, i, TRUE);
			}
		}
		dhd_net_if_unlock_local(dhd);

		/* delete primary interface 0 */
		ifp = dhd->iflist[0];
		if (ifp && ifp->net) {
#ifdef WL_CFG80211
			cfg = wl_get_cfg(ifp->net);
#endif // endif
			/* in the unregister_netdev case, the interface gets freed by
			 * net->destructor (which is set to free_netdev)
			 */
			if (ifp->net->reg_state == NETREG_UNINITIALIZED) {
				free_netdev(ifp->net);
			} else {
#if defined(ARGOS_NOTIFY_CB)
				argos_register_notifier_deinit();
#endif // endif
#ifdef SET_RPS_CPUS
				custom_rps_map_clear(ifp->net->_rx);
#endif /* SET_RPS_CPUS */
				netif_tx_disable(ifp->net);
				unregister_netdev(ifp->net);
			}
#ifdef PCIE_FULL_DONGLE
			ifp->net = DHD_NET_DEV_NULL;
#else
			ifp->net = NULL;
#endif /* PCIE_FULL_DONGLE */
#if defined(BCMSDIO) && !defined(OEM_ANDROID)
			dhd_bus_detach(dhdp);

			if (dhdp->prot)
				dhd_prot_detach(dhdp);
#endif /* BCMSDIO && !OEM_ANDROID */

#ifdef DHD_L2_FILTER
			bcm_l2_filter_arp_table_update(dhdp->osh, ifp->phnd_arp_table, TRUE,
				NULL, FALSE, dhdp->tickcnt);
			deinit_l2_filter_arp_table(dhdp->osh, ifp->phnd_arp_table);
			ifp->phnd_arp_table = NULL;
#endif /* DHD_L2_FILTER */

			dhd_if_del_sta_list(ifp);

			MFREE(dhd->pub.osh, ifp, sizeof(*ifp));
			dhd->iflist[0] = NULL;
		}
	}

	/* Clear the watchdog timer */
	DHD_GENERAL_LOCK(&dhd->pub, flags);
	timer_valid = dhd->wd_timer_valid;
	dhd->wd_timer_valid = FALSE;
	DHD_GENERAL_UNLOCK(&dhd->pub, flags);
	if (timer_valid)
		del_timer_sync(&dhd->timer);
	DHD_DISABLE_RUNTIME_PM(&dhd->pub);

	if (dhd->dhd_state & DHD_ATTACH_STATE_THREADS_CREATED) {
#ifdef DHD_PCIE_RUNTIMEPM
		if (dhd->thr_rpm_ctl.thr_pid >= 0) {
			PROC_STOP(&dhd->thr_rpm_ctl);
		}
#endif /* DHD_PCIE_RUNTIMEPM */
		if (dhd->thr_wdt_ctl.thr_pid >= 0) {
			PROC_STOP(&dhd->thr_wdt_ctl);
		}

		if (dhd->rxthread_enabled && dhd->thr_rxf_ctl.thr_pid >= 0) {
			PROC_STOP(&dhd->thr_rxf_ctl);
		}

		if (dhd->thr_dpc_ctl.thr_pid >= 0) {
			PROC_STOP(&dhd->thr_dpc_ctl);
		} else {
			tasklet_kill(&dhd->tasklet);
		}
	}

#ifdef WL_NATOE
	if (dhd->pub.nfct) {
		dhd_ct_close(dhd->pub.nfct);
	}
#endif /* WL_NATOE */

#ifdef DHD_LB
	if (dhd->dhd_state & DHD_ATTACH_STATE_LB_ATTACH_DONE) {
		/* Clear the flag first to avoid calling the cpu notifier */
		dhd->dhd_state &= ~DHD_ATTACH_STATE_LB_ATTACH_DONE;

		/* Kill the Load Balancing Tasklets */
#ifdef DHD_LB_RXP
		cancel_work_sync(&dhd->rx_napi_dispatcher_work);
		__skb_queue_purge(&dhd->rx_pend_queue);
#endif /* DHD_LB_RXP */
#ifdef DHD_LB_TXP
		cancel_work_sync(&dhd->tx_dispatcher_work);
		tasklet_kill(&dhd->tx_tasklet);
		__skb_queue_purge(&dhd->tx_pend_queue);
#endif /* DHD_LB_TXP */
#ifdef DHD_LB_TXC
		cancel_work_sync(&dhd->tx_compl_dispatcher_work);
		tasklet_kill(&dhd->tx_compl_tasklet);
#endif /* DHD_LB_TXC */
#ifdef DHD_LB_RXC
		tasklet_kill(&dhd->rx_compl_tasklet);
#endif /* DHD_LB_RXC */

		/* Unregister from CPU Hotplug framework */
		dhd_unregister_cpuhp_callback(dhd);

		dhd_cpumasks_deinit(dhd);
		DHD_LB_STATS_DEINIT(&dhd->pub);
	}
#endif /* DHD_LB */

#if defined(DNGL_AXI_ERROR_LOGGING) && defined(DHD_USE_WQ_FOR_DNGL_AXI_ERROR)
	cancel_work_sync(&dhd->axi_error_dispatcher_work);
#endif /* DNGL_AXI_ERROR_LOGGING && DHD_USE_WQ_FOR_DNGL_AXI_ERROR */

	DHD_SSSR_MEMPOOL_DEINIT(&dhd->pub);

#ifdef WL_CFG80211
	if (dhd->dhd_state & DHD_ATTACH_STATE_CFG80211) {
		if (!cfg) {
			DHD_ERROR(("cfg NULL!\n"));
			ASSERT(0);
		} else {
			wl_cfg80211_detach(cfg);
			dhd_monitor_uninit();
		}
	}
#endif // endif

#ifdef DHD_PCIE_NATIVE_RUNTIMEPM
	destroy_workqueue(dhd->tx_wq);
	dhd->tx_wq = NULL;
	destroy_workqueue(dhd->rx_wq);
	dhd->rx_wq = NULL;
#endif /* DHD_PCIE_NATIVE_RUNTIMEPM */

#ifdef DEBUGABILITY
	if (dhdp->dbg) {
#ifdef DBG_PKT_MON
		dhd_os_dbg_detach_pkt_monitor(dhdp);
		dhd_os_spin_lock_deinit(dhd->pub.osh, dhd->pub.dbg->pkt_mon_lock);
#endif /* DBG_PKT_MON */
	}
#endif /* DEBUGABILITY */

	if (dhdp->dbg) {
		dhd_os_dbg_detach(dhdp);
	}

#ifdef DHD_PKT_LOGGING
	dhd_os_detach_pktlog(dhdp);
#endif /* DHD_PKT_LOGGING */
#ifdef DHD_STATUS_LOGGING
	dhd_detach_statlog(dhdp);
#endif /* DHD_STATUS_LOGGING */
#ifdef DHD_PKTDUMP_ROAM
	dhd_dump_pkt_deinit(dhdp);
#endif /* DHD_PKTDUMP_ROAM */
#ifdef WL_CFGVENDOR_SEND_HANG_EVENT
	if (dhd->pub.hang_info) {
		MFREE(dhd->pub.osh, dhd->pub.hang_info, VENDOR_SEND_HANG_EXT_INFO_LEN);
	}
#endif /* WL_CFGVENDOR_SEND_HANG_EVENT */
#ifdef SHOW_LOGTRACE
	/* Release the skbs from queue for WLC_E_TRACE event */
	dhd_event_logtrace_flush_queue(dhdp);

	/* Wait till event logtrace context finishes */
	dhd_cancel_logtrace_process_sync(dhd);

	/* Remove ring proc entries */
	dhd_dbg_ring_proc_destroy(&dhd->pub);

	if (dhd->dhd_state & DHD_ATTACH_LOGTRACE_INIT) {
		if (dhd->event_data.fmts) {
			MFREE(dhd->pub.osh, dhd->event_data.fmts,
				dhd->event_data.fmts_size);
			dhd->event_data.fmts = NULL;
		}
		if (dhd->event_data.raw_fmts) {
			MFREE(dhd->pub.osh, dhd->event_data.raw_fmts,
				dhd->event_data.raw_fmts_size);
			dhd->event_data.raw_fmts = NULL;
		}
		if (dhd->event_data.raw_sstr) {
			MFREE(dhd->pub.osh, dhd->event_data.raw_sstr,
				dhd->event_data.raw_sstr_size);
			dhd->event_data.raw_sstr = NULL;
		}
		if (dhd->event_data.rom_raw_sstr) {
			MFREE(dhd->pub.osh, dhd->event_data.rom_raw_sstr,
				dhd->event_data.rom_raw_sstr_size);
			dhd->event_data.rom_raw_sstr = NULL;
		}
		dhd->dhd_state &= ~DHD_ATTACH_LOGTRACE_INIT;
	}
#endif /* SHOW_LOGTRACE */
#ifdef PNO_SUPPORT
	if (dhdp->pno_state)
		dhd_pno_deinit(dhdp);
#endif // endif
#ifdef RTT_SUPPORT
	if (dhdp->rtt_state) {
		dhd_rtt_deinit(dhdp);
	}
#endif // endif
#if defined(CONFIG_PM_SLEEP)
	if (dhd_pm_notifier_registered) {
		unregister_pm_notifier(&dhd->pm_notifier);
		dhd_pm_notifier_registered = FALSE;
	}
#endif /* CONFIG_PM_SLEEP */

#ifdef DEBUG_CPU_FREQ
	if (dhd->new_freq)
		free_percpu(dhd->new_freq);
	dhd->new_freq = NULL;
	cpufreq_unregister_notifier(&dhd->freq_trans, CPUFREQ_TRANSITION_NOTIFIER);
#endif // endif

#if defined(CONFIG_PM_WAKELOCKS) || defined(CONFIG_HAS_WAKELOCK)
	dhd->wakelock_wd_counter = 0;
	wake_lock_destroy(&dhd->wl_wdwake);
#endif /* CONFIG_PM_WAKELOCKS || CONFIG_HAS_WAKELOCK */
	if (dhd->dhd_state & DHD_ATTACH_STATE_WAKELOCKS_INIT) {
		DHD_TRACE(("wd wakelock count:%d\n", dhd->wakelock_wd_counter));
		DHD_OS_WAKE_LOCK_DESTROY(dhd);
	}

#ifdef DHDTCPACK_SUPPRESS
	/* This will free all memory allocated for TCPACK suppression */
	dhd_tcpack_suppress_set(&dhd->pub, TCPACK_SUP_OFF);
#endif /* DHDTCPACK_SUPPRESS */

#ifdef PCIE_FULL_DONGLE
	dhd_flow_rings_deinit(dhdp);
	if (dhdp->prot)
		dhd_prot_detach(dhdp);
#endif // endif

#if defined(WLTDLS) && defined(PCIE_FULL_DONGLE)
	dhd_free_tdls_peer_list(dhdp);
#endif // endif

#ifdef DUMP_IOCTL_IOV_LIST
	dhd_iov_li_delete(dhdp, &(dhdp->dump_iovlist_head));
#endif /* DUMP_IOCTL_IOV_LIST */
#ifdef DHD_DEBUG
	/* memory waste feature list initialization */
	dhd_mw_list_delete(dhdp, &(dhdp->mw_list_head));
#endif /* DHD_DEBUG */
#ifdef WL_MONITOR
	dhd_del_monitor_if(dhd);
#endif /* WL_MONITOR */

#ifdef DHD_ERPOM
	if (dhdp->enable_erpom) {
		dhdp->pom_func_deregister(&dhdp->pom_wlan_handler);
	}
#endif /* DHD_ERPOM */

	cancel_work_sync(&dhd->dhd_hang_process_work);

	/* Prefer adding de-init code above this comment unless necessary.
	 * The idea is to cancel work queue, sysfs and flags at the end.
	 */
	dhd_deferred_work_deinit(dhd->dhd_deferred_wq);
	dhd->dhd_deferred_wq = NULL;

	/* log dump related buffers should be freed after wq is purged */
#ifdef DHD_LOG_DUMP
	dhd_log_dump_deinit(&dhd->pub);
#endif /* DHD_LOG_DUMP */
#if defined(BCMPCIE)
	if (dhdp->extended_trap_data) {
		MFREE(dhdp->osh, dhdp->extended_trap_data, BCMPCIE_EXT_TRAP_DATA_MAXLEN);
		dhdp->extended_trap_data = NULL;
	}
#ifdef DNGL_AXI_ERROR_LOGGING
	if (dhdp->axi_err_dump) {
		MFREE(dhdp->osh, dhdp->axi_err_dump, sizeof(dhd_axi_error_dump_t));
		dhdp->axi_err_dump = NULL;
	}
#endif /* DNGL_AXI_ERROR_LOGGING */
#endif /* BCMPCIE */

#ifdef DHD_DUMP_MNGR
	if (dhd->pub.dump_file_manage) {
		MFREE(dhd->pub.osh, dhd->pub.dump_file_manage,
			sizeof(dhd_dump_file_manage_t));
	}
#endif /* DHD_DUMP_MNGR */

	dhd_sysfs_exit(dhd);
	dhd->pub.fw_download_status = FW_UNLOADED;

#if defined(BT_OVER_SDIO)
	mutex_destroy(&dhd->bus_user_lock);
#endif /* BT_OVER_SDIO */
} /* dhd_detach */
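
/*
 * dhd_free() releases memory still owned by the public dhd structure after
 * detach: per-flow reorder buffers, the STA pool, any captured SoC RAM dump,
 * and the dhd_info_t itself -- unless that allocation came from the platform
 * preallocation pool, in which case it must not be MFREE'd here.
 */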
void
dhd_free(dhd_pub_t *dhdp)
{
	dhd_info_t *dhd;
	DHD_TRACE(("%s: Enter\n", __FUNCTION__));

	if (dhdp) {
		int i;
		for (i = 0; i < ARRAYSIZE(dhdp->reorder_bufs); i++) {
			if (dhdp->reorder_bufs[i]) {
				reorder_info_t *ptr;
				uint32 buf_size = sizeof(struct reorder_info);

				ptr = dhdp->reorder_bufs[i];

				buf_size += ((ptr->max_idx + 1) * sizeof(void*));
				DHD_REORDER(("free flow id buf %d, maxidx is %d, buf_size %d\n",
					i, ptr->max_idx, buf_size));

				MFREE(dhdp->osh, dhdp->reorder_bufs[i], buf_size);
				dhdp->reorder_bufs[i] = NULL;
			}
		}

		dhd_sta_pool_fini(dhdp, DHD_MAX_STA);

		dhd = (dhd_info_t *)dhdp->info;
		if (dhdp->soc_ram) {
#if defined(CONFIG_DHD_USE_STATIC_BUF) && defined(DHD_USE_STATIC_MEMDUMP)
			DHD_OS_PREFREE(dhdp, dhdp->soc_ram, dhdp->soc_ram_length);
#else
			MFREE(dhdp->osh, dhdp->soc_ram, dhdp->soc_ram_length);
#endif /* CONFIG_DHD_USE_STATIC_BUF && DHD_USE_STATIC_MEMDUMP */
			dhdp->soc_ram = NULL;
		}
		if (dhd != NULL) {
			/* If the pointer was allocated by dhd_os_prealloc then avoid MFREE */
			if (dhd != (dhd_info_t *)dhd_os_prealloc(dhdp,
					DHD_PREALLOC_DHD_INFO, 0, FALSE))
				MFREE(dhd->pub.osh, dhd, sizeof(*dhd));
			dhd = NULL;
		}
	}
}
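
/*
 * dhd_clear() is the lighter-weight sibling of dhd_free(): it drops the same
 * reorder buffers and SoC RAM dump but only clears (rather than finalizes)
 * the STA pool -- a reasonable reading of the clear-vs-fini split is that the
 * instance can be reused without a full reattach.
 */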
void
dhd_clear(dhd_pub_t *dhdp)
{
	DHD_TRACE(("%s: Enter\n", __FUNCTION__));
	if (dhdp) {
		int i;
#ifdef DHDTCPACK_SUPPRESS
		/* Clean up timer/data structure for any remaining/pending packet or timer. */
		dhd_tcpack_info_tbl_clean(dhdp);
#endif /* DHDTCPACK_SUPPRESS */
		for (i = 0; i < ARRAYSIZE(dhdp->reorder_bufs); i++) {
			if (dhdp->reorder_bufs[i]) {
				reorder_info_t *ptr;
				uint32 buf_size = sizeof(struct reorder_info);

				ptr = dhdp->reorder_bufs[i];

				buf_size += ((ptr->max_idx + 1) * sizeof(void*));
				DHD_REORDER(("free flow id buf %d, maxidx is %d, buf_size %d\n",
					i, ptr->max_idx, buf_size));

				MFREE(dhdp->osh, dhdp->reorder_bufs[i], buf_size);
				dhdp->reorder_bufs[i] = NULL;
			}
		}

		dhd_sta_pool_clear(dhdp, DHD_MAX_STA);

		if (dhdp->soc_ram) {
#if defined(CONFIG_DHD_USE_STATIC_BUF) && defined(DHD_USE_STATIC_MEMDUMP)
			DHD_OS_PREFREE(dhdp, dhdp->soc_ram, dhdp->soc_ram_length);
#else
			MFREE(dhdp->osh, dhdp->soc_ram, dhdp->soc_ram_length);
#endif /* CONFIG_DHD_USE_STATIC_BUF && DHD_USE_STATIC_MEMDUMP */
			dhdp->soc_ram = NULL;
		}
	}
}
static void
dhd_module_cleanup(void)
{
	DHD_TRACE(("%s: Enter\n", __FUNCTION__));

	dhd_bus_unregister();

#if defined(OEM_ANDROID)
	wl_android_exit();
#endif /* OEM_ANDROID */

	dhd_wifi_platform_unregister_drv();
}

static void __exit
dhd_module_exit(void)
{
	atomic_set(&exit_in_progress, 1);
	dhd_module_cleanup();
	unregister_reboot_notifier(&dhd_reboot_notifier);
	dhd_destroy_to_notifier_skt();
}
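
/*
 * Module init: platform registration is retried up to POWERUP_MAX_RETRY
 * times. The firmware_path/nvram_path module parameters are backed up first
 * and restored on every failed attempt, apparently because a failed probe
 * can modify them before giving up.
 */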
static int __init
dhd_module_init(void)
{
	int err;
	int retry = POWERUP_MAX_RETRY;

	DHD_ERROR(("%s in\n", __FUNCTION__));

	DHD_PERIM_RADIO_INIT();

	if (firmware_path[0] != '\0') {
		strncpy(fw_bak_path, firmware_path, MOD_PARAM_PATHLEN);
		fw_bak_path[MOD_PARAM_PATHLEN-1] = '\0';
	}

	if (nvram_path[0] != '\0') {
		strncpy(nv_bak_path, nvram_path, MOD_PARAM_PATHLEN);
		nv_bak_path[MOD_PARAM_PATHLEN-1] = '\0';
	}

	do {
		err = dhd_wifi_platform_register_drv();
		if (!err) {
			register_reboot_notifier(&dhd_reboot_notifier);
			break;
		} else {
			DHD_ERROR(("%s: Failed to load the driver, try cnt %d\n",
				__FUNCTION__, retry));
			strncpy(firmware_path, fw_bak_path, MOD_PARAM_PATHLEN);
			firmware_path[MOD_PARAM_PATHLEN-1] = '\0';
			strncpy(nvram_path, nv_bak_path, MOD_PARAM_PATHLEN);
			nvram_path[MOD_PARAM_PATHLEN-1] = '\0';
		}
	} while (retry--);

	dhd_create_to_notifier_skt();

	if (err) {
		DHD_ERROR(("%s: Failed to load the driver, max retry reached\n", __FUNCTION__));
	} else {
		if (!dhd_download_fw_on_driverload) {
			dhd_driver_init_done = TRUE;
		}
	}

	DHD_ERROR(("%s out\n", __FUNCTION__));

	return err;
}
static int
dhd_reboot_callback(struct notifier_block *this, unsigned long code, void *unused)
{
	DHD_TRACE(("%s: code = %ld\n", __FUNCTION__, code));
	if (code == SYS_RESTART) {
#ifdef OEM_ANDROID
#ifdef BCMPCIE
		is_reboot = code;
#endif /* BCMPCIE */
#else
		dhd_module_cleanup();
#endif /* OEM_ANDROID */
	}
	return NOTIFY_DONE;
}

#if defined(CONFIG_DEFERRED_INITCALLS) && !defined(EXYNOS_PCIE_MODULE_PATCH)
#if defined(CONFIG_MACH_UNIVERSAL7420) || defined(CONFIG_SOC_EXYNOS8890) || \
	defined(CONFIG_ARCH_MSM8996) || defined(CONFIG_ARCH_MSM8998) || \
	defined(CONFIG_SOC_EXYNOS8895) || defined(CONFIG_SOC_EXYNOS9810) || \
	defined(CONFIG_ARCH_SDM845) || defined(CONFIG_SOC_EXYNOS9820) || \
	defined(CONFIG_ARCH_SM8150)
deferred_module_init_sync(dhd_module_init);
#else
deferred_module_init(dhd_module_init);
#endif /* CONFIG_MACH_UNIVERSAL7420 || CONFIG_SOC_EXYNOS8890 ||
	* CONFIG_ARCH_MSM8996 || CONFIG_ARCH_MSM8998 || CONFIG_SOC_EXYNOS8895 ||
	* CONFIG_SOC_EXYNOS9810 || CONFIG_ARCH_SDM845 || CONFIG_SOC_EXYNOS9820 ||
	* CONFIG_ARCH_SM8150
	*/
#elif defined(USE_LATE_INITCALL_SYNC)
late_initcall_sync(dhd_module_init);
#else
late_initcall(dhd_module_init);
#endif /* USE_LATE_INITCALL_SYNC */

module_exit(dhd_module_exit);
/*
 * OS specific functions required to implement DHD driver in an OS-independent way
 */
int
dhd_os_proto_block(dhd_pub_t *pub)
{
	dhd_info_t *dhd = (dhd_info_t *)(pub->info);

	if (dhd) {
		DHD_PERIM_UNLOCK(pub);

		down(&dhd->proto_sem);

		DHD_PERIM_LOCK(pub);
		return 1;
	}

	return 0;
}

int
dhd_os_proto_unblock(dhd_pub_t *pub)
{
	dhd_info_t *dhd = (dhd_info_t *)(pub->info);

	if (dhd) {
		up(&dhd->proto_sem);
		return 1;
	}

	return 0;
}

void
dhd_os_dhdiovar_lock(dhd_pub_t *pub)
{
	dhd_info_t *dhd = (dhd_info_t *)(pub->info);

	if (dhd) {
		mutex_lock(&dhd->dhd_iovar_mutex);
	}
}

void
dhd_os_dhdiovar_unlock(dhd_pub_t *pub)
{
	dhd_info_t *dhd = (dhd_info_t *)(pub->info);

	if (dhd) {
		mutex_unlock(&dhd->dhd_iovar_mutex);
	}
}

void
dhd_os_logdump_lock(dhd_pub_t *pub)
{
	dhd_info_t *dhd = NULL;

	if (!pub)
		return;

	dhd = (dhd_info_t *)(pub->info);

	if (dhd) {
		mutex_lock(&dhd->logdump_lock);
	}
}

void
dhd_os_logdump_unlock(dhd_pub_t *pub)
{
	dhd_info_t *dhd = NULL;

	if (!pub)
		return;

	dhd = (dhd_info_t *)(pub->info);

	if (dhd) {
		mutex_unlock(&dhd->logdump_lock);
	}
}

unsigned long
dhd_os_dbgring_lock(void *lock)
{
	if (!lock)
		return 0;

	mutex_lock((struct mutex *)lock);

	return 0;
}

void
dhd_os_dbgring_unlock(void *lock, unsigned long flags)
{
	BCM_REFERENCE(flags);

	if (!lock)
		return;

	mutex_unlock((struct mutex *)lock);
}

unsigned int
dhd_os_get_ioctl_resp_timeout(void)
{
	return ((unsigned int)dhd_ioctl_timeout_msec);
}

void
dhd_os_set_ioctl_resp_timeout(unsigned int timeout_msec)
{
	dhd_ioctl_timeout_msec = (int)timeout_msec;
}
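
/*
 * IOCTL response handshake: a requester calls dhd_os_ioctl_resp_wait() with a
 * pointer to a completion flag; the bus/protocol layer sets that flag and
 * calls dhd_os_ioctl_resp_wake() when the response arrives. The return value
 * is the wait_event_timeout() result: 0 on timeout, nonzero otherwise. A
 * minimal sketch of the expected pairing (illustrative only, not code from
 * this driver):
 *
 *   uint resp_ready = 0;
 *   ...issue request to the dongle...
 *   if (dhd_os_ioctl_resp_wait(pub, &resp_ready) == 0)
 *       ...handle timeout...
 *   // and in the response path:
 *   resp_ready = 1;
 *   dhd_os_ioctl_resp_wake(pub);
 */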
int
dhd_os_ioctl_resp_wait(dhd_pub_t *pub, uint *condition)
{
	dhd_info_t *dhd = (dhd_info_t *)(pub->info);
	int timeout;

	/* Convert timeout in milliseconds to jiffies */
	timeout = msecs_to_jiffies(dhd_ioctl_timeout_msec);

	DHD_PERIM_UNLOCK(pub);

	timeout = wait_event_timeout(dhd->ioctl_resp_wait, (*condition), timeout);

	DHD_PERIM_LOCK(pub);

	return timeout;
}

int
dhd_os_ioctl_resp_wake(dhd_pub_t *pub)
{
	dhd_info_t *dhd = (dhd_info_t *)(pub->info);

	wake_up(&dhd->ioctl_resp_wait);
	return 0;
}

int
dhd_os_d3ack_wait(dhd_pub_t *pub, uint *condition)
{
	dhd_info_t *dhd = (dhd_info_t *)(pub->info);
	int timeout;

	/* Convert timeout in milliseconds to jiffies */
	timeout = msecs_to_jiffies(D3_ACK_RESP_TIMEOUT);

	DHD_PERIM_UNLOCK(pub);

	timeout = wait_event_timeout(dhd->d3ack_wait, (*condition), timeout);

	DHD_PERIM_LOCK(pub);

	return timeout;
}

int
dhd_os_d3ack_wake(dhd_pub_t *pub)
{
	dhd_info_t *dhd = (dhd_info_t *)(pub->info);

	wake_up(&dhd->d3ack_wait);
	return 0;
}

int
dhd_os_busbusy_wait_negation(dhd_pub_t *pub, uint *condition)
{
	dhd_info_t *dhd = (dhd_info_t *)(pub->info);
	int timeout;

	/* Wait for bus usage contexts to gracefully exit within some timeout value.
	 * The timeout is set slightly higher than dhd_ioctl_timeout_msec so that
	 * IOCTL timeouts are not affected.
	 */
	/* Convert timeout in milliseconds to jiffies */
	timeout = msecs_to_jiffies(DHD_BUS_BUSY_TIMEOUT);

	timeout = wait_event_timeout(dhd->dhd_bus_busy_state_wait, !(*condition), timeout);

	return timeout;
}
/*
 * Wait until the condition *var == condition is met.
 * Returns 0 if the @condition evaluated to false after the timeout elapsed
 * Returns 1 if the @condition evaluated to true
 */
int
dhd_os_busbusy_wait_condition(dhd_pub_t *pub, uint *var, uint condition)
{
	dhd_info_t *dhd = (dhd_info_t *)(pub->info);
	int timeout;

	/* Convert timeout in milliseconds to jiffies */
	timeout = msecs_to_jiffies(DHD_BUS_BUSY_TIMEOUT);

	timeout = wait_event_timeout(dhd->dhd_bus_busy_state_wait, (*var == condition), timeout);

	return timeout;
}

/*
 * Wait until the '(*var & bitmask) == condition' is met.
 * Returns 0 if the @condition evaluated to false after the timeout elapsed
 * Returns 1 if the @condition evaluated to true
 */
int
dhd_os_busbusy_wait_bitmask(dhd_pub_t *pub, uint *var,
	uint bitmask, uint condition)
{
	dhd_info_t *dhd = (dhd_info_t *)(pub->info);
	int timeout;

	/* Convert timeout in milliseconds to jiffies */
	timeout = msecs_to_jiffies(DHD_BUS_BUSY_TIMEOUT);

	timeout = wait_event_timeout(dhd->dhd_bus_busy_state_wait,
		((*var & bitmask) == condition), timeout);

	return timeout;
}
int
dhd_os_dmaxfer_wait(dhd_pub_t *pub, uint *condition)
{
	int ret = 0;
	dhd_info_t *dhd = (dhd_info_t *)(pub->info);
	int timeout;

	timeout = msecs_to_jiffies(IOCTL_DMAXFER_TIMEOUT);

	DHD_PERIM_UNLOCK(pub);
	ret = wait_event_timeout(dhd->dmaxfer_wait, (*condition), timeout);
	DHD_PERIM_LOCK(pub);

	return ret;
}

int
dhd_os_dmaxfer_wake(dhd_pub_t *pub)
{
	dhd_info_t *dhd = (dhd_info_t *)(pub->info);

	wake_up(&dhd->dmaxfer_wait);
	return 0;
}

void
dhd_os_tx_completion_wake(dhd_pub_t *dhd)
{
	/* Issue a write memory barrier so the updated event value is visible
	 * before the waiter is woken up.
	 */
	OSL_SMP_WMB();
	wake_up(&dhd->tx_completion_wait);
}

/* Fix compilation error for FC11 */
INLINE int
dhd_os_busbusy_wake(dhd_pub_t *pub)
{
	dhd_info_t *dhd = (dhd_info_t *)(pub->info);

	/* Issue a write memory barrier so the updated event value is visible
	 * before the waiter is woken up.
	 */
	OSL_SMP_WMB();
	wake_up(&dhd->dhd_bus_busy_state_wait);
	return 0;
}
void
dhd_os_wd_timer_extend(void *bus, bool extend)
{
	dhd_pub_t *pub = bus;
	dhd_info_t *dhd = (dhd_info_t *)pub->info;

	if (extend)
		dhd_os_wd_timer(bus, WATCHDOG_EXTEND_INTERVAL);
	else
		dhd_os_wd_timer(bus, dhd->default_wd_interval);
}
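
/*
 * Watchdog timer contract for dhd_os_wd_timer(): wdtick == 0 stops the timer
 * (del_timer_sync() is called outside the general lock); a nonzero wdtick
 * re-arms it and, on SDIO builds, also takes the watchdog wake lock and
 * updates dhd_watchdog_ms. Nothing is armed while the bus is still
 * DHD_BUS_DOWN, i.e. before firmware is loaded.
 */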
void
dhd_os_wd_timer(void *bus, uint wdtick)
{
	dhd_pub_t *pub = bus;
	dhd_info_t *dhd = (dhd_info_t *)pub->info;
	unsigned long flags;

	DHD_TRACE(("%s: Enter\n", __FUNCTION__));

	if (!dhd) {
		DHD_ERROR(("%s: dhd NULL\n", __FUNCTION__));
		return;
	}

	DHD_GENERAL_LOCK(pub, flags);

	/* don't start the wd until fw is loaded */
	if (pub->busstate == DHD_BUS_DOWN) {
		DHD_GENERAL_UNLOCK(pub, flags);
#ifdef BCMSDIO
		if (!wdtick) {
			DHD_OS_WD_WAKE_UNLOCK(pub);
		}
#endif /* BCMSDIO */
		return;
	}

	/* Totally stop the timer */
	if (!wdtick && dhd->wd_timer_valid == TRUE) {
		dhd->wd_timer_valid = FALSE;
		DHD_GENERAL_UNLOCK(pub, flags);
		del_timer_sync(&dhd->timer);
#ifdef BCMSDIO
		DHD_OS_WD_WAKE_UNLOCK(pub);
#endif /* BCMSDIO */
		return;
	}

	if (wdtick) {
#ifdef BCMSDIO
		DHD_OS_WD_WAKE_LOCK(pub);
		dhd_watchdog_ms = (uint)wdtick;
#endif /* BCMSDIO */
		/* Re-arm the timer with the last watchdog period */
		mod_timer(&dhd->timer, jiffies + msecs_to_jiffies(dhd_watchdog_ms));
		dhd->wd_timer_valid = TRUE;
	}
	DHD_GENERAL_UNLOCK(pub, flags);
}
#ifdef DHD_PCIE_RUNTIMEPM
void
dhd_os_runtimepm_timer(void *bus, uint tick)
{
	dhd_pub_t *pub = bus;
	dhd_info_t *dhd = (dhd_info_t *)pub->info;
	unsigned long flags;

	DHD_TRACE(("%s: Enter\n", __FUNCTION__));

	if (!dhd) {
		DHD_ERROR(("%s: dhd is NULL\n", __FUNCTION__));
		return;
	}

	DHD_GENERAL_LOCK(pub, flags);

	/* don't start the RPM until fw is loaded */
	if (DHD_BUS_CHECK_DOWN_OR_DOWN_IN_PROGRESS(pub)) {
		DHD_GENERAL_UNLOCK(pub, flags);
		return;
	}

	/* If tick is non-zero, the request is to start the timer */
	if (tick) {
		/* Start the timer only if it's not already running */
		if (dhd->rpm_timer_valid == FALSE) {
			mod_timer(&dhd->rpm_timer, jiffies + msecs_to_jiffies(dhd_runtimepm_ms));
			dhd->rpm_timer_valid = TRUE;
			DHD_ERROR(("DHD Runtime PM Enabled\n"));
		}
	} else {
		/* tick is zero: stop the timer, but only if it is running;
		 * otherwise there is nothing to do.
		 */
		if (dhd->rpm_timer_valid == TRUE) {
			dhd->rpm_timer_valid = FALSE;
			DHD_GENERAL_UNLOCK(pub, flags);
			del_timer_sync(&dhd->rpm_timer);
			DHD_ERROR(("DHD Runtime PM Disabled\n"));
			/* the lock has already been released, so just exit */
			goto exit;
		}
	}

	DHD_GENERAL_UNLOCK(pub, flags);
exit:
	return;
}
#endif /* DHD_PCIE_RUNTIMEPM */
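
/*
 * Firmware/NVRAM image access helpers. dhd_os_open_image1() returns a
 * struct file * that callers treat as an opaque handle; the read helpers
 * below advance fp->f_pos manually, which keeps the behavior uniform across
 * kernel versions where compat_kernel_read() is used.
 */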
void *
dhd_os_open_image1(dhd_pub_t *pub, char *filename)
{
	struct file *fp;
	int size;

	fp = filp_open(filename, O_RDONLY, 0);
	/*
	 * 2.6.11 (FC4) supports filp_open() but later revs don't?
	 * Alternative:
	 * fp = open_namei(AT_FDCWD, filename, O_RD, 0);
	 * ???
	 */
	if (IS_ERR(fp)) {
		fp = NULL;
		goto err;
	}

	if (!S_ISREG(file_inode(fp)->i_mode)) {
		DHD_ERROR(("%s: %s is not a regular file\n", __FUNCTION__, filename));
		/* close the handle before reporting failure to avoid a leak */
		filp_close(fp, NULL);
		fp = NULL;
		goto err;
	}

	size = i_size_read(file_inode(fp));
	if (size <= 0) {
		DHD_ERROR(("%s: %s file size invalid %d\n", __FUNCTION__, filename, size));
		/* close the handle before reporting failure to avoid a leak */
		filp_close(fp, NULL);
		fp = NULL;
		goto err;
	}

	DHD_ERROR(("%s: %s (%d bytes) open success\n", __FUNCTION__, filename, size));

err:
	return fp;
}
int
dhd_os_get_image_block(char *buf, int len, void *image)
{
	struct file *fp = (struct file *)image;
	int rdlen;
	int size;

	if (!image) {
		return 0;
	}

	size = i_size_read(file_inode(fp));
	rdlen = compat_kernel_read(fp, fp->f_pos, buf, MIN(len, size));

	if (len >= size && size != rdlen) {
		return -EIO;
	}

	if (rdlen > 0) {
		fp->f_pos += rdlen;
	}

	return rdlen;
}

#if defined(BT_OVER_SDIO)
int
dhd_os_gets_image(dhd_pub_t *pub, char *str, int len, void *image)
{
	struct file *fp = (struct file *)image;
	int rd_len;
	uint str_len = 0;
	char *str_end = NULL;

	if (!image)
		return 0;

	rd_len = compat_kernel_read(fp, fp->f_pos, str, len);
	str_end = strnchr(str, len, '\n');
	if (str_end == NULL) {
		goto err;
	}
	str_len = (uint)(str_end - str);

	/* Advance file pointer past the string length */
	fp->f_pos += str_len + 1;
	bzero(str_end, rd_len - str_len);

err:
	return str_len;
}
#endif /* defined (BT_OVER_SDIO) */

int
dhd_os_get_image_size(void *image)
{
	struct file *fp = (struct file *)image;
	int size;

	if (!image) {
		return 0;
	}

	size = i_size_read(file_inode(fp));

	return size;
}

void
dhd_os_close_image1(dhd_pub_t *pub, void *image)
{
	if (image) {
		filp_close((struct file *)image, NULL);
	}
}
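
/*
 * SD bus locking: when the DPC runs as a real-time thread (dhd_dpc_prio >= 0)
 * a semaphore is used so the lock holder may sleep; when the DPC runs in a
 * tasklet, the lock must be a BH spinlock instead. Lock and unlock both test
 * the same condition, so the pairing always agrees.
 */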
void
dhd_os_sdlock(dhd_pub_t *pub)
{
	dhd_info_t *dhd;

	dhd = (dhd_info_t *)(pub->info);

	if (dhd_dpc_prio >= 0)
		down(&dhd->sdsem);
	else
		spin_lock_bh(&dhd->sdlock);
}

void
dhd_os_sdunlock(dhd_pub_t *pub)
{
	dhd_info_t *dhd;

	dhd = (dhd_info_t *)(pub->info);

	if (dhd_dpc_prio >= 0)
		up(&dhd->sdsem);
	else
		spin_unlock_bh(&dhd->sdlock);
}

void
dhd_os_sdlock_txq(dhd_pub_t *pub)
{
	dhd_info_t *dhd;

	dhd = (dhd_info_t *)(pub->info);
	spin_lock_bh(&dhd->txqlock);
}

void
dhd_os_sdunlock_txq(dhd_pub_t *pub)
{
	dhd_info_t *dhd;

	dhd = (dhd_info_t *)(pub->info);
	spin_unlock_bh(&dhd->txqlock);
}

void
dhd_os_sdlock_rxq(dhd_pub_t *pub)
{
}

void
dhd_os_sdunlock_rxq(dhd_pub_t *pub)
{
}

static void
dhd_os_rxflock(dhd_pub_t *pub)
{
	dhd_info_t *dhd;

	dhd = (dhd_info_t *)(pub->info);
	spin_lock_bh(&dhd->rxf_lock);
}

static void
dhd_os_rxfunlock(dhd_pub_t *pub)
{
	dhd_info_t *dhd;

	dhd = (dhd_info_t *)(pub->info);
	spin_unlock_bh(&dhd->rxf_lock);
}
#ifdef DHDTCPACK_SUPPRESS
unsigned long
dhd_os_tcpacklock(dhd_pub_t *pub)
{
	dhd_info_t *dhd;
	unsigned long flags = 0;

	dhd = (dhd_info_t *)(pub->info);

	if (dhd) {
#ifdef BCMSDIO
		spin_lock_bh(&dhd->tcpack_lock);
#else
		spin_lock_irqsave(&dhd->tcpack_lock, flags);
#endif /* BCMSDIO */
	}

	return flags;
}

void
dhd_os_tcpackunlock(dhd_pub_t *pub, unsigned long flags)
{
	dhd_info_t *dhd;

#ifdef BCMSDIO
	BCM_REFERENCE(flags);
#endif /* BCMSDIO */

	dhd = (dhd_info_t *)(pub->info);

	if (dhd) {
#ifdef BCMSDIO
		spin_unlock_bh(&dhd->tcpack_lock);
#else
		spin_unlock_irqrestore(&dhd->tcpack_lock, flags);
#endif /* BCMSDIO */
	}
}
#endif /* DHDTCPACK_SUPPRESS */
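
/*
 * dhd_os_prealloc() first tries the platform's static preallocation pool
 * (wifi_platform_prealloc) and falls back to kmalloc() only when the caller
 * allows it; CAN_SLEEP() selects GFP_KERNEL vs GFP_ATOMIC for the fallback.
 * dhd_os_prefree() is a no-op, consistent with preallocated regions living
 * for the lifetime of the platform rather than being returned piecemeal.
 */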
uint8* dhd_os_prealloc(dhd_pub_t *dhdpub, int section, uint size, bool kmalloc_if_fail)
{
	uint8* buf;
	gfp_t flags = CAN_SLEEP() ? GFP_KERNEL : GFP_ATOMIC;

	buf = (uint8*)wifi_platform_prealloc(dhdpub->info->adapter, section, size);
	if (buf == NULL && kmalloc_if_fail)
		buf = kmalloc(size, flags);

	return buf;
}

void dhd_os_prefree(dhd_pub_t *dhdpub, void *addr, uint size)
{
}
#if defined(WL_WIRELESS_EXT)
struct iw_statistics *
dhd_get_wireless_stats(struct net_device *dev)
{
	int res = 0;
	dhd_info_t *dhd = DHD_DEV_INFO(dev);

	if (!dhd->pub.up) {
		return NULL;
	}

	res = wl_iw_get_wireless_stats(dev, &dhd->iw.wstats);

	if (res == 0)
		return &dhd->iw.wstats;
	else
		return NULL;
}
#endif /* defined(WL_WIRELESS_EXT) */
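
/*
 * Host event dispatch: raw dongle events are first decoded by
 * wl_process_host_event(); WLC_E_IF add/delete events stop there because the
 * netdev pointer may not be valid yet. Everything else is fanned out to the
 * wireless-extensions handler (primary bsscfg only) and, under up_lock, to
 * cfg80211 while the interface is still up.
 */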
static int
dhd_wl_host_event(dhd_info_t *dhd, int ifidx, void *pktdata, uint16 pktlen,
	wl_event_msg_t *event, void **data)
{
	int bcmerror = 0;
#ifdef WL_CFG80211
	unsigned long flags = 0;
#endif /* WL_CFG80211 */
	ASSERT(dhd != NULL);

#ifdef SHOW_LOGTRACE
	bcmerror = wl_process_host_event(&dhd->pub, &ifidx, pktdata, pktlen, event, data,
		&dhd->event_data);
#else
	bcmerror = wl_process_host_event(&dhd->pub, &ifidx, pktdata, pktlen, event, data,
		NULL);
#endif /* SHOW_LOGTRACE */
	if (unlikely(bcmerror != BCME_OK)) {
		return bcmerror;
	}

	if (ntoh32(event->event_type) == WLC_E_IF) {
		/* WLC_E_IF event types are consumed by wl_process_host_event.
		 * For ifadd/del ops, the netdev ptr may not be valid at this
		 * point, so return before invoking cfg80211/wext handlers.
		 */
		return BCME_OK;
	}

#if defined(WL_WIRELESS_EXT)
	if (event->bsscfgidx == 0) {
		/*
		 * Wireless ext is on primary interface only
		 */
		ASSERT(dhd->iflist[ifidx] != NULL);
		ASSERT(dhd->iflist[ifidx]->net != NULL);

		if (dhd->iflist[ifidx]->net) {
			wl_iw_event(dhd->iflist[ifidx]->net, event, *data);
		}
	}
#endif /* defined(WL_WIRELESS_EXT) */

#ifdef WL_CFG80211
	if (dhd->iflist[ifidx]->net) {
		spin_lock_irqsave(&dhd->pub.up_lock, flags);
		if (dhd->pub.up) {
			wl_cfg80211_event(dhd->iflist[ifidx]->net, event, *data);
		}
		spin_unlock_irqrestore(&dhd->pub.up_lock, flags);
	}
#endif /* defined(WL_CFG80211) */

	return (bcmerror);
}
/* send up locally generated event */
void
dhd_sendup_event(dhd_pub_t *dhdp, wl_event_msg_t *event, void *data)
{
	switch (ntoh32(event->event_type)) {
	/* Handle error case or further events here */
	default:
		break;
	}
}
#ifdef LOG_INTO_TCPDUMP
void
dhd_sendup_log(dhd_pub_t *dhdp, void *data, int data_len)
{
	struct sk_buff *p, *skb;
	uint32 pktlen;
	int len;
	dhd_if_t *ifp;
	dhd_info_t *dhd;
	uchar *skb_data;
	int ifidx = 0;
	struct ether_header eth;

	pktlen = sizeof(eth) + data_len;
	dhd = dhdp->info;

	if ((p = PKTGET(dhdp->osh, pktlen, FALSE))) {
		ASSERT(ISALIGNED((uintptr)PKTDATA(dhdp->osh, p), sizeof(uint32)));

		bcopy(&dhdp->mac, &eth.ether_dhost, ETHER_ADDR_LEN);
		bcopy(&dhdp->mac, &eth.ether_shost, ETHER_ADDR_LEN);
		ETHER_TOGGLE_LOCALADDR(&eth.ether_shost);
		eth.ether_type = hton16(ETHER_TYPE_BRCM);

		bcopy((void *)&eth, PKTDATA(dhdp->osh, p), sizeof(eth));
		bcopy(data, PKTDATA(dhdp->osh, p) + sizeof(eth), data_len);
		skb = PKTTONATIVE(dhdp->osh, p);
		skb_data = skb->data;
		len = skb->len;

		ifidx = dhd_ifname2idx(dhd, "wlan0");
		ifp = dhd->iflist[ifidx];
		if (ifp == NULL)
			ifp = dhd->iflist[0];

		ASSERT(ifp);
		skb->dev = ifp->net;
		skb->protocol = eth_type_trans(skb, skb->dev);
		skb->data = skb_data;
		skb->len = len;

		/* Strip header, count, deliver upward */
		skb_pull(skb, ETH_HLEN);

		bcm_object_trace_opr(skb, BCM_OBJDBG_REMOVE,
			__FUNCTION__, __LINE__);

		/* Send the packet */
		if (in_interrupt()) {
			netif_rx(skb);
		} else {
			netif_rx_ni(skb);
		}
	} else {
		/* Could not allocate an sk_buff */
		DHD_ERROR(("%s: unable to alloc sk_buff", __FUNCTION__));
	}
}
#endif /* LOG_INTO_TCPDUMP */
void dhd_wait_for_event(dhd_pub_t *dhd, bool *lockvar)
{
#if defined(BCMSDIO)
	struct dhd_info *dhdinfo = dhd->info;

	int timeout = msecs_to_jiffies(IOCTL_RESP_TIMEOUT);

	dhd_os_sdunlock(dhd);
	wait_event_timeout(dhdinfo->ctrl_wait, (*lockvar == FALSE), timeout);
	dhd_os_sdlock(dhd);
#endif /* defined(BCMSDIO) */
	return;
} /* dhd_wait_for_event */

void dhd_wait_event_wakeup(dhd_pub_t *dhd)
{
#if defined(BCMSDIO)
	struct dhd_info *dhdinfo = dhd->info;
	if (waitqueue_active(&dhdinfo->ctrl_wait))
		wake_up(&dhdinfo->ctrl_wait);
#endif // endif
	return;
}

#if defined(BCMSDIO) || defined(BCMPCIE)
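/*
 * dhd_net_bus_devreset(): flag == TRUE puts the dongle into reset ("wl down"
 * first, then tear down wlfc/PNO/RTT state); flag == FALSE brings it back up,
 * refreshing the firmware/NVRAM paths on SDIO beforehand. On a successful
 * reset the *_occured/_occurred error flags are cleared so recovery logic
 * starts from a clean slate.
 */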
int
dhd_net_bus_devreset(struct net_device *dev, uint8 flag)
{
	int ret;

	dhd_info_t *dhd = DHD_DEV_INFO(dev);

#ifdef DHD_PCIE_NATIVE_RUNTIMEPM
	if (pm_runtime_get_sync(dhd_bus_to_dev(dhd->pub.bus)) < 0)
		return BCME_ERROR;
#endif /* DHD_PCIE_NATIVE_RUNTIMEPM */

	if (flag == TRUE) {
		/* Issue wl down command before resetting the chip */
		if (dhd_wl_ioctl_cmd(&dhd->pub, WLC_DOWN, NULL, 0, TRUE, 0) < 0) {
			DHD_TRACE(("%s: wl down failed\n", __FUNCTION__));
		}
#ifdef PROP_TXSTATUS
		if (dhd->pub.wlfc_enabled) {
			dhd_wlfc_deinit(&dhd->pub);
		}
#endif /* PROP_TXSTATUS */
#ifdef PNO_SUPPORT
		if (dhd->pub.pno_state) {
			dhd_pno_deinit(&dhd->pub);
		}
#endif // endif
#ifdef RTT_SUPPORT
		if (dhd->pub.rtt_state) {
			dhd_rtt_deinit(&dhd->pub);
		}
#endif /* RTT_SUPPORT */
#if defined(DBG_PKT_MON) && !defined(DBG_PKT_MON_INIT_DEFAULT)
		dhd_os_dbg_detach_pkt_monitor(&dhd->pub);
#endif /* DBG_PKT_MON */
	}

#ifdef BCMSDIO
	if (!flag) {
		dhd_update_fw_nv_path(dhd);
		/* update firmware and nvram path to sdio bus */
		dhd_bus_update_fw_nv_path(dhd->pub.bus,
			dhd->fw_path, dhd->nv_path);
	}
#endif /* BCMSDIO */

	ret = dhd_bus_devreset(&dhd->pub, flag);

#ifdef DHD_PCIE_NATIVE_RUNTIMEPM
	pm_runtime_mark_last_busy(dhd_bus_to_dev(dhd->pub.bus));
	pm_runtime_put_autosuspend(dhd_bus_to_dev(dhd->pub.bus));
#endif /* DHD_PCIE_NATIVE_RUNTIMEPM */

	if (flag) {
		/* Clear some flags for recovery logic */
		dhd->pub.dongle_trap_occured = 0;
		dhd->pub.iovar_timeout_occured = 0;
#ifdef PCIE_FULL_DONGLE
		dhd->pub.d3ack_timeout_occured = 0;
		dhd->pub.livelock_occured = 0;
		dhd->pub.pktid_audit_failed = 0;
#endif /* PCIE_FULL_DONGLE */
		dhd->pub.iface_op_failed = 0;
		dhd->pub.scan_timeout_occurred = 0;
		dhd->pub.scan_busy_occurred = 0;
		dhd->pub.smmu_fault_occurred = 0;
	}

	if (ret) {
		DHD_ERROR(("%s: dhd_bus_devreset: %d\n", __FUNCTION__, ret));
	}

	return ret;
}
#ifdef BCMSDIO
int
dhd_net_bus_suspend(struct net_device *dev)
{
	dhd_info_t *dhd = DHD_DEV_INFO(dev);
	return dhd_bus_suspend(&dhd->pub);
}

int
dhd_net_bus_resume(struct net_device *dev, uint8 stage)
{
	dhd_info_t *dhd = DHD_DEV_INFO(dev);
	return dhd_bus_resume(&dhd->pub, stage);
}
#endif /* BCMSDIO */
#endif /* BCMSDIO || BCMPCIE */
int net_os_set_suspend_disable(struct net_device *dev, int val)
{
	dhd_info_t *dhd = DHD_DEV_INFO(dev);
	int ret = 0;

	if (dhd) {
		ret = dhd->pub.suspend_disable_flag;
		dhd->pub.suspend_disable_flag = val;
	}
	return ret;
}

int net_os_set_suspend(struct net_device *dev, int val, int force)
{
	int ret = 0;
	dhd_info_t *dhd = DHD_DEV_INFO(dev);

	if (dhd) {
#ifdef CONFIG_MACH_UNIVERSAL7420
#endif /* CONFIG_MACH_UNIVERSAL7420 */
#if defined(CONFIG_HAS_EARLYSUSPEND) && defined(DHD_USE_EARLYSUSPEND)
		ret = dhd_set_suspend(val, &dhd->pub);
#else
		ret = dhd_suspend_resume_helper(dhd, val, force);
#endif // endif
#ifdef WL_CFG80211
		wl_cfg80211_update_power_mode(dev);
#endif // endif
	}
	return ret;
}
int net_os_set_suspend_bcn_li_dtim(struct net_device *dev, int val)
{
	dhd_info_t *dhd = DHD_DEV_INFO(dev);

	if (dhd) {
		DHD_ERROR(("%s: Set bcn_li_dtim in suspend %d\n",
			__FUNCTION__, val));
		dhd->pub.suspend_bcn_li_dtim = val;
	}

	return 0;
}

int net_os_set_max_dtim_enable(struct net_device *dev, int val)
{
	dhd_info_t *dhd = DHD_DEV_INFO(dev);

	if (dhd) {
		DHD_ERROR(("%s: use MAX bcn_li_dtim in suspend %s\n",
			__FUNCTION__, (val ? "Enable" : "Disable")));
		if (val) {
			dhd->pub.max_dtim_enable = TRUE;
		} else {
			dhd->pub.max_dtim_enable = FALSE;
		}
	} else {
		return -1;
	}

	return 0;
}

#ifdef DISABLE_DTIM_IN_SUSPEND
int net_os_set_disable_dtim_in_suspend(struct net_device *dev, int val)
{
	dhd_info_t *dhd = DHD_DEV_INFO(dev);

	if (dhd) {
		DHD_ERROR(("%s: Disable bcn_li_dtim in suspend %s\n",
			__FUNCTION__, (val ? "Enable" : "Disable")));
		if (val) {
			dhd->pub.disable_dtim_in_suspend = TRUE;
		} else {
			dhd->pub.disable_dtim_in_suspend = FALSE;
		}
	} else {
		return -1;
	}

	return 0;
}
#endif /* DISABLE_DTIM_IN_SUSPEND */
#ifdef PKT_FILTER_SUPPORT
int net_os_rxfilter_add_remove(struct net_device *dev, int add_remove, int num)
{
	int ret = 0;

#ifndef GAN_LITE_NAT_KEEPALIVE_FILTER
	dhd_info_t *dhd = DHD_DEV_INFO(dev);

	DHD_ERROR(("%s: add_remove = %d, num = %d\n", __FUNCTION__, add_remove, num));
	if (!dhd || (num == DHD_UNICAST_FILTER_NUM)) {
		return 0;
	}

#ifdef BLOCK_IPV6_PACKET
	/* customer requirement: block all IPv6 packets */
	if (num == DHD_MULTICAST6_FILTER_NUM) {
		return 0;
	}
#endif /* BLOCK_IPV6_PACKET */

	if (num >= dhd->pub.pktfilter_count) {
		return -EINVAL;
	}

	ret = dhd_packet_filter_add_remove(&dhd->pub, add_remove, num);
#endif /* !GAN_LITE_NAT_KEEPALIVE_FILTER */

	return ret;
}

int dhd_os_enable_packet_filter(dhd_pub_t *dhdp, int val)
{
	int ret = 0;

	/* Packet filtering is set only while we are still in early-suspend and
	 * we need either to turn it ON or turn it OFF.
	 * We can always turn it OFF in case of early-suspend, but we turn it
	 * back ON only if suspend_disable_flag was not set.
	 */
	if (dhdp && dhdp->up) {
		if (dhdp->in_suspend) {
			if (!val || (val && !dhdp->suspend_disable_flag))
				dhd_enable_packet_filter(val, dhdp);
		}
	}
	return ret;
}

/* function to enable/disable packet filtering for the network device */
int net_os_enable_packet_filter(struct net_device *dev, int val)
{
	dhd_info_t *dhd = DHD_DEV_INFO(dev);

	DHD_ERROR(("%s: val = %d\n", __FUNCTION__, val));
	return dhd_os_enable_packet_filter(&dhd->pub, val);
}
#endif /* PKT_FILTER_SUPPORT */
int
dhd_dev_init_ioctl(struct net_device *dev)
{
	dhd_info_t *dhd = DHD_DEV_INFO(dev);
	int ret;

	if ((ret = dhd_sync_with_dongle(&dhd->pub)) < 0)
		goto done;

done:
	return ret;
}
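
/*
 * dhd_dev_get_feature_set() assembles the Android WiFi HAL capability bitmask
 * from two sources: firmware capability strings (queried via FW_SUPPORTED())
 * and compile-time driver options. A hypothetical caller would test bits in
 * the returned mask, e.g. (illustrative only):
 *
 *   if (dhd_dev_get_feature_set(dev) & WIFI_FEATURE_P2P)
 *       ...advertise P2P support to the HAL...
 */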
int
dhd_dev_get_feature_set(struct net_device *dev)
{
	dhd_info_t *ptr = *(dhd_info_t **)netdev_priv(dev);
	dhd_pub_t *dhd = (&ptr->pub);
	int feature_set = 0;

	if (FW_SUPPORTED(dhd, sta))
		feature_set |= WIFI_FEATURE_INFRA;
	if (FW_SUPPORTED(dhd, dualband))
		feature_set |= WIFI_FEATURE_INFRA_5G;
	if (FW_SUPPORTED(dhd, p2p))
		feature_set |= WIFI_FEATURE_P2P;
	if (dhd->op_mode & DHD_FLAG_HOSTAP_MODE)
		feature_set |= WIFI_FEATURE_SOFT_AP;
	if (FW_SUPPORTED(dhd, tdls))
		feature_set |= WIFI_FEATURE_TDLS;
	if (FW_SUPPORTED(dhd, vsdb))
		feature_set |= WIFI_FEATURE_TDLS_OFFCHANNEL;
	if (FW_SUPPORTED(dhd, nan)) {
		feature_set |= WIFI_FEATURE_NAN;
		/* NAN is essential for d2d rtt */
		if (FW_SUPPORTED(dhd, rttd2d))
			feature_set |= WIFI_FEATURE_D2D_RTT;
	}
#ifdef RTT_SUPPORT
	feature_set |= WIFI_FEATURE_D2D_RTT;
	feature_set |= WIFI_FEATURE_D2AP_RTT;
#endif /* RTT_SUPPORT */
#ifdef LINKSTAT_SUPPORT
	feature_set |= WIFI_FEATURE_LINKSTAT;
#endif /* LINKSTAT_SUPPORT */

#if defined(PNO_SUPPORT) && !defined(DISABLE_ANDROID_PNO)
	if (dhd_is_pno_supported(dhd)) {
		feature_set |= WIFI_FEATURE_PNO;
#ifdef GSCAN_SUPPORT
		feature_set |= WIFI_FEATURE_GSCAN;
		feature_set |= WIFI_FEATURE_HAL_EPNO;
#endif /* GSCAN_SUPPORT */
	}
#endif /* PNO_SUPPORT && !DISABLE_ANDROID_PNO */
#ifdef RSSI_MONITOR_SUPPORT
	if (FW_SUPPORTED(dhd, rssi_mon)) {
		feature_set |= WIFI_FEATURE_RSSI_MONITOR;
	}
#endif /* RSSI_MONITOR_SUPPORT */
#ifdef WL11U
	feature_set |= WIFI_FEATURE_HOTSPOT;
#endif /* WL11U */
#ifdef NDO_CONFIG_SUPPORT
	feature_set |= WIFI_FEATURE_CONFIG_NDO;
#endif /* NDO_CONFIG_SUPPORT */
#ifdef KEEP_ALIVE
	feature_set |= WIFI_FEATURE_MKEEP_ALIVE;
#endif /* KEEP_ALIVE */
#ifdef SUPPORT_RANDOM_MAC_SCAN
	feature_set |= WIFI_FEATURE_SCAN_RAND;
#endif /* SUPPORT_RANDOM_MAC_SCAN */
#ifdef FILTER_IE
	if (FW_SUPPORTED(dhd, fie)) {
		feature_set |= WIFI_FEATURE_FILTER_IE;
	}
#endif /* FILTER_IE */
#ifdef ROAMEXP_SUPPORT
	/* Check if the Android O roam feature is supported by FW */
	if (!(BCME_UNSUPPORTED == dhd_dev_set_whitelist_ssid(dev, NULL, 0, true))) {
		feature_set |= WIFI_FEATURE_CONTROL_ROAMING;
	}
#endif /* ROAMEXP_SUPPORT */
	return feature_set;
}
int
dhd_dev_get_feature_set_matrix(struct net_device *dev, int num)
{
	int feature_set_full;
	int ret = 0;

	feature_set_full = dhd_dev_get_feature_set(dev);

	/* Common feature set for all interfaces */
	ret = (feature_set_full & WIFI_FEATURE_INFRA) |
		(feature_set_full & WIFI_FEATURE_INFRA_5G) |
		(feature_set_full & WIFI_FEATURE_D2D_RTT) |
		(feature_set_full & WIFI_FEATURE_D2AP_RTT) |
		(feature_set_full & WIFI_FEATURE_RSSI_MONITOR) |
		(feature_set_full & WIFI_FEATURE_EPR);

	/* Specific feature group for each interface */
	switch (num) {
	case 0:
		ret |= (feature_set_full & WIFI_FEATURE_P2P) |
			/* Not supported yet */
			/* (feature_set_full & WIFI_FEATURE_NAN) | */
			(feature_set_full & WIFI_FEATURE_TDLS) |
			(feature_set_full & WIFI_FEATURE_PNO) |
			(feature_set_full & WIFI_FEATURE_HAL_EPNO) |
			(feature_set_full & WIFI_FEATURE_BATCH_SCAN) |
			(feature_set_full & WIFI_FEATURE_GSCAN) |
			(feature_set_full & WIFI_FEATURE_HOTSPOT) |
			(feature_set_full & WIFI_FEATURE_ADDITIONAL_STA);
		break;

	case 1:
		ret |= (feature_set_full & WIFI_FEATURE_P2P);
		/* Not yet verified NAN with P2P */
		/* (feature_set_full & WIFI_FEATURE_NAN) | */
		break;

	case 2:
		ret |= (feature_set_full & WIFI_FEATURE_NAN) |
			(feature_set_full & WIFI_FEATURE_TDLS) |
			(feature_set_full & WIFI_FEATURE_TDLS_OFFCHANNEL);
		break;

	default:
		ret = WIFI_FEATURE_INVALID;
		DHD_ERROR(("%s: Out of index(%d) for get feature set\n", __FUNCTION__, num));
		break;
	}

	return ret;
}
#ifdef CUSTOM_FORCE_NODFS_FLAG
int
dhd_dev_set_nodfs(struct net_device *dev, u32 nodfs)
{
	dhd_info_t *dhd = DHD_DEV_INFO(dev);

	if (nodfs)
		dhd->pub.dhd_cflags |= WLAN_PLAT_NODFS_FLAG;
	else
		dhd->pub.dhd_cflags &= ~WLAN_PLAT_NODFS_FLAG;
	dhd->pub.force_country_change = TRUE;
	return 0;
}
#endif /* CUSTOM_FORCE_NODFS_FLAG */
#ifdef NDO_CONFIG_SUPPORT
int
dhd_dev_ndo_cfg(struct net_device *dev, u8 enable)
{
	dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev);
	dhd_pub_t *dhdp = &dhd->pub;
	int ret = 0;

	if (enable) {
		/* enable ND offload feature (will be enabled in FW on suspend) */
		dhdp->ndo_enable = TRUE;

		/* Update changes of anycast address & DAD failed address */
		ret = dhd_dev_ndo_update_inet6addr(dev);
		if ((ret < 0) && (ret != BCME_NORESOURCE)) {
			DHD_ERROR(("%s: failed to update host ip addr: %d\n", __FUNCTION__, ret));
			return ret;
		}
	} else {
		/* disable ND offload feature */
		dhdp->ndo_enable = FALSE;

		/* disable ND offload in FW */
		ret = dhd_ndo_enable(dhdp, FALSE);
		if (ret < 0) {
			DHD_ERROR(("%s: failed to disable NDO: %d\n", __FUNCTION__, ret));
		}
	}
	return ret;
}
/* The #pragma below is used as a WAR to fix a build failure: it ignores
 * dropping of the 'const' qualifier in the 'list_entry' macro. The pragma
 * disables the warning only for the following function.
 */
#if __GNUC__ > 4 || (__GNUC__ == 4 && __GNUC_MINOR__ >= 6)
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wcast-qual"
#endif /* __GNUC__ > 4 || (__GNUC__ == 4 && __GNUC_MINOR__ >= 6) */
static int
dhd_dev_ndo_get_valid_inet6addr_count(struct inet6_dev *inet6)
{
	struct inet6_ifaddr *ifa;
	struct ifacaddr6 *acaddr = NULL;
	int addr_count = 0;

	/* lock */
	read_lock_bh(&inet6->lock);

	/* Count valid unicast address */
	list_for_each_entry(ifa, &inet6->addr_list, if_list) {
		if ((ifa->flags & IFA_F_DADFAILED) == 0) {
			addr_count++;
		}
	}

	/* Count anycast address */
	acaddr = inet6->ac_list;
	while (acaddr) {
		addr_count++;
		acaddr = acaddr->aca_next;
	}

	/* unlock */
	read_unlock_bh(&inet6->lock);

	return addr_count;
}
int
dhd_dev_ndo_update_inet6addr(struct net_device *dev)
{
	dhd_info_t *dhd;
	dhd_pub_t *dhdp;
	struct inet6_dev *inet6;
	struct inet6_ifaddr *ifa;
	struct ifacaddr6 *acaddr = NULL;
	struct in6_addr *ipv6_addr = NULL;
	int cnt, i;
	int ret = BCME_OK;

	/*
	 * This function evaluates the host IP addresses held in struct inet6_dev:
	 * unicast addresses in inet6_dev->addr_list and anycast addresses in
	 * inet6_dev->ac_list. While evaluating inet6_dev, read_lock_bh() is
	 * required to prevent access to a NULL (freed) pointer.
	 */
	if (dev) {
		inet6 = dev->ip6_ptr;
		if (!inet6) {
			DHD_ERROR(("%s: Invalid inet6_dev\n", __FUNCTION__));
			return BCME_ERROR;
		}

		dhd = DHD_DEV_INFO(dev);
		if (!dhd) {
			DHD_ERROR(("%s: Invalid dhd_info\n", __FUNCTION__));
			return BCME_ERROR;
		}
		dhdp = &dhd->pub;

		if (dhd_net2idx(dhd, dev) != 0) {
			DHD_ERROR(("%s: Not primary interface\n", __FUNCTION__));
			return BCME_ERROR;
		}
	} else {
		DHD_ERROR(("%s: Invalid net_device\n", __FUNCTION__));
		return BCME_ERROR;
	}

	/* Check host IP overflow */
	cnt = dhd_dev_ndo_get_valid_inet6addr_count(inet6);
	if (cnt > dhdp->ndo_max_host_ip) {
		if (!dhdp->ndo_host_ip_overflow) {
			dhdp->ndo_host_ip_overflow = TRUE;
			/* Disable ND offload in FW */
			DHD_INFO(("%s: Host IP overflow, disable NDO\n", __FUNCTION__));
			ret = dhd_ndo_enable(dhdp, FALSE);
		}
		return ret;
	}
	/*
	 * Allocate a buffer to hold the IPv6 addresses to be added/removed.
	 * The driver must hold the inet6_dev lock while walking the structure,
	 * but it cannot issue an ioctl while the lock is held, since the ioctl
	 * may sleep. Hence, copy the addresses into the buffer first and issue
	 * the ioctl after unlocking.
	 */
	ipv6_addr = (struct in6_addr *)MALLOC(dhdp->osh,
		sizeof(struct in6_addr) * dhdp->ndo_max_host_ip);
	if (!ipv6_addr) {
		DHD_ERROR(("%s: failed to alloc ipv6 addr buffer\n", __FUNCTION__));
		return BCME_NOMEM;
	}
	/* Find DAD failed unicast addresses to be removed */
	cnt = 0;
	read_lock_bh(&inet6->lock);
	list_for_each_entry(ifa, &inet6->addr_list, if_list) {
		/* DAD failed unicast address */
		if ((ifa->flags & IFA_F_DADFAILED) &&
			(cnt < dhdp->ndo_max_host_ip)) {
			memcpy(&ipv6_addr[cnt], &ifa->addr, sizeof(struct in6_addr));
			cnt++;
		}
	}
	read_unlock_bh(&inet6->lock);

	/* Remove DAD failed unicast addresses */
	for (i = 0; i < cnt; i++) {
		DHD_INFO(("%s: Remove DAD failed addr\n", __FUNCTION__));
		ret = dhd_ndo_remove_ip_by_addr(dhdp, (char *)&ipv6_addr[i], 0);
		if (ret < 0) {
			goto done;
		}
	}

	/* Remove all anycast addresses */
	ret = dhd_ndo_remove_ip_by_type(dhdp, WL_ND_IPV6_ADDR_TYPE_ANYCAST, 0);
	if (ret < 0) {
		goto done;
	}

	/*
	 * If ND offload was disabled due to host IP overflow,
	 * attempt to add the valid unicast addresses again.
	 */
	if (dhdp->ndo_host_ip_overflow) {
		/* Find valid unicast addresses */
		cnt = 0;
		read_lock_bh(&inet6->lock);
		list_for_each_entry(ifa, &inet6->addr_list, if_list) {
			/* valid unicast address */
			if (!(ifa->flags & IFA_F_DADFAILED) &&
				(cnt < dhdp->ndo_max_host_ip)) {
				memcpy(&ipv6_addr[cnt], &ifa->addr,
					sizeof(struct in6_addr));
				cnt++;
			}
		}
		read_unlock_bh(&inet6->lock);

		/* Add valid unicast addresses */
		for (i = 0; i < cnt; i++) {
			ret = dhd_ndo_add_ip_with_type(dhdp,
				(char *)&ipv6_addr[i], WL_ND_IPV6_ADDR_TYPE_UNICAST, 0);
			if (ret < 0) {
				goto done;
			}
		}
	}
	/* Find anycast addresses */
	cnt = 0;
	read_lock_bh(&inet6->lock);
	acaddr = inet6->ac_list;
	while (acaddr) {
		if (cnt < dhdp->ndo_max_host_ip) {
			memcpy(&ipv6_addr[cnt], &acaddr->aca_addr, sizeof(struct in6_addr));
			cnt++;
		}
		acaddr = acaddr->aca_next;
	}
	read_unlock_bh(&inet6->lock);

	/* Add anycast addresses */
	for (i = 0; i < cnt; i++) {
		ret = dhd_ndo_add_ip_with_type(dhdp,
			(char *)&ipv6_addr[i], WL_ND_IPV6_ADDR_TYPE_ANYCAST, 0);
		if (ret < 0) {
			goto done;
		}
	}

	/* Now all host IP addresses were added successfully */
	if (dhdp->ndo_host_ip_overflow) {
		dhdp->ndo_host_ip_overflow = FALSE;
		if (dhdp->in_suspend) {
			/* driver is in (early) suspend state, need to enable ND offload in FW */
			DHD_INFO(("%s: enable NDO\n", __FUNCTION__));
			ret = dhd_ndo_enable(dhdp, TRUE);
		}
	}

done:
	if (ipv6_addr) {
		MFREE(dhdp->osh, ipv6_addr, sizeof(struct in6_addr) * dhdp->ndo_max_host_ip);
	}

	return ret;
}
#if __GNUC__ > 4 || (__GNUC__ == 4 && __GNUC_MINOR__ >= 6)
#pragma GCC diagnostic pop
#endif /* __GNUC__ > 4 || (__GNUC__ == 4 && __GNUC_MINOR__ >= 6) */
#endif /* NDO_CONFIG_SUPPORT */
#ifdef PNO_SUPPORT
/* Linux wrapper to call common dhd_pno_stop_for_ssid */
int
dhd_dev_pno_stop_for_ssid(struct net_device *dev)
{
	dhd_info_t *dhd = DHD_DEV_INFO(dev);

	return (dhd_pno_stop_for_ssid(&dhd->pub));
}

/* Linux wrapper to call common dhd_pno_set_for_ssid */
int
dhd_dev_pno_set_for_ssid(struct net_device *dev, wlc_ssid_ext_t* ssids_local, int nssid,
	uint16 scan_fr, int pno_repeat, int pno_freq_expo_max, uint16 *channel_list, int nchan)
{
	dhd_info_t *dhd = DHD_DEV_INFO(dev);

	return (dhd_pno_set_for_ssid(&dhd->pub, ssids_local, nssid, scan_fr,
		pno_repeat, pno_freq_expo_max, channel_list, nchan));
}

/* Linux wrapper to call common dhd_pno_enable */
int
dhd_dev_pno_enable(struct net_device *dev, int enable)
{
	dhd_info_t *dhd = DHD_DEV_INFO(dev);

	return (dhd_pno_enable(&dhd->pub, enable));
}

/* Linux wrapper to call common dhd_pno_set_for_hotlist */
int
dhd_dev_pno_set_for_hotlist(struct net_device *dev, wl_pfn_bssid_t *p_pfn_bssid,
	struct dhd_pno_hotlist_params *hotlist_params)
{
	dhd_info_t *dhd = DHD_DEV_INFO(dev);

	return (dhd_pno_set_for_hotlist(&dhd->pub, p_pfn_bssid, hotlist_params));
}

/* Linux wrapper to call common dhd_dev_pno_stop_for_batch */
int
dhd_dev_pno_stop_for_batch(struct net_device *dev)
{
	dhd_info_t *dhd = DHD_DEV_INFO(dev);

	return (dhd_pno_stop_for_batch(&dhd->pub));
}

/* Linux wrapper to call common dhd_dev_pno_set_for_batch */
int
dhd_dev_pno_set_for_batch(struct net_device *dev, struct dhd_pno_batch_params *batch_params)
{
	dhd_info_t *dhd = DHD_DEV_INFO(dev);

	return (dhd_pno_set_for_batch(&dhd->pub, batch_params));
}

/* Linux wrapper to call common dhd_dev_pno_get_for_batch */
int
dhd_dev_pno_get_for_batch(struct net_device *dev, char *buf, int bufsize)
{
	dhd_info_t *dhd = DHD_DEV_INFO(dev);

	return (dhd_pno_get_for_batch(&dhd->pub, buf, bufsize, PNO_STATUS_NORMAL));
}
#endif /* PNO_SUPPORT */
#if defined(OEM_ANDROID) && defined(PNO_SUPPORT)
#ifdef GSCAN_SUPPORT
bool
dhd_dev_is_legacy_pno_enabled(struct net_device *dev)
{
	dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev);

	return (dhd_is_legacy_pno_enabled(&dhd->pub));
}

int
dhd_dev_set_epno(struct net_device *dev)
{
	dhd_info_t *dhd = DHD_DEV_INFO(dev);
	if (!dhd) {
		return BCME_ERROR;
	}
	return dhd_pno_set_epno(&dhd->pub);
}

int
dhd_dev_flush_fw_epno(struct net_device *dev)
{
	dhd_info_t *dhd = DHD_DEV_INFO(dev);
	if (!dhd) {
		return BCME_ERROR;
	}
	return dhd_pno_flush_fw_epno(&dhd->pub);
}

/* Linux wrapper to call common dhd_pno_set_cfg_gscan */
int
dhd_dev_pno_set_cfg_gscan(struct net_device *dev, dhd_pno_gscan_cmd_cfg_t type,
	void *buf, bool flush)
{
	dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev);

	return (dhd_pno_set_cfg_gscan(&dhd->pub, type, buf, flush));
}

/* Linux wrapper to call common dhd_wait_batch_results_complete */
int
dhd_dev_wait_batch_results_complete(struct net_device *dev)
{
	dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev);

	return (dhd_wait_batch_results_complete(&dhd->pub));
}

/* Linux wrapper to call common dhd_pno_lock_batch_results */
int
dhd_dev_pno_lock_access_batch_results(struct net_device *dev)
{
	dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev);

	return (dhd_pno_lock_batch_results(&dhd->pub));
}
/* Linux wrapper to call common dhd_pno_unlock_batch_results */
void
dhd_dev_pno_unlock_access_batch_results(struct net_device *dev)
{
	dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev);

	dhd_pno_unlock_batch_results(&dhd->pub);
}
/* Linux wrapper to call common dhd_pno_initiate_gscan_request */
int
dhd_dev_pno_run_gscan(struct net_device *dev, bool run, bool flush)
{
	dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev);

	return (dhd_pno_initiate_gscan_request(&dhd->pub, run, flush));
}

/* Linux wrapper to call common dhd_pno_enable_full_scan_result */
int
dhd_dev_pno_enable_full_scan_result(struct net_device *dev, bool real_time_flag)
{
	dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev);

	return (dhd_pno_enable_full_scan_result(&dhd->pub, real_time_flag));
}

/* Linux wrapper to call common dhd_handle_hotlist_scan_evt */
void *
dhd_dev_hotlist_scan_event(struct net_device *dev,
	const void *data, int *send_evt_bytes, hotlist_type_t type, u32 *buf_len)
{
	dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev);

	return (dhd_handle_hotlist_scan_evt(&dhd->pub, data, send_evt_bytes, type, buf_len));
}

/* Linux wrapper to call common dhd_process_full_gscan_result */
void *
dhd_dev_process_full_gscan_result(struct net_device *dev,
	const void *data, uint32 len, int *send_evt_bytes)
{
	dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev);

	return (dhd_process_full_gscan_result(&dhd->pub, data, len, send_evt_bytes));
}

void
dhd_dev_gscan_hotlist_cache_cleanup(struct net_device *dev, hotlist_type_t type)
{
	dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev);

	dhd_gscan_hotlist_cache_cleanup(&dhd->pub, type);

	return;
}

int
dhd_dev_gscan_batch_cache_cleanup(struct net_device *dev)
{
	dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev);

	return (dhd_gscan_batch_cache_cleanup(&dhd->pub));
}

/* Linux wrapper to call common dhd_retreive_batch_scan_results */
int
dhd_dev_retrieve_batch_scan(struct net_device *dev)
{
	dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev);

	return (dhd_retreive_batch_scan_results(&dhd->pub));
}

/* Linux wrapper to call common dhd_pno_process_epno_result */
void * dhd_dev_process_epno_result(struct net_device *dev,
	const void *data, uint32 event, int *send_evt_bytes)
{
	dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev);

	return (dhd_pno_process_epno_result(&dhd->pub, data, event, send_evt_bytes));
}
int
dhd_dev_set_lazy_roam_cfg(struct net_device *dev,
	wlc_roam_exp_params_t *roam_param)
{
	dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev);
	wl_roam_exp_cfg_t roam_exp_cfg;
	int err;

	if (!roam_param) {
		return BCME_BADARG;
	}

	DHD_INFO(("a_band_boost_thr %d a_band_penalty_thr %d\n",
		roam_param->a_band_boost_threshold, roam_param->a_band_penalty_threshold));
	DHD_INFO(("a_band_boost_factor %d a_band_penalty_factor %d cur_bssid_boost %d\n",
		roam_param->a_band_boost_factor, roam_param->a_band_penalty_factor,
		roam_param->cur_bssid_boost));
	DHD_INFO(("alert_roam_trigger_thr %d a_band_max_boost %d\n",
		roam_param->alert_roam_trigger_threshold, roam_param->a_band_max_boost));

	memcpy(&roam_exp_cfg.params, roam_param, sizeof(*roam_param));
	roam_exp_cfg.version = ROAM_EXP_CFG_VERSION;
	roam_exp_cfg.flags = ROAM_EXP_CFG_PRESENT;
	if (dhd->pub.lazy_roam_enable) {
		roam_exp_cfg.flags |= ROAM_EXP_ENABLE_FLAG;
	}

	err = dhd_iovar(&dhd->pub, 0, "roam_exp_params",
		(char *)&roam_exp_cfg, sizeof(roam_exp_cfg), NULL, 0,
		TRUE);
	if (err < 0) {
		DHD_ERROR(("%s : Failed to execute roam_exp_params %d\n", __FUNCTION__, err));
	}
	return err;
}

int
dhd_dev_lazy_roam_enable(struct net_device *dev, uint32 enable)
{
	int err;
	dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev);
	wl_roam_exp_cfg_t roam_exp_cfg;

	memset(&roam_exp_cfg, 0, sizeof(roam_exp_cfg));
	roam_exp_cfg.version = ROAM_EXP_CFG_VERSION;
	if (enable) {
		roam_exp_cfg.flags = ROAM_EXP_ENABLE_FLAG;
	}

	err = dhd_iovar(&dhd->pub, 0, "roam_exp_params",
		(char *)&roam_exp_cfg, sizeof(roam_exp_cfg), NULL, 0,
		TRUE);
	if (err < 0) {
		DHD_ERROR(("%s : Failed to execute roam_exp_params %d\n", __FUNCTION__, err));
	} else {
		dhd->pub.lazy_roam_enable = (enable != 0);
	}
	return err;
}
int
dhd_dev_set_lazy_roam_bssid_pref(struct net_device *dev,
	wl_bssid_pref_cfg_t *bssid_pref, uint32 flush)
{
	int err;
	uint len;
	dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev);

	bssid_pref->version = BSSID_PREF_LIST_VERSION;
	/* By default programming bssid pref flushes out old values */
	bssid_pref->flags = (flush && !bssid_pref->count) ? ROAM_EXP_CLEAR_BSSID_PREF : 0;
	len = sizeof(wl_bssid_pref_cfg_t);
	if (bssid_pref->count) {
		len += (bssid_pref->count - 1) * sizeof(wl_bssid_pref_list_t);
	}
	err = dhd_iovar(&dhd->pub, 0, "roam_exp_bssid_pref",
		(char *)bssid_pref, len, NULL, 0, TRUE);
	if (err != BCME_OK) {
		DHD_ERROR(("%s : Failed to execute roam_exp_bssid_pref %d\n", __FUNCTION__, err));
	}
	return err;
}
#endif /* GSCAN_SUPPORT */
#if defined(GSCAN_SUPPORT) || defined(ROAMEXP_SUPPORT)
int
dhd_dev_set_blacklist_bssid(struct net_device *dev, maclist_t *blacklist,
	uint32 len, uint32 flush)
{
	int err;
	dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev);
	int macmode;

	if (blacklist) {
		err = dhd_wl_ioctl_cmd(&(dhd->pub), WLC_SET_MACLIST, (char *)blacklist,
			len, TRUE, 0);
		if (err != BCME_OK) {
			DHD_ERROR(("%s : WLC_SET_MACLIST failed %d\n", __FUNCTION__, err));
			return err;
		}
	}
	/* By default programming blacklist flushes out old values */
	macmode = (flush && !blacklist) ? WLC_MACMODE_DISABLED : WLC_MACMODE_DENY;
	err = dhd_wl_ioctl_cmd(&(dhd->pub), WLC_SET_MACMODE, (char *)&macmode,
		sizeof(macmode), TRUE, 0);
	if (err != BCME_OK) {
		DHD_ERROR(("%s : WLC_SET_MACMODE failed %d\n", __FUNCTION__, err));
	}
	return err;
}
int
dhd_dev_set_whitelist_ssid(struct net_device *dev, wl_ssid_whitelist_t *ssid_whitelist,
	uint32 len, uint32 flush)
{
	int err;
	dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev);
	wl_ssid_whitelist_t whitelist_ssid_flush;

	if (!ssid_whitelist) {
		if (flush) {
			ssid_whitelist = &whitelist_ssid_flush;
			ssid_whitelist->ssid_count = 0;
		} else {
			DHD_ERROR(("%s : Nothing to do here\n", __FUNCTION__));
			return BCME_BADARG;
		}
	}
	ssid_whitelist->version = SSID_WHITELIST_VERSION;
	ssid_whitelist->flags = flush ? ROAM_EXP_CLEAR_SSID_WHITELIST : 0;
	err = dhd_iovar(&dhd->pub, 0, "roam_exp_ssid_whitelist", (char *)ssid_whitelist, len, NULL,
		0, TRUE);
	if (err != BCME_OK) {
		DHD_ERROR(("%s : Failed to execute roam_exp_ssid_whitelist %d\n",
			__FUNCTION__, err));
	}
	return err;
}
#endif /* GSCAN_SUPPORT || ROAMEXP_SUPPORT */

#if defined(GSCAN_SUPPORT) || defined(DHD_GET_VALID_CHANNELS)
/* Linux wrapper to call common dhd_pno_get_gscan */
void *
dhd_dev_pno_get_gscan(struct net_device *dev, dhd_pno_gscan_cmd_cfg_t type,
	void *info, uint32 *len)
{
	dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev);

	return (dhd_pno_get_gscan(&dhd->pub, type, info, len));
}
#endif /* GSCAN_SUPPORT || DHD_GET_VALID_CHANNELS */
#endif /* defined(OEM_ANDROID) && defined(PNO_SUPPORT) */

#ifdef RSSI_MONITOR_SUPPORT
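/*
 * Note (added for clarity; behavior inferred from the flag names, not from
 * firmware docs): with start nonzero, the firmware is configured to watch for
 * RSSI excursions outside [min_rssi, max_rssi]; with start zero, the
 * RSSI_MONITOR_STOP flag tears the monitor down.
 */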
int
dhd_dev_set_rssi_monitor_cfg(struct net_device *dev, int start,
	int8 max_rssi, int8 min_rssi)
{
	int err;
	wl_rssi_monitor_cfg_t rssi_monitor;
	dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev);

	rssi_monitor.version = RSSI_MONITOR_VERSION;
	rssi_monitor.max_rssi = max_rssi;
	rssi_monitor.min_rssi = min_rssi;
	rssi_monitor.flags = start ? 0 : RSSI_MONITOR_STOP;
	err = dhd_iovar(&dhd->pub, 0, "rssi_monitor", (char *)&rssi_monitor, sizeof(rssi_monitor),
		NULL, 0, TRUE);
	if (err < 0 && err != BCME_UNSUPPORTED) {
		DHD_ERROR(("%s : Failed to execute rssi_monitor %d\n", __FUNCTION__, err));
	}
	return err;
}
#endif /* RSSI_MONITOR_SUPPORT */
#ifdef DHDTCPACK_SUPPRESS
int
dhd_dev_set_tcpack_sup_mode_cfg(struct net_device *dev, uint8 enable)
{
	int err;
	dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev);

	err = dhd_tcpack_suppress_set(&dhd->pub, enable);
	if (err != BCME_OK) {
		DHD_ERROR(("%s : Failed to set tcpack_suppress mode: %d\n", __FUNCTION__, err));
	}
	return err;
}
#endif /* DHDTCPACK_SUPPRESS */
int
dhd_dev_cfg_rand_mac_oui(struct net_device *dev, uint8 *oui)
{
	dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev);
	dhd_pub_t *dhdp = &dhd->pub;

	if (!dhdp || !oui) {
		DHD_ERROR(("NULL POINTER : %s\n", __FUNCTION__));
		return BCME_ERROR;
	}
	if (ETHER_ISMULTI(oui)) {
		DHD_ERROR(("Expected unicast OUI\n"));
		return BCME_ERROR;
	} else {
		uint8 *rand_mac_oui = dhdp->rand_mac_oui;
		memcpy(rand_mac_oui, oui, DOT11_OUI_LEN);
		DHD_ERROR(("Random MAC OUI to be used - "MACOUIDBG"\n",
			MACOUI2STRDBG(rand_mac_oui)));
	}
	return BCME_OK;
}
int
dhd_set_rand_mac_oui(dhd_pub_t *dhd)
{
	int err;
	wl_pfn_macaddr_cfg_t wl_cfg;
	uint8 *rand_mac_oui = dhd->rand_mac_oui;

	memset(&wl_cfg.macaddr, 0, ETHER_ADDR_LEN);
	memcpy(&wl_cfg.macaddr, rand_mac_oui, DOT11_OUI_LEN);
	wl_cfg.version = WL_PFN_MACADDR_CFG_VER;
	if (ETHER_ISNULLADDR(&wl_cfg.macaddr)) {
		wl_cfg.flags = 0;
	} else {
		wl_cfg.flags = (WL_PFN_MAC_OUI_ONLY_MASK | WL_PFN_SET_MAC_UNASSOC_MASK);
	}

	DHD_ERROR(("Setting rand mac oui to FW - "MACOUIDBG"\n",
		MACOUI2STRDBG(rand_mac_oui)));

	err = dhd_iovar(dhd, 0, "pfn_macaddr", (char *)&wl_cfg, sizeof(wl_cfg), NULL, 0, TRUE);
	if (err < 0) {
		DHD_ERROR(("%s : failed to execute pfn_macaddr %d\n", __FUNCTION__, err));
	}
	return err;
}
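
/*
 * Note (added for clarity; behavior inferred from the flag names): with a
 * non-null OUI, WL_PFN_MAC_OUI_ONLY_MASK presumably tells the firmware to keep
 * the configured OUI and randomize only the lower three MAC bytes, while
 * WL_PFN_SET_MAC_UNASSOC_MASK restricts the randomized MAC to unassociated
 * (PNO/scan) traffic.
 */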
#if defined(RTT_SUPPORT) && defined(WL_CFG80211)
/* Linux wrapper to call common dhd_rtt_set_cfg */
int
dhd_dev_rtt_set_cfg(struct net_device *dev, void *buf)
{
	dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev);

	return (dhd_rtt_set_cfg(&dhd->pub, buf));
}

int
dhd_dev_rtt_cancel_cfg(struct net_device *dev, struct ether_addr *mac_list, int mac_cnt)
{
	dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev);

	return (dhd_rtt_stop(&dhd->pub, mac_list, mac_cnt));
}

int
dhd_dev_rtt_register_noti_callback(struct net_device *dev, void *ctx, dhd_rtt_compl_noti_fn noti_fn)
{
	dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev);

	return (dhd_rtt_register_noti_callback(&dhd->pub, ctx, noti_fn));
}

int
dhd_dev_rtt_unregister_noti_callback(struct net_device *dev, dhd_rtt_compl_noti_fn noti_fn)
{
	dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev);

	return (dhd_rtt_unregister_noti_callback(&dhd->pub, noti_fn));
}

int
dhd_dev_rtt_capability(struct net_device *dev, rtt_capabilities_t *capa)
{
	dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev);

	return (dhd_rtt_capability(&dhd->pub, capa));
}

int
dhd_dev_rtt_avail_channel(struct net_device *dev, wifi_channel_info *channel_info)
{
	dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev);

	return (dhd_rtt_avail_channel(&dhd->pub, channel_info));
}

int
dhd_dev_rtt_enable_responder(struct net_device *dev, wifi_channel_info *channel_info)
{
	dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev);

	return (dhd_rtt_enable_responder(&dhd->pub, channel_info));
}
int dhd_dev_rtt_cancel_responder(struct net_device *dev)
{
	dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev);

	return (dhd_rtt_cancel_responder(&dhd->pub));
}
#endif /* RTT_SUPPORT && WL_CFG80211 */
#ifdef KEEP_ALIVE
#define KA_TEMP_BUF_SIZE	512
#define KA_FRAME_SIZE		300
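
/*
 * Sketch of the iovar buffer assembled below (derived from the code; comment
 * added for clarity):
 *
 *	"mkeep_alive\0" | wl_mkeep_alive_pkt_t header | Ethernet frame
 *
 * where the Ethernet frame is
 *	dst MAC (6) | src MAC (6) | ethertype 0x0800 (2) | IP packet (ip_pkt_len)
 */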
int
dhd_dev_start_mkeep_alive(dhd_pub_t *dhd_pub, uint8 mkeep_alive_id, uint8 *ip_pkt,
	uint16 ip_pkt_len, uint8* src_mac, uint8* dst_mac, uint32 period_msec)
{
	const int ETHERTYPE_LEN = 2;
	char *pbuf = NULL;
	const char *str;
	wl_mkeep_alive_pkt_t mkeep_alive_pkt;
	wl_mkeep_alive_pkt_t *mkeep_alive_pktp = NULL;
	int buf_len = 0;
	int str_len = 0;
	int res = BCME_ERROR;
	int len_bytes = 0;
	int i = 0;
	/* Ethernet frame buffer must hold both the max IP pkt (256 bytes) and the header */
	char *pmac_frame = NULL;
	char *pmac_frame_begin = NULL;

	/*
	 * The mkeep_alive packet is for the STA interface only; if the BSS is
	 * configured as AP, the dongle shall reject a mkeep_alive request.
	 */
	if (!dhd_support_sta_mode(dhd_pub))
		return res;

	DHD_TRACE(("%s execution\n", __FUNCTION__));

	if ((pbuf = MALLOCZ(dhd_pub->osh, KA_TEMP_BUF_SIZE)) == NULL) {
		DHD_ERROR(("failed to allocate buf with size %d\n", KA_TEMP_BUF_SIZE));
		res = BCME_NOMEM;
		return res;
	}

	if ((pmac_frame = MALLOCZ(dhd_pub->osh, KA_FRAME_SIZE)) == NULL) {
		DHD_ERROR(("failed to allocate mac_frame with size %d\n", KA_FRAME_SIZE));
		res = BCME_NOMEM;
		goto exit;
	}
	pmac_frame_begin = pmac_frame;

	/*
	 * Get current mkeep-alive status.
	 */
	res = dhd_iovar(dhd_pub, 0, "mkeep_alive", &mkeep_alive_id, sizeof(mkeep_alive_id), pbuf,
		KA_TEMP_BUF_SIZE, FALSE);
	if (res < 0) {
		DHD_ERROR(("%s: Get mkeep_alive failed (error=%d)\n", __FUNCTION__, res));
		goto exit;
	} else {
		/* Check whether the requested ID is already occupied */
		mkeep_alive_pktp = (wl_mkeep_alive_pkt_t *) pbuf;
		if (dtoh32(mkeep_alive_pktp->period_msec) != 0) {
			DHD_ERROR(("%s: Get mkeep_alive failed, ID %u is in use.\n",
				__FUNCTION__, mkeep_alive_id));

			/* Current occupied ID info */
			DHD_ERROR(("%s: mkeep_alive\n", __FUNCTION__));
			DHD_ERROR(("   Id    : %d\n"
				"   Period: %d msec\n"
				"   Length: %d\n"
				"   Packet: 0x",
				mkeep_alive_pktp->keep_alive_id,
				dtoh32(mkeep_alive_pktp->period_msec),
				dtoh16(mkeep_alive_pktp->len_bytes)));

			for (i = 0; i < mkeep_alive_pktp->len_bytes; i++) {
				DHD_ERROR(("%02x", mkeep_alive_pktp->data[i]));
			}
			DHD_ERROR(("\n"));

			res = BCME_NOTFOUND;
			goto exit;
		}
	}
	/* Request the specified ID */
	memset(&mkeep_alive_pkt, 0, sizeof(wl_mkeep_alive_pkt_t));
	memset(pbuf, 0, KA_TEMP_BUF_SIZE);
	str = "mkeep_alive";
	str_len = strlen(str);
	strncpy(pbuf, str, str_len);
	pbuf[str_len] = '\0';

	mkeep_alive_pktp = (wl_mkeep_alive_pkt_t *) (pbuf + str_len + 1);
	mkeep_alive_pkt.period_msec = htod32(period_msec);
	buf_len = str_len + 1;
	mkeep_alive_pkt.version = htod16(WL_MKEEP_ALIVE_VERSION);
	mkeep_alive_pkt.length = htod16(WL_MKEEP_ALIVE_FIXED_LEN);

	/* ID assigned */
	mkeep_alive_pkt.keep_alive_id = mkeep_alive_id;

	buf_len += WL_MKEEP_ALIVE_FIXED_LEN;

	/*
	 * Build up the Ethernet frame.
	 */
	/* Map dest MAC addr */
	memcpy(pmac_frame, dst_mac, ETHER_ADDR_LEN);
	pmac_frame += ETHER_ADDR_LEN;

	/* Map src MAC addr */
	memcpy(pmac_frame, src_mac, ETHER_ADDR_LEN);
	pmac_frame += ETHER_ADDR_LEN;

	/* Map Ethernet type (ETHERTYPE_IP: 0x0800) */
	*(pmac_frame++) = 0x08;
	*(pmac_frame++) = 0x00;

	/* Map IP pkt */
	memcpy(pmac_frame, ip_pkt, ip_pkt_len);
	pmac_frame += ip_pkt_len;

	/*
	 * Length of the Ethernet frame
	 * = src MAC + dst MAC + ethertype + IP packet length
	 */
	len_bytes = ETHER_ADDR_LEN * 2 + ETHERTYPE_LEN + ip_pkt_len;
	memcpy(mkeep_alive_pktp->data, pmac_frame_begin, len_bytes);
	buf_len += len_bytes;
	mkeep_alive_pkt.len_bytes = htod16(len_bytes);

	/*
	 * Keep-alive attributes are set in a local variable (mkeep_alive_pkt) and
	 * then memcpy'ed into the buffer (mkeep_alive_pktp) since there is no
	 * guarantee that the buffer is properly aligned.
	 */
	memcpy((char *)mkeep_alive_pktp, &mkeep_alive_pkt, WL_MKEEP_ALIVE_FIXED_LEN);

	res = dhd_wl_ioctl_cmd(dhd_pub, WLC_SET_VAR, pbuf, buf_len, TRUE, 0);

exit:
	if (pmac_frame_begin) {
		MFREE(dhd_pub->osh, pmac_frame_begin, KA_FRAME_SIZE);
		pmac_frame_begin = NULL;
	}
	if (pbuf) {
		MFREE(dhd_pub->osh, pbuf, KA_TEMP_BUF_SIZE);
		pbuf = NULL;
	}
	return res;
}
int
dhd_dev_stop_mkeep_alive(dhd_pub_t *dhd_pub, uint8 mkeep_alive_id)
{
	char *pbuf = NULL;
	wl_mkeep_alive_pkt_t mkeep_alive_pkt;
	wl_mkeep_alive_pkt_t *mkeep_alive_pktp = NULL;
	int res = BCME_ERROR;
	int i = 0;

	/*
	 * The mkeep_alive packet is for the STA interface only; if the BSS is
	 * configured as AP, the dongle shall reject a mkeep_alive request.
	 */
	if (!dhd_support_sta_mode(dhd_pub))
		return res;

	DHD_TRACE(("%s execution\n", __FUNCTION__));

	/*
	 * Get current mkeep-alive status. Skip ID 0, which is being used for the NULL pkt.
	 */
	if ((pbuf = MALLOC(dhd_pub->osh, KA_TEMP_BUF_SIZE)) == NULL) {
		DHD_ERROR(("failed to allocate buf with size %d\n", KA_TEMP_BUF_SIZE));
		return res;
	}

	res = dhd_iovar(dhd_pub, 0, "mkeep_alive", &mkeep_alive_id,
		sizeof(mkeep_alive_id), pbuf, KA_TEMP_BUF_SIZE, FALSE);
	if (res < 0) {
		DHD_ERROR(("%s: Get mkeep_alive failed (error=%d)\n", __FUNCTION__, res));
		goto exit;
	} else {
		/* Check occupied ID */
		mkeep_alive_pktp = (wl_mkeep_alive_pkt_t *) pbuf;
		DHD_INFO(("%s: mkeep_alive\n", __FUNCTION__));
		DHD_INFO(("   Id    : %d\n"
			"   Period: %d msec\n"
			"   Length: %d\n"
			"   Packet: 0x",
			mkeep_alive_pktp->keep_alive_id,
			dtoh32(mkeep_alive_pktp->period_msec),
			dtoh16(mkeep_alive_pktp->len_bytes)));

		for (i = 0; i < mkeep_alive_pktp->len_bytes; i++) {
			DHD_INFO(("%02x", mkeep_alive_pktp->data[i]));
		}
		DHD_INFO(("\n"));
	}

	/* Make it stop if available */
	if (dtoh32(mkeep_alive_pktp->period_msec) != 0) {
		DHD_INFO(("stop mkeep_alive on ID %d\n", mkeep_alive_id));
		memset(&mkeep_alive_pkt, 0, sizeof(wl_mkeep_alive_pkt_t));

		mkeep_alive_pkt.period_msec = 0;
		mkeep_alive_pkt.version = htod16(WL_MKEEP_ALIVE_VERSION);
		mkeep_alive_pkt.length = htod16(WL_MKEEP_ALIVE_FIXED_LEN);
		mkeep_alive_pkt.keep_alive_id = mkeep_alive_id;

		res = dhd_iovar(dhd_pub, 0, "mkeep_alive",
			(char *)&mkeep_alive_pkt,
			WL_MKEEP_ALIVE_FIXED_LEN, NULL, 0, TRUE);
	} else {
		DHD_ERROR(("%s: ID %u does not exist.\n", __FUNCTION__, mkeep_alive_id));
		res = BCME_NOTFOUND;
	}
exit:
	if (pbuf) {
		MFREE(dhd_pub->osh, pbuf, KA_TEMP_BUF_SIZE);
		pbuf = NULL;
	}
	return res;
}
#endif /* KEEP_ALIVE */
#if defined(PKT_FILTER_SUPPORT) && defined(APF)
static void _dhd_apf_lock_local(dhd_info_t *dhd)
{
	if (dhd) {
		mutex_lock(&dhd->dhd_apf_mutex);
	}
}

static void _dhd_apf_unlock_local(dhd_info_t *dhd)
{
	if (dhd) {
		mutex_unlock(&dhd->dhd_apf_mutex);
	}
}
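
/*
 * Build and send a "pkt_filter_add" iovar carrying an APF (Android Packet
 * Filter) program to the firmware. The iovar buffer is the command string
 * followed by a wl_pkt_filter_t whose payload holds the APF program bytes.
 * (Comment added for clarity.)
 */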
static int
__dhd_apf_add_filter(struct net_device *ndev, uint32 filter_id,
	u8* program, uint32 program_len)
{
	dhd_info_t *dhd = DHD_DEV_INFO(ndev);
	dhd_pub_t *dhdp = &dhd->pub;
	wl_pkt_filter_t * pkt_filterp;
	wl_apf_program_t *apf_program;
	char *buf;
	u32 cmd_len, buf_len;
	int ifidx, ret;
	char cmd[] = "pkt_filter_add";

	ifidx = dhd_net2idx(dhd, ndev);
	if (ifidx == DHD_BAD_IF) {
		DHD_ERROR(("%s: bad ifidx\n", __FUNCTION__));
		return -ENODEV;
	}

	cmd_len = sizeof(cmd);

	/* Bail out if program_len exceeds the expected length
	 * or if the program is NULL.
	 */
	if ((program_len > WL_APF_PROGRAM_MAX_SIZE) || (program == NULL)) {
		DHD_ERROR(("%s Invalid program_len: %d, program: %pK\n",
			__FUNCTION__, program_len, program));
		return -EINVAL;
	}

	buf_len = cmd_len + WL_PKT_FILTER_FIXED_LEN +
		WL_APF_PROGRAM_FIXED_LEN + program_len;

	buf = MALLOCZ(dhdp->osh, buf_len);
	if (unlikely(!buf)) {
		DHD_ERROR(("%s: MALLOC failure, %d bytes\n", __FUNCTION__, buf_len));
		return -ENOMEM;
	}

	memcpy(buf, cmd, cmd_len);

	pkt_filterp = (wl_pkt_filter_t *) (buf + cmd_len);
	pkt_filterp->id = htod32(filter_id);
	pkt_filterp->negate_match = htod32(FALSE);
	pkt_filterp->type = htod32(WL_PKT_FILTER_TYPE_APF_MATCH);

	apf_program = &pkt_filterp->u.apf_program;
	apf_program->version = htod16(WL_APF_INTERNAL_VERSION);
	apf_program->instr_len = htod16(program_len);
	memcpy(apf_program->instrs, program, program_len);

	ret = dhd_wl_ioctl_cmd(dhdp, WLC_SET_VAR, buf, buf_len, TRUE, ifidx);
	if (unlikely(ret)) {
		DHD_ERROR(("%s: failed to add APF filter, id=%d, ret=%d\n",
			__FUNCTION__, filter_id, ret));
	}

	if (buf) {
		MFREE(dhdp->osh, buf, buf_len);
	}
	return ret;
}
static int
__dhd_apf_config_filter(struct net_device *ndev, uint32 filter_id,
	uint32 mode, uint32 enable)
{
	dhd_info_t *dhd = DHD_DEV_INFO(ndev);
	dhd_pub_t *dhdp = &dhd->pub;
	wl_pkt_filter_enable_t * pkt_filterp;
	char *buf;
	u32 cmd_len, buf_len;
	int ifidx, ret;
	char cmd[] = "pkt_filter_enable";

	ifidx = dhd_net2idx(dhd, ndev);
	if (ifidx == DHD_BAD_IF) {
		DHD_ERROR(("%s: bad ifidx\n", __FUNCTION__));
		return -ENODEV;
	}

	cmd_len = sizeof(cmd);
	buf_len = cmd_len + sizeof(*pkt_filterp);

	buf = MALLOCZ(dhdp->osh, buf_len);
	if (unlikely(!buf)) {
		DHD_ERROR(("%s: MALLOC failure, %d bytes\n", __FUNCTION__, buf_len));
		return -ENOMEM;
	}

	memcpy(buf, cmd, cmd_len);

	pkt_filterp = (wl_pkt_filter_enable_t *) (buf + cmd_len);
	pkt_filterp->id = htod32(filter_id);
	pkt_filterp->enable = htod32(enable);

	ret = dhd_wl_ioctl_cmd(dhdp, WLC_SET_VAR, buf, buf_len, TRUE, ifidx);
	if (unlikely(ret)) {
		DHD_ERROR(("%s: failed to enable APF filter, id=%d, ret=%d\n",
			__FUNCTION__, filter_id, ret));
		goto exit;
	}

	ret = dhd_wl_ioctl_set_intiovar(dhdp, "pkt_filter_mode", dhd_master_mode,
		WLC_SET_VAR, TRUE, ifidx);
	if (unlikely(ret)) {
		DHD_ERROR(("%s: failed to set APF filter mode, id=%d, ret=%d\n",
			__FUNCTION__, filter_id, ret));
	}

exit:
	if (buf) {
		MFREE(dhdp->osh, buf, buf_len);
	}
	return ret;
}
static int
__dhd_apf_delete_filter(struct net_device *ndev, uint32 filter_id)
{
	dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(ndev);
	dhd_pub_t *dhdp = &dhd->pub;
	int ifidx, ret;

	ifidx = dhd_net2idx(dhd, ndev);
	if (ifidx == DHD_BAD_IF) {
		DHD_ERROR(("%s: bad ifidx\n", __FUNCTION__));
		return -ENODEV;
	}

	ret = dhd_wl_ioctl_set_intiovar(dhdp, "pkt_filter_delete",
		htod32(filter_id), WLC_SET_VAR, TRUE, ifidx);
	if (unlikely(ret)) {
		DHD_ERROR(("%s: failed to delete APF filter, id=%d, ret=%d\n",
			__FUNCTION__, filter_id, ret));
	}

	return ret;
}
void dhd_apf_lock(struct net_device *dev)
{
	dhd_info_t *dhd = DHD_DEV_INFO(dev);
	_dhd_apf_lock_local(dhd);
}

void dhd_apf_unlock(struct net_device *dev)
{
	dhd_info_t *dhd = DHD_DEV_INFO(dev);
	_dhd_apf_unlock_local(dhd);
}

int
dhd_dev_apf_get_version(struct net_device *ndev, uint32 *version)
{
	dhd_info_t *dhd = DHD_DEV_INFO(ndev);
	dhd_pub_t *dhdp = &dhd->pub;
	int ifidx, ret;

	if (!FW_SUPPORTED(dhdp, apf)) {
		DHD_ERROR(("%s: firmware doesn't support APF\n", __FUNCTION__));

		/*
		 * Notify the Android framework that APF is not supported by
		 * setting the version to zero.
		 */
		*version = 0;
		return BCME_OK;
	}

	ifidx = dhd_net2idx(dhd, ndev);
	if (ifidx == DHD_BAD_IF) {
		DHD_ERROR(("%s: bad ifidx\n", __FUNCTION__));
		return -ENODEV;
	}

	ret = dhd_wl_ioctl_get_intiovar(dhdp, "apf_ver", version,
		WLC_GET_VAR, FALSE, ifidx);
	if (unlikely(ret)) {
		DHD_ERROR(("%s: failed to get APF version, ret=%d\n",
			__FUNCTION__, ret));
	}

	return ret;
}

int
dhd_dev_apf_get_max_len(struct net_device *ndev, uint32 *max_len)
{
	dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(ndev);
	dhd_pub_t *dhdp = &dhd->pub;
	int ifidx, ret;

	if (!FW_SUPPORTED(dhdp, apf)) {
		DHD_ERROR(("%s: firmware doesn't support APF\n", __FUNCTION__));
		*max_len = 0;
		return BCME_OK;
	}

	ifidx = dhd_net2idx(dhd, ndev);
	if (ifidx == DHD_BAD_IF) {
		DHD_ERROR(("%s: bad ifidx\n", __FUNCTION__));
		return -ENODEV;
	}

	ret = dhd_wl_ioctl_get_intiovar(dhdp, "apf_size_limit", max_len,
		WLC_GET_VAR, FALSE, ifidx);
	if (unlikely(ret)) {
		DHD_ERROR(("%s: failed to get APF size limit, ret=%d\n",
			__FUNCTION__, ret));
	}

	return ret;
}
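
/*
 * Install a new APF program. Any previously installed program is deleted
 * first, and if the driver is already in (early) suspend the filter is
 * re-enabled immediately so the new program takes effect without a
 * resume/suspend cycle. (Comment added for clarity.)
 */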
int
dhd_dev_apf_add_filter(struct net_device *ndev, u8* program,
	uint32 program_len)
{
	dhd_info_t *dhd = DHD_DEV_INFO(ndev);
	dhd_pub_t *dhdp = &dhd->pub;
	int ret;

	DHD_APF_LOCK(ndev);

	/* delete, if filter already exists */
	if (dhdp->apf_set) {
		ret = __dhd_apf_delete_filter(ndev, PKT_FILTER_APF_ID);
		if (unlikely(ret)) {
			goto exit;
		}
		dhdp->apf_set = FALSE;
	}

	ret = __dhd_apf_add_filter(ndev, PKT_FILTER_APF_ID, program, program_len);
	if (ret) {
		goto exit;
	}
	dhdp->apf_set = TRUE;

	if (dhdp->in_suspend && dhdp->apf_set && !(dhdp->op_mode & DHD_FLAG_HOSTAP_MODE)) {
		/* Driver is still in (early) suspend state, enable APF filter back */
		ret = __dhd_apf_config_filter(ndev, PKT_FILTER_APF_ID,
			PKT_FILTER_MODE_FORWARD_ON_MATCH, TRUE);
	}
exit:
	DHD_APF_UNLOCK(ndev);

	return ret;
}

int
dhd_dev_apf_enable_filter(struct net_device *ndev)
{
	dhd_info_t *dhd = DHD_DEV_INFO(ndev);
	dhd_pub_t *dhdp = &dhd->pub;
	int ret = 0;
	bool nan_dp_active = false;

	DHD_APF_LOCK(ndev);
#ifdef WL_NAN
	nan_dp_active = wl_cfgnan_is_dp_active(ndev);
#endif /* WL_NAN */
	if (dhdp->apf_set && (!(dhdp->op_mode & DHD_FLAG_HOSTAP_MODE) &&
		!nan_dp_active)) {
		ret = __dhd_apf_config_filter(ndev, PKT_FILTER_APF_ID,
			PKT_FILTER_MODE_FORWARD_ON_MATCH, TRUE);
	}

	DHD_APF_UNLOCK(ndev);

	return ret;
}

int
dhd_dev_apf_disable_filter(struct net_device *ndev)
{
	dhd_info_t *dhd = DHD_DEV_INFO(ndev);
	dhd_pub_t *dhdp = &dhd->pub;
	int ret = 0;

	DHD_APF_LOCK(ndev);

	if (dhdp->apf_set) {
		ret = __dhd_apf_config_filter(ndev, PKT_FILTER_APF_ID,
			PKT_FILTER_MODE_FORWARD_ON_MATCH, FALSE);
	}

	DHD_APF_UNLOCK(ndev);

	return ret;
}

int
dhd_dev_apf_delete_filter(struct net_device *ndev)
{
	dhd_info_t *dhd = DHD_DEV_INFO(ndev);
	dhd_pub_t *dhdp = &dhd->pub;
	int ret = 0;

	DHD_APF_LOCK(ndev);

	if (dhdp->apf_set) {
		ret = __dhd_apf_delete_filter(ndev, PKT_FILTER_APF_ID);
		if (!ret) {
			dhdp->apf_set = FALSE;
		}
	}

	DHD_APF_UNLOCK(ndev);

	return ret;
}
#endif /* PKT_FILTER_SUPPORT && APF */
#if defined(OEM_ANDROID)
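/*
 * Deferred HANG handler: runs from the dhd_hang_process_work workqueue so
 * the notification to user space (wireless extensions and/or cfg80211) is
 * issued in process context rather than from the context that detected the
 * hang. (Comment added for clarity.)
 */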
static void dhd_hang_process(struct work_struct *work_data)
{
	struct net_device *dev;
#ifdef IFACE_HANG_FORCE_DEV_CLOSE
	struct net_device *ndev;
	uint8 i = 0;
#endif /* IFACE_HANG_FORCE_DEV_CLOSE */
	/* Ignore compiler warnings due to -Werror=cast-qual */
#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__)
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wcast-qual"
#endif // endif
	struct dhd_info *dhd =
		container_of(work_data, dhd_info_t, dhd_hang_process_work);
#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__)
#pragma GCC diagnostic pop
#endif // endif

	dev = dhd->iflist[0]->net;
	if (dev) {
#if defined(WL_WIRELESS_EXT)
		wl_iw_send_priv_event(dev, "HANG");
#endif // endif
#if defined(WL_CFG80211)
		wl_cfg80211_hang(dev, WLAN_REASON_UNSPECIFIED);
#endif // endif
	}
#ifdef IFACE_HANG_FORCE_DEV_CLOSE
	/*
	 * For HW2, dev_close needs to be done so the upper layer can recover
	 * after a hang. For Interposer, skip dev_close so that dhd iovars can
	 * still be used to take a socramdump after a crash; also skip for HW4,
	 * as its handling of the hang event is different.
	 */
	rtnl_lock();
	for (i = 0; i < DHD_MAX_IFS; i++) {
		ndev = dhd->iflist[i] ? dhd->iflist[i]->net : NULL;
		if (ndev && (ndev->flags & IFF_UP)) {
			DHD_ERROR(("ndev->name : %s dev close\n",
				ndev->name));
			dev_close(ndev);
		}
	}
	rtnl_unlock();
#endif /* IFACE_HANG_FORCE_DEV_CLOSE */
}
#ifdef EXYNOS_PCIE_LINKDOWN_RECOVERY
extern dhd_pub_t *link_recovery;
void dhd_host_recover_link(void)
{
	DHD_ERROR(("****** %s ******\n", __FUNCTION__));
	link_recovery->hang_reason = HANG_REASON_PCIE_LINK_DOWN_RC_DETECT;
	dhd_bus_set_linkdown(link_recovery, TRUE);
	dhd_os_send_hang_message(link_recovery);
}
EXPORT_SYMBOL(dhd_host_recover_link);
#endif /* EXYNOS_PCIE_LINKDOWN_RECOVERY */
int dhd_os_send_hang_message(dhd_pub_t *dhdp)
{
	int ret = 0;
#ifdef WL_CFG80211
	struct net_device *primary_ndev;
	struct bcm_cfg80211 *cfg;
#ifdef DHD_FILE_DUMP_EVENT
	dhd_info_t *dhd_info = NULL;
#endif /* DHD_FILE_DUMP_EVENT */
#endif /* WL_CFG80211 */

	if (!dhdp) {
		DHD_ERROR(("%s: dhdp is null\n", __FUNCTION__));
		return -EINVAL;
	}

#if defined(WL_CFG80211) && defined(DHD_FILE_DUMP_EVENT)
	dhd_info = (dhd_info_t *)dhdp->info;

	if (dhd_info->scheduled_memdump) {
		DHD_ERROR_RLMT(("[DUMP]:%s, memdump in progress. return\n", __FUNCTION__));
		dhdp->hang_was_pending = 1;
		return BCME_OK;
	}
#endif /* WL_CFG80211 && DHD_FILE_DUMP_EVENT */

#ifdef WL_CFG80211
	primary_ndev = dhd_linux_get_primary_netdev(dhdp);
	if (!primary_ndev) {
		DHD_ERROR(("%s: Cannot find primary netdev\n", __FUNCTION__));
		return -ENODEV;
	}

	cfg = wl_get_cfg(primary_ndev);
	if (!cfg) {
		DHD_ERROR(("%s: Cannot find cfg\n", __FUNCTION__));
		return -EINVAL;
	}

	/* Skip sending HANG event to framework if driver is not ready */
	if (!wl_get_drv_status(cfg, READY, primary_ndev)) {
		DHD_ERROR(("%s: device is not ready\n", __FUNCTION__));
		return -ENODEV;
	}
#endif /* WL_CFG80211 */

#if defined(DHD_HANG_SEND_UP_TEST)
	if (dhdp->req_hang_type) {
		DHD_ERROR(("%s, Clear HANG test request 0x%x\n",
			__FUNCTION__, dhdp->req_hang_type));
		dhdp->req_hang_type = 0;
	}
#endif /* DHD_HANG_SEND_UP_TEST */

	if (!dhdp->hang_was_sent) {
#if defined(CONFIG_BCM_DETECT_CONSECUTIVE_HANG)
		dhdp->hang_counts++;
		if (dhdp->hang_counts >= MAX_CONSECUTIVE_HANG_COUNTS) {
			DHD_ERROR(("%s, Consecutive hang from Dongle :%u\n",
				__func__, dhdp->hang_counts));
			BUG_ON(1);
		}
#endif /* CONFIG_BCM_DETECT_CONSECUTIVE_HANG */
#ifdef DHD_DEBUG_UART
		/* If the PCIe lane has broken, execute the debug uart application
		 * to gather a ramdump from the dongle via uart
		 */
		if (!dhdp->info->duart_execute) {
			dhd_deferred_schedule_work(dhdp->info->dhd_deferred_wq,
				(void *)dhdp, DHD_WQ_WORK_DEBUG_UART_DUMP,
				dhd_debug_uart_exec_rd, DHD_WQ_WORK_PRIORITY_HIGH);
		}
#endif /* DHD_DEBUG_UART */
		dhdp->hang_was_sent = 1;
#ifdef BT_OVER_SDIO
		dhdp->is_bt_recovery_required = TRUE;
#endif // endif
		schedule_work(&dhdp->info->dhd_hang_process_work);
	}
	return ret;
}
int net_os_send_hang_message(struct net_device *dev)
{
	dhd_info_t *dhd = DHD_DEV_INFO(dev);
	int ret = 0;

	if (dhd) {
		/* Report FW problem when enabled */
		if (dhd->pub.hang_report) {
#ifdef BT_OVER_SDIO
			if (netif_running(dev)) {
#endif /* BT_OVER_SDIO */
				ret = dhd_os_send_hang_message(&dhd->pub);
#ifdef BT_OVER_SDIO
			}
			DHD_ERROR(("%s: HANG -> Reset BT\n", __FUNCTION__));
			bcmsdh_btsdio_process_dhd_hang_notification(!netif_running(dev));
#endif /* BT_OVER_SDIO */
		} else {
			DHD_ERROR(("%s: FW HANG ignored (for testing purpose) and not sent up\n",
				__FUNCTION__));
		}
	}
	return ret;
}

int net_os_send_hang_message_reason(struct net_device *dev, const char *string_num)
{
	dhd_info_t *dhd = NULL;
	dhd_pub_t *dhdp = NULL;
	int reason;

	dhd = DHD_DEV_INFO(dev);
	if (dhd) {
		dhdp = &dhd->pub;
	}
	if (!dhd || !dhdp) {
		return 0;
	}

	reason = bcm_strtoul(string_num, NULL, 0);
	DHD_INFO(("%s: Enter, reason=0x%x\n", __FUNCTION__, reason));

	if ((reason <= HANG_REASON_MASK) || (reason >= HANG_REASON_MAX)) {
		reason = 0;
	}

	dhdp->hang_reason = reason;

	return net_os_send_hang_message(dev);
}
#endif /* OEM_ANDROID */
int dhd_net_wifi_platform_set_power(struct net_device *dev, bool on, unsigned long delay_msec)
{
	dhd_info_t *dhd = DHD_DEV_INFO(dev);
	return wifi_platform_set_power(dhd->adapter, on, delay_msec);
}

bool dhd_force_country_change(struct net_device *dev)
{
	dhd_info_t *dhd = DHD_DEV_INFO(dev);

	if (dhd && dhd->pub.up)
		return dhd->pub.force_country_change;
	return FALSE;
}

void dhd_get_customized_country_code(struct net_device *dev, char *country_iso_code,
	wl_country_t *cspec)
{
	dhd_info_t *dhd = DHD_DEV_INFO(dev);
#if defined(DHD_BLOB_EXISTENCE_CHECK)
	if (!dhd->pub.is_blob)
#endif /* DHD_BLOB_EXISTENCE_CHECK */
	{
#if defined(CUSTOM_COUNTRY_CODE)
		get_customized_country_code(dhd->adapter, country_iso_code, cspec,
			dhd->pub.dhd_cflags);
#else
		get_customized_country_code(dhd->adapter, country_iso_code, cspec);
#endif /* CUSTOM_COUNTRY_CODE */
	}
#if defined(DHD_BLOB_EXISTENCE_CHECK) && !defined(CUSTOM_COUNTRY_CODE)
	else {
		/* Replace the ccode with XZ if the country is undefined */
		if (strncmp(country_iso_code, "", WLC_CNTRY_BUF_SZ) == 0) {
			strlcpy(country_iso_code, "XZ", WLC_CNTRY_BUF_SZ);
			strlcpy(cspec->country_abbrev, country_iso_code, WLC_CNTRY_BUF_SZ);
			strlcpy(cspec->ccode, country_iso_code, WLC_CNTRY_BUF_SZ);
			DHD_ERROR(("%s: ccode change to %s\n", __FUNCTION__, country_iso_code));
		}
	}
#endif /* DHD_BLOB_EXISTENCE_CHECK && !CUSTOM_COUNTRY_CODE */

	BCM_REFERENCE(dhd);
}
void dhd_bus_country_set(struct net_device *dev, wl_country_t *cspec, bool notify)
{
	dhd_info_t *dhd = DHD_DEV_INFO(dev);
#ifdef WL_CFG80211
	struct bcm_cfg80211 *cfg = wl_get_cfg(dev);
#endif // endif

	if (dhd && dhd->pub.up) {
		memcpy(&dhd->pub.dhd_cspec, cspec, sizeof(wl_country_t));
#ifdef WL_CFG80211
		wl_update_wiphybands(cfg, notify);
#endif // endif
	}
}

void dhd_bus_band_set(struct net_device *dev, uint band)
{
	dhd_info_t *dhd = DHD_DEV_INFO(dev);
#ifdef WL_CFG80211
	struct bcm_cfg80211 *cfg = wl_get_cfg(dev);
#endif // endif

	if (dhd && dhd->pub.up) {
#ifdef WL_CFG80211
		wl_update_wiphybands(cfg, true);
#endif // endif
	}
}
int dhd_net_set_fw_path(struct net_device *dev, char *fw)
{
	dhd_info_t *dhd = DHD_DEV_INFO(dev);

	if (!fw || fw[0] == '\0')
		return -EINVAL;

	strncpy(dhd->fw_path, fw, sizeof(dhd->fw_path) - 1);
	dhd->fw_path[sizeof(dhd->fw_path)-1] = '\0';

#if defined(OEM_ANDROID) && defined(SOFTAP)
	if (strstr(fw, "apsta") != NULL) {
		DHD_INFO(("GOT APSTA FIRMWARE\n"));
		ap_fw_loaded = TRUE;
	} else {
		DHD_INFO(("GOT STA FIRMWARE\n"));
		ap_fw_loaded = FALSE;
	}
#endif /* defined(OEM_ANDROID) && defined(SOFTAP) */
	return 0;
}
void dhd_net_if_lock(struct net_device *dev)
{
	dhd_info_t *dhd = DHD_DEV_INFO(dev);
	dhd_net_if_lock_local(dhd);
}

void dhd_net_if_unlock(struct net_device *dev)
{
	dhd_info_t *dhd = DHD_DEV_INFO(dev);
	dhd_net_if_unlock_local(dhd);
}

static void dhd_net_if_lock_local(dhd_info_t *dhd)
{
#if defined(OEM_ANDROID)
	if (dhd)
		mutex_lock(&dhd->dhd_net_if_mutex);
#endif // endif
}

static void dhd_net_if_unlock_local(dhd_info_t *dhd)
{
#if defined(OEM_ANDROID)
	if (dhd)
		mutex_unlock(&dhd->dhd_net_if_mutex);
#endif // endif
}

static void dhd_suspend_lock(dhd_pub_t *pub)
{
#if defined(OEM_ANDROID)
	dhd_info_t *dhd = (dhd_info_t *)(pub->info);
	if (dhd)
		mutex_lock(&dhd->dhd_suspend_mutex);
#endif // endif
}

static void dhd_suspend_unlock(dhd_pub_t *pub)
{
#if defined(OEM_ANDROID)
	dhd_info_t *dhd = (dhd_info_t *)(pub->info);
	if (dhd)
		mutex_unlock(&dhd->dhd_suspend_mutex);
#endif // endif
}
unsigned long dhd_os_general_spin_lock(dhd_pub_t *pub)
{
	dhd_info_t *dhd = (dhd_info_t *)(pub->info);
	unsigned long flags = 0;

	if (dhd)
		spin_lock_irqsave(&dhd->dhd_lock, flags);

	return flags;
}

void dhd_os_general_spin_unlock(dhd_pub_t *pub, unsigned long flags)
{
	dhd_info_t *dhd = (dhd_info_t *)(pub->info);

	if (dhd)
		spin_unlock_irqrestore(&dhd->dhd_lock, flags);
}
/* Linux specific multipurpose spinlock API */
void *
dhd_os_spin_lock_init(osl_t *osh)
{
	/* Adding 4 bytes since sizeof(spinlock_t) could be 0
	 * if CONFIG_SMP and CONFIG_DEBUG_SPINLOCK are not defined,
	 * and this results in kernel asserts in internal builds.
	 */
	spinlock_t * lock = MALLOC(osh, sizeof(spinlock_t) + 4);
	if (lock)
		spin_lock_init(lock);
	return ((void *)lock);
}

void
dhd_os_spin_lock_deinit(osl_t *osh, void *lock)
{
	if (lock)
		MFREE(osh, lock, sizeof(spinlock_t) + 4);
}

unsigned long
dhd_os_spin_lock(void *lock)
{
	unsigned long flags = 0;

	if (lock)
		spin_lock_irqsave((spinlock_t *)lock, flags);

	return flags;
}

void
dhd_os_spin_unlock(void *lock, unsigned long flags)
{
	if (lock)
		spin_unlock_irqrestore((spinlock_t *)lock, flags);
}
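
/*
 * Usage sketch for the API above (illustrative only; not called from here):
 *
 *	void *lk = dhd_os_spin_lock_init(osh);
 *	unsigned long flags = dhd_os_spin_lock(lk);
 *	... critical section ...
 *	dhd_os_spin_unlock(lk, flags);
 *	dhd_os_spin_lock_deinit(osh, lk);
 */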
void *
dhd_os_dbgring_lock_init(osl_t *osh)
{
	struct mutex *mtx = NULL;

	mtx = MALLOCZ(osh, sizeof(*mtx));
	if (mtx)
		mutex_init(mtx);

	return mtx;
}

void
dhd_os_dbgring_lock_deinit(osl_t *osh, void *mtx)
{
	if (mtx) {
		mutex_destroy(mtx);
		MFREE(osh, mtx, sizeof(struct mutex));
	}
}

static int
dhd_get_pend_8021x_cnt(dhd_info_t *dhd)
{
	return (atomic_read(&dhd->pend_8021x_cnt));
}

#define MAX_WAIT_FOR_8021X_TX	100
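
/*
 * Worst case, the loop below waits MAX_WAIT_FOR_8021X_TX iterations of a
 * 10 ms timeout, i.e. roughly one second, for pending 802.1X frames to
 * drain. (Comment added for clarity.)
 */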
int
dhd_wait_pend8021x(struct net_device *dev)
{
	dhd_info_t *dhd = DHD_DEV_INFO(dev);
	int timeout = msecs_to_jiffies(10);
	int ntimes = MAX_WAIT_FOR_8021X_TX;
	int pend = dhd_get_pend_8021x_cnt(dhd);

	while (ntimes && pend) {
		if (pend) {
			set_current_state(TASK_INTERRUPTIBLE);
			DHD_PERIM_UNLOCK(&dhd->pub);
			schedule_timeout(timeout);
			DHD_PERIM_LOCK(&dhd->pub);
			set_current_state(TASK_RUNNING);
			ntimes--;
		}
		pend = dhd_get_pend_8021x_cnt(dhd);
	}
	if (ntimes == 0) {
		atomic_set(&dhd->pend_8021x_cnt, 0);
		DHD_ERROR(("%s: TIMEOUT\n", __FUNCTION__));
	}
	return pend;
}
#if defined(DHD_DEBUG)
int write_file(const char * file_name, uint32 flags, uint8 *buf, int size)
{
	int ret = 0;
	struct file *fp = NULL;
	mm_segment_t old_fs;
	loff_t pos = 0;

	/* change to KERNEL_DS address limit */
	old_fs = get_fs();
	set_fs(KERNEL_DS);

	/* open file to write */
	fp = filp_open(file_name, flags, 0664);
	if (IS_ERR(fp)) {
		DHD_ERROR(("open file error, err = %ld\n", PTR_ERR(fp)));
		ret = BCME_ERROR;
		goto exit;
	}

	/* Write buf to file */
	ret = vfs_write(fp, buf, size, &pos);
	if (ret < 0) {
		DHD_ERROR(("write file error, err = %d\n", ret));
		goto exit;
	}

	/* Sync file from filesystem to physical media */
	ret = vfs_fsync(fp, 0);
	if (ret < 0) {
		DHD_ERROR(("sync file error, error = %d\n", ret));
		goto exit;
	}
	ret = BCME_OK;

exit:
	/* close file before return */
	if (!IS_ERR(fp))
		filp_close(fp, current->files);

	/* restore previous address limit */
	set_fs(old_fs);

	return ret;
}
  13732. #endif // endif
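/*
 * Note: get_fs()/set_fs(KERNEL_DS) was removed from mainline around v5.10,
 * so the helper above does not build on recent kernels. A minimal sketch of
 * the same operation using kernel_write() (available with this signature
 * since roughly v4.14) is shown below; this is an illustrative alternative,
 * not code used by this driver:
 *
 *    static int write_file_sketch(const char *file_name, uint32 flags,
 *            uint8 *buf, int size)
 *    {
 *        struct file *fp;
 *        loff_t pos = 0;
 *        ssize_t ret;
 *
 *        fp = filp_open(file_name, flags, 0664);
 *        if (IS_ERR(fp))
 *            return BCME_ERROR;
 *        ret = kernel_write(fp, buf, size, &pos);   // no set_fs() needed
 *        if (ret >= 0)
 *            ret = vfs_fsync(fp, 0);
 *        filp_close(fp, NULL);
 *        return (ret < 0) ? BCME_ERROR : BCME_OK;
 *    }
 */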
#ifdef DHD_DEBUG
static void
dhd_convert_memdump_type_to_str(uint32 type, char *buf, int substr_type)
{
    char *type_str = NULL;

    switch (type) {
    case DUMP_TYPE_RESUMED_ON_TIMEOUT:
        type_str = "resumed_on_timeout";
        break;
    case DUMP_TYPE_D3_ACK_TIMEOUT:
        type_str = "D3_ACK_timeout";
        break;
    case DUMP_TYPE_DONGLE_TRAP:
        type_str = "Dongle_Trap";
        break;
    case DUMP_TYPE_MEMORY_CORRUPTION:
        type_str = "Memory_Corruption";
        break;
    case DUMP_TYPE_PKTID_AUDIT_FAILURE:
        type_str = "PKTID_AUDIT_Fail";
        break;
    case DUMP_TYPE_PKTID_INVALID:
        type_str = "PKTID_INVALID";
        break;
    case DUMP_TYPE_SCAN_TIMEOUT:
        type_str = "SCAN_timeout";
        break;
    case DUMP_TYPE_SCAN_BUSY:
        type_str = "SCAN_Busy";
        break;
    case DUMP_TYPE_BY_SYSDUMP:
        if (substr_type == CMD_UNWANTED) {
            type_str = "BY_SYSDUMP_FORUSER_unwanted";
        } else if (substr_type == CMD_DISCONNECTED) {
            type_str = "BY_SYSDUMP_FORUSER_disconnected";
        } else {
            type_str = "BY_SYSDUMP_FORUSER";
        }
        break;
    case DUMP_TYPE_BY_LIVELOCK:
        type_str = "BY_LIVELOCK";
        break;
    case DUMP_TYPE_AP_LINKUP_FAILURE:
        type_str = "BY_AP_LINK_FAILURE";
        break;
    case DUMP_TYPE_AP_ABNORMAL_ACCESS:
        type_str = "INVALID_ACCESS";
        break;
    case DUMP_TYPE_RESUMED_ON_TIMEOUT_RX:
        type_str = "ERROR_RX_TIMED_OUT";
        break;
    case DUMP_TYPE_RESUMED_ON_TIMEOUT_TX:
        type_str = "ERROR_TX_TIMED_OUT";
        break;
    case DUMP_TYPE_CFG_VENDOR_TRIGGERED:
        type_str = "CFG_VENDOR_TRIGGERED";
        break;
    case DUMP_TYPE_RESUMED_ON_INVALID_RING_RDWR:
        type_str = "BY_INVALID_RING_RDWR";
        break;
    case DUMP_TYPE_IFACE_OP_FAILURE:
        type_str = "BY_IFACE_OP_FAILURE";
        break;
    case DUMP_TYPE_TRANS_ID_MISMATCH:
        type_str = "BY_TRANS_ID_MISMATCH";
        break;
#ifdef DEBUG_DNGL_INIT_FAIL
    case DUMP_TYPE_DONGLE_INIT_FAILURE:
        type_str = "DONGLE_INIT_FAIL";
        break;
#endif /* DEBUG_DNGL_INIT_FAIL */
#ifdef SUPPORT_LINKDOWN_RECOVERY
    case DUMP_TYPE_READ_SHM_FAIL:
        type_str = "READ_SHM_FAIL";
        break;
#endif /* SUPPORT_LINKDOWN_RECOVERY */
    case DUMP_TYPE_DONGLE_HOST_EVENT:
        type_str = "BY_DONGLE_HOST_EVENT";
        break;
    case DUMP_TYPE_SMMU_FAULT:
        type_str = "SMMU_FAULT";
        break;
    case DUMP_TYPE_BY_USER:
        type_str = "BY_USER";
        break;
#ifdef DHD_ERPOM
    case DUMP_TYPE_DUE_TO_BT:
        type_str = "DUE_TO_BT";
        break;
#endif /* DHD_ERPOM */
    case DUMP_TYPE_LOGSET_BEYOND_RANGE:
        type_str = "LOGSET_BEYOND_RANGE";
        break;
    case DUMP_TYPE_CTO_RECOVERY:
        type_str = "CTO_RECOVERY";
        break;
    case DUMP_TYPE_SEQUENTIAL_PRIVCMD_ERROR:
        type_str = "SEQUENTIAL_PRIVCMD_ERROR";
        break;
    case DUMP_TYPE_PROXD_TIMEOUT:
        type_str = "PROXD_TIMEOUT";
        break;
    case DUMP_TYPE_PKTID_POOL_DEPLETED:
        type_str = "PKTID_POOL_DEPLETED";
        break;
    default:
        type_str = "Unknown_type";
        break;
    }

    strncpy(buf, type_str, strlen(type_str));
    buf[strlen(type_str)] = 0;
}
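/*
 * Example (illustrative only): converting a dump reason for the filename
 * used by write_dump_to_file(). Callers pass a 32-byte buffer, which the
 * longest string above ("BY_SYSDUMP_FORUSER_disconnected", 31 chars) fills
 * exactly with its NUL terminator:
 *
 *    char memdump_type[32];
 *    dhd_convert_memdump_type_to_str(DUMP_TYPE_DONGLE_TRAP, memdump_type, 0);
 *    // memdump_type now holds "Dongle_Trap"
 */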
void
dhd_get_memdump_filename(struct net_device *ndev, char *memdump_path, int len, char *fname)
{
    char memdump_type[32];
    dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(ndev);
    dhd_pub_t *dhdp = &dhd->pub;

    /* Init file name */
    memset(memdump_path, 0, len);
    memset(memdump_type, 0, sizeof(memdump_type));
    dhd_convert_memdump_type_to_str(dhdp->memdump_type, memdump_type, dhdp->debug_dump_subcmd);
    clear_debug_dump_time(dhdp->debug_dump_time_str);
    get_debug_dump_time(dhdp->debug_dump_time_str);
#ifdef CUSTOMER_HW4_DEBUG
    snprintf(memdump_path, len, "%s%s_%s_" "%s",
        DHD_COMMON_DUMP_PATH, fname, memdump_type, dhdp->debug_dump_time_str);
#elif defined(CUSTOMER_HW2) || defined(BOARD_HIKEY)
    snprintf(memdump_path, len, "%s%s_%s_" "%s",
        DHD_COMMON_DUMP_PATH, fname, memdump_type, dhdp->debug_dump_time_str);
#elif defined(OEM_ANDROID) && (defined(BOARD_PANDA) || defined(__ARM_ARCH_7A__))
    snprintf(memdump_path, len, "%s%s_%s_" "%s",
        DHD_COMMON_DUMP_PATH, fname, memdump_type, dhdp->debug_dump_time_str);
#elif defined(OEM_ANDROID)
    snprintf(memdump_path, len, "%s%s_%s_" "%s",
        DHD_COMMON_DUMP_PATH, fname, memdump_type, dhdp->debug_dump_time_str);
#else
    snprintf(memdump_path, len, "%s%s_%s_" "%s",
        DHD_COMMON_DUMP_PATH, fname, memdump_type, dhdp->debug_dump_time_str);
#endif /* CUSTOMER_HW4_DEBUG */
    if (strstr(fname, "sssr_dump")) {
        DHD_SSSR_PRINT_FILEPATH(dhdp, memdump_path);
    } else {
        DHD_ERROR(("%s: file_path = %s%s\n", __FUNCTION__,
            memdump_path, FILE_NAME_HAL_TAG));
    }
}
int
write_dump_to_file(dhd_pub_t *dhd, uint8 *buf, int size, char *fname)
{
    int ret = 0;
    char memdump_path[128];
    char memdump_type[32];
    uint32 file_mode;

    /* Init file name */
    memset(memdump_path, 0, sizeof(memdump_path));
    memset(memdump_type, 0, sizeof(memdump_type));
    dhd_convert_memdump_type_to_str(dhd->memdump_type, memdump_type, dhd->debug_dump_subcmd);
    clear_debug_dump_time(dhd->debug_dump_time_str);
    get_debug_dump_time(dhd->debug_dump_time_str);
#ifdef CUSTOMER_HW4_DEBUG
    snprintf(memdump_path, sizeof(memdump_path), "%s%s_%s_" "%s",
        DHD_COMMON_DUMP_PATH, fname, memdump_type, dhd->debug_dump_time_str);
    file_mode = O_CREAT | O_WRONLY | O_SYNC;
#elif defined(CUSTOMER_HW2) || defined(BOARD_HIKEY)
    snprintf(memdump_path, sizeof(memdump_path), "%s%s_%s_" "%s",
        DHD_COMMON_DUMP_PATH, fname, memdump_type, dhd->debug_dump_time_str);
    file_mode = O_CREAT | O_WRONLY | O_SYNC;
#elif defined(OEM_ANDROID) && (defined(BOARD_PANDA) || defined(__ARM_ARCH_7A__))
    snprintf(memdump_path, sizeof(memdump_path), "%s%s_%s_" "%s",
        DHD_COMMON_DUMP_PATH, fname, memdump_type, dhd->debug_dump_time_str);
    file_mode = O_CREAT | O_WRONLY;
#elif defined(OEM_ANDROID)
    snprintf(memdump_path, sizeof(memdump_path), "%s%s_%s_" "%s",
        DHD_COMMON_DUMP_PATH, fname, memdump_type, dhd->debug_dump_time_str);
    /* Extra flags O_DIRECT and O_SYNC are required for Brix Android, as we are
     * calling BUG_ON immediately after collecting the socram dump.
     * So the file write operation should directly write the contents into the
     * file instead of caching it. O_TRUNC flag ensures that file will be re-written
     * instead of appending.
     */
    file_mode = O_CREAT | O_WRONLY | O_SYNC;
    {
        struct file *fp = filp_open(memdump_path, file_mode, 0664);
        /* Check if it is live Brix image having /installmedia, else use /data */
        if (IS_ERR(fp)) {
            DHD_ERROR(("open file %s, try /data/\n", memdump_path));
            snprintf(memdump_path, sizeof(memdump_path), "%s%s_%s_" "%s",
                "/data/", fname, memdump_type, dhd->debug_dump_time_str);
        } else {
            filp_close(fp, NULL);
        }
    }
#else
    snprintf(memdump_path, sizeof(memdump_path), "%s%s_%s_" "%s",
        DHD_COMMON_DUMP_PATH, fname, memdump_type, dhd->debug_dump_time_str);
    file_mode = O_CREAT | O_WRONLY;
#endif /* CUSTOMER_HW4_DEBUG */

    /* print SOCRAM dump file path */
    DHD_ERROR(("%s: file_path = %s\n", __FUNCTION__, memdump_path));

#ifdef DHD_LOG_DUMP
    dhd_print_buf_addr(dhd, "write_dump_to_file", buf, size);
#endif /* DHD_LOG_DUMP */

    /* Write file */
    ret = write_file(memdump_path, file_mode, buf, size);

#ifdef DHD_DUMP_MNGR
    if (ret == BCME_OK) {
        dhd_dump_file_manage_enqueue(dhd, memdump_path, fname);
    }
#endif /* DHD_DUMP_MNGR */

    return ret;
}
#endif /* DHD_DEBUG */
int dhd_os_wake_lock_timeout(dhd_pub_t *pub)
{
    dhd_info_t *dhd = (dhd_info_t *)(pub->info);
    unsigned long flags;
    int ret = 0;

    if (dhd && (dhd->dhd_state & DHD_ATTACH_STATE_WAKELOCKS_INIT)) {
        spin_lock_irqsave(&dhd->wakelock_spinlock, flags);
        ret = dhd->wakelock_rx_timeout_enable > dhd->wakelock_ctrl_timeout_enable ?
            dhd->wakelock_rx_timeout_enable : dhd->wakelock_ctrl_timeout_enable;
#if defined(CONFIG_PM_WAKELOCKS) || defined(CONFIG_HAS_WAKELOCK)
        if (dhd->wakelock_rx_timeout_enable)
            wake_lock_timeout(&dhd->wl_rxwake,
                msecs_to_jiffies(dhd->wakelock_rx_timeout_enable));
        if (dhd->wakelock_ctrl_timeout_enable)
            wake_lock_timeout(&dhd->wl_ctrlwake,
                msecs_to_jiffies(dhd->wakelock_ctrl_timeout_enable));
#endif // endif
        dhd->wakelock_rx_timeout_enable = 0;
        dhd->wakelock_ctrl_timeout_enable = 0;
        spin_unlock_irqrestore(&dhd->wakelock_spinlock, flags);
    }
    return ret;
}

int net_os_wake_lock_timeout(struct net_device *dev)
{
    dhd_info_t *dhd = DHD_DEV_INFO(dev);
    int ret = 0;

    if (dhd)
        ret = dhd_os_wake_lock_timeout(&dhd->pub);
    return ret;
}

int dhd_os_wake_lock_rx_timeout_enable(dhd_pub_t *pub, int val)
{
    dhd_info_t *dhd = (dhd_info_t *)(pub->info);
    unsigned long flags;

    if (dhd && (dhd->dhd_state & DHD_ATTACH_STATE_WAKELOCKS_INIT)) {
        spin_lock_irqsave(&dhd->wakelock_spinlock, flags);
        if (val > dhd->wakelock_rx_timeout_enable)
            dhd->wakelock_rx_timeout_enable = val;
        spin_unlock_irqrestore(&dhd->wakelock_spinlock, flags);
    }
    return 0;
}

int dhd_os_wake_lock_ctrl_timeout_enable(dhd_pub_t *pub, int val)
{
    dhd_info_t *dhd = (dhd_info_t *)(pub->info);
    unsigned long flags;

    if (dhd && (dhd->dhd_state & DHD_ATTACH_STATE_WAKELOCKS_INIT)) {
        spin_lock_irqsave(&dhd->wakelock_spinlock, flags);
        if (val > dhd->wakelock_ctrl_timeout_enable)
            dhd->wakelock_ctrl_timeout_enable = val;
        spin_unlock_irqrestore(&dhd->wakelock_spinlock, flags);
    }
    return 0;
}

int dhd_os_wake_lock_ctrl_timeout_cancel(dhd_pub_t *pub)
{
    dhd_info_t *dhd = (dhd_info_t *)(pub->info);
    unsigned long flags;

    if (dhd && (dhd->dhd_state & DHD_ATTACH_STATE_WAKELOCKS_INIT)) {
        spin_lock_irqsave(&dhd->wakelock_spinlock, flags);
        dhd->wakelock_ctrl_timeout_enable = 0;
#if defined(CONFIG_PM_WAKELOCKS) || defined(CONFIG_HAS_WAKELOCK)
        if (wake_lock_active(&dhd->wl_ctrlwake))
            wake_unlock(&dhd->wl_ctrlwake);
#endif // endif
        spin_unlock_irqrestore(&dhd->wakelock_spinlock, flags);
    }
    return 0;
}

int net_os_wake_lock_rx_timeout_enable(struct net_device *dev, int val)
{
    dhd_info_t *dhd = DHD_DEV_INFO(dev);
    int ret = 0;

    if (dhd)
        ret = dhd_os_wake_lock_rx_timeout_enable(&dhd->pub, val);
    return ret;
}

int net_os_wake_lock_ctrl_timeout_enable(struct net_device *dev, int val)
{
    dhd_info_t *dhd = DHD_DEV_INFO(dev);
    int ret = 0;

    if (dhd)
        ret = dhd_os_wake_lock_ctrl_timeout_enable(&dhd->pub, val);
    return ret;
}
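/*
 * Usage sketch (illustrative only): the RX/ctrl timeout wake locks are armed
 * in two steps. A producer records the desired hold time in milliseconds,
 * and the next dhd_os_wake_lock_timeout() call, made from
 * dhd_os_wake_unlock(), converts it into a timed wake lock:
 *
 *    dhd_os_wake_lock_rx_timeout_enable(pub, 200);  // request a 200 ms hold
 *    // ...
 *    dhd_os_wake_unlock(pub);  // takes wl_rxwake for 200 ms via
 *                              // dhd_os_wake_lock_timeout()
 */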
#if defined(DHD_TRACE_WAKE_LOCK)
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 7, 0))
#include <linux/hashtable.h>
#else
#include <linux/hash.h>
#endif /* KERNEL_VER >= KERNEL_VERSION(3, 7, 0) */

#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 7, 0))
/* Define 2^5 = 32 bucket size hash table */
DEFINE_HASHTABLE(wklock_history, 5);
#else
/* Define 2^5 = 32 bucket size hash table */
struct hlist_head wklock_history[32] = { [0 ... 31] = HLIST_HEAD_INIT };
#endif /* KERNEL_VER >= KERNEL_VERSION(3, 7, 0) */

atomic_t trace_wklock_onoff;

typedef enum dhd_wklock_type {
    DHD_WAKE_LOCK,
    DHD_WAKE_UNLOCK,
    DHD_WAIVE_LOCK,
    DHD_RESTORE_LOCK
} dhd_wklock_t;

struct wk_trace_record {
    unsigned long addr;            /* Address of the instruction */
    dhd_wklock_t lock_type;        /* lock_type */
    unsigned long long counter;    /* counter information */
    struct hlist_node wklock_node; /* hash node */
};

static struct wk_trace_record *find_wklock_entry(unsigned long addr)
{
    struct wk_trace_record *wklock_info;
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 7, 0))
    hash_for_each_possible(wklock_history, wklock_info, wklock_node, addr)
#else
    struct hlist_node *entry;
    int index = hash_long(addr, ilog2(ARRAY_SIZE(wklock_history)));
    hlist_for_each_entry(wklock_info, entry, &wklock_history[index], wklock_node)
#endif /* KERNEL_VER >= KERNEL_VERSION(3, 7, 0) */
    {
        if (wklock_info->addr == addr) {
            return wklock_info;
        }
    }
    return NULL;
}

#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 7, 0))
#define HASH_ADD(hashtable, node, key) \
    do { \
        hash_add(hashtable, node, key); \
    } while (0)
#else
#define HASH_ADD(hashtable, node, key) \
    do { \
        int index = hash_long(key, ilog2(ARRAY_SIZE(hashtable))); \
        hlist_add_head(node, &hashtable[index]); \
    } while (0)
#endif /* KERNEL_VER < KERNEL_VERSION(3, 7, 0) */

#define STORE_WKLOCK_RECORD(wklock_type) \
    do { \
        struct wk_trace_record *wklock_info = NULL; \
        unsigned long func_addr = (unsigned long)__builtin_return_address(0); \
        wklock_info = find_wklock_entry(func_addr); \
        if (wklock_info) { \
            if (wklock_type == DHD_WAIVE_LOCK || wklock_type == DHD_RESTORE_LOCK) { \
                wklock_info->counter = dhd->wakelock_counter; \
            } else { \
                wklock_info->counter++; \
            } \
        } else { \
            wklock_info = kzalloc(sizeof(*wklock_info), GFP_ATOMIC); \
            if (!wklock_info) { \
                printk("Can't allocate wk_trace_record \n"); \
            } else { \
                wklock_info->addr = func_addr; \
                wklock_info->lock_type = wklock_type; \
                if (wklock_type == DHD_WAIVE_LOCK || \
                        wklock_type == DHD_RESTORE_LOCK) { \
                    wklock_info->counter = dhd->wakelock_counter; \
                } else { \
                    wklock_info->counter++; \
                } \
                HASH_ADD(wklock_history, &wklock_info->wklock_node, func_addr); \
            } \
        } \
    } while (0)
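/*
 * Illustrative use (not new driver code): inside dhd_os_wake_lock() the
 * macro records the caller of the wake-lock API, keyed by return address:
 *
 *    if (atomic_read(&trace_wklock_onoff)) {
 *        STORE_WKLOCK_RECORD(DHD_WAKE_LOCK);
 *    }
 *
 * __builtin_return_address(0) yields the call site, so each distinct caller
 * gets one wk_trace_record whose counter tracks how often it locked or
 * unlocked; dhd_wk_lock_stats_dump() later prints these records via %pS.
 */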
static inline void dhd_wk_lock_rec_dump(void)
{
    int bkt;
    struct wk_trace_record *wklock_info;

#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 7, 0))
    hash_for_each(wklock_history, bkt, wklock_info, wklock_node)
#else
    struct hlist_node *entry = NULL;
    int max_index = ARRAY_SIZE(wklock_history);
    for (bkt = 0; bkt < max_index; bkt++)
        hlist_for_each_entry(wklock_info, entry, &wklock_history[bkt], wklock_node)
#endif /* KERNEL_VER >= KERNEL_VERSION(3, 7, 0) */
    {
        switch (wklock_info->lock_type) {
        case DHD_WAKE_LOCK:
            printk("wakelock lock : %pS lock_counter : %llu \n",
                (void *)wklock_info->addr, wklock_info->counter);
            break;
        case DHD_WAKE_UNLOCK:
            printk("wakelock unlock : %pS, unlock_counter : %llu \n",
                (void *)wklock_info->addr, wklock_info->counter);
            break;
        case DHD_WAIVE_LOCK:
            printk("wakelock waive : %pS before_waive : %llu \n",
                (void *)wklock_info->addr, wklock_info->counter);
            break;
        case DHD_RESTORE_LOCK:
            printk("wakelock restore : %pS, after_waive : %llu \n",
                (void *)wklock_info->addr, wklock_info->counter);
            break;
        }
    }
}

static void dhd_wk_lock_trace_init(struct dhd_info *dhd)
{
    unsigned long flags;
#if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 7, 0))
    int i;
#endif /* KERNEL_VER < KERNEL_VERSION(3, 7, 0) */

    spin_lock_irqsave(&dhd->wakelock_spinlock, flags);
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 7, 0))
    hash_init(wklock_history);
#else
    for (i = 0; i < ARRAY_SIZE(wklock_history); i++)
        INIT_HLIST_HEAD(&wklock_history[i]);
#endif /* KERNEL_VER >= KERNEL_VERSION(3, 7, 0) */
    spin_unlock_irqrestore(&dhd->wakelock_spinlock, flags);

    atomic_set(&trace_wklock_onoff, 1);
}

static void dhd_wk_lock_trace_deinit(struct dhd_info *dhd)
{
    int bkt;
    struct wk_trace_record *wklock_info;
    struct hlist_node *tmp;
    unsigned long flags;
#if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 7, 0))
    struct hlist_node *entry = NULL;
    int max_index = ARRAY_SIZE(wklock_history);
#endif /* KERNEL_VER < KERNEL_VERSION(3, 7, 0) */

    spin_lock_irqsave(&dhd->wakelock_spinlock, flags);
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 7, 0))
    hash_for_each_safe(wklock_history, bkt, tmp, wklock_info, wklock_node)
#else
    for (bkt = 0; bkt < max_index; bkt++)
        hlist_for_each_entry_safe(wklock_info, entry, tmp,
            &wklock_history[bkt], wklock_node)
#endif /* KERNEL_VER >= KERNEL_VERSION(3, 7, 0) */
    {
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 7, 0))
        hash_del(&wklock_info->wklock_node);
#else
        hlist_del_init(&wklock_info->wklock_node);
#endif /* KERNEL_VER >= KERNEL_VERSION(3, 7, 0) */
        kfree(wklock_info);
    }
    spin_unlock_irqrestore(&dhd->wakelock_spinlock, flags);
}

void dhd_wk_lock_stats_dump(dhd_pub_t *dhdp)
{
    dhd_info_t *dhd = (dhd_info_t *)(dhdp->info);
    unsigned long flags;

    printk(KERN_ERR "DHD Printing wl_wake Lock/Unlock Record \r\n");
    spin_lock_irqsave(&dhd->wakelock_spinlock, flags);
    dhd_wk_lock_rec_dump();
    spin_unlock_irqrestore(&dhd->wakelock_spinlock, flags);
}
#else
#define STORE_WKLOCK_RECORD(wklock_type)
#endif /* ! DHD_TRACE_WAKE_LOCK */
int dhd_os_wake_lock(dhd_pub_t *pub)
{
    dhd_info_t *dhd = (dhd_info_t *)(pub->info);
    unsigned long flags;
    int ret = 0;

    if (dhd && (dhd->dhd_state & DHD_ATTACH_STATE_WAKELOCKS_INIT)) {
        spin_lock_irqsave(&dhd->wakelock_spinlock, flags);
        if (dhd->wakelock_counter == 0 && !dhd->waive_wakelock) {
#if defined(CONFIG_PM_WAKELOCKS) || defined(CONFIG_HAS_WAKELOCK)
            wake_lock(&dhd->wl_wifi);
#elif defined(BCMSDIO)
            dhd_bus_dev_pm_stay_awake(pub);
#endif // endif
        }
#ifdef DHD_TRACE_WAKE_LOCK
        if (atomic_read(&trace_wklock_onoff)) {
            STORE_WKLOCK_RECORD(DHD_WAKE_LOCK);
        }
#endif /* DHD_TRACE_WAKE_LOCK */
        dhd->wakelock_counter++;
        ret = dhd->wakelock_counter;
        spin_unlock_irqrestore(&dhd->wakelock_spinlock, flags);
    }

    return ret;
}

void dhd_event_wake_lock(dhd_pub_t *pub)
{
    dhd_info_t *dhd = (dhd_info_t *)(pub->info);

    if (dhd) {
#if defined(CONFIG_PM_WAKELOCKS) || defined(CONFIG_HAS_WAKELOCK)
        wake_lock(&dhd->wl_evtwake);
#elif defined(BCMSDIO)
        dhd_bus_dev_pm_stay_awake(pub);
#endif // endif
    }
}

void
dhd_pm_wake_lock_timeout(dhd_pub_t *pub, int val)
{
#if defined(CONFIG_PM_WAKELOCKS) || defined(CONFIG_HAS_WAKELOCK)
    dhd_info_t *dhd = (dhd_info_t *)(pub->info);

    if (dhd) {
        wake_lock_timeout(&dhd->wl_pmwake, msecs_to_jiffies(val));
    }
#endif /* CONFIG_PM_WAKELOCKS || CONFIG_HAS_WAKELOCK */
}

void
dhd_txfl_wake_lock_timeout(dhd_pub_t *pub, int val)
{
#if defined(CONFIG_PM_WAKELOCKS) || defined(CONFIG_HAS_WAKELOCK)
    dhd_info_t *dhd = (dhd_info_t *)(pub->info);

    if (dhd) {
        wake_lock_timeout(&dhd->wl_txflwake, msecs_to_jiffies(val));
    }
#endif /* CONFIG_PM_WAKELOCKS || CONFIG_HAS_WAKELOCK */
}

int net_os_wake_lock(struct net_device *dev)
{
    dhd_info_t *dhd = DHD_DEV_INFO(dev);
    int ret = 0;

    if (dhd)
        ret = dhd_os_wake_lock(&dhd->pub);
    return ret;
}

int dhd_os_wake_unlock(dhd_pub_t *pub)
{
    dhd_info_t *dhd = (dhd_info_t *)(pub->info);
    unsigned long flags;
    int ret = 0;

    dhd_os_wake_lock_timeout(pub);
    if (dhd && (dhd->dhd_state & DHD_ATTACH_STATE_WAKELOCKS_INIT)) {
        spin_lock_irqsave(&dhd->wakelock_spinlock, flags);
        if (dhd->wakelock_counter > 0) {
            dhd->wakelock_counter--;
#ifdef DHD_TRACE_WAKE_LOCK
            if (atomic_read(&trace_wklock_onoff)) {
                STORE_WKLOCK_RECORD(DHD_WAKE_UNLOCK);
            }
#endif /* DHD_TRACE_WAKE_LOCK */
            if (dhd->wakelock_counter == 0 && !dhd->waive_wakelock) {
#if defined(CONFIG_PM_WAKELOCKS) || defined(CONFIG_HAS_WAKELOCK)
                wake_unlock(&dhd->wl_wifi);
#elif defined(BCMSDIO)
                dhd_bus_dev_pm_relax(pub);
#endif // endif
            }
            ret = dhd->wakelock_counter;
        }
        spin_unlock_irqrestore(&dhd->wakelock_spinlock, flags);
    }
    return ret;
}

void dhd_event_wake_unlock(dhd_pub_t *pub)
{
    dhd_info_t *dhd = (dhd_info_t *)(pub->info);

    if (dhd) {
#if defined(CONFIG_PM_WAKELOCKS) || defined(CONFIG_HAS_WAKELOCK)
        wake_unlock(&dhd->wl_evtwake);
#elif defined(BCMSDIO)
        dhd_bus_dev_pm_relax(pub);
#endif // endif
    }
}

void dhd_pm_wake_unlock(dhd_pub_t *pub)
{
#if defined(CONFIG_PM_WAKELOCKS) || defined(CONFIG_HAS_WAKELOCK)
    dhd_info_t *dhd = (dhd_info_t *)(pub->info);

    if (dhd) {
        /* if wl_pmwake is active, unlock it */
        if (wake_lock_active(&dhd->wl_pmwake)) {
            wake_unlock(&dhd->wl_pmwake);
        }
    }
#endif /* CONFIG_PM_WAKELOCKS || CONFIG_HAS_WAKELOCK */
}

void dhd_txfl_wake_unlock(dhd_pub_t *pub)
{
#if defined(CONFIG_PM_WAKELOCKS) || defined(CONFIG_HAS_WAKELOCK)
    dhd_info_t *dhd = (dhd_info_t *)(pub->info);

    if (dhd) {
        /* if wl_txflwake is active, unlock it */
        if (wake_lock_active(&dhd->wl_txflwake)) {
            wake_unlock(&dhd->wl_txflwake);
        }
    }
#endif /* CONFIG_PM_WAKELOCKS || CONFIG_HAS_WAKELOCK */
}
int dhd_os_check_wakelock(dhd_pub_t *pub)
{
#if defined(CONFIG_PM_WAKELOCKS) || defined(CONFIG_HAS_WAKELOCK) || defined(BCMSDIO)
    dhd_info_t *dhd;

    if (!pub)
        return 0;
    dhd = (dhd_info_t *)(pub->info);
#endif /* CONFIG_PM_WAKELOCKS || CONFIG_HAS_WAKELOCK || BCMSDIO */

#if defined(CONFIG_PM_WAKELOCKS) || defined(CONFIG_HAS_WAKELOCK)
    /* Indicate to the SD Host to avoid going to suspend if internal locks are up */
    if (dhd && (wake_lock_active(&dhd->wl_wifi) ||
        (wake_lock_active(&dhd->wl_wdwake))))
        return 1;
#elif defined(BCMSDIO)
    if (dhd && (dhd->wakelock_counter > 0) && dhd_bus_dev_pm_enabled(pub))
        return 1;
#endif // endif
    return 0;
}

int
dhd_os_check_wakelock_all(dhd_pub_t *pub)
{
#if defined(CONFIG_PM_WAKELOCKS) || defined(CONFIG_HAS_WAKELOCK) || defined(BCMSDIO)
#if defined(CONFIG_PM_WAKELOCKS) || defined(CONFIG_HAS_WAKELOCK)
    int l1, l2, l3, l4, l7, l8, l9;
    int l5 = 0, l6 = 0;
    int c, lock_active;
#endif /* CONFIG_PM_WAKELOCKS || CONFIG_HAS_WAKELOCK */
    dhd_info_t *dhd;

    if (!pub) {
        return 0;
    }
    dhd = (dhd_info_t *)(pub->info);
    if (!dhd) {
        return 0;
    }
#endif /* CONFIG_PM_WAKELOCKS || CONFIG_HAS_WAKELOCK || BCMSDIO */

#if defined(CONFIG_PM_WAKELOCKS) || defined(CONFIG_HAS_WAKELOCK)
    c = dhd->wakelock_counter;
    l1 = wake_lock_active(&dhd->wl_wifi);
    l2 = wake_lock_active(&dhd->wl_wdwake);
    l3 = wake_lock_active(&dhd->wl_rxwake);
    l4 = wake_lock_active(&dhd->wl_ctrlwake);
    l7 = wake_lock_active(&dhd->wl_evtwake);
#ifdef BCMPCIE_OOB_HOST_WAKE
    l5 = wake_lock_active(&dhd->wl_intrwake);
#endif /* BCMPCIE_OOB_HOST_WAKE */
#ifdef DHD_USE_SCAN_WAKELOCK
    l6 = wake_lock_active(&dhd->wl_scanwake);
#endif /* DHD_USE_SCAN_WAKELOCK */
    l8 = wake_lock_active(&dhd->wl_pmwake);
    l9 = wake_lock_active(&dhd->wl_txflwake);
    lock_active = (l1 || l2 || l3 || l4 || l5 || l6 || l7 || l8 || l9);

    /* Indicate to the Host to avoid going to suspend if internal locks are up */
    if (lock_active) {
        DHD_ERROR(("%s wakelock c-%d wl-%d wd-%d rx-%d "
            "ctl-%d intr-%d scan-%d evt-%d, pm-%d, txfl-%d\n",
            __FUNCTION__, c, l1, l2, l3, l4, l5, l6, l7, l8, l9));
        return 1;
    }
#elif defined(BCMSDIO)
    if (dhd && (dhd->wakelock_counter > 0) && dhd_bus_dev_pm_enabled(pub)) {
        return 1;
    }
#endif /* defined(BCMSDIO) */
    return 0;
}

int net_os_wake_unlock(struct net_device *dev)
{
    dhd_info_t *dhd = DHD_DEV_INFO(dev);
    int ret = 0;

    if (dhd)
        ret = dhd_os_wake_unlock(&dhd->pub);
    return ret;
}
int dhd_os_wd_wake_lock(dhd_pub_t *pub)
{
    dhd_info_t *dhd = (dhd_info_t *)(pub->info);
    unsigned long flags;
    int ret = 0;

    if (dhd) {
        spin_lock_irqsave(&dhd->wakelock_spinlock, flags);
        if (dhd->wakelock_wd_counter == 0 && !dhd->waive_wakelock) {
#if defined(CONFIG_PM_WAKELOCKS) || defined(CONFIG_HAS_WAKELOCK)
            /* if wakelock_wd_counter was never used : lock it at once */
            wake_lock(&dhd->wl_wdwake);
#endif /* CONFIG_PM_WAKELOCKS || CONFIG_HAS_WAKELOCK */
        }
        dhd->wakelock_wd_counter++;
        ret = dhd->wakelock_wd_counter;
        spin_unlock_irqrestore(&dhd->wakelock_spinlock, flags);
    }
    return ret;
}

int dhd_os_wd_wake_unlock(dhd_pub_t *pub)
{
    dhd_info_t *dhd = (dhd_info_t *)(pub->info);
    unsigned long flags;
    int ret = 0;

    if (dhd) {
        spin_lock_irqsave(&dhd->wakelock_spinlock, flags);
        if (dhd->wakelock_wd_counter > 0) {
            dhd->wakelock_wd_counter = 0;
            if (!dhd->waive_wakelock) {
#if defined(CONFIG_PM_WAKELOCKS) || defined(CONFIG_HAS_WAKELOCK)
                wake_unlock(&dhd->wl_wdwake);
#endif /* CONFIG_PM_WAKELOCKS || CONFIG_HAS_WAKELOCK */
            }
        }
        spin_unlock_irqrestore(&dhd->wakelock_spinlock, flags);
    }
    return ret;
}

#ifdef BCMPCIE_OOB_HOST_WAKE
void
dhd_os_oob_irq_wake_lock_timeout(dhd_pub_t *pub, int val)
{
#if defined(CONFIG_PM_WAKELOCKS) || defined(CONFIG_HAS_WAKELOCK)
    dhd_info_t *dhd = (dhd_info_t *)(pub->info);

    if (dhd) {
        wake_lock_timeout(&dhd->wl_intrwake, msecs_to_jiffies(val));
    }
#endif /* CONFIG_PM_WAKELOCKS || CONFIG_HAS_WAKELOCK */
}

void
dhd_os_oob_irq_wake_unlock(dhd_pub_t *pub)
{
#if defined(CONFIG_PM_WAKELOCKS) || defined(CONFIG_HAS_WAKELOCK)
    dhd_info_t *dhd = (dhd_info_t *)(pub->info);

    if (dhd) {
        /* if wl_intrwake is active, unlock it */
        if (wake_lock_active(&dhd->wl_intrwake)) {
            wake_unlock(&dhd->wl_intrwake);
        }
    }
#endif /* CONFIG_PM_WAKELOCKS || CONFIG_HAS_WAKELOCK */
}
#endif /* BCMPCIE_OOB_HOST_WAKE */

#ifdef DHD_USE_SCAN_WAKELOCK
void
dhd_os_scan_wake_lock_timeout(dhd_pub_t *pub, int val)
{
#if defined(CONFIG_PM_WAKELOCKS) || defined(CONFIG_HAS_WAKELOCK)
    dhd_info_t *dhd = (dhd_info_t *)(pub->info);

    if (dhd) {
        wake_lock_timeout(&dhd->wl_scanwake, msecs_to_jiffies(val));
    }
#endif /* CONFIG_PM_WAKELOCKS || CONFIG_HAS_WAKELOCK */
}

void
dhd_os_scan_wake_unlock(dhd_pub_t *pub)
{
#if defined(CONFIG_PM_WAKELOCKS) || defined(CONFIG_HAS_WAKELOCK)
    dhd_info_t *dhd = (dhd_info_t *)(pub->info);

    if (dhd) {
        /* if wl_scanwake is active, unlock it */
        if (wake_lock_active(&dhd->wl_scanwake)) {
            wake_unlock(&dhd->wl_scanwake);
        }
    }
#endif /* CONFIG_PM_WAKELOCKS || CONFIG_HAS_WAKELOCK */
}
#endif /* DHD_USE_SCAN_WAKELOCK */
/* Waive wakelocks for operations such as IOVARs in the suspend path; must be
 * closed by a paired call to dhd_os_wake_lock_restore(). Returns the current
 * wakelock counter.
 */
int dhd_os_wake_lock_waive(dhd_pub_t *pub)
{
    dhd_info_t *dhd = (dhd_info_t *)(pub->info);
    unsigned long flags;
    int ret = 0;

    if (dhd && (dhd->dhd_state & DHD_ATTACH_STATE_WAKELOCKS_INIT)) {
        spin_lock_irqsave(&dhd->wakelock_spinlock, flags);

        /* dhd_wakelock_waive/dhd_wakelock_restore must be paired */
        if (dhd->waive_wakelock == FALSE) {
#ifdef DHD_TRACE_WAKE_LOCK
            if (atomic_read(&trace_wklock_onoff)) {
                STORE_WKLOCK_RECORD(DHD_WAIVE_LOCK);
            }
#endif /* DHD_TRACE_WAKE_LOCK */
            /* record current lock status */
            dhd->wakelock_before_waive = dhd->wakelock_counter;
            dhd->waive_wakelock = TRUE;
        }
        ret = dhd->wakelock_wd_counter;
        spin_unlock_irqrestore(&dhd->wakelock_spinlock, flags);
    }
    return ret;
}

int dhd_os_wake_lock_restore(dhd_pub_t *pub)
{
    dhd_info_t *dhd = (dhd_info_t *)(pub->info);
    unsigned long flags;
    int ret = 0;

    if (!dhd)
        return 0;
    if ((dhd->dhd_state & DHD_ATTACH_STATE_WAKELOCKS_INIT) == 0)
        return 0;

    spin_lock_irqsave(&dhd->wakelock_spinlock, flags);

    /* dhd_wakelock_waive/dhd_wakelock_restore must be paired */
    if (!dhd->waive_wakelock)
        goto exit;

    dhd->waive_wakelock = FALSE;
    /* if somebody else acquires wakelock between dhd_wakelock_waive/dhd_wakelock_restore,
     * we need to make it up by calling wake_lock or pm_stay_awake. or if somebody releases
     * the lock in between, do the same by calling wake_unlock or pm_relax
     */
#ifdef DHD_TRACE_WAKE_LOCK
    if (atomic_read(&trace_wklock_onoff)) {
        STORE_WKLOCK_RECORD(DHD_RESTORE_LOCK);
    }
#endif /* DHD_TRACE_WAKE_LOCK */

    if (dhd->wakelock_before_waive == 0 && dhd->wakelock_counter > 0) {
#if defined(CONFIG_PM_WAKELOCKS) || defined(CONFIG_HAS_WAKELOCK)
        wake_lock(&dhd->wl_wifi);
#elif defined(BCMSDIO)
        dhd_bus_dev_pm_stay_awake(&dhd->pub);
#endif // endif
    } else if (dhd->wakelock_before_waive > 0 && dhd->wakelock_counter == 0) {
#if defined(CONFIG_PM_WAKELOCKS) || defined(CONFIG_HAS_WAKELOCK)
        wake_unlock(&dhd->wl_wifi);
#elif defined(BCMSDIO)
        dhd_bus_dev_pm_relax(&dhd->pub);
#endif // endif
    }
    dhd->wakelock_before_waive = 0;
exit:
    ret = dhd->wakelock_wd_counter;
    spin_unlock_irqrestore(&dhd->wakelock_spinlock, flags);
    return ret;
}
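/*
 * Usage sketch (illustrative only): the waive/restore pair brackets work,
 * such as IOVARs issued from the suspend path, during which new wake-lock
 * requests should not keep the system awake:
 *
 *    dhd_os_wake_lock_waive(&dhd->pub);
 *    // issue IOVARs / suspend-time work; dhd_os_wake_lock() calls made
 *    // here only bump the counter without taking wl_wifi
 *    dhd_os_wake_lock_restore(&dhd->pub);
 *
 * dhd_os_wake_lock_restore() then reconciles wl_wifi with whatever counter
 * movement happened while the lock was waived.
 */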
void dhd_os_wake_lock_init(struct dhd_info *dhd)
{
    DHD_TRACE(("%s: initialize wake_lock_counters\n", __FUNCTION__));
    dhd->wakelock_counter = 0;
    dhd->wakelock_rx_timeout_enable = 0;
    dhd->wakelock_ctrl_timeout_enable = 0;
    /* wakelocks prevent a system from going into a low power state */
#if defined(CONFIG_PM_WAKELOCKS) || defined(CONFIG_HAS_WAKELOCK)
    wake_lock_init(&dhd->wl_wifi, WAKE_LOCK_SUSPEND, "wlan_wake");
    wake_lock_init(&dhd->wl_rxwake, WAKE_LOCK_SUSPEND, "wlan_rx_wake");
    wake_lock_init(&dhd->wl_ctrlwake, WAKE_LOCK_SUSPEND, "wlan_ctrl_wake");
    wake_lock_init(&dhd->wl_evtwake, WAKE_LOCK_SUSPEND, "wlan_evt_wake");
    wake_lock_init(&dhd->wl_pmwake, WAKE_LOCK_SUSPEND, "wlan_pm_wake");
    wake_lock_init(&dhd->wl_txflwake, WAKE_LOCK_SUSPEND, "wlan_txfl_wake");
#ifdef BCMPCIE_OOB_HOST_WAKE
    wake_lock_init(&dhd->wl_intrwake, WAKE_LOCK_SUSPEND, "wlan_oob_irq_wake");
#endif /* BCMPCIE_OOB_HOST_WAKE */
#ifdef DHD_USE_SCAN_WAKELOCK
    wake_lock_init(&dhd->wl_scanwake, WAKE_LOCK_SUSPEND, "wlan_scan_wake");
#endif /* DHD_USE_SCAN_WAKELOCK */
#endif /* CONFIG_PM_WAKELOCKS || CONFIG_HAS_WAKELOCK */
#ifdef DHD_TRACE_WAKE_LOCK
    dhd_wk_lock_trace_init(dhd);
#endif /* DHD_TRACE_WAKE_LOCK */
}

void dhd_os_wake_lock_destroy(struct dhd_info *dhd)
{
    DHD_TRACE(("%s: deinit wake_lock_counters\n", __FUNCTION__));
#if defined(CONFIG_PM_WAKELOCKS) || defined(CONFIG_HAS_WAKELOCK)
    dhd->wakelock_counter = 0;
    dhd->wakelock_rx_timeout_enable = 0;
    dhd->wakelock_ctrl_timeout_enable = 0;
    wake_lock_destroy(&dhd->wl_wifi);
    wake_lock_destroy(&dhd->wl_rxwake);
    wake_lock_destroy(&dhd->wl_ctrlwake);
    wake_lock_destroy(&dhd->wl_evtwake);
    wake_lock_destroy(&dhd->wl_pmwake);
    wake_lock_destroy(&dhd->wl_txflwake);
#ifdef BCMPCIE_OOB_HOST_WAKE
    wake_lock_destroy(&dhd->wl_intrwake);
#endif /* BCMPCIE_OOB_HOST_WAKE */
#ifdef DHD_USE_SCAN_WAKELOCK
    wake_lock_destroy(&dhd->wl_scanwake);
#endif /* DHD_USE_SCAN_WAKELOCK */
#ifdef DHD_TRACE_WAKE_LOCK
    dhd_wk_lock_trace_deinit(dhd);
#endif /* DHD_TRACE_WAKE_LOCK */
#endif /* CONFIG_PM_WAKELOCKS || CONFIG_HAS_WAKELOCK */
}

bool dhd_os_check_if_up(dhd_pub_t *pub)
{
    if (!pub)
        return FALSE;
    return pub->up;
}

#if defined(BCMSDIO) || defined(BCMPCIE)
/* function to collect firmware, chip id and chip version info */
void dhd_set_version_info(dhd_pub_t *dhdp, char *fw)
{
    int i;

    i = snprintf(info_string, sizeof(info_string),
        " Driver: %s\n Firmware: %s ", EPI_VERSION_STR, fw);

    if (!dhdp)
        return;

    i = snprintf(&info_string[i], sizeof(info_string) - i,
        "\n Chip: %x Rev %x Pkg %x", dhd_bus_chip_id(dhdp),
        dhd_bus_chiprev_id(dhdp), dhd_bus_chippkg_id(dhdp));
}
#endif /* BCMSDIO || BCMPCIE */
int dhd_ioctl_entry_local(struct net_device *net, wl_ioctl_t *ioc, int cmd)
{
    int ifidx;
    int ret = 0;
    dhd_info_t *dhd = NULL;

    if (!net || !DEV_PRIV(net)) {
        DHD_ERROR(("%s invalid parameter net %p dev_priv %p\n",
            __FUNCTION__, net, DEV_PRIV(net)));
        return -EINVAL;
    }

    dhd = DHD_DEV_INFO(net);
    if (!dhd)
        return -EINVAL;

    ifidx = dhd_net2idx(dhd, net);
    if (ifidx == DHD_BAD_IF) {
        DHD_ERROR(("%s bad ifidx\n", __FUNCTION__));
        return -ENODEV;
    }

    DHD_OS_WAKE_LOCK(&dhd->pub);
    DHD_PERIM_LOCK(&dhd->pub);

    ret = dhd_wl_ioctl(&dhd->pub, ifidx, ioc, ioc->buf, ioc->len);
    dhd_check_hang(net, &dhd->pub, ret);

    DHD_PERIM_UNLOCK(&dhd->pub);
    DHD_OS_WAKE_UNLOCK(&dhd->pub);

    return ret;
}

bool dhd_os_check_hang(dhd_pub_t *dhdp, int ifidx, int ret)
{
    struct net_device *net;

    net = dhd_idx2net(dhdp, ifidx);
    if (!net) {
        DHD_ERROR(("%s : Invalid index : %d\n", __FUNCTION__, ifidx));
        return -EINVAL;
    }

    return dhd_check_hang(net, dhdp, ret);
}

/* Return instance */
int dhd_get_instance(dhd_pub_t *dhdp)
{
    return dhdp->info->unit;
}
#if defined(WL_CFG80211) && defined(SUPPORT_DEEP_SLEEP)
#define MAX_TRY_CNT 5 /* Number of tries to disable deepsleep */
int dhd_deepsleep(struct net_device *dev, int flag)
{
    char iovbuf[20];
    uint powervar = 0;
    dhd_info_t *dhd;
    dhd_pub_t *dhdp;
    int cnt = 0;
    int ret = 0;

    dhd = DHD_DEV_INFO(dev);
    dhdp = &dhd->pub;

    switch (flag) {
    case 1: /* Deepsleep on */
        DHD_ERROR(("[WiFi] Deepsleep On\n"));
        /* give some time to sysioc_work before deepsleep */
        OSL_SLEEP(200);
#ifdef PKT_FILTER_SUPPORT
        /* disable pkt filter */
        dhd_enable_packet_filter(0, dhdp);
#endif /* PKT_FILTER_SUPPORT */
        /* Disable MPC */
        powervar = 0;
        ret = dhd_iovar(dhdp, 0, "mpc", (char *)&powervar, sizeof(powervar), NULL,
            0, TRUE);
        /* Enable Deepsleep */
        powervar = 1;
        ret = dhd_iovar(dhdp, 0, "deepsleep", (char *)&powervar, sizeof(powervar),
            NULL, 0, TRUE);
        break;

    case 0: /* Deepsleep Off */
        DHD_ERROR(("[WiFi] Deepsleep Off\n"));
        /* Disable Deepsleep */
        for (cnt = 0; cnt < MAX_TRY_CNT; cnt++) {
            powervar = 0;
            ret = dhd_iovar(dhdp, 0, "deepsleep", (char *)&powervar,
                sizeof(powervar), NULL, 0, TRUE);
            ret = dhd_iovar(dhdp, 0, "deepsleep", (char *)&powervar,
                sizeof(powervar), iovbuf, sizeof(iovbuf), FALSE);
            if (ret < 0) {
                DHD_ERROR(("failed to query dhd deepsleep status,"
                    " ret value: %d\n", ret));
            } else {
                if (!(*(int *)iovbuf)) {
                    DHD_ERROR(("deepsleep mode is 0,"
                        " count: %d\n", cnt));
                    break;
                }
            }
        }
        /* Enable MPC */
        powervar = 1;
        ret = dhd_iovar(dhdp, 0, "mpc", (char *)&powervar, sizeof(powervar), NULL,
            0, TRUE);
        break;
    }

    return 0;
}
#endif /* WL_CFG80211 && SUPPORT_DEEP_SLEEP */
#ifdef PROP_TXSTATUS
void dhd_wlfc_plat_init(void *dhd)
{
#ifdef USE_DYNAMIC_F2_BLKSIZE
    dhdsdio_func_blocksize((dhd_pub_t *)dhd, 2, DYNAMIC_F2_BLKSIZE_FOR_NONLEGACY);
#endif /* USE_DYNAMIC_F2_BLKSIZE */
    return;
}

void dhd_wlfc_plat_deinit(void *dhd)
{
#ifdef USE_DYNAMIC_F2_BLKSIZE
    dhdsdio_func_blocksize((dhd_pub_t *)dhd, 2, sd_f2_blocksize);
#endif /* USE_DYNAMIC_F2_BLKSIZE */
    return;
}

bool dhd_wlfc_skip_fc(void *dhdp, uint8 idx)
{
#ifdef SKIP_WLFC_ON_CONCURRENT
#ifdef WL_CFG80211
    struct net_device *net = dhd_idx2net((dhd_pub_t *)dhdp, idx);
    if (net)
        /* enable flow control in vsdb mode */
        return !(wl_cfg80211_is_concurrent_mode(net));
#else
    return TRUE; /* skip flow control */
#endif /* WL_CFG80211 */
#else
    return FALSE;
#endif /* SKIP_WLFC_ON_CONCURRENT */
    return FALSE;
}
#endif /* PROP_TXSTATUS */
#ifdef BCMDBGFS
#include <linux/debugfs.h>

typedef struct dhd_dbgfs {
    struct dentry *debugfs_dir;
    struct dentry *debugfs_mem;
    dhd_pub_t *dhdp;
    uint32 size;
} dhd_dbgfs_t;

dhd_dbgfs_t g_dbgfs;

extern uint32 dhd_readregl(void *bp, uint32 addr);
extern uint32 dhd_writeregl(void *bp, uint32 addr, uint32 data);

static int
dhd_dbg_state_open(struct inode *inode, struct file *file)
{
    file->private_data = inode->i_private;
    return 0;
}

static ssize_t
dhd_dbg_state_read(struct file *file, char __user *ubuf,
    size_t count, loff_t *ppos)
{
    ssize_t rval;
    uint32 tmp;
    loff_t pos = *ppos;
    size_t ret;

    if (pos < 0)
        return -EINVAL;
    if (pos >= g_dbgfs.size || !count)
        return 0;
    if (count > g_dbgfs.size - pos)
        count = g_dbgfs.size - pos;

    /* Basically enforce aligned 4 byte reads. It's up to the user to work out the details */
    tmp = dhd_readregl(g_dbgfs.dhdp->bus, file->f_pos & (~3));

    ret = copy_to_user(ubuf, &tmp, 4);
    if (ret == count)
        return -EFAULT;
    count -= ret;
    *ppos = pos + count;
    rval = count;

    return rval;
}

static ssize_t
dhd_debugfs_write(struct file *file, const char __user *ubuf, size_t count, loff_t *ppos)
{
    loff_t pos = *ppos;
    size_t ret;
    uint32 buf;

    if (pos < 0)
        return -EINVAL;
    if (pos >= g_dbgfs.size || !count)
        return 0;
    if (count > g_dbgfs.size - pos)
        count = g_dbgfs.size - pos;

    ret = copy_from_user(&buf, ubuf, sizeof(uint32));
    if (ret == count)
        return -EFAULT;

    /* Basically enforce aligned 4 byte writes. It's up to the user to work out the details */
    dhd_writeregl(g_dbgfs.dhdp->bus, file->f_pos & (~3), buf);

    return count;
}

loff_t
dhd_debugfs_lseek(struct file *file, loff_t off, int whence)
{
    loff_t pos = -1;

    switch (whence) {
    case 0:
        pos = off;
        break;
    case 1:
        pos = file->f_pos + off;
        break;
    case 2:
        pos = g_dbgfs.size - off;
    }
    return (pos < 0 || pos > g_dbgfs.size) ? -EINVAL : (file->f_pos = pos);
}

static const struct file_operations dhd_dbg_state_ops = {
    .read = dhd_dbg_state_read,
    .write = dhd_debugfs_write,
    .open = dhd_dbg_state_open,
    .llseek = dhd_debugfs_lseek
};
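/*
 * Illustrative userspace access to the "mem" node created below (not driver
 * code; the path assumes debugfs is mounted at /sys/kernel/debug). The read
 * handler treats the file offset as a 4-byte-aligned register address, so an
 * lseek()+read() pair fetches one register:
 *
 *    #include <fcntl.h>
 *    #include <stdint.h>
 *    #include <stdio.h>
 *    #include <unistd.h>
 *
 *    int main(void)
 *    {
 *        uint32_t val;
 *        int fd = open("/sys/kernel/debug/dhd/mem", O_RDONLY);
 *        if (fd < 0)
 *            return 1;
 *        if (lseek(fd, 0x1000, SEEK_SET) == 0x1000 &&
 *            read(fd, &val, sizeof(val)) > 0)
 *            printf("reg@0x1000 = 0x%08x\n", val);
 *        close(fd);
 *        return 0;
 *    }
 */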
static void dhd_dbgfs_create(void)
{
    if (g_dbgfs.debugfs_dir) {
        g_dbgfs.debugfs_mem = debugfs_create_file("mem", 0644, g_dbgfs.debugfs_dir,
            NULL, &dhd_dbg_state_ops);
    }
}

void dhd_dbgfs_init(dhd_pub_t *dhdp)
{
    g_dbgfs.dhdp = dhdp;
    g_dbgfs.size = 0x20000000; /* Allow access to various cores regs */

    g_dbgfs.debugfs_dir = debugfs_create_dir("dhd", 0);
    if (IS_ERR(g_dbgfs.debugfs_dir)) {
        g_dbgfs.debugfs_dir = NULL;
        return;
    }

    dhd_dbgfs_create();

    return;
}

void dhd_dbgfs_remove(void)
{
    debugfs_remove(g_dbgfs.debugfs_mem);
    debugfs_remove(g_dbgfs.debugfs_dir);

    bzero((unsigned char *)&g_dbgfs, sizeof(g_dbgfs));
}
#endif /* BCMDBGFS */
#ifdef CUSTOM_SET_CPUCORE
void dhd_set_cpucore(dhd_pub_t *dhd, int set)
{
    int e_dpc = 0, e_rxf = 0, retry_set = 0;

    if (!(dhd->chan_isvht80)) {
        DHD_ERROR(("%s: chan_status(%d) cpucore!!!\n", __FUNCTION__, dhd->chan_isvht80));
        return;
    }

    if (DPC_CPUCORE) {
        do {
            if (set == TRUE) {
                e_dpc = set_cpus_allowed_ptr(dhd->current_dpc,
                    cpumask_of(DPC_CPUCORE));
            } else {
                e_dpc = set_cpus_allowed_ptr(dhd->current_dpc,
                    cpumask_of(PRIMARY_CPUCORE));
            }
            if (retry_set++ > MAX_RETRY_SET_CPUCORE) {
                DHD_ERROR(("%s: dpc(%d) invalid cpu!\n", __FUNCTION__, e_dpc));
                return;
            }
            if (e_dpc < 0)
                OSL_SLEEP(1);
        } while (e_dpc < 0);
    }
    if (RXF_CPUCORE) {
        do {
            if (set == TRUE) {
                e_rxf = set_cpus_allowed_ptr(dhd->current_rxf,
                    cpumask_of(RXF_CPUCORE));
            } else {
                e_rxf = set_cpus_allowed_ptr(dhd->current_rxf,
                    cpumask_of(PRIMARY_CPUCORE));
            }
            if (retry_set++ > MAX_RETRY_SET_CPUCORE) {
                DHD_ERROR(("%s: rxf(%d) invalid cpu!\n", __FUNCTION__, e_rxf));
                return;
            }
            if (e_rxf < 0)
                OSL_SLEEP(1);
        } while (e_rxf < 0);
    }
#ifdef DHD_OF_SUPPORT
    interrupt_set_cpucore(set, DPC_CPUCORE, PRIMARY_CPUCORE);
#endif /* DHD_OF_SUPPORT */
    DHD_TRACE(("%s: set(%d) cpucore success!\n", __FUNCTION__, set));

    return;
}
#endif /* CUSTOM_SET_CPUCORE */
#ifdef DHD_MCAST_REGEN
/* Get interface specific mcast_regen configuration */
int dhd_get_mcast_regen_bss_enable(dhd_pub_t *dhdp, uint32 idx)
{
    dhd_info_t *dhd = dhdp->info;
    dhd_if_t *ifp;

    ASSERT(idx < DHD_MAX_IFS);

    ifp = dhd->iflist[idx];

    return ifp->mcast_regen_bss_enable;
}

/* Set interface specific mcast_regen configuration */
int dhd_set_mcast_regen_bss_enable(dhd_pub_t *dhdp, uint32 idx, int val)
{
    dhd_info_t *dhd = dhdp->info;
    dhd_if_t *ifp;

    ASSERT(idx < DHD_MAX_IFS);

    ifp = dhd->iflist[idx];

    ifp->mcast_regen_bss_enable = val;

    /* Disable rx_pkt_chain feature for interface, if mcast_regen feature
     * is enabled
     */
    dhd_update_rx_pkt_chainable_state(dhdp, idx);
    return BCME_OK;
}
#endif /* DHD_MCAST_REGEN */

/* Get interface specific ap_isolate configuration */
int dhd_get_ap_isolate(dhd_pub_t *dhdp, uint32 idx)
{
    dhd_info_t *dhd = dhdp->info;
    dhd_if_t *ifp;

    ASSERT(idx < DHD_MAX_IFS);

    ifp = dhd->iflist[idx];

    return ifp->ap_isolate;
}

/* Set interface specific ap_isolate configuration */
int dhd_set_ap_isolate(dhd_pub_t *dhdp, uint32 idx, int val)
{
    dhd_info_t *dhd = dhdp->info;
    dhd_if_t *ifp;

    ASSERT(idx < DHD_MAX_IFS);

    ifp = dhd->iflist[idx];
    if (ifp)
        ifp->ap_isolate = val;

    return 0;
}
#ifdef DHD_RND_DEBUG
#ifdef CUSTOMER_HW4_DEBUG
#define RNDINFO PLATFORM_PATH".rnd"
#elif defined(CUSTOMER_HW2) || defined(BOARD_HIKEY)
#define RNDINFO "/data/misc/wifi/.rnd"
#elif defined(OEM_ANDROID) && (defined(BOARD_PANDA) || defined(__ARM_ARCH_7A__))
#define RNDINFO "/data/misc/wifi/.rnd"
#elif defined(OEM_ANDROID)
#define RNDINFO_LIVE "/installmedia/.rnd"
#define RNDINFO_INST "/data/.rnd"
#define RNDINFO RNDINFO_LIVE
#else /* FC19 and Others */
#define RNDINFO "/root/.rnd"
#endif /* CUSTOMER_HW4_DEBUG */

#define RND_IN RNDINFO".in"
#define RND_OUT RNDINFO".out"

int
dhd_get_rnd_info(dhd_pub_t *dhd)
{
    struct file *fp = NULL;
    int ret = BCME_ERROR;
    char *filepath = RND_IN;
    uint32 file_mode = O_RDONLY;
    mm_segment_t old_fs;
    loff_t pos = 0;

    /* Read the saved RND info from the file */
    fp = filp_open(filepath, file_mode, 0);
    if (IS_ERR(fp)) {
        DHD_ERROR(("%s: File [%s] doesn't exist\n", __FUNCTION__, filepath));
#if defined(CONFIG_X86) && defined(OEM_ANDROID)
        /* Check if it is Live Brix Image */
        if (bcmstrstr(filepath, RNDINFO_LIVE)) {
            goto err1;
        }
        /* Try if it is Installed Brix Image */
        filepath = RNDINFO_INST".in";
        DHD_ERROR(("%s: Try File [%s]\n", __FUNCTION__, filepath));
        fp = filp_open(filepath, file_mode, 0);
        if (IS_ERR(fp)) {
            DHD_ERROR(("%s: File [%s] doesn't exist\n", __FUNCTION__, filepath));
            goto err1;
        }
#else /* Non Brix Android platform */
        goto err1;
#endif /* CONFIG_X86 && OEM_ANDROID */
    }

    old_fs = get_fs();
    set_fs(KERNEL_DS);

    /* Handle success case */
    ret = vfs_read(fp, (char *)&dhd->rnd_len, sizeof(dhd->rnd_len), &pos);
    if (ret < 0) {
        DHD_ERROR(("%s: rnd_len read error, ret=%d\n", __FUNCTION__, ret));
        goto err2;
    }

    dhd->rnd_buf = MALLOCZ(dhd->osh, dhd->rnd_len);
    if (!dhd->rnd_buf) {
        DHD_ERROR(("%s: MALLOC failed\n", __FUNCTION__));
        goto err2;
    }

    ret = vfs_read(fp, (char *)dhd->rnd_buf, dhd->rnd_len, &pos);
    if (ret < 0) {
        DHD_ERROR(("%s: rnd_buf read error, ret=%d\n", __FUNCTION__, ret));
        goto err3;
    }

    set_fs(old_fs);
    filp_close(fp, NULL);

    DHD_ERROR(("%s: RND read from %s\n", __FUNCTION__, filepath));
    return BCME_OK;

err3:
    MFREE(dhd->osh, dhd->rnd_buf, dhd->rnd_len);
    dhd->rnd_buf = NULL;
err2:
    set_fs(old_fs);
    filp_close(fp, NULL);
err1:
    return BCME_ERROR;
}

int
dhd_dump_rnd_info(dhd_pub_t *dhd, uint8 *rnd_buf, uint32 rnd_len)
{
    struct file *fp = NULL;
    int ret = BCME_OK;
    char *filepath = RND_OUT;
    uint32 file_mode = O_CREAT | O_WRONLY | O_SYNC;
    mm_segment_t old_fs;
    loff_t pos = 0;

    /* Write the RND info to the file */
    fp = filp_open(filepath, file_mode, 0664);
    if (IS_ERR(fp)) {
        DHD_ERROR(("%s: File [%s] doesn't exist\n", __FUNCTION__, filepath));
#if defined(CONFIG_X86) && defined(OEM_ANDROID)
        /* Check if it is Live Brix Image */
        if (bcmstrstr(filepath, RNDINFO_LIVE)) {
            goto err1;
        }
        /* Try if it is Installed Brix Image */
        filepath = RNDINFO_INST".out";
        DHD_ERROR(("%s: Try File [%s]\n", __FUNCTION__, filepath));
        fp = filp_open(filepath, file_mode, 0664);
        if (IS_ERR(fp)) {
            DHD_ERROR(("%s: File [%s] doesn't exist\n", __FUNCTION__, filepath));
            goto err1;
        }
#else /* Non Brix Android platform */
        goto err1;
#endif /* CONFIG_X86 && OEM_ANDROID */
    }

    old_fs = get_fs();
    set_fs(KERNEL_DS);

    /* Handle success case */
    ret = vfs_write(fp, (char *)&rnd_len, sizeof(rnd_len), &pos);
    if (ret < 0) {
        DHD_ERROR(("%s: rnd_len write error, ret=%d\n", __FUNCTION__, ret));
        goto err2;
    }

    ret = vfs_write(fp, (char *)rnd_buf, rnd_len, &pos);
    if (ret < 0) {
        DHD_ERROR(("%s: rnd_buf write error, ret=%d\n", __FUNCTION__, ret));
        goto err2;
    }

    set_fs(old_fs);
    filp_close(fp, NULL);
    DHD_ERROR(("%s: RND written to %s\n", __FUNCTION__, filepath));
    return BCME_OK;

err2:
    set_fs(old_fs);
    filp_close(fp, NULL);
err1:
    return BCME_ERROR;
}
#endif /* DHD_RND_DEBUG */
#ifdef DHD_FW_COREDUMP
void dhd_schedule_memdump(dhd_pub_t *dhdp, uint8 *buf, uint32 size)
{
    unsigned long flags = 0;
    dhd_dump_t *dump = NULL;
    dhd_info_t *dhd_info = NULL;
#if !defined(DHD_DUMP_FILE_WRITE_FROM_KERNEL)
    log_dump_type_t type = DLD_BUF_TYPE_ALL;
#endif /* !DHD_DUMP_FILE_WRITE_FROM_KERNEL */

    dhd_info = (dhd_info_t *)dhdp->info;
    dump = (dhd_dump_t *)MALLOC(dhdp->osh, sizeof(dhd_dump_t));
    if (dump == NULL) {
        DHD_ERROR(("%s: dhd dump memory allocation failed\n", __FUNCTION__));
        return;
    }
    dump->buf = buf;
    dump->bufsize = size;
#ifdef BCMPCIE
    dhd_get_hscb_info(dhdp, (void *)(&dump->hscb_buf),
        (uint32 *)(&dump->hscb_bufsize));
#else /* BCMPCIE */
    dump->hscb_bufsize = 0;
#endif /* BCMPCIE */

#ifdef DHD_LOG_DUMP
    dhd_print_buf_addr(dhdp, "memdump", buf, size);
#if !defined(DHD_DUMP_FILE_WRITE_FROM_KERNEL)
    /* Print out buffer information */
    dhd_log_dump_buf_addr(dhdp, &type);
#endif /* !DHD_DUMP_FILE_WRITE_FROM_KERNEL */
#endif /* DHD_LOG_DUMP */

    if (dhdp->memdump_enabled == DUMP_MEMONLY) {
        BUG_ON(1);
    }

#if defined(DEBUG_DNGL_INIT_FAIL) || defined(DHD_ERPOM) || \
    defined(DNGL_AXI_ERROR_LOGGING)
    if (
#if defined(DEBUG_DNGL_INIT_FAIL)
        (dhdp->memdump_type == DUMP_TYPE_DONGLE_INIT_FAILURE) ||
#endif /* DEBUG_DNGL_INIT_FAIL */
#ifdef DHD_ERPOM
        (dhdp->memdump_type == DUMP_TYPE_DUE_TO_BT) ||
#endif /* DHD_ERPOM */
#ifdef DNGL_AXI_ERROR_LOGGING
        (dhdp->memdump_type == DUMP_TYPE_SMMU_FAULT) ||
#endif /* DNGL_AXI_ERROR_LOGGING */
        FALSE)
    {
#if defined(DHD_DUMP_FILE_WRITE_FROM_KERNEL) && defined(DHD_LOG_DUMP)
        log_dump_type_t *flush_type = NULL;
#endif /* DHD_DUMP_FILE_WRITE_FROM_KERNEL && DHD_LOG_DUMP */
        dhd_info->scheduled_memdump = FALSE;
        (void)dhd_mem_dump((void *)dhdp->info, (void *)dump, 0);
#if defined(DHD_DUMP_FILE_WRITE_FROM_KERNEL) && defined(DHD_LOG_DUMP)
        /* for dongle init fail cases, 'dhd_mem_dump' does
         * not call 'dhd_log_dump', so call it here.
         */
        flush_type = MALLOCZ(dhdp->osh,
            sizeof(log_dump_type_t));
        if (flush_type) {
            *flush_type = DLD_BUF_TYPE_ALL;
            DHD_ERROR(("%s: calling log dump.. \n", __FUNCTION__));
            dhd_log_dump(dhdp->info, flush_type, 0);
        }
#endif /* DHD_DUMP_FILE_WRITE_FROM_KERNEL && DHD_LOG_DUMP */
        return;
    }
#endif /* DEBUG_DNGL_INIT_FAIL || DHD_ERPOM || DNGL_AXI_ERROR_LOGGING */

    dhd_info->scheduled_memdump = TRUE;

    /* bus busy bit for mem dump will be cleared in mem dump
     * work item context, after mem dump file is written
     */
    DHD_GENERAL_LOCK(dhdp, flags);
    DHD_BUS_BUSY_SET_IN_MEMDUMP(dhdp);
    DHD_GENERAL_UNLOCK(dhdp, flags);
    DHD_ERROR(("%s: scheduling mem dump.. \n", __FUNCTION__));
    dhd_deferred_schedule_work(dhdp->info->dhd_deferred_wq, (void *)dump,
        DHD_WQ_WORK_SOC_RAM_DUMP, (void *)dhd_mem_dump, DHD_WQ_WORK_PRIORITY_HIGH);
}
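/*
 * Flow sketch (illustrative summary of the code above): dhd_schedule_memdump()
 * either calls dhd_mem_dump() inline for the init-failure/ERPOM/SMMU cases, or
 * marks the bus busy and defers to the work queue:
 *
 *    DHD_BUS_BUSY_SET_IN_MEMDUMP(dhdp);
 *    dhd_deferred_schedule_work(..., (void *)dump,
 *        DHD_WQ_WORK_SOC_RAM_DUMP, (void *)dhd_mem_dump,
 *        DHD_WQ_WORK_PRIORITY_HIGH);
 *
 * dhd_mem_dump() then runs in process context, writes the SoC RAM image via
 * write_dump_to_file(), and the busy bit is cleared once the dump file has
 * been written.
 */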
static int
dhd_mem_dump(void *handle, void *event_info, u8 event)
{
	dhd_info_t *dhd = handle;
	dhd_pub_t *dhdp = NULL;
	unsigned long flags = 0;
	int ret = 0;
	dhd_dump_t *dump = NULL;

	if (!dhd) {
		DHD_ERROR(("%s: dhd is NULL\n", __FUNCTION__));
		return -ENODEV;
	}
	/* dereference 'dhd' only after the NULL check above */
	DHD_ERROR(("%s: ENTER, memdump type %u\n", __FUNCTION__, dhd->pub.memdump_type));

	dhdp = &dhd->pub;
	if (!dhdp) {
		DHD_ERROR(("%s: dhdp is NULL\n", __FUNCTION__));
		return -ENODEV;
	}

	DHD_GENERAL_LOCK(dhdp, flags);
	if (DHD_BUS_CHECK_DOWN_OR_DOWN_IN_PROGRESS(dhdp)) {
		DHD_GENERAL_UNLOCK(dhdp, flags);
		DHD_ERROR(("%s: bus is down! can't collect mem dump. \n", __FUNCTION__));
		ret = -ENODEV;
		goto exit;
	}
	DHD_GENERAL_UNLOCK(dhdp, flags);

#ifdef DHD_SSSR_DUMP
	if (dhdp->sssr_inited && dhdp->collect_sssr) {
		dhdpcie_sssr_dump(dhdp);
	}
	dhdp->collect_sssr = FALSE;
#endif /* DHD_SSSR_DUMP */
#if defined(WL_CFG80211) && defined(DHD_FILE_DUMP_EVENT)
	dhd_wait_for_file_dump(dhdp);
#endif /* WL_CFG80211 && DHD_FILE_DUMP_EVENT */

	dump = (dhd_dump_t *)event_info;
	if (!dump) {
		DHD_ERROR(("%s: dump is NULL\n", __FUNCTION__));
		ret = -EINVAL;
		goto exit;
	}

	/*
	 * If the kernel does not have file write access enabled,
	 * skip writing dumps to files; the dumps will be pushed to
	 * the HAL layer, which writes them into files.
	 */
#ifdef DHD_DUMP_FILE_WRITE_FROM_KERNEL
	if (write_dump_to_file(&dhd->pub, dump->buf, dump->bufsize, "mem_dump")) {
		DHD_ERROR(("%s: writing SoC_RAM dump to the file failed\n", __FUNCTION__));
#ifdef DHD_DEBUG_UART
		dhd->pub.memdump_success = FALSE;
#endif /* DHD_DEBUG_UART */
	}

	/* Call dhd_log_dump directly for debug_dump collection from the mem_dump
	 * work queue context; there is no need to schedule another work queue for
	 * the log dump. For the user-initiated DEBUG_DUMP wpa_cli command
	 * (DUMP_TYPE_BY_SYSDUMP), the cfg layer schedules the log_dump work queue
	 * itself, and that path is left undisturbed. If 'dhd_mem_dump' is called
	 * directly, debug_dump is not collected, since the caller may be in a
	 * non-sleepable context.
	 */
#ifdef DHD_LOG_DUMP
	if (dhd->scheduled_memdump &&
		dhdp->memdump_type != DUMP_TYPE_BY_SYSDUMP) {
		log_dump_type_t *flush_type = MALLOCZ(dhdp->osh,
			sizeof(log_dump_type_t));
		if (flush_type) {
			*flush_type = DLD_BUF_TYPE_ALL;
			DHD_ERROR(("%s: calling log dump.. \n", __FUNCTION__));
			dhd_log_dump(dhd, flush_type, 0);
		}
	}
#endif /* DHD_LOG_DUMP */

#ifdef DHD_PKT_LOGGING
	copy_debug_dump_time(dhdp->debug_dump_time_pktlog_str, dhdp->debug_dump_time_str);
#endif /* DHD_PKT_LOGGING */
	clear_debug_dump_time(dhdp->debug_dump_time_str);

	/* Before calling BUG_ON, wait for the other logs to be dumped.
	 * We cannot wait when dhd_mem_dump is called directly, as the
	 * caller may not be in a sleepable context.
	 */
	if (dhd->scheduled_memdump) {
		uint bitmask = 0;
		int timeleft = 0;
#ifdef DHD_SSSR_DUMP
		bitmask |= DHD_BUS_BUSY_IN_SSSRDUMP;
#endif /* DHD_SSSR_DUMP */
		if (bitmask != 0) {
			DHD_ERROR(("%s: wait to clear dhd_bus_busy_state: 0x%x\n",
				__FUNCTION__, dhdp->dhd_bus_busy_state));
			timeleft = dhd_os_busbusy_wait_bitmask(dhdp,
				&dhdp->dhd_bus_busy_state, bitmask, 0);
			if ((timeleft == 0) || (timeleft == 1)) {
				DHD_ERROR(("%s: Timed out dhd_bus_busy_state=0x%x\n",
					__FUNCTION__, dhdp->dhd_bus_busy_state));
			}
		}
	}

	if (dump->hscb_buf && dump->hscb_bufsize) {
		DHD_ERROR(("%s: write HSCB dump... \n", __FUNCTION__));
		if (write_dump_to_file(&dhd->pub, dump->hscb_buf,
			dump->hscb_bufsize, "mem_dump_hscb")) {
			DHD_ERROR(("%s: writing HSCB dump to the file failed\n", __FUNCTION__));
#ifdef DHD_DEBUG_UART
			dhd->pub.memdump_success = FALSE;
#endif /* DHD_DEBUG_UART */
		}
	}
#endif /* DHD_DUMP_FILE_WRITE_FROM_KERNEL */

	DHD_ERROR(("%s: memdump type %u\n", __FUNCTION__, dhd->pub.memdump_type));
	if (dhd->pub.memdump_enabled == DUMP_MEMFILE_BUGON &&
#ifdef DHD_LOG_DUMP
		dhd->pub.memdump_type != DUMP_TYPE_BY_SYSDUMP &&
#endif /* DHD_LOG_DUMP */
		dhd->pub.memdump_type != DUMP_TYPE_BY_USER &&
#ifdef DHD_DEBUG_UART
		dhd->pub.memdump_success == TRUE &&
#endif /* DHD_DEBUG_UART */
#ifdef DNGL_EVENT_SUPPORT
		dhd->pub.memdump_type != DUMP_TYPE_DONGLE_HOST_EVENT &&
#endif /* DNGL_EVENT_SUPPORT */
		dhd->pub.memdump_type != DUMP_TYPE_CFG_VENDOR_TRIGGERED) {
#ifdef SHOW_LOGTRACE
		/* wait till the logtrace context is flushed */
		dhd_flush_logtrace_process(dhd);
#endif /* SHOW_LOGTRACE */
		DHD_ERROR(("%s: call BUG_ON \n", __FUNCTION__));
		BUG_ON(1);
	}
	DHD_ERROR(("%s: No BUG ON, memdump type %u \n", __FUNCTION__, dhd->pub.memdump_type));

exit:
	if (dump) {
		MFREE(dhd->pub.osh, dump, sizeof(dhd_dump_t));
	}
	DHD_GENERAL_LOCK(dhdp, flags);
	DHD_BUS_BUSY_CLEAR_IN_MEMDUMP(&dhd->pub);
	dhd_os_busbusy_wake(dhdp);
	DHD_GENERAL_UNLOCK(dhdp, flags);
	dhd->scheduled_memdump = FALSE;
#ifdef OEM_ANDROID
	if (dhdp->hang_was_pending) {
		DHD_ERROR(("%s: Send pending HANG event...\n", __FUNCTION__));
		dhd_os_send_hang_message(dhdp);
		dhdp->hang_was_pending = 0;
	}
#endif /* OEM_ANDROID */
	DHD_ERROR(("%s: EXIT \n", __FUNCTION__));
	return ret;
}
#endif /* DHD_FW_COREDUMP */
#ifdef DHD_SSSR_DUMP
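/*
 * Export helpers for the collected SSSR buffers. The 'dig' variants copy
 * the digital core (VASIP or dig_mem) save/restore snapshot into the
 * caller-provided user buffer, while the 'd11' variants copy a D11 MAC
 * core snapshot; 'before'/'after' refer to snapshots taken before and
 * after the save/restore cycle.
 */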
int
dhd_sssr_dump_dig_buf_before(void *dev, const void *user_buf, uint32 len)
{
	dhd_info_t *dhd_info = *(dhd_info_t **)netdev_priv((struct net_device *)dev);
	dhd_pub_t *dhdp = &dhd_info->pub;
	int pos = 0, ret = BCME_ERROR;
	uint dig_buf_size = 0;

	if (dhdp->sssr_reg_info.vasip_regs.vasip_sr_size) {
		dig_buf_size = dhdp->sssr_reg_info.vasip_regs.vasip_sr_size;
	} else if ((dhdp->sssr_reg_info.length > OFFSETOF(sssr_reg_info_v1_t, dig_mem_info)) &&
		dhdp->sssr_reg_info.dig_mem_info.dig_sr_size) {
		dig_buf_size = dhdp->sssr_reg_info.dig_mem_info.dig_sr_size;
	}

	if (dhdp->sssr_dig_buf_before && (dhdp->sssr_dump_mode == SSSR_DUMP_MODE_SSSR)) {
		ret = dhd_export_debug_data((char *)dhdp->sssr_dig_buf_before,
			NULL, user_buf, dig_buf_size, &pos);
	}
	return ret;
}

int
dhd_sssr_dump_dig_buf_after(void *dev, const void *user_buf, uint32 len)
{
	dhd_info_t *dhd_info = *(dhd_info_t **)netdev_priv((struct net_device *)dev);
	dhd_pub_t *dhdp = &dhd_info->pub;
	int pos = 0, ret = BCME_ERROR;
	uint dig_buf_size = 0;

	if (dhdp->sssr_reg_info.vasip_regs.vasip_sr_size) {
		dig_buf_size = dhdp->sssr_reg_info.vasip_regs.vasip_sr_size;
	} else if ((dhdp->sssr_reg_info.length > OFFSETOF(sssr_reg_info_v1_t, dig_mem_info)) &&
		dhdp->sssr_reg_info.dig_mem_info.dig_sr_size) {
		dig_buf_size = dhdp->sssr_reg_info.dig_mem_info.dig_sr_size;
	}

	if (dhdp->sssr_dig_buf_after) {
		ret = dhd_export_debug_data((char *)dhdp->sssr_dig_buf_after,
			NULL, user_buf, dig_buf_size, &pos);
	}
	return ret;
}

int
dhd_sssr_dump_d11_buf_before(void *dev, const void *user_buf, uint32 len, int core)
{
	dhd_info_t *dhd_info = *(dhd_info_t **)netdev_priv((struct net_device *)dev);
	dhd_pub_t *dhdp = &dhd_info->pub;
	int pos = 0, ret = BCME_ERROR;

	if (dhdp->sssr_d11_before[core] &&
		dhdp->sssr_d11_outofreset[core] &&
		(dhdp->sssr_dump_mode == SSSR_DUMP_MODE_SSSR)) {
		ret = dhd_export_debug_data((char *)dhdp->sssr_d11_before[core],
			NULL, user_buf, len, &pos);
	}
	return ret;
}

int
dhd_sssr_dump_d11_buf_after(void *dev, const void *user_buf, uint32 len, int core)
{
	dhd_info_t *dhd_info = *(dhd_info_t **)netdev_priv((struct net_device *)dev);
	dhd_pub_t *dhdp = &dhd_info->pub;
	int pos = 0, ret = BCME_ERROR;

	if (dhdp->sssr_d11_after[core] &&
		dhdp->sssr_d11_outofreset[core]) {
		ret = dhd_export_debug_data((char *)dhdp->sssr_d11_after[core],
			NULL, user_buf, len, &pos);
	}
	return ret;
}
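
/* Write all collected SSSR buffers (per-D11-core and digital core,
 * before/after snapshots) to separate dump files via write_dump_to_file().
 */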
static void
dhd_sssr_dump_to_file(dhd_info_t* dhdinfo)
{
	dhd_info_t *dhd = dhdinfo;
	dhd_pub_t *dhdp;
	int i;
	char before_sr_dump[128];
	char after_sr_dump[128];
	unsigned long flags = 0;
	uint dig_buf_size = 0;

	DHD_ERROR(("%s: ENTER \n", __FUNCTION__));

	if (!dhd) {
		DHD_ERROR(("%s: dhd is NULL\n", __FUNCTION__));
		return;
	}

	dhdp = &dhd->pub;

	DHD_GENERAL_LOCK(dhdp, flags);
	DHD_BUS_BUSY_SET_IN_SSSRDUMP(dhdp);
	if (DHD_BUS_CHECK_DOWN_OR_DOWN_IN_PROGRESS(dhdp)) {
		DHD_GENERAL_UNLOCK(dhdp, flags);
		DHD_ERROR(("%s: bus is down! can't collect sssr dump. \n", __FUNCTION__));
		goto exit;
	}
	DHD_GENERAL_UNLOCK(dhdp, flags);

	for (i = 0; i < MAX_NUM_D11CORES; i++) {
		/* init the file names */
		memset(before_sr_dump, 0, sizeof(before_sr_dump));
		memset(after_sr_dump, 0, sizeof(after_sr_dump));
		snprintf(before_sr_dump, sizeof(before_sr_dump), "%s_%d_%s",
			"sssr_dump_core", i, "before_SR");
		snprintf(after_sr_dump, sizeof(after_sr_dump), "%s_%d_%s",
			"sssr_dump_core", i, "after_SR");

		if (dhdp->sssr_d11_before[i] && dhdp->sssr_d11_outofreset[i] &&
			(dhdp->sssr_dump_mode == SSSR_DUMP_MODE_SSSR)) {
			if (write_dump_to_file(dhdp, (uint8 *)dhdp->sssr_d11_before[i],
				dhdp->sssr_reg_info.mac_regs[i].sr_size, before_sr_dump)) {
				DHD_ERROR(("%s: writing SSSR MAIN dump before to the file failed\n",
					__FUNCTION__));
			}
		}
		if (dhdp->sssr_d11_after[i] && dhdp->sssr_d11_outofreset[i]) {
			if (write_dump_to_file(dhdp, (uint8 *)dhdp->sssr_d11_after[i],
				dhdp->sssr_reg_info.mac_regs[i].sr_size, after_sr_dump)) {
				DHD_ERROR(("%s: writing SSSR AUX dump after to the file failed\n",
					__FUNCTION__));
			}
		}
	}

	if (dhdp->sssr_reg_info.vasip_regs.vasip_sr_size) {
		dig_buf_size = dhdp->sssr_reg_info.vasip_regs.vasip_sr_size;
	} else if ((dhdp->sssr_reg_info.length > OFFSETOF(sssr_reg_info_v1_t, dig_mem_info)) &&
		dhdp->sssr_reg_info.dig_mem_info.dig_sr_size) {
		dig_buf_size = dhdp->sssr_reg_info.dig_mem_info.dig_sr_size;
	}

	if (dhdp->sssr_dig_buf_before && (dhdp->sssr_dump_mode == SSSR_DUMP_MODE_SSSR)) {
		if (write_dump_to_file(dhdp, (uint8 *)dhdp->sssr_dig_buf_before,
			dig_buf_size, "sssr_dump_dig_before_SR")) {
			DHD_ERROR(("%s: writing SSSR Dig dump before to the file failed\n",
				__FUNCTION__));
		}
	}

	if (dhdp->sssr_dig_buf_after) {
		if (write_dump_to_file(dhdp, (uint8 *)dhdp->sssr_dig_buf_after,
			dig_buf_size, "sssr_dump_dig_after_SR")) {
			DHD_ERROR(("%s: writing SSSR Dig VASIP dump after to the file failed\n",
				__FUNCTION__));
		}
	}

exit:
	DHD_GENERAL_LOCK(dhdp, flags);
	DHD_BUS_BUSY_CLEAR_IN_SSSRDUMP(dhdp);
	dhd_os_busbusy_wake(dhdp);
	DHD_GENERAL_UNLOCK(dhdp, flags);
}
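
/*
 * Entry point for writing SSSR dumps. When DHD_DUMP_FILE_WRITE_FROM_KERNEL
 * is not defined this returns immediately and the dumps are instead pushed
 * to the HAL layer; see the call-path note inside on why no work queue is
 * needed here.
 */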
void
dhd_write_sssr_dump(dhd_pub_t *dhdp, uint32 dump_mode)
{
	dhdp->sssr_dump_mode = dump_mode;

	/*
	 * If the kernel does not have file write access enabled,
	 * skip writing dumps to files; the dumps will be pushed to
	 * the HAL layer, which writes them into files.
	 */
#if !defined(DHD_DUMP_FILE_WRITE_FROM_KERNEL)
	return;
#endif /* !DHD_DUMP_FILE_WRITE_FROM_KERNEL */

	/*
	 * Call path: dhd_mem_dump -> dhd_sssr_dump -> dhd_write_sssr_dump.
	 * Without a work queue -
	 * DUMP_TYPE_DONGLE_INIT_FAILURE/DUMP_TYPE_DUE_TO_BT/DUMP_TYPE_SMMU_FAULT:
	 * these are called from their own handlers, not from interrupt context.
	 * With a work queue - all other DUMP_TYPEs: dhd_mem_dump is called from
	 * the work queue. Thus there is no need to dump SSSR from a work queue.
	 */
	DHD_ERROR(("%s: writing sssr dump to file... \n", __FUNCTION__));
	dhd_sssr_dump_to_file(dhdp->info);
}
#endif /* DHD_SSSR_DUMP */
#ifdef DHD_LOG_DUMP
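/*
 * Deferred work handler for debug_dump collection: flushes the fw side
 * logs, then serializes do_dhd_log_dump() with the logdump mutex; see the
 * locking note inside on why the mutex is required.
 */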
static void
dhd_log_dump(void *handle, void *event_info, u8 event)
{
	dhd_info_t *dhd = handle;
	log_dump_type_t *type = (log_dump_type_t *)event_info;

	if (!dhd || !type) {
		DHD_ERROR(("%s: dhd or type is NULL\n", __FUNCTION__));
		return;
	}

#ifdef WL_CFG80211
	/* flush the fw side logs */
	wl_flush_fw_log_buffer(dhd_linux_get_primary_netdev(&dhd->pub),
		FW_LOGSET_MASK_ALL);
#endif /* WL_CFG80211 */

	/* There are currently 3 possible contexts from which
	 * a log dump can be scheduled -
	 * 1. TRAP 2. supplicant DEBUG_DUMP pvt driver command
	 * 3. HEALTH CHECK event
	 * The concise debug info buffer is a shared resource,
	 * and in case a trap is one of the contexts, both of the
	 * scheduled work queues need to run because trap data is
	 * essential for debugging. Hence a mutex lock is acquired
	 * before calling do_dhd_log_dump().
	 */
	DHD_ERROR(("%s: calling log dump.. \n", __FUNCTION__));
	dhd_os_logdump_lock(&dhd->pub);
	DHD_OS_WAKE_LOCK(&dhd->pub);
	if (do_dhd_log_dump(&dhd->pub, type) != BCME_OK) {
		DHD_ERROR(("%s: writing debug dump to the file failed\n", __FUNCTION__));
	}
	DHD_OS_WAKE_UNLOCK(&dhd->pub);
	dhd_os_logdump_unlock(&dhd->pub);
}

void dhd_schedule_log_dump(dhd_pub_t *dhdp, void *type)
{
	DHD_ERROR(("%s: scheduling log dump.. \n", __FUNCTION__));
	dhd_deferred_schedule_work(dhdp->info->dhd_deferred_wq,
		type, DHD_WQ_WORK_DHD_LOG_DUMP,
		dhd_log_dump, DHD_WQ_WORK_PRIORITY_HIGH);
}
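
/* Print the virtual and physical address plus the size of a dump buffer.
 * Only enabled for the halting dump modes (DUMP_MEMONLY, DUMP_MEMFILE_BUGON)
 * and SMMU faults, presumably so the buffers can be located in a full RAM
 * image after the system halts.
 */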
static void
dhd_print_buf_addr(dhd_pub_t *dhdp, char *name, void *buf, unsigned int size)
{
	if ((dhdp->memdump_enabled == DUMP_MEMONLY) ||
		(dhdp->memdump_enabled == DUMP_MEMFILE_BUGON) ||
		(dhdp->memdump_type == DUMP_TYPE_SMMU_FAULT)) {
#if defined(CONFIG_ARM64)
		DHD_ERROR(("-------- %s: buf(va)=%llx, buf(pa)=%llx, bufsize=%d\n",
			name, (uint64)buf, (uint64)__virt_to_phys((ulong)buf), size));
#elif defined(__ARM_ARCH_7A__)
		DHD_ERROR(("-------- %s: buf(va)=%x, buf(pa)=%x, bufsize=%d\n",
			name, (uint32)buf, (uint32)__virt_to_phys((ulong)buf), size));
#endif /* __ARM_ARCH_7A__ */
	}
}
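
/* Print the addresses of all log dump buffers: the DLD ring buffers, the
 * ecounters and RTT debug rings, the status log buffer, extended trap
 * data, health check event data, and the concise debug buffer.
 */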
static void
dhd_log_dump_buf_addr(dhd_pub_t *dhdp, log_dump_type_t *type)
{
	int i;
	unsigned long wr_size = 0;
	struct dhd_log_dump_buf *dld_buf = &g_dld_buf[0];
	size_t log_size = 0;
	char buf_name[DHD_PRINT_BUF_NAME_LEN];
	dhd_dbg_ring_t *ring = NULL;

	BCM_REFERENCE(ring);

	for (i = 0; i < DLD_BUFFER_NUM; i++) {
		dld_buf = &g_dld_buf[i];
		log_size = (unsigned long)dld_buf->max -
			(unsigned long)dld_buf->buffer;
		if (dld_buf->wraparound) {
			wr_size = log_size;
		} else {
			wr_size = (unsigned long)dld_buf->present -
				(unsigned long)dld_buf->front;
		}
		scnprintf(buf_name, sizeof(buf_name), "dld_buf[%d]", i);
		dhd_print_buf_addr(dhdp, buf_name, dld_buf, dld_buf_size[i]);
		scnprintf(buf_name, sizeof(buf_name), "dld_buf[%d] buffer", i);
		dhd_print_buf_addr(dhdp, buf_name, dld_buf->buffer, wr_size);
		scnprintf(buf_name, sizeof(buf_name), "dld_buf[%d] present", i);
		dhd_print_buf_addr(dhdp, buf_name, dld_buf->present, wr_size);
		scnprintf(buf_name, sizeof(buf_name), "dld_buf[%d] front", i);
		dhd_print_buf_addr(dhdp, buf_name, dld_buf->front, wr_size);
	}

#ifdef EWP_ECNTRS_LOGGING
	/* periodic flushing of ecounters is NOT supported */
	if (*type == DLD_BUF_TYPE_ALL &&
		logdump_ecntr_enable &&
		dhdp->ecntr_dbg_ring) {
		ring = (dhd_dbg_ring_t *)dhdp->ecntr_dbg_ring;
		dhd_print_buf_addr(dhdp, "ecntr_dbg_ring", ring, LOG_DUMP_ECNTRS_MAX_BUFSIZE);
		dhd_print_buf_addr(dhdp, "ecntr_dbg_ring ring_buf", ring->ring_buf,
			LOG_DUMP_ECNTRS_MAX_BUFSIZE);
	}
#endif /* EWP_ECNTRS_LOGGING */

#ifdef DHD_STATUS_LOGGING
	if (dhdp->statlog) {
		dhd_print_buf_addr(dhdp, "statlog_logbuf", dhd_statlog_get_logbuf(dhdp),
			dhd_statlog_get_logbuf_len(dhdp));
	}
#endif /* DHD_STATUS_LOGGING */

#ifdef EWP_RTT_LOGGING
	/* periodic flushing of the rtt log is NOT supported */
	if (*type == DLD_BUF_TYPE_ALL &&
		logdump_rtt_enable &&
		dhdp->rtt_dbg_ring) {
		ring = (dhd_dbg_ring_t *)dhdp->rtt_dbg_ring;
		dhd_print_buf_addr(dhdp, "rtt_dbg_ring", ring, LOG_DUMP_RTT_MAX_BUFSIZE);
		dhd_print_buf_addr(dhdp, "rtt_dbg_ring ring_buf", ring->ring_buf,
			LOG_DUMP_RTT_MAX_BUFSIZE);
	}
#endif /* EWP_RTT_LOGGING */

#ifdef BCMPCIE
	if (dhdp->dongle_trap_occured && dhdp->extended_trap_data) {
		dhd_print_buf_addr(dhdp, "extended_trap_data", dhdp->extended_trap_data,
			BCMPCIE_EXT_TRAP_DATA_MAXLEN);
	}
#endif /* BCMPCIE */

#if defined(DHD_FW_COREDUMP) && defined(DNGL_EVENT_SUPPORT)
	/* if a health check event was received */
	if (dhdp->memdump_type == DUMP_TYPE_DONGLE_HOST_EVENT) {
		dhd_print_buf_addr(dhdp, "health_chk_event_data", dhdp->health_chk_event_data,
			HEALTH_CHK_BUF_SIZE);
	}
#endif /* DHD_FW_COREDUMP && DNGL_EVENT_SUPPORT */

	/* append the concise debug information */
	if (dhdp->concise_dbg_buf) {
		dhd_print_buf_addr(dhdp, "concise_dbg_buf", dhdp->concise_dbg_buf,
			CONCISE_DUMP_BUFLEN);
	}
}
#ifdef CUSTOMER_HW4_DEBUG
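/*
 * Print a log buffer to the kernel log in chunks of
 * DHD_LOG_DUMP_MAX_TEMP_BUFFER_SIZE bytes, NUL-terminating each chunk in a
 * temporary buffer since the source region is not NUL-terminated.
 */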
static void
dhd_log_dump_print_to_kmsg(char *bufptr, unsigned long len)
{
	char tmp_buf[DHD_LOG_DUMP_MAX_TEMP_BUFFER_SIZE + 1];
	char *end = NULL;
	unsigned long plen = 0;

	if (!bufptr || !len)
		return;

	memset(tmp_buf, 0, DHD_LOG_DUMP_MAX_TEMP_BUFFER_SIZE);
	end = bufptr + len;
	while (bufptr < end) {
		if ((bufptr + DHD_LOG_DUMP_MAX_TEMP_BUFFER_SIZE) < end) {
			memcpy(tmp_buf, bufptr, DHD_LOG_DUMP_MAX_TEMP_BUFFER_SIZE);
			tmp_buf[DHD_LOG_DUMP_MAX_TEMP_BUFFER_SIZE] = '\0';
			printf("%s", tmp_buf);
			bufptr += DHD_LOG_DUMP_MAX_TEMP_BUFFER_SIZE;
		} else {
			plen = (unsigned long)end - (unsigned long)bufptr;
			memcpy(tmp_buf, bufptr, plen);
			tmp_buf[plen] = '\0';
			printf("%s", tmp_buf);
			bufptr += plen;
		}
	}
}
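
/* Print the last 'tail_len' bytes of a log dump buffer to the kernel log.
 * If the tail wraps around the ring buffer it is printed in two pieces:
 * the older piece at the end of the buffer (flush_ptr2) first, then the
 * newer piece starting at 'front' (flush_ptr1).
 */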
static void
dhd_log_dump_print_tail(dhd_pub_t *dhdp,
	struct dhd_log_dump_buf *dld_buf,
	uint tail_len)
{
	char *flush_ptr1 = NULL, *flush_ptr2 = NULL;
	unsigned long len_flush1 = 0, len_flush2 = 0;
	unsigned long flags = 0;

	/* need to hold the lock before accessing the 'present' and 'front' ptrs */
	spin_lock_irqsave(&dld_buf->lock, flags);
	flush_ptr1 = dld_buf->present - tail_len;
	if (flush_ptr1 >= dld_buf->front) {
		/* tail content is within the buffer */
		flush_ptr2 = NULL;
		len_flush1 = tail_len;
	} else if (dld_buf->wraparound) {
		/* tail content spans the buffer length, i.e., wraps around */
		flush_ptr1 = dld_buf->front;
		len_flush1 = (unsigned long)dld_buf->present - (unsigned long)flush_ptr1;
		len_flush2 = (unsigned long)tail_len - len_flush1;
		flush_ptr2 = (char *)((unsigned long)dld_buf->max -
			(unsigned long)len_flush2);
	} else {
		/* the amount of logs in the buffer is less than the tail size */
		flush_ptr1 = dld_buf->front;
		flush_ptr2 = NULL;
		len_flush1 = (unsigned long)dld_buf->present - (unsigned long)dld_buf->front;
	}
	spin_unlock_irqrestore(&dld_buf->lock, flags);

	printf("\n================= LOG_DUMP tail =================\n");
	if (flush_ptr2) {
		dhd_log_dump_print_to_kmsg(flush_ptr2, len_flush2);
	}
	dhd_log_dump_print_to_kmsg(flush_ptr1, len_flush1);
	printf("\n===================================================\n");
}
#endif /* CUSTOMER_HW4_DEBUG */
#ifdef DHD_SSSR_DUMP
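/* Fill 'arr_len' with the length of each SSSR section (per-core D11
 * before/after and digital before/after) that has actually been collected,
 * so that callers such as dhd_nla_put_sssr_dump_len() below can report the
 * sizes to the HAL.
 */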
int
dhdpcie_sssr_dump_get_before_after_len(dhd_pub_t *dhd, uint32 *arr_len)
{
	int i = 0;

	DHD_ERROR(("%s\n", __FUNCTION__));

	/* core 0 */
	i = 0;
	if (dhd->sssr_d11_before[i] && dhd->sssr_d11_outofreset[i] &&
		(dhd->sssr_dump_mode == SSSR_DUMP_MODE_SSSR)) {
		arr_len[SSSR_C0_D11_BEFORE] = (dhd->sssr_reg_info.mac_regs[i].sr_size);
		DHD_ERROR(("%s: arr_len[SSSR_C0_D11_BEFORE] : %d\n", __FUNCTION__,
			arr_len[SSSR_C0_D11_BEFORE]));
#ifdef DHD_LOG_DUMP
		dhd_print_buf_addr(dhd, "SSSR_C0_D11_BEFORE",
			dhd->sssr_d11_before[i], arr_len[SSSR_C0_D11_BEFORE]);
#endif /* DHD_LOG_DUMP */
	}
	if (dhd->sssr_d11_after[i] && dhd->sssr_d11_outofreset[i]) {
		arr_len[SSSR_C0_D11_AFTER] = (dhd->sssr_reg_info.mac_regs[i].sr_size);
		DHD_ERROR(("%s: arr_len[SSSR_C0_D11_AFTER] : %d\n", __FUNCTION__,
			arr_len[SSSR_C0_D11_AFTER]));
#ifdef DHD_LOG_DUMP
		dhd_print_buf_addr(dhd, "SSSR_C0_D11_AFTER",
			dhd->sssr_d11_after[i], arr_len[SSSR_C0_D11_AFTER]);
#endif /* DHD_LOG_DUMP */
	}

	/* core 1 */
	i = 1;
	if (dhd->sssr_d11_before[i] && dhd->sssr_d11_outofreset[i] &&
		(dhd->sssr_dump_mode == SSSR_DUMP_MODE_SSSR)) {
		arr_len[SSSR_C1_D11_BEFORE] = (dhd->sssr_reg_info.mac_regs[i].sr_size);
		DHD_ERROR(("%s: arr_len[SSSR_C1_D11_BEFORE] : %d\n", __FUNCTION__,
			arr_len[SSSR_C1_D11_BEFORE]));
#ifdef DHD_LOG_DUMP
		dhd_print_buf_addr(dhd, "SSSR_C1_D11_BEFORE",
			dhd->sssr_d11_before[i], arr_len[SSSR_C1_D11_BEFORE]);
#endif /* DHD_LOG_DUMP */
	}
	if (dhd->sssr_d11_after[i] && dhd->sssr_d11_outofreset[i]) {
		arr_len[SSSR_C1_D11_AFTER] = (dhd->sssr_reg_info.mac_regs[i].sr_size);
		DHD_ERROR(("%s: arr_len[SSSR_C1_D11_AFTER] : %d\n", __FUNCTION__,
			arr_len[SSSR_C1_D11_AFTER]));
#ifdef DHD_LOG_DUMP
		dhd_print_buf_addr(dhd, "SSSR_C1_D11_AFTER",
			dhd->sssr_d11_after[i], arr_len[SSSR_C1_D11_AFTER]);
#endif /* DHD_LOG_DUMP */
	}

	if (dhd->sssr_reg_info.vasip_regs.vasip_sr_size) {
		arr_len[SSSR_DIG_BEFORE] = (dhd->sssr_reg_info.vasip_regs.vasip_sr_size);
		arr_len[SSSR_DIG_AFTER] = (dhd->sssr_reg_info.vasip_regs.vasip_sr_size);
		DHD_ERROR(("%s: arr_len[SSSR_DIG_BEFORE] : %d\n", __FUNCTION__,
			arr_len[SSSR_DIG_BEFORE]));
		DHD_ERROR(("%s: arr_len[SSSR_DIG_AFTER] : %d\n", __FUNCTION__,
			arr_len[SSSR_DIG_AFTER]));
#ifdef DHD_LOG_DUMP
		if (dhd->sssr_dig_buf_before) {
			dhd_print_buf_addr(dhd, "SSSR_DIG_BEFORE",
				dhd->sssr_dig_buf_before, arr_len[SSSR_DIG_BEFORE]);
		}
		if (dhd->sssr_dig_buf_after) {
			dhd_print_buf_addr(dhd, "SSSR_DIG_AFTER",
				dhd->sssr_dig_buf_after, arr_len[SSSR_DIG_AFTER]);
		}
#endif /* DHD_LOG_DUMP */
	} else if ((dhd->sssr_reg_info.length > OFFSETOF(sssr_reg_info_v1_t, dig_mem_info)) &&
		dhd->sssr_reg_info.dig_mem_info.dig_sr_addr) {
		arr_len[SSSR_DIG_BEFORE] = (dhd->sssr_reg_info.dig_mem_info.dig_sr_size);
		arr_len[SSSR_DIG_AFTER] = (dhd->sssr_reg_info.dig_mem_info.dig_sr_size);
		DHD_ERROR(("%s: arr_len[SSSR_DIG_BEFORE] : %d\n", __FUNCTION__,
			arr_len[SSSR_DIG_BEFORE]));
		DHD_ERROR(("%s: arr_len[SSSR_DIG_AFTER] : %d\n", __FUNCTION__,
			arr_len[SSSR_DIG_AFTER]));
#ifdef DHD_LOG_DUMP
		if (dhd->sssr_dig_buf_before) {
			dhd_print_buf_addr(dhd, "SSSR_DIG_BEFORE",
				dhd->sssr_dig_buf_before, arr_len[SSSR_DIG_BEFORE]);
		}
		if (dhd->sssr_dig_buf_after) {
			dhd_print_buf_addr(dhd, "SSSR_DIG_AFTER",
				dhd->sssr_dig_buf_after, arr_len[SSSR_DIG_AFTER]);
		}
#endif /* DHD_LOG_DUMP */
	}
	return BCME_OK;
}

void
dhd_nla_put_sssr_dump_len(void *ndev, uint32 *arr_len)
{
	dhd_info_t *dhd_info = *(dhd_info_t **)netdev_priv((struct net_device *)ndev);
	dhd_pub_t *dhdp = &dhd_info->pub;

	if (dhdp->sssr_dump_collected) {
		dhdpcie_sssr_dump_get_before_after_len(dhdp, arr_len);
	}
}
#endif /* DHD_SSSR_DUMP */
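
/*
 * The dhd_get_*_len() helpers below compute the size in bytes each debug
 * dump section will occupy (section header plus payload), so that
 * do_dhd_log_dump() and the HAL query path can size their writes and
 * buffers before the corresponding dhd_print_*_data() call.
 */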
uint32
dhd_get_time_str_len(void)
{
	char *ts = NULL, time_str[128];

	ts = dhd_log_dump_get_timestamp();
	snprintf(time_str, sizeof(time_str),
		"\n\n ========== LOG DUMP TAKEN AT : %s =========\n", ts);
	return strlen(time_str);
}

#ifdef BCMPCIE
uint32
dhd_get_ext_trap_len(void *ndev, dhd_pub_t *dhdp)
{
	int length = 0;
	log_dump_section_hdr_t sec_hdr;
	dhd_info_t *dhd_info;

	if (ndev) {
		dhd_info = *(dhd_info_t **)netdev_priv((struct net_device *)ndev);
		dhdp = &dhd_info->pub;
	}
	if (!dhdp)
		return length;

	if (dhdp->extended_trap_data) {
		length = (strlen(EXT_TRAP_LOG_HDR)
			+ sizeof(sec_hdr) + BCMPCIE_EXT_TRAP_DATA_MAXLEN);
	}
	return length;
}
#endif /* BCMPCIE */

#if defined(DHD_FW_COREDUMP) && defined(DNGL_EVENT_SUPPORT)
uint32
dhd_get_health_chk_len(void *ndev, dhd_pub_t *dhdp)
{
	int length = 0;
	log_dump_section_hdr_t sec_hdr;
	dhd_info_t *dhd_info;

	if (ndev) {
		dhd_info = *(dhd_info_t **)netdev_priv((struct net_device *)ndev);
		dhdp = &dhd_info->pub;
	}
	if (!dhdp)
		return length;

	if (dhdp->memdump_type == DUMP_TYPE_DONGLE_HOST_EVENT) {
		length = (strlen(HEALTH_CHK_LOG_HDR)
			+ sizeof(sec_hdr) + HEALTH_CHK_BUF_SIZE);
	}
	return length;
}
#endif /* DHD_FW_COREDUMP && DNGL_EVENT_SUPPORT */
uint32
dhd_get_dhd_dump_len(void *ndev, dhd_pub_t *dhdp)
{
	int length = 0;
	log_dump_section_hdr_t sec_hdr;
	dhd_info_t *dhd_info;
	int remain_len = 0;

	if (ndev) {
		dhd_info = *(dhd_info_t **)netdev_priv((struct net_device *)ndev);
		dhdp = &dhd_info->pub;
	}
	if (!dhdp)
		return length;

	if (dhdp->concise_dbg_buf) {
		remain_len = dhd_dump(dhdp, (char *)dhdp->concise_dbg_buf, CONCISE_DUMP_BUFLEN);
		if (remain_len <= 0) {
			DHD_ERROR(("%s: error getting concise debug info !\n",
				__FUNCTION__));
			return length;
		}
		length = (strlen(DHD_DUMP_LOG_HDR) + sizeof(sec_hdr) +
			(CONCISE_DUMP_BUFLEN - remain_len));
	}
	return length;
}

uint32
dhd_get_cookie_log_len(void *ndev, dhd_pub_t *dhdp)
{
	int length = 0;
	dhd_info_t *dhd_info;

	if (ndev) {
		dhd_info = *(dhd_info_t **)netdev_priv((struct net_device *)ndev);
		dhdp = &dhd_info->pub;
	}
	if (!dhdp)
		return length;

	if (dhdp->logdump_cookie && dhd_logdump_cookie_count(dhdp) > 0) {
		length = dhd_log_dump_cookie_len(dhdp);
	}
	return length;
}
#ifdef DHD_DUMP_PCIE_RINGS
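/* Length of the flowring dump section: ring summary text plus the worst
 * case size of every H2D/D2H ring (item size times max items, with the TX
 * post rings multiplied by the number of H2D flowrings).
 */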
uint32
dhd_get_flowring_len(void *ndev, dhd_pub_t *dhdp)
{
	int length = 0;
	log_dump_section_hdr_t sec_hdr;
	dhd_info_t *dhd_info;
	uint16 h2d_flowrings_total;
	int remain_len = 0;

	if (ndev) {
		dhd_info = *(dhd_info_t **)netdev_priv((struct net_device *)ndev);
		dhdp = &dhd_info->pub;
	}
	if (!dhdp)
		return length;

	if (dhdp->concise_dbg_buf) {
		remain_len = dhd_dump(dhdp, (char *)dhdp->concise_dbg_buf, CONCISE_DUMP_BUFLEN);
		if (remain_len <= 0) {
			DHD_ERROR(("%s: error getting concise debug info !\n",
				__FUNCTION__));
			return length;
		}
	}
	length += strlen(FLOWRING_DUMP_HDR);
	length += CONCISE_DUMP_BUFLEN - remain_len;
	length += sizeof(sec_hdr);

	h2d_flowrings_total = dhd_get_max_flow_rings(dhdp);
	length += ((H2DRING_TXPOST_ITEMSIZE
		* H2DRING_TXPOST_MAX_ITEM * h2d_flowrings_total)
		+ (D2HRING_TXCMPLT_ITEMSIZE * D2HRING_TXCMPLT_MAX_ITEM)
		+ (H2DRING_RXPOST_ITEMSIZE * H2DRING_RXPOST_MAX_ITEM)
		+ (D2HRING_RXCMPLT_ITEMSIZE * D2HRING_RXCMPLT_MAX_ITEM)
		+ (H2DRING_CTRL_SUB_ITEMSIZE * H2DRING_CTRL_SUB_MAX_ITEM)
		+ (D2HRING_CTRL_CMPLT_ITEMSIZE * D2HRING_CTRL_CMPLT_MAX_ITEM)
#ifdef EWP_EDL
		+ (D2HRING_EDL_HDR_SIZE * D2HRING_EDL_MAX_ITEM));
#else
		+ (H2DRING_INFO_BUFPOST_ITEMSIZE * H2DRING_DYNAMIC_INFO_MAX_ITEM)
		+ (D2HRING_INFO_BUFCMPLT_ITEMSIZE * D2HRING_DYNAMIC_INFO_MAX_ITEM));
#endif /* EWP_EDL */
	return length;
}
#endif /* DHD_DUMP_PCIE_RINGS */
#ifdef EWP_ECNTRS_LOGGING
uint32
dhd_get_ecntrs_len(void *ndev, dhd_pub_t *dhdp)
{
	dhd_info_t *dhd_info;
	log_dump_section_hdr_t sec_hdr;
	int length = 0;
	dhd_dbg_ring_t *ring;

	if (ndev) {
		dhd_info = *(dhd_info_t **)netdev_priv((struct net_device *)ndev);
		dhdp = &dhd_info->pub;
	}
	if (!dhdp)
		return length;

	if (logdump_ecntr_enable && dhdp->ecntr_dbg_ring) {
		ring = (dhd_dbg_ring_t *)dhdp->ecntr_dbg_ring;
		length = ring->ring_size + strlen(ECNTRS_LOG_HDR) + sizeof(sec_hdr);
	}
	return length;
}
#endif /* EWP_ECNTRS_LOGGING */

#ifdef EWP_RTT_LOGGING
uint32
dhd_get_rtt_len(void *ndev, dhd_pub_t *dhdp)
{
	dhd_info_t *dhd_info;
	log_dump_section_hdr_t sec_hdr;
	int length = 0;
	dhd_dbg_ring_t *ring;

	if (ndev) {
		dhd_info = *(dhd_info_t **)netdev_priv((struct net_device *)ndev);
		dhdp = &dhd_info->pub;
	}
	if (!dhdp)
		return length;

	if (logdump_rtt_enable && dhdp->rtt_dbg_ring) {
		ring = (dhd_dbg_ring_t *)dhdp->rtt_dbg_ring;
		length = ring->ring_size + strlen(RTT_LOG_HDR) + sizeof(sec_hdr);
	}
	return length;
}
#endif /* EWP_RTT_LOGGING */
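
/* Write one DLD log buffer to the file or user buffer: the section header
 * string, the binary section header, then the remaining 'len' bytes of the
 * buffer contents.
 */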
int
dhd_get_dld_log_dump(void *dev, dhd_pub_t *dhdp, const void *user_buf,
	void *fp, uint32 len, int type, void *pos)
{
	int ret = BCME_OK;
	struct dhd_log_dump_buf *dld_buf;
	log_dump_section_hdr_t sec_hdr;
	dhd_info_t *dhd_info;

	dld_buf = &g_dld_buf[type];

	if (dev) {
		dhd_info = *(dhd_info_t **)netdev_priv((struct net_device *)dev);
		dhdp = &dhd_info->pub;
	} else if (!dhdp) {
		return BCME_ERROR;
	}

	DHD_ERROR(("%s: ENTER \n", __FUNCTION__));

	dhd_init_sec_hdr(&sec_hdr);

	/* write the section header first */
	ret = dhd_export_debug_data(dld_hdrs[type].hdr_str, fp, user_buf,
		strlen(dld_hdrs[type].hdr_str), pos);
	if (ret < 0)
		goto exit;
	len -= (uint32)strlen(dld_hdrs[type].hdr_str);
	len -= (uint32)sizeof(sec_hdr);
	sec_hdr.type = dld_hdrs[type].sec_type;
	sec_hdr.length = len;
	ret = dhd_export_debug_data((char *)&sec_hdr, fp, user_buf, sizeof(sec_hdr), pos);
	if (ret < 0)
		goto exit;
	ret = dhd_export_debug_data(dld_buf->buffer, fp, user_buf, len, pos);
	if (ret < 0)
		goto exit;

exit:
	return ret;
}
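
/* Flush firmware event/log work (logtrace, EDL) so that all pending fw
 * logs are in the host buffers before the dump is written; also grabs the
 * ETD preserve logs on a trap and, on CUSTOMER_HW4 builds, prints the tail
 * of the preserve buffer to the kernel log.
 */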
static int
dhd_log_flush(dhd_pub_t *dhdp, log_dump_type_t *type)
{
	unsigned long flags = 0;
#ifdef EWP_EDL
	int i = 0;
#endif /* EWP_EDL */
	dhd_info_t *dhd_info = NULL;

	/* If dhdp is NULL, it is extremely unlikely that a log dump will be
	 * scheduled, so not freeing 'type' here is ok; even if we wanted to
	 * free 'type' we could not, since 'dhdp->osh' is unavailable
	 * when dhdp is NULL.
	 */
	if (!dhdp || !type) {
		if (dhdp) {
			DHD_GENERAL_LOCK(dhdp, flags);
			DHD_BUS_BUSY_CLEAR_IN_LOGDUMP(dhdp);
			dhd_os_busbusy_wake(dhdp);
			DHD_GENERAL_UNLOCK(dhdp, flags);
		}
		return BCME_ERROR;
	}

	dhd_info = (dhd_info_t *)dhdp->info;

	/* in case of a trap, get the preserve logs from ETD */
#if defined(BCMPCIE) && defined(EWP_ETD_PRSRV_LOGS)
	if (dhdp->dongle_trap_occured &&
		dhdp->extended_trap_data) {
		dhdpcie_get_etd_preserve_logs(dhdp, (uint8 *)dhdp->extended_trap_data,
			&dhd_info->event_data);
	}
#endif /* BCMPCIE && EWP_ETD_PRSRV_LOGS */

	/* flush the event work items to get any fw events/logs;
	 * flush_work is a blocking call
	 */
#ifdef EWP_EDL
	if (dhd_info->pub.dongle_edl_support) {
		/* wait till the existing edl items are processed */
		dhd_flush_logtrace_process(dhd_info);
		/* dhd_flush_logtrace_process will ensure that the work items in the
		 * EDL ring from rd to wr are processed. But if wr had wrapped
		 * around, only the work items from rd to ring-end are processed.
		 * So, to ensure that the work items at the beginning of the ring
		 * are also processed in the wrap-around case, call it twice.
		 */
		for (i = 0; i < 2; i++) {
			/* blocks till the edl items are processed */
			dhd_flush_logtrace_process(dhd_info);
		}
	} else {
		dhd_flush_logtrace_process(dhd_info);
	}
#else
	dhd_flush_logtrace_process(dhd_info);
#endif /* EWP_EDL */

#ifdef CUSTOMER_HW4_DEBUG
	/* print the last 'x' KB of preserve buffer data to the kmsg console;
	 * this is to address cases where debug_dump is not
	 * available for debugging
	 */
	dhd_log_dump_print_tail(dhdp,
		&g_dld_buf[DLD_BUF_TYPE_PRESERVE], logdump_prsrv_tailsize);
#endif /* CUSTOMER_HW4_DEBUG */
	return BCME_OK;
}
int
dhd_get_debug_dump_file_name(void *dev, dhd_pub_t *dhdp, char *dump_path, int size)
{
	dhd_info_t *dhd_info;

	if (dev) {
		dhd_info = *(dhd_info_t **)netdev_priv((struct net_device *)dev);
		dhdp = &dhd_info->pub;
	}
	if (!dhdp)
		return BCME_ERROR;

	memset(dump_path, 0, size);

	switch (dhdp->debug_dump_subcmd) {
	case CMD_UNWANTED:
		snprintf(dump_path, size, "%s",
			DHD_COMMON_DUMP_PATH DHD_DEBUG_DUMP_TYPE
			DHD_DUMP_SUBSTR_UNWANTED);
		break;
	case CMD_DISCONNECTED:
		snprintf(dump_path, size, "%s",
			DHD_COMMON_DUMP_PATH DHD_DEBUG_DUMP_TYPE
			DHD_DUMP_SUBSTR_DISCONNECTED);
		break;
	default:
		snprintf(dump_path, size, "%s",
			DHD_COMMON_DUMP_PATH DHD_DEBUG_DUMP_TYPE);
	}

	if (!dhdp->logdump_periodic_flush) {
		get_debug_dump_time(dhdp->debug_dump_time_str);
		snprintf(dump_path + strlen(dump_path),
			size - strlen(dump_path),
			"_%s", dhdp->debug_dump_time_str);
	}
	return BCME_OK;
}
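
/* Return the number of bytes the given DLD buffer will contribute to the
 * debug dump: the written region (the whole buffer on wraparound) plus the
 * binary section header and the header string.
 */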
uint32
dhd_get_dld_len(int log_type)
{
	unsigned long wr_size = 0;
	unsigned long buf_size = 0;
	unsigned long flags = 0;
	struct dhd_log_dump_buf *dld_buf;
	log_dump_section_hdr_t sec_hdr;

	/* calculate the length of the log */
	dld_buf = &g_dld_buf[log_type];
	buf_size = (unsigned long)dld_buf->max -
		(unsigned long)dld_buf->buffer;

	if (dld_buf->wraparound) {
		wr_size = buf_size;
	} else {
		/* need to hold the lock before accessing the 'present' and 'front' ptrs */
		spin_lock_irqsave(&dld_buf->lock, flags);
		wr_size = (unsigned long)dld_buf->present -
			(unsigned long)dld_buf->front;
		spin_unlock_irqrestore(&dld_buf->lock, flags);
	}
	return (wr_size + sizeof(sec_hdr) + strlen(dld_hdrs[log_type].hdr_str));
}
static void
dhd_get_time_str(dhd_pub_t *dhdp, char *time_str, int size)
{
	char *ts = NULL;

	memset(time_str, 0, size);
	ts = dhd_log_dump_get_timestamp();
	snprintf(time_str, size,
		"\n\n ========== LOG DUMP TAKEN AT : %s =========\n", ts);
}

int
dhd_print_time_str(const void *user_buf, void *fp, uint32 len, void *pos)
{
	char *ts = NULL;
	int ret = 0;
	char time_str[128];

	memset_s(time_str, sizeof(time_str), 0, sizeof(time_str));
	ts = dhd_log_dump_get_timestamp();
	snprintf(time_str, sizeof(time_str),
		"\n\n ========== LOG DUMP TAKEN AT : %s =========\n", ts);

	/* write the timestamp hdr to the file first */
	ret = dhd_export_debug_data(time_str, fp, user_buf, strlen(time_str), pos);
	if (ret < 0) {
		DHD_ERROR(("write file error, err = %d\n", ret));
	}
	return ret;
}
#if defined(DHD_FW_COREDUMP) && defined(DNGL_EVENT_SUPPORT)
int
dhd_print_health_chk_data(void *dev, dhd_pub_t *dhdp, const void *user_buf,
	void *fp, uint32 len, void *pos)
{
	int ret = BCME_OK;
	log_dump_section_hdr_t sec_hdr;
	dhd_info_t *dhd_info;

	if (dev) {
		dhd_info = *(dhd_info_t **)netdev_priv((struct net_device *)dev);
		dhdp = &dhd_info->pub;
	}
	if (!dhdp)
		return BCME_ERROR;

	dhd_init_sec_hdr(&sec_hdr);

	if (dhdp->memdump_type == DUMP_TYPE_DONGLE_HOST_EVENT) {
		/* write the section header first */
		ret = dhd_export_debug_data(HEALTH_CHK_LOG_HDR, fp, user_buf,
			strlen(HEALTH_CHK_LOG_HDR), pos);
		if (ret < 0)
			goto exit;

		len -= (uint32)strlen(HEALTH_CHK_LOG_HDR);
		sec_hdr.type = LOG_DUMP_SECTION_HEALTH_CHK;
		sec_hdr.length = HEALTH_CHK_BUF_SIZE;
		ret = dhd_export_debug_data((char *)&sec_hdr, fp, user_buf, sizeof(sec_hdr), pos);
		if (ret < 0)
			goto exit;

		len -= (uint32)sizeof(sec_hdr);
		/* write the log */
		ret = dhd_export_debug_data((char *)dhdp->health_chk_event_data, fp,
			user_buf, len, pos);
		if (ret < 0)
			goto exit;
	}
exit:
	return ret;
}
#endif /* DHD_FW_COREDUMP && DNGL_EVENT_SUPPORT */
#ifdef BCMPCIE
int
dhd_print_ext_trap_data(void *dev, dhd_pub_t *dhdp, const void *user_buf,
	void *fp, uint32 len, void *pos)
{
	int ret = BCME_OK;
	log_dump_section_hdr_t sec_hdr;
	dhd_info_t *dhd_info;

	if (dev) {
		dhd_info = *(dhd_info_t **)netdev_priv((struct net_device *)dev);
		dhdp = &dhd_info->pub;
	}
	if (!dhdp)
		return BCME_ERROR;

	dhd_init_sec_hdr(&sec_hdr);

	/* append extended trap data to the file in case of traps */
	if (dhdp->dongle_trap_occured &&
		dhdp->extended_trap_data) {
		/* write the section header first */
		ret = dhd_export_debug_data(EXT_TRAP_LOG_HDR, fp, user_buf,
			strlen(EXT_TRAP_LOG_HDR), pos);
		if (ret < 0)
			goto exit;

		len -= (uint32)strlen(EXT_TRAP_LOG_HDR);
		sec_hdr.type = LOG_DUMP_SECTION_EXT_TRAP;
		sec_hdr.length = BCMPCIE_EXT_TRAP_DATA_MAXLEN;
		ret = dhd_export_debug_data((uint8 *)&sec_hdr, fp, user_buf, sizeof(sec_hdr), pos);
		if (ret < 0)
			goto exit;

		len -= (uint32)sizeof(sec_hdr);
		/* write the log */
		ret = dhd_export_debug_data((uint8 *)dhdp->extended_trap_data, fp,
			user_buf, len, pos);
		if (ret < 0)
			goto exit;
	}
exit:
	return ret;
}
#endif /* BCMPCIE */
int
dhd_print_dump_data(void *dev, dhd_pub_t *dhdp, const void *user_buf,
	void *fp, uint32 len, void *pos)
{
	int ret = BCME_OK;
	log_dump_section_hdr_t sec_hdr;
	dhd_info_t *dhd_info;

	if (dev) {
		dhd_info = *(dhd_info_t **)netdev_priv((struct net_device *)dev);
		dhdp = &dhd_info->pub;
	}
	if (!dhdp)
		return BCME_ERROR;

	dhd_init_sec_hdr(&sec_hdr);

	ret = dhd_export_debug_data(DHD_DUMP_LOG_HDR, fp, user_buf, strlen(DHD_DUMP_LOG_HDR), pos);
	if (ret < 0)
		goto exit;

	len -= (uint32)strlen(DHD_DUMP_LOG_HDR);
	sec_hdr.type = LOG_DUMP_SECTION_DHD_DUMP;
	sec_hdr.length = len;
	ret = dhd_export_debug_data((char *)&sec_hdr, fp, user_buf, sizeof(sec_hdr), pos);
	if (ret < 0)
		goto exit;

	len -= (uint32)sizeof(sec_hdr);

	if (dhdp->concise_dbg_buf) {
		dhd_dump(dhdp, (char *)dhdp->concise_dbg_buf, CONCISE_DUMP_BUFLEN);
		ret = dhd_export_debug_data(dhdp->concise_dbg_buf, fp, user_buf, len, pos);
		if (ret < 0)
			goto exit;
	}
exit:
	return ret;
}

int
dhd_print_cookie_data(void *dev, dhd_pub_t *dhdp, const void *user_buf,
	void *fp, uint32 len, void *pos)
{
	int ret = BCME_OK;
	dhd_info_t *dhd_info;

	if (dev) {
		dhd_info = *(dhd_info_t **)netdev_priv((struct net_device *)dev);
		dhdp = &dhd_info->pub;
	}
	if (!dhdp)
		return BCME_ERROR;

	if (dhdp->logdump_cookie && dhd_logdump_cookie_count(dhdp) > 0) {
		ret = dhd_log_dump_cookie_to_file(dhdp, fp, user_buf, (unsigned long *)pos);
	}
	return ret;
}
#ifdef DHD_DUMP_PCIE_RINGS
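/* Write the flowring dump section: header string, ring summary from the
 * concise debug buffer, binary section header, then the raw H2D/D2H ring
 * contents via dhd_d2h_h2d_ring_dump().
 */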
int
dhd_print_flowring_data(void *dev, dhd_pub_t *dhdp, const void *user_buf,
	void *fp, uint32 len, void *pos)
{
	log_dump_section_hdr_t sec_hdr;
	int ret = BCME_OK;
	uint32 remain_len = 0;
	dhd_info_t *dhd_info;

	if (dev) {
		dhd_info = *(dhd_info_t **)netdev_priv((struct net_device *)dev);
		dhdp = &dhd_info->pub;
	}
	if (!dhdp)
		return BCME_ERROR;

	dhd_init_sec_hdr(&sec_hdr);

	/* clear the buffer before filling it, so that the ring summary
	 * exported below is the freshly generated one
	 */
	memset(dhdp->concise_dbg_buf, 0, CONCISE_DUMP_BUFLEN);
	remain_len = dhd_dump(dhdp, (char *)dhdp->concise_dbg_buf, CONCISE_DUMP_BUFLEN);

	/* write the section header first */
	ret = dhd_export_debug_data(FLOWRING_DUMP_HDR, fp, user_buf,
		strlen(FLOWRING_DUMP_HDR), pos);
	if (ret < 0)
		goto exit;

	/* write the ring summary */
	ret = dhd_export_debug_data(dhdp->concise_dbg_buf, fp, user_buf,
		(CONCISE_DUMP_BUFLEN - remain_len), pos);
	if (ret < 0)
		goto exit;

	sec_hdr.type = LOG_DUMP_SECTION_FLOWRING;
	sec_hdr.length = len;
	ret = dhd_export_debug_data((char *)&sec_hdr, fp, user_buf, sizeof(sec_hdr), pos);
	if (ret < 0)
		goto exit;

	/* write the log */
	ret = dhd_d2h_h2d_ring_dump(dhdp, fp, user_buf, (unsigned long *)pos, TRUE);
	if (ret < 0)
		goto exit;

exit:
	return ret;
}
#endif /* DHD_DUMP_PCIE_RINGS */
#ifdef EWP_ECNTRS_LOGGING
int
dhd_print_ecntrs_data(void *dev, dhd_pub_t *dhdp, const void *user_buf,
	void *fp, uint32 len, void *pos)
{
	log_dump_section_hdr_t sec_hdr;
	int ret = BCME_OK;
	dhd_info_t *dhd_info;

	if (dev) {
		dhd_info = *(dhd_info_t **)netdev_priv((struct net_device *)dev);
		dhdp = &dhd_info->pub;
	}
	if (!dhdp)
		return BCME_ERROR;

	dhd_init_sec_hdr(&sec_hdr);

	if (logdump_ecntr_enable &&
		dhdp->ecntr_dbg_ring) {
		sec_hdr.type = LOG_DUMP_SECTION_ECNTRS;
		ret = dhd_dump_debug_ring(dhdp, dhdp->ecntr_dbg_ring,
			user_buf, &sec_hdr, ECNTRS_LOG_HDR, len, LOG_DUMP_SECTION_ECNTRS);
	}
	return ret;
}
#endif /* EWP_ECNTRS_LOGGING */

#ifdef EWP_RTT_LOGGING
int
dhd_print_rtt_data(void *dev, dhd_pub_t *dhdp, const void *user_buf,
	void *fp, uint32 len, void *pos)
{
	log_dump_section_hdr_t sec_hdr;
	int ret = BCME_OK;
	dhd_info_t *dhd_info;

	if (dev) {
		dhd_info = *(dhd_info_t **)netdev_priv((struct net_device *)dev);
		dhdp = &dhd_info->pub;
	}
	if (!dhdp)
		return BCME_ERROR;

	dhd_init_sec_hdr(&sec_hdr);

	if (logdump_rtt_enable && dhdp->rtt_dbg_ring) {
		ret = dhd_dump_debug_ring(dhdp, dhdp->rtt_dbg_ring,
			user_buf, &sec_hdr, RTT_LOG_HDR, len, LOG_DUMP_SECTION_RTT);
	}
	return ret;
}
#endif /* EWP_RTT_LOGGING */
#ifdef DHD_STATUS_LOGGING
int
dhd_print_status_log_data(void *dev, dhd_pub_t *dhdp, const void *user_buf,
	void *fp, uint32 len, void *pos)
{
	dhd_info_t *dhd_info;

	if (dev) {
		dhd_info = *(dhd_info_t **)netdev_priv((struct net_device *)dev);
		dhdp = &dhd_info->pub;
	}
	if (!dhdp) {
		return BCME_ERROR;
	}

	return dhd_statlog_write_logdump(dhdp, user_buf, fp, len, pos);
}

uint32
dhd_get_status_log_len(void *ndev, dhd_pub_t *dhdp)
{
	dhd_info_t *dhd_info;
	uint32 length = 0;

	if (ndev) {
		dhd_info = *(dhd_info_t **)netdev_priv((struct net_device *)ndev);
		dhdp = &dhd_info->pub;
	}
	if (dhdp) {
		length = dhd_statlog_get_logbuf_len(dhdp);
	}
	return length;
}
#endif /* DHD_STATUS_LOGGING */

void
dhd_init_sec_hdr(log_dump_section_hdr_t *sec_hdr)
{
	/* prep the section header */
	memset(sec_hdr, 0, sizeof(*sec_hdr));
	sec_hdr->magic = LOG_DUMP_MAGIC;
	sec_hdr->timestamp = local_clock();
}

/* Must hold 'dhd_os_logdump_lock' before calling this function! */
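/* When periodic flushing is enabled, the dump file is treated as a large
 * ring buffer: writing resumes at 'last_file_posn' and wraps back to the
 * start of the file once 'logdump_max_filesize' would be exceeded.
 */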
static int
do_dhd_log_dump(dhd_pub_t *dhdp, log_dump_type_t *type)
{
	int ret = 0, i = 0;
	struct file *fp = NULL;
	mm_segment_t old_fs;
	loff_t pos = 0;
	char dump_path[128];
	uint32 file_mode;
	unsigned long flags = 0;
	size_t log_size = 0;
	size_t fspace_remain = 0;
	struct kstat stat;
	char time_str[128];
	unsigned int len = 0;
	log_dump_section_hdr_t sec_hdr;

	DHD_ERROR(("%s: ENTER \n", __FUNCTION__));

	DHD_GENERAL_LOCK(dhdp, flags);
	if (DHD_BUS_CHECK_DOWN_OR_DOWN_IN_PROGRESS(dhdp)) {
		DHD_GENERAL_UNLOCK(dhdp, flags);
		DHD_ERROR(("%s: bus is down! can't collect log dump. \n", __FUNCTION__));
		goto exit1;
	}
	DHD_BUS_BUSY_SET_IN_LOGDUMP(dhdp);
	DHD_GENERAL_UNLOCK(dhdp, flags);

	if ((ret = dhd_log_flush(dhdp, type)) < 0) {
		goto exit1;
	}

	/* change to the KERNEL_DS address limit */
	old_fs = get_fs();
	set_fs(KERNEL_DS);

	dhd_get_debug_dump_file_name(NULL, dhdp, dump_path, sizeof(dump_path));
	DHD_ERROR(("debug_dump_path = %s\n", dump_path));
	DHD_ERROR(("DHD version: %s\n", dhd_version));
	DHD_ERROR(("F/W version: %s\n", fw_version));

	dhd_log_dump_buf_addr(dhdp, type);

	dhd_get_time_str(dhdp, time_str, sizeof(time_str));

	/* if this is the first time after dhd is loaded,
	 * or if periodic flush is disabled, clear the log file
	 */
	if (!dhdp->logdump_periodic_flush || dhdp->last_file_posn == 0)
		file_mode = O_CREAT | O_WRONLY | O_SYNC | O_TRUNC;
	else
		file_mode = O_CREAT | O_RDWR | O_SYNC;

	fp = filp_open(dump_path, file_mode, 0664);
	if (IS_ERR(fp)) {
		/* if this is an android installed image, try the '/data' directory */
#if defined(CONFIG_X86) && defined(OEM_ANDROID)
		DHD_ERROR(("%s: File open error on Installed android image, trying /data...\n",
			__FUNCTION__));
		snprintf(dump_path, sizeof(dump_path), "/data/" DHD_DEBUG_DUMP_TYPE);
		if (!dhdp->logdump_periodic_flush) {
			snprintf(dump_path + strlen(dump_path),
				sizeof(dump_path) - strlen(dump_path),
				"_%s", dhdp->debug_dump_time_str);
		}
		fp = filp_open(dump_path, file_mode, 0664);
		if (IS_ERR(fp)) {
			ret = PTR_ERR(fp);
			DHD_ERROR(("open file error, err = %d\n", ret));
			goto exit2;
		}
		DHD_ERROR(("debug_dump_path = %s\n", dump_path));
#else
		ret = PTR_ERR(fp);
		DHD_ERROR(("open file error, err = %d\n", ret));
		goto exit2;
#endif /* CONFIG_X86 && OEM_ANDROID */
	}

	ret = vfs_stat(dump_path, &stat);
	if (ret < 0) {
		DHD_ERROR(("file stat error, err = %d\n", ret));
		goto exit2;
	}

	/* if someone else has changed the file */
	if (dhdp->last_file_posn != 0 &&
		stat.size < dhdp->last_file_posn) {
		dhdp->last_file_posn = 0;
	}

	if (dhdp->logdump_periodic_flush) {
		log_size = strlen(time_str) + strlen(DHD_DUMP_LOG_HDR) + sizeof(sec_hdr);
		/* calculate the amount of space required to dump all logs */
		for (i = 0; i < DLD_BUFFER_NUM; ++i) {
			if (*type != DLD_BUF_TYPE_ALL && i != *type)
				continue;

			if (g_dld_buf[i].wraparound) {
				log_size += (unsigned long)g_dld_buf[i].max
					- (unsigned long)g_dld_buf[i].buffer;
			} else {
				spin_lock_irqsave(&g_dld_buf[i].lock, flags);
				log_size += (unsigned long)g_dld_buf[i].present -
					(unsigned long)g_dld_buf[i].front;
				spin_unlock_irqrestore(&g_dld_buf[i].lock, flags);
			}
			log_size += strlen(dld_hdrs[i].hdr_str) + sizeof(sec_hdr);

			if (*type != DLD_BUF_TYPE_ALL && i == *type)
				break;
		}

		ret = generic_file_llseek(fp, dhdp->last_file_posn, SEEK_CUR);
		if (ret < 0) {
			DHD_ERROR(("file seek last posn error ! err = %d \n", ret));
			goto exit2;
		}
		pos = fp->f_pos;

		/* if the max file size is reached, wrap around to the beginning
		 * of the file; we're treating the file as a large ring buffer
		 */
		fspace_remain = logdump_max_filesize - pos;
		if (log_size > fspace_remain) {
			fp->f_pos -= pos;
			pos = fp->f_pos;
		}
	}

	dhd_print_time_str(0, fp, len, &pos);

	for (i = 0; i < DLD_BUFFER_NUM; ++i) {
		if (*type != DLD_BUF_TYPE_ALL && i != *type)
			continue;

		len = dhd_get_dld_len(i);
		dhd_get_dld_log_dump(NULL, dhdp, 0, fp, len, i, &pos);
		if (*type != DLD_BUF_TYPE_ALL)
			break;
	}

#ifdef EWP_ECNTRS_LOGGING
	/* periodic flushing of ecounters is NOT supported */
	if (*type == DLD_BUF_TYPE_ALL &&
		logdump_ecntr_enable &&
		dhdp->ecntr_dbg_ring) {
		dhd_log_dump_ring_to_file(dhdp, dhdp->ecntr_dbg_ring,
			fp, (unsigned long *)&pos,
			&sec_hdr, ECNTRS_LOG_HDR, LOG_DUMP_SECTION_ECNTRS);
	}
#endif /* EWP_ECNTRS_LOGGING */

#ifdef DHD_STATUS_LOGGING
	if (dhdp->statlog) {
		/* write the statlog */
		len = dhd_get_status_log_len(NULL, dhdp);
		if (len) {
			if (dhd_print_status_log_data(NULL, dhdp, 0, fp,
				len, &pos) < 0) {
				goto exit2;
			}
		}
	}
#endif /* DHD_STATUS_LOGGING */

#ifdef EWP_RTT_LOGGING
	/* periodic flushing of the rtt log is NOT supported */
	if (*type == DLD_BUF_TYPE_ALL &&
		logdump_rtt_enable &&
		dhdp->rtt_dbg_ring) {
		dhd_log_dump_ring_to_file(dhdp, dhdp->rtt_dbg_ring,
			fp, (unsigned long *)&pos,
			&sec_hdr, RTT_LOG_HDR, LOG_DUMP_SECTION_RTT);
	}
#endif /* EWP_RTT_LOGGING */

#ifdef BCMPCIE
	len = dhd_get_ext_trap_len(NULL, dhdp);
	if (len) {
		if (dhd_print_ext_trap_data(NULL, dhdp, 0, fp, len, &pos) < 0)
			goto exit2;
	}
#endif /* BCMPCIE */
#if defined(DHD_FW_COREDUMP) && defined(DNGL_EVENT_SUPPORT) && defined(BCMPCIE)
	len = dhd_get_health_chk_len(NULL, dhdp);
	if (len) {
		if (dhd_print_health_chk_data(NULL, dhdp, 0, fp, len, &pos) < 0)
			goto exit2;
	}
#endif /* DHD_FW_COREDUMP && DNGL_EVENT_SUPPORT && BCMPCIE */

	len = dhd_get_dhd_dump_len(NULL, dhdp);
	if (len) {
		if (dhd_print_dump_data(NULL, dhdp, 0, fp, len, &pos) < 0)
			goto exit2;
	}

	len = dhd_get_cookie_log_len(NULL, dhdp);
	if (len) {
		if (dhd_print_cookie_data(NULL, dhdp, 0, fp, len, &pos) < 0)
			goto exit2;
	}

#ifdef DHD_DUMP_PCIE_RINGS
	len = dhd_get_flowring_len(NULL, dhdp);
	if (len) {
		if (dhd_print_flowring_data(NULL, dhdp, 0, fp, len, &pos) < 0)
			goto exit2;
	}
#endif /* DHD_DUMP_PCIE_RINGS */

	if (dhdp->logdump_periodic_flush) {
		/* store the last position written to in the file for future use */
		dhdp->last_file_posn = pos;
	}

exit2:
	if (!IS_ERR(fp) && fp != NULL) {
		filp_close(fp, NULL);
		DHD_ERROR(("%s: Finished writing log dump to file - '%s' \n",
			__FUNCTION__, dump_path));
	}
	set_fs(old_fs);
exit1:
	if (type) {
		MFREE(dhdp->osh, type, sizeof(*type));
	}
	DHD_GENERAL_LOCK(dhdp, flags);
	DHD_BUS_BUSY_CLEAR_IN_LOGDUMP(dhdp);
	dhd_os_busbusy_wake(dhdp);
	DHD_GENERAL_UNLOCK(dhdp, flags);

#ifdef DHD_DUMP_MNGR
	if (ret >= 0) {
		dhd_dump_file_manage_enqueue(dhdp, dump_path, DHD_DEBUG_DUMP_TYPE);
	}
#endif /* DHD_DUMP_MNGR */

	return (ret < 0) ? BCME_ERROR : BCME_OK;
}
#endif /* DHD_LOG_DUMP */

/* This function writes data to the file pointed to by fp, OR
 * copies data to the user buffer sent by the upper layer (HAL).
 */
int
dhd_export_debug_data(void *mem_buf, void *fp, const void *user_buf, int buf_len, void *pos)
{
	int ret = BCME_OK;

	if (fp) {
		ret = vfs_write(fp, mem_buf, buf_len, (loff_t *)pos);
		if (ret < 0) {
			DHD_ERROR(("write file error, err = %d\n", ret));
			goto exit;
		}
	} else {
		ret = copy_to_user((void *)((uintptr_t)user_buf + (*(int *)pos)),
			mem_buf, buf_len);
		if (ret) {
			DHD_ERROR(("failed to copy into user buffer : %d\n", ret));
			goto exit;
		}
		(*(int *)pos) += buf_len;
	}
exit:
	return ret;
}
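
/*
 * Illustrative sketch (under #if 0, so it is not compiled): the exporter
 * above serves two sinks. With a kernel file handle, 'pos' is read as a
 * loff_t file offset; with only a user buffer, 'pos' is an int byte offset
 * into that buffer. The names 'dbg_blob', 'dbg_fp' and 'hal_buf' are
 * hypothetical.
 */
#if 0
	/* sink 1: a file opened with filp_open(); pos is a loff_t */
	loff_t fpos = 0;
	dhd_export_debug_data(dbg_blob, dbg_fp, NULL, blob_len, &fpos);

	/* sink 2: a HAL-supplied user buffer; pos is an int offset */
	int upos = 0;
	dhd_export_debug_data(dbg_blob, NULL, hal_buf, blob_len, &upos);
#endif
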
/*
 * This call is to get the memdump size so that
 * halutil can allocate a buffer of that size in user space.
 */
int
dhd_os_socram_dump(struct net_device *dev, uint32 *dump_size)
{
	int ret = BCME_OK;
	dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev);
	dhd_pub_t *dhdp = &dhd->pub;

	if (dhdp->busstate == DHD_BUS_DOWN) {
		DHD_ERROR(("%s: bus is down\n", __FUNCTION__));
		return BCME_ERROR;
	}
	if (DHD_BUS_CHECK_SUSPEND_OR_SUSPEND_IN_PROGRESS(dhdp)) {
		DHD_ERROR(("%s: bus is in suspend(%d) or suspending(0x%x) state, so skip\n",
			__FUNCTION__, dhdp->busstate, dhdp->dhd_bus_busy_state));
		return BCME_ERROR;
	}
#ifdef DHD_PCIE_RUNTIMEPM
	dhdpcie_runtime_bus_wake(dhdp, TRUE, __builtin_return_address(0));
#endif /* DHD_PCIE_RUNTIMEPM */
	ret = dhd_common_socram_dump(dhdp);
	if (ret == BCME_OK) {
		*dump_size = dhdp->soc_ram_length;
	}
	return ret;
}

/*
 * This is to get the actual memdump after getting the memdump size
 */
int
dhd_os_get_socram_dump(struct net_device *dev, char **buf, uint32 *size)
{
	int ret = BCME_OK;
	int orig_len = 0;
	dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev);
	dhd_pub_t *dhdp = &dhd->pub;

	if (buf == NULL)
		return BCME_ERROR;

	orig_len = *size;
	if (dhdp->soc_ram) {
		if (orig_len >= dhdp->soc_ram_length) {
			*buf = dhdp->soc_ram;
			*size = dhdp->soc_ram_length;
		} else {
			ret = BCME_BUFTOOSHORT;
			DHD_ERROR(("The length of the buffer is too short"
				" to save the memory dump with %d\n", dhdp->soc_ram_length));
		}
	} else {
		DHD_ERROR(("socram_dump is not ready to get\n"));
		ret = BCME_NOTREADY;
	}
	return ret;
}
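
/*
 * Illustrative sketch (under #if 0): the HAL-facing memdump is a two-step
 * protocol - query the size, then fetch a pointer to the captured SoC RAM
 * image. 'ndev' is a hypothetical net_device pointer.
 */
#if 0
	uint32 dump_size = 0, buf_size;
	char *dump_buf = NULL;

	if (dhd_os_socram_dump(ndev, &dump_size) == BCME_OK) {
		buf_size = dump_size;	/* advertise how much the caller can take */
		if (dhd_os_get_socram_dump(ndev, &dump_buf, &buf_size) == BCME_OK) {
			/* buf_size now holds the actual image length */
		}
	}
#endif
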
int
dhd_os_get_version(struct net_device *dev, bool dhd_ver, char **buf, uint32 size)
{
	char *fw_str;

	if (size == 0)
		return BCME_BADARG;

	fw_str = strstr(info_string, "Firmware: ");
	if (fw_str == NULL) {
		return BCME_ERROR;
	}

	memset(*buf, 0, size);
	if (dhd_ver) {
		strncpy(*buf, dhd_version, size - 1);
	} else {
		strncpy(*buf, fw_str, size - 1);
	}
	return BCME_OK;
}

#ifdef DHD_PKT_LOGGING
int
dhd_os_get_pktlog_dump(void *dev, const void *user_buf, uint32 len)
{
	int ret = BCME_OK;
	dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev);
	dhd_pub_t *dhdp = &dhd->pub;

	if (user_buf == NULL) {
		DHD_ERROR(("%s(): user buffer is NULL\n", __FUNCTION__));
		return BCME_ERROR;
	}

	ret = dhd_pktlog_dump_write_memory(dhdp, user_buf, len);
	if (ret < 0) {
		DHD_ERROR(("%s(): fail to dump pktlog, err = %d\n", __FUNCTION__, ret));
		return ret;
	}
	return ret;
}

uint32
dhd_os_get_pktlog_dump_size(struct net_device *dev)
{
	uint32 size = 0;
	dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev);
	dhd_pub_t *dhdp = &dhd->pub;

	size = dhd_pktlog_get_dump_length(dhdp);
	if (size == 0) {
		DHD_ERROR(("%s(): fail to get pktlog size, err = %u\n", __FUNCTION__, size));
	}
	return size;
}

void
dhd_os_get_pktlogdump_filename(struct net_device *dev, char *dump_path, int len)
{
	dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev);
	dhd_pub_t *dhdp = &dhd->pub;

	dhd_pktlog_get_filename(dhdp, dump_path, len);
}
#endif /* DHD_PKT_LOGGING */

#ifdef DNGL_AXI_ERROR_LOGGING
int
dhd_os_get_axi_error_dump(void *dev, const void *user_buf, uint32 len)
{
	int ret = BCME_OK;
	dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev);
	dhd_pub_t *dhdp = &dhd->pub;
	loff_t pos = 0;

	if (user_buf == NULL) {
		DHD_ERROR(("%s(): user buffer is NULL\n", __FUNCTION__));
		return BCME_ERROR;
	}

	ret = dhd_export_debug_data((char *)dhdp->axi_err_dump,
		NULL, user_buf, sizeof(dhd_axi_error_dump_t), &pos);
	if (ret < 0) {
		DHD_ERROR(("%s(): fail to dump axi error, err = %d\n", __FUNCTION__, ret));
		return ret;
	}
	return ret;
}

int
dhd_os_get_axi_error_dump_size(struct net_device *dev)
{
	int size = -1;

	size = sizeof(dhd_axi_error_dump_t);
	if (size < 0) {
		DHD_ERROR(("%s(): fail to get axi error size, err = %d\n", __FUNCTION__, size));
	}
	return size;
}

void
dhd_os_get_axi_error_filename(struct net_device *dev, char *dump_path, int len)
{
	snprintf(dump_path, len, "%s",
		DHD_COMMON_DUMP_PATH DHD_DUMP_AXI_ERROR_FILENAME);
}
#endif /* DNGL_AXI_ERROR_LOGGING */

bool dhd_sta_associated(dhd_pub_t *dhdp, uint32 bssidx, uint8 *mac)
{
	return dhd_find_sta(dhdp, bssidx, mac) ? TRUE : FALSE;
}

#ifdef DHD_L2_FILTER
arp_table_t*
dhd_get_ifp_arp_table_handle(dhd_pub_t *dhdp, uint32 bssidx)
{
	dhd_info_t *dhd = dhdp->info;
	dhd_if_t *ifp;

	ASSERT(bssidx < DHD_MAX_IFS);
	ifp = dhd->iflist[bssidx];
	return ifp->phnd_arp_table;
}

int dhd_get_parp_status(dhd_pub_t *dhdp, uint32 idx)
{
	dhd_info_t *dhd = dhdp->info;
	dhd_if_t *ifp;

	ASSERT(idx < DHD_MAX_IFS);
	ifp = dhd->iflist[idx];
	if (ifp)
		return ifp->parp_enable;
	else
		return FALSE;
}

/* Set interface specific proxy arp configuration */
int dhd_set_parp_status(dhd_pub_t *dhdp, uint32 idx, int val)
{
	dhd_info_t *dhd = dhdp->info;
	dhd_if_t *ifp;

	ASSERT(idx < DHD_MAX_IFS);
	ifp = dhd->iflist[idx];
	if (!ifp)
		return BCME_ERROR;

	/* At present all 3 variables are being
	 * handled at once
	 */
	ifp->parp_enable = val;
	ifp->parp_discard = val;
	ifp->parp_allnode = val;

	/* Flush ARP entries when disabled */
	if (val == FALSE) {
		bcm_l2_filter_arp_table_update(dhdp->osh, ifp->phnd_arp_table, TRUE, NULL,
			FALSE, dhdp->tickcnt);
	}
	return BCME_OK;
}

bool dhd_parp_discard_is_enabled(dhd_pub_t *dhdp, uint32 idx)
{
	dhd_info_t *dhd = dhdp->info;
	dhd_if_t *ifp;

	ASSERT(idx < DHD_MAX_IFS);
	ifp = dhd->iflist[idx];
	ASSERT(ifp);
	return ifp->parp_discard;
}

bool
dhd_parp_allnode_is_enabled(dhd_pub_t *dhdp, uint32 idx)
{
	dhd_info_t *dhd = dhdp->info;
	dhd_if_t *ifp;

	ASSERT(idx < DHD_MAX_IFS);
	ifp = dhd->iflist[idx];
	ASSERT(ifp);
	return ifp->parp_allnode;
}

int dhd_get_dhcp_unicast_status(dhd_pub_t *dhdp, uint32 idx)
{
	dhd_info_t *dhd = dhdp->info;
	dhd_if_t *ifp;

	ASSERT(idx < DHD_MAX_IFS);
	ifp = dhd->iflist[idx];
	ASSERT(ifp);
	return ifp->dhcp_unicast;
}

int dhd_set_dhcp_unicast_status(dhd_pub_t *dhdp, uint32 idx, int val)
{
	dhd_info_t *dhd = dhdp->info;
	dhd_if_t *ifp;

	ASSERT(idx < DHD_MAX_IFS);
	ifp = dhd->iflist[idx];
	ASSERT(ifp);
	ifp->dhcp_unicast = val;
	return BCME_OK;
}

int dhd_get_block_ping_status(dhd_pub_t *dhdp, uint32 idx)
{
	dhd_info_t *dhd = dhdp->info;
	dhd_if_t *ifp;

	ASSERT(idx < DHD_MAX_IFS);
	ifp = dhd->iflist[idx];
	ASSERT(ifp);
	return ifp->block_ping;
}

int dhd_set_block_ping_status(dhd_pub_t *dhdp, uint32 idx, int val)
{
	dhd_info_t *dhd = dhdp->info;
	dhd_if_t *ifp;

	ASSERT(idx < DHD_MAX_IFS);
	ifp = dhd->iflist[idx];
	ASSERT(ifp);
	ifp->block_ping = val;
	/* Disable the rx_pkt_chain feature for the interface if the
	 * block_ping option is enabled
	 */
	dhd_update_rx_pkt_chainable_state(dhdp, idx);
	return BCME_OK;
}

int dhd_get_grat_arp_status(dhd_pub_t *dhdp, uint32 idx)
{
	dhd_info_t *dhd = dhdp->info;
	dhd_if_t *ifp;

	ASSERT(idx < DHD_MAX_IFS);
	ifp = dhd->iflist[idx];
	ASSERT(ifp);
	return ifp->grat_arp;
}

int dhd_set_grat_arp_status(dhd_pub_t *dhdp, uint32 idx, int val)
{
	dhd_info_t *dhd = dhdp->info;
	dhd_if_t *ifp;

	ASSERT(idx < DHD_MAX_IFS);
	ifp = dhd->iflist[idx];
	ASSERT(ifp);
	ifp->grat_arp = val;
	return BCME_OK;
}

int dhd_get_block_tdls_status(dhd_pub_t *dhdp, uint32 idx)
{
	dhd_info_t *dhd = dhdp->info;
	dhd_if_t *ifp;

	ASSERT(idx < DHD_MAX_IFS);
	ifp = dhd->iflist[idx];
	ASSERT(ifp);
	return ifp->block_tdls;
}

int dhd_set_block_tdls_status(dhd_pub_t *dhdp, uint32 idx, int val)
{
	dhd_info_t *dhd = dhdp->info;
	dhd_if_t *ifp;

	ASSERT(idx < DHD_MAX_IFS);
	ifp = dhd->iflist[idx];
	ASSERT(ifp);
	ifp->block_tdls = val;
	return BCME_OK;
}
#endif /* DHD_L2_FILTER */
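
/*
 * Illustrative sketch (under #if 0): the proxy-ARP setter above toggles
 * enable/discard/allnode together, and disabling also flushes the
 * per-interface ARP table, so a round trip looks like this ('dhdp' being
 * the usual dhd_pub_t pointer, interface index 0).
 */
#if 0
	dhd_set_parp_status(dhdp, 0, TRUE);	/* enable proxy ARP on ifidx 0 */
	dhd_set_parp_status(dhdp, 0, FALSE);	/* disable and flush ARP entries */
#endif
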
#if defined(SET_RPS_CPUS)
int dhd_rps_cpus_enable(struct net_device *net, int enable)
{
	dhd_info_t *dhd = DHD_DEV_INFO(net);
	dhd_if_t *ifp;
	int ifidx;
	char *RPS_CPU_SETBUF;

	ifidx = dhd_net2idx(dhd, net);
	if (ifidx == DHD_BAD_IF) {
		DHD_ERROR(("%s bad ifidx\n", __FUNCTION__));
		return -ENODEV;
	}

	if (ifidx == PRIMARY_INF) {
		if (dhd->pub.op_mode == DHD_FLAG_IBSS_MODE) {
			DHD_INFO(("%s : set for IBSS.\n", __FUNCTION__));
			RPS_CPU_SETBUF = RPS_CPUS_MASK_IBSS;
		} else {
			DHD_INFO(("%s : set for BSS.\n", __FUNCTION__));
			RPS_CPU_SETBUF = RPS_CPUS_MASK;
		}
	} else if (ifidx == VIRTUAL_INF) {
		DHD_INFO(("%s : set for P2P.\n", __FUNCTION__));
		RPS_CPU_SETBUF = RPS_CPUS_MASK_P2P;
	} else {
		DHD_ERROR(("%s : Invalid index : %d.\n", __FUNCTION__, ifidx));
		return -EINVAL;
	}

	ifp = dhd->iflist[ifidx];
	if (ifp) {
		if (enable) {
			DHD_INFO(("%s : set rps_cpus as [%s]\n", __FUNCTION__, RPS_CPU_SETBUF));
			custom_rps_map_set(ifp->net->_rx, RPS_CPU_SETBUF, strlen(RPS_CPU_SETBUF));
		} else {
			custom_rps_map_clear(ifp->net->_rx);
		}
	} else {
		DHD_ERROR(("%s : ifp is NULL!!\n", __FUNCTION__));
		return -ENODEV;
	}
	return BCME_OK;
}

int custom_rps_map_set(struct netdev_rx_queue *queue, char *buf, size_t len)
{
	struct rps_map *old_map, *map;
	cpumask_var_t mask;
	int err, cpu, i;
	static DEFINE_SPINLOCK(rps_map_lock);

	DHD_INFO(("%s : Entered.\n", __FUNCTION__));

	if (!alloc_cpumask_var(&mask, GFP_KERNEL)) {
		DHD_ERROR(("%s : alloc_cpumask_var fail.\n", __FUNCTION__));
		return -ENOMEM;
	}

	err = bitmap_parse(buf, len, cpumask_bits(mask), nr_cpumask_bits);
	if (err) {
		free_cpumask_var(mask);
		DHD_ERROR(("%s : bitmap_parse fail.\n", __FUNCTION__));
		return err;
	}

	map = kzalloc(max_t(unsigned int,
		RPS_MAP_SIZE(cpumask_weight(mask)), L1_CACHE_BYTES),
		GFP_KERNEL);
	if (!map) {
		free_cpumask_var(mask);
		DHD_ERROR(("%s : map malloc fail.\n", __FUNCTION__));
		return -ENOMEM;
	}

	i = 0;
	for_each_cpu(cpu, mask) {
		map->cpus[i++] = cpu;
	}

	if (i) {
		map->len = i;
	} else {
		kfree(map);
		map = NULL;
		free_cpumask_var(mask);
		DHD_ERROR(("%s : mapping cpu fail.\n", __FUNCTION__));
		return -1;
	}

	spin_lock(&rps_map_lock);
	old_map = rcu_dereference_protected(queue->rps_map,
		lockdep_is_held(&rps_map_lock));
	rcu_assign_pointer(queue->rps_map, map);
	spin_unlock(&rps_map_lock);

	if (map) {
		static_key_slow_inc(&rps_needed);
	}
	if (old_map) {
		kfree_rcu(old_map, rcu);
		static_key_slow_dec(&rps_needed);
	}
	free_cpumask_var(mask);

	DHD_INFO(("%s : Done. mapping cpu number : %d\n", __FUNCTION__, map->len));
	return map->len;
}

void custom_rps_map_clear(struct netdev_rx_queue *queue)
{
	struct rps_map *map;

	DHD_INFO(("%s : Entered.\n", __FUNCTION__));

	map = rcu_dereference_protected(queue->rps_map, 1);
	if (map) {
		RCU_INIT_POINTER(queue->rps_map, NULL);
		kfree_rcu(map, rcu);
		DHD_INFO(("%s : rps_cpus map clear.\n", __FUNCTION__));
	}
}
#endif /* SET_RPS_CPUS */
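
/*
 * Illustrative sketch (under #if 0): the masks handed to custom_rps_map_set()
 * use the same hex cpumask format as
 * /sys/class/net/<ifname>/queues/rx-0/rps_cpus, parsed by bitmap_parse();
 * e.g. "40" selects CPU 6 and "c0" selects CPUs 6 and 7. 'ndev' below is a
 * hypothetical net_device pointer.
 */
#if 0
	/* steer rx-0 packet processing onto CPU 6 */
	custom_rps_map_set(ndev->_rx, "40", strlen("40"));
	/* and undo it again */
	custom_rps_map_clear(ndev->_rx);
#endif
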
#if defined(ARGOS_NOTIFY_CB)
static int argos_status_notifier_wifi_cb(struct notifier_block *notifier,
	unsigned long speed, void *v);
static int argos_status_notifier_p2p_cb(struct notifier_block *notifier,
	unsigned long speed, void *v);

int
argos_register_notifier_init(struct net_device *net)
{
	int ret = 0;

	DHD_INFO(("DHD: %s: \n", __FUNCTION__));
	argos_rps_ctrl_data.wlan_primary_netdev = net;
	argos_rps_ctrl_data.argos_rps_cpus_enabled = 0;

	if (argos_wifi.notifier_call == NULL) {
		argos_wifi.notifier_call = argos_status_notifier_wifi_cb;
		ret = sec_argos_register_notifier(&argos_wifi, ARGOS_WIFI_TABLE_LABEL);
		if (ret < 0) {
			DHD_ERROR(("DHD:Failed to register WIFI notifier, ret=%d\n", ret));
			goto exit;
		}
	}

	if (argos_p2p.notifier_call == NULL) {
		argos_p2p.notifier_call = argos_status_notifier_p2p_cb;
		ret = sec_argos_register_notifier(&argos_p2p, ARGOS_P2P_TABLE_LABEL);
		if (ret < 0) {
			DHD_ERROR(("DHD:Failed to register P2P notifier, ret=%d\n", ret));
			sec_argos_unregister_notifier(&argos_wifi, ARGOS_WIFI_TABLE_LABEL);
			goto exit;
		}
	}

	return 0;

exit:
	if (argos_wifi.notifier_call) {
		argos_wifi.notifier_call = NULL;
	}
	if (argos_p2p.notifier_call) {
		argos_p2p.notifier_call = NULL;
	}
	return ret;
}

int
argos_register_notifier_deinit(void)
{
	DHD_INFO(("DHD: %s: \n", __FUNCTION__));

	if (argos_rps_ctrl_data.wlan_primary_netdev == NULL) {
		DHD_ERROR(("DHD: primary_net_dev is null %s: \n", __FUNCTION__));
		return -1;
	}
#ifndef DHD_LB
	custom_rps_map_clear(argos_rps_ctrl_data.wlan_primary_netdev->_rx);
#endif /* !DHD_LB */

	if (argos_p2p.notifier_call) {
		sec_argos_unregister_notifier(&argos_p2p, ARGOS_P2P_TABLE_LABEL);
		argos_p2p.notifier_call = NULL;
	}
	if (argos_wifi.notifier_call) {
		sec_argos_unregister_notifier(&argos_wifi, ARGOS_WIFI_TABLE_LABEL);
		argos_wifi.notifier_call = NULL;
	}

	argos_rps_ctrl_data.wlan_primary_netdev = NULL;
	argos_rps_ctrl_data.argos_rps_cpus_enabled = 0;
	return 0;
}

int
argos_status_notifier_wifi_cb(struct notifier_block *notifier,
	unsigned long speed, void *v)
{
	dhd_info_t *dhd;
	dhd_pub_t *dhdp;
#if defined(ARGOS_NOTIFY_CB)
	unsigned int pcie_irq = 0;
#endif /* ARGOS_NOTIFY_CB */

	DHD_INFO(("DHD: %s: speed=%ld\n", __FUNCTION__, speed));

	if (argos_rps_ctrl_data.wlan_primary_netdev == NULL) {
		goto exit;
	}
	dhd = DHD_DEV_INFO(argos_rps_ctrl_data.wlan_primary_netdev);
	if (dhd == NULL) {
		goto exit;
	}
	dhdp = &dhd->pub;
	if (dhdp == NULL || !dhdp->up) {
		goto exit;
	}

	/* Check whether the reported throughput exceeds the threshold */
	if (speed > RPS_TPUT_THRESHOLD) {
		if (argos_rps_ctrl_data.argos_rps_cpus_enabled == 0) {
			/* rps_cpus does not need to be configured
			 * if Load Balance is enabled
			 */
#ifndef DHD_LB
			int err = 0;

			if (cpu_online(RPS_CPUS_WLAN_CORE_ID)) {
				err = custom_rps_map_set(
					argos_rps_ctrl_data.wlan_primary_netdev->_rx,
					RPS_CPUS_MASK, strlen(RPS_CPUS_MASK));
			} else {
				DHD_ERROR(("DHD: %s: RPS_Set fail,"
					" Core=%d Offline\n", __FUNCTION__,
					RPS_CPUS_WLAN_CORE_ID));
				err = -1;
			}
			if (err < 0) {
				DHD_ERROR(("DHD: %s: Failed to RPS_CPUs. "
					"speed=%ld, error=%d\n",
					__FUNCTION__, speed, err));
			} else {
#endif /* !DHD_LB */
#if (defined(DHDTCPACK_SUPPRESS) && defined(BCMPCIE))
				if (dhdp->tcpack_sup_mode != TCPACK_SUP_HOLD) {
					DHD_ERROR(("%s : set ack suppress. TCPACK_SUP_ON(%d)\n",
						__FUNCTION__, TCPACK_SUP_HOLD));
					dhd_tcpack_suppress_set(dhdp, TCPACK_SUP_HOLD);
				}
#endif /* DHDTCPACK_SUPPRESS && BCMPCIE */
				argos_rps_ctrl_data.argos_rps_cpus_enabled = 1;
#ifndef DHD_LB
				DHD_ERROR(("DHD: %s: Set RPS_CPUs, speed=%ld\n",
					__FUNCTION__, speed));
			}
#endif /* !DHD_LB */
		}
	} else {
		if (argos_rps_ctrl_data.argos_rps_cpus_enabled == 1) {
#if (defined(DHDTCPACK_SUPPRESS) && defined(BCMPCIE))
			if (dhdp->tcpack_sup_mode != TCPACK_SUP_OFF) {
				DHD_ERROR(("%s : set ack suppress. TCPACK_SUP_OFF\n",
					__FUNCTION__));
				dhd_tcpack_suppress_set(dhdp, TCPACK_SUP_OFF);
			}
#endif /* DHDTCPACK_SUPPRESS && BCMPCIE */
#ifndef DHD_LB
			/* rps_cpus does not need to be configured
			 * if Load Balance is enabled
			 */
			custom_rps_map_clear(argos_rps_ctrl_data.wlan_primary_netdev->_rx);
			DHD_ERROR(("DHD: %s: Clear RPS_CPUs, speed=%ld\n", __FUNCTION__, speed));
			OSL_SLEEP(DELAY_TO_CLEAR_RPS_CPUS);
#endif /* !DHD_LB */
			argos_rps_ctrl_data.argos_rps_cpus_enabled = 0;
		}
	}

exit:
	return NOTIFY_OK;
}

int
argos_status_notifier_p2p_cb(struct notifier_block *notifier,
	unsigned long speed, void *v)
{
	DHD_INFO(("DHD: %s: speed=%ld\n", __FUNCTION__, speed));
	return argos_status_notifier_wifi_cb(notifier, speed, v);
}
#endif /* ARGOS_NOTIFY_CB */

#ifdef DHD_DEBUG_PAGEALLOC
void
dhd_page_corrupt_cb(void *handle, void *addr_corrupt, size_t len)
{
	dhd_pub_t *dhdp = (dhd_pub_t *)handle;

	DHD_ERROR(("%s: Got dhd_page_corrupt_cb 0x%p %d\n",
		__FUNCTION__, addr_corrupt, (uint32)len));

	DHD_OS_WAKE_LOCK(dhdp);
	prhex("Page Corruption:", addr_corrupt, len);
	dhd_dump_to_kernelog(dhdp);
#if defined(BCMPCIE) && defined(DHD_FW_COREDUMP)
	/* Load the dongle side dump to host memory and then BUG_ON() */
	dhdp->memdump_enabled = DUMP_MEMONLY;
	dhdp->memdump_type = DUMP_TYPE_MEMORY_CORRUPTION;
	dhd_bus_mem_dump(dhdp);
#endif /* BCMPCIE && DHD_FW_COREDUMP */
	DHD_OS_WAKE_UNLOCK(dhdp);
}
EXPORT_SYMBOL(dhd_page_corrupt_cb);
#endif /* DHD_DEBUG_PAGEALLOC */

#if defined(BCMPCIE) && defined(DHD_PKTID_AUDIT_ENABLED)
void
dhd_pktid_error_handler(dhd_pub_t *dhdp)
{
	DHD_ERROR(("%s: Got Pkt Id Audit failure \n", __FUNCTION__));
	DHD_OS_WAKE_LOCK(dhdp);
	dhd_dump_to_kernelog(dhdp);
#ifdef DHD_FW_COREDUMP
	/* Load the dongle side dump to host memory */
	if (dhdp->memdump_enabled == DUMP_DISABLED) {
		dhdp->memdump_enabled = DUMP_MEMFILE;
	}
	dhdp->memdump_type = DUMP_TYPE_PKTID_AUDIT_FAILURE;
	dhd_bus_mem_dump(dhdp);
#endif /* DHD_FW_COREDUMP */
#ifdef OEM_ANDROID
	dhdp->hang_reason = HANG_REASON_PCIE_PKTID_ERROR;
	dhd_os_check_hang(dhdp, 0, -EREMOTEIO);
#endif /* OEM_ANDROID */
	DHD_OS_WAKE_UNLOCK(dhdp);
}
#endif /* BCMPCIE && DHD_PKTID_AUDIT_ENABLED */

struct net_device *
dhd_linux_get_primary_netdev(dhd_pub_t *dhdp)
{
	dhd_info_t *dhd = dhdp->info;

	if (dhd->iflist[0] && dhd->iflist[0]->net)
		return dhd->iflist[0]->net;
	else
		return NULL;
}

fw_download_status_t
dhd_fw_download_status(dhd_pub_t *dhd_pub)
{
	return dhd_pub->fw_download_status;
}

static int
dhd_create_to_notifier_skt(void)
{
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 7, 0))
	/* Kernel 3.7 onwards this API accepts only 3 arguments. */
	/* Kernel version 3.6 is a special case which accepts 4 arguments */
	nl_to_event_sk = netlink_kernel_create(&init_net, BCM_NL_USER, &dhd_netlink_cfg);
#elif (LINUX_VERSION_CODE < KERNEL_VERSION(3, 6, 0))
	/* Kernel version 3.5 and below use this old API format */
	nl_to_event_sk = netlink_kernel_create(&init_net, BCM_NL_USER, 0,
		dhd_process_daemon_msg, NULL, THIS_MODULE);
#else
	nl_to_event_sk = netlink_kernel_create(&init_net, BCM_NL_USER, THIS_MODULE,
		&dhd_netlink_cfg);
#endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 7, 0)) */
	if (!nl_to_event_sk) {
		printf("Error creating socket.\n");
		return -1;
	}
	DHD_INFO(("nl_to socket created successfully...\n"));
	return 0;
}

void
dhd_destroy_to_notifier_skt(void)
{
	DHD_INFO(("Destroying nl_to socket\n"));
	netlink_kernel_release(nl_to_event_sk);
}

static void
dhd_recv_msg_from_daemon(struct sk_buff *skb)
{
	struct nlmsghdr *nlh;
	bcm_to_info_t *cmd;

	nlh = (struct nlmsghdr *)skb->data;
	cmd = (bcm_to_info_t *)nlmsg_data(nlh);
	if ((cmd->magic == BCM_TO_MAGIC) && (cmd->reason == REASON_DAEMON_STARTED)) {
		sender_pid = ((struct nlmsghdr *)(skb->data))->nlmsg_pid;
		DHD_INFO(("DHD Daemon Started\n"));
	}
}

int
dhd_send_msg_to_daemon(struct sk_buff *skb, void *data, int size)
{
	struct nlmsghdr *nlh;
	struct sk_buff *skb_out;
	int ret = BCME_ERROR;

	BCM_REFERENCE(skb);

	if (sender_pid == 0) {
		DHD_INFO(("Invalid PID 0\n"));
		skb_out = NULL;
		goto err;
	}

	if ((skb_out = nlmsg_new(size, 0)) == NULL) {
		DHD_ERROR(("%s: skb alloc failed\n", __FUNCTION__));
		ret = BCME_NOMEM;
		goto err;
	}
	nlh = nlmsg_put(skb_out, 0, 0, NLMSG_DONE, size, 0);
	if (nlh == NULL) {
		DHD_ERROR(("%s: nlmsg_put failed\n", __FUNCTION__));
		goto err;
	}
	NETLINK_CB(skb_out).dst_group = 0; /* Unicast */
	(void)memcpy_s(nlmsg_data(nlh), size, (char *)data, size);

	if ((ret = nlmsg_unicast(nl_to_event_sk, skb_out, sender_pid)) < 0) {
		DHD_ERROR(("Error sending message, ret:%d\n", ret));
		/* skb is already freed inside nlmsg_unicast() on the error case,
		 * so explicitly set skb_out to NULL to avoid a double free
		 */
		skb_out = NULL;
		goto err;
	}
	return BCME_OK;

err:
	if (skb_out) {
		nlmsg_free(skb_out);
	}
	return ret;
}

static void
dhd_process_daemon_msg(struct sk_buff *skb)
{
	bcm_to_info_t to_info;

	to_info.magic = BCM_TO_MAGIC;
	to_info.reason = REASON_DAEMON_STARTED;
	to_info.trap = NO_TRAP;

	dhd_recv_msg_from_daemon(skb);
	dhd_send_msg_to_daemon(skb, &to_info, sizeof(to_info));
}
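
/*
 * Illustrative sketch (under #if 0) of the user-space half of this handshake:
 * the daemon binds a netlink socket of protocol BCM_NL_USER so the kernel can
 * learn its pid, then sends a bcm_to_info_t with magic BCM_TO_MAGIC and
 * reason REASON_DAEMON_STARTED, which dhd_recv_msg_from_daemon() records in
 * 'sender_pid'. This is ordinary user-space code shown here only for context.
 */
#if 0
	int sk = socket(AF_NETLINK, SOCK_RAW, BCM_NL_USER);
	struct sockaddr_nl src = { .nl_family = AF_NETLINK, .nl_pid = getpid() };

	bind(sk, (struct sockaddr *)&src, sizeof(src));
	/* then send an nlmsghdr + bcm_to_info_t payload addressed to pid 0
	 * (the kernel); every later dhd_send_msg_to_daemon() is unicast back
	 * to this pid
	 */
#endif
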
#ifdef DHD_LOG_DUMP
bool
dhd_log_dump_ecntr_enabled(void)
{
	return (bool)logdump_ecntr_enable;
}

bool
dhd_log_dump_rtt_enabled(void)
{
	return (bool)logdump_rtt_enable;
}

void
dhd_log_dump_init(dhd_pub_t *dhd)
{
	struct dhd_log_dump_buf *dld_buf, *dld_buf_special;
	int i = 0;
	uint8 *prealloc_buf = NULL, *bufptr = NULL;
#if defined(CONFIG_DHD_USE_STATIC_BUF) && defined(DHD_USE_STATIC_MEMDUMP)
	int prealloc_idx = DHD_PREALLOC_DHD_LOG_DUMP_BUF;
#endif /* CONFIG_DHD_USE_STATIC_BUF && DHD_USE_STATIC_MEMDUMP */
	int ret;
	dhd_dbg_ring_t *ring = NULL;
	unsigned long flags = 0;
	dhd_info_t *dhd_info = dhd->info;
	void *cookie_buf = NULL;

	BCM_REFERENCE(ret);
	BCM_REFERENCE(ring);
	BCM_REFERENCE(flags);

	/* sanity check */
	if (logdump_prsrv_tailsize <= 0 ||
			logdump_prsrv_tailsize > DHD_LOG_DUMP_MAX_TAIL_FLUSH_SIZE) {
		logdump_prsrv_tailsize = DHD_LOG_DUMP_MAX_TAIL_FLUSH_SIZE;
	}
	/* now adjust the preserve log flush size based on the
	 * kernel printk log buffer size
	 */
#ifdef CONFIG_LOG_BUF_SHIFT
	DHD_ERROR(("%s: kernel log buf size = %uKB; logdump_prsrv_tailsize = %uKB;"
		" limit prsrv tail size to = %uKB\n",
		__FUNCTION__, (1 << CONFIG_LOG_BUF_SHIFT)/1024,
		logdump_prsrv_tailsize/1024, LOG_DUMP_KERNEL_TAIL_FLUSH_SIZE/1024));
	if (logdump_prsrv_tailsize > LOG_DUMP_KERNEL_TAIL_FLUSH_SIZE) {
		logdump_prsrv_tailsize = LOG_DUMP_KERNEL_TAIL_FLUSH_SIZE;
	}
#else
	DHD_ERROR(("%s: logdump_prsrv_tailsize = %uKB \n",
		__FUNCTION__, logdump_prsrv_tailsize/1024));
#endif /* CONFIG_LOG_BUF_SHIFT */

	mutex_init(&dhd_info->logdump_lock);

	/* initialize log dump buf structures */
	memset(g_dld_buf, 0, sizeof(struct dhd_log_dump_buf) * DLD_BUFFER_NUM);

	/* set the log dump buffer size based on the module_param */
	if (logdump_max_bufsize > LOG_DUMP_GENERAL_MAX_BUFSIZE ||
			logdump_max_bufsize <= 0)
		dld_buf_size[DLD_BUF_TYPE_GENERAL] = LOG_DUMP_GENERAL_MAX_BUFSIZE;
	else
		dld_buf_size[DLD_BUF_TYPE_GENERAL] = logdump_max_bufsize;

	/* pre-alloc the memory for the log buffers & the 'special' buffer */
	dld_buf_special = &g_dld_buf[DLD_BUF_TYPE_SPECIAL];
#if defined(CONFIG_DHD_USE_STATIC_BUF) && defined(DHD_USE_STATIC_MEMDUMP)
	DHD_ERROR(("%s : Try to allocate memory total(%d) special(%d)\n",
		__FUNCTION__, LOG_DUMP_TOTAL_BUFSIZE, LOG_DUMP_SPECIAL_MAX_BUFSIZE));
	prealloc_buf = DHD_OS_PREALLOC(dhd, prealloc_idx++, LOG_DUMP_TOTAL_BUFSIZE);
	dld_buf_special->buffer = DHD_OS_PREALLOC(dhd, prealloc_idx++,
		dld_buf_size[DLD_BUF_TYPE_SPECIAL]);
#else
	prealloc_buf = MALLOCZ(dhd->osh, LOG_DUMP_TOTAL_BUFSIZE);
	dld_buf_special->buffer = MALLOCZ(dhd->osh, dld_buf_size[DLD_BUF_TYPE_SPECIAL]);
#endif /* CONFIG_DHD_USE_STATIC_BUF && DHD_USE_STATIC_MEMDUMP */
	if (!prealloc_buf) {
		DHD_ERROR(("Failed to pre-allocate memory for log buffers !\n"));
		goto fail;
	}
	if (!dld_buf_special->buffer) {
		DHD_ERROR(("Failed to pre-allocate memory for special buffer !\n"));
		goto fail;
	}

	bufptr = prealloc_buf;
	for (i = 0; i < DLD_BUFFER_NUM; i++) {
		dld_buf = &g_dld_buf[i];
		dld_buf->dhd_pub = dhd;
		spin_lock_init(&dld_buf->lock);
		dld_buf->wraparound = 0;
		if (i != DLD_BUF_TYPE_SPECIAL) {
			dld_buf->buffer = bufptr;
			dld_buf->max = (unsigned long)dld_buf->buffer + dld_buf_size[i];
			bufptr = (uint8 *)dld_buf->max;
		} else {
			dld_buf->max = (unsigned long)dld_buf->buffer + dld_buf_size[i];
		}
		dld_buf->present = dld_buf->front = dld_buf->buffer;
		dld_buf->remain = dld_buf_size[i];
		dld_buf->enable = 1;
	}

#ifdef EWP_ECNTRS_LOGGING
	/* now use the rest of the pre-alloc'd memory for the ecounters log */
	dhd->ecntr_dbg_ring = MALLOCZ(dhd->osh, sizeof(dhd_dbg_ring_t));
	if (!dhd->ecntr_dbg_ring)
		goto fail;

	ring = (dhd_dbg_ring_t *)dhd->ecntr_dbg_ring;
	ret = dhd_dbg_ring_init(dhd, ring, ECNTR_RING_ID,
		ECNTR_RING_NAME, LOG_DUMP_ECNTRS_MAX_BUFSIZE,
		bufptr, TRUE);
	if (ret != BCME_OK) {
		DHD_ERROR(("%s: unable to init ecntr ring !\n",
			__FUNCTION__));
		goto fail;
	}
	DHD_DBG_RING_LOCK(ring->lock, flags);
	ring->state = RING_ACTIVE;
	ring->threshold = 0;
	DHD_DBG_RING_UNLOCK(ring->lock, flags);
	bufptr += LOG_DUMP_ECNTRS_MAX_BUFSIZE;
#endif /* EWP_ECNTRS_LOGGING */

#ifdef EWP_RTT_LOGGING
	/* now use the rest of the pre-alloc'd memory for the rtt log */
	dhd->rtt_dbg_ring = MALLOCZ(dhd->osh, sizeof(dhd_dbg_ring_t));
	if (!dhd->rtt_dbg_ring)
		goto fail;

	ring = (dhd_dbg_ring_t *)dhd->rtt_dbg_ring;
	ret = dhd_dbg_ring_init(dhd, ring, RTT_RING_ID,
		RTT_RING_NAME, LOG_DUMP_RTT_MAX_BUFSIZE,
		bufptr, TRUE);
	if (ret != BCME_OK) {
		DHD_ERROR(("%s: unable to init rtt ring !\n",
			__FUNCTION__));
		goto fail;
	}
	DHD_DBG_RING_LOCK(ring->lock, flags);
	ring->state = RING_ACTIVE;
	ring->threshold = 0;
	DHD_DBG_RING_UNLOCK(ring->lock, flags);
	bufptr += LOG_DUMP_RTT_MAX_BUFSIZE;
#endif /* EWP_RTT_LOGGING */

	/* The concise buffer is used as an intermediate buffer for two purposes:
	 * a) to pull ecounters records temporarily before
	 *    writing them to the file
	 * b) to store 'dhd dump' data before putting it into the file
	 * It should have a size equal to
	 * MAX(largest possible ecntr record, 'dhd dump' data size)
	 */
	dhd->concise_dbg_buf = MALLOC(dhd->osh, CONCISE_DUMP_BUFLEN);
	if (!dhd->concise_dbg_buf) {
		DHD_ERROR(("%s: unable to alloc mem for concise debug info !\n",
			__FUNCTION__));
		goto fail;
	}

#if defined(DHD_EVENT_LOG_FILTER)
	ret = dhd_event_log_filter_init(dhd,
		bufptr,
		LOG_DUMP_FILTER_MAX_BUFSIZE);
	if (ret != BCME_OK) {
		goto fail;
	}
#endif /* DHD_EVENT_LOG_FILTER */

	cookie_buf = MALLOC(dhd->osh, LOG_DUMP_COOKIE_BUFSIZE);
	if (!cookie_buf) {
		DHD_ERROR(("%s: unable to alloc mem for logdump cookie buffer\n",
			__FUNCTION__));
		goto fail;
	}
	ret = dhd_logdump_cookie_init(dhd, cookie_buf, LOG_DUMP_COOKIE_BUFSIZE);
	if (ret != BCME_OK) {
		MFREE(dhd->osh, cookie_buf, LOG_DUMP_COOKIE_BUFSIZE);
		goto fail;
	}
	return;

fail:
	if (dhd->logdump_cookie) {
		dhd_logdump_cookie_deinit(dhd);
		MFREE(dhd->osh, dhd->logdump_cookie, LOG_DUMP_COOKIE_BUFSIZE);
		dhd->logdump_cookie = NULL;
	}
#if defined(DHD_EVENT_LOG_FILTER)
	if (dhd->event_log_filter) {
		dhd_event_log_filter_deinit(dhd);
	}
#endif /* DHD_EVENT_LOG_FILTER */

	if (dhd->concise_dbg_buf) {
		MFREE(dhd->osh, dhd->concise_dbg_buf, CONCISE_DUMP_BUFLEN);
	}

#ifdef EWP_ECNTRS_LOGGING
	if (dhd->ecntr_dbg_ring) {
		ring = (dhd_dbg_ring_t *)dhd->ecntr_dbg_ring;
		dhd_dbg_ring_deinit(dhd, ring);
		ring->ring_buf = NULL;
		ring->ring_size = 0;
		MFREE(dhd->osh, ring, sizeof(dhd_dbg_ring_t));
		dhd->ecntr_dbg_ring = NULL;
	}
#endif /* EWP_ECNTRS_LOGGING */

#ifdef EWP_RTT_LOGGING
	if (dhd->rtt_dbg_ring) {
		ring = (dhd_dbg_ring_t *)dhd->rtt_dbg_ring;
		dhd_dbg_ring_deinit(dhd, ring);
		ring->ring_buf = NULL;
		ring->ring_size = 0;
		MFREE(dhd->osh, ring, sizeof(dhd_dbg_ring_t));
		dhd->rtt_dbg_ring = NULL;
	}
#endif /* EWP_RTT_LOGGING */

#if defined(CONFIG_DHD_USE_STATIC_BUF) && defined(DHD_USE_STATIC_MEMDUMP)
	if (prealloc_buf) {
		DHD_OS_PREFREE(dhd, prealloc_buf, LOG_DUMP_TOTAL_BUFSIZE);
	}
	if (dld_buf_special->buffer) {
		DHD_OS_PREFREE(dhd, dld_buf_special->buffer,
			dld_buf_size[DLD_BUF_TYPE_SPECIAL]);
	}
#else
	if (prealloc_buf) {
		MFREE(dhd->osh, prealloc_buf, LOG_DUMP_TOTAL_BUFSIZE);
	}
	if (dld_buf_special->buffer) {
		MFREE(dhd->osh, dld_buf_special->buffer,
			dld_buf_size[DLD_BUF_TYPE_SPECIAL]);
	}
#endif /* CONFIG_DHD_USE_STATIC_BUF && DHD_USE_STATIC_MEMDUMP */
	for (i = 0; i < DLD_BUFFER_NUM; i++) {
		dld_buf = &g_dld_buf[i];
		dld_buf->enable = 0;
		dld_buf->buffer = NULL;
	}
	mutex_destroy(&dhd_info->logdump_lock);
}

void
dhd_log_dump_deinit(dhd_pub_t *dhd)
{
	struct dhd_log_dump_buf *dld_buf = NULL, *dld_buf_special = NULL;
	int i = 0;
	dhd_info_t *dhd_info = dhd->info;
	dhd_dbg_ring_t *ring = NULL;

	BCM_REFERENCE(ring);

	if (dhd->concise_dbg_buf) {
		MFREE(dhd->osh, dhd->concise_dbg_buf, CONCISE_DUMP_BUFLEN);
		dhd->concise_dbg_buf = NULL;
	}

	if (dhd->logdump_cookie) {
		dhd_logdump_cookie_deinit(dhd);
		MFREE(dhd->osh, dhd->logdump_cookie, LOG_DUMP_COOKIE_BUFSIZE);
		dhd->logdump_cookie = NULL;
	}

#if defined(DHD_EVENT_LOG_FILTER)
	if (dhd->event_log_filter) {
		dhd_event_log_filter_deinit(dhd);
	}
#endif /* DHD_EVENT_LOG_FILTER */

#ifdef EWP_ECNTRS_LOGGING
	if (dhd->ecntr_dbg_ring) {
		ring = (dhd_dbg_ring_t *)dhd->ecntr_dbg_ring;
		dhd_dbg_ring_deinit(dhd, ring);
		ring->ring_buf = NULL;
		ring->ring_size = 0;
		MFREE(dhd->osh, ring, sizeof(dhd_dbg_ring_t));
		dhd->ecntr_dbg_ring = NULL;
	}
#endif /* EWP_ECNTRS_LOGGING */

#ifdef EWP_RTT_LOGGING
	if (dhd->rtt_dbg_ring) {
		ring = (dhd_dbg_ring_t *)dhd->rtt_dbg_ring;
		dhd_dbg_ring_deinit(dhd, ring);
		ring->ring_buf = NULL;
		ring->ring_size = 0;
		MFREE(dhd->osh, ring, sizeof(dhd_dbg_ring_t));
		dhd->rtt_dbg_ring = NULL;
	}
#endif /* EWP_RTT_LOGGING */

	/* the 'general' buffer points to the start of the pre-alloc'd memory */
	dld_buf = &g_dld_buf[DLD_BUF_TYPE_GENERAL];
	dld_buf_special = &g_dld_buf[DLD_BUF_TYPE_SPECIAL];
#if defined(CONFIG_DHD_USE_STATIC_BUF) && defined(DHD_USE_STATIC_MEMDUMP)
	if (dld_buf->buffer) {
		DHD_OS_PREFREE(dhd, dld_buf->buffer, LOG_DUMP_TOTAL_BUFSIZE);
	}
	if (dld_buf_special->buffer) {
		DHD_OS_PREFREE(dhd, dld_buf_special->buffer,
			dld_buf_size[DLD_BUF_TYPE_SPECIAL]);
	}
#else
	if (dld_buf->buffer) {
		MFREE(dhd->osh, dld_buf->buffer, LOG_DUMP_TOTAL_BUFSIZE);
	}
	if (dld_buf_special->buffer) {
		MFREE(dhd->osh, dld_buf_special->buffer,
			dld_buf_size[DLD_BUF_TYPE_SPECIAL]);
	}
#endif /* CONFIG_DHD_USE_STATIC_BUF && DHD_USE_STATIC_MEMDUMP */
	for (i = 0; i < DLD_BUFFER_NUM; i++) {
		dld_buf = &g_dld_buf[i];
		dld_buf->enable = 0;
		dld_buf->buffer = NULL;
	}
	mutex_destroy(&dhd_info->logdump_lock);
}

void
dhd_log_dump_write(int type, char *binary_data,
	int binary_len, const char *fmt, ...)
{
	int len = 0;
	char tmp_buf[DHD_LOG_DUMP_MAX_TEMP_BUFFER_SIZE] = {0, };
	va_list args;
	unsigned long flags = 0;
	struct dhd_log_dump_buf *dld_buf = NULL;
	bool flush_log = FALSE;

	if (type < 0 || type >= DLD_BUFFER_NUM) {
		DHD_INFO(("%s: Unknown DHD_LOG_DUMP_BUF_TYPE(%d).\n",
			__FUNCTION__, type));
		return;
	}

	dld_buf = &g_dld_buf[type];
	if (dld_buf->enable != 1) {
		return;
	}

	va_start(args, fmt);
	len = vsnprintf(tmp_buf, DHD_LOG_DUMP_MAX_TEMP_BUFFER_SIZE, fmt, args);
	/* a non-C99-compliant vsnprintf returns -1 on truncation; a C99-compliant
	 * one returns len >= DHD_LOG_DUMP_MAX_TEMP_BUFFER_SIZE
	 */
	va_end(args);
	if (len < 0) {
		return;
	}
	if (len >= DHD_LOG_DUMP_MAX_TEMP_BUFFER_SIZE) {
		len = DHD_LOG_DUMP_MAX_TEMP_BUFFER_SIZE - 1;
		tmp_buf[len] = '\0';
	}

	/* make a critical section to eliminate race conditions */
	spin_lock_irqsave(&dld_buf->lock, flags);
	if (dld_buf->remain < len) {
		dld_buf->wraparound = 1;
		dld_buf->present = dld_buf->front;
		dld_buf->remain = dld_buf_size[type];
		/* if a wrap-around happens, flush the ring buffer to the file */
		flush_log = TRUE;
	}
	memcpy(dld_buf->present, tmp_buf, len);
	dld_buf->remain -= len;
	dld_buf->present += len;
	spin_unlock_irqrestore(&dld_buf->lock, flags);

	/* double check for an invalid memory operation */
	ASSERT((unsigned long)dld_buf->present <= dld_buf->max);

	if (dld_buf->dhd_pub) {
		dhd_pub_t *dhdp = (dhd_pub_t *)dld_buf->dhd_pub;
		dhdp->logdump_periodic_flush = logdump_periodic_flush;
		if (logdump_periodic_flush && flush_log) {
			log_dump_type_t *flush_type = MALLOCZ(dhdp->osh,
				sizeof(log_dump_type_t));
			if (flush_type) {
				*flush_type = type;
				dhd_schedule_log_dump(dld_buf->dhd_pub, flush_type);
			}
		}
	}
}
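
/*
 * Illustrative sketch (under #if 0): callers format a line into one of the
 * DLD buffers; when the buffer wraps, 'present' is rewound to 'front' and,
 * if periodic flushing is enabled, a deferred flush of that buffer type is
 * scheduled via dhd_schedule_log_dump().
 */
#if 0
	dhd_log_dump_write(DLD_BUF_TYPE_GENERAL, NULL, 0,
		"[%s] %s: scan start\n", dhd_log_dump_get_timestamp(), __FUNCTION__);
#endif
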
char*
dhd_log_dump_get_timestamp(void)
{
	static char buf[16];
	u64 ts_nsec;
	unsigned long rem_nsec;

	ts_nsec = local_clock();
	rem_nsec = DIV_AND_MOD_U64_BY_U32(ts_nsec, NSEC_PER_SEC);
	snprintf(buf, sizeof(buf), "%5lu.%06lu",
		(unsigned long)ts_nsec, rem_nsec / NSEC_PER_USEC);
	return buf;
}
#endif /* DHD_LOG_DUMP */
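
/*
 * Illustrative note (under #if 0): DIV_AND_MOD_U64_BY_U32() divides its first
 * argument in place and hands back the remainder - the usual kernel do_div()
 * pattern - so a local clock of 12345678901 ns prints as "   12.345678".
 */
#if 0
	u64 ts = local_clock();
	unsigned long rem = do_div(ts, NSEC_PER_SEC);	/* ts = secs, rem = ns */
#endif
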
#ifdef DHD_PCIE_NATIVE_RUNTIMEPM
void
dhd_flush_rx_tx_wq(dhd_pub_t *dhdp)
{
	dhd_info_t *dhd;

	if (dhdp) {
		dhd = dhdp->info;
		if (dhd) {
			flush_workqueue(dhd->tx_wq);
			flush_workqueue(dhd->rx_wq);
		}
	}
	return;
}
#endif /* DHD_PCIE_NATIVE_RUNTIMEPM */

#ifdef DHD_DEBUG_UART
bool
dhd_debug_uart_is_running(struct net_device *dev)
{
	dhd_info_t *dhd = DHD_DEV_INFO(dev);

	if (dhd->duart_execute) {
		return TRUE;
	}
	return FALSE;
}

static void
dhd_debug_uart_exec_rd(void *handle, void *event_info, u8 event)
{
	dhd_pub_t *dhdp = handle;
	dhd_debug_uart_exec(dhdp, "rd");
}

static void
dhd_debug_uart_exec(dhd_pub_t *dhdp, char *cmd)
{
	int ret;
	char *argv[] = {DHD_DEBUG_UART_EXEC_PATH, cmd, NULL};
	char *envp[] = {"HOME=/", "TERM=linux", "PATH=/sbin:/system/bin", NULL};

#ifdef DHD_FW_COREDUMP
	if (dhdp->memdump_enabled == DUMP_MEMFILE_BUGON)
#endif /* DHD_FW_COREDUMP */
	{
		if (dhdp->hang_reason == HANG_REASON_PCIE_LINK_DOWN_RC_DETECT ||
			dhdp->hang_reason == HANG_REASON_PCIE_LINK_DOWN_EP_DETECT ||
#ifdef DHD_FW_COREDUMP
			dhdp->memdump_success == FALSE ||
#endif /* DHD_FW_COREDUMP */
			FALSE) {
			dhdp->info->duart_execute = TRUE;
			DHD_ERROR(("DHD: %s - execute %s %s\n",
				__FUNCTION__, DHD_DEBUG_UART_EXEC_PATH, cmd));
			ret = call_usermodehelper(argv[0], argv, envp, UMH_WAIT_PROC);
			DHD_ERROR(("DHD: %s - %s %s ret = %d\n",
				__FUNCTION__, DHD_DEBUG_UART_EXEC_PATH, cmd, ret));
			dhdp->info->duart_execute = FALSE;

#ifdef DHD_LOG_DUMP
			if (dhdp->memdump_type != DUMP_TYPE_BY_SYSDUMP)
#endif /* DHD_LOG_DUMP */
			{
				BUG_ON(1);
			}
		}
	}
}
#endif /* DHD_DEBUG_UART */

#if defined(DHD_BLOB_EXISTENCE_CHECK)
void
dhd_set_blob_support(dhd_pub_t *dhdp, char *fw_path)
{
	struct file *fp;
	char *filepath = VENDOR_PATH CONFIG_BCMDHD_CLM_PATH;

	fp = filp_open(filepath, O_RDONLY, 0);
	if (IS_ERR(fp)) {
		DHD_ERROR(("%s: ----- blob file doesn't exist (%s) -----\n", __FUNCTION__,
			filepath));
		dhdp->is_blob = FALSE;
	} else {
		DHD_ERROR(("%s: ----- blob file exists (%s) -----\n", __FUNCTION__, filepath));
		dhdp->is_blob = TRUE;
#if defined(CONCATE_BLOB)
		strncat(fw_path, "_blob", strlen("_blob"));
#else
		BCM_REFERENCE(fw_path);
#endif /* CONCATE_BLOB */
		filp_close(fp, NULL);
	}
}
#endif /* DHD_BLOB_EXISTENCE_CHECK */

#if defined(PCIE_FULL_DONGLE)
/** test / loopback */
void
dmaxfer_free_dmaaddr_handler(void *handle, void *event_info, u8 event)
{
	dmaxref_mem_map_t *dmmap = (dmaxref_mem_map_t *)event_info;
	dhd_info_t *dhd_info = (dhd_info_t *)handle;

	if (event != DHD_WQ_WORK_DMA_LB_MEM_REL) {
		DHD_ERROR(("%s: Unexpected event \n", __FUNCTION__));
		return;
	}
	if (dhd_info == NULL) {
		DHD_ERROR(("%s: Invalid dhd_info\n", __FUNCTION__));
		return;
	}
	if (dmmap == NULL) {
		DHD_ERROR(("%s: dmmap is null\n", __FUNCTION__));
		return;
	}
	dmaxfer_free_prev_dmaaddr(&dhd_info->pub, dmmap);
}

void
dhd_schedule_dmaxfer_free(dhd_pub_t *dhdp, dmaxref_mem_map_t *dmmap)
{
	dhd_info_t *dhd_info = dhdp->info;

	dhd_deferred_schedule_work(dhd_info->dhd_deferred_wq, (void *)dmmap,
		DHD_WQ_WORK_DMA_LB_MEM_REL, dmaxfer_free_dmaaddr_handler, DHD_WQ_WORK_PRIORITY_LOW);
}
#endif /* PCIE_FULL_DONGLE */

/* ---------------------------- End of sysfs implementation ------------------------------------- */

#ifdef SET_PCIE_IRQ_CPU_CORE
void
dhd_set_irq_cpucore(dhd_pub_t *dhdp, int affinity_cmd)
{
	unsigned int pcie_irq = 0;

	if (!dhdp) {
		DHD_ERROR(("%s : dhd is NULL\n", __FUNCTION__));
		return;
	}
	if (!dhdp->bus) {
		DHD_ERROR(("%s : dhd->bus is NULL\n", __FUNCTION__));
		return;
	}

	DHD_ERROR(("Enter %s, PCIe affinity cmd=0x%x\n", __FUNCTION__, affinity_cmd));

	if (dhdpcie_get_pcieirq(dhdp->bus, &pcie_irq)) {
		DHD_ERROR(("%s : Can't get interrupt number\n", __FUNCTION__));
		return;
	}

	/*
	 * irq_set_affinity() assigns the PCIe interrupt to the dedicated CPU core.
	 * If the dedicated CPU core is not on-line,
	 * the PCIe interrupt is scheduled on CPU core 0.
	 */
	switch (affinity_cmd) {
	case PCIE_IRQ_AFFINITY_OFF:
		break;
	case PCIE_IRQ_AFFINITY_BIG_CORE_ANY:
#if defined(CONFIG_ARCH_SM8150)
		irq_set_affinity_hint(pcie_irq, dhdp->info->cpumask_primary);
		irq_set_affinity(pcie_irq, dhdp->info->cpumask_primary);
#else /* Exynos and others */
		irq_set_affinity(pcie_irq, dhdp->info->cpumask_primary);
#endif /* CONFIG_ARCH_SM8150 */
		break;
#if defined(CONFIG_SOC_EXYNOS9810) || defined(CONFIG_SOC_EXYNOS9820)
	case PCIE_IRQ_AFFINITY_BIG_CORE_EXYNOS:
		DHD_ERROR(("%s, PCIe IRQ:%u set Core %d\n",
			__FUNCTION__, pcie_irq, PCIE_IRQ_CPU_CORE));
		irq_set_affinity(pcie_irq, cpumask_of(PCIE_IRQ_CPU_CORE));
		break;
#endif /* CONFIG_SOC_EXYNOS9810 || CONFIG_SOC_EXYNOS9820 */
	default:
		DHD_ERROR(("%s, Unknown PCIe affinity cmd=0x%x\n",
			__FUNCTION__, affinity_cmd));
	}
}
#endif /* SET_PCIE_IRQ_CPU_CORE */

int
dhd_write_file(const char *filepath, char *buf, int buf_len)
{
	struct file *fp = NULL;
	mm_segment_t old_fs;
	int ret = 0;

	/* change to KERNEL_DS address limit */
	old_fs = get_fs();
	set_fs(KERNEL_DS);

	/* The file is always created. */
	fp = filp_open(filepath, O_RDWR | O_CREAT, 0664);
	if (IS_ERR(fp)) {
		DHD_ERROR(("%s: Couldn't open file '%s' err %ld\n",
			__FUNCTION__, filepath, PTR_ERR(fp)));
		ret = BCME_ERROR;
	} else {
		if (fp->f_mode & FMODE_WRITE) {
			ret = vfs_write(fp, buf, buf_len, &fp->f_pos);
			if (ret < 0) {
				DHD_ERROR(("%s: Couldn't write file '%s'\n",
					__FUNCTION__, filepath));
				ret = BCME_ERROR;
			} else {
				ret = BCME_OK;
			}
		}
		filp_close(fp, NULL);
	}

	/* restore previous address limit */
	set_fs(old_fs);

	return ret;
}

int
dhd_read_file(const char *filepath, char *buf, int buf_len)
{
	struct file *fp = NULL;
	mm_segment_t old_fs;
	int ret;

	/* change to KERNEL_DS address limit */
	old_fs = get_fs();
	set_fs(KERNEL_DS);

	fp = filp_open(filepath, O_RDONLY, 0);
	if (IS_ERR(fp)) {
		set_fs(old_fs);
		DHD_ERROR(("%s: File %s doesn't exist\n", __FUNCTION__, filepath));
		return BCME_ERROR;
	}

	ret = compat_kernel_read(fp, 0, buf, buf_len);
	filp_close(fp, NULL);

	/* restore previous address limit */
	set_fs(old_fs);

	/* Check the number of bytes read */
	if (ret > 0) {
		/* successful read */
		ret = 0;
	} else {
		DHD_ERROR(("%s: Couldn't read the file %s, ret=%d\n",
			__FUNCTION__, filepath, ret));
		ret = BCME_ERROR;
	}

	return ret;
}

int
dhd_write_file_and_check(const char *filepath, char *buf, int buf_len)
{
	int ret;

	ret = dhd_write_file(filepath, buf, buf_len);
	if (ret < 0) {
		return ret;
	}

	/* Read the file again and check that its size is not zero */
	memset(buf, 0, buf_len);
	ret = dhd_read_file(filepath, buf, buf_len);

	return ret;
}
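
/*
 * Illustrative sketch (under #if 0): the write-and-check helper is meant for
 * small control files where the caller wants to confirm the contents actually
 * landed on storage. The path below is hypothetical.
 */
#if 0
	char flag[4] = "1";

	if (dhd_write_file_and_check("/data/misc/wifi/.wifi_flag", flag, sizeof(flag)) < 0) {
		DHD_ERROR(("flag file write/verify failed\n"));
	}
#endif
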
#ifdef DHD_BANDSTEER
/*
 * This function returns BCME_OK only if there are exactly two GO interfaces.
 * TODO: Make it flexible enough to have AP + AP.
 */
s32
dhd_bandsteer_get_ifaces(void *pub, void *ifaces)
{
	dhd_if_t *iflist;	/* For supporting multiple interfaces */
	uint8 idx;
	uint8 ap_idx_count = 0;
	dhd_pub_t *dhd = (dhd_pub_t *) pub;
	dhd_bandsteer_iface_info_t *bsd_ifp = (dhd_bandsteer_iface_info_t *)ifaces;

	DHD_INFO(("%s: entered\n", __FUNCTION__));
	for (idx = 0; idx < DHD_MAX_IFS; idx++) {
		iflist = dhd->info->iflist[idx];
		if (iflist == NULL) {
			continue;
		}

		if (iflist->net != NULL) {
			if (iflist->net->ieee80211_ptr != NULL) {
				if ((iflist->net->ieee80211_ptr->iftype == NL80211_IFTYPE_P2P_GO) ||
					(iflist->net->ieee80211_ptr->iftype == NL80211_IFTYPE_AP)) {
					ap_idx_count++;
					if (ap_idx_count > 2) {
						continue;
					}
					bsd_ifp->ndev = iflist->net;
					bsd_ifp->bssidx = iflist->bssidx;
					bsd_ifp++;
				}
			}
		}
	}
	if (ap_idx_count == 2) {
		return BCME_OK;
	} else {
		return BCME_ERROR;
	}
}

void
dhd_bandsteer_schedule_work_on_timeout(dhd_bandsteer_mac_entry_t *dhd_bandsteer_mac)
{
	dhd_bandsteer_context_t *dhd_bandsteer_cntx = dhd_bandsteer_mac->dhd_bandsteer_cntx;
	dhd_pub_t *dhd = (dhd_pub_t *) dhd_bandsteer_cntx->dhd_pub;

	dhd_deferred_schedule_work(dhd->info->dhd_deferred_wq,
		(void *)dhd_bandsteer_mac, DHD_WQ_WORK_BANDSTEER_STEP_MOVE,
		dhd_bandsteer_workqueue_wrapper, DHD_WQ_WORK_PRIORITY_LOW);
}
#endif /* DHD_BANDSTEER */

#ifdef FILTER_IE
int dhd_read_from_file(dhd_pub_t *dhd)
{
	int ret = 0, nread = 0;
	void *fd;
	uint8 *buf;

	NULL_CHECK(dhd, "dhd is NULL", ret);

	buf = MALLOCZ(dhd->osh, FILE_BLOCK_READ_SIZE);
	if (!buf) {
		DHD_ERROR(("error: failed to allocate buf.\n"));
		return BCME_NOMEM;
	}

	/* open the file to read */
	fd = dhd_os_open_image1(dhd, FILTER_IE_PATH);
	if (!fd) {
		DHD_ERROR(("error: failed to open %s\n", FILTER_IE_PATH));
		ret = BCME_EPERM;
		goto exit;
	}

	nread = dhd_os_get_image_block(buf, (FILE_BLOCK_READ_SIZE - 1), fd);
	if (nread > 0) {
		buf[nread] = '\0';
		if ((ret = dhd_parse_filter_ie(dhd, buf)) < 0) {
			DHD_ERROR(("error: failed to parse filter ie\n"));
		}
	} else {
		DHD_ERROR(("error: zero length file, failed to read\n"));
		ret = BCME_ERROR;
	}
	dhd_os_close_image1(dhd, fd);

exit:
	if (buf) {
		MFREE(dhd->osh, buf, FILE_BLOCK_READ_SIZE);
		buf = NULL;
	}
	return ret;
}

int dhd_get_filter_ie_count(dhd_pub_t *dhdp, uint8 *buf)
{
	uint8 *pstr = buf;
	int element_count = 0;

	if (buf == NULL) {
		return BCME_ERROR;
	}

	while (*pstr != '\0') {
		if (*pstr == '\n') {
			element_count++;
		}
		pstr++;
	}
	/*
	 * A newline character must not be present after the last line,
	 * so count the last line explicitly.
	 */
	element_count++;

	return element_count;
}

int dhd_parse_oui(dhd_pub_t *dhd, uint8 *inbuf, uint8 *oui, int len)
{
	uint8 i, j, msb, lsb, oui_len = 0;

	/*
	 * The OUI can vary from 3 bytes to 5 bytes.
	 * When read from the file as ASCII input it can
	 * take a maximum size of 14 bytes and a minimum size of
	 * 8 bytes including the ":" separators.
	 * Example 5-byte OUI: <AB:DE:BE:CD:FA>
	 * Example 3-byte OUI: <AB:DC:EF>
	 */
	if ((inbuf == NULL) || (len < 8) || (len > 14)) {
		DHD_ERROR(("error: failed to parse OUI \n"));
		return BCME_ERROR;
	}

	for (j = 0, i = 0; i < len; i += 3, ++j) {
		if (!bcm_isxdigit(inbuf[i]) || !bcm_isxdigit(inbuf[i + 1])) {
			DHD_ERROR(("error: invalid OUI format \n"));
			return BCME_ERROR;
		}
		msb = inbuf[i] > '9' ? bcm_toupper(inbuf[i]) - 'A' + 10 : inbuf[i] - '0';
		lsb = inbuf[i + 1] > '9' ? bcm_toupper(inbuf[i + 1]) -
			'A' + 10 : inbuf[i + 1] - '0';
		oui[j] = (msb << 4) | lsb;
	}
	/* Size of the OUI; it can be 3, 4 or 5 bytes */
	oui_len = j;

	return oui_len;
}
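
/*
 * Illustrative sketch (under #if 0): dhd_parse_oui() converts the
 * colon-separated ASCII form into raw bytes and returns the byte count,
 * e.g. "00:50:F2" becomes {0x00, 0x50, 0xF2} with a return value of 3.
 */
#if 0
	uint8 oui[5];
	int n = dhd_parse_oui(dhd, (uint8 *)"00:50:F2", oui, strlen("00:50:F2"));
#endif
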
int dhd_check_valid_ie(dhd_pub_t *dhdp, uint8 *buf, int len)
{
	int i = 0;

	while (i < len) {
		if (!bcm_isdigit(buf[i])) {
			DHD_ERROR(("error: non digit value found in filter_ie \n"));
			return BCME_ERROR;
		}
		i++;
	}
	if (bcm_atoi((char *)buf) > 255) {
		DHD_ERROR(("error: element id cannot be greater than 255 \n"));
		return BCME_ERROR;
	}

	return BCME_OK;
}
int dhd_parse_filter_ie(dhd_pub_t *dhd, uint8 *buf)
{
	int element_count = 0, i = 0, oui_size = 0, ret = 0;
	uint16 bufsize, buf_space_left, id = 0, len = 0;
	uint16 filter_iovsize, all_tlvsize;
	wl_filter_ie_tlv_t *p_ie_tlv = NULL;
	wl_filter_ie_iov_v1_t *p_filter_iov = (wl_filter_ie_iov_v1_t *)NULL;
	char *token = NULL, *ele_token = NULL, *oui_token = NULL, *type = NULL;
	uint8 data[20];

	element_count = dhd_get_filter_ie_count(dhd, buf);
	DHD_INFO(("total element count %d\n", element_count));
	/* Calculate the whole buffer size */
	filter_iovsize = sizeof(wl_filter_ie_iov_v1_t) + FILTER_IE_BUFSZ;
	p_filter_iov = MALLOCZ(dhd->osh, filter_iovsize);
	if (p_filter_iov == NULL) {
		DHD_ERROR(("error: failed to allocate %d bytes of memory\n", filter_iovsize));
		return BCME_ERROR;
	}
	/* setup filter iovar header */
	p_filter_iov->version = WL_FILTER_IE_VERSION;
	p_filter_iov->len = filter_iovsize;
	p_filter_iov->fixed_length = p_filter_iov->len - FILTER_IE_BUFSZ;
	p_filter_iov->pktflag = FC_PROBE_REQ;
	p_filter_iov->option = WL_FILTER_IE_CHECK_SUB_OPTION;
	/* setup TLVs */
	bufsize = filter_iovsize - WL_FILTER_IE_IOV_HDR_SIZE; /* adjust available size for TLVs */
	p_ie_tlv = (wl_filter_ie_tlv_t *)&p_filter_iov->tlvs[0];
	buf_space_left = bufsize;
	while ((i < element_count) && (buf != NULL)) {
		len = 0;
		/* token contains one line of input data */
		token = bcmstrtok((char**)&buf, "\n", NULL);
		if (token == NULL) {
			break;
		}
		if ((ele_token = bcmstrstr(token, ",")) == NULL) {
			/* only element id is present */
			if (dhd_check_valid_ie(dhd, token, strlen(token)) == BCME_ERROR) {
				DHD_ERROR(("error: Invalid element id\n"));
				ret = BCME_ERROR;
				goto exit;
			}
			id = bcm_atoi((char*)token);
			data[len++] = WL_FILTER_IE_SET;
		} else {
			/* oui is present */
			ele_token = bcmstrtok(&token, ",", NULL);
			if ((ele_token == NULL) || (dhd_check_valid_ie(dhd, ele_token,
				strlen(ele_token)) == BCME_ERROR)) {
				DHD_ERROR(("error: Invalid element id\n"));
				ret = BCME_ERROR;
				goto exit;
			}
			id = bcm_atoi((char*)ele_token);
			data[len++] = WL_FILTER_IE_SET;
			if ((oui_token = bcmstrstr(token, ",")) == NULL) {
				oui_size = dhd_parse_oui(dhd, token, &(data[len]), strlen(token));
				if (oui_size == BCME_ERROR) {
					DHD_ERROR(("error: Invalid OUI\n"));
					ret = BCME_ERROR;
					goto exit;
				}
				len += oui_size;
			} else {
				/* type is present */
				oui_token = bcmstrtok(&token, ",", NULL);
				if ((oui_token == NULL) || ((oui_size =
					dhd_parse_oui(dhd, oui_token,
					&(data[len]), strlen(oui_token))) == BCME_ERROR)) {
					DHD_ERROR(("error: Invalid OUI\n"));
					ret = BCME_ERROR;
					goto exit;
				}
				len += oui_size;
				if ((type = bcmstrstr(token, ",")) == NULL) {
					if (dhd_check_valid_ie(dhd, token,
						strlen(token)) == BCME_ERROR) {
						DHD_ERROR(("error: Invalid type\n"));
						ret = BCME_ERROR;
						goto exit;
					}
					data[len++] = bcm_atoi((char*)token);
				} else {
					/* subtype is present */
					type = bcmstrtok(&token, ",", NULL);
					if ((type == NULL) || (dhd_check_valid_ie(dhd, type,
						strlen(type)) == BCME_ERROR)) {
						DHD_ERROR(("error: Invalid type\n"));
						ret = BCME_ERROR;
						goto exit;
					}
					data[len++] = bcm_atoi((char*)type);
					/* subtype is the last element */
					if ((token == NULL) || (*token == '\0') ||
						(dhd_check_valid_ie(dhd, token,
						strlen(token)) == BCME_ERROR)) {
						DHD_ERROR(("error: Invalid subtype\n"));
						ret = BCME_ERROR;
						goto exit;
					}
					data[len++] = bcm_atoi((char*)token);
				}
			}
		}
		ret = bcm_pack_xtlv_entry((uint8 **)&p_ie_tlv,
			&buf_space_left, id, len, data, BCM_XTLV_OPTION_ALIGN32);
		if (ret != BCME_OK) {
			DHD_ERROR(("%s: bcm_pack_xtlv_entry() failed, "
				"status=%d\n", __FUNCTION__, ret));
			goto exit;
		}
		i++;
	}
	if (i == 0) {
		/* the file is empty or the first line is blank */
		DHD_ERROR(("error: filter_ie file is empty or first line is blank\n"));
		ret = BCME_ERROR;
		goto exit;
	}
	/* update the iov header; set len to include all TLVs + header */
	all_tlvsize = (bufsize - buf_space_left);
	p_filter_iov->len = htol16(all_tlvsize + WL_FILTER_IE_IOV_HDR_SIZE);
	ret = dhd_iovar(dhd, 0, "filter_ie", (void *)p_filter_iov,
		p_filter_iov->len, NULL, 0, TRUE);
	if (ret != BCME_OK) {
		DHD_ERROR(("error: IOVAR failed, status=%d\n", ret));
	}
exit:
	/* clean up */
	if (p_filter_iov) {
		MFREE(dhd->osh, p_filter_iov, filter_iovsize);
		p_filter_iov = NULL;
	}
	return ret;
}
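/*
 * Editor's sketch (not part of the original driver): the parser above
 * consumes one newline-terminated entry per line, with up to four
 * comma-separated fields. Hypothetical file contents it would accept:
 *
 *	221			(element id only)
 *	221,<oui>		(element id + OUI)
 *	221,<oui>,4		(element id + OUI + type)
 *	221,<oui>,4,1		(element id + OUI + type + subtype)
 *
 * The exact OUI syntax is whatever dhd_parse_oui() accepts (not shown here);
 * the values above are placeholders, not known-good firmware inputs.
 */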
#endif /* FILTER_IE */

#ifdef DHD_WAKE_STATUS
wake_counts_t*
dhd_get_wakecount(dhd_pub_t *dhdp)
{
	return dhd_bus_get_wakecount(dhdp);
}
#endif /* DHD_WAKE_STATUS */
int
dhd_get_random_bytes(uint8 *buf, uint len)
{
#ifdef BCMPCIE
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 19, 0))
	/* On 4.19+ get_random_bytes_arch() reports how many bytes it filled;
	 * fall back to get_random_bytes() if the arch source came up short.
	 */
	int rndlen = get_random_bytes_arch(buf, len);
	if (rndlen != len) {
		bzero(buf, len);
		get_random_bytes(buf, len);
	}
#else
	get_random_bytes_arch(buf, len);
#endif // endif
#endif /* BCMPCIE */
	return BCME_OK;
}
#if defined(DHD_HANG_SEND_UP_TEST)
void
dhd_make_hang_with_reason(struct net_device *dev, const char *string_num)
{
	dhd_info_t *dhd = NULL;
	dhd_pub_t *dhdp = NULL;
	uint reason = HANG_REASON_MAX;
	uint32 fw_test_code = 0;

	dhd = DHD_DEV_INFO(dev);
	if (dhd) {
		dhdp = &dhd->pub;
	}
	if (!dhd || !dhdp) {
		return;
	}
	reason = (uint)bcm_strtoul(string_num, NULL, 0);
	DHD_ERROR(("Enter %s, reason=0x%x\n", __FUNCTION__, reason));
	if (reason == 0) {
		if (dhdp->req_hang_type) {
			DHD_ERROR(("%s, Clear HANG test request 0x%x\n",
				__FUNCTION__, dhdp->req_hang_type));
			dhdp->req_hang_type = 0;
			return;
		} else {
			DHD_ERROR(("%s, No requested HANG test\n", __FUNCTION__));
			return;
		}
	} else if ((reason <= HANG_REASON_MASK) || (reason >= HANG_REASON_MAX)) {
		DHD_ERROR(("Invalid HANG request, reason 0x%x\n", reason));
		return;
	}
	if (dhdp->req_hang_type != 0) {
		DHD_ERROR(("Already HANG requested for test\n"));
		return;
	}
	switch (reason) {
		case HANG_REASON_IOCTL_RESP_TIMEOUT:
			DHD_ERROR(("Make HANG!!!: IOCTL response timeout (0x%x)\n", reason));
			dhdp->req_hang_type = reason;
			fw_test_code = 102; /* resumed on timeout */
			(void)dhd_wl_ioctl_set_intiovar(dhdp, "bus:disconnect", fw_test_code,
				WLC_SET_VAR, TRUE, 0);
			break;
		case HANG_REASON_DONGLE_TRAP:
			DHD_ERROR(("Make HANG!!!: Dongle trap (0x%x)\n", reason));
			dhdp->req_hang_type = reason;
			fw_test_code = 99; /* dongle trap */
			(void)dhd_wl_ioctl_set_intiovar(dhdp, "bus:disconnect", fw_test_code,
				WLC_SET_VAR, TRUE, 0);
			break;
		case HANG_REASON_D3_ACK_TIMEOUT:
			DHD_ERROR(("Make HANG!!!: D3 ACK timeout (0x%x)\n", reason));
			dhdp->req_hang_type = reason;
			break;
		case HANG_REASON_BUS_DOWN:
			DHD_ERROR(("Make HANG!!!: BUS down (0x%x)\n", reason));
			dhdp->req_hang_type = reason;
			break;
		case HANG_REASON_PCIE_LINK_DOWN_RC_DETECT:
		case HANG_REASON_PCIE_LINK_DOWN_EP_DETECT:
		case HANG_REASON_MSGBUF_LIVELOCK:
			dhdp->req_hang_type = 0;
			DHD_ERROR(("Does not support requested HANG (0x%x)\n", reason));
			break;
		case HANG_REASON_IFACE_DEL_FAILURE:
			dhdp->req_hang_type = 0;
			DHD_ERROR(("Does not support requested HANG (0x%x)\n", reason));
			break;
		case HANG_REASON_HT_AVAIL_ERROR:
			dhdp->req_hang_type = 0;
			DHD_ERROR(("PCIe does not support requested HANG (0x%x)\n", reason));
			break;
		case HANG_REASON_PCIE_RC_LINK_UP_FAIL:
			DHD_ERROR(("Make HANG!!!: Link Up (0x%x)\n", reason));
			dhdp->req_hang_type = reason;
			break;
		default:
			dhdp->req_hang_type = 0;
			DHD_ERROR(("Unknown HANG request (0x%x)\n", reason));
			break;
	}
}
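/*
 * Usage sketch (assumed, based on the parsing above): bcm_strtoul() is called
 * with base 0, so the reason string may be decimal (e.g. "258") or hex
 * (e.g. "0x102"); "0" clears any pending test request. Any value that is
 * <= HANG_REASON_MASK or >= HANG_REASON_MAX is rejected before the switch runs.
 */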
#endif /* DHD_HANG_SEND_UP_TEST */
#ifdef DHD_ERPOM
static void
dhd_error_recovery(void *handle, void *event_info, u8 event)
{
	dhd_info_t *dhd = handle;
	dhd_pub_t *dhdp;
	int ret = 0;

	if (!dhd) {
		DHD_ERROR(("%s: dhd is NULL\n", __FUNCTION__));
		return;
	}
	dhdp = &dhd->pub;
	if (!(dhd->dhd_state & DHD_ATTACH_STATE_DONE)) {
		DHD_ERROR(("%s: init not completed, cannot initiate recovery\n",
			__FUNCTION__));
		return;
	}
	ret = dhd_bus_perform_flr_with_quiesce(dhdp, dhdp->bus, FALSE);
	if (ret != BCME_DNGL_DEVRESET) {
		DHD_ERROR(("%s: dhd_bus_perform_flr_with_quiesce failed with ret: %d, "
			"toggle REG_ON\n", __FUNCTION__, ret));
		/* toggle REG_ON */
		dhdp->pom_toggle_reg_on(WLAN_FUNC_ID, BY_WLAN_DUE_TO_WLAN);
		return;
	}
}

void
dhd_schedule_reset(dhd_pub_t *dhdp)
{
	if (dhdp->enable_erpom) {
		dhd_deferred_schedule_work(dhdp->info->dhd_deferred_wq, NULL,
			DHD_WQ_WORK_ERROR_RECOVERY, dhd_error_recovery, DHD_WQ_WORK_PRIORITY_HIGH);
	}
}
#endif /* DHD_ERPOM */
#ifdef DHD_PKT_LOGGING
void
dhd_pktlog_dump(void *handle, void *event_info, u8 event)
{
	dhd_info_t *dhd = handle;

	if (!dhd) {
		DHD_ERROR(("%s: dhd is NULL\n", __FUNCTION__));
		return;
	}
	if (dhd_pktlog_dump_write_file(&dhd->pub)) {
		DHD_ERROR(("%s: writing pktlog dump file failed\n", __FUNCTION__));
		return;
	}
}

void
dhd_schedule_pktlog_dump(dhd_pub_t *dhdp)
{
	dhd_deferred_schedule_work(dhdp->info->dhd_deferred_wq,
		(void*)NULL, DHD_WQ_WORK_PKTLOG_DUMP,
		dhd_pktlog_dump, DHD_WQ_WORK_PRIORITY_HIGH);
}
#endif /* DHD_PKT_LOGGING */
#ifdef BIGDATA_SOFTAP
void dhd_schedule_gather_ap_stadata(void *bcm_cfg, void *ndev, const wl_event_msg_t *e)
{
	struct bcm_cfg80211 *cfg;
	dhd_pub_t *dhdp;
	ap_sta_wq_data_t *p_wq_data;

	if (!bcm_cfg || !ndev || !e) {
		WL_ERR(("bcm_cfg=%p ndev=%p e=%p\n", bcm_cfg, ndev, e));
		return;
	}
	cfg = (struct bcm_cfg80211 *)bcm_cfg;
	dhdp = (dhd_pub_t *)cfg->pub;
	if (!dhdp || !cfg->ap_sta_info) {
		WL_ERR(("dhdp=%p ap_sta_info=%p\n", dhdp, cfg->ap_sta_info));
		return;
	}
	p_wq_data = (ap_sta_wq_data_t *)MALLOCZ(dhdp->osh, sizeof(ap_sta_wq_data_t));
	if (unlikely(!p_wq_data)) {
		DHD_ERROR(("%s(): could not allocate memory for ap_sta_wq_data_t\n",
			__FUNCTION__));
		return;
	}
	mutex_lock(&cfg->ap_sta_info->wq_data_sync);
	memcpy(&p_wq_data->e, e, sizeof(wl_event_msg_t));
	p_wq_data->dhdp = dhdp;
	p_wq_data->bcm_cfg = cfg;
	p_wq_data->ndev = (struct net_device *)ndev;
	mutex_unlock(&cfg->ap_sta_info->wq_data_sync);
	dhd_deferred_schedule_work(dhdp->info->dhd_deferred_wq,
		p_wq_data, DHD_WQ_WORK_GET_BIGDATA_AP,
		wl_gather_ap_stadata, DHD_WQ_WORK_PRIORITY_HIGH);
}
#endif /* BIGDATA_SOFTAP */
void
get_debug_dump_time(char *str)
{
	struct timeval curtime;
	unsigned long local_time;
	struct rtc_time tm;

	if (!strlen(str)) {
		do_gettimeofday(&curtime);
		local_time = (u32)(curtime.tv_sec -
			(sys_tz.tz_minuteswest * DHD_LOG_DUMP_TS_MULTIPLIER_VALUE));
		rtc_time_to_tm(local_time, &tm);
		snprintf(str, DEBUG_DUMP_TIME_BUF_LEN, DHD_LOG_DUMP_TS_FMT_YYMMDDHHMMSSMSMS,
			tm.tm_year - 100, tm.tm_mon + 1, tm.tm_mday, tm.tm_hour, tm.tm_min,
			tm.tm_sec, (int)(curtime.tv_usec/NSEC_PER_USEC));
	}
}

void
clear_debug_dump_time(char *str)
{
	memset(str, 0, DEBUG_DUMP_TIME_BUF_LEN);
}
#if defined(WL_CFGVENDOR_SEND_HANG_EVENT) || defined(DHD_PKT_LOGGING)
void
copy_debug_dump_time(char *dest, char *src)
{
	memcpy(dest, src, DEBUG_DUMP_TIME_BUF_LEN);
}
#endif /* WL_CFGVENDOR_SEND_HANG_EVENT || DHD_PKT_LOGGING */

void
dhd_print_tasklet_status(dhd_pub_t *dhd)
{
	dhd_info_t *dhdinfo;

	if (!dhd) {
		DHD_ERROR(("%s: DHD is null\n", __FUNCTION__));
		return;
	}
	dhdinfo = dhd->info;
	if (!dhdinfo) {
		DHD_ERROR(("%s: DHD INFO is null\n", __FUNCTION__));
		return;
	}
	DHD_ERROR(("DHD Tasklet status : 0x%lx\n", dhdinfo->tasklet.state));
}
/*
 * DHD RING
 */
#define DHD_RING_ERR_INTERNAL(fmt, ...) DHD_ERROR(("EWPF-" fmt, ##__VA_ARGS__))
#define DHD_RING_TRACE_INTERNAL(fmt, ...) DHD_INFO(("EWPF-" fmt, ##__VA_ARGS__))
#define DHD_RING_ERR(x) DHD_RING_ERR_INTERNAL x
#define DHD_RING_TRACE(x) DHD_RING_TRACE_INTERNAL x

#define DHD_RING_MAGIC 0x20170910
#define DHD_RING_IDX_INVALID 0xffffffff

#define DHD_RING_SYNC_LOCK_INIT(osh) dhd_os_spin_lock_init(osh)
#define DHD_RING_SYNC_LOCK_DEINIT(osh, lock) dhd_os_spin_lock_deinit(osh, lock)
#define DHD_RING_SYNC_LOCK(lock, flags) (flags) = dhd_os_spin_lock(lock)
#define DHD_RING_SYNC_UNLOCK(lock, flags) dhd_os_spin_unlock(lock, flags)

typedef struct {
	uint32 elem_size;
	uint32 elem_cnt;
	uint32 write_idx;	/* next write index, -1 : not started */
	uint32 read_idx;	/* next read index, -1 : not started */

	/* protected elements during serialization */
	int lock_idx;	/* first locked index; locked elements will not be overwritten */
	int lock_count;	/* number of locked elements, starting from lock_idx */

	/* saved data elements */
	void *elem;
} dhd_fixed_ring_info_t;

typedef struct {
	uint32 elem_size;
	uint32 elem_cnt;
	uint32 idx;	/* -1 : not started */
	uint32 rsvd;	/* reserved for future use */

	/* protected elements during serialization */
	atomic_t ring_locked;
	/* set once the index has wrapped and old entries get overwritten */
	uint32 ring_overwrited;

	/* saved data elements */
	void *elem;
} dhd_singleidx_ring_info_t;

typedef struct {
	uint32 magic;
	uint32 type;
	void *ring_sync;	/* spinlock for sync */
	union {
		dhd_fixed_ring_info_t fixed;
		dhd_singleidx_ring_info_t single;
	};
} dhd_ring_info_t;
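/*
 * Layout sketch (derived from dhd_ring_init() below, added for illustration):
 * a ring lives in one caller-provided buffer, header first, elements after:
 *
 *	+-----------------+--------------------------------------------+
 *	| dhd_ring_info_t | elem[0] | elem[1] | ...... | elem[cnt - 1] |
 *	+-----------------+--------------------------------------------+
 *	  <- hdr size ->    <- elem_size * elem_cnt ->
 *
 * which is why callers must size the buffer as at least
 * dhd_ring_get_hdr_size() + elem_size * elem_cnt.
 */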
uint32
dhd_ring_get_hdr_size(void)
{
	return sizeof(dhd_ring_info_t);
}

void *
dhd_ring_init(dhd_pub_t *dhdp, uint8 *buf, uint32 buf_size, uint32 elem_size,
	uint32 elem_cnt, uint32 type)
{
	dhd_ring_info_t *ret_ring;

	if (!buf) {
		DHD_RING_ERR(("NO RING BUFFER\n"));
		return NULL;
	}
	if (buf_size < dhd_ring_get_hdr_size() + elem_size * elem_cnt) {
		DHD_RING_ERR(("RING SIZE IS TOO SMALL\n"));
		return NULL;
	}
	if (type != DHD_RING_TYPE_FIXED && type != DHD_RING_TYPE_SINGLE_IDX) {
		DHD_RING_ERR(("UNSUPPORTED RING TYPE\n"));
		return NULL;
	}
	ret_ring = (dhd_ring_info_t *)buf;
	ret_ring->type = type;
	ret_ring->ring_sync = DHD_RING_SYNC_LOCK_INIT(dhdp->osh);
	ret_ring->magic = DHD_RING_MAGIC;
	if (type == DHD_RING_TYPE_FIXED) {
		ret_ring->fixed.read_idx = DHD_RING_IDX_INVALID;
		ret_ring->fixed.write_idx = DHD_RING_IDX_INVALID;
		ret_ring->fixed.lock_idx = DHD_RING_IDX_INVALID;
		ret_ring->fixed.elem = buf + sizeof(dhd_ring_info_t);
		ret_ring->fixed.elem_size = elem_size;
		ret_ring->fixed.elem_cnt = elem_cnt;
	} else {
		ret_ring->single.idx = DHD_RING_IDX_INVALID;
		atomic_set(&ret_ring->single.ring_locked, 0);
		ret_ring->single.ring_overwrited = 0;
		ret_ring->single.rsvd = 0;
		ret_ring->single.elem = buf + sizeof(dhd_ring_info_t);
		ret_ring->single.elem_size = elem_size;
		ret_ring->single.elem_cnt = elem_cnt;
	}
	return ret_ring;
}
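/*
 * Hypothetical usage sketch (my_rec_t and MY_REC_CNT are invented for
 * illustration, not names from the original source):
 *
 *	uint32 bufsz = dhd_ring_get_hdr_size() + sizeof(my_rec_t) * MY_REC_CNT;
 *	uint8 *buf = MALLOCZ(dhdp->osh, bufsz);
 *	void *ring = NULL;
 *
 *	if (buf) {
 *		ring = dhd_ring_init(dhdp, buf, bufsz, sizeof(my_rec_t),
 *			MY_REC_CNT, DHD_RING_TYPE_FIXED);
 *	}
 *	...
 *	dhd_ring_deinit(dhdp, ring);
 *	MFREE(dhdp->osh, buf, bufsz);
 */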
void
dhd_ring_deinit(dhd_pub_t *dhdp, void *_ring)
{
	dhd_ring_info_t *ring = (dhd_ring_info_t *)_ring;

	if (!ring) {
		return;
	}
	if (ring->magic != DHD_RING_MAGIC) {
		return;
	}
	if (ring->type != DHD_RING_TYPE_FIXED &&
		ring->type != DHD_RING_TYPE_SINGLE_IDX) {
		return;
	}
	DHD_RING_SYNC_LOCK_DEINIT(dhdp->osh, ring->ring_sync);
	ring->ring_sync = NULL;
	if (ring->type == DHD_RING_TYPE_FIXED) {
		dhd_fixed_ring_info_t *fixed = &ring->fixed;
		memset(fixed->elem, 0, fixed->elem_size * fixed->elem_cnt);
		fixed->elem_size = fixed->elem_cnt = 0;
	} else {
		dhd_singleidx_ring_info_t *single = &ring->single;
		memset(single->elem, 0, single->elem_size * single->elem_cnt);
		single->elem_size = single->elem_cnt = 0;
	}
	ring->type = 0;
	ring->magic = 0;
}
static inline uint32
__dhd_ring_ptr2idx(void *ring, void *ptr, char *sig, uint32 type)
{
	uint32 diff;
	uint32 ret_idx = (uint32)DHD_RING_IDX_INVALID;
	uint32 elem_size, elem_cnt;
	void *elem;

	if (type == DHD_RING_TYPE_FIXED) {
		dhd_fixed_ring_info_t *fixed = (dhd_fixed_ring_info_t *)ring;
		elem_size = fixed->elem_size;
		elem_cnt = fixed->elem_cnt;
		elem = fixed->elem;
	} else if (type == DHD_RING_TYPE_SINGLE_IDX) {
		dhd_singleidx_ring_info_t *single = (dhd_singleidx_ring_info_t *)ring;
		elem_size = single->elem_size;
		elem_cnt = single->elem_cnt;
		elem = single->elem;
	} else {
		DHD_RING_ERR(("UNSUPPORTED RING TYPE %d\n", type));
		return ret_idx;
	}
	if (ptr < elem) {
		DHD_RING_ERR(("INVALID POINTER %s:%p, ring->elem:%p\n", sig, ptr, elem));
		return ret_idx;
	}
	diff = (uint32)((uint8 *)ptr - (uint8 *)elem);
	if (diff % elem_size != 0) {
		DHD_RING_ERR(("INVALID POINTER %s:%p, ring->elem:%p\n", sig, ptr, elem));
		return ret_idx;
	}
	ret_idx = diff / elem_size;
	if (ret_idx >= elem_cnt) {
		DHD_RING_ERR(("INVALID POINTER max:%d cur:%d\n", elem_cnt, ret_idx));
	}
	return ret_idx;
}
/* Sub functions for the fixed ring */
/* get the count of elements between two indexes of the ring buffer (internal only) */
static inline int
__dhd_fixed_ring_get_count(dhd_fixed_ring_info_t *ring, int start, int end)
{
	if (start == DHD_RING_IDX_INVALID || end == DHD_RING_IDX_INVALID) {
		return 0;
	}
	return (ring->elem_cnt + end - start) % ring->elem_cnt + 1;
}
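/*
 * Worked example of the count formula above: with elem_cnt == 10,
 * start == 8 and end == 1, (10 + 1 - 8) % 10 + 1 == 4, i.e. the occupied
 * slots are 8, 9, 0, 1 -- the "+ 1" makes the range inclusive of both ends.
 */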
static inline int
__dhd_fixed_ring_get_cur_size(dhd_fixed_ring_info_t *ring)
{
	return __dhd_fixed_ring_get_count(ring, ring->read_idx, ring->write_idx);
}

static inline void *
__dhd_fixed_ring_get_first(dhd_fixed_ring_info_t *ring)
{
	if (ring->read_idx == DHD_RING_IDX_INVALID) {
		return NULL;
	}
	return (uint8 *)ring->elem + (ring->elem_size * ring->read_idx);
}

static inline void
__dhd_fixed_ring_free_first(dhd_fixed_ring_info_t *ring)
{
	uint32 next_idx;

	if (ring->read_idx == DHD_RING_IDX_INVALID) {
		DHD_RING_ERR(("EMPTY RING\n"));
		return;
	}
	next_idx = (ring->read_idx + 1) % ring->elem_cnt;
	if (ring->read_idx == ring->write_idx) {
		/* Become empty */
		ring->read_idx = ring->write_idx = DHD_RING_IDX_INVALID;
		return;
	}
	ring->read_idx = next_idx;
	return;
}
static inline void *
__dhd_fixed_ring_get_last(dhd_fixed_ring_info_t *ring)
{
	if (ring->read_idx == DHD_RING_IDX_INVALID) {
		return NULL;
	}
	return (uint8 *)ring->elem + (ring->elem_size * ring->write_idx);
}

static inline void *
__dhd_fixed_ring_get_empty(dhd_fixed_ring_info_t *ring)
{
	uint32 tmp_idx;

	if (ring->read_idx == DHD_RING_IDX_INVALID) {
		ring->read_idx = ring->write_idx = 0;
		return (uint8 *)ring->elem;
	}
	/* check that the next index is not locked */
	tmp_idx = (ring->write_idx + 1) % ring->elem_cnt;
	if (ring->lock_idx == tmp_idx) {
		return NULL;
	}
	ring->write_idx = tmp_idx;
	if (ring->write_idx == ring->read_idx) {
		/* the ring is full, drop the oldest record */
		ring->read_idx = (ring->read_idx + 1) % ring->elem_cnt;
	}
	return (uint8 *)ring->elem + (ring->elem_size * ring->write_idx);
}
static inline void *
__dhd_fixed_ring_get_next(dhd_fixed_ring_info_t *ring, void *prev, uint32 type)
{
	uint32 cur_idx;

	if (ring->read_idx == DHD_RING_IDX_INVALID) {
		DHD_RING_ERR(("EMPTY RING\n"));
		return NULL;
	}
	cur_idx = __dhd_ring_ptr2idx(ring, prev, "NEXT", type);
	if (cur_idx >= ring->elem_cnt) {
		return NULL;
	}
	if (cur_idx == ring->write_idx) {
		/* no newer record */
		return NULL;
	}
	cur_idx = (cur_idx + 1) % ring->elem_cnt;
	return (uint8 *)ring->elem + ring->elem_size * cur_idx;
}

static inline void *
__dhd_fixed_ring_get_prev(dhd_fixed_ring_info_t *ring, void *prev, uint32 type)
{
	uint32 cur_idx;

	if (ring->read_idx == DHD_RING_IDX_INVALID) {
		DHD_RING_ERR(("EMPTY RING\n"));
		return NULL;
	}
	cur_idx = __dhd_ring_ptr2idx(ring, prev, "PREV", type);
	if (cur_idx >= ring->elem_cnt) {
		return NULL;
	}
	if (cur_idx == ring->read_idx) {
		/* no older record */
		return NULL;
	}
	cur_idx = (cur_idx + ring->elem_cnt - 1) % ring->elem_cnt;
	return (uint8 *)ring->elem + ring->elem_size * cur_idx;
}
static inline void
__dhd_fixed_ring_lock(dhd_fixed_ring_info_t *ring, void *first_ptr, void *last_ptr, uint32 type)
{
	uint32 first_idx;
	uint32 last_idx;
	uint32 ring_filled_cnt;
	uint32 tmp_cnt;

	if (ring->read_idx == DHD_RING_IDX_INVALID) {
		DHD_RING_ERR(("EMPTY RING\n"));
		return;
	}
	if (first_ptr) {
		first_idx = __dhd_ring_ptr2idx(ring, first_ptr, "LCK FIRST", type);
		if (first_idx >= ring->elem_cnt) {
			return;
		}
	} else {
		first_idx = ring->read_idx;
	}
	if (last_ptr) {
		last_idx = __dhd_ring_ptr2idx(ring, last_ptr, "LCK LAST", type);
		if (last_idx >= ring->elem_cnt) {
			return;
		}
	} else {
		last_idx = ring->write_idx;
	}
	ring_filled_cnt = __dhd_fixed_ring_get_count(ring, ring->read_idx, ring->write_idx);
	tmp_cnt = __dhd_fixed_ring_get_count(ring, ring->read_idx, first_idx);
	if (tmp_cnt > ring_filled_cnt) {
		DHD_RING_ERR(("LOCK FIRST POINTS TO EMPTY ELEM: write: %d read: %d cur:%d\n",
			ring->write_idx, ring->read_idx, first_idx));
		return;
	}
	tmp_cnt = __dhd_fixed_ring_get_count(ring, ring->read_idx, last_idx);
	if (tmp_cnt > ring_filled_cnt) {
		DHD_RING_ERR(("LOCK LAST POINTS TO EMPTY ELEM: write: %d read: %d cur:%d\n",
			ring->write_idx, ring->read_idx, last_idx));
		return;
	}
	ring->lock_idx = first_idx;
	ring->lock_count = __dhd_fixed_ring_get_count(ring, first_idx, last_idx);
	return;
}

static inline void
__dhd_fixed_ring_lock_free(dhd_fixed_ring_info_t *ring)
{
	if (ring->read_idx == DHD_RING_IDX_INVALID) {
		DHD_RING_ERR(("EMPTY RING\n"));
		return;
	}
	ring->lock_idx = DHD_RING_IDX_INVALID;
	ring->lock_count = 0;
	return;
}
static inline void *
__dhd_fixed_ring_lock_get_first(dhd_fixed_ring_info_t *ring)
{
	if (ring->read_idx == DHD_RING_IDX_INVALID) {
		DHD_RING_ERR(("EMPTY RING\n"));
		return NULL;
	}
	if (ring->lock_idx == DHD_RING_IDX_INVALID) {
		DHD_RING_ERR(("NO LOCK POINT\n"));
		return NULL;
	}
	return (uint8 *)ring->elem + ring->elem_size * ring->lock_idx;
}

static inline void *
__dhd_fixed_ring_lock_get_last(dhd_fixed_ring_info_t *ring)
{
	int lock_last_idx;

	if (ring->read_idx == DHD_RING_IDX_INVALID) {
		DHD_RING_ERR(("EMPTY RING\n"));
		return NULL;
	}
	if (ring->lock_idx == DHD_RING_IDX_INVALID) {
		DHD_RING_ERR(("NO LOCK POINT\n"));
		return NULL;
	}
	lock_last_idx = (ring->lock_idx + ring->lock_count - 1) % ring->elem_cnt;
	return (uint8 *)ring->elem + ring->elem_size * lock_last_idx;
}

static inline int
__dhd_fixed_ring_lock_get_count(dhd_fixed_ring_info_t *ring)
{
	if (ring->read_idx == DHD_RING_IDX_INVALID) {
		DHD_RING_ERR(("EMPTY RING\n"));
		return BCME_ERROR;
	}
	if (ring->lock_idx == DHD_RING_IDX_INVALID) {
		DHD_RING_ERR(("NO LOCK POINT\n"));
		return BCME_ERROR;
	}
	return ring->lock_count;
}

static inline void
__dhd_fixed_ring_lock_free_first(dhd_fixed_ring_info_t *ring)
{
	if (ring->read_idx == DHD_RING_IDX_INVALID) {
		DHD_RING_ERR(("EMPTY RING\n"));
		return;
	}
	if (ring->lock_idx == DHD_RING_IDX_INVALID) {
		DHD_RING_ERR(("NO LOCK POINT\n"));
		return;
	}
	ring->lock_count--;
	if (ring->lock_count <= 0) {
		ring->lock_idx = DHD_RING_IDX_INVALID;
	} else {
		ring->lock_idx = (ring->lock_idx + 1) % ring->elem_cnt;
	}
	return;
}
static inline void
__dhd_fixed_ring_set_read_idx(dhd_fixed_ring_info_t *ring, uint32 idx)
{
	ring->read_idx = idx;
}

static inline void
__dhd_fixed_ring_set_write_idx(dhd_fixed_ring_info_t *ring, uint32 idx)
{
	ring->write_idx = idx;
}

static inline uint32
__dhd_fixed_ring_get_read_idx(dhd_fixed_ring_info_t *ring)
{
	return ring->read_idx;
}

static inline uint32
__dhd_fixed_ring_get_write_idx(dhd_fixed_ring_info_t *ring)
{
	return ring->write_idx;
}
/* Sub functions for the single index ring */
static inline void *
__dhd_singleidx_ring_get_first(dhd_singleidx_ring_info_t *ring)
{
	uint32 tmp_idx = 0;

	if (ring->idx == DHD_RING_IDX_INVALID) {
		return NULL;
	}
	if (ring->ring_overwrited) {
		tmp_idx = (ring->idx + 1) % ring->elem_cnt;
	}
	return (uint8 *)ring->elem + (ring->elem_size * tmp_idx);
}

static inline void *
__dhd_singleidx_ring_get_last(dhd_singleidx_ring_info_t *ring)
{
	if (ring->idx == DHD_RING_IDX_INVALID) {
		return NULL;
	}
	return (uint8 *)ring->elem + (ring->elem_size * ring->idx);
}

static inline void *
__dhd_singleidx_ring_get_empty(dhd_singleidx_ring_info_t *ring)
{
	if (ring->idx == DHD_RING_IDX_INVALID) {
		ring->idx = 0;
		return (uint8 *)ring->elem;
	}
	/* do not hand out a slot while the ring is locked */
	if (atomic_read(&ring->ring_locked)) {
		return NULL;
	}
	/* mark rollover on the first wrap of the index */
	if (!ring->ring_overwrited && ring->idx == (ring->elem_cnt - 1)) {
		ring->ring_overwrited = 1;
	}
	ring->idx = (ring->idx + 1) % ring->elem_cnt;
	return (uint8 *)ring->elem + (ring->elem_size * ring->idx);
}

static inline void *
__dhd_singleidx_ring_get_next(dhd_singleidx_ring_info_t *ring, void *prev, uint32 type)
{
	uint32 cur_idx;

	if (ring->idx == DHD_RING_IDX_INVALID) {
		DHD_RING_ERR(("EMPTY RING\n"));
		return NULL;
	}
	cur_idx = __dhd_ring_ptr2idx(ring, prev, "NEXT", type);
	if (cur_idx >= ring->elem_cnt) {
		return NULL;
	}
	if (cur_idx == ring->idx) {
		/* no newer record */
		return NULL;
	}
	cur_idx = (cur_idx + 1) % ring->elem_cnt;
	return (uint8 *)ring->elem + ring->elem_size * cur_idx;
}

static inline void *
__dhd_singleidx_ring_get_prev(dhd_singleidx_ring_info_t *ring, void *prev, uint32 type)
{
	uint32 cur_idx;

	if (ring->idx == DHD_RING_IDX_INVALID) {
		DHD_RING_ERR(("EMPTY RING\n"));
		return NULL;
	}
	cur_idx = __dhd_ring_ptr2idx(ring, prev, "PREV", type);
	if (cur_idx >= ring->elem_cnt) {
		return NULL;
	}
	if (!ring->ring_overwrited && cur_idx == 0) {
		/* no older record */
		return NULL;
	}
	cur_idx = (cur_idx + ring->elem_cnt - 1) % ring->elem_cnt;
	if (ring->ring_overwrited && cur_idx == ring->idx) {
		/* no older record */
		return NULL;
	}
	return (uint8 *)ring->elem + ring->elem_size * cur_idx;
}

static inline void
__dhd_singleidx_ring_whole_lock(dhd_singleidx_ring_info_t *ring)
{
	if (!atomic_read(&ring->ring_locked)) {
		atomic_set(&ring->ring_locked, 1);
	}
}

static inline void
__dhd_singleidx_ring_whole_unlock(dhd_singleidx_ring_info_t *ring)
{
	if (atomic_read(&ring->ring_locked)) {
		atomic_set(&ring->ring_locked, 0);
	}
}
/* Get the first (oldest) element */
void *
dhd_ring_get_first(void *_ring)
{
	dhd_ring_info_t *ring = (dhd_ring_info_t *)_ring;
	void *ret = NULL;
	unsigned long flags;

	if (!ring || ring->magic != DHD_RING_MAGIC) {
		DHD_RING_ERR(("%s: INVALID RING INFO\n", __FUNCTION__));
		return NULL;
	}
	DHD_RING_SYNC_LOCK(ring->ring_sync, flags);
	if (ring->type == DHD_RING_TYPE_FIXED) {
		ret = __dhd_fixed_ring_get_first(&ring->fixed);
	}
	if (ring->type == DHD_RING_TYPE_SINGLE_IDX) {
		ret = __dhd_singleidx_ring_get_first(&ring->single);
	}
	DHD_RING_SYNC_UNLOCK(ring->ring_sync, flags);
	return ret;
}

/* Free the first (oldest) element */
void
dhd_ring_free_first(void *_ring)
{
	dhd_ring_info_t *ring = (dhd_ring_info_t *)_ring;
	unsigned long flags;

	if (!ring || ring->magic != DHD_RING_MAGIC) {
		DHD_RING_ERR(("%s: INVALID RING INFO\n", __FUNCTION__));
		return;
	}
	DHD_RING_SYNC_LOCK(ring->ring_sync, flags);
	if (ring->type == DHD_RING_TYPE_FIXED) {
		__dhd_fixed_ring_free_first(&ring->fixed);
	}
	DHD_RING_SYNC_UNLOCK(ring->ring_sync, flags);
}
void
dhd_ring_set_read_idx(void *_ring, uint32 read_idx)
{
	dhd_ring_info_t *ring = (dhd_ring_info_t *)_ring;
	unsigned long flags;

	if (!ring || ring->magic != DHD_RING_MAGIC) {
		DHD_RING_ERR(("%s: INVALID RING INFO\n", __FUNCTION__));
		return;
	}
	DHD_RING_SYNC_LOCK(ring->ring_sync, flags);
	if (ring->type == DHD_RING_TYPE_FIXED) {
		__dhd_fixed_ring_set_read_idx(&ring->fixed, read_idx);
	}
	DHD_RING_SYNC_UNLOCK(ring->ring_sync, flags);
}

void
dhd_ring_set_write_idx(void *_ring, uint32 write_idx)
{
	dhd_ring_info_t *ring = (dhd_ring_info_t *)_ring;
	unsigned long flags;

	if (!ring || ring->magic != DHD_RING_MAGIC) {
		DHD_RING_ERR(("%s: INVALID RING INFO\n", __FUNCTION__));
		return;
	}
	DHD_RING_SYNC_LOCK(ring->ring_sync, flags);
	if (ring->type == DHD_RING_TYPE_FIXED) {
		__dhd_fixed_ring_set_write_idx(&ring->fixed, write_idx);
	}
	DHD_RING_SYNC_UNLOCK(ring->ring_sync, flags);
}

uint32
dhd_ring_get_read_idx(void *_ring)
{
	dhd_ring_info_t *ring = (dhd_ring_info_t *)_ring;
	uint32 read_idx = DHD_RING_IDX_INVALID;
	unsigned long flags;

	if (!ring || ring->magic != DHD_RING_MAGIC) {
		DHD_RING_ERR(("%s: INVALID RING INFO\n", __FUNCTION__));
		return read_idx;
	}
	DHD_RING_SYNC_LOCK(ring->ring_sync, flags);
	if (ring->type == DHD_RING_TYPE_FIXED) {
		read_idx = __dhd_fixed_ring_get_read_idx(&ring->fixed);
	}
	DHD_RING_SYNC_UNLOCK(ring->ring_sync, flags);
	return read_idx;
}

uint32
dhd_ring_get_write_idx(void *_ring)
{
	dhd_ring_info_t *ring = (dhd_ring_info_t *)_ring;
	uint32 write_idx = DHD_RING_IDX_INVALID;
	unsigned long flags;

	if (!ring || ring->magic != DHD_RING_MAGIC) {
		DHD_RING_ERR(("%s: INVALID RING INFO\n", __FUNCTION__));
		return write_idx;
	}
	DHD_RING_SYNC_LOCK(ring->ring_sync, flags);
	if (ring->type == DHD_RING_TYPE_FIXED) {
		write_idx = __dhd_fixed_ring_get_write_idx(&ring->fixed);
	}
	DHD_RING_SYNC_UNLOCK(ring->ring_sync, flags);
	return write_idx;
}
/* Get the latest element */
void *
dhd_ring_get_last(void *_ring)
{
	dhd_ring_info_t *ring = (dhd_ring_info_t *)_ring;
	void *ret = NULL;
	unsigned long flags;

	if (!ring || ring->magic != DHD_RING_MAGIC) {
		DHD_RING_ERR(("%s: INVALID RING INFO\n", __FUNCTION__));
		return NULL;
	}
	DHD_RING_SYNC_LOCK(ring->ring_sync, flags);
	if (ring->type == DHD_RING_TYPE_FIXED) {
		ret = __dhd_fixed_ring_get_last(&ring->fixed);
	}
	if (ring->type == DHD_RING_TYPE_SINGLE_IDX) {
		ret = __dhd_singleidx_ring_get_last(&ring->single);
	}
	DHD_RING_SYNC_UNLOCK(ring->ring_sync, flags);
	return ret;
}

/*
 * Get the next slot that can be written.
 * The oldest unread element may be overwritten;
 * returns NULL if the next slot is locked.
 */
void *
dhd_ring_get_empty(void *_ring)
{
	dhd_ring_info_t *ring = (dhd_ring_info_t *)_ring;
	void *ret = NULL;
	unsigned long flags;

	if (!ring || ring->magic != DHD_RING_MAGIC) {
		DHD_RING_ERR(("%s: INVALID RING INFO\n", __FUNCTION__));
		return NULL;
	}
	DHD_RING_SYNC_LOCK(ring->ring_sync, flags);
	if (ring->type == DHD_RING_TYPE_FIXED) {
		ret = __dhd_fixed_ring_get_empty(&ring->fixed);
	}
	if (ring->type == DHD_RING_TYPE_SINGLE_IDX) {
		ret = __dhd_singleidx_ring_get_empty(&ring->single);
	}
	DHD_RING_SYNC_UNLOCK(ring->ring_sync, flags);
	return ret;
}
void *
dhd_ring_get_next(void *_ring, void *cur)
{
	dhd_ring_info_t *ring = (dhd_ring_info_t *)_ring;
	void *ret = NULL;
	unsigned long flags;

	if (!ring || ring->magic != DHD_RING_MAGIC) {
		DHD_RING_ERR(("%s: INVALID RING INFO\n", __FUNCTION__));
		return NULL;
	}
	DHD_RING_SYNC_LOCK(ring->ring_sync, flags);
	if (ring->type == DHD_RING_TYPE_FIXED) {
		ret = __dhd_fixed_ring_get_next(&ring->fixed, cur, ring->type);
	}
	if (ring->type == DHD_RING_TYPE_SINGLE_IDX) {
		ret = __dhd_singleidx_ring_get_next(&ring->single, cur, ring->type);
	}
	DHD_RING_SYNC_UNLOCK(ring->ring_sync, flags);
	return ret;
}
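/*
 * Iteration sketch (assumed usage, consistent with the accessors above):
 * walk a ring from oldest to newest without consuming entries:
 *
 *	void *cur = dhd_ring_get_first(ring);
 *	while (cur) {
 *		process(cur);	(process() is a hypothetical consumer)
 *		cur = dhd_ring_get_next(ring, cur);
 *	}
 *
 * dhd_ring_get_next() returns NULL once cur reaches the newest element.
 * Note each accessor takes the ring spinlock internally, so the ring is
 * not held locked between iterations.
 */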
void *
dhd_ring_get_prev(void *_ring, void *cur)
{
	dhd_ring_info_t *ring = (dhd_ring_info_t *)_ring;
	void *ret = NULL;
	unsigned long flags;

	if (!ring || ring->magic != DHD_RING_MAGIC) {
		DHD_RING_ERR(("%s: INVALID RING INFO\n", __FUNCTION__));
		return NULL;
	}
	DHD_RING_SYNC_LOCK(ring->ring_sync, flags);
	if (ring->type == DHD_RING_TYPE_FIXED) {
		ret = __dhd_fixed_ring_get_prev(&ring->fixed, cur, ring->type);
	}
	if (ring->type == DHD_RING_TYPE_SINGLE_IDX) {
		ret = __dhd_singleidx_ring_get_prev(&ring->single, cur, ring->type);
	}
	DHD_RING_SYNC_UNLOCK(ring->ring_sync, flags);
	return ret;
}

int
dhd_ring_get_cur_size(void *_ring)
{
	dhd_ring_info_t *ring = (dhd_ring_info_t *)_ring;
	int cnt = 0;
	unsigned long flags;

	if (!ring || ring->magic != DHD_RING_MAGIC) {
		DHD_RING_ERR(("%s: INVALID RING INFO\n", __FUNCTION__));
		return cnt;
	}
	DHD_RING_SYNC_LOCK(ring->ring_sync, flags);
	if (ring->type == DHD_RING_TYPE_FIXED) {
		cnt = __dhd_fixed_ring_get_cur_size(&ring->fixed);
	}
	DHD_RING_SYNC_UNLOCK(ring->ring_sync, flags);
	return cnt;
}
/* protect the elements from first_ptr through last_ptr (defaults: read_idx..write_idx) */
void
dhd_ring_lock(void *_ring, void *first_ptr, void *last_ptr)
{
	dhd_ring_info_t *ring = (dhd_ring_info_t *)_ring;
	unsigned long flags;

	if (!ring || ring->magic != DHD_RING_MAGIC) {
		DHD_RING_ERR(("%s: INVALID RING INFO\n", __FUNCTION__));
		return;
	}
	DHD_RING_SYNC_LOCK(ring->ring_sync, flags);
	if (ring->type == DHD_RING_TYPE_FIXED) {
		__dhd_fixed_ring_lock(&ring->fixed, first_ptr, last_ptr, ring->type);
	}
	DHD_RING_SYNC_UNLOCK(ring->ring_sync, flags);
}

/* release all locks */
void
dhd_ring_lock_free(void *_ring)
{
	dhd_ring_info_t *ring = (dhd_ring_info_t *)_ring;
	unsigned long flags;

	if (!ring || ring->magic != DHD_RING_MAGIC) {
		DHD_RING_ERR(("%s: INVALID RING INFO\n", __FUNCTION__));
		return;
	}
	DHD_RING_SYNC_LOCK(ring->ring_sync, flags);
	if (ring->type == DHD_RING_TYPE_FIXED) {
		__dhd_fixed_ring_lock_free(&ring->fixed);
	}
	DHD_RING_SYNC_UNLOCK(ring->ring_sync, flags);
}

void *
dhd_ring_lock_get_first(void *_ring)
{
	dhd_ring_info_t *ring = (dhd_ring_info_t *)_ring;
	void *ret = NULL;
	unsigned long flags;

	if (!ring || ring->magic != DHD_RING_MAGIC) {
		DHD_RING_ERR(("%s: INVALID RING INFO\n", __FUNCTION__));
		return NULL;
	}
	DHD_RING_SYNC_LOCK(ring->ring_sync, flags);
	if (ring->type == DHD_RING_TYPE_FIXED) {
		ret = __dhd_fixed_ring_lock_get_first(&ring->fixed);
	}
	DHD_RING_SYNC_UNLOCK(ring->ring_sync, flags);
	return ret;
}

void *
dhd_ring_lock_get_last(void *_ring)
{
	dhd_ring_info_t *ring = (dhd_ring_info_t *)_ring;
	void *ret = NULL;
	unsigned long flags;

	if (!ring || ring->magic != DHD_RING_MAGIC) {
		DHD_RING_ERR(("%s: INVALID RING INFO\n", __FUNCTION__));
		return NULL;
	}
	DHD_RING_SYNC_LOCK(ring->ring_sync, flags);
	if (ring->type == DHD_RING_TYPE_FIXED) {
		ret = __dhd_fixed_ring_lock_get_last(&ring->fixed);
	}
	DHD_RING_SYNC_UNLOCK(ring->ring_sync, flags);
	return ret;
}

int
dhd_ring_lock_get_count(void *_ring)
{
	dhd_ring_info_t *ring = (dhd_ring_info_t *)_ring;
	int ret = BCME_ERROR;
	unsigned long flags;

	if (!ring || ring->magic != DHD_RING_MAGIC) {
		DHD_RING_ERR(("%s: INVALID RING INFO\n", __FUNCTION__));
		return ret;
	}
	DHD_RING_SYNC_LOCK(ring->ring_sync, flags);
	if (ring->type == DHD_RING_TYPE_FIXED) {
		ret = __dhd_fixed_ring_lock_get_count(&ring->fixed);
	}
	DHD_RING_SYNC_UNLOCK(ring->ring_sync, flags);
	return ret;
}

/* free the first locked element */
void
dhd_ring_lock_free_first(void *_ring)
{
	dhd_ring_info_t *ring = (dhd_ring_info_t *)_ring;
	unsigned long flags;

	if (!ring || ring->magic != DHD_RING_MAGIC) {
		DHD_RING_ERR(("%s: INVALID RING INFO\n", __FUNCTION__));
		return;
	}
	DHD_RING_SYNC_LOCK(ring->ring_sync, flags);
	if (ring->type == DHD_RING_TYPE_FIXED) {
		__dhd_fixed_ring_lock_free_first(&ring->fixed);
	}
	DHD_RING_SYNC_UNLOCK(ring->ring_sync, flags);
}
void
dhd_ring_whole_lock(void *_ring)
{
	dhd_ring_info_t *ring = (dhd_ring_info_t *)_ring;
	unsigned long flags;

	if (!ring || ring->magic != DHD_RING_MAGIC) {
		DHD_RING_ERR(("%s: INVALID RING INFO\n", __FUNCTION__));
		return;
	}
	DHD_RING_SYNC_LOCK(ring->ring_sync, flags);
	if (ring->type == DHD_RING_TYPE_SINGLE_IDX) {
		__dhd_singleidx_ring_whole_lock(&ring->single);
	}
	DHD_RING_SYNC_UNLOCK(ring->ring_sync, flags);
}

void
dhd_ring_whole_unlock(void *_ring)
{
	dhd_ring_info_t *ring = (dhd_ring_info_t *)_ring;
	unsigned long flags;

	if (!ring || ring->magic != DHD_RING_MAGIC) {
		DHD_RING_ERR(("%s: INVALID RING INFO\n", __FUNCTION__));
		return;
	}
	DHD_RING_SYNC_LOCK(ring->ring_sync, flags);
	if (ring->type == DHD_RING_TYPE_SINGLE_IDX) {
		__dhd_singleidx_ring_whole_unlock(&ring->single);
	}
	DHD_RING_SYNC_UNLOCK(ring->ring_sync, flags);
}
#if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 19, 0))
#define DHD_VFS_INODE(dir) (dir->d_inode)
#else
#define DHD_VFS_INODE(dir) d_inode(dir)
#endif /* LINUX_VERSION_CODE < KERNEL_VERSION(3, 19, 0) */

#if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 13, 0))
#define DHD_VFS_UNLINK(dir, b, c) vfs_unlink(DHD_VFS_INODE(dir), b)
#else
#define DHD_VFS_UNLINK(dir, b, c) vfs_unlink(DHD_VFS_INODE(dir), b, c)
#endif /* LINUX_VERSION_CODE < KERNEL_VERSION(3, 13, 0) */
int
dhd_file_delete(char *path)
{
	struct path file_path;
	int err;
	struct dentry *dir;

	err = kern_path(path, 0, &file_path);
	if (err < 0) {
		DHD_ERROR(("Failed to get kern-path to delete file: %s error: %d\n", path, err));
		return err;
	}
	if (
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 13, 0))
		!d_is_file(file_path.dentry) ||
#if (LINUX_VERSION_CODE > KERNEL_VERSION(4, 1, 0))
		d_really_is_negative(file_path.dentry) ||
#endif /* LINUX_VERSION_CODE > KERNEL_VERSION(4, 1, 0) */
#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(3, 13, 0) */
		FALSE)
	{
		err = -EINVAL;
	} else {
		dir = dget_parent(file_path.dentry);
		if (!IS_ERR(dir)) {
			err = DHD_VFS_UNLINK(dir, file_path.dentry, NULL);
			dput(dir);
		} else {
			err = PTR_ERR(dir);
		}
	}
	path_put(&file_path);
	if (err < 0) {
		DHD_ERROR(("Failed to delete file: %s error: %d\n", path, err));
	}
	return err;
}
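/*
 * Usage sketch (the path below is a placeholder, not a real dump location):
 *
 *	if (dhd_file_delete("/some/dump/dir/old_dump.txt") < 0) {
 *		... the error has already been logged by dhd_file_delete() ...
 *	}
 *
 * The function resolves the path, unlinks the file through its parent
 * dentry, and returns the negative errno on failure.
 */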
#ifdef DHD_DUMP_MNGR
static int
dhd_dump_file_manage_idx(dhd_dump_file_manage_t *fm_ptr, char *fname)
{
	int i;
	int fm_idx = -1;

	for (i = 0; i < DHD_DUMP_TYPE_COUNT_MAX; i++) {
		if (strlen(fm_ptr->elems[i].type_name) == 0) {
			fm_idx = i;
			break;
		}
		if (!(strncmp(fname, fm_ptr->elems[i].type_name, strlen(fname)))) {
			fm_idx = i;
			break;
		}
	}
	if (fm_idx == -1) {
		return fm_idx;
	}
	if (strlen(fm_ptr->elems[fm_idx].type_name) == 0) {
		strncpy(fm_ptr->elems[fm_idx].type_name, fname, DHD_DUMP_TYPE_NAME_SIZE);
		fm_ptr->elems[fm_idx].type_name[DHD_DUMP_TYPE_NAME_SIZE - 1] = '\0';
		fm_ptr->elems[fm_idx].file_idx = 0;
	}
	return fm_idx;
}
/*
 * dhd_dump_file_manage_enqueue - enqueue a dump file path
 * and delete the oldest file if the file count is at max.
 */
void
dhd_dump_file_manage_enqueue(dhd_pub_t *dhd, char *dump_path, char *fname)
{
	int fm_idx;
	int fp_idx;
	dhd_dump_file_manage_t *fm_ptr;
	DFM_elem_t *elem;

	if (!dhd || !dhd->dump_file_manage) {
		DHD_ERROR(("%s(): dhdp=%p dump_file_manage=%p\n",
			__FUNCTION__, dhd, (dhd ? dhd->dump_file_manage : NULL)));
		return;
	}
	fm_ptr = dhd->dump_file_manage;
	/* find the file_manage index */
	DHD_INFO(("%s(): fname: %s dump_path: %s\n", __FUNCTION__, fname, dump_path));
	if ((fm_idx = dhd_dump_file_manage_idx(fm_ptr, fname)) < 0) {
		DHD_ERROR(("%s(): Out of file manager entries, fname: %s\n",
			__FUNCTION__, fname));
		return;
	}
	elem = &fm_ptr->elems[fm_idx];
	fp_idx = elem->file_idx;
	DHD_INFO(("%s(): fm_idx: %d fp_idx: %d path: %s\n",
		__FUNCTION__, fm_idx, fp_idx, elem->file_path[fp_idx]));
	/* delete the oldest file */
	if (strlen(elem->file_path[fp_idx]) != 0) {
		if (dhd_file_delete(elem->file_path[fp_idx]) < 0) {
			DHD_ERROR(("%s(): Failed to delete file: %s\n",
				__FUNCTION__, elem->file_path[fp_idx]));
		} else {
			DHD_ERROR(("%s(): Succeeded in deleting file: %s\n",
				__FUNCTION__, elem->file_path[fp_idx]));
		}
	}
	/* save the dump file path */
	strncpy(elem->file_path[fp_idx], dump_path, DHD_DUMP_FILE_PATH_SIZE);
	elem->file_path[fp_idx][DHD_DUMP_FILE_PATH_SIZE - 1] = '\0';
	/* advance to the next file index */
	elem->file_idx = (elem->file_idx + 1) % DHD_DUMP_FILE_COUNT_MAX;
}
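/*
 * Rotation sketch: each dump type keeps DHD_DUMP_FILE_COUNT_MAX paths in a
 * circular list. For example, with a max of 5, the 6th enqueue for a given
 * type lands on index 0 again, deleting the oldest file before saving the
 * new path. (The value 5 is illustrative; the real bound is the macro.)
 */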
#endif /* DHD_DUMP_MNGR */
#ifdef DHD_MAP_LOGGING
/* Will be called from the SMMU fault handler */
void
dhd_smmu_fault_handler(uint32 axid, ulong fault_addr)
{
	dhd_pub_t *dhdp = (dhd_pub_t *)g_dhd_pub;
	uint32 irq = (uint32)-1;

	DHD_ERROR(("%s: Trigger SMMU Fault\n", __FUNCTION__));
	DHD_ERROR(("%s: axid:0x%x, fault_addr:0x%lx\n", __FUNCTION__, axid, fault_addr));
	dhdp->smmu_fault_occurred = TRUE;
#ifdef DNGL_AXI_ERROR_LOGGING
	dhdp->axi_error = TRUE;
	dhdp->axi_err_dump->axid = axid;
	dhdp->axi_err_dump->fault_address = fault_addr;
#endif /* DNGL_AXI_ERROR_LOGGING */
	/* Disable the PCIe IRQ */
	dhdpcie_get_pcieirq(dhdp->bus, &irq);
	if (irq != (uint32)-1) {
		disable_irq_nosync(irq);
	}
	/* Take debug information first */
	DHD_OS_WAKE_LOCK(dhdp);
	dhd_prot_smmu_fault_dump(dhdp);
	DHD_OS_WAKE_UNLOCK(dhdp);
	/* Take AXI information if possible */
#ifdef DNGL_AXI_ERROR_LOGGING
#ifdef DHD_USE_WQ_FOR_DNGL_AXI_ERROR
	dhd_axi_error_dispatch(dhdp);
#else
	dhd_axi_error(dhdp);
#endif /* DHD_USE_WQ_FOR_DNGL_AXI_ERROR */
#endif /* DNGL_AXI_ERROR_LOGGING */
}
EXPORT_SYMBOL(dhd_smmu_fault_handler);
#endif /* DHD_MAP_LOGGING */
#ifdef DHD_WIFI_SHUTDOWN
void wifi_plat_dev_drv_shutdown(struct platform_device *pdev)
{
	dhd_pub_t *dhd_pub = NULL;
	dhd_info_t *dhd_info = NULL;
	dhd_if_t *dhd_if = NULL;

	DHD_ERROR(("%s enter\n", __FUNCTION__));
	dhd_pub = g_dhd_pub;
	if (dhd_os_check_if_up(dhd_pub)) {
		dhd_info = (dhd_info_t *)dhd_pub->info;
		dhd_if = dhd_info->iflist[0];
		ASSERT(dhd_if);
		ASSERT(dhd_if->net);
		if (dhd_if && dhd_if->net) {
			dhd_stop(dhd_if->net);
		}
	}
}
#endif /* DHD_WIFI_SHUTDOWN */
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 14, 0))
int
compat_kernel_read(struct file *file, loff_t offset, char *addr, unsigned long count)
{
	return (int)kernel_read(file, addr, (size_t)count, &offset);
}
#else
int
compat_kernel_read(struct file *file, loff_t offset, char *addr, unsigned long count)
{
	return kernel_read(file, offset, addr, count);
}
#endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 14, 0)) */
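/*
 * The wrapper above papers over the kernel_read() signature change in
 * Linux 4.14 (the offset moved to a pointer argument and the buffer/offset
 * order changed). A usage sketch, where fp stands for any already-opened
 * struct file pointer:
 *
 *	char hdr[16];
 *	int nread = compat_kernel_read(fp, 0, hdr, sizeof(hdr));
 *	if (nread < 0) {
 *		... treat as a read failure ...
 *	}
 */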
#ifdef DHDTCPSYNC_FLOOD_BLK
static void dhd_blk_tsfl_handler(struct work_struct *work)
{
	dhd_if_t *ifp = NULL;
	dhd_pub_t *dhdp = NULL;
	/* Ignore compiler warnings due to -Werror=cast-qual */
#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__)
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wcast-qual"
#endif /* STRICT_GCC_WARNINGS && __GNUC__ */
	ifp = container_of(work, dhd_if_t, blk_tsfl_work);
#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__)
#pragma GCC diagnostic pop
#endif /* STRICT_GCC_WARNINGS && __GNUC__ */
	if (ifp) {
		dhdp = &ifp->info->pub;
		if (dhdp) {
			if ((dhdp->op_mode & DHD_FLAG_P2P_GO_MODE) ||
				(dhdp->op_mode & DHD_FLAG_HOSTAP_MODE)) {
				DHD_ERROR(("Disassoc due to TCP SYNC FLOOD ATTACK\n"));
				wl_cfg80211_del_all_sta(ifp->net, WLAN_REASON_UNSPECIFIED);
			} else if ((dhdp->op_mode & DHD_FLAG_P2P_GC_MODE) ||
				(dhdp->op_mode & DHD_FLAG_STA_MODE)) {
				DHD_ERROR(("Disconnect due to TCP SYNC FLOOD ATTACK\n"));
				wl_cfg80211_disassoc(ifp->net, WLAN_REASON_UNSPECIFIED);
			}
		}
	}
}

void dhd_reset_tcpsync_info_by_ifp(dhd_if_t *ifp)
{
	ifp->tsync_rcvd = 0;
	ifp->tsyncack_txed = 0;
	ifp->last_sync = DIV_U64_BY_U32(OSL_LOCALTIME_NS(), NSEC_PER_SEC);
}

void dhd_reset_tcpsync_info_by_dev(struct net_device *dev)
{
	dhd_if_t *ifp = NULL;

	if (dev) {
		ifp = DHD_DEV_IFP(dev);
	}
	if (ifp) {
		ifp->tsync_rcvd = 0;
		ifp->tsyncack_txed = 0;
		ifp->last_sync = DIV_U64_BY_U32(OSL_LOCALTIME_NS(), NSEC_PER_SEC);
	}
}
#endif /* DHDTCPSYNC_FLOOD_BLK */
#ifdef DHD_4WAYM4_FAIL_DISCONNECT
static void dhd_m4_state_handler(struct work_struct *work)
{
	dhd_if_t *ifp = NULL;
	/* Ignore compiler warnings due to -Werror=cast-qual */
#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__)
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wcast-qual"
#endif // endif
	struct delayed_work *dw = to_delayed_work(work);
	ifp = container_of(dw, dhd_if_t, m4state_work);
#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__)
#pragma GCC diagnostic pop
#endif // endif
	if (ifp && ifp->net &&
		(OSL_ATOMIC_READ(ifp->info->pub->osh, &ifp->m4state) == M4_TXFAILED)) {
		DHD_ERROR(("Disassoc for 4WAY_HANDSHAKE_TIMEOUT at %s\n",
			ifp->net->name));
		wl_cfg80211_disassoc(ifp->net, WLAN_REASON_4WAY_HANDSHAKE_TIMEOUT);
	}
}

void
dhd_eap_txcomplete(dhd_pub_t *dhdp, void *txp, bool success, int ifidx)
{
	dhd_info_t *dhd = (dhd_info_t *)(dhdp->info);
	struct ether_header *eh;
	uint16 type;

	if (!success) {
		dhd_prot_hdrpull(dhdp, NULL, txp, NULL, NULL);
		eh = (struct ether_header *)PKTDATA(dhdp->osh, txp);
		type = ntoh16(eh->ether_type);
		if (type == ETHER_TYPE_802_1X) {
			if (dhd_is_4way_msg((uint8 *)eh) == EAPOL_4WAY_M4) {
				dhd_if_t *ifp = NULL;
				ifp = dhd->iflist[ifidx];
				if (!ifp || !ifp->net) {
					return;
				}
				DHD_INFO(("%s: M4 TX failed on %d.\n",
					__FUNCTION__, ifidx));
				OSL_ATOMIC_SET(dhdp->osh, &ifp->m4state, M4_TXFAILED);
				schedule_delayed_work(&ifp->m4state_work,
					msecs_to_jiffies(MAX_4WAY_TIMEOUT_MS));
			}
		}
	}
}

void
dhd_cleanup_m4_state_work(dhd_pub_t *dhdp, int ifidx)
{
	dhd_info_t *dhdinfo;
	dhd_if_t *ifp;

	if ((ifidx < 0) || (ifidx >= DHD_MAX_IFS)) {
		DHD_ERROR(("%s: invalid ifidx %d\n", __FUNCTION__, ifidx));
		return;
	}
	dhdinfo = (dhd_info_t *)(dhdp->info);
	if (!dhdinfo) {
		DHD_ERROR(("%s: dhdinfo is NULL\n", __FUNCTION__));
		return;
	}
	ifp = dhdinfo->iflist[ifidx];
	if (ifp) {
		cancel_delayed_work_sync(&ifp->m4state_work);
	}
}
#endif /* DHD_4WAYM4_FAIL_DISCONNECT */
#ifdef DHD_HP2P
unsigned long
dhd_os_hp2plock(dhd_pub_t *pub)
{
	dhd_info_t *dhd;
	unsigned long flags = 0;

	dhd = (dhd_info_t *)(pub->info);
	if (dhd) {
		spin_lock_irqsave(&dhd->hp2p_lock, flags);
	}
	return flags;
}

void
dhd_os_hp2punlock(dhd_pub_t *pub, unsigned long flags)
{
	dhd_info_t *dhd;

	dhd = (dhd_info_t *)(pub->info);
	if (dhd) {
		spin_unlock_irqrestore(&dhd->hp2p_lock, flags);
	}
}
#endif /* DHD_HP2P */
#ifdef DNGL_AXI_ERROR_LOGGING
static void
dhd_axi_error_dump(void *handle, void *event_info, u8 event)
{
	dhd_info_t *dhd = (dhd_info_t *)handle;
	dhd_pub_t *dhdp = NULL;

	if (!dhd) {
		DHD_ERROR(("%s: dhd is NULL\n", __FUNCTION__));
		goto exit;
	}
	dhdp = &dhd->pub;
	if (!dhdp) {
		DHD_ERROR(("%s: dhdp is NULL\n", __FUNCTION__));
		goto exit;
	}
	/*
	 * First save the AXI error information to a file,
	 * because a panic should happen right after this.
	 * After DHD resets, DHD reads the file back and runs the hang-event
	 * processing to send the AXI error stored in the file to the Bigdata server.
	 */
	if (dhdp->axi_err_dump->etd_axi_error_v1.version != HND_EXT_TRAP_AXIERROR_VERSION_1) {
		DHD_ERROR(("%s: Invalid AXI version: 0x%x\n",
			__FUNCTION__, dhdp->axi_err_dump->etd_axi_error_v1.version));
	}
	DHD_OS_WAKE_LOCK(dhdp);
#ifdef DHD_FW_COREDUMP
#ifdef DHD_SSSR_DUMP
	dhdp->collect_sssr = TRUE;
#endif /* DHD_SSSR_DUMP */
	DHD_ERROR(("%s: scheduling mem dump..\n", __FUNCTION__));
	dhd_schedule_memdump(dhdp, dhdp->soc_ram, dhdp->soc_ram_length);
#endif /* DHD_FW_COREDUMP */
	DHD_OS_WAKE_UNLOCK(dhdp);
exit:
	/* Trigger a kernel panic after taking the necessary dumps */
	BUG_ON(1);
}

void dhd_schedule_axi_error_dump(dhd_pub_t *dhdp, void *type)
{
	DHD_ERROR(("%s: scheduling axi_error_dump..\n", __FUNCTION__));
	dhd_deferred_schedule_work(dhdp->info->dhd_deferred_wq,
		type, DHD_WQ_WORK_AXI_ERROR_DUMP,
		dhd_axi_error_dump, DHD_WQ_WORK_PRIORITY_HIGH);
}
#endif /* DNGL_AXI_ERROR_LOGGING */
#ifdef BCMPCIE
static void
dhd_cto_recovery_handler(void *handle, void *event_info, u8 event)
{
	dhd_info_t *dhd = handle;
	dhd_pub_t *dhdp = NULL;

	if (!dhd) {
		DHD_ERROR(("%s: dhd is NULL\n", __FUNCTION__));
		BUG_ON(1);
		return;
	}
	dhdp = &dhd->pub;
	dhdpcie_cto_recovery_handler(dhdp);
}

void
dhd_schedule_cto_recovery(dhd_pub_t *dhdp)
{
	DHD_ERROR(("%s: scheduling cto recovery..\n", __FUNCTION__));
	dhd_deferred_schedule_work(dhdp->info->dhd_deferred_wq,
		NULL, DHD_WQ_WORK_CTO_RECOVERY,
		dhd_cto_recovery_handler, DHD_WQ_WORK_PRIORITY_HIGH);
}
#endif /* BCMPCIE */
#ifdef SUPPORT_SET_TID
/*
 * Set a custom TID value for UDP frames based on the UID value.
 * This is triggered by the Android private command below.
 * DRIVER SET_TID <Mode:uint8> <Target UID:uint32> <Custom TID:uint8>
 * Mode 0 (SET_TID_OFF)          : Disable changing the TID
 * Mode 1 (SET_TID_ALL_UDP)      : Change the TID for all UDP frames
 * Mode 2 (SET_TID_BASED_ON_UID) : Change the TID for UDP frames from the target UID
 */
void
dhd_set_tid_based_on_uid(dhd_pub_t *dhdp, void *pkt)
{
	struct ether_header *eh = NULL;
	struct sock *sk = NULL;
	uint8 *pktdata = NULL;
	uint8 *ip_hdr = NULL;
	uint8 cur_prio;
	uint8 prio;
	uint32 uid;

	if (dhdp->tid_mode == SET_TID_OFF) {
		return;
	}
	pktdata = (uint8 *)PKTDATA(dhdp->osh, pkt);
	eh = (struct ether_header *)pktdata;
	ip_hdr = (uint8 *)eh + ETHER_HDR_LEN;
	if (IPV4_PROT(ip_hdr) != IP_PROT_UDP) {
		return;
	}
	cur_prio = PKTPRIO(pkt);
	prio = dhdp->target_tid;
	uid = dhdp->target_uid;
	if ((cur_prio == prio) ||
		(cur_prio != PRIO_8021D_BE)) {
		return;
	}
	sk = ((struct sk_buff*)(pkt))->sk;
	if ((dhdp->tid_mode == SET_TID_ALL_UDP) ||
		(sk && (uid == __kuid_val(sock_i_uid(sk))))) {
		PKTSETPRIO(pkt, prio);
	}
}
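/*
 * Command sketch (UID/TID values are placeholders): per the mode table above,
 *
 *	DRIVER SET_TID 1 0 7		- tag every UDP frame with TID 7
 *	DRIVER SET_TID 2 10010 5	- tag UDP frames from UID 10010 with TID 5
 *	DRIVER SET_TID 0 0 0		- disable TID rewriting
 *
 * Only best-effort (PRIO_8021D_BE) packets are retagged; anything already
 * prioritized is left untouched by the function above.
 */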
#endif /* SUPPORT_SET_TID */