  1. /*
  2. * Neighbor Awareness Networking
  3. *
  4. * Portions of this code are copyright (c) 2020 Cypress Semiconductor Corporation
  5. *
  6. * Copyright (C) 1999-2020, Broadcom Corporation
  7. *
  8. * Unless you and Broadcom execute a separate written software license
  9. * agreement governing use of this software, this software is licensed to you
  10. * under the terms of the GNU General Public License version 2 (the "GPL"),
  11. * available at http://www.broadcom.com/licenses/GPLv2.php, with the
  12. * following added to such license:
  13. *
  14. * As a special exception, the copyright holders of this software give you
  15. * permission to link this software with independent modules, and to copy and
  16. * distribute the resulting executable under terms of your choice, provided that
  17. * you also meet, for each linked independent module, the terms and conditions of
  18. * the license of that module. An independent module is a module which is not
  19. * derived from this software. The special exception does not apply to any
  20. * modifications of the software.
  21. *
  22. * Notwithstanding the above, under no circumstances may you combine this
  23. * software in any way with any other Broadcom software provided under a license
  24. * other than the GPL, without Broadcom's express prior written consent.
  25. *
  26. *
  27. * <<Broadcom-WL-IPTag/Open:>>
  28. *
  29. * $Id: wl_cfgnan.c 815812 2019-04-20 14:30:23Z $
  30. */
  31. #ifdef WL_NAN
  32. #include <bcmutils.h>
  33. #include <bcmendian.h>
  34. #include <bcmwifi_channels.h>
  35. #include <nan.h>
  36. #include <bcmiov.h>
  37. #include <wl_cfg80211.h>
  38. #include <wl_cfgscan.h>
  39. #include <wl_android.h>
  40. #include <wl_cfgnan.h>
  41. #include <dngl_stats.h>
  42. #include <dhd.h>
  43. #ifdef RTT_SUPPORT
  44. #include <dhd_rtt.h>
  45. #endif /* RTT_SUPPORT */
  46. #include <wl_cfgvendor.h>
  47. #include <bcmbloom.h>
  48. #include <wl_cfgp2p.h>
  49. #ifdef RTT_SUPPORT
  50. #include <dhd_rtt.h>
  51. #endif /* RTT_SUPPORT */
  52. #include <bcmstdlib_s.h>
  53. #define NAN_RANGE_REQ_EVNT 1
  54. #define NAN_RAND_MAC_RETRIES 10
  55. #define NAN_SCAN_DWELL_TIME_DELTA_MS 10
  56. #define NAN_RNG_TERM_FLAG_NONE 0
  57. #ifdef WL_NAN_DISC_CACHE
  58. /* Disc Cache Parameters update Flags */
  59. #define NAN_DISC_CACHE_PARAM_SDE_CONTROL 0x0001
  60. static int wl_cfgnan_cache_disc_result(struct bcm_cfg80211 *cfg, void * data,
  61. u16 *disc_cache_update_flags);
  62. static int wl_cfgnan_remove_disc_result(struct bcm_cfg80211 * cfg, uint8 local_subid);
  63. static nan_disc_result_cache * wl_cfgnan_get_disc_result(struct bcm_cfg80211 *cfg,
  64. uint8 remote_pubid, struct ether_addr *peer);
  65. #endif /* WL_NAN_DISC_CACHE */
  66. static int wl_cfgnan_clear_disc_cache(struct bcm_cfg80211 *cfg, wl_nan_instance_id_t sub_id);
  67. static void wl_cfgnan_update_dp_mask(struct bcm_cfg80211 *cfg, bool enable, u8 nan_dp_id);
  68. static int wl_cfgnan_set_if_addr(struct bcm_cfg80211 *cfg);
  69. static int wl_cfgnan_get_capability(struct net_device *ndev,
  70. struct bcm_cfg80211 *cfg, nan_hal_capabilities_t *capabilities);
  71. static int32 wl_cfgnan_notify_disc_with_ranging(struct bcm_cfg80211 *cfg,
  72. nan_ranging_inst_t *rng_inst, nan_event_data_t *nan_event_data, uint32 distance);
  73. static void wl_cfgnan_disc_result_on_geofence_cancel(struct bcm_cfg80211 *cfg,
  74. nan_ranging_inst_t *rng_inst);
  75. static void wl_cfgnan_clear_nan_event_data(struct bcm_cfg80211 *cfg,
  76. nan_event_data_t *nan_event_data);
  77. void wl_cfgnan_data_remove_peer(struct bcm_cfg80211 *cfg,
  78. struct ether_addr *peer_addr);
  79. static void wl_cfgnan_send_stop_event(struct bcm_cfg80211 *cfg);
  80. #ifdef RTT_SUPPORT
  81. static s32 wl_cfgnan_clear_peer_ranging(struct bcm_cfg80211 * cfg,
  82. struct ether_addr * peer, int reason);
  83. #endif /* RTT_SUPPORT */
  84. static const char *nan_role_to_str(u8 role)
  85. {
  86. switch (role) {
  87. C2S(WL_NAN_ROLE_AUTO)
  88. C2S(WL_NAN_ROLE_NON_MASTER_NON_SYNC)
  89. C2S(WL_NAN_ROLE_NON_MASTER_SYNC)
  90. C2S(WL_NAN_ROLE_MASTER)
  91. C2S(WL_NAN_ROLE_ANCHOR_MASTER)
  92. default:
  93. return "WL_NAN_ROLE_UNKNOWN";
  94. }
  95. }
  96. static const char *nan_event_to_str(u16 cmd)
  97. {
  98. switch (cmd) {
  99. C2S(WL_NAN_EVENT_START)
  100. C2S(WL_NAN_EVENT_DISCOVERY_RESULT)
  101. C2S(WL_NAN_EVENT_TERMINATED)
  102. C2S(WL_NAN_EVENT_RECEIVE)
  103. C2S(WL_NAN_EVENT_MERGE)
  104. C2S(WL_NAN_EVENT_STOP)
  105. C2S(WL_NAN_EVENT_PEER_DATAPATH_IND)
  106. C2S(WL_NAN_EVENT_DATAPATH_ESTB)
  107. C2S(WL_NAN_EVENT_SDF_RX)
  108. C2S(WL_NAN_EVENT_DATAPATH_END)
  109. C2S(WL_NAN_EVENT_RNG_REQ_IND)
  110. C2S(WL_NAN_EVENT_RNG_RPT_IND)
  111. C2S(WL_NAN_EVENT_RNG_TERM_IND)
  112. C2S(WL_NAN_EVENT_TXS)
  113. C2S(WL_NAN_EVENT_INVALID)
  114. default:
  115. return "WL_NAN_EVENT_UNKNOWN";
  116. }
  117. }
  118. static int wl_cfgnan_execute_ioctl(struct net_device *ndev,
  119. struct bcm_cfg80211 *cfg, bcm_iov_batch_buf_t *nan_buf,
  120. uint16 nan_buf_size, uint32 *status, uint8 *resp_buf,
  121. uint16 resp_buf_len);
  122. int
  123. wl_cfgnan_generate_inst_id(struct bcm_cfg80211 *cfg, uint8 *p_inst_id)
  124. {
  125. s32 ret = BCME_OK;
  126. uint8 i = 0;
  127. if (p_inst_id == NULL) {
  128. WL_ERR(("Invalid arguments\n"));
  129. ret = -EINVAL;
  130. goto exit;
  131. }
  132. if (cfg->nancfg.inst_id_start == NAN_ID_MAX) {
  133. WL_ERR(("Consumed all IDs, resetting the counter\n"));
  134. cfg->nancfg.inst_id_start = 0;
  135. }
  136. for (i = cfg->nancfg.inst_id_start; i < NAN_ID_MAX; i++) {
  137. if (isclr(cfg->nancfg.svc_inst_id_mask, i)) {
  138. setbit(cfg->nancfg.svc_inst_id_mask, i);
  139. *p_inst_id = i + 1;
  140. cfg->nancfg.inst_id_start = *p_inst_id;
  141. WL_DBG(("Instance ID=%d\n", *p_inst_id));
  142. goto exit;
  143. }
  144. }
  145. WL_ERR(("Allocated maximum IDs\n"));
  146. ret = BCME_NORESOURCE;
  147. exit:
  148. return ret;
  149. }
  150. int
  151. wl_cfgnan_remove_inst_id(struct bcm_cfg80211 *cfg, uint8 inst_id)
  152. {
  153. s32 ret = BCME_OK;
  154. WL_DBG(("%s: Removing svc instance id %d\n", __FUNCTION__, inst_id));
  155. clrbit(cfg->nancfg.svc_inst_id_mask, inst_id-1);
  156. return ret;
  157. }
  158. s32 wl_cfgnan_parse_sdea_data(osl_t *osh, const uint8 *p_attr,
  159. uint16 len, nan_event_data_t *tlv_data)
  160. {
  161. const wifi_nan_svc_desc_ext_attr_t *nan_svc_desc_ext_attr = NULL;
  162. uint8 offset;
  163. s32 ret = BCME_OK;
  164. /* service descriptor ext attributes */
  165. nan_svc_desc_ext_attr = (const wifi_nan_svc_desc_ext_attr_t *)p_attr;
  166. /* attribute ID */
  167. WL_TRACE(("> attr id: 0x%02x\n", nan_svc_desc_ext_attr->id));
  168. /* attribute length */
  169. WL_TRACE(("> attr len: 0x%x\n", nan_svc_desc_ext_attr->len));
  170. if (nan_svc_desc_ext_attr->instance_id == tlv_data->pub_id) {
  171. tlv_data->sde_control_flag = nan_svc_desc_ext_attr->control;
  172. }
  173. offset = sizeof(*nan_svc_desc_ext_attr);
  174. if (offset > len) {
  175. WL_ERR(("Invalid event buffer len\n"));
  176. ret = BCME_BUFTOOSHORT;
  177. goto fail;
  178. }
  179. p_attr += offset;
  180. len -= offset;
  181. if (tlv_data->sde_control_flag & NAN_SC_RANGE_LIMITED) {
  182. WL_TRACE(("> svc_control: range limited present\n"));
  183. }
  184. if (tlv_data->sde_control_flag & NAN_SDE_CF_SVC_UPD_IND_PRESENT) {
  185. WL_TRACE(("> svc_control: sdea svc specific info present\n"));
  186. tlv_data->sde_svc_info.dlen = (p_attr[1] | (p_attr[2] << 8));
  187. WL_TRACE(("> sdea svc info len: 0x%02x\n", tlv_data->sde_svc_info.dlen));
  188. if (!tlv_data->sde_svc_info.dlen ||
  189. tlv_data->sde_svc_info.dlen > NAN_MAX_SERVICE_SPECIFIC_INFO_LEN) {
  190. /* must be able to handle null msg which is not error */
  191. tlv_data->sde_svc_info.dlen = 0;
  192. WL_ERR(("sde data length is invalid\n"));
  193. ret = BCME_BADLEN;
  194. goto fail;
  195. }
  196. if (tlv_data->sde_svc_info.dlen > 0) {
  197. tlv_data->sde_svc_info.data = MALLOCZ(osh, tlv_data->sde_svc_info.dlen);
  198. if (!tlv_data->sde_svc_info.data) {
  199. WL_ERR(("%s: memory allocation failed\n", __FUNCTION__));
  200. tlv_data->sde_svc_info.dlen = 0;
  201. ret = BCME_NOMEM;
  202. goto fail;
  203. }
  204. /* advance read pointer, consider sizeof of Service Update Indicator */
  205. offset = sizeof(tlv_data->sde_svc_info.dlen) - 1;
  206. if (offset > len) {
  207. WL_ERR(("Invalid event buffer len\n"));
  208. ret = BCME_BUFTOOSHORT;
  209. goto fail;
  210. }
  211. p_attr += offset;
  212. len -= offset;
  213. ret = memcpy_s(tlv_data->sde_svc_info.data, tlv_data->sde_svc_info.dlen,
  214. p_attr, tlv_data->sde_svc_info.dlen);
  215. if (ret != BCME_OK) {
  216. WL_ERR(("Failed to copy sde_svc_info\n"));
  217. goto fail;
  218. }
  219. } else {
  220. /* must be able to handle null msg which is not error */
  221. tlv_data->sde_svc_info.dlen = 0;
  222. WL_DBG(("%s: sdea svc info length is zero, null info data\n",
  223. __FUNCTION__));
  224. }
  225. }
  226. return ret;
  227. fail:
  228. if (tlv_data->sde_svc_info.data) {
  229. MFREE(osh, tlv_data->sde_svc_info.data,
  230. tlv_data->sde_svc_info.dlen);
  231. tlv_data->sde_svc_info.data = NULL;
  232. }
  233. WL_DBG(("Parse SDEA event data, status = %d\n", ret));
  234. return ret;
  235. }
  236. /*
  237. * This attribute contains some mandatory fields and some optional fields
  238. * depending on the content of the service discovery request.
  239. */
  240. s32
  241. wl_cfgnan_parse_sda_data(osl_t *osh, const uint8 *p_attr,
  242. uint16 len, nan_event_data_t *tlv_data)
  243. {
  244. uint8 svc_control = 0, offset = 0;
  245. s32 ret = BCME_OK;
  246. const wifi_nan_svc_descriptor_attr_t *nan_svc_desc_attr = NULL;
  247. /* service descriptor attributes */
  248. nan_svc_desc_attr = (const wifi_nan_svc_descriptor_attr_t *)p_attr;
  249. /* attribute ID */
  250. WL_TRACE(("> attr id: 0x%02x\n", nan_svc_desc_attr->id));
  251. /* attribute length */
  252. WL_TRACE(("> attr len: 0x%x\n", nan_svc_desc_attr->len));
  253. /* service ID */
  254. ret = memcpy_s(tlv_data->svc_name, sizeof(tlv_data->svc_name),
  255. nan_svc_desc_attr->svc_hash, NAN_SVC_HASH_LEN);
  256. if (ret != BCME_OK) {
  257. WL_ERR(("Failed to copy svc_hash_name:\n"));
  258. return ret;
  259. }
  260. WL_TRACE(("> svc_hash_name: " MACDBG "\n", MAC2STRDBG(tlv_data->svc_name)));
  261. /* local instance ID */
  262. tlv_data->local_inst_id = nan_svc_desc_attr->instance_id;
  263. WL_TRACE(("> local instance id: 0x%02x\n", tlv_data->local_inst_id));
  264. /* requestor instance ID */
  265. tlv_data->requestor_id = nan_svc_desc_attr->requestor_id;
  266. WL_TRACE(("> requestor id: 0x%02x\n", tlv_data->requestor_id));
  267. /* service control */
  268. svc_control = nan_svc_desc_attr->svc_control;
  269. if ((svc_control & NAN_SVC_CONTROL_TYPE_MASK) == NAN_SC_PUBLISH) {
  270. WL_TRACE(("> Service control type: NAN_SC_PUBLISH\n"));
  271. } else if ((svc_control & NAN_SVC_CONTROL_TYPE_MASK) == NAN_SC_SUBSCRIBE) {
  272. WL_TRACE(("> Service control type: NAN_SC_SUBSCRIBE\n"));
  273. } else if ((svc_control & NAN_SVC_CONTROL_TYPE_MASK) == NAN_SC_FOLLOWUP) {
  274. WL_TRACE(("> Service control type: NAN_SC_FOLLOWUP\n"));
  275. }
  276. offset = sizeof(*nan_svc_desc_attr);
  277. if (offset > len) {
  278. WL_ERR(("Invalid event buffer len\n"));
  279. ret = BCME_BUFTOOSHORT;
  280. goto fail;
  281. }
  282. p_attr += offset;
  283. len -= offset;
  284. /*
  285. * optional fields:
  286. * must be in order following by service descriptor attribute format
  287. */
  288. /* binding bitmap */
  289. if (svc_control & NAN_SC_BINDING_BITMAP_PRESENT) {
  290. uint16 bitmap = 0;
  291. WL_TRACE(("> svc_control: binding bitmap present\n"));
  292. /* Copy binding bitmap */
  293. ret = memcpy_s(&bitmap, sizeof(bitmap),
  294. p_attr, NAN_BINDING_BITMAP_LEN);
  295. if (ret != BCME_OK) {
  296. WL_ERR(("Failed to copy bit map\n"));
  297. return ret;
  298. }
  299. WL_TRACE(("> sc binding bitmap: 0x%04x\n", bitmap));
  300. if (NAN_BINDING_BITMAP_LEN > len) {
  301. WL_ERR(("Invalid event buffer len\n"));
  302. ret = BCME_BUFTOOSHORT;
  303. goto fail;
  304. }
  305. p_attr += NAN_BINDING_BITMAP_LEN;
  306. len -= NAN_BINDING_BITMAP_LEN;
  307. }
  308. /* matching filter */
  309. if (svc_control & NAN_SC_MATCHING_FILTER_PRESENT) {
  310. WL_TRACE(("> svc_control: matching filter present\n"));
  311. tlv_data->tx_match_filter.dlen = *p_attr++;
  312. WL_TRACE(("> matching filter len: 0x%02x\n",
  313. tlv_data->tx_match_filter.dlen));
  314. if (!tlv_data->tx_match_filter.dlen ||
  315. tlv_data->tx_match_filter.dlen > MAX_MATCH_FILTER_LEN) {
  316. tlv_data->tx_match_filter.dlen = 0;
  317. WL_ERR(("tx match filter length is invalid\n"));
  318. ret = -EINVAL;
  319. goto fail;
  320. }
  321. tlv_data->tx_match_filter.data =
  322. MALLOCZ(osh, tlv_data->tx_match_filter.dlen);
  323. if (!tlv_data->tx_match_filter.data) {
  324. WL_ERR(("%s: memory allocation failed\n", __FUNCTION__));
  325. tlv_data->tx_match_filter.dlen = 0;
  326. ret = -ENOMEM;
  327. goto fail;
  328. }
  329. ret = memcpy_s(tlv_data->tx_match_filter.data, tlv_data->tx_match_filter.dlen,
  330. p_attr, tlv_data->tx_match_filter.dlen);
  331. if (ret != BCME_OK) {
  332. WL_ERR(("Failed to copy tx match filter data\n"));
  333. goto fail;
  334. }
  335. /* advance read pointer */
  336. offset = tlv_data->tx_match_filter.dlen;
  337. if (offset > len) {
  338. WL_ERR(("Invalid event buffer\n"));
  339. ret = BCME_BUFTOOSHORT;
  340. goto fail;
  341. }
  342. p_attr += offset;
  343. len -= offset;
  344. }
  345. /* service response filter */
  346. if (svc_control & NAN_SC_SR_FILTER_PRESENT) {
  347. WL_TRACE(("> svc_control: service response filter present\n"));
  348. tlv_data->rx_match_filter.dlen = *p_attr++;
  349. WL_TRACE(("> sr match filter len: 0x%02x\n",
  350. tlv_data->rx_match_filter.dlen));
  351. if (!tlv_data->rx_match_filter.dlen ||
  352. tlv_data->rx_match_filter.dlen > MAX_MATCH_FILTER_LEN) {
  353. tlv_data->rx_match_filter.dlen = 0;
  354. WL_ERR(("%s: sr matching filter length is invalid\n",
  355. __FUNCTION__));
  356. ret = BCME_BADLEN;
  357. goto fail;
  358. }
  359. tlv_data->rx_match_filter.data =
  360. MALLOCZ(osh, tlv_data->rx_match_filter.dlen);
  361. if (!tlv_data->rx_match_filter.data) {
  362. WL_ERR(("%s: memory allocation failed\n", __FUNCTION__));
  363. tlv_data->rx_match_filter.dlen = 0;
  364. ret = BCME_NOMEM;
  365. goto fail;
  366. }
  367. ret = memcpy_s(tlv_data->rx_match_filter.data, tlv_data->rx_match_filter.dlen,
  368. p_attr, tlv_data->rx_match_filter.dlen);
  369. if (ret != BCME_OK) {
  370. WL_ERR(("Failed to copy rx match filter data\n"));
  371. goto fail;
  372. }
  373. /* advance read pointer */
  374. offset = tlv_data->rx_match_filter.dlen;
  375. if (offset > len) {
  376. WL_ERR(("Invalid event buffer len\n"));
  377. ret = BCME_BUFTOOSHORT;
  378. goto fail;
  379. }
  380. p_attr += offset;
  381. len -= offset;
  382. }
  383. /* service specific info */
  384. if (svc_control & NAN_SC_SVC_INFO_PRESENT) {
  385. WL_TRACE(("> svc_control: svc specific info present\n"));
  386. tlv_data->svc_info.dlen = *p_attr++;
  387. WL_TRACE(("> svc info len: 0x%02x\n", tlv_data->svc_info.dlen));
  388. if (!tlv_data->svc_info.dlen ||
  389. tlv_data->svc_info.dlen > NAN_MAX_SERVICE_SPECIFIC_INFO_LEN) {
  390. /* must be able to handle null msg which is not error */
  391. tlv_data->svc_info.dlen = 0;
  392. WL_ERR(("sde data length is invalid\n"));
  393. ret = BCME_BADLEN;
  394. goto fail;
  395. }
  396. if (tlv_data->svc_info.dlen > 0) {
  397. tlv_data->svc_info.data =
  398. MALLOCZ(osh, tlv_data->svc_info.dlen);
  399. if (!tlv_data->svc_info.data) {
  400. WL_ERR(("%s: memory allocation failed\n", __FUNCTION__));
  401. tlv_data->svc_info.dlen = 0;
  402. ret = BCME_NOMEM;
  403. goto fail;
  404. }
  405. ret = memcpy_s(tlv_data->svc_info.data, tlv_data->svc_info.dlen,
  406. p_attr, tlv_data->svc_info.dlen);
  407. if (ret != BCME_OK) {
  408. WL_ERR(("Failed to copy svc info\n"));
  409. goto fail;
  410. }
  411. /* advance read pointer */
  412. offset = tlv_data->svc_info.dlen;
  413. if (offset > len) {
  414. WL_ERR(("Invalid event buffer len\n"));
  415. ret = BCME_BUFTOOSHORT;
  416. goto fail;
  417. }
  418. p_attr += offset;
  419. len -= offset;
  420. } else {
  421. /* must be able to handle null msg which is not error */
  422. tlv_data->svc_info.dlen = 0;
  423. WL_TRACE(("%s: svc info length is zero, null info data\n",
  424. __FUNCTION__));
  425. }
  426. }
  427. /*
  428. * discovery range limited:
  429. * If set to 1, the pub/sub msg is limited in range to close proximity.
  430. * If set to 0, the pub/sub msg is not limited in range.
  431. * Valid only when the message is either of a publish or a sub.
  432. */
  433. if (svc_control & NAN_SC_RANGE_LIMITED) {
  434. if (((svc_control & NAN_SVC_CONTROL_TYPE_MASK) == NAN_SC_PUBLISH) ||
  435. ((svc_control & NAN_SVC_CONTROL_TYPE_MASK) == NAN_SC_SUBSCRIBE)) {
  436. WL_TRACE(("> svc_control: range limited present\n"));
  437. } else {
  438. WL_TRACE(("range limited is only valid on pub or sub\n"));
  439. }
  440. /* TODO: send up */
  441. /* advance read pointer */
  442. p_attr++;
  443. }
  444. return ret;
  445. fail:
  446. if (tlv_data->tx_match_filter.data) {
  447. MFREE(osh, tlv_data->tx_match_filter.data,
  448. tlv_data->tx_match_filter.dlen);
  449. tlv_data->tx_match_filter.data = NULL;
  450. }
  451. if (tlv_data->rx_match_filter.data) {
  452. MFREE(osh, tlv_data->rx_match_filter.data,
  453. tlv_data->rx_match_filter.dlen);
  454. tlv_data->rx_match_filter.data = NULL;
  455. }
  456. if (tlv_data->svc_info.data) {
  457. MFREE(osh, tlv_data->svc_info.data,
  458. tlv_data->svc_info.dlen);
  459. tlv_data->svc_info.data = NULL;
  460. }
  461. WL_DBG(("Parse SDA event data, status = %d\n", ret));
  462. return ret;
  463. }
  464. static s32
  465. wl_cfgnan_parse_sd_attr_data(osl_t *osh, uint16 len, const uint8 *data,
  466. nan_event_data_t *tlv_data, uint16 type) {
  467. const uint8 *p_attr = data;
  468. uint16 offset = 0;
  469. s32 ret = BCME_OK;
  470. const wl_nan_event_disc_result_t *ev_disc = NULL;
  471. const wl_nan_event_replied_t *ev_replied = NULL;
  472. const wl_nan_ev_receive_t *ev_fup = NULL;
  473. /*
  474. * Mapping wifi_nan_svc_descriptor_attr_t, and svc controls are optional.
  475. */
  476. if (type == WL_NAN_XTLV_SD_DISC_RESULTS) {
  477. u8 iter;
  478. ev_disc = (const wl_nan_event_disc_result_t *)p_attr;
  479. WL_DBG((">> WL_NAN_XTLV_RESULTS: Discovery result\n"));
  480. tlv_data->pub_id = (wl_nan_instance_id_t)ev_disc->pub_id;
  481. tlv_data->sub_id = (wl_nan_instance_id_t)ev_disc->sub_id;
  482. tlv_data->publish_rssi = ev_disc->publish_rssi;
  483. ret = memcpy_s(&tlv_data->remote_nmi, ETHER_ADDR_LEN,
  484. &ev_disc->pub_mac, ETHER_ADDR_LEN);
  485. if (ret != BCME_OK) {
  486. WL_ERR(("Failed to copy remote nmi\n"));
  487. goto fail;
  488. }
  489. WL_TRACE(("publish id: %d\n", ev_disc->pub_id));
  490. WL_TRACE(("subscribe d: %d\n", ev_disc->sub_id));
  491. WL_TRACE(("publish mac addr: " MACDBG "\n",
  492. MAC2STRDBG(ev_disc->pub_mac.octet)));
  493. WL_TRACE(("publish rssi: %d\n", (int8)ev_disc->publish_rssi));
  494. WL_TRACE(("attribute no: %d\n", ev_disc->attr_num));
  495. WL_TRACE(("attribute len: %d\n", (uint16)ev_disc->attr_list_len));
  496. /* advance to the service descricptor */
  497. offset = OFFSETOF(wl_nan_event_disc_result_t, attr_list[0]);
  498. if (offset > len) {
  499. WL_ERR(("Invalid event buffer len\n"));
  500. ret = BCME_BUFTOOSHORT;
  501. goto fail;
  502. }
  503. p_attr += offset;
  504. len -= offset;
  505. iter = ev_disc->attr_num;
  506. while (iter) {
  507. if ((uint8)*p_attr == NAN_ATTR_SVC_DESCRIPTOR) {
  508. WL_TRACE(("> attr id: 0x%02x\n", (uint8)*p_attr));
  509. ret = wl_cfgnan_parse_sda_data(osh, p_attr, len, tlv_data);
  510. if (unlikely(ret)) {
  511. WL_ERR(("wl_cfgnan_parse_sda_data failed,"
  512. "error = %d \n", ret));
  513. goto fail;
  514. }
  515. }
  516. if ((uint8)*p_attr == NAN_ATTR_SVC_DESC_EXTENSION) {
  517. WL_TRACE(("> attr id: 0x%02x\n", (uint8)*p_attr));
  518. ret = wl_cfgnan_parse_sdea_data(osh, p_attr, len, tlv_data);
  519. if (unlikely(ret)) {
  520. WL_ERR(("wl_cfgnan_parse_sdea_data failed,"
  521. "error = %d \n", ret));
  522. goto fail;
  523. }
  524. }
  525. offset = (sizeof(*p_attr) +
  526. sizeof(ev_disc->attr_list_len) +
  527. (p_attr[1] | (p_attr[2] << 8)));
  528. if (offset > len) {
  529. WL_ERR(("Invalid event buffer len\n"));
  530. ret = BCME_BUFTOOSHORT;
  531. goto fail;
  532. }
  533. p_attr += offset;
  534. len -= offset;
  535. iter--;
  536. }
  537. } else if (type == WL_NAN_XTLV_SD_FUP_RECEIVED) {
  538. uint8 iter;
  539. ev_fup = (const wl_nan_ev_receive_t *)p_attr;
  540. WL_TRACE((">> WL_NAN_XTLV_SD_FUP_RECEIVED: Transmit follow-up\n"));
  541. tlv_data->local_inst_id = (wl_nan_instance_id_t)ev_fup->local_id;
  542. tlv_data->requestor_id = (wl_nan_instance_id_t)ev_fup->remote_id;
  543. tlv_data->fup_rssi = ev_fup->fup_rssi;
  544. ret = memcpy_s(&tlv_data->remote_nmi, ETHER_ADDR_LEN,
  545. &ev_fup->remote_addr, ETHER_ADDR_LEN);
  546. if (ret != BCME_OK) {
  547. WL_ERR(("Failed to copy remote nmi\n"));
  548. goto fail;
  549. }
  550. WL_TRACE(("local id: %d\n", ev_fup->local_id));
  551. WL_TRACE(("remote id: %d\n", ev_fup->remote_id));
  552. WL_TRACE(("peer mac addr: " MACDBG "\n",
  553. MAC2STRDBG(ev_fup->remote_addr.octet)));
  554. WL_TRACE(("peer rssi: %d\n", (int8)ev_fup->fup_rssi));
  555. WL_TRACE(("attribute no: %d\n", ev_fup->attr_num));
  556. WL_TRACE(("attribute len: %d\n", ev_fup->attr_list_len));
  557. /* advance to the service descriptor which is attr_list[0] */
  558. offset = OFFSETOF(wl_nan_ev_receive_t, attr_list[0]);
  559. if (offset > len) {
  560. WL_ERR(("Invalid event buffer len\n"));
  561. ret = BCME_BUFTOOSHORT;
  562. goto fail;
  563. }
  564. p_attr += offset;
  565. len -= offset;
  566. iter = ev_fup->attr_num;
  567. while (iter) {
  568. if ((uint8)*p_attr == NAN_ATTR_SVC_DESCRIPTOR) {
  569. WL_TRACE(("> attr id: 0x%02x\n", (uint8)*p_attr));
  570. ret = wl_cfgnan_parse_sda_data(osh, p_attr, len, tlv_data);
  571. if (unlikely(ret)) {
  572. WL_ERR(("wl_cfgnan_parse_sda_data failed,"
  573. "error = %d \n", ret));
  574. goto fail;
  575. }
  576. }
  577. if ((uint8)*p_attr == NAN_ATTR_SVC_DESC_EXTENSION) {
  578. WL_TRACE(("> attr id: 0x%02x\n", (uint8)*p_attr));
  579. ret = wl_cfgnan_parse_sdea_data(osh, p_attr, len, tlv_data);
  580. if (unlikely(ret)) {
  581. WL_ERR(("wl_cfgnan_parse_sdea_data failed,"
  582. "error = %d \n", ret));
  583. goto fail;
  584. }
  585. }
  586. offset = (sizeof(*p_attr) +
  587. sizeof(ev_fup->attr_list_len) +
  588. (p_attr[1] | (p_attr[2] << 8)));
  589. if (offset > len) {
  590. WL_ERR(("Invalid event buffer len\n"));
  591. ret = BCME_BUFTOOSHORT;
  592. goto fail;
  593. }
  594. p_attr += offset;
  595. len -= offset;
  596. iter--;
  597. }
  598. } else if (type == WL_NAN_XTLV_SD_SDF_RX) {
  599. /*
  600. * SDF followed by nan2_pub_act_frame_t and wifi_nan_svc_descriptor_attr_t,
  601. * and svc controls are optional.
  602. */
  603. const nan2_pub_act_frame_t *nan_pub_af =
  604. (const nan2_pub_act_frame_t *)p_attr;
  605. WL_TRACE((">> WL_NAN_XTLV_SD_SDF_RX\n"));
  606. /* nan2_pub_act_frame_t */
  607. WL_TRACE(("pub category: 0x%02x\n", nan_pub_af->category_id));
  608. WL_TRACE(("pub action: 0x%02x\n", nan_pub_af->action_field));
  609. WL_TRACE(("nan oui: %2x-%2x-%2x\n",
  610. nan_pub_af->oui[0], nan_pub_af->oui[1], nan_pub_af->oui[2]));
  611. WL_TRACE(("oui type: 0x%02x\n", nan_pub_af->oui_type));
  612. WL_TRACE(("oui subtype: 0x%02x\n", nan_pub_af->oui_sub_type));
  613. offset = sizeof(*nan_pub_af);
  614. if (offset > len) {
  615. WL_ERR(("Invalid event buffer len\n"));
  616. ret = BCME_BUFTOOSHORT;
  617. goto fail;
  618. }
  619. p_attr += offset;
  620. len -= offset;
  621. } else if (type == WL_NAN_XTLV_SD_REPLIED) {
  622. ev_replied = (const wl_nan_event_replied_t *)p_attr;
  623. WL_TRACE((">> WL_NAN_XTLV_SD_REPLIED: Replied Event\n"));
  624. tlv_data->pub_id = (wl_nan_instance_id_t)ev_replied->pub_id;
  625. tlv_data->sub_id = (wl_nan_instance_id_t)ev_replied->sub_id;
  626. tlv_data->sub_rssi = ev_replied->sub_rssi;
  627. ret = memcpy_s(&tlv_data->remote_nmi, ETHER_ADDR_LEN,
  628. &ev_replied->sub_mac, ETHER_ADDR_LEN);
  629. if (ret != BCME_OK) {
  630. WL_ERR(("Failed to copy remote nmi\n"));
  631. goto fail;
  632. }
  633. WL_TRACE(("publish id: %d\n", ev_replied->pub_id));
  634. WL_TRACE(("subscribe d: %d\n", ev_replied->sub_id));
  635. WL_TRACE(("Subscriber mac addr: " MACDBG "\n",
  636. MAC2STRDBG(ev_replied->sub_mac.octet)));
  637. WL_TRACE(("subscribe rssi: %d\n", (int8)ev_replied->sub_rssi));
  638. WL_TRACE(("attribute no: %d\n", ev_replied->attr_num));
  639. WL_TRACE(("attribute len: %d\n", (uint16)ev_replied->attr_list_len));
  640. /* advance to the service descriptor which is attr_list[0] */
  641. offset = OFFSETOF(wl_nan_event_replied_t, attr_list[0]);
  642. if (offset > len) {
  643. WL_ERR(("Invalid event buffer len\n"));
  644. ret = BCME_BUFTOOSHORT;
  645. goto fail;
  646. }
  647. p_attr += offset;
  648. len -= offset;
  649. ret = wl_cfgnan_parse_sda_data(osh, p_attr, len, tlv_data);
  650. if (unlikely(ret)) {
  651. WL_ERR(("wl_cfgnan_parse_sdea_data failed,"
  652. "error = %d \n", ret));
  653. }
  654. }
  655. fail:
  656. return ret;
  657. }
  658. /* Based on each case of tlv type id, fill into tlv data */
  659. int
  660. wl_cfgnan_set_vars_cbfn(void *ctx, const uint8 *data, uint16 type, uint16 len)
  661. {
  662. nan_parse_event_ctx_t *ctx_tlv_data = ((nan_parse_event_ctx_t *)(ctx));
  663. nan_event_data_t *tlv_data = ((nan_event_data_t *)(ctx_tlv_data->nan_evt_data));
  664. int ret = BCME_OK;
  665. NAN_DBG_ENTER();
  666. if (!data || !len) {
  667. WL_ERR(("data length is invalid\n"));
  668. ret = BCME_ERROR;
  669. goto fail;
  670. }
  671. switch (type) {
  672. /*
  673. * Need to parse service descript attributes including service control,
  674. * when Follow up or Discovery result come
  675. */
  676. case WL_NAN_XTLV_SD_FUP_RECEIVED:
  677. case WL_NAN_XTLV_SD_DISC_RESULTS: {
  678. ret = wl_cfgnan_parse_sd_attr_data(ctx_tlv_data->cfg->osh,
  679. len, data, tlv_data, type);
  680. break;
  681. }
  682. case WL_NAN_XTLV_SD_SVC_INFO: {
  683. tlv_data->svc_info.data =
  684. MALLOCZ(ctx_tlv_data->cfg->osh, len);
  685. if (!tlv_data->svc_info.data) {
  686. WL_ERR(("%s: memory allocation failed\n", __FUNCTION__));
  687. tlv_data->svc_info.dlen = 0;
  688. ret = BCME_NOMEM;
  689. goto fail;
  690. }
  691. tlv_data->svc_info.dlen = len;
  692. ret = memcpy_s(tlv_data->svc_info.data, tlv_data->svc_info.dlen,
  693. data, tlv_data->svc_info.dlen);
  694. if (ret != BCME_OK) {
  695. WL_ERR(("Failed to copy svc info data\n"));
  696. goto fail;
  697. }
  698. break;
  699. }
  700. default:
  701. WL_ERR(("Not available for tlv type = 0x%x\n", type));
  702. ret = BCME_ERROR;
  703. break;
  704. }
  705. fail:
  706. NAN_DBG_EXIT();
  707. return ret;
  708. }
  709. int
  710. wl_cfg_nan_check_cmd_len(uint16 nan_iov_len, uint16 data_size,
  711. uint16 *subcmd_len)
  712. {
  713. s32 ret = BCME_OK;
  714. if (subcmd_len != NULL) {
  715. *subcmd_len = OFFSETOF(bcm_iov_batch_subcmd_t, data) +
  716. ALIGN_SIZE(data_size, 4);
  717. if (*subcmd_len > nan_iov_len) {
  718. WL_ERR(("%s: Buf short, requested:%d, available:%d\n",
  719. __FUNCTION__, *subcmd_len, nan_iov_len));
  720. ret = BCME_NOMEM;
  721. }
  722. } else {
  723. WL_ERR(("Invalid subcmd_len\n"));
  724. ret = BCME_ERROR;
  725. }
  726. return ret;
  727. }
  728. int
  729. wl_cfgnan_config_eventmask(struct net_device *ndev, struct bcm_cfg80211 *cfg,
  730. uint8 event_ind_flag, bool disable_events)
  731. {
  732. bcm_iov_batch_buf_t *nan_buf = NULL;
  733. s32 ret = BCME_OK;
  734. uint16 nan_buf_size = NAN_IOCTL_BUF_SIZE;
  735. uint16 subcmd_len;
  736. uint32 status;
  737. bcm_iov_batch_subcmd_t *sub_cmd = NULL;
  738. bcm_iov_batch_subcmd_t *sub_cmd_resp = NULL;
  739. uint8 event_mask[WL_NAN_EVMASK_EXTN_LEN];
  740. wl_nan_evmask_extn_t *evmask;
  741. uint16 evmask_cmd_len;
  742. uint8 resp_buf[NAN_IOCTL_BUF_SIZE];
  743. NAN_DBG_ENTER();
  744. /* same src and dest len here */
  745. (void)memset_s(event_mask, WL_NAN_EVMASK_EXTN_VER, 0, WL_NAN_EVMASK_EXTN_VER);
  746. evmask_cmd_len = OFFSETOF(wl_nan_evmask_extn_t, evmask) +
  747. WL_NAN_EVMASK_EXTN_LEN;
  748. ret = wl_add_remove_eventmsg(ndev, WLC_E_NAN, true);
  749. if (unlikely(ret)) {
  750. WL_ERR((" nan event enable failed, error = %d \n", ret));
  751. goto fail;
  752. }
  753. nan_buf = MALLOCZ(cfg->osh, nan_buf_size);
  754. if (!nan_buf) {
  755. WL_ERR(("%s: memory allocation failed\n", __func__));
  756. ret = BCME_NOMEM;
  757. goto fail;
  758. }
  759. nan_buf->version = htol16(WL_NAN_IOV_BATCH_VERSION);
  760. nan_buf->count = 0;
  761. nan_buf_size -= OFFSETOF(bcm_iov_batch_buf_t, cmds[0]);
  762. sub_cmd = (bcm_iov_batch_subcmd_t*)(uint8 *)(&nan_buf->cmds[0]);
  763. ret = wl_cfg_nan_check_cmd_len(nan_buf_size,
  764. evmask_cmd_len, &subcmd_len);
  765. if (unlikely(ret)) {
  766. WL_ERR(("nan_sub_cmd check failed\n"));
  767. goto fail;
  768. }
  769. sub_cmd->id = htod16(WL_NAN_CMD_CFG_EVENT_MASK);
  770. sub_cmd->len = sizeof(sub_cmd->u.options) + evmask_cmd_len;
  771. sub_cmd->u.options = htol32(BCM_XTLV_OPTION_ALIGN32);
  772. evmask = (wl_nan_evmask_extn_t *)sub_cmd->data;
  773. evmask->ver = WL_NAN_EVMASK_EXTN_VER;
  774. evmask->len = WL_NAN_EVMASK_EXTN_LEN;
  775. nan_buf_size -= subcmd_len;
  776. nan_buf->count = 1;
  777. if (disable_events) {
  778. WL_DBG(("Disabling all nan events..except stop event\n"));
  779. setbit(event_mask, NAN_EVENT_MAP(WL_NAN_EVENT_STOP));
  780. } else {
  781. /*
  782. * Android framework event mask configuration.
  783. */
  784. nan_buf->is_set = false;
  785. memset(resp_buf, 0, sizeof(resp_buf));
  786. ret = wl_cfgnan_execute_ioctl(ndev, cfg, nan_buf, nan_buf_size, &status,
  787. (void*)resp_buf, NAN_IOCTL_BUF_SIZE);
  788. if (unlikely(ret) || unlikely(status)) {
  789. WL_ERR(("get nan event mask failed ret %d status %d \n",
  790. ret, status));
  791. goto fail;
  792. }
  793. sub_cmd_resp = &((bcm_iov_batch_buf_t *)(resp_buf))->cmds[0];
  794. evmask = (wl_nan_evmask_extn_t *)sub_cmd_resp->data;
  795. /* check the response buff */
  796. /* same src and dest len here */
  797. (void)memcpy_s(&event_mask, WL_NAN_EVMASK_EXTN_LEN,
  798. (uint8*)&evmask->evmask, WL_NAN_EVMASK_EXTN_LEN);
  799. if (event_ind_flag) {
  800. if (CHECK_BIT(event_ind_flag, WL_NAN_EVENT_DIC_MAC_ADDR_BIT)) {
  801. WL_DBG(("Need to add disc mac addr change event\n"));
  802. }
  803. /* BIT2 - Disable nan cluster join indication (OTA). */
  804. if (CHECK_BIT(event_ind_flag, WL_NAN_EVENT_JOIN_EVENT)) {
  805. clrbit(event_mask, NAN_EVENT_MAP(WL_NAN_EVENT_MERGE));
  806. }
  807. }
  808. setbit(event_mask, NAN_EVENT_MAP(WL_NAN_EVENT_DISCOVERY_RESULT));
  809. setbit(event_mask, NAN_EVENT_MAP(WL_NAN_EVENT_RECEIVE));
  810. setbit(event_mask, NAN_EVENT_MAP(WL_NAN_EVENT_TERMINATED));
  811. setbit(event_mask, NAN_EVENT_MAP(WL_NAN_EVENT_STOP));
  812. setbit(event_mask, NAN_EVENT_MAP(WL_NAN_EVENT_TXS));
  813. setbit(event_mask, NAN_EVENT_MAP(WL_NAN_EVENT_PEER_DATAPATH_IND));
  814. setbit(event_mask, NAN_EVENT_MAP(WL_NAN_EVENT_DATAPATH_ESTB));
  815. setbit(event_mask, NAN_EVENT_MAP(WL_NAN_EVENT_DATAPATH_END));
  816. setbit(event_mask, NAN_EVENT_MAP(WL_NAN_EVENT_RNG_REQ_IND));
  817. setbit(event_mask, NAN_EVENT_MAP(WL_NAN_EVENT_RNG_TERM_IND));
  818. setbit(event_mask, NAN_EVENT_MAP(WL_NAN_EVENT_DISC_CACHE_TIMEOUT));
  819. /* Disable below events by default */
  820. clrbit(event_mask, NAN_EVENT_MAP(WL_NAN_EVENT_PEER_SCHED_UPD_NOTIF));
  821. clrbit(event_mask, NAN_EVENT_MAP(WL_NAN_EVENT_RNG_RPT_IND));
  822. }
  823. nan_buf->is_set = true;
  824. evmask = (wl_nan_evmask_extn_t *)sub_cmd->data;
  825. /* same src and dest len here */
  826. (void)memcpy_s((uint8*)&evmask->evmask, WL_NAN_EVMASK_EXTN_LEN,
  827. &event_mask, WL_NAN_EVMASK_EXTN_LEN);
  828. nan_buf_size = (NAN_IOCTL_BUF_SIZE - nan_buf_size);
  829. ret = wl_cfgnan_execute_ioctl(ndev, cfg, nan_buf, nan_buf_size, &status,
  830. (void*)resp_buf, NAN_IOCTL_BUF_SIZE);
  831. if (unlikely(ret) || unlikely(status)) {
  832. WL_ERR(("set nan event mask failed ret %d status %d \n", ret, status));
  833. goto fail;
  834. }
  835. WL_DBG(("set nan event mask successfull\n"));
  836. fail:
  837. if (nan_buf) {
  838. MFREE(cfg->osh, nan_buf, NAN_IOCTL_BUF_SIZE);
  839. }
  840. NAN_DBG_EXIT();
  841. return ret;
  842. }
  843. static int
  844. wl_cfgnan_set_nan_avail(struct net_device *ndev,
  845. struct bcm_cfg80211 *cfg, nan_avail_cmd_data *cmd_data, uint8 avail_type)
  846. {
  847. bcm_iov_batch_buf_t *nan_buf = NULL;
  848. s32 ret = BCME_OK;
  849. uint16 nan_buf_size = NAN_IOCTL_BUF_SIZE;
  850. uint16 subcmd_len;
  851. bcm_iov_batch_subcmd_t *sub_cmd = NULL;
  852. wl_nan_iov_t *nan_iov_data = NULL;
  853. wl_avail_t *avail = NULL;
  854. wl_avail_entry_t *entry; /* used for filling entry structure */
  855. uint8 *p; /* tracking pointer */
  856. uint8 i;
  857. u32 status;
  858. int c;
  859. char ndc_id[ETHER_ADDR_LEN] = { 0x50, 0x6f, 0x9a, 0x01, 0x0, 0x0 };
  860. dhd_pub_t *dhdp = wl_cfg80211_get_dhdp(ndev);
  861. char *a = WL_AVAIL_BIT_MAP;
  862. uint8 resp_buf[NAN_IOCTL_BUF_SIZE];
  863. NAN_DBG_ENTER();
  864. /* Do not disturb avail if dam is supported */
  865. if (FW_SUPPORTED(dhdp, autodam)) {
  866. WL_DBG(("DAM is supported, avail modification not allowed\n"));
  867. return ret;
  868. }
  869. if (avail_type < WL_AVAIL_LOCAL || avail_type > WL_AVAIL_TYPE_MAX) {
  870. WL_ERR(("Invalid availability type\n"));
  871. ret = BCME_USAGE_ERROR;
  872. goto fail;
  873. }
  874. nan_buf = MALLOCZ(cfg->osh, nan_buf_size);
  875. if (!nan_buf) {
  876. WL_ERR(("%s: memory allocation failed\n", __func__));
  877. ret = BCME_NOMEM;
  878. goto fail;
  879. }
  880. nan_iov_data = MALLOCZ(cfg->osh, sizeof(*nan_iov_data));
  881. if (!nan_iov_data) {
  882. WL_ERR(("%s: memory allocation failed\n", __func__));
  883. ret = BCME_NOMEM;
  884. goto fail;
  885. }
  886. nan_iov_data->nan_iov_len = NAN_IOCTL_BUF_SIZE;
  887. nan_buf->version = htol16(WL_NAN_IOV_BATCH_VERSION);
  888. nan_buf->count = 0;
  889. nan_iov_data->nan_iov_buf = (uint8 *)(&nan_buf->cmds[0]);
  890. nan_iov_data->nan_iov_len -= OFFSETOF(bcm_iov_batch_buf_t, cmds[0]);
  891. sub_cmd = (bcm_iov_batch_subcmd_t*)(nan_iov_data->nan_iov_buf);
  892. ret = wl_cfg_nan_check_cmd_len(nan_iov_data->nan_iov_len,
  893. sizeof(*avail), &subcmd_len);
  894. if (unlikely(ret)) {
  895. WL_ERR(("nan_sub_cmd check failed\n"));
  896. goto fail;
  897. }
  898. avail = (wl_avail_t *)sub_cmd->data;
  899. /* populate wl_avail_type */
  900. avail->flags = avail_type;
  901. if (avail_type == WL_AVAIL_RANGING) {
  902. ret = memcpy_s(&avail->addr, ETHER_ADDR_LEN,
  903. &cmd_data->peer_nmi, ETHER_ADDR_LEN);
  904. if (ret != BCME_OK) {
  905. WL_ERR(("Failed to copy peer nmi\n"));
  906. goto fail;
  907. }
  908. }
  909. sub_cmd->len = sizeof(sub_cmd->u.options) + subcmd_len;
  910. sub_cmd->id = htod16(WL_NAN_CMD_CFG_AVAIL);
  911. sub_cmd->u.options = htol32(BCM_XTLV_OPTION_ALIGN32);
  912. nan_buf->is_set = false;
  913. nan_buf->count++;
  914. nan_iov_data->nan_iov_len -= subcmd_len;
  915. nan_buf_size = (NAN_IOCTL_BUF_SIZE - nan_iov_data->nan_iov_len);
  916. WL_TRACE(("Read wl nan avail status\n"));
  917. memset_s(resp_buf, sizeof(resp_buf), 0, sizeof(resp_buf));
  918. ret = wl_cfgnan_execute_ioctl(ndev, cfg, nan_buf, nan_buf_size, &status,
  919. (void*)resp_buf, NAN_IOCTL_BUF_SIZE);
  920. if (unlikely(ret)) {
  921. WL_ERR(("\n Get nan avail failed ret %d, status %d \n", ret, status));
  922. goto fail;
  923. }
  924. if (status == BCME_NOTFOUND) {
  925. nan_buf->count = 0;
  926. nan_iov_data->nan_iov_buf = (uint8 *)(&nan_buf->cmds[0]);
  927. nan_iov_data->nan_iov_len -= OFFSETOF(bcm_iov_batch_buf_t, cmds[0]);
  928. sub_cmd = (bcm_iov_batch_subcmd_t*)(nan_iov_data->nan_iov_buf);
  929. avail = (wl_avail_t *)sub_cmd->data;
  930. p = avail->entry;
  931. /* populate wl_avail fields */
  932. avail->length = OFFSETOF(wl_avail_t, entry);
  933. avail->flags = avail_type;
  934. avail->num_entries = 0;
  935. avail->id = 0;
  936. entry = (wl_avail_entry_t*)p;
  937. entry->flags = WL_AVAIL_ENTRY_COM;
  938. /* set default values for optional parameters */
  939. entry->start_offset = 0;
  940. entry->u.band = 0;
  941. if (cmd_data->avail_period) {
  942. entry->period = cmd_data->avail_period;
  943. } else {
  944. entry->period = WL_AVAIL_PERIOD_1024;
  945. }
  946. if (cmd_data->duration != NAN_BAND_INVALID) {
  947. entry->flags |= (3 << WL_AVAIL_ENTRY_USAGE_SHIFT) |
  948. (cmd_data->duration << WL_AVAIL_ENTRY_BIT_DUR_SHIFT);
  949. } else {
  950. entry->flags |= (3 << WL_AVAIL_ENTRY_USAGE_SHIFT) |
  951. (WL_AVAIL_BIT_DUR_16 << WL_AVAIL_ENTRY_BIT_DUR_SHIFT);
  952. }
  953. entry->bitmap_len = 0;
  954. if (avail_type == WL_AVAIL_LOCAL) {
  955. entry->flags |= 1 << WL_AVAIL_ENTRY_CHAN_SHIFT;
  956. /* Check for 5g support, based on that choose 5g channel */
  957. if (cfg->support_5g) {
  958. entry->u.channel_info =
  959. htod32(wf_channel2chspec(WL_AVAIL_CHANNEL_5G,
  960. WL_AVAIL_BANDWIDTH_5G));
  961. } else {
  962. entry->u.channel_info =
  963. htod32(wf_channel2chspec(WL_AVAIL_CHANNEL_2G,
  964. WL_AVAIL_BANDWIDTH_2G));
  965. }
  966. entry->flags = htod16(entry->flags);
  967. }
  968. if (cfg->support_5g) {
  969. a = WL_5G_AVAIL_BIT_MAP;
  970. }
  971. /* point to bitmap value for processing */
  972. if (cmd_data->bmap) {
  973. for (c = (WL_NAN_EVENT_CLEAR_BIT-1); c >= 0; c--) {
  974. i = cmd_data->bmap >> c;
  975. if (i & 1) {
  976. setbit(entry->bitmap, (WL_NAN_EVENT_CLEAR_BIT-c-1));
  977. }
  978. }
  979. } else {
  980. for (i = 0; i < strlen(WL_AVAIL_BIT_MAP); i++) {
  981. if (*a == '1') {
  982. setbit(entry->bitmap, i);
  983. }
  984. a++;
  985. }
  986. }
  987. /* account for partially filled most significant byte */
  988. entry->bitmap_len = ((WL_NAN_EVENT_CLEAR_BIT) + NBBY - 1) / NBBY;
  989. if (avail_type == WL_AVAIL_NDC) {
  990. ret = memcpy_s(&avail->addr, ETHER_ADDR_LEN,
  991. ndc_id, ETHER_ADDR_LEN);
  992. if (ret != BCME_OK) {
  993. WL_ERR(("Failed to copy ndc id\n"));
  994. goto fail;
  995. }
  996. } else if (avail_type == WL_AVAIL_RANGING) {
  997. ret = memcpy_s(&avail->addr, ETHER_ADDR_LEN,
  998. &cmd_data->peer_nmi, ETHER_ADDR_LEN);
  999. if (ret != BCME_OK) {
  1000. WL_ERR(("Failed to copy peer nmi\n"));
  1001. goto fail;
  1002. }
  1003. }
  1004. /* account for partially filled most significant byte */
  1005. /* update wl_avail and populate wl_avail_entry */
  1006. entry->length = OFFSETOF(wl_avail_entry_t, bitmap) + entry->bitmap_len;
  1007. avail->num_entries++;
  1008. avail->length += entry->length;
  1009. /* advance pointer for next entry */
  1010. p += entry->length;
  1011. /* convert to dongle endianness */
  1012. entry->length = htod16(entry->length);
  1013. entry->start_offset = htod16(entry->start_offset);
  1014. entry->u.channel_info = htod32(entry->u.channel_info);
  1015. entry->flags = htod16(entry->flags);
  1016. /* update avail_len only if
  1017. * there are avail entries
  1018. */
  1019. if (avail->num_entries) {
  1020. nan_iov_data->nan_iov_len -= avail->length;
  1021. avail->length = htod16(avail->length);
  1022. avail->flags = htod16(avail->flags);
  1023. }
  1024. avail->length = htod16(avail->length);
  1025. sub_cmd->id = htod16(WL_NAN_CMD_CFG_AVAIL);
  1026. sub_cmd->len = sizeof(sub_cmd->u.options) + avail->length;
  1027. sub_cmd->u.options = htol32(BCM_XTLV_OPTION_ALIGN32);
  1028. nan_buf->is_set = true;
  1029. nan_buf->count++;
  1030. /* Reduce the iov_len size by subcmd_len */
  1031. nan_iov_data->nan_iov_len -= subcmd_len;
  1032. nan_buf_size = (NAN_IOCTL_BUF_SIZE - nan_iov_data->nan_iov_len);
  1033. ret = wl_cfgnan_execute_ioctl(ndev, cfg, nan_buf, nan_buf_size, &status,
  1034. (void*)resp_buf, NAN_IOCTL_BUF_SIZE);
  1035. if (unlikely(ret) || unlikely(status)) {
  1036. WL_ERR(("\n set nan avail failed ret %d status %d \n", ret, status));
  1037. ret = status;
  1038. goto fail;
  1039. }
  1040. } else if (status == BCME_OK) {
  1041. WL_DBG(("Avail type [%d] found to be configured\n", avail_type));
  1042. } else {
  1043. WL_ERR(("set nan avail failed ret %d status %d \n", ret, status));
  1044. }
  1045. fail:
  1046. if (nan_buf) {
  1047. MFREE(cfg->osh, nan_buf, NAN_IOCTL_BUF_SIZE);
  1048. }
  1049. if (nan_iov_data) {
  1050. MFREE(cfg->osh, nan_iov_data, sizeof(*nan_iov_data));
  1051. }
  1052. NAN_DBG_EXIT();
  1053. return ret;
  1054. }
/*
 * wl_cfgnan_config_control_flag() - read-modify-write one flag in the
 * firmware NAN config control word (WL_NAN_CMD_CFG_NAN_CONFIG).
 *
 * Performs a GET of the current control word, ORs in (set == true) or
 * clears (set == false) 'flag', then re-issues the same pre-built batch
 * sub-command as a SET with the updated word.
 *
 * @status: out-param; receives the firmware status of the last executed
 *          batch (also valid on the early GET-failure path).
 * Returns BCME_OK, BCME_NOMEM, or the ioctl error code.
 */
static int
wl_cfgnan_config_control_flag(struct net_device *ndev, struct bcm_cfg80211 *cfg,
	uint32 flag, uint32 *status, bool set)
{
	bcm_iov_batch_buf_t *nan_buf = NULL;
	s32 ret = BCME_OK;
	uint16 nan_iov_start, nan_iov_end;
	uint16 nan_buf_size = NAN_IOCTL_BUF_SIZE;
	uint16 subcmd_len;
	bcm_iov_batch_subcmd_t *sub_cmd = NULL;
	bcm_iov_batch_subcmd_t *sub_cmd_resp = NULL;
	wl_nan_iov_t *nan_iov_data = NULL;
	uint32 cfg_ctrl;
	uint8 resp_buf[NAN_IOCTL_BUF_SIZE];
	NAN_DBG_ENTER();
	WL_INFORM_MEM(("%s: Modifying nan ctrl flag %x val %d",
		__FUNCTION__, flag, set));
	nan_buf = MALLOCZ(cfg->osh, nan_buf_size);
	if (!nan_buf) {
		WL_ERR(("%s: memory allocation failed\n", __func__));
		ret = BCME_NOMEM;
		goto fail;
	}
	nan_iov_data = MALLOCZ(cfg->osh, sizeof(*nan_iov_data));
	if (!nan_iov_data) {
		WL_ERR(("%s: memory allocation failed\n", __func__));
		ret = BCME_NOMEM;
		goto fail;
	}
	/* Build a single-subcommand batch buffer */
	nan_iov_data->nan_iov_len = nan_iov_start = NAN_IOCTL_BUF_SIZE;
	nan_buf->version = htol16(WL_NAN_IOV_BATCH_VERSION);
	nan_buf->count = 0;
	nan_iov_data->nan_iov_buf = (uint8 *)(&nan_buf->cmds[0]);
	nan_iov_data->nan_iov_len -= OFFSETOF(bcm_iov_batch_buf_t, cmds[0]);
	sub_cmd = (bcm_iov_batch_subcmd_t*)(nan_iov_data->nan_iov_buf);
	ret = wl_cfg_nan_check_cmd_len(nan_iov_data->nan_iov_len,
		sizeof(cfg_ctrl), &subcmd_len);
	if (unlikely(ret)) {
		WL_ERR(("nan_sub_cmd check failed\n"));
		goto fail;
	}
	sub_cmd->id = htod16(WL_NAN_CMD_CFG_NAN_CONFIG);
	sub_cmd->len = sizeof(sub_cmd->u.options) + sizeof(cfg_ctrl);
	sub_cmd->u.options = htol32(BCM_XTLV_OPTION_ALIGN32);
	/* First pass: GET the current control word */
	nan_buf->is_set = false;
	nan_buf->count++;
	/* Reduce the iov_len size by subcmd_len */
	nan_iov_data->nan_iov_len -= subcmd_len;
	nan_iov_end = nan_iov_data->nan_iov_len;
	nan_buf_size = (nan_iov_start - nan_iov_end);
	memset_s(resp_buf, sizeof(resp_buf), 0, sizeof(resp_buf));
	ret = wl_cfgnan_execute_ioctl(ndev, cfg, nan_buf, nan_buf_size, status,
		(void*)resp_buf, NAN_IOCTL_BUF_SIZE);
	if (unlikely(ret) || unlikely(*status)) {
		WL_ERR(("get nan cfg ctrl failed ret %d status %d \n", ret, *status));
		goto fail;
	}
	sub_cmd_resp = &((bcm_iov_batch_buf_t *)(resp_buf))->cmds[0];
	/* check the response buff */
	/* NOTE(review): direct uint32 read of data[0] assumes the payload is
	 * 4-byte aligned and already in host byte order; other paths in this
	 * file memcpy_s out of response buffers instead - confirm.
	 */
	cfg_ctrl = (*(uint32 *)&sub_cmd_resp->data[0]);
	if (set) {
		cfg_ctrl |= flag;	/* turn the requested flag on */
	} else {
		cfg_ctrl &= ~flag;	/* turn the requested flag off */
	}
	ret = memcpy_s(sub_cmd->data, sizeof(cfg_ctrl),
		&cfg_ctrl, sizeof(cfg_ctrl));
	if (ret != BCME_OK) {
		WL_ERR(("Failed to copy cfg ctrl\n"));
		goto fail;
	}
	/* Second pass: SET the updated word, reusing the same sub-command */
	nan_buf->is_set = true;
	ret = wl_cfgnan_execute_ioctl(ndev, cfg, nan_buf, nan_buf_size, status,
		(void*)resp_buf, NAN_IOCTL_BUF_SIZE);
	if (unlikely(ret) || unlikely(*status)) {
		WL_ERR(("set nan cfg ctrl failed ret %d status %d \n", ret, *status));
		goto fail;
	}
	WL_DBG(("set nan cfg ctrl successfull\n"));
fail:
	if (nan_buf) {
		MFREE(cfg->osh, nan_buf, NAN_IOCTL_BUF_SIZE);
	}
	if (nan_iov_data) {
		MFREE(cfg->osh, nan_iov_data, sizeof(*nan_iov_data));
	}
	NAN_DBG_EXIT();
	return ret;
}
  1144. static int
  1145. wl_cfgnan_get_iovars_status(void *ctx, const uint8 *data, uint16 type, uint16 len)
  1146. {
  1147. bcm_iov_batch_buf_t *b_resp = (bcm_iov_batch_buf_t *)ctx;
  1148. uint32 status;
  1149. /* if all tlvs are parsed, we should not be here */
  1150. if (b_resp->count == 0) {
  1151. return BCME_BADLEN;
  1152. }
  1153. /* cbfn params may be used in f/w */
  1154. if (len < sizeof(status)) {
  1155. return BCME_BUFTOOSHORT;
  1156. }
  1157. /* first 4 bytes consists status */
  1158. if (memcpy_s(&status, sizeof(status),
  1159. data, sizeof(uint32)) != BCME_OK) {
  1160. WL_ERR(("Failed to copy status\n"));
  1161. goto exit;
  1162. }
  1163. status = dtoh32(status);
  1164. /* If status is non zero */
  1165. if (status != BCME_OK) {
  1166. printf("cmd type %d failed, status: %04x\n", type, status);
  1167. goto exit;
  1168. }
  1169. if (b_resp->count > 0) {
  1170. b_resp->count--;
  1171. }
  1172. if (!b_resp->count) {
  1173. status = BCME_IOV_LAST_CMD;
  1174. }
  1175. exit:
  1176. return status;
  1177. }
  1178. static int
  1179. wl_cfgnan_execute_ioctl(struct net_device *ndev, struct bcm_cfg80211 *cfg,
  1180. bcm_iov_batch_buf_t *nan_buf, uint16 nan_buf_size, uint32 *status,
  1181. uint8 *resp_buf, uint16 resp_buf_size)
  1182. {
  1183. int ret = BCME_OK;
  1184. uint16 tlvs_len;
  1185. int res = BCME_OK;
  1186. bcm_iov_batch_buf_t *p_resp = NULL;
  1187. char *iov = "nan";
  1188. int max_resp_len = WLC_IOCTL_MAXLEN;
  1189. WL_DBG(("Enter:\n"));
  1190. if (nan_buf->is_set) {
  1191. ret = wldev_iovar_setbuf(ndev, "nan", nan_buf, nan_buf_size,
  1192. resp_buf, resp_buf_size, NULL);
  1193. p_resp = (bcm_iov_batch_buf_t *)(resp_buf + strlen(iov) + 1);
  1194. } else {
  1195. ret = wldev_iovar_getbuf(ndev, "nan", nan_buf, nan_buf_size,
  1196. resp_buf, resp_buf_size, NULL);
  1197. p_resp = (bcm_iov_batch_buf_t *)(resp_buf);
  1198. }
  1199. if (unlikely(ret)) {
  1200. WL_ERR((" nan execute ioctl failed, error = %d \n", ret));
  1201. goto fail;
  1202. }
  1203. p_resp->is_set = nan_buf->is_set;
  1204. tlvs_len = max_resp_len - OFFSETOF(bcm_iov_batch_buf_t, cmds[0]);
  1205. /* Extract the tlvs and print their resp in cb fn */
  1206. res = bcm_unpack_xtlv_buf((void *)p_resp, (const uint8 *)&p_resp->cmds[0],
  1207. tlvs_len, BCM_IOV_CMD_OPT_ALIGN32, wl_cfgnan_get_iovars_status);
  1208. if (res == BCME_IOV_LAST_CMD) {
  1209. res = BCME_OK;
  1210. }
  1211. fail:
  1212. *status = res;
  1213. WL_DBG((" nan ioctl ret %d status %d \n", ret, *status));
  1214. return ret;
  1215. }
  1216. static int
  1217. wl_cfgnan_if_addr_handler(void *p_buf, uint16 *nan_buf_size,
  1218. struct ether_addr *if_addr)
  1219. {
  1220. /* nan enable */
  1221. s32 ret = BCME_OK;
  1222. uint16 subcmd_len;
  1223. NAN_DBG_ENTER();
  1224. if (p_buf != NULL) {
  1225. bcm_iov_batch_subcmd_t *sub_cmd = (bcm_iov_batch_subcmd_t*)(p_buf);
  1226. ret = wl_cfg_nan_check_cmd_len(*nan_buf_size,
  1227. sizeof(*if_addr), &subcmd_len);
  1228. if (unlikely(ret)) {
  1229. WL_ERR(("nan_sub_cmd check failed\n"));
  1230. goto fail;
  1231. }
  1232. /* Fill the sub_command block */
  1233. sub_cmd->id = htod16(WL_NAN_CMD_CFG_IF_ADDR);
  1234. sub_cmd->len = sizeof(sub_cmd->u.options) + sizeof(*if_addr);
  1235. sub_cmd->u.options = htol32(BCM_XTLV_OPTION_ALIGN32);
  1236. ret = memcpy_s(sub_cmd->data, sizeof(*if_addr),
  1237. (uint8 *)if_addr, sizeof(*if_addr));
  1238. if (ret != BCME_OK) {
  1239. WL_ERR(("Failed to copy if addr\n"));
  1240. goto fail;
  1241. }
  1242. *nan_buf_size -= subcmd_len;
  1243. } else {
  1244. WL_ERR(("nan_iov_buf is NULL\n"));
  1245. ret = BCME_ERROR;
  1246. goto fail;
  1247. }
  1248. fail:
  1249. NAN_DBG_EXIT();
  1250. return ret;
  1251. }
  1252. static int
  1253. wl_cfgnan_get_ver(struct net_device *ndev, struct bcm_cfg80211 *cfg)
  1254. {
  1255. bcm_iov_batch_buf_t *nan_buf = NULL;
  1256. s32 ret = BCME_OK;
  1257. uint16 nan_buf_size = NAN_IOCTL_BUF_SIZE;
  1258. wl_nan_ver_t *nan_ver = NULL;
  1259. uint16 subcmd_len;
  1260. uint32 status;
  1261. bcm_iov_batch_subcmd_t *sub_cmd = NULL;
  1262. bcm_iov_batch_subcmd_t *sub_cmd_resp = NULL;
  1263. uint8 resp_buf[NAN_IOCTL_BUF_SIZE];
  1264. NAN_DBG_ENTER();
  1265. nan_buf = MALLOCZ(cfg->osh, nan_buf_size);
  1266. if (!nan_buf) {
  1267. WL_ERR(("%s: memory allocation failed\n", __func__));
  1268. ret = BCME_NOMEM;
  1269. goto fail;
  1270. }
  1271. nan_buf->version = htol16(WL_NAN_IOV_BATCH_VERSION);
  1272. nan_buf->count = 0;
  1273. nan_buf_size -= OFFSETOF(bcm_iov_batch_buf_t, cmds[0]);
  1274. sub_cmd = (bcm_iov_batch_subcmd_t*)(uint8 *)(&nan_buf->cmds[0]);
  1275. ret = wl_cfg_nan_check_cmd_len(nan_buf_size,
  1276. sizeof(*nan_ver), &subcmd_len);
  1277. if (unlikely(ret)) {
  1278. WL_ERR(("nan_sub_cmd check failed\n"));
  1279. goto fail;
  1280. }
  1281. nan_ver = (wl_nan_ver_t *)sub_cmd->data;
  1282. sub_cmd->id = htod16(WL_NAN_CMD_GLB_NAN_VER);
  1283. sub_cmd->len = sizeof(sub_cmd->u.options) + sizeof(*nan_ver);
  1284. sub_cmd->u.options = htol32(BCM_XTLV_OPTION_ALIGN32);
  1285. nan_buf_size -= subcmd_len;
  1286. nan_buf->count = 1;
  1287. nan_buf->is_set = false;
  1288. bzero(resp_buf, sizeof(resp_buf));
  1289. nan_buf_size = NAN_IOCTL_BUF_SIZE - nan_buf_size;
  1290. ret = wl_cfgnan_execute_ioctl(ndev, cfg, nan_buf, nan_buf_size, &status,
  1291. (void*)resp_buf, NAN_IOCTL_BUF_SIZE);
  1292. if (unlikely(ret) || unlikely(status)) {
  1293. WL_ERR(("get nan ver failed ret %d status %d \n",
  1294. ret, status));
  1295. goto fail;
  1296. }
  1297. sub_cmd_resp = &((bcm_iov_batch_buf_t *)(resp_buf))->cmds[0];
  1298. nan_ver = ((wl_nan_ver_t *)&sub_cmd_resp->data[0]);
  1299. if (!nan_ver) {
  1300. ret = BCME_NOTFOUND;
  1301. WL_ERR(("nan_ver not found: err = %d\n", ret));
  1302. goto fail;
  1303. }
  1304. cfg->nancfg.version = *nan_ver;
  1305. WL_INFORM_MEM(("Nan Version is %d\n", cfg->nancfg.version));
  1306. fail:
  1307. if (nan_buf) {
  1308. MFREE(cfg->osh, nan_buf, NAN_IOCTL_BUF_SIZE);
  1309. }
  1310. NAN_DBG_EXIT();
  1311. return ret;
  1312. }
/*
 * wl_cfgnan_set_if_addr() - choose and program the NAN Management
 * Interface (NMI) MAC address.
 *
 * Either generates a random locally-administered unicast address (when
 * cfg->nancfg.mac_rand is set) or derives one from the primary MAC via
 * wl_get_vif_macaddr(), then pushes it to firmware through
 * wl_cfgnan_if_addr_handler() and mirrors it into cfg->nan_nmi_mac on
 * success.
 */
static int
wl_cfgnan_set_if_addr(struct bcm_cfg80211 *cfg)
{
	s32 ret = BCME_OK;
	uint16 nan_buf_size = NAN_IOCTL_BUF_SIZE;
	uint32 status;
	uint8 resp_buf[NAN_IOCTL_BUF_SIZE];
	struct ether_addr if_addr;
	uint8 buf[NAN_IOCTL_BUF_SIZE];	/* batch command built on the stack */
	bcm_iov_batch_buf_t *nan_buf = (bcm_iov_batch_buf_t*)buf;
	bool rand_mac = cfg->nancfg.mac_rand;
	nan_buf->version = htol16(WL_NAN_IOV_BATCH_VERSION);
	nan_buf->count = 0;
	nan_buf_size -= OFFSETOF(bcm_iov_batch_buf_t, cmds[0]);
	if (rand_mac) {
		/* NOTE(review): magic 6 is presumably ETHER_ADDR_LEN -
		 * consider using the named constant.
		 */
		RANDOM_BYTES(if_addr.octet, 6);
		/* restore mcast and local admin bits to 0 and 1 */
		ETHER_SET_UNICAST(if_addr.octet);
		ETHER_SET_LOCALADDR(if_addr.octet);
	} else {
		/* Use primary MAC with the locally administered bit for the
		 * NAN NMI I/F
		 */
		if (wl_get_vif_macaddr(cfg, WL_IF_TYPE_NAN_NMI,
			if_addr.octet) != BCME_OK) {
			ret = -EINVAL;
			WL_ERR(("Failed to get mac addr for NMI\n"));
			goto fail;
		}
	}
	WL_INFORM_MEM(("%s: NMI " MACDBG "\n",
		__FUNCTION__, MAC2STRDBG(if_addr.octet)));
	/* Append the if-addr sub-command to the batch buffer */
	ret = wl_cfgnan_if_addr_handler(&nan_buf->cmds[0],
		&nan_buf_size, &if_addr);
	if (unlikely(ret)) {
		WL_ERR(("Nan if addr handler sub_cmd set failed\n"));
		goto fail;
	}
	nan_buf->count++;
	nan_buf->is_set = true;
	/* nan_buf_size now holds the used portion of the batch buffer */
	nan_buf_size = NAN_IOCTL_BUF_SIZE - nan_buf_size;
	memset_s(resp_buf, sizeof(resp_buf), 0, sizeof(resp_buf));
	ret = wl_cfgnan_execute_ioctl(bcmcfg_to_prmry_ndev(cfg), cfg,
		nan_buf, nan_buf_size, &status,
		(void*)resp_buf, NAN_IOCTL_BUF_SIZE);
	if (unlikely(ret) || unlikely(status)) {
		WL_ERR(("nan if addr handler failed ret %d status %d\n",
			ret, status));
		goto fail;
	}
	/* cache the programmed NMI address for later use */
	ret = memcpy_s(cfg->nan_nmi_mac, ETH_ALEN,
		if_addr.octet, ETH_ALEN);
	if (ret != BCME_OK) {
		WL_ERR(("Failed to copy nmi addr\n"));
		goto fail;
	}
	return ret;
fail:
	/* NOTE(review): this release also runs when wl_get_vif_macaddr()
	 * itself failed, i.e. before an address was acquired - confirm
	 * wl_release_vif_macaddr() tolerates that case.
	 */
	if (!rand_mac) {
		wl_release_vif_macaddr(cfg, if_addr.octet, WL_IF_TYPE_NAN_NMI);
	}
	return ret;
}
  1376. static int
  1377. wl_cfgnan_init_handler(void *p_buf, uint16 *nan_buf_size, bool val)
  1378. {
  1379. /* nan enable */
  1380. s32 ret = BCME_OK;
  1381. uint16 subcmd_len;
  1382. NAN_DBG_ENTER();
  1383. if (p_buf != NULL) {
  1384. bcm_iov_batch_subcmd_t *sub_cmd = (bcm_iov_batch_subcmd_t*)(p_buf);
  1385. ret = wl_cfg_nan_check_cmd_len(*nan_buf_size,
  1386. sizeof(val), &subcmd_len);
  1387. if (unlikely(ret)) {
  1388. WL_ERR(("nan_sub_cmd check failed\n"));
  1389. goto fail;
  1390. }
  1391. /* Fill the sub_command block */
  1392. sub_cmd->id = htod16(WL_NAN_CMD_CFG_NAN_INIT);
  1393. sub_cmd->len = sizeof(sub_cmd->u.options) + sizeof(uint8);
  1394. sub_cmd->u.options = htol32(BCM_XTLV_OPTION_ALIGN32);
  1395. ret = memcpy_s(sub_cmd->data, sizeof(uint8),
  1396. (uint8*)&val, sizeof(uint8));
  1397. if (ret != BCME_OK) {
  1398. WL_ERR(("Failed to copy init value\n"));
  1399. goto fail;
  1400. }
  1401. *nan_buf_size -= subcmd_len;
  1402. } else {
  1403. WL_ERR(("nan_iov_buf is NULL\n"));
  1404. ret = BCME_ERROR;
  1405. goto fail;
  1406. }
  1407. fail:
  1408. NAN_DBG_EXIT();
  1409. return ret;
  1410. }
  1411. static int
  1412. wl_cfgnan_enable_handler(wl_nan_iov_t *nan_iov_data, bool val)
  1413. {
  1414. /* nan enable */
  1415. s32 ret = BCME_OK;
  1416. bcm_iov_batch_subcmd_t *sub_cmd = NULL;
  1417. uint16 subcmd_len;
  1418. NAN_DBG_ENTER();
  1419. sub_cmd = (bcm_iov_batch_subcmd_t*)(nan_iov_data->nan_iov_buf);
  1420. ret = wl_cfg_nan_check_cmd_len(nan_iov_data->nan_iov_len,
  1421. sizeof(val), &subcmd_len);
  1422. if (unlikely(ret)) {
  1423. WL_ERR(("nan_sub_cmd check failed\n"));
  1424. return ret;
  1425. }
  1426. /* Fill the sub_command block */
  1427. sub_cmd->id = htod16(WL_NAN_CMD_CFG_NAN_ENAB);
  1428. sub_cmd->len = sizeof(sub_cmd->u.options) + sizeof(uint8);
  1429. sub_cmd->u.options = htol32(BCM_XTLV_OPTION_ALIGN32);
  1430. ret = memcpy_s(sub_cmd->data, sizeof(uint8),
  1431. (uint8*)&val, sizeof(uint8));
  1432. if (ret != BCME_OK) {
  1433. WL_ERR(("Failed to copy enab value\n"));
  1434. return ret;
  1435. }
  1436. nan_iov_data->nan_iov_len -= subcmd_len;
  1437. nan_iov_data->nan_iov_buf += subcmd_len;
  1438. NAN_DBG_EXIT();
  1439. return ret;
  1440. }
  1441. static int
  1442. wl_cfgnan_warmup_time_handler(nan_config_cmd_data_t *cmd_data,
  1443. wl_nan_iov_t *nan_iov_data)
  1444. {
  1445. /* wl nan warm_up_time */
  1446. s32 ret = BCME_OK;
  1447. bcm_iov_batch_subcmd_t *sub_cmd = NULL;
  1448. wl_nan_warmup_time_ticks_t *wup_ticks = NULL;
  1449. uint16 subcmd_len;
  1450. NAN_DBG_ENTER();
  1451. sub_cmd = (bcm_iov_batch_subcmd_t*)(nan_iov_data->nan_iov_buf);
  1452. wup_ticks = (wl_nan_warmup_time_ticks_t *)sub_cmd->data;
  1453. ret = wl_cfg_nan_check_cmd_len(nan_iov_data->nan_iov_len,
  1454. sizeof(*wup_ticks), &subcmd_len);
  1455. if (unlikely(ret)) {
  1456. WL_ERR(("nan_sub_cmd check failed\n"));
  1457. return ret;
  1458. }
  1459. /* Fill the sub_command block */
  1460. sub_cmd->id = htod16(WL_NAN_CMD_CFG_WARMUP_TIME);
  1461. sub_cmd->len = sizeof(sub_cmd->u.options) +
  1462. sizeof(*wup_ticks);
  1463. sub_cmd->u.options = htol32(BCM_XTLV_OPTION_ALIGN32);
  1464. *wup_ticks = cmd_data->warmup_time;
  1465. nan_iov_data->nan_iov_len -= subcmd_len;
  1466. nan_iov_data->nan_iov_buf += subcmd_len;
  1467. NAN_DBG_EXIT();
  1468. return ret;
  1469. }
  1470. static int
  1471. wl_cfgnan_set_election_metric(nan_config_cmd_data_t *cmd_data,
  1472. wl_nan_iov_t *nan_iov_data, uint32 nan_attr_mask)
  1473. {
  1474. s32 ret = BCME_OK;
  1475. bcm_iov_batch_subcmd_t *sub_cmd = NULL;
  1476. wl_nan_election_metric_config_t *metrics = NULL;
  1477. uint16 subcmd_len;
  1478. NAN_DBG_ENTER();
  1479. sub_cmd =
  1480. (bcm_iov_batch_subcmd_t*)(nan_iov_data->nan_iov_buf);
  1481. ret = wl_cfg_nan_check_cmd_len(nan_iov_data->nan_iov_len,
  1482. sizeof(*metrics), &subcmd_len);
  1483. if (unlikely(ret)) {
  1484. WL_ERR(("nan_sub_cmd check failed\n"));
  1485. goto fail;
  1486. }
  1487. metrics = (wl_nan_election_metric_config_t *)sub_cmd->data;
  1488. if (nan_attr_mask & NAN_ATTR_RAND_FACTOR_CONFIG) {
  1489. metrics->random_factor = (uint8)cmd_data->metrics.random_factor;
  1490. }
  1491. if ((!cmd_data->metrics.master_pref) ||
  1492. (cmd_data->metrics.master_pref > NAN_MAXIMUM_MASTER_PREFERENCE)) {
  1493. WL_TRACE(("Master Pref is 0 or greater than 254, hence sending random value\n"));
  1494. /* Master pref for mobile devices can be from 1 - 127 as per Spec AppendixC */
  1495. metrics->master_pref = (RANDOM32()%(NAN_MAXIMUM_MASTER_PREFERENCE/2)) + 1;
  1496. } else {
  1497. metrics->master_pref = (uint8)cmd_data->metrics.master_pref;
  1498. }
  1499. sub_cmd->id = htod16(WL_NAN_CMD_ELECTION_METRICS_CONFIG);
  1500. sub_cmd->len = sizeof(sub_cmd->u.options) +
  1501. sizeof(*metrics);
  1502. sub_cmd->u.options = htol32(BCM_XTLV_OPTION_ALIGN32);
  1503. nan_iov_data->nan_iov_len -= subcmd_len;
  1504. nan_iov_data->nan_iov_buf += subcmd_len;
  1505. fail:
  1506. NAN_DBG_EXIT();
  1507. return ret;
  1508. }
  1509. static int
  1510. wl_cfgnan_set_rssi_proximity(nan_config_cmd_data_t *cmd_data,
  1511. wl_nan_iov_t *nan_iov_data, uint32 nan_attr_mask)
  1512. {
  1513. s32 ret = BCME_OK;
  1514. bcm_iov_batch_subcmd_t *sub_cmd = NULL;
  1515. wl_nan_rssi_notif_thld_t *rssi_notif_thld = NULL;
  1516. uint16 subcmd_len;
  1517. NAN_DBG_ENTER();
  1518. sub_cmd = (bcm_iov_batch_subcmd_t*)(nan_iov_data->nan_iov_buf);
  1519. rssi_notif_thld = (wl_nan_rssi_notif_thld_t *)sub_cmd->data;
  1520. ret = wl_cfg_nan_check_cmd_len(nan_iov_data->nan_iov_len,
  1521. sizeof(*rssi_notif_thld), &subcmd_len);
  1522. if (unlikely(ret)) {
  1523. WL_ERR(("nan_sub_cmd check failed\n"));
  1524. return ret;
  1525. }
  1526. if (nan_attr_mask & NAN_ATTR_RSSI_PROXIMITY_2G_CONFIG) {
  1527. rssi_notif_thld->bcn_rssi_2g =
  1528. cmd_data->rssi_attr.rssi_proximity_2dot4g_val;
  1529. } else {
  1530. /* Keeping RSSI threshold value to be -70dBm */
  1531. rssi_notif_thld->bcn_rssi_2g = NAN_DEF_RSSI_NOTIF_THRESH;
  1532. }
  1533. if (nan_attr_mask & NAN_ATTR_RSSI_PROXIMITY_5G_CONFIG) {
  1534. rssi_notif_thld->bcn_rssi_5g =
  1535. cmd_data->rssi_attr.rssi_proximity_5g_val;
  1536. } else {
  1537. /* Keeping RSSI threshold value to be -70dBm */
  1538. rssi_notif_thld->bcn_rssi_5g = NAN_DEF_RSSI_NOTIF_THRESH;
  1539. }
  1540. sub_cmd->id = htod16(WL_NAN_CMD_SYNC_BCN_RSSI_NOTIF_THRESHOLD);
  1541. sub_cmd->len = htod16(sizeof(sub_cmd->u.options) + sizeof(*rssi_notif_thld));
  1542. sub_cmd->u.options = htod32(BCM_XTLV_OPTION_ALIGN32);
  1543. nan_iov_data->nan_iov_len -= subcmd_len;
  1544. nan_iov_data->nan_iov_buf += subcmd_len;
  1545. NAN_DBG_EXIT();
  1546. return ret;
  1547. }
  1548. static int
  1549. wl_cfgnan_set_rssi_mid_or_close(nan_config_cmd_data_t *cmd_data,
  1550. wl_nan_iov_t *nan_iov_data, uint32 nan_attr_mask)
  1551. {
  1552. s32 ret = BCME_OK;
  1553. bcm_iov_batch_subcmd_t *sub_cmd = NULL;
  1554. wl_nan_rssi_thld_t *rssi_thld = NULL;
  1555. uint16 subcmd_len;
  1556. NAN_DBG_ENTER();
  1557. sub_cmd = (bcm_iov_batch_subcmd_t*)(nan_iov_data->nan_iov_buf);
  1558. rssi_thld = (wl_nan_rssi_thld_t *)sub_cmd->data;
  1559. ret = wl_cfg_nan_check_cmd_len(nan_iov_data->nan_iov_len,
  1560. sizeof(*rssi_thld), &subcmd_len);
  1561. if (unlikely(ret)) {
  1562. WL_ERR(("nan_sub_cmd check failed\n"));
  1563. return ret;
  1564. }
  1565. /*
  1566. * Keeping RSSI mid value -75dBm for both 2G and 5G
  1567. * Keeping RSSI close value -60dBm for both 2G and 5G
  1568. */
  1569. if (nan_attr_mask & NAN_ATTR_RSSI_MIDDLE_2G_CONFIG) {
  1570. rssi_thld->rssi_mid_2g =
  1571. cmd_data->rssi_attr.rssi_middle_2dot4g_val;
  1572. } else {
  1573. rssi_thld->rssi_mid_2g = NAN_DEF_RSSI_MID;
  1574. }
  1575. if (nan_attr_mask & NAN_ATTR_RSSI_MIDDLE_5G_CONFIG) {
  1576. rssi_thld->rssi_mid_5g =
  1577. cmd_data->rssi_attr.rssi_middle_5g_val;
  1578. } else {
  1579. rssi_thld->rssi_mid_5g = NAN_DEF_RSSI_MID;
  1580. }
  1581. if (nan_attr_mask & NAN_ATTR_RSSI_CLOSE_CONFIG) {
  1582. rssi_thld->rssi_close_2g =
  1583. cmd_data->rssi_attr.rssi_close_2dot4g_val;
  1584. } else {
  1585. rssi_thld->rssi_close_2g = NAN_DEF_RSSI_CLOSE;
  1586. }
  1587. if (nan_attr_mask & NAN_ATTR_RSSI_CLOSE_5G_CONFIG) {
  1588. rssi_thld->rssi_close_5g =
  1589. cmd_data->rssi_attr.rssi_close_5g_val;
  1590. } else {
  1591. rssi_thld->rssi_close_5g = NAN_DEF_RSSI_CLOSE;
  1592. }
  1593. sub_cmd->id = htod16(WL_NAN_CMD_ELECTION_RSSI_THRESHOLD);
  1594. sub_cmd->len = htod16(sizeof(sub_cmd->u.options) + sizeof(*rssi_thld));
  1595. sub_cmd->u.options = htod32(BCM_XTLV_OPTION_ALIGN32);
  1596. nan_iov_data->nan_iov_len -= subcmd_len;
  1597. nan_iov_data->nan_iov_buf += subcmd_len;
  1598. NAN_DBG_EXIT();
  1599. return ret;
  1600. }
  1601. static int
  1602. check_for_valid_5gchan(struct net_device *ndev, uint8 chan)
  1603. {
  1604. s32 ret = BCME_OK;
  1605. uint bitmap;
  1606. u8 ioctl_buf[WLC_IOCTL_SMLEN];
  1607. uint32 chanspec_arg;
  1608. NAN_DBG_ENTER();
  1609. chanspec_arg = CH20MHZ_CHSPEC(chan);
  1610. chanspec_arg = wl_chspec_host_to_driver(chanspec_arg);
  1611. memset_s(ioctl_buf, WLC_IOCTL_SMLEN, 0, WLC_IOCTL_SMLEN);
  1612. ret = wldev_iovar_getbuf(ndev, "per_chan_info",
  1613. (void *)&chanspec_arg, sizeof(chanspec_arg),
  1614. ioctl_buf, WLC_IOCTL_SMLEN, NULL);
  1615. if (ret != BCME_OK) {
  1616. WL_ERR(("Chaninfo for channel = %d, error %d\n", chan, ret));
  1617. goto exit;
  1618. }
  1619. bitmap = dtoh32(*(uint *)ioctl_buf);
  1620. if (!(bitmap & WL_CHAN_VALID_HW)) {
  1621. WL_ERR(("Invalid channel\n"));
  1622. ret = BCME_BADCHAN;
  1623. goto exit;
  1624. }
  1625. if (!(bitmap & WL_CHAN_VALID_SW)) {
  1626. WL_ERR(("Not supported in current locale\n"));
  1627. ret = BCME_BADCHAN;
  1628. goto exit;
  1629. }
  1630. exit:
  1631. NAN_DBG_EXIT();
  1632. return ret;
  1633. }
/*
 * wl_cfgnan_set_nan_soc_chans() - fill in the NAN social channels
 * (WL_NAN_CMD_SYNC_SOCIAL_CHAN) sub-command in the iov buffer.
 *
 * The 2G channel comes from cmd_data->chanspec[1] when configured via
 * nan_attr_mask, else the driver default.  When 5G is supported, the 5G
 * channel (chanspec[2] or default) is validated against the current
 * locale, with fallback to the secondary default 5G channel and finally
 * to 2G-only operation.
 */
static int
wl_cfgnan_set_nan_soc_chans(struct net_device *ndev, nan_config_cmd_data_t *cmd_data,
	wl_nan_iov_t *nan_iov_data, uint32 nan_attr_mask)
{
	s32 ret = BCME_OK;
	bcm_iov_batch_subcmd_t *sub_cmd = NULL;
	wl_nan_social_channels_t *soc_chans = NULL;
	uint16 subcmd_len;
	NAN_DBG_ENTER();
	sub_cmd = (bcm_iov_batch_subcmd_t*)(nan_iov_data->nan_iov_buf);
	soc_chans =
		(wl_nan_social_channels_t *)sub_cmd->data;
	ret = wl_cfg_nan_check_cmd_len(nan_iov_data->nan_iov_len,
		sizeof(*soc_chans), &subcmd_len);
	if (unlikely(ret)) {
		WL_ERR(("nan_sub_cmd check failed\n"));
		return ret;
	}
	sub_cmd->id = htod16(WL_NAN_CMD_SYNC_SOCIAL_CHAN);
	sub_cmd->len = sizeof(sub_cmd->u.options) +
		sizeof(*soc_chans);
	sub_cmd->u.options = htol32(BCM_XTLV_OPTION_ALIGN32);
	/* NOTE(review): chanspec[] index layout (1 = 2G, 2 = 5G) is assumed
	 * from usage here - confirm against the producer of cmd_data.
	 */
	if (nan_attr_mask & NAN_ATTR_2G_CHAN_CONFIG) {
		soc_chans->soc_chan_2g = cmd_data->chanspec[1];
	} else {
		soc_chans->soc_chan_2g = NAN_DEF_SOCIAL_CHAN_2G;
	}
	if (cmd_data->support_5g) {
		if (nan_attr_mask & NAN_ATTR_5G_CHAN_CONFIG) {
			soc_chans->soc_chan_5g = cmd_data->chanspec[2];
		} else {
			soc_chans->soc_chan_5g = NAN_DEF_SOCIAL_CHAN_5G;
		}
		/* Validate the chosen 5G channel for the current locale;
		 * fall back to the secondary default, then to 2G-only
		 * (soc_chan_5g = 0) with ret forced back to BCME_OK so the
		 * overall config still proceeds.
		 */
		ret = check_for_valid_5gchan(ndev, soc_chans->soc_chan_5g);
		if (ret != BCME_OK) {
			ret = check_for_valid_5gchan(ndev, NAN_DEF_SEC_SOCIAL_CHAN_5G);
			if (ret == BCME_OK) {
				soc_chans->soc_chan_5g = NAN_DEF_SEC_SOCIAL_CHAN_5G;
			} else {
				soc_chans->soc_chan_5g = 0;
				ret = BCME_OK;
				WL_ERR(("Current locale doesn't support 5G op"
					"continuing with 2G only operation\n"));
			}
		}
	} else {
		WL_DBG(("5G support is disabled\n"));
	}
	nan_iov_data->nan_iov_len -= subcmd_len;
	nan_iov_data->nan_iov_buf += subcmd_len;
	NAN_DBG_EXIT();
	return ret;
}
/*
 * wl_cfgnan_set_nan_scan_params() - push NAN scan parameters for one band
 * to firmware as a single-subcommand batched "nan" iovar.
 *
 * @ndev:          net_device used for the ioctl.
 * @cfg:           driver cfg80211 context (supplies the OS handle for allocs).
 * @cmd_data:      user-supplied NAN config; dwell_time[]/scan_period[] are
 *                 indexed 0 for 2G and 1 for 5G.
 * @band_index:    0 selects the 2G knobs, non-zero selects 5G; also written
 *                 into the firmware structure as-is.
 * @nan_attr_mask: bitmask of attributes the caller actually set; unset
 *                 attributes are left zero here so firmware keeps defaults.
 *
 * Returns BCME_OK or a BCME_* error. Both heap buffers are freed on every
 * exit path via the fail label.
 */
static int
wl_cfgnan_set_nan_scan_params(struct net_device *ndev, struct bcm_cfg80211 *cfg,
	nan_config_cmd_data_t *cmd_data, uint8 band_index, uint32 nan_attr_mask)
{
	bcm_iov_batch_buf_t *nan_buf = NULL;
	s32 ret = BCME_OK;
	uint16 nan_iov_start, nan_iov_end;
	uint16 nan_buf_size = NAN_IOCTL_BUF_SIZE;
	uint16 subcmd_len;
	bcm_iov_batch_subcmd_t *sub_cmd = NULL;
	wl_nan_iov_t *nan_iov_data = NULL;
	uint8 resp_buf[NAN_IOCTL_BUF_SIZE];
	wl_nan_scan_params_t *scan_params = NULL;
	uint32 status;
	NAN_DBG_ENTER();
	nan_buf = MALLOCZ(cfg->osh, nan_buf_size);
	if (!nan_buf) {
		WL_ERR(("%s: memory allocation failed\n", __func__));
		ret = BCME_NOMEM;
		goto fail;
	}
	nan_iov_data = MALLOCZ(cfg->osh, sizeof(*nan_iov_data));
	if (!nan_iov_data) {
		WL_ERR(("%s: memory allocation failed\n", __func__));
		ret = BCME_NOMEM;
		goto fail;
	}
	/* nan_iov_len tracks remaining room in the batch buffer and
	 * nan_iov_buf the next free byte; nan_iov_start is kept to compute
	 * the number of bytes actually consumed at the end.
	 */
	nan_iov_data->nan_iov_len = nan_iov_start = NAN_IOCTL_BUF_SIZE;
	nan_buf->version = htol16(WL_NAN_IOV_BATCH_VERSION);
	nan_buf->count = 0;
	nan_iov_data->nan_iov_buf = (uint8 *)(&nan_buf->cmds[0]);
	nan_iov_data->nan_iov_len -= OFFSETOF(bcm_iov_batch_buf_t, cmds[0]);
	sub_cmd = (bcm_iov_batch_subcmd_t*)(nan_iov_data->nan_iov_buf);
	ret = wl_cfg_nan_check_cmd_len(nan_iov_data->nan_iov_len,
		sizeof(*scan_params), &subcmd_len);
	if (unlikely(ret)) {
		WL_ERR(("nan_sub_cmd check failed\n"));
		goto fail;
	}
	scan_params = (wl_nan_scan_params_t *)sub_cmd->data;
	sub_cmd->id = htod16(WL_NAN_CMD_CFG_SCAN_PARAMS);
	sub_cmd->len = sizeof(sub_cmd->u.options) + sizeof(*scan_params);
	sub_cmd->u.options = htol32(BCM_XTLV_OPTION_ALIGN32);
	if (!band_index) {
		/* Fw default: Dwell time for 2G is 210 */
		if ((nan_attr_mask & NAN_ATTR_2G_DWELL_TIME_CONFIG) &&
			cmd_data->dwell_time[0]) {
			scan_params->dwell_time = cmd_data->dwell_time[0] +
				NAN_SCAN_DWELL_TIME_DELTA_MS;
		}
		/* Fw default: Scan period for 2G is 10 */
		if (nan_attr_mask & NAN_ATTR_2G_SCAN_PERIOD_CONFIG) {
			scan_params->scan_period = cmd_data->scan_period[0];
		}
	} else {
		if ((nan_attr_mask & NAN_ATTR_5G_DWELL_TIME_CONFIG) &&
			cmd_data->dwell_time[1]) {
			scan_params->dwell_time = cmd_data->dwell_time[1] +
				NAN_SCAN_DWELL_TIME_DELTA_MS;
		}
		if (nan_attr_mask & NAN_ATTR_5G_SCAN_PERIOD_CONFIG) {
			scan_params->scan_period = cmd_data->scan_period[1];
		}
	}
	scan_params->band_index = band_index;
	nan_buf->is_set = true;
	nan_buf->count++;
	/* Reduce the iov_len size by subcmd_len */
	nan_iov_data->nan_iov_len -= subcmd_len;
	nan_iov_end = nan_iov_data->nan_iov_len;
	/* Bytes actually filled = starting space minus remaining space */
	nan_buf_size = (nan_iov_start - nan_iov_end);
	memset_s(resp_buf, sizeof(resp_buf), 0, sizeof(resp_buf));
	ret = wl_cfgnan_execute_ioctl(ndev, cfg, nan_buf, nan_buf_size, &status,
		(void*)resp_buf, NAN_IOCTL_BUF_SIZE);
	if (unlikely(ret) || unlikely(status)) {
		WL_ERR(("set nan scan params failed ret %d status %d \n", ret, status));
		goto fail;
	}
	WL_DBG(("set nan scan params successfull\n"));
fail:
	if (nan_buf) {
		MFREE(cfg->osh, nan_buf, NAN_IOCTL_BUF_SIZE);
	}
	if (nan_iov_data) {
		MFREE(cfg->osh, nan_iov_data, sizeof(*nan_iov_data));
	}
	NAN_DBG_EXIT();
	return ret;
}
/*
 * wl_cfgnan_set_cluster_id() - append a WL_NAN_CMD_CFG_CID subcommand
 * carrying the NAN cluster ID the device should join (or start).
 *
 * The first four octets are forced to the Wi-Fi Alliance NAN cluster base
 * address 50:6F:9A:01; only octets 4 and 5 come from the caller (and are
 * modified in place in @cmd_data).
 *
 * On success the iov cursor in @nan_iov_data is advanced by the aligned
 * subcommand length. Returns BCME_OK or a BCME_* error.
 */
static int
wl_cfgnan_set_cluster_id(nan_config_cmd_data_t *cmd_data,
	wl_nan_iov_t *nan_iov_data)
{
	s32 ret = BCME_OK;
	bcm_iov_batch_subcmd_t *sub_cmd = NULL;
	uint16 subcmd_len;
	NAN_DBG_ENTER();
	sub_cmd = (bcm_iov_batch_subcmd_t*)(nan_iov_data->nan_iov_buf);
	/* NOTE(review): the length check reserves one byte less than the full
	 * sizeof(clus_id) copied below. Presumably safe because
	 * wl_cfg_nan_check_cmd_len() rounds the subcommand up for alignment,
	 * but confirm against that helper — siblings pass the full size.
	 */
	ret = wl_cfg_nan_check_cmd_len(nan_iov_data->nan_iov_len,
		(sizeof(cmd_data->clus_id) - sizeof(uint8)), &subcmd_len);
	if (unlikely(ret)) {
		WL_ERR(("nan_sub_cmd check failed\n"));
		return ret;
	}
	/* Wi-Fi Alliance NAN cluster base MAC: 50:6F:9A:01:xx:xx */
	cmd_data->clus_id.octet[0] = 0x50;
	cmd_data->clus_id.octet[1] = 0x6F;
	cmd_data->clus_id.octet[2] = 0x9A;
	cmd_data->clus_id.octet[3] = 0x01;
	WL_TRACE(("cluster_id = " MACDBG "\n", MAC2STRDBG(cmd_data->clus_id.octet)));
	sub_cmd->id = htod16(WL_NAN_CMD_CFG_CID);
	sub_cmd->len = sizeof(sub_cmd->u.options) + sizeof(cmd_data->clus_id);
	sub_cmd->u.options = htol32(BCM_XTLV_OPTION_ALIGN32);
	ret = memcpy_s(sub_cmd->data, sizeof(cmd_data->clus_id),
		(uint8 *)&cmd_data->clus_id,
		sizeof(cmd_data->clus_id));
	if (ret != BCME_OK) {
		WL_ERR(("Failed to copy clus id\n"));
		return ret;
	}
	nan_iov_data->nan_iov_len -= subcmd_len;
	nan_iov_data->nan_iov_buf += subcmd_len;
	NAN_DBG_EXIT();
	return ret;
}
  1811. static int
  1812. wl_cfgnan_set_hop_count_limit(nan_config_cmd_data_t *cmd_data,
  1813. wl_nan_iov_t *nan_iov_data)
  1814. {
  1815. s32 ret = BCME_OK;
  1816. bcm_iov_batch_subcmd_t *sub_cmd = NULL;
  1817. wl_nan_hop_count_t *hop_limit = NULL;
  1818. uint16 subcmd_len;
  1819. NAN_DBG_ENTER();
  1820. sub_cmd = (bcm_iov_batch_subcmd_t*)(nan_iov_data->nan_iov_buf);
  1821. hop_limit = (wl_nan_hop_count_t *)sub_cmd->data;
  1822. ret = wl_cfg_nan_check_cmd_len(nan_iov_data->nan_iov_len,
  1823. sizeof(*hop_limit), &subcmd_len);
  1824. if (unlikely(ret)) {
  1825. WL_ERR(("nan_sub_cmd check failed\n"));
  1826. return ret;
  1827. }
  1828. *hop_limit = cmd_data->hop_count_limit;
  1829. sub_cmd->id = htod16(WL_NAN_CMD_CFG_HOP_LIMIT);
  1830. sub_cmd->len = sizeof(sub_cmd->u.options) + sizeof(*hop_limit);
  1831. sub_cmd->u.options = htol32(BCM_XTLV_OPTION_ALIGN32);
  1832. nan_iov_data->nan_iov_len -= subcmd_len;
  1833. nan_iov_data->nan_iov_buf += subcmd_len;
  1834. NAN_DBG_EXIT();
  1835. return ret;
  1836. }
  1837. static int
  1838. wl_cfgnan_set_sid_beacon_val(nan_config_cmd_data_t *cmd_data,
  1839. wl_nan_iov_t *nan_iov_data, uint32 nan_attr_mask)
  1840. {
  1841. s32 ret = BCME_OK;
  1842. bcm_iov_batch_subcmd_t *sub_cmd = NULL;
  1843. wl_nan_sid_beacon_control_t *sid_beacon = NULL;
  1844. uint16 subcmd_len;
  1845. NAN_DBG_ENTER();
  1846. sub_cmd = (bcm_iov_batch_subcmd_t*)(nan_iov_data->nan_iov_buf);
  1847. ret = wl_cfg_nan_check_cmd_len(nan_iov_data->nan_iov_len,
  1848. sizeof(*sid_beacon), &subcmd_len);
  1849. if (unlikely(ret)) {
  1850. WL_ERR(("nan_sub_cmd check failed\n"));
  1851. return ret;
  1852. }
  1853. sid_beacon = (wl_nan_sid_beacon_control_t *)sub_cmd->data;
  1854. sid_beacon->sid_enable = cmd_data->sid_beacon.sid_enable;
  1855. /* Need to have separate flag for sub beacons
  1856. * sid_beacon->sub_sid_enable = cmd_data->sid_beacon.sub_sid_enable;
  1857. */
  1858. if (nan_attr_mask & NAN_ATTR_SID_BEACON_CONFIG) {
  1859. /* Limit for number of publish SIDs to be included in Beacons */
  1860. sid_beacon->sid_count = cmd_data->sid_beacon.sid_count;
  1861. }
  1862. if (nan_attr_mask & NAN_ATTR_SUB_SID_BEACON_CONFIG) {
  1863. /* Limit for number of subscribe SIDs to be included in Beacons */
  1864. sid_beacon->sub_sid_count = cmd_data->sid_beacon.sub_sid_count;
  1865. }
  1866. sub_cmd->id = htod16(WL_NAN_CMD_CFG_SID_BEACON);
  1867. sub_cmd->len = sizeof(sub_cmd->u.options) +
  1868. sizeof(*sid_beacon);
  1869. sub_cmd->u.options = htol32(BCM_XTLV_OPTION_ALIGN32);
  1870. nan_iov_data->nan_iov_len -= subcmd_len;
  1871. nan_iov_data->nan_iov_buf += subcmd_len;
  1872. NAN_DBG_EXIT();
  1873. return ret;
  1874. }
  1875. static int
  1876. wl_cfgnan_set_nan_oui(nan_config_cmd_data_t *cmd_data,
  1877. wl_nan_iov_t *nan_iov_data)
  1878. {
  1879. s32 ret = BCME_OK;
  1880. bcm_iov_batch_subcmd_t *sub_cmd = NULL;
  1881. uint16 subcmd_len;
  1882. NAN_DBG_ENTER();
  1883. sub_cmd = (bcm_iov_batch_subcmd_t*)(nan_iov_data->nan_iov_buf);
  1884. ret = wl_cfg_nan_check_cmd_len(nan_iov_data->nan_iov_len,
  1885. sizeof(cmd_data->nan_oui), &subcmd_len);
  1886. if (unlikely(ret)) {
  1887. WL_ERR(("nan_sub_cmd check failed\n"));
  1888. return ret;
  1889. }
  1890. sub_cmd->id = htod16(WL_NAN_CMD_CFG_OUI);
  1891. sub_cmd->len = sizeof(sub_cmd->u.options) + sizeof(cmd_data->nan_oui);
  1892. sub_cmd->u.options = htol32(BCM_XTLV_OPTION_ALIGN32);
  1893. ret = memcpy_s(sub_cmd->data, sizeof(cmd_data->nan_oui),
  1894. (uint32 *)&cmd_data->nan_oui,
  1895. sizeof(cmd_data->nan_oui));
  1896. if (ret != BCME_OK) {
  1897. WL_ERR(("Failed to copy nan oui\n"));
  1898. return ret;
  1899. }
  1900. nan_iov_data->nan_iov_len -= subcmd_len;
  1901. nan_iov_data->nan_iov_buf += subcmd_len;
  1902. NAN_DBG_EXIT();
  1903. return ret;
  1904. }
/*
 * wl_cfgnan_set_awake_dws() - append a WL_NAN_CMD_SYNC_AWAKE_DWS subcommand
 * configuring the 2G/5G awake discovery-window (DW) intervals.
 *
 * @ndev/@cfg are needed because an explicit 5G DW interval of 0 is treated
 * as "disable 5G" and triggers two immediate wl_cfgnan_config_control_flag()
 * ioctls (disc + sync beacon TX off) outside the batch being built.
 *
 * On success the iov cursor advances. If one of the beacon-control ioctls
 * fails, the function jumps to fail with the subcommand header NOT yet
 * written and the iov cursor untouched, so the caller must not count the
 * subcommand. Returns BCME_OK or a BCME_* error.
 */
static int
wl_cfgnan_set_awake_dws(struct net_device *ndev, nan_config_cmd_data_t *cmd_data,
	wl_nan_iov_t *nan_iov_data, struct bcm_cfg80211 *cfg, uint32 nan_attr_mask)
{
	s32 ret = BCME_OK;
	bcm_iov_batch_subcmd_t *sub_cmd = NULL;
	wl_nan_awake_dws_t *awake_dws = NULL;
	uint16 subcmd_len;
	NAN_DBG_ENTER();
	sub_cmd =
		(bcm_iov_batch_subcmd_t*)(nan_iov_data->nan_iov_buf);
	ret = wl_cfg_nan_check_cmd_len(nan_iov_data->nan_iov_len,
		sizeof(*awake_dws), &subcmd_len);
	if (unlikely(ret)) {
		WL_ERR(("nan_sub_cmd check failed\n"));
		return ret;
	}
	awake_dws = (wl_nan_awake_dws_t *)sub_cmd->data;
	if (nan_attr_mask & NAN_ATTR_2G_DW_CONFIG) {
		awake_dws->dw_interval_2g = cmd_data->awake_dws.dw_interval_2g;
		if (!awake_dws->dw_interval_2g) {
			/* Set 2G awake dw value to fw default value 1 */
			awake_dws->dw_interval_2g = NAN_SYNC_DEF_AWAKE_DW;
		}
	} else {
		/* Set 2G awake dw value to fw default value 1 */
		awake_dws->dw_interval_2g = NAN_SYNC_DEF_AWAKE_DW;
	}
	if (cfg->support_5g) {
		if (nan_attr_mask & NAN_ATTR_5G_DW_CONFIG) {
			awake_dws->dw_interval_5g = cmd_data->awake_dws.dw_interval_5g;
			if (!awake_dws->dw_interval_5g) {
				/* Interval 0 means 5G off: leave the field 0
				 * and disable 5g beacon ctrls right away.
				 */
				ret = wl_cfgnan_config_control_flag(ndev, cfg,
					WL_NAN_CTRL_DISC_BEACON_TX_5G,
					&(cmd_data->status), 0);
				if (unlikely(ret) || unlikely(cmd_data->status)) {
					WL_ERR((" nan control set config handler,"
						" ret = %d status = %d \n",
						ret, cmd_data->status));
					goto fail;
				}
				ret = wl_cfgnan_config_control_flag(ndev, cfg,
					WL_NAN_CTRL_SYNC_BEACON_TX_5G,
					&(cmd_data->status), 0);
				if (unlikely(ret) || unlikely(cmd_data->status)) {
					WL_ERR((" nan control set config handler,"
						" ret = %d status = %d \n",
						ret, cmd_data->status));
					goto fail;
				}
			}
		} else {
			/* Set 5G awake dw value to fw default value 1 */
			awake_dws->dw_interval_5g = NAN_SYNC_DEF_AWAKE_DW;
		}
	}
	sub_cmd->id = htod16(WL_NAN_CMD_SYNC_AWAKE_DWS);
	sub_cmd->len = sizeof(sub_cmd->u.options) +
		sizeof(*awake_dws);
	sub_cmd->u.options = htol32(BCM_XTLV_OPTION_ALIGN32);
	nan_iov_data->nan_iov_len -= subcmd_len;
	nan_iov_data->nan_iov_buf += subcmd_len;
fail:
	NAN_DBG_EXIT();
	return ret;
}
  1972. int
  1973. wl_cfgnan_start_handler(struct net_device *ndev, struct bcm_cfg80211 *cfg,
  1974. nan_config_cmd_data_t *cmd_data, uint32 nan_attr_mask)
  1975. {
  1976. s32 ret = BCME_OK;
  1977. uint16 nan_buf_size = NAN_IOCTL_BUF_SIZE;
  1978. bcm_iov_batch_buf_t *nan_buf = NULL;
  1979. wl_nan_iov_t *nan_iov_data = NULL;
  1980. dhd_pub_t *dhdp = wl_cfg80211_get_dhdp(ndev);
  1981. uint8 resp_buf[NAN_IOCTL_BUF_SIZE];
  1982. int i;
  1983. s32 timeout = 0;
  1984. nan_hal_capabilities_t capabilities;
  1985. NAN_DBG_ENTER();
  1986. /* Protect discovery creation. Ensure proper mutex precedence.
  1987. * If if_sync & nan_mutex comes together in same context, nan_mutex
  1988. * should follow if_sync.
  1989. */
  1990. mutex_lock(&cfg->if_sync);
  1991. NAN_MUTEX_LOCK();
  1992. #ifdef WL_IFACE_MGMT
  1993. if ((ret = wl_cfg80211_handle_if_role_conflict(cfg, WL_IF_TYPE_NAN_NMI)) != BCME_OK) {
  1994. WL_ERR(("Conflicting iface is present, cant support nan\n"));
  1995. NAN_MUTEX_UNLOCK();
  1996. mutex_unlock(&cfg->if_sync);
  1997. goto fail;
  1998. }
  1999. #endif /* WL_IFACE_MGMT */
  2000. WL_INFORM_MEM(("Initializing NAN\n"));
  2001. ret = wl_cfgnan_init(cfg);
  2002. if (ret != BCME_OK) {
  2003. WL_ERR(("failed to initialize NAN[%d]\n", ret));
  2004. NAN_MUTEX_UNLOCK();
  2005. mutex_unlock(&cfg->if_sync);
  2006. goto fail;
  2007. }
  2008. ret = wl_cfgnan_get_ver(ndev, cfg);
  2009. if (ret != BCME_OK) {
  2010. WL_ERR(("failed to Nan IOV version[%d]\n", ret));
  2011. NAN_MUTEX_UNLOCK();
  2012. mutex_unlock(&cfg->if_sync);
  2013. goto fail;
  2014. }
  2015. /* set nmi addr */
  2016. ret = wl_cfgnan_set_if_addr(cfg);
  2017. if (ret != BCME_OK) {
  2018. WL_ERR(("Failed to set nmi address \n"));
  2019. NAN_MUTEX_UNLOCK();
  2020. mutex_unlock(&cfg->if_sync);
  2021. goto fail;
  2022. }
  2023. cfg->nancfg.nan_event_recvd = false;
  2024. NAN_MUTEX_UNLOCK();
  2025. mutex_unlock(&cfg->if_sync);
  2026. for (i = 0; i < NAN_MAX_NDI; i++) {
  2027. /* Create NDI using the information provided by user space */
  2028. if (cfg->nancfg.ndi[i].in_use && !cfg->nancfg.ndi[i].created) {
  2029. ret = wl_cfgnan_data_path_iface_create_delete_handler(ndev, cfg,
  2030. cfg->nancfg.ndi[i].ifname,
  2031. NAN_WIFI_SUBCMD_DATA_PATH_IFACE_CREATE, dhdp->up);
  2032. if (ret) {
  2033. WL_ERR(("failed to create ndp interface [%d]\n", ret));
  2034. goto fail;
  2035. }
  2036. cfg->nancfg.ndi[i].created = true;
  2037. }
  2038. }
  2039. nan_buf = MALLOCZ(cfg->osh, nan_buf_size);
  2040. if (!nan_buf) {
  2041. WL_ERR(("%s: memory allocation failed\n", __func__));
  2042. ret = BCME_NOMEM;
  2043. goto fail;
  2044. }
  2045. nan_iov_data = MALLOCZ(cfg->osh, sizeof(*nan_iov_data));
  2046. if (!nan_iov_data) {
  2047. WL_ERR(("%s: memory allocation failed\n", __func__));
  2048. ret = BCME_NOMEM;
  2049. goto fail;
  2050. }
  2051. nan_iov_data->nan_iov_len = NAN_IOCTL_BUF_SIZE;
  2052. nan_buf->version = htol16(WL_NAN_IOV_BATCH_VERSION);
  2053. nan_buf->count = 0;
  2054. nan_iov_data->nan_iov_buf = (uint8 *)(&nan_buf->cmds[0]);
  2055. nan_iov_data->nan_iov_len -= OFFSETOF(bcm_iov_batch_buf_t, cmds[0]);
  2056. if (nan_attr_mask & NAN_ATTR_SYNC_DISC_2G_BEACON_CONFIG) {
  2057. /* config sync/discovery beacons on 2G band */
  2058. /* 2g is mandatory */
  2059. if (!cmd_data->beacon_2g_val) {
  2060. WL_ERR(("Invalid NAN config...2G is mandatory\n"));
  2061. ret = BCME_BADARG;
  2062. }
  2063. ret = wl_cfgnan_config_control_flag(ndev, cfg,
  2064. WL_NAN_CTRL_DISC_BEACON_TX_2G | WL_NAN_CTRL_SYNC_BEACON_TX_2G,
  2065. &(cmd_data->status), TRUE);
  2066. if (unlikely(ret) || unlikely(cmd_data->status)) {
  2067. WL_ERR((" nan control set config handler, ret = %d status = %d \n",
  2068. ret, cmd_data->status));
  2069. goto fail;
  2070. }
  2071. }
  2072. if (nan_attr_mask & NAN_ATTR_SYNC_DISC_5G_BEACON_CONFIG) {
  2073. /* config sync/discovery beacons on 5G band */
  2074. ret = wl_cfgnan_config_control_flag(ndev, cfg,
  2075. WL_NAN_CTRL_DISC_BEACON_TX_5G | WL_NAN_CTRL_SYNC_BEACON_TX_5G,
  2076. &(cmd_data->status), cmd_data->beacon_5g_val);
  2077. if (unlikely(ret) || unlikely(cmd_data->status)) {
  2078. WL_ERR((" nan control set config handler, ret = %d status = %d \n",
  2079. ret, cmd_data->status));
  2080. goto fail;
  2081. }
  2082. }
  2083. /* Setting warm up time */
  2084. cmd_data->warmup_time = 1;
  2085. if (cmd_data->warmup_time) {
  2086. ret = wl_cfgnan_warmup_time_handler(cmd_data, nan_iov_data);
  2087. if (unlikely(ret)) {
  2088. WL_ERR(("warm up time handler sub_cmd set failed\n"));
  2089. goto fail;
  2090. }
  2091. nan_buf->count++;
  2092. }
  2093. /* setting master preference and random factor */
  2094. ret = wl_cfgnan_set_election_metric(cmd_data, nan_iov_data, nan_attr_mask);
  2095. if (unlikely(ret)) {
  2096. WL_ERR(("election_metric sub_cmd set failed\n"));
  2097. goto fail;
  2098. } else {
  2099. nan_buf->count++;
  2100. }
  2101. /* setting nan social channels */
  2102. ret = wl_cfgnan_set_nan_soc_chans(ndev, cmd_data, nan_iov_data, nan_attr_mask);
  2103. if (unlikely(ret)) {
  2104. WL_ERR(("nan social channels set failed\n"));
  2105. goto fail;
  2106. } else {
  2107. /* Storing 5g capability which is reqd for avail chan config. */
  2108. cfg->support_5g = cmd_data->support_5g;
  2109. nan_buf->count++;
  2110. }
  2111. if ((cmd_data->support_2g) && ((cmd_data->dwell_time[0]) ||
  2112. (cmd_data->scan_period[0]))) {
  2113. /* setting scan params */
  2114. ret = wl_cfgnan_set_nan_scan_params(ndev, cfg, cmd_data, 0, nan_attr_mask);
  2115. if (unlikely(ret)) {
  2116. WL_ERR(("scan params set failed for 2g\n"));
  2117. goto fail;
  2118. }
  2119. }
  2120. if ((cmd_data->support_5g) && ((cmd_data->dwell_time[1]) ||
  2121. (cmd_data->scan_period[1]))) {
  2122. /* setting scan params */
  2123. ret = wl_cfgnan_set_nan_scan_params(ndev, cfg, cmd_data,
  2124. cmd_data->support_5g, nan_attr_mask);
  2125. if (unlikely(ret)) {
  2126. WL_ERR(("scan params set failed for 5g\n"));
  2127. goto fail;
  2128. }
  2129. }
  2130. /*
  2131. * A cluster_low value matching cluster_high indicates a request
  2132. * to join a cluster with that value.
  2133. * If the requested cluster is not found the
  2134. * device will start its own cluster
  2135. */
  2136. /* For Debug purpose, using clust id compulsion */
  2137. if (!ETHER_ISNULLADDR(&cmd_data->clus_id.octet)) {
  2138. if (cmd_data->clus_id.octet[4] == cmd_data->clus_id.octet[5]) {
  2139. /* device will merge to configured CID only */
  2140. ret = wl_cfgnan_config_control_flag(ndev, cfg,
  2141. WL_NAN_CTRL_MERGE_CONF_CID_ONLY, &(cmd_data->status), true);
  2142. if (unlikely(ret) || unlikely(cmd_data->status)) {
  2143. WL_ERR((" nan control set config handler, ret = %d status = %d \n",
  2144. ret, cmd_data->status));
  2145. goto fail;
  2146. }
  2147. }
  2148. /* setting cluster ID */
  2149. ret = wl_cfgnan_set_cluster_id(cmd_data, nan_iov_data);
  2150. if (unlikely(ret)) {
  2151. WL_ERR(("cluster_id sub_cmd set failed\n"));
  2152. goto fail;
  2153. }
  2154. nan_buf->count++;
  2155. }
  2156. /* setting rssi proximaty values for 2.4GHz and 5GHz */
  2157. ret = wl_cfgnan_set_rssi_proximity(cmd_data, nan_iov_data, nan_attr_mask);
  2158. if (unlikely(ret)) {
  2159. WL_ERR(("2.4GHz/5GHz rssi proximity threshold set failed\n"));
  2160. goto fail;
  2161. } else {
  2162. nan_buf->count++;
  2163. }
  2164. /* setting rssi middle/close values for 2.4GHz and 5GHz */
  2165. ret = wl_cfgnan_set_rssi_mid_or_close(cmd_data, nan_iov_data, nan_attr_mask);
  2166. if (unlikely(ret)) {
  2167. WL_ERR(("2.4GHz/5GHz rssi middle and close set failed\n"));
  2168. goto fail;
  2169. } else {
  2170. nan_buf->count++;
  2171. }
  2172. /* setting hop count limit or threshold */
  2173. if (nan_attr_mask & NAN_ATTR_HOP_COUNT_LIMIT_CONFIG) {
  2174. ret = wl_cfgnan_set_hop_count_limit(cmd_data, nan_iov_data);
  2175. if (unlikely(ret)) {
  2176. WL_ERR(("hop_count_limit sub_cmd set failed\n"));
  2177. goto fail;
  2178. }
  2179. nan_buf->count++;
  2180. }
  2181. /* setting sid beacon val */
  2182. if ((nan_attr_mask & NAN_ATTR_SID_BEACON_CONFIG) ||
  2183. (nan_attr_mask & NAN_ATTR_SUB_SID_BEACON_CONFIG)) {
  2184. ret = wl_cfgnan_set_sid_beacon_val(cmd_data, nan_iov_data, nan_attr_mask);
  2185. if (unlikely(ret)) {
  2186. WL_ERR(("sid_beacon sub_cmd set failed\n"));
  2187. goto fail;
  2188. }
  2189. nan_buf->count++;
  2190. }
  2191. /* setting nan oui */
  2192. if (nan_attr_mask & NAN_ATTR_OUI_CONFIG) {
  2193. ret = wl_cfgnan_set_nan_oui(cmd_data, nan_iov_data);
  2194. if (unlikely(ret)) {
  2195. WL_ERR(("nan_oui sub_cmd set failed\n"));
  2196. goto fail;
  2197. }
  2198. nan_buf->count++;
  2199. }
  2200. /* setting nan awake dws */
  2201. ret = wl_cfgnan_set_awake_dws(ndev, cmd_data,
  2202. nan_iov_data, cfg, nan_attr_mask);
  2203. if (unlikely(ret)) {
  2204. WL_ERR(("nan awake dws set failed\n"));
  2205. goto fail;
  2206. } else {
  2207. nan_buf->count++;
  2208. }
  2209. /* enable events */
  2210. ret = wl_cfgnan_config_eventmask(ndev, cfg, cmd_data->disc_ind_cfg, false);
  2211. if (unlikely(ret)) {
  2212. WL_ERR(("Failed to config disc ind flag in event_mask, ret = %d\n", ret));
  2213. goto fail;
  2214. }
  2215. /* setting nan enable sub_cmd */
  2216. ret = wl_cfgnan_enable_handler(nan_iov_data, true);
  2217. if (unlikely(ret)) {
  2218. WL_ERR(("enable handler sub_cmd set failed\n"));
  2219. goto fail;
  2220. }
  2221. nan_buf->count++;
  2222. nan_buf->is_set = true;
  2223. nan_buf_size -= nan_iov_data->nan_iov_len;
  2224. memset(resp_buf, 0, sizeof(resp_buf));
  2225. /* Reset conditon variable */
  2226. ret = wl_cfgnan_execute_ioctl(ndev, cfg, nan_buf, nan_buf_size,
  2227. &(cmd_data->status), (void*)resp_buf, NAN_IOCTL_BUF_SIZE);
  2228. if (unlikely(ret) || unlikely(cmd_data->status)) {
  2229. WL_ERR((" nan start handler, enable failed, ret = %d status = %d \n",
  2230. ret, cmd_data->status));
  2231. goto fail;
  2232. }
  2233. timeout = wait_event_timeout(cfg->nancfg.nan_event_wait,
  2234. cfg->nancfg.nan_event_recvd, msecs_to_jiffies(NAN_START_STOP_TIMEOUT));
  2235. if (!timeout) {
  2236. WL_ERR(("Timed out while Waiting for WL_NAN_EVENT_START event !!!\n"));
  2237. ret = BCME_ERROR;
  2238. goto fail;
  2239. }
  2240. /* If set, auto datapath confirms will be sent by FW */
  2241. ret = wl_cfgnan_config_control_flag(ndev, cfg, WL_NAN_CTRL_AUTO_DPCONF,
  2242. &(cmd_data->status), true);
  2243. if (unlikely(ret) || unlikely(cmd_data->status)) {
  2244. WL_ERR((" nan control set config handler, ret = %d status = %d \n",
  2245. ret, cmd_data->status));
  2246. goto fail;
  2247. }
  2248. /* By default set NAN proprietary rates */
  2249. ret = wl_cfgnan_config_control_flag(ndev, cfg, WL_NAN_CTRL_PROP_RATE,
  2250. &(cmd_data->status), true);
  2251. if (unlikely(ret) || unlikely(cmd_data->status)) {
  2252. WL_ERR((" nan proprietary rate set failed, ret = %d status = %d \n",
  2253. ret, cmd_data->status));
  2254. goto fail;
  2255. }
  2256. /* malloc for ndp peer list */
  2257. if ((ret = wl_cfgnan_get_capablities_handler(ndev, cfg, &capabilities))
  2258. == BCME_OK) {
  2259. cfg->nancfg.max_ndp_count = capabilities.max_ndp_sessions;
  2260. cfg->nancfg.nan_ndp_peer_info = MALLOCZ(cfg->osh,
  2261. cfg->nancfg.max_ndp_count * sizeof(nan_ndp_peer_t));
  2262. if (!cfg->nancfg.nan_ndp_peer_info) {
  2263. WL_ERR(("%s: memory allocation failed\n", __func__));
  2264. ret = BCME_NOMEM;
  2265. goto fail;
  2266. }
  2267. } else {
  2268. WL_ERR(("wl_cfgnan_get_capablities_handler failed, ret = %d\n", ret));
  2269. goto fail;
  2270. }
  2271. cfg->nan_enable = true;
  2272. WL_INFORM_MEM(("[NAN] Enable successfull \n"));
  2273. /* disable TDLS on NAN NMI IF create */
  2274. wl_cfg80211_tdls_config(cfg, TDLS_STATE_NMI_CREATE, false);
  2275. fail:
  2276. /* reset conditon variable */
  2277. cfg->nancfg.nan_event_recvd = false;
  2278. if (unlikely(ret) || unlikely(cmd_data->status)) {
  2279. cfg->nan_enable = false;
  2280. mutex_lock(&cfg->if_sync);
  2281. ret = wl_cfg80211_delete_iface(cfg, WL_IF_TYPE_NAN);
  2282. if (ret != BCME_OK) {
  2283. WL_ERR(("failed to delete NDI[%d]\n", ret));
  2284. }
  2285. mutex_unlock(&cfg->if_sync);
  2286. }
  2287. if (nan_buf) {
  2288. MFREE(cfg->osh, nan_buf, NAN_IOCTL_BUF_SIZE);
  2289. }
  2290. if (nan_iov_data) {
  2291. MFREE(cfg->osh, nan_iov_data, sizeof(*nan_iov_data));
  2292. }
  2293. NAN_DBG_EXIT();
  2294. return ret;
  2295. }
/*
 * wl_cfgnan_disable() - tear down NAN if it is initialized and enabled:
 * delete NDIs (so P2P/SoftAP can work), stop NAN in firmware, deinit the
 * driver state, clean up NAN-RTT bookkeeping and free the NDP peer list.
 *
 * @cfg:    driver cfg80211 context.
 * @reason: recorded in nancfg.disable_reason before the stop; the stop/stop
 *          event path reads it to decide whether to notify the HAL.
 *
 * Returns the result of the last failing step (best effort: later steps
 * still run after a non-fatal failure).
 */
int
wl_cfgnan_disable(struct bcm_cfg80211 *cfg, nan_stop_reason_code_t reason)
{
	s32 ret = BCME_OK;
	dhd_pub_t *dhdp = (dhd_pub_t *)(cfg->pub);
#ifdef RTT_SUPPORT
	/* NOTE(review): rtt_status is dereferenced below without a NULL
	 * check — presumably GET_RTTSTATE() cannot fail here; confirm.
	 */
	rtt_status_info_t *rtt_status = GET_RTTSTATE(dhdp);
	rtt_target_info_t *target_info = NULL;
#endif /* RTT_SUPPORT */
	NAN_DBG_ENTER();
	if ((cfg->nan_init_state == TRUE) &&
		(cfg->nan_enable == TRUE)) {
		struct net_device *ndev;
		ndev = bcmcfg_to_prmry_ndev(cfg);
		/* We have to remove NDIs so that P2P/Softap can work */
		ret = wl_cfg80211_delete_iface(cfg, WL_IF_TYPE_NAN);
		if (ret != BCME_OK) {
			WL_ERR(("failed to delete NDI[%d]\n", ret));
		}
		WL_INFORM_MEM(("Nan Disable Req, reason = %d\n", reason));
		cfg->nancfg.disable_reason = reason;
		ret = wl_cfgnan_stop_handler(ndev, cfg, false);
		if (ret == -ENODEV) {
			WL_ERR(("Bus is down, no need to proceed\n"));
		} else if (ret != BCME_OK) {
			WL_ERR(("failed to stop nan, error[%d]\n", ret));
		}
		ret = wl_cfgnan_deinit(cfg, dhdp->up);
		if (ret != BCME_OK) {
			WL_ERR(("failed to de-initialize NAN[%d]\n", ret));
			if (!dhd_query_bus_erros(dhdp)) {
				ASSERT(0);
			}
		}
#ifdef RTT_SUPPORT
		/* Delete the geofence rtt target list */
		dhd_rtt_delete_geofence_target_list(dhdp);
		/* Remove if any pending proxd timeout for nan-rtt */
		target_info = &rtt_status->rtt_config.target_info[rtt_status->cur_idx];
		/* target_info is an address-of and can never be NULL; the
		 * effective guard is the RTT_PEER_NAN check.
		 */
		if (target_info && target_info->peer == RTT_PEER_NAN) {
			/* Cancel pending proxd timeout work if any */
			if (delayed_work_pending(&rtt_status->proxd_timeout)) {
				cancel_delayed_work(&rtt_status->proxd_timeout);
			}
		}
		/* Delete if any directed nan rtt session */
		dhd_rtt_delete_nan_session(dhdp);
#endif /* RTT_SUPPORT */
		if (cfg->nancfg.nan_ndp_peer_info) {
			MFREE(cfg->osh, cfg->nancfg.nan_ndp_peer_info,
				cfg->nancfg.max_ndp_count * sizeof(nan_ndp_peer_t));
			cfg->nancfg.nan_ndp_peer_info = NULL;
		}
	}
	NAN_DBG_EXIT();
	return ret;
}
  2353. static void
  2354. wl_cfgnan_send_stop_event(struct bcm_cfg80211 *cfg)
  2355. {
  2356. s32 ret = BCME_OK;
  2357. nan_event_data_t *nan_event_data = NULL;
  2358. NAN_DBG_ENTER();
  2359. if (cfg->nancfg.disable_reason == NAN_USER_INITIATED) {
  2360. /* do not event to host if command is from host */
  2361. goto exit;
  2362. }
  2363. nan_event_data = MALLOCZ(cfg->osh, sizeof(nan_event_data_t));
  2364. if (!nan_event_data) {
  2365. WL_ERR(("%s: memory allocation failed\n", __func__));
  2366. ret = BCME_NOMEM;
  2367. goto exit;
  2368. }
  2369. bzero(nan_event_data, sizeof(nan_event_data_t));
  2370. if (cfg->nancfg.disable_reason == NAN_CONCURRENCY_CONFLICT) {
  2371. nan_event_data->status = NAN_STATUS_UNSUPPORTED_CONCURRENCY_NAN_DISABLED;
  2372. } else {
  2373. nan_event_data->status = NAN_STATUS_SUCCESS;
  2374. }
  2375. nan_event_data->status = NAN_STATUS_SUCCESS;
  2376. ret = memcpy_s(nan_event_data->nan_reason, NAN_ERROR_STR_LEN,
  2377. "NAN_STATUS_SUCCESS", strlen("NAN_STATUS_SUCCESS"));
  2378. if (ret != BCME_OK) {
  2379. WL_ERR(("Failed to copy nan reason string, ret = %d\n", ret));
  2380. goto exit;
  2381. }
  2382. #if (LINUX_VERSION_CODE > KERNEL_VERSION(3, 13, 0)) || defined(WL_VENDOR_EXT_SUPPORT)
  2383. ret = wl_cfgvendor_send_nan_event(cfg->wdev->wiphy, bcmcfg_to_prmry_ndev(cfg),
  2384. GOOGLE_NAN_EVENT_DISABLED, nan_event_data);
  2385. if (ret != BCME_OK) {
  2386. WL_ERR(("Failed to send event to nan hal, (%d)\n",
  2387. GOOGLE_NAN_EVENT_DISABLED));
  2388. }
  2389. #endif /* (LINUX_VERSION_CODE > KERNEL_VERSION(3, 13, 0)) || defined(WL_VENDOR_EXT_SUPPORT) */
  2390. exit:
  2391. if (nan_event_data) {
  2392. MFREE(cfg->osh, nan_event_data, sizeof(nan_event_data_t));
  2393. }
  2394. NAN_DBG_EXIT();
  2395. return;
  2396. }
/*
 * wl_cfgnan_stop_handler() - disable NAN in firmware and reset driver-side
 * NAN service/instance bookkeeping.
 *
 * @ndev:           net_device used for the ioctls.
 * @cfg:            driver cfg80211 context.
 * @disable_events: when true, the NAN event mask is cleared first so the
 *                  framework (already doing iface cleanup) is not hit with
 *                  further iface-delete triggering events.
 *
 * If the disable reason is NAN_BUS_IS_DOWN the firmware interaction is
 * skipped entirely and only local state is reset. The NAN mutex is released
 * before the disable ioctl so the WL_NAN_EVENT_STOP event can be processed;
 * mutex_locked tracks whether the fail path still owns it.
 *
 * Returns BCME_OK or the first fatal error.
 */
int
wl_cfgnan_stop_handler(struct net_device *ndev,
	struct bcm_cfg80211 *cfg, bool disable_events)
{
	bcm_iov_batch_buf_t *nan_buf = NULL;
	s32 ret = BCME_OK;
	uint16 nan_buf_size = NAN_IOCTL_BUF_SIZE;
	wl_nan_iov_t *nan_iov_data = NULL;
	uint32 status;
	uint8 resp_buf[NAN_IOCTL_BUF_SIZE];
	s32 timeout;
	bool mutex_locked = false;
	dhd_pub_t *dhdp = (dhd_pub_t *)(cfg->pub);
	NAN_DBG_ENTER();
	NAN_MUTEX_LOCK();
	mutex_locked = true;
	if (!cfg->nan_enable) {
		WL_INFORM(("Nan is not enabled\n"));
		ret = BCME_OK;
		goto fail;
	}
	if (cfg->nancfg.disable_reason != NAN_BUS_IS_DOWN) {
		/*
		 * Framework doing cleanup(iface remove) on disable command,
		 * so avoiding event to prevent iface delete calls again
		 */
		if (disable_events) {
			WL_INFORM_MEM(("[NAN] Disabling Nan events\n"));
			wl_cfgnan_config_eventmask(ndev, cfg, 0, true);
		}
		nan_buf = MALLOCZ(cfg->osh, nan_buf_size);
		if (!nan_buf) {
			WL_ERR(("%s: memory allocation failed\n", __func__));
			ret = BCME_NOMEM;
			goto fail;
		}
		nan_iov_data = MALLOCZ(cfg->osh, sizeof(*nan_iov_data));
		if (!nan_iov_data) {
			WL_ERR(("%s: memory allocation failed\n", __func__));
			ret = BCME_NOMEM;
			goto fail;
		}
		nan_iov_data->nan_iov_len = NAN_IOCTL_BUF_SIZE;
		nan_buf->version = htol16(WL_NAN_IOV_BATCH_VERSION);
		nan_buf->count = 0;
		nan_iov_data->nan_iov_buf = (uint8 *)(&nan_buf->cmds[0]);
		nan_iov_data->nan_iov_len -= OFFSETOF(bcm_iov_batch_buf_t, cmds[0]);
		/* Single subcommand: NAN enable=false */
		ret = wl_cfgnan_enable_handler(nan_iov_data, false);
		if (unlikely(ret)) {
			WL_ERR(("nan disable handler failed\n"));
			goto fail;
		}
		nan_buf->count++;
		nan_buf->is_set = true;
		nan_buf_size -= nan_iov_data->nan_iov_len;
		memset_s(resp_buf, sizeof(resp_buf),
			0, sizeof(resp_buf));
		mutex_locked = false;
		/* reset conditon variable */
		cfg->nancfg.nan_event_recvd = false;
		/* Releasing lock to allow event processing */
		NAN_MUTEX_UNLOCK();
		ret = wl_cfgnan_execute_ioctl(ndev, cfg, nan_buf, nan_buf_size, &status,
			(void*)resp_buf, NAN_IOCTL_BUF_SIZE);
		if (unlikely(ret) || unlikely(status)) {
			WL_ERR(("nan disable failed ret = %d status = %d\n", ret, status));
			goto fail;
		}
		/* Wait for firmware to confirm via WL_NAN_EVENT_STOP */
		timeout = wait_event_timeout(cfg->nancfg.nan_event_wait,
			cfg->nancfg.nan_event_recvd,
			msecs_to_jiffies(NAN_START_STOP_TIMEOUT));
		if (!timeout) {
			WL_ERR(("Timed out while Waiting for WL_NAN_EVENT_STOP event !!!\n"));
			if (!dhd_query_bus_erros(dhdp)) {
				ASSERT(0);
			}
			ret = BCME_ERROR;
			goto fail;
		}
		WL_INFORM_MEM(("[NAN] Disable done\n"));
		/* Enable back TDLS if connected interface is <= 1 */
		wl_cfg80211_tdls_config(cfg, TDLS_STATE_IF_DELETE, false);
	}
	wl_cfgnan_send_stop_event(cfg);
fail:
	/* Resetting instance ID mask */
	cfg->nancfg.inst_id_start = 0;
	memset(cfg->nancfg.svc_inst_id_mask, 0, sizeof(cfg->nancfg.svc_inst_id_mask));
	memset(cfg->svc_info, 0, NAN_MAX_SVC_INST * sizeof(nan_svc_info_t));
	/* reset conditon variable */
	cfg->nancfg.nan_event_recvd = false;
	cfg->nan_enable = false;
	if (nan_buf) {
		MFREE(cfg->osh, nan_buf, NAN_IOCTL_BUF_SIZE);
	}
	if (nan_iov_data) {
		MFREE(cfg->osh, nan_iov_data, sizeof(*nan_iov_data));
	}
	if (mutex_locked)
		NAN_MUTEX_UNLOCK();
	NAN_DBG_EXIT();
	return ret;
}
/*
 * Apply run-time NAN configuration updates as one batched iovar set.
 *
 * Builds a bcm_iov_batch_buf_t holding only the sub-commands selected by
 * nan_attr_mask and by non-zero fields of cmd_data (sid beacon, election
 * metric, hop count limit, RSSI proximity, awake DWs, event mask, scan
 * params), sends the batch via wl_cfgnan_execute_ioctl(), and finally
 * programs local/NDC availability if the mandatory avail arguments are set.
 *
 * NAN must already be enabled (cfg->nan_enable); otherwise nothing is sent
 * and BCME_OK is returned.
 *
 * Returns BCME_OK or a BCME_* error; cmd_data->status additionally carries
 * the firmware status of the batched set.
 */
int
wl_cfgnan_config_handler(struct net_device *ndev, struct bcm_cfg80211 *cfg,
	nan_config_cmd_data_t *cmd_data, uint32 nan_attr_mask)
{
	bcm_iov_batch_buf_t *nan_buf = NULL;
	s32 ret = BCME_OK;
	uint16 nan_buf_size = NAN_IOCTL_BUF_SIZE;
	wl_nan_iov_t *nan_iov_data = NULL;
	uint8 resp_buf[NAN_IOCTL_BUF_SIZE];
	NAN_DBG_ENTER();
	/* Nan need to be enabled before configuring/updating params */
	if (cfg->nan_enable) {
		nan_buf = MALLOCZ(cfg->osh, nan_buf_size);
		if (!nan_buf) {
			WL_ERR(("%s: memory allocation failed\n", __func__));
			ret = BCME_NOMEM;
			goto fail;
		}
		nan_iov_data = MALLOCZ(cfg->osh, sizeof(*nan_iov_data));
		if (!nan_iov_data) {
			WL_ERR(("%s: memory allocation failed\n", __func__));
			ret = BCME_NOMEM;
			goto fail;
		}
		/* nan_iov_buf tracks the write cursor inside nan_buf; nan_iov_len
		 * tracks the remaining space and is decremented by each sub-cmd setter.
		 */
		nan_iov_data->nan_iov_len = NAN_IOCTL_BUF_SIZE;
		nan_buf->version = htol16(WL_NAN_IOV_BATCH_VERSION);
		nan_buf->count = 0;
		nan_iov_data->nan_iov_buf = (uint8 *)(&nan_buf->cmds[0]);
		nan_iov_data->nan_iov_len -= OFFSETOF(bcm_iov_batch_buf_t, cmds[0]);
		/* setting sid beacon val */
		if ((nan_attr_mask & NAN_ATTR_SID_BEACON_CONFIG) ||
			(nan_attr_mask & NAN_ATTR_SUB_SID_BEACON_CONFIG)) {
			ret = wl_cfgnan_set_sid_beacon_val(cmd_data, nan_iov_data, nan_attr_mask);
			if (unlikely(ret)) {
				WL_ERR(("sid_beacon sub_cmd set failed\n"));
				goto fail;
			}
			nan_buf->count++;
		}
		/* setting master preference and random factor */
		if (cmd_data->metrics.random_factor ||
			cmd_data->metrics.master_pref) {
			ret = wl_cfgnan_set_election_metric(cmd_data, nan_iov_data,
				nan_attr_mask);
			if (unlikely(ret)) {
				WL_ERR(("election_metric sub_cmd set failed\n"));
				goto fail;
			} else {
				nan_buf->count++;
			}
		}
		/* setting hop count limit or threshold */
		if (nan_attr_mask & NAN_ATTR_HOP_COUNT_LIMIT_CONFIG) {
			ret = wl_cfgnan_set_hop_count_limit(cmd_data, nan_iov_data);
			if (unlikely(ret)) {
				WL_ERR(("hop_count_limit sub_cmd set failed\n"));
				goto fail;
			}
			nan_buf->count++;
		}
		/* setting rssi proximaty values for 2.4GHz and 5GHz */
		ret = wl_cfgnan_set_rssi_proximity(cmd_data, nan_iov_data,
			nan_attr_mask);
		if (unlikely(ret)) {
			WL_ERR(("2.4GHz/5GHz rssi proximity threshold set failed\n"));
			goto fail;
		} else {
			nan_buf->count++;
		}
		/* setting nan awake dws */
		ret = wl_cfgnan_set_awake_dws(ndev, cmd_data, nan_iov_data,
			cfg, nan_attr_mask);
		if (unlikely(ret)) {
			WL_ERR(("nan awake dws set failed\n"));
			goto fail;
		} else {
			nan_buf->count++;
		}
		if (cmd_data->disc_ind_cfg) {
			/* Disable events */
			WL_TRACE(("Disable events based on flag\n"));
			ret = wl_cfgnan_config_eventmask(ndev, cfg,
				cmd_data->disc_ind_cfg, false);
			if (unlikely(ret)) {
				WL_ERR(("Failed to config disc ind flag in event_mask, ret = %d\n",
					ret));
				goto fail;
			}
		}
		/* index 1 = 5GHz scan params, index 0 = 2.4GHz (see calls below) */
		if ((cfg->support_5g) && ((cmd_data->dwell_time[1]) ||
			(cmd_data->scan_period[1]))) {
			/* setting scan params */
			ret = wl_cfgnan_set_nan_scan_params(ndev, cfg,
				cmd_data, cfg->support_5g, nan_attr_mask);
			if (unlikely(ret)) {
				WL_ERR(("scan params set failed for 5g\n"));
				goto fail;
			}
		}
		if ((cmd_data->dwell_time[0]) ||
			(cmd_data->scan_period[0])) {
			ret = wl_cfgnan_set_nan_scan_params(ndev, cfg, cmd_data, 0, nan_attr_mask);
			if (unlikely(ret)) {
				WL_ERR(("scan params set failed for 2g\n"));
				goto fail;
			}
		}
		nan_buf->is_set = true;
		/* actual payload size = total buffer minus unused remainder */
		nan_buf_size -= nan_iov_data->nan_iov_len;
		if (nan_buf->count) {
			memset_s(resp_buf, sizeof(resp_buf),
				0, sizeof(resp_buf));
			ret = wl_cfgnan_execute_ioctl(ndev, cfg, nan_buf, nan_buf_size,
				&(cmd_data->status),
				(void*)resp_buf, NAN_IOCTL_BUF_SIZE);
			if (unlikely(ret) || unlikely(cmd_data->status)) {
				WL_ERR((" nan config handler failed ret = %d status = %d\n",
					ret, cmd_data->status));
				goto fail;
			}
		} else {
			WL_DBG(("No commands to send\n"));
		}
		/* Availability needs bmap, a valid duration and chanspec[0];
		 * missing any of them is not an error — avail is simply skipped.
		 */
		if ((!cmd_data->bmap) || (cmd_data->avail_params.duration == NAN_BAND_INVALID) ||
			(!cmd_data->chanspec[0])) {
			WL_TRACE(("mandatory arguments are not present to set avail\n"));
			ret = BCME_OK;
		} else {
			cmd_data->avail_params.chanspec[0] = cmd_data->chanspec[0];
			cmd_data->avail_params.bmap = cmd_data->bmap;
			/* 1=local, 2=peer, 3=ndc, 4=immutable, 5=response, 6=counter */
			ret = wl_cfgnan_set_nan_avail(bcmcfg_to_prmry_ndev(cfg),
				cfg, &cmd_data->avail_params, WL_AVAIL_LOCAL);
			if (unlikely(ret)) {
				WL_ERR(("Failed to set avail value with type local\n"));
				goto fail;
			}
			ret = wl_cfgnan_set_nan_avail(bcmcfg_to_prmry_ndev(cfg),
				cfg, &cmd_data->avail_params, WL_AVAIL_NDC);
			if (unlikely(ret)) {
				WL_ERR(("Failed to set avail value with type ndc\n"));
				goto fail;
			}
		}
	} else {
		WL_INFORM(("nan is not enabled\n"));
	}
fail:
	/* unified cleanup path: both buffers freed whether or not we failed */
	if (nan_buf) {
		MFREE(cfg->osh, nan_buf, NAN_IOCTL_BUF_SIZE);
	}
	if (nan_iov_data) {
		MFREE(cfg->osh, nan_iov_data, sizeof(*nan_iov_data));
	}
	NAN_DBG_EXIT();
	return ret;
}
/*
 * Handler for the NAN "support" command.
 * Not implemented yet; unconditionally reports success.
 */
int
wl_cfgnan_support_handler(struct net_device *ndev,
	struct bcm_cfg80211 *cfg, nan_config_cmd_data_t *cmd_data)
{
	/* TODO: */
	return BCME_OK;
}
/*
 * Handler for the NAN "status" command.
 * Not implemented yet; unconditionally reports success.
 */
int
wl_cfgnan_status_handler(struct net_device *ndev,
	struct bcm_cfg80211 *cfg, nan_config_cmd_data_t *cmd_data)
{
	/* TODO: */
	return BCME_OK;
}
  2671. #ifdef WL_NAN_DISC_CACHE
  2672. static
  2673. nan_svc_info_t *
  2674. wl_cfgnan_get_svc_inst(struct bcm_cfg80211 *cfg,
  2675. wl_nan_instance_id svc_inst_id, uint8 ndp_id)
  2676. {
  2677. uint8 i, j;
  2678. if (ndp_id) {
  2679. for (i = 0; i < NAN_MAX_SVC_INST; i++) {
  2680. for (j = 0; j < NAN_MAX_SVC_INST; j++) {
  2681. if (cfg->svc_info[i].ndp_id[j] == ndp_id) {
  2682. return &cfg->svc_info[i];
  2683. }
  2684. }
  2685. }
  2686. } else if (svc_inst_id) {
  2687. for (i = 0; i < NAN_MAX_SVC_INST; i++) {
  2688. if (cfg->svc_info[i].svc_id == svc_inst_id) {
  2689. return &cfg->svc_info[i];
  2690. }
  2691. }
  2692. }
  2693. return NULL;
  2694. }
  2695. nan_ranging_inst_t *
  2696. wl_cfgnan_check_for_ranging(struct bcm_cfg80211 *cfg, struct ether_addr *peer)
  2697. {
  2698. uint8 i;
  2699. if (peer) {
  2700. for (i = 0; i < NAN_MAX_RANGING_INST; i++) {
  2701. if (!memcmp(peer, &cfg->nan_ranging_info[i].peer_addr,
  2702. ETHER_ADDR_LEN)) {
  2703. return &(cfg->nan_ranging_info[i]);
  2704. }
  2705. }
  2706. }
  2707. return NULL;
  2708. }
  2709. /*
  2710. * Find ranging inst for given peer,
  2711. * On not found, create one
  2712. * with given range role
  2713. */
  2714. nan_ranging_inst_t *
  2715. wl_cfgnan_get_ranging_inst(struct bcm_cfg80211 *cfg, struct ether_addr *peer,
  2716. nan_range_role_t range_role)
  2717. {
  2718. nan_ranging_inst_t *ranging_inst = NULL;
  2719. uint8 i;
  2720. ranging_inst = wl_cfgnan_check_for_ranging(cfg, peer);
  2721. if (ranging_inst) {
  2722. goto done;
  2723. }
  2724. WL_TRACE(("Creating Ranging instance \n"));
  2725. for (i = 0; i < NAN_MAX_RANGING_INST; i++) {
  2726. if (cfg->nan_ranging_info[i].in_use == FALSE) {
  2727. break;
  2728. }
  2729. }
  2730. if (i == NAN_MAX_RANGING_INST) {
  2731. WL_ERR(("No buffer available for the ranging instance"));
  2732. goto done;
  2733. }
  2734. ranging_inst = &cfg->nan_ranging_info[i];
  2735. memcpy(&ranging_inst->peer_addr, peer, ETHER_ADDR_LEN);
  2736. ranging_inst->range_status = NAN_RANGING_REQUIRED;
  2737. ranging_inst->prev_distance_mm = INVALID_DISTANCE;
  2738. ranging_inst->range_role = range_role;
  2739. ranging_inst->in_use = TRUE;
  2740. done:
  2741. return ranging_inst;
  2742. }
  2743. #endif /* WL_NAN_DISC_CACHE */
  2744. static int
  2745. process_resp_buf(void *iov_resp,
  2746. uint8 *instance_id, uint16 sub_cmd_id)
  2747. {
  2748. int res = BCME_OK;
  2749. NAN_DBG_ENTER();
  2750. if (sub_cmd_id == WL_NAN_CMD_DATA_DATAREQ) {
  2751. wl_nan_dp_req_ret_t *dpreq_ret = NULL;
  2752. dpreq_ret = (wl_nan_dp_req_ret_t *)(iov_resp);
  2753. *instance_id = dpreq_ret->ndp_id;
  2754. WL_TRACE(("%s: Initiator NDI: " MACDBG "\n",
  2755. __FUNCTION__, MAC2STRDBG(dpreq_ret->indi.octet)));
  2756. } else if (sub_cmd_id == WL_NAN_CMD_RANGE_REQUEST) {
  2757. wl_nan_range_id *range_id = NULL;
  2758. range_id = (wl_nan_range_id *)(iov_resp);
  2759. *instance_id = *range_id;
  2760. WL_TRACE(("Range id: %d\n", *range_id));
  2761. }
  2762. WL_DBG(("instance_id: %d\n", *instance_id));
  2763. NAN_DBG_EXIT();
  2764. return res;
  2765. }
/*
 * Cancel an active NAN ranging session identified by range_id.
 *
 * On firmware versions >= NAN_RANGE_EXT_CANCEL_SUPPORT_VER an extended
 * cancel structure (id + flags) is sent; older firmware gets just the
 * bare range id. flags are NAN_RNG_TERM_FLAG_* values.
 *
 * Returns BCME_OK on success; *status receives the firmware status of the
 * cancel sub-command.
 */
int
wl_cfgnan_cancel_ranging(struct net_device *ndev,
	struct bcm_cfg80211 *cfg, uint8 range_id, uint8 flags, uint32 *status)
{
	bcm_iov_batch_buf_t *nan_buf = NULL;
	s32 ret = BCME_OK;
	uint16 nan_iov_start, nan_iov_end;
	uint16 nan_buf_size = NAN_IOCTL_BUF_SIZE;
	uint16 subcmd_len;
	bcm_iov_batch_subcmd_t *sub_cmd = NULL;
	wl_nan_iov_t *nan_iov_data = NULL;
	uint8 resp_buf[NAN_IOCTL_BUF_SIZE];
	wl_nan_range_cancel_ext_t rng_cncl;
	uint8 size_of_iov;
	NAN_DBG_ENTER();
	/* payload size depends on whether fw supports the extended cancel */
	if (cfg->nancfg.version >= NAN_RANGE_EXT_CANCEL_SUPPORT_VER) {
		size_of_iov = sizeof(rng_cncl);
	} else {
		size_of_iov = sizeof(range_id);
	}
	memset_s(&rng_cncl, sizeof(rng_cncl), 0, sizeof(rng_cncl));
	rng_cncl.range_id = range_id;
	rng_cncl.flags = flags;
	nan_buf = MALLOCZ(cfg->osh, nan_buf_size);
	if (!nan_buf) {
		WL_ERR(("%s: memory allocation failed\n", __func__));
		ret = BCME_NOMEM;
		goto fail;
	}
	nan_iov_data = MALLOCZ(cfg->osh, sizeof(*nan_iov_data));
	if (!nan_iov_data) {
		WL_ERR(("%s: memory allocation failed\n", __func__));
		ret = BCME_NOMEM;
		goto fail;
	}
	nan_iov_data->nan_iov_len = nan_iov_start = NAN_IOCTL_BUF_SIZE;
	nan_buf->version = htol16(WL_NAN_IOV_BATCH_VERSION);
	nan_buf->count = 0;
	nan_iov_data->nan_iov_buf = (uint8 *)(&nan_buf->cmds[0]);
	nan_iov_data->nan_iov_len -= OFFSETOF(bcm_iov_batch_buf_t, cmds[0]);
	sub_cmd = (bcm_iov_batch_subcmd_t*)(nan_iov_data->nan_iov_buf);
	ret = wl_cfg_nan_check_cmd_len(nan_iov_data->nan_iov_len,
		size_of_iov, &subcmd_len);
	if (unlikely(ret)) {
		WL_ERR(("nan_sub_cmd check failed\n"));
		goto fail;
	}
	sub_cmd->id = htod16(WL_NAN_CMD_RANGE_CANCEL);
	sub_cmd->len = sizeof(sub_cmd->u.options) + size_of_iov;
	sub_cmd->u.options = htol32(BCM_XTLV_OPTION_ALIGN32);
	/* Reduce the iov_len size by subcmd_len */
	nan_iov_data->nan_iov_len -= subcmd_len;
	nan_iov_end = nan_iov_data->nan_iov_len;
	/* final ioctl length = consumed portion of the buffer */
	nan_buf_size = (nan_iov_start - nan_iov_end);
	if (size_of_iov >= sizeof(rng_cncl)) {
		/* extended cancel: copy the full (id, flags) structure */
		(void)memcpy_s(sub_cmd->data, nan_iov_data->nan_iov_len,
			&rng_cncl, size_of_iov);
	} else {
		/* legacy cancel: copy only the range id */
		(void)memcpy_s(sub_cmd->data, nan_iov_data->nan_iov_len,
			&range_id, size_of_iov);
	}
	nan_buf->is_set = true;
	nan_buf->count++;
	memset_s(resp_buf, sizeof(resp_buf),
		0, sizeof(resp_buf));
	ret = wl_cfgnan_execute_ioctl(ndev, cfg, nan_buf, nan_buf_size, status,
		(void*)resp_buf, NAN_IOCTL_BUF_SIZE);
	if (unlikely(ret) || unlikely(*status)) {
		WL_ERR(("Range ID %d cancel failed ret %d status %d \n", range_id, ret, *status));
		goto fail;
	}
	WL_MEM(("Range cancel with Range ID [%d] successfull\n", range_id));
fail:
	if (nan_buf) {
		MFREE(cfg->osh, nan_buf, NAN_IOCTL_BUF_SIZE);
	}
	if (nan_iov_data) {
		MFREE(cfg->osh, nan_iov_data, sizeof(*nan_iov_data));
	}
	NAN_DBG_EXIT();
	return ret;
}
  2848. #ifdef WL_NAN_DISC_CACHE
  2849. static int
  2850. wl_cfgnan_cache_svc_info(struct bcm_cfg80211 *cfg,
  2851. nan_discover_cmd_data_t *cmd_data, uint16 cmd_id, bool update)
  2852. {
  2853. int ret = BCME_OK;
  2854. int i;
  2855. nan_svc_info_t *svc_info;
  2856. uint8 svc_id = (cmd_id == WL_NAN_CMD_SD_SUBSCRIBE) ? cmd_data->sub_id :
  2857. cmd_data->pub_id;
  2858. for (i = 0; i < NAN_MAX_SVC_INST; i++) {
  2859. if (update) {
  2860. if (cfg->svc_info[i].svc_id == svc_id) {
  2861. svc_info = &cfg->svc_info[i];
  2862. break;
  2863. } else {
  2864. continue;
  2865. }
  2866. }
  2867. if (!cfg->svc_info[i].svc_id) {
  2868. svc_info = &cfg->svc_info[i];
  2869. break;
  2870. }
  2871. }
  2872. if (i == NAN_MAX_SVC_INST) {
  2873. WL_ERR(("%s:cannot accomodate ranging session\n", __FUNCTION__));
  2874. ret = BCME_NORESOURCE;
  2875. goto fail;
  2876. }
  2877. if (cmd_data->sde_control_flag & NAN_SDE_CF_RANGING_REQUIRED) {
  2878. WL_TRACE(("%s: updating ranging info, enabling", __FUNCTION__));
  2879. svc_info->status = 1;
  2880. svc_info->ranging_interval = cmd_data->ranging_intvl_msec;
  2881. svc_info->ranging_ind = cmd_data->ranging_indication;
  2882. svc_info->ingress_limit = cmd_data->ingress_limit;
  2883. svc_info->egress_limit = cmd_data->egress_limit;
  2884. svc_info->ranging_required = 1;
  2885. } else {
  2886. WL_TRACE(("%s: updating ranging info, disabling", __FUNCTION__));
  2887. svc_info->status = 0;
  2888. svc_info->ranging_interval = 0;
  2889. svc_info->ranging_ind = 0;
  2890. svc_info->ingress_limit = 0;
  2891. svc_info->egress_limit = 0;
  2892. svc_info->ranging_required = 0;
  2893. }
  2894. /* Reset Range status flags on svc creation/update */
  2895. svc_info->svc_range_status = 0;
  2896. svc_info->flags = cmd_data->flags;
  2897. if (cmd_id == WL_NAN_CMD_SD_SUBSCRIBE) {
  2898. svc_info->svc_id = cmd_data->sub_id;
  2899. if ((cmd_data->flags & WL_NAN_SUB_ACTIVE) &&
  2900. (cmd_data->tx_match.dlen)) {
  2901. ret = memcpy_s(svc_info->tx_match_filter, sizeof(svc_info->tx_match_filter),
  2902. cmd_data->tx_match.data, cmd_data->tx_match.dlen);
  2903. if (ret != BCME_OK) {
  2904. WL_ERR(("Failed to copy tx match filter data\n"));
  2905. goto fail;
  2906. }
  2907. svc_info->tx_match_filter_len = cmd_data->tx_match.dlen;
  2908. }
  2909. } else {
  2910. svc_info->svc_id = cmd_data->pub_id;
  2911. }
  2912. ret = memcpy_s(svc_info->svc_hash, sizeof(svc_info->svc_hash),
  2913. cmd_data->svc_hash.data, WL_NAN_SVC_HASH_LEN);
  2914. if (ret != BCME_OK) {
  2915. WL_ERR(("Failed to copy svc hash\n"));
  2916. }
  2917. fail:
  2918. return ret;
  2919. }
  2920. static int
  2921. wl_cfgnan_clear_svc_ranging_inst(struct bcm_cfg80211 *cfg, uint8 svc_id)
  2922. {
  2923. nan_ranging_inst_t *ranging_inst;
  2924. int i = 0, j;
  2925. int ret = BCME_OK;
  2926. nan_svc_info_t *svc = wl_cfgnan_get_svc_inst(cfg, svc_id, 0);
  2927. if (!svc) {
  2928. WL_ERR(("\n svc not found \n"));
  2929. ret = BCME_NOTFOUND;
  2930. goto done;
  2931. }
  2932. for (i = 0; i < NAN_MAX_RANGING_INST; i++) {
  2933. ranging_inst = &(cfg->nan_ranging_info[i]);
  2934. if (ranging_inst->range_id) {
  2935. for (j = 0; j < MAX_SUBSCRIBES; j++) {
  2936. if (svc == ranging_inst->svc_idx[j]) {
  2937. ranging_inst->num_svc_ctx--;
  2938. ranging_inst->svc_idx[j] = NULL;
  2939. }
  2940. }
  2941. }
  2942. }
  2943. done:
  2944. return ret;
  2945. }
  2946. #ifdef RTT_SUPPORT
  2947. /* API to terminate/clear all directed nan-rtt sessions.
  2948. * Can be called from framework RTT stop context
  2949. */
int
wl_cfgnan_terminate_directed_rtt_sessions(struct net_device *ndev,
	struct bcm_cfg80211 *cfg)
{
	nan_ranging_inst_t *ranging_inst;
	int i, ret = BCME_OK;
	uint32 status;
	/* Walk every slot; only directed (host-requested) RTT sessions qualify */
	for (i = 0; i < NAN_MAX_RANGING_INST; i++) {
		ranging_inst = &cfg->nan_ranging_info[i];
		if (ranging_inst->range_id && ranging_inst->range_type == RTT_TYPE_NAN_DIRECTED) {
			if (ranging_inst->range_status == NAN_RANGING_IN_PROGRESS) {
				/* cancel failure is logged but does not stop the sweep */
				ret = wl_cfgnan_cancel_ranging(ndev, cfg, ranging_inst->range_id,
					NAN_RNG_TERM_FLAG_IMMEDIATE, &status);
				if (unlikely(ret) || unlikely(status)) {
					WL_ERR(("nan range cancel failed ret = %d status = %d\n",
						ret, status));
				}
			}
			/* reset/reschedule geofence state for this instance */
			wl_cfgnan_reset_geofence_ranging(cfg, ranging_inst,
				RTT_SHCED_HOST_DIRECTED_TERM);
		}
	}
	/* Note: returns the result of the LAST cancel attempt only */
	return ret;
}
  2974. #endif /* RTT_SUPPORT */
  2975. /*
  2976. * suspend ongoing geofence ranging session
  2977. * with a peer if on-going ranging is with given peer
  2978. * If peer NULL,
  2979. * Suspend on-going ranging blindly
  2980. * Do nothing on:
  2981. * If ranging is not in progress
  2982. * If ranging in progress but not with given peer
  2983. */
int
wl_cfgnan_suspend_geofence_rng_session(struct net_device *ndev,
	struct ether_addr *peer, int suspend_reason, u8 cancel_flags)
{
	int ret = BCME_OK;
	uint32 status;
	nan_ranging_inst_t *ranging_inst = NULL;
	struct ether_addr* peer_addr = NULL;
	struct bcm_cfg80211 *cfg = wl_get_cfg(ndev);
#ifdef RTT_SUPPORT
	dhd_pub_t *dhd = (struct dhd_pub *)(cfg->pub);
	rtt_geofence_target_info_t *geofence_target_info;
	/* current geofence target defines which session (if any) is on-going */
	geofence_target_info = dhd_rtt_get_geofence_current_target(dhd);
	if (!geofence_target_info) {
		goto exit;
	}
	peer_addr = &geofence_target_info->peer_addr;
	ranging_inst = wl_cfgnan_check_for_ranging(cfg, peer_addr);
	if (dhd_rtt_get_geofence_rtt_state(dhd) == FALSE) {
		WL_DBG(("Geofencing Ranging not in progress, suspend req dropped\n"));
		goto exit;
	}
	/* caller asked about a specific peer that is not the current target */
	if (peer && memcmp(peer_addr, peer, ETHER_ADDR_LEN)) {
		if (suspend_reason == RTT_GEO_SUSPN_HOST_NDP_TRIGGER ||
			suspend_reason == RTT_GEO_SUSPN_PEER_NDP_TRIGGER) {
			/* NDP and Ranging can coexist with different Peers */
			WL_DBG(("Geofencing Ranging not in progress with given peer,"
				" suspend req dropped\n"));
			goto exit;
		}
	}
#endif /* RTT_SUPPORT */
	/* NOTE(review): without RTT_SUPPORT ranging_inst stays NULL here,
	 * making the function a no-op apart from this assert.
	 */
	ASSERT((ranging_inst != NULL));
	if (ranging_inst) {
		if (ranging_inst->range_status != NAN_RANGING_IN_PROGRESS) {
			goto exit;
		}
		cancel_flags |= NAN_RNG_TERM_FLAG_IMMEDIATE;
		ret = wl_cfgnan_cancel_ranging(ndev, cfg,
			ranging_inst->range_id, cancel_flags, &status);
		if (unlikely(ret) || unlikely(status)) {
			WL_ERR(("Geofence Range suspended failed, err = %d, status = %d,"
				" range_id = %d, suspend_reason = %d, " MACDBG " \n",
				ret, status, ranging_inst->range_id,
				suspend_reason, MAC2STRDBG(peer_addr)));
		}
		/* instance kept (not cleared): session can be re-triggered later */
		ranging_inst->range_status = NAN_RANGING_REQUIRED;
		WL_INFORM_MEM(("Geofence Range suspended, range_id = %d,"
			" cancel_reason = %d, " MACDBG " \n", ranging_inst->range_id,
			suspend_reason, MAC2STRDBG(peer_addr)));
#ifdef RTT_SUPPORT
		/* Set geofence RTT in progress state to false */
		dhd_rtt_set_geofence_rtt_state(dhd, FALSE);
#endif /* RTT_SUPPORT */
	}
exit:
	/* Post pending discovery results */
	if (ranging_inst &&
		((suspend_reason == RTT_GEO_SUSPN_HOST_NDP_TRIGGER) ||
		(suspend_reason == RTT_GEO_SUSPN_PEER_NDP_TRIGGER))) {
		wl_cfgnan_disc_result_on_geofence_cancel(cfg, ranging_inst);
	}
	return ret;
}
  3048. static void
  3049. wl_cfgnan_clear_svc_cache(struct bcm_cfg80211 *cfg,
  3050. wl_nan_instance_id svc_id)
  3051. {
  3052. nan_svc_info_t *svc;
  3053. svc = wl_cfgnan_get_svc_inst(cfg, svc_id, 0);
  3054. if (svc) {
  3055. WL_DBG(("clearing cached svc info for svc id %d\n", svc_id));
  3056. memset(svc, 0, sizeof(*svc));
  3057. }
  3058. }
/*
 * Cancel all initiator-role ranging instances that no longer have any
 * service contexts attached, clean their geofence targets (RTT_SUPPORT)
 * and clear the instance slots.
 */
static int
wl_cfgnan_terminate_ranging_sessions(struct net_device *ndev,
	struct bcm_cfg80211 *cfg, uint8 svc_id)
{
	/* cancel all related ranging instances */
	uint8 i;
	int ret = BCME_OK;
	uint32 status;
	nan_ranging_inst_t *ranging_inst;
#ifdef RTT_SUPPORT
	int8 index = -1;
	dhd_pub_t *dhd = (struct dhd_pub *)(cfg->pub);
	rtt_geofence_target_info_t* geofence_target;
#endif /* RTT_SUPPORT */
	for (i = 0; i < NAN_MAX_RANGING_INST; i++) {
		ranging_inst = &cfg->nan_ranging_info[i];
		/* only orphaned (no svc ctx) initiator instances are torn down */
		if (ranging_inst->num_svc_ctx == 0 &&
			ranging_inst->range_role == NAN_RANGING_ROLE_INITIATOR) {
			if (ranging_inst->range_id) {
				ret = wl_cfgnan_cancel_ranging(ndev, cfg, ranging_inst->range_id,
					NAN_RNG_TERM_FLAG_NONE, &status);
				if (unlikely(ret) || unlikely(status)) {
					WL_ERR(("%s:nan range cancel failed ret = %d status = %d\n",
						__FUNCTION__, ret, status));
					/* abort the sweep on first cancel failure */
					goto exit;
				}
				WL_DBG(("Range cancelled \n"));
			}
#ifdef RTT_SUPPORT
			/* Set geofence RTT in progress state to false */
			geofence_target = dhd_rtt_get_geofence_target(dhd,
				&ranging_inst->peer_addr, &index);
			if (geofence_target) {
				dhd_rtt_remove_geofence_target(dhd, &geofence_target->peer_addr);
			}
			WL_INFORM_MEM(("Removing Ranging Instance " MACDBG "\n",
				MAC2STRDBG(&(ranging_inst->peer_addr))));
			bzero(ranging_inst, sizeof(nan_ranging_inst_t));
			if (index == 0) {
				/* If the target was the geofence queue head */
				dhd_rtt_set_geofence_rtt_state(dhd, FALSE);
			}
#endif /* RTT_SUPPORT */
		}
	}
exit:
	return ret;
}
  3107. /*
  3108. * Store svc_ctx for processing during RNG_RPT
  3109. * Return BCME_OK only when svc is added
  3110. */
  3111. static int
  3112. wl_cfgnan_update_ranging_svc_inst(nan_ranging_inst_t *ranging_inst,
  3113. nan_svc_info_t *svc)
  3114. {
  3115. int ret = BCME_OK;
  3116. int i = 0;
  3117. for (i = 0; i < MAX_SUBSCRIBES; i++) {
  3118. if (ranging_inst->svc_idx[i] == svc) {
  3119. WL_DBG(("SVC Ctx for ranging already present, "
  3120. " Duplication not supported: sub_id: %d\n", svc->svc_id));
  3121. ret = BCME_UNSUPPORTED;
  3122. goto done;
  3123. }
  3124. }
  3125. for (i = 0; i < MAX_SUBSCRIBES; i++) {
  3126. if (ranging_inst->svc_idx[i]) {
  3127. continue;
  3128. } else {
  3129. WL_DBG(("Adding SVC Ctx for ranging..svc_id %d\n", svc->svc_id));
  3130. ranging_inst->svc_idx[i] = svc;
  3131. ranging_inst->num_svc_ctx++;
  3132. ret = BCME_OK;
  3133. goto done;
  3134. }
  3135. }
  3136. if (i == MAX_SUBSCRIBES) {
  3137. WL_ERR(("wl_cfgnan_update_ranging_svc_inst: "
  3138. "No resource to hold Ref SVC ctx..svc_id %d\n", svc->svc_id));
  3139. ret = BCME_NORESOURCE;
  3140. goto done;
  3141. }
  3142. done:
  3143. return ret;
  3144. }
  3145. #ifdef RTT_SUPPORT
/*
 * Kick off a geofence ranging request towards the given peer using the
 * first service attached to its ranging instance. err_at encodes where a
 * failure happened (1=no inst, 2=busy, 3=trigger failed, 4=already in prog)
 * for the diagnostic log at exit.
 */
int
wl_cfgnan_trigger_geofencing_ranging(struct net_device *dev,
	struct ether_addr *peer_addr)
{
	int ret = BCME_OK;
	int err_at = 0;
	struct bcm_cfg80211 *cfg = wl_get_cfg(dev);
	int8 index = -1;
	dhd_pub_t *dhd = (struct dhd_pub *)(cfg->pub);
	rtt_geofence_target_info_t* geofence_target;
	nan_ranging_inst_t *ranging_inst;
	ranging_inst = wl_cfgnan_check_for_ranging(cfg, peer_addr);
	if (!ranging_inst) {
		WL_INFORM_MEM(("Ranging Entry for peer:" MACDBG ", not found\n",
			MAC2STRDBG(peer_addr)));
		ASSERT(0);
		/* Ranging inst should have been added before adding target */
		dhd_rtt_remove_geofence_target(dhd, peer_addr);
		ret = BCME_ERROR;
		err_at = 1;
		goto exit;
	}
	ASSERT(ranging_inst->range_status !=
		NAN_RANGING_IN_PROGRESS);
	if (ranging_inst->range_status !=
		NAN_RANGING_IN_PROGRESS) {
		WL_DBG(("Trigger range request with first svc in svc list of range inst\n"));
		ret = wl_cfgnan_trigger_ranging(bcmcfg_to_prmry_ndev(cfg),
			cfg, ranging_inst, ranging_inst->svc_idx[0],
			NAN_RANGE_REQ_CMD, TRUE);
		if (ret != BCME_OK) {
			/* Unsupported is for already ranging session for peer */
			if (ret == BCME_BUSY) {
				/* TODO: Attempt again over a timer */
				err_at = 2;
			} else {
				/* Remove target and clean ranging inst */
				geofence_target = dhd_rtt_get_geofence_target(dhd,
					&ranging_inst->peer_addr, &index);
				if (geofence_target) {
					dhd_rtt_remove_geofence_target(dhd,
						&geofence_target->peer_addr);
				}
				bzero(ranging_inst, sizeof(nan_ranging_inst_t));
				err_at = 3;
				goto exit;
			}
		}
	} else {
		/* already in progress..This should not happen */
		ASSERT(0);
		ret = BCME_ERROR;
		err_at = 4;
		goto exit;
	}
exit:
	if (ret) {
		WL_ERR(("wl_cfgnan_trigger_geofencing_ranging: Failed to "
			"trigger ranging, peer: " MACDBG " ret"
			" = (%d), err_at = %d\n", MAC2STRDBG(peer_addr),
			ret, err_at));
	}
	return ret;
}
  3210. #endif /* RTT_SUPPORT */
/*
 * On a subscribe discovery result, check whether the matched service
 * requires ranging; if so, create/fetch a ranging instance for the remote
 * NMI, add it as a geofence target (RTT_SUPPORT) and attach the svc context.
 * Returns BCME_UNSUPPORTED when the service does not require ranging, so
 * the caller can forward the discovery result directly.
 */
static int
wl_cfgnan_check_disc_result_for_ranging(struct bcm_cfg80211 *cfg,
	nan_event_data_t* nan_event_data)
{
	nan_svc_info_t *svc;
	int ret = BCME_OK;
#ifdef RTT_SUPPORT
	rtt_geofence_target_info_t geofence_target;
	dhd_pub_t *dhd = (struct dhd_pub *)(cfg->pub);
	uint8 index;
#endif /* RTT_SUPPORT */
	bool add_target;
	svc = wl_cfgnan_get_svc_inst(cfg, nan_event_data->sub_id, 0);
	if (svc && svc->ranging_required) {
		nan_ranging_inst_t *ranging_inst;
		ranging_inst = wl_cfgnan_get_ranging_inst(cfg,
			&nan_event_data->remote_nmi,
			NAN_RANGING_ROLE_INITIATOR);
		if (!ranging_inst) {
			ret = BCME_NORESOURCE;
			goto exit;
		}
		ASSERT(ranging_inst->range_role != NAN_RANGING_ROLE_INVALID);
		/* For responder role, range state should be in progress only */
		ASSERT(ranging_inst->range_role == NAN_RANGING_ROLE_INITIATOR ||
			ranging_inst->range_status == NAN_RANGING_IN_PROGRESS);
		/*
		 * On rec disc result with ranging required, add target, if
		 * ranging role is responder (range state has to be in prog always)
		 * Or ranging role is initiator and ranging is not already in prog
		 */
		add_target = ((ranging_inst->range_role == NAN_RANGING_ROLE_RESPONDER) ||
			((ranging_inst->range_role == NAN_RANGING_ROLE_INITIATOR) &&
			(ranging_inst->range_status != NAN_RANGING_IN_PROGRESS)));
		if (add_target) {
			WL_DBG(("Add Range request to geofence target list\n"));
#ifdef RTT_SUPPORT
			memcpy(&geofence_target.peer_addr, &nan_event_data->remote_nmi,
				ETHER_ADDR_LEN);
			/* check if target is already added */
			if (!dhd_rtt_get_geofence_target(dhd, &nan_event_data->remote_nmi, &index))
			{
				ret = dhd_rtt_add_geofence_target(dhd, &geofence_target);
				if (unlikely(ret)) {
					WL_ERR(("Failed to add geofence Tgt, ret = (%d)\n", ret));
					/* roll back: clear the just-created instance */
					bzero(ranging_inst, sizeof(*ranging_inst));
					goto exit;
				} else {
					WL_INFORM_MEM(("Geofence Tgt Added:" MACDBG " sub_id:%d\n",
						MAC2STRDBG(&geofence_target.peer_addr),
						svc->svc_id));
				}
				ranging_inst->range_type = RTT_TYPE_NAN_GEOFENCE;
			}
#endif /* RTT_SUPPORT */
			if (wl_cfgnan_update_ranging_svc_inst(ranging_inst, svc)
				!= BCME_OK) {
				goto exit;
			}
#ifdef RTT_SUPPORT
			if (ranging_inst->range_role == NAN_RANGING_ROLE_RESPONDER) {
				/* Adding RTT target while responder, leads to role concurrency */
				dhd_rtt_set_role_concurrency_state(dhd, TRUE);
			}
			else {
				/* Trigger/Reset geofence RTT */
				wl_cfgnan_reset_geofence_ranging(cfg, ranging_inst,
					RTT_SCHED_SUB_MATCH);
			}
#endif /* RTT_SUPPORT */
		} else {
			/* Target already added, check & add svc_inst ref to rang_inst */
			wl_cfgnan_update_ranging_svc_inst(ranging_inst, svc);
		}
		/* Disc event will be given on receving range_rpt event */
		WL_TRACE(("Disc event will given when Range RPT event is recvd"));
	} else {
		ret = BCME_UNSUPPORTED;
	}
exit:
	return ret;
}
  3293. bool
  3294. wl_cfgnan_ranging_allowed(struct bcm_cfg80211 *cfg)
  3295. {
  3296. int i = 0;
  3297. uint8 rng_progress_count = 0;
  3298. nan_ranging_inst_t *ranging_inst = NULL;
  3299. for (i = 0; i < NAN_MAX_RANGING_INST; i++) {
  3300. ranging_inst = &cfg->nan_ranging_info[i];
  3301. if (ranging_inst->range_status == NAN_RANGING_IN_PROGRESS) {
  3302. rng_progress_count++;
  3303. }
  3304. }
  3305. if (rng_progress_count == NAN_MAX_RANGING_SSN_ALLOWED) {
  3306. return FALSE;
  3307. }
  3308. return TRUE;
  3309. }
  3310. uint8
  3311. wl_cfgnan_cancel_rng_responders(struct net_device *ndev,
  3312. struct bcm_cfg80211 *cfg)
  3313. {
  3314. int i = 0;
  3315. uint8 num_resp_cancelled = 0;
  3316. int status, ret;
  3317. nan_ranging_inst_t *ranging_inst = NULL;
  3318. for (i = 0; i < NAN_MAX_RANGING_INST; i++) {
  3319. ranging_inst = &cfg->nan_ranging_info[i];
  3320. if (ranging_inst->range_status == NAN_RANGING_IN_PROGRESS &&
  3321. ranging_inst->range_role == NAN_RANGING_ROLE_RESPONDER) {
  3322. num_resp_cancelled++;
  3323. WL_ERR((" Cancelling responder\n"));
  3324. ret = wl_cfgnan_cancel_ranging(bcmcfg_to_prmry_ndev(cfg), cfg,
  3325. ranging_inst->range_id, NAN_RNG_TERM_FLAG_IMMEDIATE, &status);
  3326. if (unlikely(ret) || unlikely(status)) {
  3327. WL_ERR(("wl_cfgnan_cancel_rng_responders: Failed to cancel"
  3328. " existing ranging, ret = (%d)\n", ret));
  3329. }
  3330. WL_INFORM_MEM(("Removing Ranging Instance " MACDBG "\n",
  3331. MAC2STRDBG(&(ranging_inst->peer_addr))));
  3332. bzero(ranging_inst, sizeof(*ranging_inst));
  3333. }
  3334. }
  3335. return num_resp_cancelled;
  3336. }
  3337. #ifdef RTT_SUPPORT
/* ranging request event handler */
/*
 * Handle a firmware WL_NAN_EV rng-request indication from a peer.
 * Accepts the request (possibly after silently tearing down an existing
 * geofence/responder session with the same peer), rejects it when we are a
 * direct RTT initiator towards that peer, and always sends a range response
 * (accept/reject) via wl_cfgnan_trigger_ranging().
 */
static int
wl_cfgnan_handle_ranging_ind(struct bcm_cfg80211 *cfg,
	wl_nan_ev_rng_req_ind_t *rng_ind)
{
	int ret = BCME_OK;
	nan_ranging_inst_t *ranging_inst = NULL;
	uint32 status;
	uint8 cancel_flags = 0;
	bool accept = TRUE;
	nan_ranging_inst_t tmp_rng_inst;
	struct net_device *ndev = bcmcfg_to_prmry_ndev(cfg);
	WL_DBG(("Trigger range response\n"));
	/* check if we are already having any ranging session with peer.
	 * If so below are the policies
	 * If we are already a Geofence Initiator or responder w.r.t the peer
	 * then silently teardown the current session and accept the REQ.
	 * If we are in direct rtt initiator role then reject.
	 */
	ranging_inst = wl_cfgnan_check_for_ranging(cfg, &(rng_ind->peer_m_addr));
	if (ranging_inst) {
		if (ranging_inst->range_type == RTT_TYPE_NAN_GEOFENCE ||
			ranging_inst->range_role == NAN_RANGING_ROLE_RESPONDER) {
			WL_INFORM_MEM(("Already responder for the Peer, cancel current ssn"
				" and accept new one\n"));
			cancel_flags = NAN_RNG_TERM_FLAG_IMMEDIATE |
				NAN_RNG_TERM_FLAG_SILIENT_TEARDOWN;
			if (ranging_inst->range_type == RTT_TYPE_NAN_GEOFENCE) {
				/* geofence sessions go through the suspend path */
				wl_cfgnan_suspend_geofence_rng_session(ndev,
					&(rng_ind->peer_m_addr), RTT_GEO_SUSPN_PEER_RTT_TRIGGER,
					cancel_flags);
			} else {
				ret = wl_cfgnan_cancel_ranging(ndev, cfg,
					ranging_inst->range_id, cancel_flags, &status);
				if (unlikely(ret)) {
					WL_ERR(("wl_cfgnan_handle_ranging_ind: Failed to cancel"
						" existing ranging, ret = (%d)\n", ret));
					goto done;
				}
			}
			/* repurpose the instance as responder for the new request */
			ranging_inst->range_status = NAN_RANGING_REQUIRED;
			ranging_inst->range_role = NAN_RANGING_ROLE_RESPONDER;
			ranging_inst->range_type = 0;
		} else {
			WL_ERR(("Reject the RNG_REQ_IND in direct rtt initiator role\n"));
			ret = BCME_BUSY;
			goto done;
		}
	} else {
		/* Check if new Ranging session is allowed */
		if (!wl_cfgnan_ranging_allowed(cfg)) {
			WL_ERR(("Cannot allow more ranging sessions \n"));
			ret = BCME_NORESOURCE;
			goto done;
		}
		ranging_inst = wl_cfgnan_get_ranging_inst(cfg, &rng_ind->peer_m_addr,
			NAN_RANGING_ROLE_RESPONDER);
		if (!ranging_inst) {
			WL_ERR(("Failed to create ranging instance \n"));
			ASSERT(0);
			ret = BCME_NORESOURCE;
			goto done;
		}
	}
done:
	if (ret != BCME_OK) {
		/* reject the REQ using temp ranging instance */
		bzero(&tmp_rng_inst, sizeof(tmp_rng_inst));
		ranging_inst = &tmp_rng_inst;
		(void)memcpy_s(&tmp_rng_inst.peer_addr, ETHER_ADDR_LEN,
			&rng_ind->peer_m_addr, ETHER_ADDR_LEN);
		accept = FALSE;
	}
	ranging_inst->range_id = rng_ind->rng_id;
	/* send the range response (accept or reject) to firmware */
	ret = wl_cfgnan_trigger_ranging(ndev, cfg, ranging_inst,
		NULL, NAN_RANGE_REQ_EVNT, accept);
	if (unlikely(ret) || !accept) {
		WL_ERR(("Failed to handle range request, ret = (%d) accept %d\n",
			ret, accept));
		bzero(ranging_inst, sizeof(*ranging_inst));
	}
	return ret;
}
  3421. #endif /* RTT_SUPPORT */
/* ranging request and response iovar handler */
/* Issues a NAN ranging iovar to firmware: either a range REQUEST
 * (range_cmd == NAN_RANGE_REQ_CMD, optionally parameterized from the
 * service's geofence limits in 'svc') or a range RESPONSE accepting or
 * rejecting a peer request (accept_req).
 *
 * Side effects on success: local availability windows are configured
 * (WL_AVAIL_LOCAL and WL_AVAIL_RANGING), for requests the firmware-
 * assigned range id is read back into ranging_inst->range_id, and
 * range_status is set to NAN_RANGING_IN_PROGRESS to prevent repeated
 * triggers.
 *
 * Returns BCME_OK or an iovar/firmware error code.
 */
int
wl_cfgnan_trigger_ranging(struct net_device *ndev, struct bcm_cfg80211 *cfg,
	void *ranging_ctxt, nan_svc_info_t *svc,
	uint8 range_cmd, bool accept_req)
{
	s32 ret = BCME_OK;
	bcm_iov_batch_buf_t *nan_buf = NULL;
	wl_nan_range_req_t *range_req = NULL;
	wl_nan_range_resp_t *range_resp = NULL;
	bcm_iov_batch_subcmd_t *sub_cmd = NULL;
	uint16 nan_buf_size = NAN_IOCTL_BUF_SIZE;
	uint32 status;
	/* NOTE(review): resp_buf is NAN_IOCTL_BUF_SIZE_MED bytes, but
	 * NAN_IOCTL_BUF_SIZE is passed as its length to the ioctl below —
	 * confirm MED >= NAN_IOCTL_BUF_SIZE so no overflow is possible.
	 */
	uint8 resp_buf[NAN_IOCTL_BUF_SIZE_MED];
	nan_ranging_inst_t *ranging_inst = (nan_ranging_inst_t *)ranging_ctxt;
	nan_avail_cmd_data cmd_data;
	NAN_DBG_ENTER();
	memset_s(&cmd_data, sizeof(cmd_data),
		0, sizeof(cmd_data));
	ret = memcpy_s(&cmd_data.peer_nmi, ETHER_ADDR_LEN,
		&ranging_inst->peer_addr, ETHER_ADDR_LEN);
	if (ret != BCME_OK) {
		WL_ERR(("Failed to copy ranging peer addr\n"));
		goto fail;
	}
	/* Program local and ranging availability before triggering */
	cmd_data.avail_period = NAN_RANGING_PERIOD;
	ret = wl_cfgnan_set_nan_avail(bcmcfg_to_prmry_ndev(cfg),
		cfg, &cmd_data, WL_AVAIL_LOCAL);
	if (ret != BCME_OK) {
		WL_ERR(("Failed to set avail value with type [WL_AVAIL_LOCAL]\n"));
		goto fail;
	}
	ret = wl_cfgnan_set_nan_avail(bcmcfg_to_prmry_ndev(cfg),
		cfg, &cmd_data, WL_AVAIL_RANGING);
	if (unlikely(ret)) {
		WL_ERR(("Failed to set avail value with type [WL_AVAIL_RANGING]\n"));
		goto fail;
	}
	nan_buf = MALLOCZ(cfg->osh, nan_buf_size);
	if (!nan_buf) {
		WL_ERR(("%s: memory allocation failed\n", __func__));
		ret = BCME_NOMEM;
		goto fail;
	}
	nan_buf->version = htol16(WL_NAN_IOV_BATCH_VERSION);
	nan_buf->count = 0;
	nan_buf_size -= OFFSETOF(bcm_iov_batch_buf_t, cmds[0]);
	sub_cmd = (bcm_iov_batch_subcmd_t*)(&nan_buf->cmds[0]);
	sub_cmd->u.options = htol32(BCM_XTLV_OPTION_ALIGN32);
	if (range_cmd == NAN_RANGE_REQ_CMD) {
		/* Build a range REQUEST; is_set stays false so the firmware
		 * response (carrying the assigned range id) is read back.
		 */
		sub_cmd->id = htod16(WL_NAN_CMD_RANGE_REQUEST);
		sub_cmd->len = sizeof(sub_cmd->u.options) + sizeof(wl_nan_range_req_t);
		range_req = (wl_nan_range_req_t *)(sub_cmd->data);
		/* ranging config */
		range_req->peer = ranging_inst->peer_addr;
		if (svc) {
			range_req->interval = svc->ranging_interval;
			/* Limits are in cm from host */
			range_req->ingress = svc->ingress_limit;
			range_req->egress = svc->egress_limit;
		}
		range_req->indication = NAN_RANGING_INDICATE_CONTINUOUS_MASK;
	} else {
		/* range response config */
		sub_cmd->id = htod16(WL_NAN_CMD_RANGE_RESPONSE);
		sub_cmd->len = sizeof(sub_cmd->u.options) + sizeof(wl_nan_range_resp_t);
		range_resp = (wl_nan_range_resp_t *)(sub_cmd->data);
		range_resp->range_id = ranging_inst->range_id;
		range_resp->indication = NAN_RANGING_INDICATE_CONTINUOUS_MASK;
		if (accept_req) {
			range_resp->status = NAN_RNG_REQ_ACCEPTED_BY_HOST;
		} else {
			range_resp->status = NAN_RNG_REQ_REJECTED_BY_HOST;
		}
		nan_buf->is_set = true;
	}
	nan_buf_size -= (sub_cmd->len +
		OFFSETOF(bcm_iov_batch_subcmd_t, u.options));
	nan_buf->count++;
	memset_s(resp_buf, sizeof(resp_buf), 0, sizeof(resp_buf));
	ret = wl_cfgnan_execute_ioctl(ndev, cfg, nan_buf, nan_buf_size,
		&status,
		(void*)resp_buf, NAN_IOCTL_BUF_SIZE);
	if (unlikely(ret) || unlikely(status)) {
		WL_ERR(("nan ranging failed ret = %d status = %d\n",
			ret, status));
		ret = (ret == BCME_OK) ? status : ret;
		goto fail;
	}
	WL_TRACE(("nan ranging trigger successful\n"));
	if (range_cmd == NAN_RANGE_REQ_CMD) {
		WL_MEM(("wl_cfgnan_trigger_ranging: Geofence Triggered"
			" peer: " MACDBG ", ind : %d, ingress : %d, egress : %d\n",
			MAC2STRDBG(&ranging_inst->peer_addr), range_req->indication,
			range_req->ingress, range_req->egress));
	} else {
		/* NOTE(review): range_resp->ingress/egress are never written in
		 * the response path; they log as 0 (buffer was MALLOCZ'd).
		 */
		WL_MEM(("wl_cfgnan_trigger_ranging: Geofence Triggered"
			" peer: " MACDBG ", ind : %d, ingress : %d, egress : %d\n",
			MAC2STRDBG(&ranging_inst->peer_addr), range_resp->indication,
			range_resp->ingress, range_resp->egress));
	}
	/* check the response buff for request */
	if (range_cmd == NAN_RANGE_REQ_CMD) {
		/* firmware returns the assigned range id in the response */
		ret = process_resp_buf(resp_buf + WL_NAN_OBUF_DATA_OFFSET,
			&ranging_inst->range_id, WL_NAN_CMD_RANGE_REQUEST);
		WL_INFORM_MEM(("ranging instance returned %d\n", ranging_inst->range_id));
	}
	/* Preventing continuous range requests */
	ranging_inst->range_status = NAN_RANGING_IN_PROGRESS;
fail:
	if (nan_buf) {
		MFREE(cfg->osh, nan_buf, NAN_IOCTL_BUF_SIZE);
	}
	NAN_DBG_EXIT();
	return ret;
}
  3538. #endif /* WL_NAN_DISC_CACHE */
  3539. static void *wl_nan_bloom_alloc(void *ctx, uint size)
  3540. {
  3541. uint8 *buf;
  3542. BCM_REFERENCE(ctx);
  3543. buf = kmalloc(size, GFP_KERNEL);
  3544. if (!buf) {
  3545. WL_ERR(("%s: memory allocation failed\n", __func__));
  3546. buf = NULL;
  3547. }
  3548. return buf;
  3549. }
  3550. static void wl_nan_bloom_free(void *ctx, void *buf, uint size)
  3551. {
  3552. BCM_REFERENCE(ctx);
  3553. BCM_REFERENCE(size);
  3554. if (buf) {
  3555. kfree(buf);
  3556. }
  3557. }
  3558. static uint wl_nan_hash(void *ctx, uint index, const uint8 *input, uint input_len)
  3559. {
  3560. uint8* filter_idx = (uint8*)ctx;
  3561. uint8 i = (*filter_idx * WL_NAN_HASHES_PER_BLOOM) + (uint8)index;
  3562. uint b = 0;
  3563. /* Steps 1 and 2 as explained in Section 6.2 */
  3564. /* Concatenate index to input and run CRC32 by calling hndcrc32 twice */
  3565. GCC_DIAGNOSTIC_PUSH_SUPPRESS_CAST();
  3566. b = hndcrc32(&i, sizeof(uint8), CRC32_INIT_VALUE);
  3567. b = hndcrc32((uint8*)input, input_len, b);
  3568. GCC_DIAGNOSTIC_POP();
  3569. /* Obtain the last 2 bytes of the CRC32 output */
  3570. b &= NAN_BLOOM_CRC32_MASK;
  3571. /* Step 3 is completed by bcmbloom functions */
  3572. return b;
  3573. }
/* Creates a bloom filter of 'size' bytes and registers
 * WL_NAN_HASHES_PER_BLOOM copies of wl_nan_hash() on it.
 * 'idx' (the bloom filter index) is handed to bcm_bloom_create as the
 * hash-callback context, so it must remain valid while *bp is in use.
 * Returns BCME_OK or the bcmbloom error code; on failure *bp may be
 * partially constructed and is the caller's to clean up.
 */
static int wl_nan_bloom_create(bcm_bloom_filter_t **bp, uint *idx, uint size)
{
	uint i;
	int err;
	err = bcm_bloom_create(wl_nan_bloom_alloc, wl_nan_bloom_free,
		idx, WL_NAN_HASHES_PER_BLOOM, size, bp);
	if (err != BCME_OK) {
		goto exit;
	}
	/* Populate bloom filter with hash functions */
	for (i = 0; i < WL_NAN_HASHES_PER_BLOOM; i++) {
		/* NOTE(review): &i is presumably bcm_bloom_add_hash()'s output
		 * parameter (index of the added hash), not a retained context
		 * pointer — confirm against the bcmbloom API, since 'i' goes
		 * out of scope when this function returns.
		 */
		err = bcm_bloom_add_hash(*bp, wl_nan_hash, &i);
		if (err) {
			WL_ERR(("bcm_bloom_add_hash failed\n"));
			goto exit;
		}
	}
exit:
	return err;
}
  3594. static int
  3595. wl_cfgnan_sd_params_handler(struct net_device *ndev,
  3596. nan_discover_cmd_data_t *cmd_data, uint16 cmd_id,
  3597. void *p_buf, uint16 *nan_buf_size)
  3598. {
  3599. s32 ret = BCME_OK;
  3600. uint8 *pxtlv, *srf = NULL, *srf_mac = NULL, *srftmp = NULL;
  3601. uint16 buflen_avail;
  3602. bcm_iov_batch_subcmd_t *sub_cmd = (bcm_iov_batch_subcmd_t*)(p_buf);
  3603. wl_nan_sd_params_t *sd_params = (wl_nan_sd_params_t *)sub_cmd->data;
  3604. uint16 srf_size = 0;
  3605. uint bloom_size, a;
  3606. bcm_bloom_filter_t *bp = NULL;
  3607. /* Bloom filter index default, indicates it has not been set */
  3608. uint bloom_idx = 0xFFFFFFFF;
  3609. uint16 bloom_len = NAN_BLOOM_LENGTH_DEFAULT;
  3610. /* srf_ctrl_size = bloom_len + src_control field */
  3611. uint16 srf_ctrl_size = bloom_len + 1;
  3612. dhd_pub_t *dhdp = wl_cfg80211_get_dhdp(ndev);
  3613. struct bcm_cfg80211 *cfg = wl_get_cfg(ndev);
  3614. BCM_REFERENCE(cfg);
  3615. NAN_DBG_ENTER();
  3616. if (cmd_data->period) {
  3617. sd_params->awake_dw = cmd_data->period;
  3618. }
  3619. sd_params->period = 1;
  3620. if (cmd_data->ttl) {
  3621. sd_params->ttl = cmd_data->ttl;
  3622. } else {
  3623. sd_params->ttl = WL_NAN_TTL_UNTIL_CANCEL;
  3624. }
  3625. sd_params->flags = 0;
  3626. sd_params->flags = cmd_data->flags;
  3627. /* Nan Service Based event suppression Flags */
  3628. if (cmd_data->recv_ind_flag) {
  3629. /* BIT0 - If set, host wont rec event "terminated" */
  3630. if (CHECK_BIT(cmd_data->recv_ind_flag, WL_NAN_EVENT_SUPPRESS_TERMINATE_BIT)) {
  3631. sd_params->flags |= WL_NAN_SVC_CTRL_SUPPRESS_EVT_TERMINATED;
  3632. }
  3633. /* BIT1 - If set, host wont receive match expiry evt */
  3634. /* TODO: Exp not yet supported */
  3635. if (CHECK_BIT(cmd_data->recv_ind_flag, WL_NAN_EVENT_SUPPRESS_MATCH_EXP_BIT)) {
  3636. WL_DBG(("Need to add match expiry event\n"));
  3637. }
  3638. /* BIT2 - If set, host wont rec event "receive" */
  3639. if (CHECK_BIT(cmd_data->recv_ind_flag, WL_NAN_EVENT_SUPPRESS_RECEIVE_BIT)) {
  3640. sd_params->flags |= WL_NAN_SVC_CTRL_SUPPRESS_EVT_RECEIVE;
  3641. }
  3642. /* BIT3 - If set, host wont rec event "replied" */
  3643. if (CHECK_BIT(cmd_data->recv_ind_flag, WL_NAN_EVENT_SUPPRESS_REPLIED_BIT)) {
  3644. sd_params->flags |= WL_NAN_SVC_CTRL_SUPPRESS_EVT_REPLIED;
  3645. }
  3646. }
  3647. if (cmd_id == WL_NAN_CMD_SD_PUBLISH) {
  3648. sd_params->instance_id = cmd_data->pub_id;
  3649. if (cmd_data->service_responder_policy) {
  3650. /* Do not disturb avail if dam is supported */
  3651. if (FW_SUPPORTED(dhdp, autodam)) {
  3652. /* Nan Accept policy: Per service basis policy
  3653. * Based on this policy(ALL/NONE), responder side
  3654. * will send ACCEPT/REJECT
  3655. * If set, auto datapath responder will be sent by FW
  3656. */
  3657. sd_params->flags |= WL_NAN_SVC_CTRL_AUTO_DPRESP;
  3658. } else {
  3659. WL_ERR(("svc specifiv auto dp resp is not"
  3660. " supported in non-auto dam fw\n"));
  3661. }
  3662. }
  3663. } else if (cmd_id == WL_NAN_CMD_SD_SUBSCRIBE) {
  3664. sd_params->instance_id = cmd_data->sub_id;
  3665. } else {
  3666. ret = BCME_USAGE_ERROR;
  3667. WL_ERR(("wrong command id = %d \n", cmd_id));
  3668. goto fail;
  3669. }
  3670. if ((cmd_data->svc_hash.dlen == WL_NAN_SVC_HASH_LEN) &&
  3671. (cmd_data->svc_hash.data)) {
  3672. ret = memcpy_s((uint8*)sd_params->svc_hash,
  3673. sizeof(sd_params->svc_hash),
  3674. cmd_data->svc_hash.data,
  3675. cmd_data->svc_hash.dlen);
  3676. if (ret != BCME_OK) {
  3677. WL_ERR(("Failed to copy svc hash\n"));
  3678. goto fail;
  3679. }
  3680. #ifdef WL_NAN_DEBUG
  3681. prhex("hashed svc name", cmd_data->svc_hash.data,
  3682. cmd_data->svc_hash.dlen);
  3683. #endif /* WL_NAN_DEBUG */
  3684. } else {
  3685. ret = BCME_ERROR;
  3686. WL_ERR(("invalid svc hash data or length = %d\n",
  3687. cmd_data->svc_hash.dlen));
  3688. goto fail;
  3689. }
  3690. /* check if ranging support is present in firmware */
  3691. if ((cmd_data->sde_control_flag & NAN_SDE_CF_RANGING_REQUIRED) &&
  3692. !FW_SUPPORTED(dhdp, nanrange)) {
  3693. WL_ERR(("Service requires ranging but fw doesnt support it\n"));
  3694. ret = BCME_UNSUPPORTED;
  3695. goto fail;
  3696. }
  3697. /* Optional parameters: fill the sub_command block with service descriptor attr */
  3698. sub_cmd->id = htod16(cmd_id);
  3699. sub_cmd->u.options = htol32(BCM_XTLV_OPTION_ALIGN32);
  3700. sub_cmd->len = sizeof(sub_cmd->u.options) +
  3701. OFFSETOF(wl_nan_sd_params_t, optional[0]);
  3702. pxtlv = (uint8*)&sd_params->optional[0];
  3703. *nan_buf_size -= sub_cmd->len;
  3704. buflen_avail = *nan_buf_size;
  3705. if (cmd_data->svc_info.data && cmd_data->svc_info.dlen) {
  3706. WL_TRACE(("optional svc_info present, pack it\n"));
  3707. ret = bcm_pack_xtlv_entry(&pxtlv, nan_buf_size,
  3708. WL_NAN_XTLV_SD_SVC_INFO,
  3709. cmd_data->svc_info.dlen,
  3710. cmd_data->svc_info.data, BCM_XTLV_OPTION_ALIGN32);
  3711. if (unlikely(ret)) {
  3712. WL_ERR(("%s: fail to pack WL_NAN_XTLV_SD_SVC_INFO\n", __FUNCTION__));
  3713. goto fail;
  3714. }
  3715. }
  3716. if (cmd_data->sde_svc_info.data && cmd_data->sde_svc_info.dlen) {
  3717. WL_TRACE(("optional sdea svc_info present, pack it, %d\n",
  3718. cmd_data->sde_svc_info.dlen));
  3719. ret = bcm_pack_xtlv_entry(&pxtlv, nan_buf_size,
  3720. WL_NAN_XTLV_SD_SDE_SVC_INFO,
  3721. cmd_data->sde_svc_info.dlen,
  3722. cmd_data->sde_svc_info.data, BCM_XTLV_OPTION_ALIGN32);
  3723. if (unlikely(ret)) {
  3724. WL_ERR(("%s: fail to pack sdea svc info\n", __FUNCTION__));
  3725. goto fail;
  3726. }
  3727. }
  3728. if (cmd_data->tx_match.dlen) {
  3729. WL_TRACE(("optional tx match filter presnet (len=%d)\n",
  3730. cmd_data->tx_match.dlen));
  3731. ret = bcm_pack_xtlv_entry(&pxtlv, nan_buf_size,
  3732. WL_NAN_XTLV_CFG_MATCH_TX, cmd_data->tx_match.dlen,
  3733. cmd_data->tx_match.data, BCM_XTLV_OPTION_ALIGN32);
  3734. if (unlikely(ret)) {
  3735. WL_ERR(("%s: failed on xtlv_pack for tx match filter\n", __FUNCTION__));
  3736. goto fail;
  3737. }
  3738. }
  3739. if (cmd_data->life_count) {
  3740. WL_TRACE(("optional life count is present, pack it\n"));
  3741. ret = bcm_pack_xtlv_entry(&pxtlv, nan_buf_size, WL_NAN_XTLV_CFG_SVC_LIFE_COUNT,
  3742. sizeof(cmd_data->life_count), &cmd_data->life_count,
  3743. BCM_XTLV_OPTION_ALIGN32);
  3744. if (unlikely(ret)) {
  3745. WL_ERR(("%s: failed to WL_NAN_XTLV_CFG_SVC_LIFE_COUNT\n", __FUNCTION__));
  3746. goto fail;
  3747. }
  3748. }
  3749. if (cmd_data->use_srf) {
  3750. uint8 srf_control = 0;
  3751. /* set include bit */
  3752. if (cmd_data->srf_include == true) {
  3753. srf_control |= 0x2;
  3754. }
  3755. if (!ETHER_ISNULLADDR(&cmd_data->mac_list.list) &&
  3756. (cmd_data->mac_list.num_mac_addr
  3757. < NAN_SRF_MAX_MAC)) {
  3758. if (cmd_data->srf_type == SRF_TYPE_SEQ_MAC_ADDR) {
  3759. /* mac list */
  3760. srf_size = (cmd_data->mac_list.num_mac_addr
  3761. * ETHER_ADDR_LEN) + NAN_SRF_CTRL_FIELD_LEN;
  3762. WL_TRACE(("srf size = %d\n", srf_size));
  3763. srf_mac = MALLOCZ(cfg->osh, srf_size);
  3764. if (srf_mac == NULL) {
  3765. WL_ERR(("%s: memory allocation failed\n", __FUNCTION__));
  3766. ret = -ENOMEM;
  3767. goto fail;
  3768. }
  3769. ret = memcpy_s(srf_mac, NAN_SRF_CTRL_FIELD_LEN,
  3770. &srf_control, NAN_SRF_CTRL_FIELD_LEN);
  3771. if (ret != BCME_OK) {
  3772. WL_ERR(("Failed to copy srf control\n"));
  3773. goto fail;
  3774. }
  3775. ret = memcpy_s(srf_mac+1, (srf_size - NAN_SRF_CTRL_FIELD_LEN),
  3776. cmd_data->mac_list.list,
  3777. (srf_size - NAN_SRF_CTRL_FIELD_LEN));
  3778. if (ret != BCME_OK) {
  3779. WL_ERR(("Failed to copy srf control mac list\n"));
  3780. goto fail;
  3781. }
  3782. ret = bcm_pack_xtlv_entry(&pxtlv, nan_buf_size,
  3783. WL_NAN_XTLV_CFG_SR_FILTER, srf_size, srf_mac,
  3784. BCM_XTLV_OPTION_ALIGN32);
  3785. if (unlikely(ret)) {
  3786. WL_ERR(("%s: failed to WL_NAN_XTLV_CFG_SR_FILTER\n",
  3787. __FUNCTION__));
  3788. goto fail;
  3789. }
  3790. } else if (cmd_data->srf_type == SRF_TYPE_BLOOM_FILTER) {
  3791. /* Create bloom filter */
  3792. srf = MALLOCZ(cfg->osh, srf_ctrl_size);
  3793. if (srf == NULL) {
  3794. WL_ERR(("%s: memory allocation failed\n", __FUNCTION__));
  3795. ret = -ENOMEM;
  3796. goto fail;
  3797. }
  3798. /* Bloom filter */
  3799. srf_control |= 0x1;
  3800. /* Instance id must be from 1 to 255, 0 is Reserved */
  3801. if (sd_params->instance_id == NAN_ID_RESERVED) {
  3802. WL_ERR(("Invalid instance id: %d\n",
  3803. sd_params->instance_id));
  3804. ret = BCME_BADARG;
  3805. goto fail;
  3806. }
  3807. if (bloom_idx == 0xFFFFFFFF) {
  3808. bloom_idx = sd_params->instance_id % 4;
  3809. } else {
  3810. WL_ERR(("Invalid bloom_idx\n"));
  3811. ret = BCME_BADARG;
  3812. goto fail;
  3813. }
  3814. srf_control |= bloom_idx << 2;
  3815. ret = wl_nan_bloom_create(&bp, &bloom_idx, bloom_len);
  3816. if (unlikely(ret)) {
  3817. WL_ERR(("%s: Bloom create failed\n", __FUNCTION__));
  3818. goto fail;
  3819. }
  3820. srftmp = cmd_data->mac_list.list;
  3821. for (a = 0;
  3822. a < cmd_data->mac_list.num_mac_addr; a++) {
  3823. ret = bcm_bloom_add_member(bp, srftmp, ETHER_ADDR_LEN);
  3824. if (unlikely(ret)) {
  3825. WL_ERR(("%s: Cannot add to bloom filter\n",
  3826. __FUNCTION__));
  3827. goto fail;
  3828. }
  3829. srftmp += ETHER_ADDR_LEN;
  3830. }
  3831. ret = memcpy_s(srf, NAN_SRF_CTRL_FIELD_LEN,
  3832. &srf_control, NAN_SRF_CTRL_FIELD_LEN);
  3833. if (ret != BCME_OK) {
  3834. WL_ERR(("Failed to copy srf control\n"));
  3835. goto fail;
  3836. }
  3837. ret = bcm_bloom_get_filter_data(bp, bloom_len,
  3838. (srf + NAN_SRF_CTRL_FIELD_LEN),
  3839. &bloom_size);
  3840. if (unlikely(ret)) {
  3841. WL_ERR(("%s: Cannot get filter data\n", __FUNCTION__));
  3842. goto fail;
  3843. }
  3844. ret = bcm_pack_xtlv_entry(&pxtlv, nan_buf_size,
  3845. WL_NAN_XTLV_CFG_SR_FILTER, srf_ctrl_size,
  3846. srf, BCM_XTLV_OPTION_ALIGN32);
  3847. if (ret != BCME_OK) {
  3848. WL_ERR(("Failed to pack SR FILTER data, ret = %d\n", ret));
  3849. goto fail;
  3850. }
  3851. } else {
  3852. WL_ERR(("Invalid SRF Type = %d !!!\n",
  3853. cmd_data->srf_type));
  3854. goto fail;
  3855. }
  3856. } else {
  3857. WL_ERR(("Invalid MAC Addr/Too many mac addr = %d !!!\n",
  3858. cmd_data->mac_list.num_mac_addr));
  3859. goto fail;
  3860. }
  3861. }
  3862. if (cmd_data->rx_match.dlen) {
  3863. WL_TRACE(("optional rx match filter is present, pack it\n"));
  3864. ret = bcm_pack_xtlv_entry(&pxtlv, nan_buf_size,
  3865. WL_NAN_XTLV_CFG_MATCH_RX, cmd_data->rx_match.dlen,
  3866. cmd_data->rx_match.data, BCM_XTLV_OPTION_ALIGN32);
  3867. if (unlikely(ret)) {
  3868. WL_ERR(("%s: failed on xtlv_pack for rx match filter\n", __func__));
  3869. goto fail;
  3870. }
  3871. }
  3872. /* Security elements */
  3873. if (cmd_data->csid) {
  3874. WL_TRACE(("Cipher suite type is present, pack it\n"));
  3875. ret = bcm_pack_xtlv_entry(&pxtlv, nan_buf_size,
  3876. WL_NAN_XTLV_CFG_SEC_CSID, sizeof(nan_sec_csid_e),
  3877. (uint8*)&cmd_data->csid, BCM_XTLV_OPTION_ALIGN32);
  3878. if (unlikely(ret)) {
  3879. WL_ERR(("%s: fail to pack on csid\n", __FUNCTION__));
  3880. goto fail;
  3881. }
  3882. }
  3883. if (cmd_data->ndp_cfg.security_cfg) {
  3884. if ((cmd_data->key_type == NAN_SECURITY_KEY_INPUT_PMK) ||
  3885. (cmd_data->key_type == NAN_SECURITY_KEY_INPUT_PASSPHRASE)) {
  3886. if (cmd_data->key.data && cmd_data->key.dlen) {
  3887. WL_TRACE(("optional pmk present, pack it\n"));
  3888. ret = bcm_pack_xtlv_entry(&pxtlv, nan_buf_size,
  3889. WL_NAN_XTLV_CFG_SEC_PMK, cmd_data->key.dlen,
  3890. cmd_data->key.data, BCM_XTLV_OPTION_ALIGN32);
  3891. if (unlikely(ret)) {
  3892. WL_ERR(("%s: fail to pack WL_NAN_XTLV_CFG_SEC_PMK\n",
  3893. __FUNCTION__));
  3894. goto fail;
  3895. }
  3896. }
  3897. } else {
  3898. WL_ERR(("Invalid security key type\n"));
  3899. ret = BCME_BADARG;
  3900. goto fail;
  3901. }
  3902. }
  3903. if (cmd_data->scid.data && cmd_data->scid.dlen) {
  3904. WL_TRACE(("optional scid present, pack it\n"));
  3905. ret = bcm_pack_xtlv_entry(&pxtlv, nan_buf_size, WL_NAN_XTLV_CFG_SEC_SCID,
  3906. cmd_data->scid.dlen, cmd_data->scid.data, BCM_XTLV_OPTION_ALIGN32);
  3907. if (unlikely(ret)) {
  3908. WL_ERR(("%s: fail to pack WL_NAN_XTLV_CFG_SEC_SCID\n", __FUNCTION__));
  3909. goto fail;
  3910. }
  3911. }
  3912. if (cmd_data->sde_control_config) {
  3913. ret = bcm_pack_xtlv_entry(&pxtlv, nan_buf_size,
  3914. WL_NAN_XTLV_SD_SDE_CONTROL,
  3915. sizeof(uint16), (uint8*)&cmd_data->sde_control_flag,
  3916. BCM_XTLV_OPTION_ALIGN32);
  3917. if (ret != BCME_OK) {
  3918. WL_ERR(("%s: fail to pack WL_NAN_XTLV_SD_SDE_CONTROL\n", __FUNCTION__));
  3919. goto fail;
  3920. }
  3921. }
  3922. sub_cmd->len += (buflen_avail - *nan_buf_size);
  3923. fail:
  3924. if (srf) {
  3925. MFREE(cfg->osh, srf, srf_ctrl_size);
  3926. }
  3927. if (srf_mac) {
  3928. MFREE(cfg->osh, srf_mac, srf_size);
  3929. }
  3930. NAN_DBG_EXIT();
  3931. return ret;
  3932. }
/* Adds to *data_size the 32-bit-aligned XTLV space required for every
 * optional discovery parameter present in cmd_data (svc info, SDE svc
 * info, tx/rx match filters, SRF, CSID, key, SCID, SDE control, life
 * count). Mirrors the packing order of wl_cfgnan_sd_params_handler().
 * Always returns BCME_OK.
 */
static int
wl_cfgnan_aligned_data_size_of_opt_disc_params(uint16 *data_size, nan_discover_cmd_data_t *cmd_data)
{
	s32 ret = BCME_OK;
	if (cmd_data->svc_info.dlen)
		*data_size += ALIGN_SIZE(cmd_data->svc_info.dlen + NAN_XTLV_ID_LEN_SIZE, 4);
	if (cmd_data->sde_svc_info.dlen)
		*data_size += ALIGN_SIZE(cmd_data->sde_svc_info.dlen + NAN_XTLV_ID_LEN_SIZE, 4);
	if (cmd_data->tx_match.dlen)
		*data_size += ALIGN_SIZE(cmd_data->tx_match.dlen + NAN_XTLV_ID_LEN_SIZE, 4);
	if (cmd_data->rx_match.dlen)
		*data_size += ALIGN_SIZE(cmd_data->rx_match.dlen + NAN_XTLV_ID_LEN_SIZE, 4);
	if (cmd_data->use_srf) {
		if (cmd_data->srf_type == SRF_TYPE_SEQ_MAC_ADDR) {
			*data_size += (cmd_data->mac_list.num_mac_addr * ETHER_ADDR_LEN)
				+ NAN_SRF_CTRL_FIELD_LEN;
		} else { /* Bloom filter type */
			*data_size += NAN_BLOOM_LENGTH_DEFAULT + 1;
		}
		/* NOTE(review): this aligns and adds the entire running
		 * *data_size, not just the SRF contribution — the result
		 * over-reserves (harmless for allocation sizing) rather
		 * than adding ALIGN_SIZE(srf_len + NAN_XTLV_ID_LEN_SIZE, 4).
		 * Confirm intent before "fixing".
		 */
		*data_size += ALIGN_SIZE(*data_size + NAN_XTLV_ID_LEN_SIZE, 4);
	}
	if (cmd_data->csid)
		*data_size += ALIGN_SIZE(sizeof(nan_sec_csid_e) + NAN_XTLV_ID_LEN_SIZE, 4);
	if (cmd_data->key.dlen)
		*data_size += ALIGN_SIZE(cmd_data->key.dlen + NAN_XTLV_ID_LEN_SIZE, 4);
	if (cmd_data->scid.dlen)
		*data_size += ALIGN_SIZE(cmd_data->scid.dlen + NAN_XTLV_ID_LEN_SIZE, 4);
	if (cmd_data->sde_control_config)
		*data_size += ALIGN_SIZE(sizeof(uint16) + NAN_XTLV_ID_LEN_SIZE, 4);
	if (cmd_data->life_count)
		*data_size += ALIGN_SIZE(sizeof(cmd_data->life_count) + NAN_XTLV_ID_LEN_SIZE, 4);
	return ret;
}
  3966. static int
  3967. wl_cfgnan_aligned_data_size_of_opt_dp_params(uint16 *data_size, nan_datapath_cmd_data_t *cmd_data)
  3968. {
  3969. s32 ret = BCME_OK;
  3970. if (cmd_data->svc_info.dlen)
  3971. *data_size += ALIGN_SIZE(cmd_data->svc_info.dlen + NAN_XTLV_ID_LEN_SIZE, 4);
  3972. if (cmd_data->key.dlen)
  3973. *data_size += ALIGN_SIZE(cmd_data->key.dlen + NAN_XTLV_ID_LEN_SIZE, 4);
  3974. if (cmd_data->csid)
  3975. *data_size += ALIGN_SIZE(sizeof(nan_sec_csid_e) + NAN_XTLV_ID_LEN_SIZE, 4);
  3976. *data_size += ALIGN_SIZE(WL_NAN_SVC_HASH_LEN + NAN_XTLV_ID_LEN_SIZE, 4);
  3977. return ret;
  3978. }
/* Queries firmware for an existing publish/subscribe service instance
 * (a GET batch iovar carrying only the instance id). Used before a
 * service update to verify that the instance is known to firmware.
 *
 * cmd_id selects WL_NAN_CMD_SD_PUBLISH (uses cmd_data->pub_id) or
 * WL_NAN_CMD_SD_SUBSCRIBE (uses cmd_data->sub_id); any other id is
 * BCME_USAGE_ERROR. The firmware status is returned through
 * cmd_data->status. Returns BCME_OK if the query succeeded.
 */
int
wl_cfgnan_svc_get_handler(struct net_device *ndev,
	struct bcm_cfg80211 *cfg, uint16 cmd_id, nan_discover_cmd_data_t *cmd_data)
{
	bcm_iov_batch_subcmd_t *sub_cmd = NULL;
	uint32 instance_id;
	s32 ret = BCME_OK;
	bcm_iov_batch_buf_t *nan_buf = NULL;
	uint8 *resp_buf = NULL;
	/* request size: batch header/options up to data, plus the id payload */
	uint16 data_size = WL_NAN_OBUF_DATA_OFFSET + sizeof(instance_id);
	NAN_DBG_ENTER();
	nan_buf = MALLOCZ(cfg->osh, data_size);
	if (!nan_buf) {
		WL_ERR(("%s: memory allocation failed\n", __func__));
		ret = BCME_NOMEM;
		goto fail;
	}
	resp_buf = MALLOCZ(cfg->osh, NAN_IOCTL_BUF_SIZE_LARGE);
	if (!resp_buf) {
		WL_ERR(("%s: memory allocation failed\n", __func__));
		ret = BCME_NOMEM;
		goto fail;
	}
	nan_buf->version = htol16(WL_NAN_IOV_BATCH_VERSION);
	nan_buf->count = 1;
	/* check if service is present */
	nan_buf->is_set = false;
	sub_cmd = (bcm_iov_batch_subcmd_t*)(&nan_buf->cmds[0]);
	if (cmd_id == WL_NAN_CMD_SD_PUBLISH) {
		instance_id = cmd_data->pub_id;
	} else if (cmd_id == WL_NAN_CMD_SD_SUBSCRIBE) {
		instance_id = cmd_data->sub_id;
	} else {
		ret = BCME_USAGE_ERROR;
		WL_ERR(("wrong command id = %u\n", cmd_id));
		goto fail;
	}
	/* Fill the sub_command block */
	sub_cmd->id = htod16(cmd_id);
	sub_cmd->len = sizeof(sub_cmd->u.options) + sizeof(instance_id);
	sub_cmd->u.options = htol32(BCM_XTLV_OPTION_ALIGN32);
	ret = memcpy_s(sub_cmd->data, (data_size - WL_NAN_OBUF_DATA_OFFSET),
		&instance_id, sizeof(instance_id));
	if (ret != BCME_OK) {
		WL_ERR(("Failed to copy instance id, ret = %d\n", ret));
		goto fail;
	}
	ret = wl_cfgnan_execute_ioctl(ndev, cfg, nan_buf, data_size,
		&(cmd_data->status), resp_buf, NAN_IOCTL_BUF_SIZE_LARGE);
	if (unlikely(ret) || unlikely(cmd_data->status)) {
		WL_ERR(("nan svc check failed ret = %d status = %d\n", ret, cmd_data->status));
		goto fail;
	} else {
		WL_DBG(("nan svc check successful..proceed to update\n"));
	}
fail:
	if (nan_buf) {
		MFREE(cfg->osh, nan_buf, data_size);
	}
	if (resp_buf) {
		MFREE(cfg->osh, resp_buf, NAN_IOCTL_BUF_SIZE_LARGE);
	}
	NAN_DBG_EXIT();
	return ret;
}
/* Creates or updates a NAN publish/subscribe service in firmware.
 *
 * For an update (cmd_data->svc_update), first checks the instance
 * exists via wl_cfgnan_svc_get_handler(); a WL_NAN_E_BAD_INSTANCE
 * status aborts, and a BCME_DATA_NOTFOUND status from the SET is
 * treated as success ("nothing to update"). The iovar buffer is sized
 * from the fixed wl_nan_sd_params_t header plus the aligned optional
 * parameter sizes, then filled by wl_cfgnan_sd_params_handler().
 * On success (with WL_NAN_DISC_CACHE) the service info is cached.
 *
 * Returns BCME_OK or a BCME_ error code; firmware status is also
 * reported through cmd_data->status.
 */
int
wl_cfgnan_svc_handler(struct net_device *ndev,
	struct bcm_cfg80211 *cfg, uint16 cmd_id, nan_discover_cmd_data_t *cmd_data)
{
	s32 ret = BCME_OK;
	bcm_iov_batch_buf_t *nan_buf = NULL;
	uint16 nan_buf_size;
	uint8 *resp_buf = NULL;
	/* Considering fixed params */
	uint16 data_size = WL_NAN_OBUF_DATA_OFFSET +
		OFFSETOF(wl_nan_sd_params_t, optional[0]);
	if (cmd_data->svc_update) {
		ret = wl_cfgnan_svc_get_handler(ndev, cfg, cmd_id, cmd_data);
		if (ret != BCME_OK) {
			WL_ERR(("Failed to update svc handler, ret = %d\n", ret));
			goto fail;
		} else {
			/* Ignoring any other svc get error */
			if (cmd_data->status == WL_NAN_E_BAD_INSTANCE) {
				WL_ERR(("Bad instance status, failed to update svc handler\n"));
				goto fail;
			}
		}
	}
	/* grow data_size by the aligned size of every optional XTLV */
	ret = wl_cfgnan_aligned_data_size_of_opt_disc_params(&data_size, cmd_data);
	if (unlikely(ret)) {
		WL_ERR(("Failed to get alligned size of optional params\n"));
		goto fail;
	}
	nan_buf_size = data_size;
	NAN_DBG_ENTER();
	nan_buf = MALLOCZ(cfg->osh, data_size);
	if (!nan_buf) {
		WL_ERR(("%s: memory allocation failed\n", __func__));
		ret = BCME_NOMEM;
		goto fail;
	}
	resp_buf = MALLOCZ(cfg->osh, data_size + NAN_IOVAR_NAME_SIZE);
	if (!resp_buf) {
		WL_ERR(("%s: memory allocation failed\n", __func__));
		ret = BCME_NOMEM;
		goto fail;
	}
	nan_buf->version = htol16(WL_NAN_IOV_BATCH_VERSION);
	nan_buf->count = 0;
	nan_buf->is_set = true;
	ret = wl_cfgnan_sd_params_handler(ndev, cmd_data, cmd_id,
		&nan_buf->cmds[0], &nan_buf_size);
	if (unlikely(ret)) {
		WL_ERR((" Service discovery params handler failed, ret = %d\n", ret));
		goto fail;
	}
	nan_buf->count++;
	ret = wl_cfgnan_execute_ioctl(ndev, cfg, nan_buf, data_size,
		&(cmd_data->status), resp_buf, data_size + NAN_IOVAR_NAME_SIZE);
	if (cmd_data->svc_update && (cmd_data->status == BCME_DATA_NOTFOUND)) {
		/* return OK if update tlv data is not present
		 * which means nothing to update
		 */
		cmd_data->status = BCME_OK;
	}
	if (unlikely(ret) || unlikely(cmd_data->status)) {
		WL_ERR(("nan svc failed ret = %d status = %d\n", ret, cmd_data->status));
		goto fail;
	} else {
		WL_DBG(("nan svc successful\n"));
#ifdef WL_NAN_DISC_CACHE
		/* keep the host-side service cache in sync with firmware */
		ret = wl_cfgnan_cache_svc_info(cfg, cmd_data, cmd_id, cmd_data->svc_update);
		if (ret < 0) {
			WL_ERR(("%s: fail to cache svc info, ret=%d\n",
				__FUNCTION__, ret));
			goto fail;
		}
#endif /* WL_NAN_DISC_CACHE */
	}
fail:
	if (nan_buf) {
		MFREE(cfg->osh, nan_buf, data_size);
	}
	if (resp_buf) {
		MFREE(cfg->osh, resp_buf, data_size + NAN_IOVAR_NAME_SIZE);
	}
	NAN_DBG_EXIT();
	return ret;
}
/* Entry point for a host NAN publish command.
 * Validates the mandatory arguments and delegates to
 * wl_cfgnan_svc_handler() with WL_NAN_CMD_SD_PUBLISH.
 * Serialized under the NAN mutex. Returns BCME_OK or an error code.
 */
int
wl_cfgnan_publish_handler(struct net_device *ndev,
	struct bcm_cfg80211 *cfg, nan_discover_cmd_data_t *cmd_data)
{
	int ret = BCME_OK;
	NAN_DBG_ENTER();
	NAN_MUTEX_LOCK();
	/*
	 * proceed only if mandatory arguments are present - publish id,
	 * service hash
	 */
	if ((!cmd_data->pub_id) || (!cmd_data->svc_hash.data) ||
		(!cmd_data->svc_hash.dlen)) {
		WL_ERR(("mandatory arguments are not present\n"));
		ret = BCME_BADARG;
		goto fail;
	}
	ret = wl_cfgnan_svc_handler(ndev, cfg, WL_NAN_CMD_SD_PUBLISH, cmd_data);
	if (ret < 0) {
		WL_ERR(("%s: fail to handle pub, ret=%d\n", __FUNCTION__, ret));
		goto fail;
	}
	WL_INFORM_MEM(("[NAN] Service published for instance id:%d\n", cmd_data->pub_id));
fail:
	NAN_MUTEX_UNLOCK();
	NAN_DBG_EXIT();
	return ret;
}
/* Entry point for a host NAN subscribe command.
 *
 * Validates the mandatory arguments (subscriber id, service hash).
 * On a service update (with WL_NAN_DISC_CACHE) any ranging sessions
 * tied to the subscription are terminated first, and if the ranging
 * requirement or the ingress/egress limits changed, the firmware and
 * local discovery caches for the service are invalidated.
 * Delegates to wl_cfgnan_svc_handler() with WL_NAN_CMD_SD_SUBSCRIBE.
 * Serialized under the NAN mutex. Returns BCME_OK or an error code.
 */
int
wl_cfgnan_subscribe_handler(struct net_device *ndev,
	struct bcm_cfg80211 *cfg, nan_discover_cmd_data_t *cmd_data)
{
	int ret = BCME_OK;
#ifdef WL_NAN_DISC_CACHE
	nan_svc_info_t *svc_info;
	uint8 upd_ranging_required;
#endif /* WL_NAN_DISC_CACHE */
	NAN_DBG_ENTER();
	NAN_MUTEX_LOCK();
	/*
	 * proceed only if mandatory arguments are present - subscriber id,
	 * service hash
	 */
	if ((!cmd_data->sub_id) || (!cmd_data->svc_hash.data) ||
		(!cmd_data->svc_hash.dlen)) {
		WL_ERR(("mandatory arguments are not present\n"));
		ret = BCME_BADARG;
		goto fail;
	}
	/* Check for ranging sessions if any */
	if (cmd_data->svc_update) {
#ifdef WL_NAN_DISC_CACHE
		svc_info = wl_cfgnan_get_svc_inst(cfg, cmd_data->sub_id, 0);
		if (svc_info) {
			wl_cfgnan_clear_svc_ranging_inst(cfg, cmd_data->sub_id);
			/* terminate ranging sessions for this svc, avoid clearing svc cache */
			wl_cfgnan_terminate_ranging_sessions(ndev, cfg, cmd_data->sub_id);
			WL_DBG(("Ranging sessions terminated for svc update\n"));
			upd_ranging_required = !!(cmd_data->sde_control_flag &
				NAN_SDE_CF_RANGING_REQUIRED);
			/* caches are stale if the ranging config changed */
			if ((svc_info->ranging_required ^ upd_ranging_required) ||
				(svc_info->ingress_limit != cmd_data->ingress_limit) ||
				(svc_info->egress_limit != cmd_data->egress_limit)) {
				/* Clear cache info in Firmware */
				ret = wl_cfgnan_clear_disc_cache(cfg, cmd_data->sub_id);
				if (ret != BCME_OK) {
					WL_ERR(("couldn't send clear cache to FW \n"));
					goto fail;
				}
				/* Invalidate local cache info */
				wl_cfgnan_remove_disc_result(cfg, cmd_data->sub_id);
			}
		}
#endif /* WL_NAN_DISC_CACHE */
	}
	ret = wl_cfgnan_svc_handler(ndev, cfg, WL_NAN_CMD_SD_SUBSCRIBE, cmd_data);
	if (ret < 0) {
		WL_ERR(("%s: fail to handle svc, ret=%d\n", __FUNCTION__, ret));
		goto fail;
	}
	WL_INFORM_MEM(("[NAN] Service subscribed for instance id:%d\n", cmd_data->sub_id));
fail:
	NAN_MUTEX_UNLOCK();
	NAN_DBG_EXIT();
	return ret;
}
  4215. static int
  4216. wl_cfgnan_cancel_handler(nan_discover_cmd_data_t *cmd_data,
  4217. uint16 cmd_id, void *p_buf, uint16 *nan_buf_size)
  4218. {
  4219. s32 ret = BCME_OK;
  4220. NAN_DBG_ENTER();
  4221. if (p_buf != NULL) {
  4222. bcm_iov_batch_subcmd_t *sub_cmd = (bcm_iov_batch_subcmd_t*)(p_buf);
  4223. wl_nan_instance_id_t instance_id;
  4224. if (cmd_id == WL_NAN_CMD_SD_CANCEL_PUBLISH) {
  4225. instance_id = cmd_data->pub_id;
  4226. } else if (cmd_id == WL_NAN_CMD_SD_CANCEL_SUBSCRIBE) {
  4227. instance_id = cmd_data->sub_id;
  4228. } else {
  4229. ret = BCME_USAGE_ERROR;
  4230. WL_ERR(("wrong command id = %u\n", cmd_id));
  4231. goto fail;
  4232. }
  4233. /* Fill the sub_command block */
  4234. sub_cmd->id = htod16(cmd_id);
  4235. sub_cmd->len = sizeof(sub_cmd->u.options) + sizeof(instance_id);
  4236. sub_cmd->u.options = htol32(BCM_XTLV_OPTION_ALIGN32);
  4237. ret = memcpy_s(sub_cmd->data, *nan_buf_size,
  4238. &instance_id, sizeof(instance_id));
  4239. if (ret != BCME_OK) {
  4240. WL_ERR(("Failed to copy instance id, ret = %d\n", ret));
  4241. goto fail;
  4242. }
  4243. /* adjust iov data len to the end of last data record */
  4244. *nan_buf_size -= (sub_cmd->len +
  4245. OFFSETOF(bcm_iov_batch_subcmd_t, u.options));
  4246. WL_INFORM_MEM(("[NAN] Service with instance id:%d cancelled\n", instance_id));
  4247. } else {
  4248. WL_ERR(("nan_iov_buf is NULL\n"));
  4249. ret = BCME_ERROR;
  4250. goto fail;
  4251. }
  4252. fail:
  4253. NAN_DBG_EXIT();
  4254. return ret;
  4255. }
  4256. int
  4257. wl_cfgnan_cancel_pub_handler(struct net_device *ndev,
  4258. struct bcm_cfg80211 *cfg, nan_discover_cmd_data_t *cmd_data)
  4259. {
  4260. bcm_iov_batch_buf_t *nan_buf = NULL;
  4261. s32 ret = BCME_OK;
  4262. uint16 nan_buf_size = NAN_IOCTL_BUF_SIZE;
  4263. uint8 resp_buf[NAN_IOCTL_BUF_SIZE];
  4264. NAN_DBG_ENTER();
  4265. NAN_MUTEX_LOCK();
  4266. nan_buf = MALLOCZ(cfg->osh, nan_buf_size);
  4267. if (!nan_buf) {
  4268. WL_ERR(("%s: memory allocation failed\n", __func__));
  4269. ret = BCME_NOMEM;
  4270. goto fail;
  4271. }
  4272. nan_buf->version = htol16(WL_NAN_IOV_BATCH_VERSION);
  4273. nan_buf->count = 0;
  4274. nan_buf_size -= OFFSETOF(bcm_iov_batch_buf_t, cmds[0]);
  4275. /* proceed only if mandatory argument is present - publisher id */
  4276. if (!cmd_data->pub_id) {
  4277. WL_ERR(("mandatory argument is not present\n"));
  4278. ret = BCME_BADARG;
  4279. goto fail;
  4280. }
  4281. #ifdef WL_NAN_DISC_CACHE
  4282. wl_cfgnan_clear_svc_cache(cfg, cmd_data->pub_id);
  4283. #endif /* WL_NAN_DISC_CACHE */
  4284. ret = wl_cfgnan_cancel_handler(cmd_data, WL_NAN_CMD_SD_CANCEL_PUBLISH,
  4285. &nan_buf->cmds[0], &nan_buf_size);
  4286. if (unlikely(ret)) {
  4287. WL_ERR(("cancel publish failed\n"));
  4288. goto fail;
  4289. }
  4290. nan_buf->is_set = true;
  4291. nan_buf->count++;
  4292. memset_s(resp_buf, sizeof(resp_buf), 0, sizeof(resp_buf));
  4293. ret = wl_cfgnan_execute_ioctl(ndev, cfg, nan_buf, nan_buf_size,
  4294. &(cmd_data->status),
  4295. (void*)resp_buf, NAN_IOCTL_BUF_SIZE);
  4296. if (unlikely(ret) || unlikely(cmd_data->status)) {
  4297. WL_ERR(("nan cancel publish failed ret = %d status = %d\n",
  4298. ret, cmd_data->status));
  4299. goto fail;
  4300. }
  4301. WL_DBG(("nan cancel publish successfull\n"));
  4302. wl_cfgnan_remove_inst_id(cfg, cmd_data->pub_id);
  4303. fail:
  4304. if (nan_buf) {
  4305. MFREE(cfg->osh, nan_buf, NAN_IOCTL_BUF_SIZE);
  4306. }
  4307. NAN_MUTEX_UNLOCK();
  4308. NAN_DBG_EXIT();
  4309. return ret;
  4310. }
  4311. int
  4312. wl_cfgnan_cancel_sub_handler(struct net_device *ndev,
  4313. struct bcm_cfg80211 *cfg, nan_discover_cmd_data_t *cmd_data)
  4314. {
  4315. bcm_iov_batch_buf_t *nan_buf = NULL;
  4316. s32 ret = BCME_OK;
  4317. uint16 nan_buf_size = NAN_IOCTL_BUF_SIZE;
  4318. uint8 resp_buf[NAN_IOCTL_BUF_SIZE];
  4319. NAN_DBG_ENTER();
  4320. NAN_MUTEX_LOCK();
  4321. nan_buf = MALLOCZ(cfg->osh, nan_buf_size);
  4322. if (!nan_buf) {
  4323. WL_ERR(("%s: memory allocation failed\n", __func__));
  4324. ret = BCME_NOMEM;
  4325. goto fail;
  4326. }
  4327. nan_buf->version = htol16(WL_NAN_IOV_BATCH_VERSION);
  4328. nan_buf->count = 0;
  4329. nan_buf_size -= OFFSETOF(bcm_iov_batch_buf_t, cmds[0]);
  4330. /* proceed only if mandatory argument is present - subscriber id */
  4331. if (!cmd_data->sub_id) {
  4332. WL_ERR(("mandatory argument is not present\n"));
  4333. ret = BCME_BADARG;
  4334. goto fail;
  4335. }
  4336. #ifdef WL_NAN_DISC_CACHE
  4337. /* terminate ranging sessions for this svc */
  4338. wl_cfgnan_clear_svc_ranging_inst(cfg, cmd_data->sub_id);
  4339. wl_cfgnan_terminate_ranging_sessions(ndev, cfg, cmd_data->sub_id);
  4340. /* clear svc cache for the service */
  4341. wl_cfgnan_clear_svc_cache(cfg, cmd_data->sub_id);
  4342. wl_cfgnan_remove_disc_result(cfg, cmd_data->sub_id);
  4343. #endif /* WL_NAN_DISC_CACHE */
  4344. ret = wl_cfgnan_cancel_handler(cmd_data, WL_NAN_CMD_SD_CANCEL_SUBSCRIBE,
  4345. &nan_buf->cmds[0], &nan_buf_size);
  4346. if (unlikely(ret)) {
  4347. WL_ERR(("cancel subscribe failed\n"));
  4348. goto fail;
  4349. }
  4350. nan_buf->is_set = true;
  4351. nan_buf->count++;
  4352. memset_s(resp_buf, sizeof(resp_buf), 0, sizeof(resp_buf));
  4353. ret = wl_cfgnan_execute_ioctl(ndev, cfg, nan_buf, nan_buf_size,
  4354. &(cmd_data->status),
  4355. (void*)resp_buf, NAN_IOCTL_BUF_SIZE);
  4356. if (unlikely(ret) || unlikely(cmd_data->status)) {
  4357. WL_ERR(("nan cancel subscribe failed ret = %d status = %d\n",
  4358. ret, cmd_data->status));
  4359. goto fail;
  4360. }
  4361. WL_DBG(("subscribe cancel successfull\n"));
  4362. wl_cfgnan_remove_inst_id(cfg, cmd_data->sub_id);
  4363. fail:
  4364. if (nan_buf) {
  4365. MFREE(cfg->osh, nan_buf, NAN_IOCTL_BUF_SIZE);
  4366. }
  4367. NAN_MUTEX_UNLOCK();
  4368. NAN_DBG_EXIT();
  4369. return ret;
  4370. }
  4371. int
  4372. wl_cfgnan_transmit_handler(struct net_device *ndev,
  4373. struct bcm_cfg80211 *cfg, nan_discover_cmd_data_t *cmd_data)
  4374. {
  4375. s32 ret = BCME_OK;
  4376. bcm_iov_batch_buf_t *nan_buf = NULL;
  4377. wl_nan_sd_transmit_t *sd_xmit = NULL;
  4378. bcm_iov_batch_subcmd_t *sub_cmd = NULL;
  4379. bool is_lcl_id = FALSE;
  4380. bool is_dest_id = FALSE;
  4381. bool is_dest_mac = FALSE;
  4382. uint16 buflen_avail;
  4383. uint8 *pxtlv;
  4384. uint16 nan_buf_size;
  4385. uint8 *resp_buf = NULL;
  4386. /* Considering fixed params */
  4387. uint16 data_size = WL_NAN_OBUF_DATA_OFFSET +
  4388. OFFSETOF(wl_nan_sd_transmit_t, opt_tlv);
  4389. data_size = ALIGN_SIZE(data_size, 4);
  4390. ret = wl_cfgnan_aligned_data_size_of_opt_disc_params(&data_size, cmd_data);
  4391. if (unlikely(ret)) {
  4392. WL_ERR(("Failed to get alligned size of optional params\n"));
  4393. goto fail;
  4394. }
  4395. NAN_DBG_ENTER();
  4396. NAN_MUTEX_LOCK();
  4397. nan_buf_size = data_size;
  4398. nan_buf = MALLOCZ(cfg->osh, data_size);
  4399. if (!nan_buf) {
  4400. WL_ERR(("%s: memory allocation failed\n", __func__));
  4401. ret = BCME_NOMEM;
  4402. goto fail;
  4403. }
  4404. resp_buf = MALLOCZ(cfg->osh, data_size + NAN_IOVAR_NAME_SIZE);
  4405. if (!resp_buf) {
  4406. WL_ERR(("%s: memory allocation failed\n", __func__));
  4407. ret = BCME_NOMEM;
  4408. goto fail;
  4409. }
  4410. /* nan transmit */
  4411. nan_buf->version = htol16(WL_NAN_IOV_BATCH_VERSION);
  4412. nan_buf->count = 0;
  4413. nan_buf_size -= OFFSETOF(bcm_iov_batch_buf_t, cmds[0]);
  4414. /*
  4415. * proceed only if mandatory arguments are present - subscriber id,
  4416. * publisher id, mac address
  4417. */
  4418. if ((!cmd_data->local_id) || (!cmd_data->remote_id) ||
  4419. ETHER_ISNULLADDR(&cmd_data->mac_addr.octet)) {
  4420. WL_ERR(("mandatory arguments are not present\n"));
  4421. ret = -EINVAL;
  4422. goto fail;
  4423. }
  4424. sub_cmd = (bcm_iov_batch_subcmd_t*)(&nan_buf->cmds[0]);
  4425. sd_xmit = (wl_nan_sd_transmit_t *)(sub_cmd->data);
  4426. /* local instance id must be from 1 to 255, 0 is reserved */
  4427. if (cmd_data->local_id == NAN_ID_RESERVED) {
  4428. WL_ERR(("Invalid local instance id: %d\n", cmd_data->local_id));
  4429. ret = BCME_BADARG;
  4430. goto fail;
  4431. }
  4432. sd_xmit->local_service_id = cmd_data->local_id;
  4433. is_lcl_id = TRUE;
  4434. /* remote instance id must be from 1 to 255, 0 is reserved */
  4435. if (cmd_data->remote_id == NAN_ID_RESERVED) {
  4436. WL_ERR(("Invalid remote instance id: %d\n", cmd_data->remote_id));
  4437. ret = BCME_BADARG;
  4438. goto fail;
  4439. }
  4440. sd_xmit->requestor_service_id = cmd_data->remote_id;
  4441. is_dest_id = TRUE;
  4442. if (!ETHER_ISNULLADDR(&cmd_data->mac_addr.octet)) {
  4443. ret = memcpy_s(&sd_xmit->destination_addr, ETHER_ADDR_LEN,
  4444. &cmd_data->mac_addr, ETHER_ADDR_LEN);
  4445. if (ret != BCME_OK) {
  4446. WL_ERR(("Failed to copy dest mac address\n"));
  4447. goto fail;
  4448. }
  4449. } else {
  4450. WL_ERR(("Invalid ether addr provided\n"));
  4451. ret = BCME_BADARG;
  4452. goto fail;
  4453. }
  4454. is_dest_mac = TRUE;
  4455. if (cmd_data->priority) {
  4456. sd_xmit->priority = cmd_data->priority;
  4457. }
  4458. sd_xmit->token = cmd_data->token;
  4459. if (cmd_data->recv_ind_flag) {
  4460. /* BIT0 - If set, host wont rec event "txs" */
  4461. if (CHECK_BIT(cmd_data->recv_ind_flag,
  4462. WL_NAN_EVENT_SUPPRESS_FOLLOWUP_RECEIVE_BIT)) {
  4463. sd_xmit->flags = WL_NAN_FUP_SUPR_EVT_TXS;
  4464. }
  4465. }
  4466. /* Optional parameters: fill the sub_command block with service descriptor attr */
  4467. sub_cmd->id = htod16(WL_NAN_CMD_SD_TRANSMIT);
  4468. sub_cmd->len = sizeof(sub_cmd->u.options) +
  4469. OFFSETOF(wl_nan_sd_transmit_t, opt_tlv);
  4470. sub_cmd->u.options = htol32(BCM_XTLV_OPTION_ALIGN32);
  4471. pxtlv = (uint8 *)&sd_xmit->opt_tlv;
  4472. nan_buf_size -= (sub_cmd->len +
  4473. OFFSETOF(bcm_iov_batch_subcmd_t, u.options));
  4474. buflen_avail = nan_buf_size;
  4475. if (cmd_data->svc_info.data && cmd_data->svc_info.dlen) {
  4476. bcm_xtlv_t *pxtlv_svc_info = (bcm_xtlv_t *)pxtlv;
  4477. ret = bcm_pack_xtlv_entry(&pxtlv, &nan_buf_size,
  4478. WL_NAN_XTLV_SD_SVC_INFO, cmd_data->svc_info.dlen,
  4479. cmd_data->svc_info.data, BCM_XTLV_OPTION_ALIGN32);
  4480. if (unlikely(ret)) {
  4481. WL_ERR(("%s: fail to pack on bcm_pack_xtlv_entry, ret=%d\n",
  4482. __FUNCTION__, ret));
  4483. goto fail;
  4484. }
  4485. /* 0xFF is max length for svc_info */
  4486. if (pxtlv_svc_info->len > 0xFF) {
  4487. WL_ERR(("Invalid service info length %d\n",
  4488. (pxtlv_svc_info->len)));
  4489. ret = BCME_USAGE_ERROR;
  4490. goto fail;
  4491. }
  4492. sd_xmit->opt_len = (uint8)(pxtlv_svc_info->len);
  4493. }
  4494. if (cmd_data->sde_svc_info.data && cmd_data->sde_svc_info.dlen) {
  4495. WL_TRACE(("optional sdea svc_info present, pack it\n"));
  4496. ret = bcm_pack_xtlv_entry(&pxtlv, &nan_buf_size,
  4497. WL_NAN_XTLV_SD_SDE_SVC_INFO, cmd_data->sde_svc_info.dlen,
  4498. cmd_data->sde_svc_info.data, BCM_XTLV_OPTION_ALIGN32);
  4499. if (unlikely(ret)) {
  4500. WL_ERR(("%s: fail to pack sdea svc info\n", __FUNCTION__));
  4501. goto fail;
  4502. }
  4503. }
  4504. /* Check if all mandatory params are provided */
  4505. if (is_lcl_id && is_dest_id && is_dest_mac) {
  4506. nan_buf->count++;
  4507. sub_cmd->len += (buflen_avail - nan_buf_size);
  4508. } else {
  4509. WL_ERR(("Missing parameters\n"));
  4510. ret = BCME_USAGE_ERROR;
  4511. }
  4512. nan_buf->is_set = TRUE;
  4513. ret = wl_cfgnan_execute_ioctl(ndev, cfg, nan_buf, data_size,
  4514. &(cmd_data->status), resp_buf, data_size + NAN_IOVAR_NAME_SIZE);
  4515. if (unlikely(ret) || unlikely(cmd_data->status)) {
  4516. WL_ERR(("nan transmit failed for token %d ret = %d status = %d\n",
  4517. sd_xmit->token, ret, cmd_data->status));
  4518. goto fail;
  4519. }
  4520. WL_INFORM_MEM(("nan transmit successful for token %d\n", sd_xmit->token));
  4521. fail:
  4522. if (nan_buf) {
  4523. MFREE(cfg->osh, nan_buf, data_size);
  4524. }
  4525. if (resp_buf) {
  4526. MFREE(cfg->osh, resp_buf, data_size + NAN_IOVAR_NAME_SIZE);
  4527. }
  4528. NAN_MUTEX_UNLOCK();
  4529. NAN_DBG_EXIT();
  4530. return ret;
  4531. }
  4532. static int
  4533. wl_cfgnan_get_capability(struct net_device *ndev,
  4534. struct bcm_cfg80211 *cfg, nan_hal_capabilities_t *capabilities)
  4535. {
  4536. bcm_iov_batch_buf_t *nan_buf = NULL;
  4537. s32 ret = BCME_OK;
  4538. uint16 nan_buf_size = NAN_IOCTL_BUF_SIZE;
  4539. wl_nan_fw_cap_t *fw_cap = NULL;
  4540. uint16 subcmd_len;
  4541. uint32 status;
  4542. bcm_iov_batch_subcmd_t *sub_cmd = NULL;
  4543. bcm_iov_batch_subcmd_t *sub_cmd_resp = NULL;
  4544. uint8 resp_buf[NAN_IOCTL_BUF_SIZE];
  4545. const bcm_xtlv_t *xtlv;
  4546. uint16 type = 0;
  4547. int len = 0;
  4548. NAN_DBG_ENTER();
  4549. nan_buf = MALLOCZ(cfg->osh, nan_buf_size);
  4550. if (!nan_buf) {
  4551. WL_ERR(("%s: memory allocation failed\n", __func__));
  4552. ret = BCME_NOMEM;
  4553. goto fail;
  4554. }
  4555. nan_buf->version = htol16(WL_NAN_IOV_BATCH_VERSION);
  4556. nan_buf->count = 0;
  4557. nan_buf_size -= OFFSETOF(bcm_iov_batch_buf_t, cmds[0]);
  4558. sub_cmd = (bcm_iov_batch_subcmd_t*)(uint8 *)(&nan_buf->cmds[0]);
  4559. ret = wl_cfg_nan_check_cmd_len(nan_buf_size,
  4560. sizeof(*fw_cap), &subcmd_len);
  4561. if (unlikely(ret)) {
  4562. WL_ERR(("nan_sub_cmd check failed\n"));
  4563. goto fail;
  4564. }
  4565. fw_cap = (wl_nan_fw_cap_t *)sub_cmd->data;
  4566. sub_cmd->id = htod16(WL_NAN_CMD_GEN_FW_CAP);
  4567. sub_cmd->len = sizeof(sub_cmd->u.options) + sizeof(*fw_cap);
  4568. sub_cmd->u.options = htol32(BCM_XTLV_OPTION_ALIGN32);
  4569. nan_buf_size -= subcmd_len;
  4570. nan_buf->count = 1;
  4571. nan_buf->is_set = false;
  4572. memset(resp_buf, 0, sizeof(resp_buf));
  4573. ret = wl_cfgnan_execute_ioctl(ndev, cfg, nan_buf, nan_buf_size, &status,
  4574. (void*)resp_buf, NAN_IOCTL_BUF_SIZE);
  4575. if (unlikely(ret) || unlikely(status)) {
  4576. WL_ERR(("get nan fw cap failed ret %d status %d \n",
  4577. ret, status));
  4578. goto fail;
  4579. }
  4580. sub_cmd_resp = &((bcm_iov_batch_buf_t *)(resp_buf))->cmds[0];
  4581. /* check the response buff */
  4582. xtlv = ((const bcm_xtlv_t *)&sub_cmd_resp->data[0]);
  4583. if (!xtlv) {
  4584. ret = BCME_NOTFOUND;
  4585. WL_ERR(("xtlv not found: err = %d\n", ret));
  4586. goto fail;
  4587. }
  4588. bcm_xtlv_unpack_xtlv(xtlv, &type, (uint16*)&len, NULL, BCM_XTLV_OPTION_ALIGN32);
  4589. do
  4590. {
  4591. switch (type) {
  4592. case WL_NAN_XTLV_GEN_FW_CAP:
  4593. if (len > sizeof(wl_nan_fw_cap_t)) {
  4594. ret = BCME_BADARG;
  4595. goto fail;
  4596. }
  4597. GCC_DIAGNOSTIC_PUSH_SUPPRESS_CAST();
  4598. fw_cap = (wl_nan_fw_cap_t*)xtlv->data;
  4599. GCC_DIAGNOSTIC_POP();
  4600. break;
  4601. default:
  4602. WL_ERR(("Unknown xtlv: id %u\n", type));
  4603. ret = BCME_ERROR;
  4604. break;
  4605. }
  4606. if (ret != BCME_OK) {
  4607. goto fail;
  4608. }
  4609. } while ((xtlv = bcm_next_xtlv(xtlv, &len, BCM_XTLV_OPTION_ALIGN32)));
  4610. memset(capabilities, 0, sizeof(nan_hal_capabilities_t));
  4611. capabilities->max_publishes = fw_cap->max_svc_publishes;
  4612. capabilities->max_subscribes = fw_cap->max_svc_subscribes;
  4613. capabilities->max_ndi_interfaces = fw_cap->max_lcl_ndi_interfaces;
  4614. capabilities->max_ndp_sessions = fw_cap->max_ndp_sessions;
  4615. capabilities->max_concurrent_nan_clusters = fw_cap->max_concurrent_nan_clusters;
  4616. capabilities->max_service_name_len = fw_cap->max_service_name_len;
  4617. capabilities->max_match_filter_len = fw_cap->max_match_filter_len;
  4618. capabilities->max_total_match_filter_len = fw_cap->max_total_match_filter_len;
  4619. capabilities->max_service_specific_info_len = fw_cap->max_service_specific_info_len;
  4620. capabilities->max_app_info_len = fw_cap->max_app_info_len;
  4621. capabilities->max_sdea_service_specific_info_len = fw_cap->max_sdea_svc_specific_info_len;
  4622. capabilities->max_queued_transmit_followup_msgs = fw_cap->max_queued_tx_followup_msgs;
  4623. capabilities->max_subscribe_address = fw_cap->max_subscribe_address;
  4624. capabilities->is_ndp_security_supported = fw_cap->is_ndp_security_supported;
  4625. capabilities->ndp_supported_bands = fw_cap->ndp_supported_bands;
  4626. capabilities->cipher_suites_supported = fw_cap->cipher_suites_supported_mask;
  4627. fail:
  4628. if (nan_buf) {
  4629. MFREE(cfg->osh, nan_buf, NAN_IOCTL_BUF_SIZE);
  4630. }
  4631. NAN_DBG_EXIT();
  4632. return ret;
  4633. }
/*
 * HAL entry point for querying NAN capabilities.
 *
 * If NAN is already initialized, queries FW directly. Otherwise it
 * temporarily initializes NAN, queries, and de-initializes again.
 * When the FW query fails, the 'exit' path fills hard-coded default
 * capability values and returns BCME_OK for backward compatibility.
 *
 * NOTE(review): in the not-yet-initialized branch, a failed capability
 * query jumps to 'exit' without calling wl_cfgnan_deinit(), leaving NAN
 * initialized -- confirm whether that is intentional.
 */
int
wl_cfgnan_get_capablities_handler(struct net_device *ndev,
	struct bcm_cfg80211 *cfg, nan_hal_capabilities_t *capabilities)
{
	s32 ret = BCME_OK;
	dhd_pub_t *dhdp = wl_cfg80211_get_dhdp(ndev);
	NAN_DBG_ENTER();
	/* Do not query fw about nan if feature is not supported */
	if (!FW_SUPPORTED(dhdp, nan)) {
		WL_DBG(("NAN is not supported\n"));
		return ret;
	}
	if (cfg->nan_init_state) {
		ret = wl_cfgnan_get_capability(ndev, cfg, capabilities);
		if (ret != BCME_OK) {
			WL_ERR(("NAN init state: %d, failed to get capability from FW[%d]\n",
				cfg->nan_init_state, ret));
			goto exit;
		}
	} else {
		/* Initialize NAN before sending iovar */
		WL_ERR(("Initializing NAN\n"));
		ret = wl_cfgnan_init(cfg);
		if (ret != BCME_OK) {
			WL_ERR(("failed to initialize NAN[%d]\n", ret));
			goto fail;
		}
		ret = wl_cfgnan_get_capability(ndev, cfg, capabilities);
		if (ret != BCME_OK) {
			WL_ERR(("NAN init state: %d, failed to get capability from FW[%d]\n",
				cfg->nan_init_state, ret));
			goto exit;
		}
		WL_ERR(("De-Initializing NAN\n"));
		ret = wl_cfgnan_deinit(cfg, dhdp->up);
		if (ret != BCME_OK) {
			WL_ERR(("failed to de-initialize NAN[%d]\n", ret));
			goto fail;
		}
	}
fail:
	NAN_DBG_EXIT();
	return ret;
exit:
	/* FW query failed: report static defaults instead of an error. */
	/* Keeping backward campatibility */
	capabilities->max_concurrent_nan_clusters = MAX_CONCURRENT_NAN_CLUSTERS;
	capabilities->max_publishes = MAX_PUBLISHES;
	capabilities->max_subscribes = MAX_SUBSCRIBES;
	capabilities->max_service_name_len = MAX_SVC_NAME_LEN;
	capabilities->max_match_filter_len = MAX_MATCH_FILTER_LEN;
	capabilities->max_total_match_filter_len = MAX_TOTAL_MATCH_FILTER_LEN;
	capabilities->max_service_specific_info_len = NAN_MAX_SERVICE_SPECIFIC_INFO_LEN;
	capabilities->max_ndi_interfaces = MAX_NDI_INTERFACES;
	capabilities->max_ndp_sessions = MAX_NDP_SESSIONS;
	capabilities->max_app_info_len = MAX_APP_INFO_LEN;
	capabilities->max_queued_transmit_followup_msgs = MAX_QUEUED_TX_FOLLOUP_MSGS;
	capabilities->max_sdea_service_specific_info_len = MAX_SDEA_SVC_INFO_LEN;
	capabilities->max_subscribe_address = MAX_SUBSCRIBE_ADDRESS;
	capabilities->cipher_suites_supported = WL_NAN_CIPHER_SUITE_SHARED_KEY_128_MASK;
	capabilities->max_scid_len = MAX_SCID_LEN;
	capabilities->is_ndp_security_supported = true;
	capabilities->ndp_supported_bands = NDP_SUPPORTED_BANDS;
	ret = BCME_OK;
	NAN_DBG_EXIT();
	return ret;
}
  4700. bool wl_cfgnan_check_state(struct bcm_cfg80211 *cfg)
  4701. {
  4702. return cfg->nan_enable;
  4703. }
  4704. int
  4705. wl_cfgnan_init(struct bcm_cfg80211 *cfg)
  4706. {
  4707. s32 ret = BCME_OK;
  4708. uint16 nan_buf_size = NAN_IOCTL_BUF_SIZE;
  4709. uint32 status;
  4710. uint8 resp_buf[NAN_IOCTL_BUF_SIZE];
  4711. uint8 buf[NAN_IOCTL_BUF_SIZE];
  4712. bcm_iov_batch_buf_t *nan_buf = (bcm_iov_batch_buf_t*)buf;
  4713. NAN_DBG_ENTER();
  4714. if (cfg->nan_init_state) {
  4715. WL_ERR(("nan initialized/nmi exists\n"));
  4716. return BCME_OK;
  4717. }
  4718. nan_buf->version = htol16(WL_NAN_IOV_BATCH_VERSION);
  4719. nan_buf->count = 0;
  4720. nan_buf_size -= OFFSETOF(bcm_iov_batch_buf_t, cmds[0]);
  4721. ret = wl_cfgnan_init_handler(&nan_buf->cmds[0], &nan_buf_size, true);
  4722. if (unlikely(ret)) {
  4723. WL_ERR(("init handler sub_cmd set failed\n"));
  4724. goto fail;
  4725. }
  4726. nan_buf->count++;
  4727. nan_buf->is_set = true;
  4728. memset_s(resp_buf, sizeof(resp_buf), 0, sizeof(resp_buf));
  4729. ret = wl_cfgnan_execute_ioctl(bcmcfg_to_prmry_ndev(cfg), cfg,
  4730. nan_buf, nan_buf_size, &status,
  4731. (void*)resp_buf, NAN_IOCTL_BUF_SIZE);
  4732. if (unlikely(ret) || unlikely(status)) {
  4733. WL_ERR(("nan init handler failed ret %d status %d\n",
  4734. ret, status));
  4735. goto fail;
  4736. }
  4737. #ifdef WL_NAN_DISC_CACHE
  4738. /* malloc for disc result */
  4739. cfg->nan_disc_cache = MALLOCZ(cfg->osh,
  4740. NAN_MAX_CACHE_DISC_RESULT * sizeof(nan_disc_result_cache));
  4741. if (!cfg->nan_disc_cache) {
  4742. WL_ERR(("%s: memory allocation failed\n", __func__));
  4743. ret = BCME_NOMEM;
  4744. goto fail;
  4745. }
  4746. #endif /* WL_NAN_DISC_CACHE */
  4747. cfg->nan_init_state = true;
  4748. return ret;
  4749. fail:
  4750. NAN_DBG_EXIT();
  4751. return ret;
  4752. }
  4753. int
  4754. wl_cfgnan_deinit(struct bcm_cfg80211 *cfg, uint8 busstate)
  4755. {
  4756. s32 ret = BCME_OK;
  4757. uint16 nan_buf_size = NAN_IOCTL_BUF_SIZE;
  4758. uint32 status;
  4759. uint8 resp_buf[NAN_IOCTL_BUF_SIZE];
  4760. uint8 buf[NAN_IOCTL_BUF_SIZE];
  4761. bcm_iov_batch_buf_t *nan_buf = (bcm_iov_batch_buf_t*)buf;
  4762. uint8 i = 0;
  4763. NAN_DBG_ENTER();
  4764. NAN_MUTEX_LOCK();
  4765. if (!cfg->nan_init_state) {
  4766. WL_ERR(("nan is not initialized/nmi doesnt exists\n"));
  4767. ret = BCME_OK;
  4768. goto fail;
  4769. }
  4770. if (busstate != DHD_BUS_DOWN) {
  4771. nan_buf->version = htol16(WL_NAN_IOV_BATCH_VERSION);
  4772. nan_buf->count = 0;
  4773. nan_buf_size -= OFFSETOF(bcm_iov_batch_buf_t, cmds[0]);
  4774. WL_DBG(("nan deinit\n"));
  4775. ret = wl_cfgnan_init_handler(&nan_buf->cmds[0], &nan_buf_size, false);
  4776. if (unlikely(ret)) {
  4777. WL_ERR(("deinit handler sub_cmd set failed\n"));
  4778. } else {
  4779. nan_buf->count++;
  4780. nan_buf->is_set = true;
  4781. memset_s(resp_buf, sizeof(resp_buf), 0, sizeof(resp_buf));
  4782. ret = wl_cfgnan_execute_ioctl(cfg->wdev->netdev, cfg,
  4783. nan_buf, nan_buf_size, &status,
  4784. (void*)resp_buf, NAN_IOCTL_BUF_SIZE);
  4785. if (unlikely(ret) || unlikely(status)) {
  4786. WL_ERR(("nan init handler failed ret %d status %d\n",
  4787. ret, status));
  4788. }
  4789. }
  4790. }
  4791. cfg->nan_dp_mask = 0;
  4792. cfg->nan_init_state = false;
  4793. #ifdef WL_NAN_DISC_CACHE
  4794. if (cfg->nan_disc_cache) {
  4795. for (i = 0; i < NAN_MAX_CACHE_DISC_RESULT; i++) {
  4796. if (cfg->nan_disc_cache[i].tx_match_filter.data) {
  4797. MFREE(cfg->osh, cfg->nan_disc_cache[i].tx_match_filter.data,
  4798. cfg->nan_disc_cache[i].tx_match_filter.dlen);
  4799. }
  4800. if (cfg->nan_disc_cache[i].svc_info.data) {
  4801. MFREE(cfg->osh, cfg->nan_disc_cache[i].svc_info.data,
  4802. cfg->nan_disc_cache[i].svc_info.dlen);
  4803. }
  4804. }
  4805. MFREE(cfg->osh, cfg->nan_disc_cache,
  4806. NAN_MAX_CACHE_DISC_RESULT * sizeof(nan_disc_result_cache));
  4807. cfg->nan_disc_cache = NULL;
  4808. }
  4809. cfg->nan_disc_count = 0;
  4810. memset_s(cfg->svc_info, NAN_MAX_SVC_INST * sizeof(nan_svc_info_t),
  4811. 0, NAN_MAX_SVC_INST * sizeof(nan_svc_info_t));
  4812. memset_s(cfg->nan_ranging_info, NAN_MAX_RANGING_INST * sizeof(nan_ranging_inst_t),
  4813. 0, NAN_MAX_RANGING_INST * sizeof(nan_ranging_inst_t));
  4814. #endif /* WL_NAN_DISC_CACHE */
  4815. fail:
  4816. if (!cfg->nancfg.mac_rand && !ETHER_ISNULLADDR(cfg->nan_nmi_mac)) {
  4817. wl_release_vif_macaddr(cfg, cfg->nan_nmi_mac, WL_IF_TYPE_NAN_NMI);
  4818. }
  4819. NAN_MUTEX_UNLOCK();
  4820. NAN_DBG_EXIT();
  4821. return ret;
  4822. }
  4823. static int
  4824. wl_cfgnan_get_ndi_macaddr(struct bcm_cfg80211 *cfg, u8* mac_addr)
  4825. {
  4826. int i = 0;
  4827. int ret = BCME_OK;
  4828. bool rand_mac = cfg->nancfg.mac_rand;
  4829. BCM_REFERENCE(i);
  4830. if (rand_mac) {
  4831. /* ensure nmi != ndi */
  4832. do {
  4833. RANDOM_BYTES(mac_addr, ETHER_ADDR_LEN);
  4834. /* restore mcast and local admin bits to 0 and 1 */
  4835. ETHER_SET_UNICAST(mac_addr);
  4836. ETHER_SET_LOCALADDR(mac_addr);
  4837. i++;
  4838. if (i == NAN_RAND_MAC_RETRIES) {
  4839. break;
  4840. }
  4841. } while (eacmp(cfg->nan_nmi_mac, mac_addr) == 0);
  4842. if (i == NAN_RAND_MAC_RETRIES) {
  4843. if (eacmp(cfg->nan_nmi_mac, mac_addr) == 0) {
  4844. WL_ERR(("\nCouldn't generate rand NDI which != NMI\n"));
  4845. ret = BCME_NORESOURCE;
  4846. goto fail;
  4847. }
  4848. }
  4849. } else {
  4850. if (wl_get_vif_macaddr(cfg, WL_IF_TYPE_NAN,
  4851. mac_addr) != BCME_OK) {
  4852. ret = -EINVAL;
  4853. WL_ERR(("Failed to get mac addr for NDI\n"));
  4854. goto fail;
  4855. }
  4856. }
  4857. fail:
  4858. return ret;
  4859. }
/*
 * Create or delete a NAN data-path interface (NDI).
 *
 * type selects the operation (NAN_WIFI_SUBCMD_DATA_PATH_IFACE_CREATE /
 * _DELETE). Create reserves a free NDI slot, picks a MAC via
 * wl_cfgnan_get_ndi_macaddr() and registers the iface through
 * wl_cfg80211_add_if(); delete removes the iface and its host bookkeeping.
 * Both are refused when the bus is down (returns -ENODEV).
 */
int
wl_cfgnan_data_path_iface_create_delete_handler(struct net_device *ndev,
	struct bcm_cfg80211 *cfg, char *ifname, uint16 type, uint8 busstate)
{
	u8 mac_addr[ETH_ALEN];
	s32 ret = BCME_OK;
	s32 idx;
	struct wireless_dev *wdev;
	NAN_DBG_ENTER();
	if (busstate != DHD_BUS_DOWN) {
		if (type == NAN_WIFI_SUBCMD_DATA_PATH_IFACE_CREATE) {
			/* reserve a free host-side NDI slot first */
			if ((idx = wl_cfgnan_get_ndi_idx(cfg)) < 0) {
				WL_ERR(("No free idx for NAN NDI\n"));
				ret = BCME_NORESOURCE;
				goto fail;
			}
			ret = wl_cfgnan_get_ndi_macaddr(cfg, mac_addr);
			if (ret != BCME_OK) {
				WL_ERR(("Couldn't get mac addr for NDI ret %d\n", ret));
				goto fail;
			}
			wdev = wl_cfg80211_add_if(cfg, ndev, WL_IF_TYPE_NAN,
				ifname, mac_addr);
			if (!wdev) {
				ret = -ENODEV;
				WL_ERR(("Failed to create NDI iface = %s, wdev is NULL\n", ifname));
				goto fail;
			}
			/* Store the iface name to pub data so that it can be used
			 * during NAN enable
			 */
			wl_cfgnan_add_ndi_data(cfg, idx, ifname);
			cfg->nancfg.ndi[idx].created = true;
			/* Store nan ndev */
			cfg->nancfg.ndi[idx].nan_ndev = wdev_to_ndev(wdev);
		} else if (type == NAN_WIFI_SUBCMD_DATA_PATH_IFACE_DELETE) {
			ret = wl_cfg80211_del_if(cfg, ndev, NULL, ifname);
			if (ret == BCME_OK) {
				if (wl_cfgnan_del_ndi_data(cfg, ifname) < 0) {
					WL_ERR(("Failed to find matching data for ndi:%s\n",
						ifname));
				}
			} else if (ret == -ENODEV) {
				/* iface already gone: treat as success */
				WL_INFORM(("Already deleted: %s\n", ifname));
				ret = BCME_OK;
			} else if (ret != BCME_OK) {
				WL_ERR(("failed to delete NDI[%d]\n", ret));
			}
		}
	} else {
		ret = -ENODEV;
		WL_ERR(("Bus is already down, no dev found to remove, ret = %d\n", ret));
	}
fail:
	NAN_DBG_EXIT();
	return ret;
}
  4917. /*
  4918. * Return data peer from peer list
  4919. * for peer_addr
  4920. * NULL if not found
  4921. */
  4922. nan_ndp_peer_t *
  4923. wl_cfgnan_data_get_peer(struct bcm_cfg80211 *cfg,
  4924. struct ether_addr *peer_addr)
  4925. {
  4926. uint8 i;
  4927. nan_ndp_peer_t* peer = cfg->nancfg.nan_ndp_peer_info;
  4928. if (!peer) {
  4929. WL_ERR(("wl_cfgnan_data_get_peer: nan_ndp_peer_info is NULL\n"));
  4930. goto exit;
  4931. }
  4932. for (i = 0; i < cfg->nancfg.max_ndp_count; i++) {
  4933. if (peer[i].peer_dp_state != NAN_PEER_DP_NOT_CONNECTED &&
  4934. (!memcmp(peer_addr, &peer[i].peer_addr, ETHER_ADDR_LEN))) {
  4935. return &peer[i];
  4936. }
  4937. }
  4938. exit:
  4939. return NULL;
  4940. }
  4941. /*
  4942. * Returns True if
  4943. * datapath exists for nan cfg
  4944. * for any peer
  4945. */
  4946. bool
  4947. wl_cfgnan_data_dp_exists(struct bcm_cfg80211 *cfg)
  4948. {
  4949. bool ret = FALSE;
  4950. uint8 i;
  4951. nan_ndp_peer_t* peer = NULL;
  4952. if ((cfg->nan_init_state == FALSE) ||
  4953. (cfg->nan_enable == FALSE)) {
  4954. goto exit;
  4955. }
  4956. if (!cfg->nancfg.nan_ndp_peer_info) {
  4957. goto exit;
  4958. }
  4959. peer = cfg->nancfg.nan_ndp_peer_info;
  4960. for (i = 0; i < cfg->nancfg.max_ndp_count; i++) {
  4961. if (peer[i].peer_dp_state != NAN_PEER_DP_NOT_CONNECTED) {
  4962. ret = TRUE;
  4963. break;
  4964. }
  4965. }
  4966. exit:
  4967. return ret;
  4968. }
  4969. /*
  4970. * Returns True if
  4971. * datapath exists for nan cfg
  4972. * for given peer
  4973. */
  4974. bool
  4975. wl_cfgnan_data_dp_exists_with_peer(struct bcm_cfg80211 *cfg,
  4976. struct ether_addr *peer_addr)
  4977. {
  4978. bool ret = FALSE;
  4979. nan_ndp_peer_t* peer = NULL;
  4980. if ((cfg->nan_init_state == FALSE) ||
  4981. (cfg->nan_enable == FALSE)) {
  4982. goto exit;
  4983. }
  4984. /* check for peer exist */
  4985. peer = wl_cfgnan_data_get_peer(cfg, peer_addr);
  4986. if (peer) {
  4987. ret = TRUE;
  4988. }
  4989. exit:
  4990. return ret;
  4991. }
  4992. /*
  4993. * As of now API only available
  4994. * for setting state to CONNECTED
  4995. * if applicable
  4996. */
  4997. void
  4998. wl_cfgnan_data_set_peer_dp_state(struct bcm_cfg80211 *cfg,
  4999. struct ether_addr *peer_addr, nan_peer_dp_state_t state)
  5000. {
  5001. nan_ndp_peer_t* peer = NULL;
  5002. /* check for peer exist */
  5003. peer = wl_cfgnan_data_get_peer(cfg, peer_addr);
  5004. if (!peer) {
  5005. goto end;
  5006. }
  5007. peer->peer_dp_state = state;
  5008. end:
  5009. return;
  5010. }
  5011. /* Adds peer to nan data peer list */
  5012. void
  5013. wl_cfgnan_data_add_peer(struct bcm_cfg80211 *cfg,
  5014. struct ether_addr *peer_addr)
  5015. {
  5016. uint8 i;
  5017. nan_ndp_peer_t* peer = NULL;
  5018. /* check for peer exist */
  5019. peer = wl_cfgnan_data_get_peer(cfg, peer_addr);
  5020. if (peer) {
  5021. peer->dp_count++;
  5022. goto end;
  5023. }
  5024. peer = cfg->nancfg.nan_ndp_peer_info;
  5025. for (i = 0; i < cfg->nancfg.max_ndp_count; i++) {
  5026. if (peer[i].peer_dp_state == NAN_PEER_DP_NOT_CONNECTED) {
  5027. break;
  5028. }
  5029. }
  5030. if (i == NAN_MAX_NDP_PEER) {
  5031. WL_DBG(("DP Peer list full, Droopping add peer req\n"));
  5032. goto end;
  5033. }
  5034. /* Add peer to list */
  5035. memcpy(&peer[i].peer_addr, peer_addr, ETHER_ADDR_LEN);
  5036. peer[i].dp_count = 1;
  5037. peer[i].peer_dp_state = NAN_PEER_DP_CONNECTING;
  5038. end:
  5039. return;
  5040. }
  5041. /* Removes nan data peer from peer list */
  5042. void
  5043. wl_cfgnan_data_remove_peer(struct bcm_cfg80211 *cfg,
  5044. struct ether_addr *peer_addr)
  5045. {
  5046. nan_ndp_peer_t* peer = NULL;
  5047. /* check for peer exist */
  5048. peer = wl_cfgnan_data_get_peer(cfg, peer_addr);
  5049. if (!peer) {
  5050. WL_DBG(("DP Peer not present in list, "
  5051. "Droopping remove peer req\n"));
  5052. goto end;
  5053. }
  5054. peer->dp_count--;
  5055. if (peer->dp_count == 0) {
  5056. /* No more NDPs, delete entry */
  5057. memset(peer, 0, sizeof(nan_ndp_peer_t));
  5058. } else {
  5059. /* Set peer dp state to connected if any ndp still exits */
  5060. peer->peer_dp_state = NAN_PEER_DP_CONNECTED;
  5061. }
  5062. end:
  5063. return;
  5064. }
  5065. int
  5066. wl_cfgnan_data_path_request_handler(struct net_device *ndev,
  5067. struct bcm_cfg80211 *cfg, nan_datapath_cmd_data_t *cmd_data,
  5068. uint8 *ndp_instance_id)
  5069. {
  5070. s32 ret = BCME_OK;
  5071. bcm_iov_batch_buf_t *nan_buf = NULL;
  5072. wl_nan_dp_req_t *datareq = NULL;
  5073. bcm_iov_batch_subcmd_t *sub_cmd = NULL;
  5074. uint16 buflen_avail;
  5075. uint8 *pxtlv;
  5076. struct wireless_dev *wdev;
  5077. uint16 nan_buf_size;
  5078. uint8 *resp_buf = NULL;
  5079. /* Considering fixed params */
  5080. uint16 data_size = WL_NAN_OBUF_DATA_OFFSET +
  5081. OFFSETOF(wl_nan_dp_req_t, tlv_params);
  5082. data_size = ALIGN_SIZE(data_size, 4);
  5083. ret = wl_cfgnan_aligned_data_size_of_opt_dp_params(&data_size, cmd_data);
  5084. if (unlikely(ret)) {
  5085. WL_ERR(("Failed to get alligned size of optional params\n"));
  5086. goto fail;
  5087. }
  5088. nan_buf_size = data_size;
  5089. NAN_DBG_ENTER();
  5090. mutex_lock(&cfg->if_sync);
  5091. NAN_MUTEX_LOCK();
  5092. #ifdef WL_IFACE_MGMT
  5093. if ((ret = wl_cfg80211_handle_if_role_conflict(cfg, WL_IF_TYPE_NAN)) < 0) {
  5094. WL_ERR(("Conflicting iface found to be active\n"));
  5095. ret = BCME_UNSUPPORTED;
  5096. goto fail;
  5097. }
  5098. #endif /* WL_IFACE_MGMT */
  5099. #ifdef RTT_SUPPORT
  5100. /* cancel any ongoing RTT session with peer
  5101. * as we donot support DP and RNG to same peer
  5102. */
  5103. wl_cfgnan_clear_peer_ranging(cfg, &cmd_data->mac_addr,
  5104. RTT_GEO_SUSPN_HOST_NDP_TRIGGER);
  5105. #endif /* RTT_SUPPORT */
  5106. nan_buf = MALLOCZ(cfg->osh, data_size);
  5107. if (!nan_buf) {
  5108. WL_ERR(("%s: memory allocation failed\n", __func__));
  5109. ret = BCME_NOMEM;
  5110. goto fail;
  5111. }
  5112. resp_buf = MALLOCZ(cfg->osh, data_size + NAN_IOVAR_NAME_SIZE);
  5113. if (!resp_buf) {
  5114. WL_ERR(("%s: memory allocation failed\n", __func__));
  5115. ret = BCME_NOMEM;
  5116. goto fail;
  5117. }
  5118. ret = wl_cfgnan_set_nan_avail(bcmcfg_to_prmry_ndev(cfg),
  5119. cfg, &cmd_data->avail_params, WL_AVAIL_LOCAL);
  5120. if (unlikely(ret)) {
  5121. WL_ERR(("Failed to set avail value with type local\n"));
  5122. goto fail;
  5123. }
  5124. ret = wl_cfgnan_set_nan_avail(bcmcfg_to_prmry_ndev(cfg),
  5125. cfg, &cmd_data->avail_params, WL_AVAIL_NDC);
  5126. if (unlikely(ret)) {
  5127. WL_ERR(("Failed to set avail value with type ndc\n"));
  5128. goto fail;
  5129. }
  5130. nan_buf->version = htol16(WL_NAN_IOV_BATCH_VERSION);
  5131. nan_buf->count = 0;
  5132. nan_buf_size -= OFFSETOF(bcm_iov_batch_buf_t, cmds[0]);
  5133. sub_cmd = (bcm_iov_batch_subcmd_t*)(&nan_buf->cmds[0]);
  5134. datareq = (wl_nan_dp_req_t *)(sub_cmd->data);
  5135. /* setting default data path type to unicast */
  5136. datareq->type = WL_NAN_DP_TYPE_UNICAST;
  5137. if (cmd_data->pub_id) {
  5138. datareq->pub_id = cmd_data->pub_id;
  5139. }
  5140. if (!ETHER_ISNULLADDR(&cmd_data->mac_addr.octet)) {
  5141. ret = memcpy_s(&datareq->peer_mac, ETHER_ADDR_LEN,
  5142. &cmd_data->mac_addr, ETHER_ADDR_LEN);
  5143. if (ret != BCME_OK) {
  5144. WL_ERR(("Failed to copy ether addr provided\n"));
  5145. goto fail;
  5146. }
  5147. } else {
  5148. WL_ERR(("Invalid ether addr provided\n"));
  5149. ret = BCME_BADARG;
  5150. goto fail;
  5151. }
  5152. /* Retrieve mac from given iface name */
  5153. wdev = wl_cfg80211_get_wdev_from_ifname(cfg,
  5154. (char *)cmd_data->ndp_iface);
  5155. if (!wdev || ETHER_ISNULLADDR(wdev->netdev->dev_addr)) {
  5156. ret = -EINVAL;
  5157. WL_ERR(("Failed to retrieve wdev/dev addr for ndp_iface = %s\n",
  5158. (char *)cmd_data->ndp_iface));
  5159. goto fail;
  5160. }
  5161. if (!ETHER_ISNULLADDR(wdev->netdev->dev_addr)) {
  5162. ret = memcpy_s(&datareq->ndi, ETHER_ADDR_LEN,
  5163. wdev->netdev->dev_addr, ETHER_ADDR_LEN);
  5164. if (ret != BCME_OK) {
  5165. WL_ERR(("Failed to copy ether addr provided\n"));
  5166. goto fail;
  5167. }
  5168. WL_TRACE(("%s: Retrieved ndi mac " MACDBG "\n",
  5169. __FUNCTION__, MAC2STRDBG(datareq->ndi.octet)));
  5170. } else {
  5171. WL_ERR(("Invalid NDI addr retrieved\n"));
  5172. ret = BCME_BADARG;
  5173. goto fail;
  5174. }
  5175. datareq->ndl_qos.min_slots = NAN_NDL_QOS_MIN_SLOT_NO_PREF;
  5176. datareq->ndl_qos.max_latency = NAN_NDL_QOS_MAX_LAT_NO_PREF;
  5177. /* Fill the sub_command block */
  5178. sub_cmd->id = htod16(WL_NAN_CMD_DATA_DATAREQ);
  5179. sub_cmd->len = sizeof(sub_cmd->u.options) +
  5180. OFFSETOF(wl_nan_dp_req_t, tlv_params);
  5181. sub_cmd->u.options = htol32(BCM_XTLV_OPTION_ALIGN32);
  5182. pxtlv = (uint8 *)&datareq->tlv_params;
  5183. nan_buf_size -= (sub_cmd->len +
  5184. OFFSETOF(bcm_iov_batch_subcmd_t, u.options));
  5185. buflen_avail = nan_buf_size;
  5186. if (cmd_data->svc_info.data && cmd_data->svc_info.dlen) {
  5187. ret = bcm_pack_xtlv_entry(&pxtlv, &nan_buf_size,
  5188. WL_NAN_XTLV_SD_SVC_INFO, cmd_data->svc_info.dlen,
  5189. cmd_data->svc_info.data,
  5190. BCM_XTLV_OPTION_ALIGN32);
  5191. if (ret != BCME_OK) {
  5192. WL_ERR(("unable to process svc_spec_info: %d\n", ret));
  5193. goto fail;
  5194. }
  5195. datareq->flags |= WL_NAN_DP_FLAG_SVC_INFO;
  5196. }
  5197. /* Security elements */
  5198. if (cmd_data->csid) {
  5199. WL_TRACE(("Cipher suite type is present, pack it\n"));
  5200. ret = bcm_pack_xtlv_entry(&pxtlv, &nan_buf_size,
  5201. WL_NAN_XTLV_CFG_SEC_CSID, sizeof(nan_sec_csid_e),
  5202. (uint8*)&cmd_data->csid, BCM_XTLV_OPTION_ALIGN32);
  5203. if (unlikely(ret)) {
  5204. WL_ERR(("%s: fail to pack on csid\n", __FUNCTION__));
  5205. goto fail;
  5206. }
  5207. }
  5208. if (cmd_data->ndp_cfg.security_cfg) {
  5209. if ((cmd_data->key_type == NAN_SECURITY_KEY_INPUT_PMK) ||
  5210. (cmd_data->key_type == NAN_SECURITY_KEY_INPUT_PASSPHRASE)) {
  5211. if (cmd_data->key.data && cmd_data->key.dlen) {
  5212. WL_TRACE(("optional pmk present, pack it\n"));
  5213. ret = bcm_pack_xtlv_entry(&pxtlv, &nan_buf_size,
  5214. WL_NAN_XTLV_CFG_SEC_PMK, cmd_data->key.dlen,
  5215. cmd_data->key.data, BCM_XTLV_OPTION_ALIGN32);
  5216. if (unlikely(ret)) {
  5217. WL_ERR(("%s: fail to pack on WL_NAN_XTLV_CFG_SEC_PMK\n",
  5218. __FUNCTION__));
  5219. goto fail;
  5220. }
  5221. }
  5222. } else {
  5223. WL_ERR(("Invalid security key type\n"));
  5224. ret = BCME_BADARG;
  5225. goto fail;
  5226. }
  5227. if ((cmd_data->svc_hash.dlen == WL_NAN_SVC_HASH_LEN) &&
  5228. (cmd_data->svc_hash.data)) {
  5229. WL_TRACE(("svc hash present, pack it\n"));
  5230. ret = bcm_pack_xtlv_entry(&pxtlv, &nan_buf_size,
  5231. WL_NAN_XTLV_CFG_SVC_HASH, WL_NAN_SVC_HASH_LEN,
  5232. cmd_data->svc_hash.data, BCM_XTLV_OPTION_ALIGN32);
  5233. if (ret != BCME_OK) {
  5234. WL_ERR(("%s: fail to pack WL_NAN_XTLV_CFG_SVC_HASH\n",
  5235. __FUNCTION__));
  5236. goto fail;
  5237. }
  5238. } else {
  5239. #ifdef WL_NAN_DISC_CACHE
  5240. /* check in cache */
  5241. nan_disc_result_cache *cache;
  5242. cache = wl_cfgnan_get_disc_result(cfg,
  5243. datareq->pub_id, &datareq->peer_mac);
  5244. if (!cache) {
  5245. ret = BCME_ERROR;
  5246. WL_ERR(("invalid svc hash data or length = %d\n",
  5247. cmd_data->svc_hash.dlen));
  5248. goto fail;
  5249. }
  5250. WL_TRACE(("svc hash present, pack it\n"));
  5251. ret = bcm_pack_xtlv_entry(&pxtlv, &nan_buf_size,
  5252. WL_NAN_XTLV_CFG_SVC_HASH, WL_NAN_SVC_HASH_LEN,
  5253. cache->svc_hash, BCM_XTLV_OPTION_ALIGN32);
  5254. if (ret != BCME_OK) {
  5255. WL_ERR(("%s: fail to pack WL_NAN_XTLV_CFG_SVC_HASH\n",
  5256. __FUNCTION__));
  5257. goto fail;
  5258. }
  5259. #else
  5260. ret = BCME_ERROR;
  5261. WL_ERR(("invalid svc hash data or length = %d\n",
  5262. cmd_data->svc_hash.dlen));
  5263. goto fail;
  5264. #endif /* WL_NAN_DISC_CACHE */
  5265. }
  5266. /* If the Data req is for secure data connection */
  5267. datareq->flags |= WL_NAN_DP_FLAG_SECURITY;
  5268. }
  5269. sub_cmd->len += (buflen_avail - nan_buf_size);
  5270. nan_buf->is_set = false;
  5271. nan_buf->count++;
  5272. ret = wl_cfgnan_execute_ioctl(ndev, cfg, nan_buf, data_size,
  5273. &(cmd_data->status), resp_buf, data_size + NAN_IOVAR_NAME_SIZE);
  5274. if (unlikely(ret) || unlikely(cmd_data->status)) {
  5275. WL_ERR(("nan data path request handler failed, ret = %d status %d\n",
  5276. ret, cmd_data->status));
  5277. goto fail;
  5278. }
  5279. /* check the response buff */
  5280. if (ret == BCME_OK) {
  5281. ret = process_resp_buf(resp_buf + WL_NAN_OBUF_DATA_OFFSET,
  5282. ndp_instance_id, WL_NAN_CMD_DATA_DATAREQ);
  5283. cmd_data->ndp_instance_id = *ndp_instance_id;
  5284. }
  5285. WL_INFORM_MEM(("[NAN] DP request successfull (ndp_id:%d)\n",
  5286. cmd_data->ndp_instance_id));
  5287. /* Add peer to data ndp peer list */
  5288. wl_cfgnan_data_add_peer(cfg, &datareq->peer_mac);
  5289. fail:
  5290. if (nan_buf) {
  5291. MFREE(cfg->osh, nan_buf, data_size);
  5292. }
  5293. if (resp_buf) {
  5294. MFREE(cfg->osh, resp_buf, data_size + NAN_IOVAR_NAME_SIZE);
  5295. }
  5296. NAN_MUTEX_UNLOCK();
  5297. mutex_unlock(&cfg->if_sync);
  5298. NAN_DBG_EXIT();
  5299. return ret;
  5300. }
  5301. int
  5302. wl_cfgnan_data_path_response_handler(struct net_device *ndev,
  5303. struct bcm_cfg80211 *cfg, nan_datapath_cmd_data_t *cmd_data)
  5304. {
  5305. s32 ret = BCME_OK;
  5306. bcm_iov_batch_buf_t *nan_buf = NULL;
  5307. wl_nan_dp_resp_t *dataresp = NULL;
  5308. bcm_iov_batch_subcmd_t *sub_cmd = NULL;
  5309. uint16 buflen_avail;
  5310. uint8 *pxtlv;
  5311. struct wireless_dev *wdev;
  5312. uint16 nan_buf_size;
  5313. uint8 *resp_buf = NULL;
  5314. /* Considering fixed params */
  5315. uint16 data_size = WL_NAN_OBUF_DATA_OFFSET +
  5316. OFFSETOF(wl_nan_dp_resp_t, tlv_params);
  5317. data_size = ALIGN_SIZE(data_size, 4);
  5318. ret = wl_cfgnan_aligned_data_size_of_opt_dp_params(&data_size, cmd_data);
  5319. if (unlikely(ret)) {
  5320. WL_ERR(("Failed to get alligned size of optional params\n"));
  5321. goto fail;
  5322. }
  5323. nan_buf_size = data_size;
  5324. NAN_DBG_ENTER();
  5325. mutex_lock(&cfg->if_sync);
  5326. NAN_MUTEX_LOCK();
  5327. #ifdef WL_IFACE_MGMT
  5328. if ((ret = wl_cfg80211_handle_if_role_conflict(cfg, WL_IF_TYPE_NAN)) < 0) {
  5329. WL_ERR(("Conflicting iface found to be active\n"));
  5330. ret = BCME_UNSUPPORTED;
  5331. goto fail;
  5332. }
  5333. #endif /* WL_IFACE_MGMT */
  5334. nan_buf = MALLOCZ(cfg->osh, data_size);
  5335. if (!nan_buf) {
  5336. WL_ERR(("%s: memory allocation failed\n", __func__));
  5337. ret = BCME_NOMEM;
  5338. goto fail;
  5339. }
  5340. resp_buf = MALLOCZ(cfg->osh, data_size + NAN_IOVAR_NAME_SIZE);
  5341. if (!resp_buf) {
  5342. WL_ERR(("%s: memory allocation failed\n", __func__));
  5343. ret = BCME_NOMEM;
  5344. goto fail;
  5345. }
  5346. ret = wl_cfgnan_set_nan_avail(bcmcfg_to_prmry_ndev(cfg),
  5347. cfg, &cmd_data->avail_params, WL_AVAIL_LOCAL);
  5348. if (unlikely(ret)) {
  5349. WL_ERR(("Failed to set avail value with type local\n"));
  5350. goto fail;
  5351. }
  5352. ret = wl_cfgnan_set_nan_avail(bcmcfg_to_prmry_ndev(cfg),
  5353. cfg, &cmd_data->avail_params, WL_AVAIL_NDC);
  5354. if (unlikely(ret)) {
  5355. WL_ERR(("Failed to set avail value with type ndc\n"));
  5356. goto fail;
  5357. }
  5358. nan_buf->version = htol16(WL_NAN_IOV_BATCH_VERSION);
  5359. nan_buf->count = 0;
  5360. nan_buf_size -= OFFSETOF(bcm_iov_batch_buf_t, cmds[0]);
  5361. sub_cmd = (bcm_iov_batch_subcmd_t*)(&nan_buf->cmds[0]);
  5362. dataresp = (wl_nan_dp_resp_t *)(sub_cmd->data);
  5363. /* Setting default data path type to unicast */
  5364. dataresp->type = WL_NAN_DP_TYPE_UNICAST;
  5365. /* Changing status value as per fw convention */
  5366. dataresp->status = cmd_data->rsp_code ^= 1;
  5367. dataresp->reason_code = 0;
  5368. /* ndp instance id must be from 1 to 255, 0 is reserved */
  5369. if (cmd_data->ndp_instance_id < NAN_ID_MIN ||
  5370. cmd_data->ndp_instance_id > NAN_ID_MAX) {
  5371. WL_ERR(("Invalid ndp instance id: %d\n", cmd_data->ndp_instance_id));
  5372. ret = BCME_BADARG;
  5373. goto fail;
  5374. }
  5375. dataresp->ndp_id = cmd_data->ndp_instance_id;
  5376. /* Retrieved initiator ndi from NanDataPathRequestInd */
  5377. if (!ETHER_ISNULLADDR(&cfg->initiator_ndi.octet)) {
  5378. ret = memcpy_s(&dataresp->mac_addr, ETHER_ADDR_LEN,
  5379. &cfg->initiator_ndi, ETHER_ADDR_LEN);
  5380. if (ret != BCME_OK) {
  5381. WL_ERR(("Failed to copy initiator ndi\n"));
  5382. goto fail;
  5383. }
  5384. } else {
  5385. WL_ERR(("Invalid ether addr retrieved\n"));
  5386. ret = BCME_BADARG;
  5387. goto fail;
  5388. }
  5389. /* Interface is not mandatory, when it is a reject from framework */
  5390. if (dataresp->status != WL_NAN_DP_STATUS_REJECTED) {
  5391. /* Retrieve mac from given iface name */
  5392. wdev = wl_cfg80211_get_wdev_from_ifname(cfg,
  5393. (char *)cmd_data->ndp_iface);
  5394. if (!wdev || ETHER_ISNULLADDR(wdev->netdev->dev_addr)) {
  5395. ret = -EINVAL;
  5396. WL_ERR(("Failed to retrieve wdev/dev addr for ndp_iface = %s\n",
  5397. (char *)cmd_data->ndp_iface));
  5398. goto fail;
  5399. }
  5400. if (!ETHER_ISNULLADDR(wdev->netdev->dev_addr)) {
  5401. ret = memcpy_s(&dataresp->ndi, ETHER_ADDR_LEN,
  5402. wdev->netdev->dev_addr, ETHER_ADDR_LEN);
  5403. if (ret != BCME_OK) {
  5404. WL_ERR(("Failed to copy responder ndi\n"));
  5405. goto fail;
  5406. }
  5407. WL_TRACE(("%s: Retrieved ndi mac " MACDBG "\n",
  5408. __FUNCTION__, MAC2STRDBG(dataresp->ndi.octet)));
  5409. } else {
  5410. WL_ERR(("Invalid NDI addr retrieved\n"));
  5411. ret = BCME_BADARG;
  5412. goto fail;
  5413. }
  5414. }
  5415. dataresp->ndl_qos.min_slots = NAN_NDL_QOS_MIN_SLOT_NO_PREF;
  5416. dataresp->ndl_qos.max_latency = NAN_NDL_QOS_MAX_LAT_NO_PREF;
  5417. /* Fill the sub_command block */
  5418. sub_cmd->id = htod16(WL_NAN_CMD_DATA_DATARESP);
  5419. sub_cmd->len = sizeof(sub_cmd->u.options) +
  5420. OFFSETOF(wl_nan_dp_resp_t, tlv_params);
  5421. sub_cmd->u.options = htol32(BCM_XTLV_OPTION_ALIGN32);
  5422. pxtlv = (uint8 *)&dataresp->tlv_params;
  5423. nan_buf_size -= (sub_cmd->len +
  5424. OFFSETOF(bcm_iov_batch_subcmd_t, u.options));
  5425. buflen_avail = nan_buf_size;
  5426. if (cmd_data->svc_info.data && cmd_data->svc_info.dlen) {
  5427. ret = bcm_pack_xtlv_entry(&pxtlv, &nan_buf_size,
  5428. WL_NAN_XTLV_SD_SVC_INFO, cmd_data->svc_info.dlen,
  5429. cmd_data->svc_info.data,
  5430. BCM_XTLV_OPTION_ALIGN32);
  5431. if (ret != BCME_OK) {
  5432. WL_ERR(("unable to process svc_spec_info: %d\n", ret));
  5433. goto fail;
  5434. }
  5435. dataresp->flags |= WL_NAN_DP_FLAG_SVC_INFO;
  5436. }
  5437. /* Security elements */
  5438. if (cmd_data->csid) {
  5439. WL_TRACE(("Cipher suite type is present, pack it\n"));
  5440. ret = bcm_pack_xtlv_entry(&pxtlv, &nan_buf_size,
  5441. WL_NAN_XTLV_CFG_SEC_CSID, sizeof(nan_sec_csid_e),
  5442. (uint8*)&cmd_data->csid, BCM_XTLV_OPTION_ALIGN32);
  5443. if (unlikely(ret)) {
  5444. WL_ERR(("%s: fail to pack csid\n", __FUNCTION__));
  5445. goto fail;
  5446. }
  5447. }
  5448. if (cmd_data->ndp_cfg.security_cfg) {
  5449. if ((cmd_data->key_type == NAN_SECURITY_KEY_INPUT_PMK) ||
  5450. (cmd_data->key_type == NAN_SECURITY_KEY_INPUT_PASSPHRASE)) {
  5451. if (cmd_data->key.data && cmd_data->key.dlen) {
  5452. WL_TRACE(("optional pmk present, pack it\n"));
  5453. ret = bcm_pack_xtlv_entry(&pxtlv, &nan_buf_size,
  5454. WL_NAN_XTLV_CFG_SEC_PMK, cmd_data->key.dlen,
  5455. cmd_data->key.data, BCM_XTLV_OPTION_ALIGN32);
  5456. if (unlikely(ret)) {
  5457. WL_ERR(("%s: fail to pack WL_NAN_XTLV_CFG_SEC_PMK\n",
  5458. __FUNCTION__));
  5459. goto fail;
  5460. }
  5461. }
  5462. } else {
  5463. WL_ERR(("Invalid security key type\n"));
  5464. ret = BCME_BADARG;
  5465. goto fail;
  5466. }
  5467. if ((cmd_data->svc_hash.dlen == WL_NAN_SVC_HASH_LEN) &&
  5468. (cmd_data->svc_hash.data)) {
  5469. WL_TRACE(("svc hash present, pack it\n"));
  5470. ret = bcm_pack_xtlv_entry(&pxtlv, &nan_buf_size,
  5471. WL_NAN_XTLV_CFG_SVC_HASH, WL_NAN_SVC_HASH_LEN,
  5472. cmd_data->svc_hash.data,
  5473. BCM_XTLV_OPTION_ALIGN32);
  5474. if (ret != BCME_OK) {
  5475. WL_ERR(("%s: fail to pack WL_NAN_XTLV_CFG_SVC_HASH\n",
  5476. __FUNCTION__));
  5477. goto fail;
  5478. }
  5479. }
  5480. /* If the Data resp is for secure data connection */
  5481. dataresp->flags |= WL_NAN_DP_FLAG_SECURITY;
  5482. }
  5483. sub_cmd->len += (buflen_avail - nan_buf_size);
  5484. nan_buf->is_set = false;
  5485. nan_buf->count++;
  5486. ret = wl_cfgnan_execute_ioctl(ndev, cfg, nan_buf, data_size,
  5487. &(cmd_data->status), resp_buf, data_size + NAN_IOVAR_NAME_SIZE);
  5488. if (unlikely(ret) || unlikely(cmd_data->status)) {
  5489. WL_ERR(("nan data path response handler failed, error = %d, status %d\n",
  5490. ret, cmd_data->status));
  5491. goto fail;
  5492. }
  5493. WL_INFORM_MEM(("[NAN] DP response successfull (ndp_id:%d)\n", dataresp->ndp_id));
  5494. fail:
  5495. if (nan_buf) {
  5496. MFREE(cfg->osh, nan_buf, data_size);
  5497. }
  5498. if (resp_buf) {
  5499. MFREE(cfg->osh, resp_buf, data_size + NAN_IOVAR_NAME_SIZE);
  5500. }
  5501. NAN_MUTEX_UNLOCK();
  5502. mutex_unlock(&cfg->if_sync);
  5503. NAN_DBG_EXIT();
  5504. return ret;
  5505. }
/*
 * Tears down a NAN data path (WL_NAN_CMD_DATA_DATAEND).
 * Sends a data-end sub-command for cmd_data->ndp_instance_id to firmware.
 * A down bus or disabled NAN is deliberately treated as a no-op success:
 * there is nothing to tear down in firmware in those cases.
 * Returns BCME_OK on success (including the no-op cases) or a BCME_*
 * error code.
 */
int wl_cfgnan_data_path_end_handler(struct net_device *ndev,
	struct bcm_cfg80211 *cfg, nan_datapath_cmd_data_t *cmd_data)
{
	bcm_iov_batch_buf_t *nan_buf = NULL;
	wl_nan_dp_end_t *dataend = NULL;
	bcm_iov_batch_subcmd_t *sub_cmd = NULL;
	s32 ret = BCME_OK;
	uint16 nan_buf_size = NAN_IOCTL_BUF_SIZE;
	uint8 resp_buf[NAN_IOCTL_BUF_SIZE];
	dhd_pub_t *dhdp = wl_cfg80211_get_dhdp(ndev);
	NAN_DBG_ENTER();
	NAN_MUTEX_LOCK();
	/* no-op success: bus already down */
	if (!dhdp->up) {
		WL_ERR(("bus is already down, hence blocking nan dp end\n"));
		ret = BCME_OK;
		goto fail;
	}
	/* no-op success: NAN not enabled */
	if (!cfg->nan_enable) {
		WL_ERR(("nan is not enabled, nan dp end blocked\n"));
		ret = BCME_OK;
		goto fail;
	}
	/* ndp instance id must be from 1 to 255, 0 is reserved */
	if (cmd_data->ndp_instance_id < NAN_ID_MIN ||
		cmd_data->ndp_instance_id > NAN_ID_MAX) {
		WL_ERR(("Invalid ndp instance id: %d\n", cmd_data->ndp_instance_id));
		ret = BCME_BADARG;
		goto fail;
	}
	nan_buf = MALLOCZ(cfg->osh, nan_buf_size);
	if (!nan_buf) {
		WL_ERR(("%s: memory allocation failed\n", __func__));
		ret = BCME_NOMEM;
		goto fail;
	}
	/* Build the single-entry batched iovar */
	nan_buf->version = htol16(WL_NAN_IOV_BATCH_VERSION);
	nan_buf->count = 0;
	nan_buf_size -= OFFSETOF(bcm_iov_batch_buf_t, cmds[0]);
	sub_cmd = (bcm_iov_batch_subcmd_t*)(&nan_buf->cmds[0]);
	dataend = (wl_nan_dp_end_t *)(sub_cmd->data);
	/* Fill sub_cmd block */
	sub_cmd->id = htod16(WL_NAN_CMD_DATA_DATAEND);
	sub_cmd->len = sizeof(sub_cmd->u.options) +
		sizeof(*dataend);
	sub_cmd->u.options = htol32(BCM_XTLV_OPTION_ALIGN32);
	dataend->lndp_id = cmd_data->ndp_instance_id;
	/*
	 * Currently fw requires ndp_id and reason to end the data path
	 * But wifi_nan.h takes ndp_instances_count and ndp_id.
	 * Will keep reason = accept always.
	 */
	dataend->status = 1;
	nan_buf->is_set = true;
	nan_buf->count++;
	/* nan_buf_size now tracks the unused remainder of the buffer */
	nan_buf_size -= (sub_cmd->len +
		OFFSETOF(bcm_iov_batch_subcmd_t, u.options));
	memset_s(resp_buf, sizeof(resp_buf), 0, sizeof(resp_buf));
	ret = wl_cfgnan_execute_ioctl(ndev, cfg, nan_buf, nan_buf_size,
		&(cmd_data->status),
		(void*)resp_buf, NAN_IOCTL_BUF_SIZE);
	if (unlikely(ret) || unlikely(cmd_data->status)) {
		WL_ERR(("nan data path end handler failed, error = %d status %d\n",
			ret, cmd_data->status));
		goto fail;
	}
	WL_INFORM_MEM(("[NAN] DP end successfull (ndp_id:%d)\n",
		dataend->lndp_id));
fail:
	if (nan_buf) {
		MFREE(cfg->osh, nan_buf, NAN_IOCTL_BUF_SIZE);
	}
	NAN_MUTEX_UNLOCK();
	NAN_DBG_EXIT();
	return ret;
}
  5581. #ifdef WL_NAN_DISC_CACHE
  5582. int wl_cfgnan_sec_info_handler(struct bcm_cfg80211 *cfg,
  5583. nan_datapath_sec_info_cmd_data_t *cmd_data, nan_hal_resp_t *nan_req_resp)
  5584. {
  5585. s32 ret = BCME_NOTFOUND;
  5586. /* check in cache */
  5587. nan_disc_result_cache *disc_cache = NULL;
  5588. nan_svc_info_t *svc_info = NULL;
  5589. NAN_DBG_ENTER();
  5590. NAN_MUTEX_LOCK();
  5591. if (!cfg->nan_init_state) {
  5592. WL_ERR(("nan is not initialized/nmi doesnt exists\n"));
  5593. ret = BCME_NOTENABLED;
  5594. goto fail;
  5595. }
  5596. /* datapath request context */
  5597. if (cmd_data->pub_id && !ETHER_ISNULLADDR(&cmd_data->mac_addr)) {
  5598. disc_cache = wl_cfgnan_get_disc_result(cfg,
  5599. cmd_data->pub_id, &cmd_data->mac_addr);
  5600. WL_DBG(("datapath request: PUB ID: = %d\n",
  5601. cmd_data->pub_id));
  5602. if (disc_cache) {
  5603. (void)memcpy_s(nan_req_resp->svc_hash, WL_NAN_SVC_HASH_LEN,
  5604. disc_cache->svc_hash, WL_NAN_SVC_HASH_LEN);
  5605. ret = BCME_OK;
  5606. } else {
  5607. WL_ERR(("disc_cache is NULL\n"));
  5608. goto fail;
  5609. }
  5610. }
  5611. /* datapath response context */
  5612. if (cmd_data->ndp_instance_id) {
  5613. WL_DBG(("datapath response: NDP ID: = %d\n",
  5614. cmd_data->ndp_instance_id));
  5615. svc_info = wl_cfgnan_get_svc_inst(cfg, 0, cmd_data->ndp_instance_id);
  5616. /* Note: svc_info will not be present in OOB cases
  5617. * In such case send NMI alone and let HAL handle if
  5618. * svc_hash is mandatory
  5619. */
  5620. if (svc_info) {
  5621. WL_DBG(("svc hash present, pack it\n"));
  5622. (void)memcpy_s(nan_req_resp->svc_hash, WL_NAN_SVC_HASH_LEN,
  5623. svc_info->svc_hash, WL_NAN_SVC_HASH_LEN);
  5624. } else {
  5625. WL_INFORM_MEM(("svc_info not present..assuming OOB DP\n"));
  5626. }
  5627. /* Always send NMI */
  5628. (void)memcpy_s(nan_req_resp->pub_nmi, ETHER_ADDR_LEN,
  5629. cfg->nan_nmi_mac, ETHER_ADDR_LEN);
  5630. ret = BCME_OK;
  5631. }
  5632. fail:
  5633. NAN_MUTEX_UNLOCK();
  5634. NAN_DBG_EXIT();
  5635. return ret;
  5636. }
/*
 * Copies a cached discovery result into a nan_event_data_t.
 * Scalar fields are copied directly; the optional svc_info and
 * tx_match_filter buffers are deep-copied into fresh MALLOCZ allocations.
 * Returns BCME_OK, -ENOMEM on allocation failure, or a memcpy_s error.
 * NOTE(review): on a mid-way failure any buffer already attached to
 * nan_event_data is left in place — presumably released by the caller's
 * event-data cleanup path; verify against the caller.
 */
static s32 wl_nan_cache_to_event_data(nan_disc_result_cache *cache,
	nan_event_data_t *nan_event_data, osl_t *osh)
{
	s32 ret = BCME_OK;
	NAN_DBG_ENTER();
	/* Scalar identifiers/attributes of the cached match */
	nan_event_data->pub_id = cache->pub_id;
	nan_event_data->sub_id = cache->sub_id;
	nan_event_data->publish_rssi = cache->publish_rssi;
	nan_event_data->peer_cipher_suite = cache->peer_cipher_suite;
	ret = memcpy_s(&nan_event_data->remote_nmi, ETHER_ADDR_LEN,
		&cache->peer, ETHER_ADDR_LEN);
	if (ret != BCME_OK) {
		WL_ERR(("Failed to copy cached peer nan nmi\n"));
		goto fail;
	}
	/* Deep-copy the optional service-info blob, if cached */
	if (cache->svc_info.dlen && cache->svc_info.data) {
		nan_event_data->svc_info.dlen = cache->svc_info.dlen;
		nan_event_data->svc_info.data =
			MALLOCZ(osh, nan_event_data->svc_info.dlen);
		if (!nan_event_data->svc_info.data) {
			WL_ERR(("%s: memory allocation failed\n", __FUNCTION__));
			/* keep dlen consistent with the NULL data pointer */
			nan_event_data->svc_info.dlen = 0;
			ret = -ENOMEM;
			goto fail;
		}
		ret = memcpy_s(nan_event_data->svc_info.data, nan_event_data->svc_info.dlen,
			cache->svc_info.data, cache->svc_info.dlen);
		if (ret != BCME_OK) {
			WL_ERR(("Failed to copy cached svc info data\n"));
			goto fail;
		}
	}
	/* Deep-copy the optional tx match filter, if cached */
	if (cache->tx_match_filter.dlen && cache->tx_match_filter.data) {
		nan_event_data->tx_match_filter.dlen = cache->tx_match_filter.dlen;
		nan_event_data->tx_match_filter.data =
			MALLOCZ(osh, nan_event_data->tx_match_filter.dlen);
		if (!nan_event_data->tx_match_filter.data) {
			WL_ERR(("%s: memory allocation failed\n", __FUNCTION__));
			/* keep dlen consistent with the NULL data pointer */
			nan_event_data->tx_match_filter.dlen = 0;
			ret = -ENOMEM;
			goto fail;
		}
		ret = memcpy_s(nan_event_data->tx_match_filter.data,
			nan_event_data->tx_match_filter.dlen,
			cache->tx_match_filter.data, cache->tx_match_filter.dlen);
		if (ret != BCME_OK) {
			WL_ERR(("Failed to copy cached tx match filter data\n"));
			goto fail;
		}
	}
fail:
	NAN_DBG_EXIT();
	return ret;
}
  5691. #endif /* WL_NAN_DISC_CACHE */
  5692. /* API to cancel the ranging with peer
  5693. * For geofence initiator, suspend ranging.
  5694. * for directed RTT initiator , report fail result, cancel ranging
  5695. * and clear ranging instance
  5696. * For responder, cancel ranging and clear ranging instance
  5697. */
  5698. #ifdef RTT_SUPPORT
  5699. static s32
  5700. wl_cfgnan_clear_peer_ranging(struct bcm_cfg80211 *cfg,
  5701. struct ether_addr *peer, int reason)
  5702. {
  5703. uint32 status = 0;
  5704. nan_ranging_inst_t *rng_inst = NULL;
  5705. int err = BCME_OK;
  5706. struct net_device *ndev = bcmcfg_to_prmry_ndev(cfg);
  5707. dhd_pub_t *dhdp = (dhd_pub_t *)(cfg->pub);
  5708. rng_inst = wl_cfgnan_check_for_ranging(cfg, peer);
  5709. if (rng_inst) {
  5710. if (rng_inst->range_type == RTT_TYPE_NAN_GEOFENCE) {
  5711. err = wl_cfgnan_suspend_geofence_rng_session(ndev,
  5712. peer, reason, 0);
  5713. } else {
  5714. if (rng_inst->range_type == RTT_TYPE_NAN_DIRECTED) {
  5715. dhd_rtt_handle_nan_rtt_session_end(dhdp,
  5716. peer);
  5717. }
  5718. /* responder */
  5719. err = wl_cfgnan_cancel_ranging(ndev, cfg,
  5720. rng_inst->range_id,
  5721. NAN_RNG_TERM_FLAG_IMMEDIATE, &status);
  5722. bzero(rng_inst, sizeof(*rng_inst));
  5723. }
  5724. }
  5725. if (err) {
  5726. WL_ERR(("Failed to stop ranging with peer %d\n", err));
  5727. }
  5728. return err;
  5729. }
  5730. #endif /* RTT_SUPPORT */
/*
 * Parses a WL_NAN_XTLV_DATA_DP_INFO xtlv from a NAN datapath firmware
 * event into @nan_event_data and reports the offsets of the optional
 * TLVs (@tlvs_offset/@nan_opts_len) plus the HAL event id to raise.
 * Also drives driver-side bookkeeping for the event:
 *  - PEER_DATAPATH_IND: record the ndp id on the service, add the peer;
 *  - DATAPATH_ESTB: mark the peer connected, or on reject remove it;
 *  - DATAPATH_END: clear the ndp id and remove the peer.
 * Returns BCME_OK, or an error when a copy fails, no free svc ndp slot
 * exists, or an unexpected NDP status is seen.
 */
static s32
wl_nan_dp_cmn_event_data(struct bcm_cfg80211 *cfg, void *event_data,
	uint16 data_len, uint16 *tlvs_offset,
	uint16 *nan_opts_len, uint32 event_num,
	int *hal_event_id, nan_event_data_t *nan_event_data)
{
	s32 ret = BCME_OK;
	uint8 i;
	wl_nan_ev_datapath_cmn_t *ev_dp;
	nan_svc_info_t *svc_info;
	bcm_xtlv_t *xtlv = (bcm_xtlv_t *)event_data;
#ifdef RTT_SUPPORT
	nan_ranging_inst_t *rng_inst = NULL;
#endif /* RTT_SUPPORT */
	if (xtlv->id == WL_NAN_XTLV_DATA_DP_INFO) {
		ev_dp = (wl_nan_ev_datapath_cmn_t *)xtlv->data;
		NAN_DBG_ENTER();
		BCM_REFERENCE(svc_info);
		BCM_REFERENCE(i);
		/* Mapping to common struct between DHD and HAL */
		WL_TRACE(("Event type: %d\n", ev_dp->type));
		nan_event_data->type = ev_dp->type;
		WL_TRACE(("pub_id: %d\n", ev_dp->pub_id));
		nan_event_data->pub_id = ev_dp->pub_id;
		WL_TRACE(("security: %d\n", ev_dp->security));
		nan_event_data->security = ev_dp->security;
		/* Store initiator_ndi, required for data_path_response_request */
		ret = memcpy_s(&cfg->initiator_ndi, ETHER_ADDR_LEN,
			&ev_dp->initiator_ndi, ETHER_ADDR_LEN);
		if (ret != BCME_OK) {
			WL_ERR(("Failed to copy event's initiator addr\n"));
			goto fail;
		}
		/* ndp id and peer NMI come from different fields for
		 * unicast vs multicast sessions
		 */
		if (ev_dp->type == NAN_DP_SESSION_UNICAST) {
			WL_INFORM_MEM(("NDP ID: %d\n", ev_dp->ndp_id));
			nan_event_data->ndp_id = ev_dp->ndp_id;
			WL_TRACE(("INITIATOR_NDI: " MACDBG "\n",
				MAC2STRDBG(ev_dp->initiator_ndi.octet)));
			WL_TRACE(("RESPONDOR_NDI: " MACDBG "\n",
				MAC2STRDBG(ev_dp->responder_ndi.octet)));
			WL_TRACE(("PEER NMI: " MACDBG "\n",
				MAC2STRDBG(ev_dp->peer_nmi.octet)));
			ret = memcpy_s(&nan_event_data->remote_nmi, ETHER_ADDR_LEN,
				&ev_dp->peer_nmi, ETHER_ADDR_LEN);
			if (ret != BCME_OK) {
				WL_ERR(("Failed to copy event's peer nmi\n"));
				goto fail;
			}
		} else {
			/* type is multicast */
			WL_INFORM_MEM(("NDP ID: %d\n", ev_dp->mc_id));
			nan_event_data->ndp_id = ev_dp->mc_id;
			WL_TRACE(("PEER NMI: " MACDBG "\n",
				MAC2STRDBG(ev_dp->peer_nmi.octet)));
			ret = memcpy_s(&nan_event_data->remote_nmi, ETHER_ADDR_LEN,
				&ev_dp->peer_nmi,
				ETHER_ADDR_LEN);
			if (ret != BCME_OK) {
				WL_ERR(("Failed to copy event's peer nmi\n"));
				goto fail;
			}
		}
		/* Optional TLVs start after the fixed cmn struct + xtlv hdr */
		*tlvs_offset = OFFSETOF(wl_nan_ev_datapath_cmn_t, opt_tlvs) +
			OFFSETOF(bcm_xtlv_t, data);
		*nan_opts_len = data_len - *tlvs_offset;
		if (event_num == WL_NAN_EVENT_PEER_DATAPATH_IND) {
			*hal_event_id = GOOGLE_NAN_EVENT_DATA_REQUEST;
#ifdef WL_NAN_DISC_CACHE
			/* Record the new ndp id in a free slot of the
			 * publishing service's ndp_id table
			 */
			svc_info = wl_cfgnan_get_svc_inst(cfg, nan_event_data->pub_id, 0);
			if (svc_info) {
				for (i = 0; i < NAN_MAX_SVC_INST; i++) {
					if (!svc_info->ndp_id[i]) {
						WL_TRACE(("Found empty field\n"));
						break;
					}
				}
				if (i == NAN_MAX_SVC_INST) {
					WL_ERR(("%s:cannot accommadate ndp id\n", __FUNCTION__));
					ret = BCME_NORESOURCE;
					goto fail;
				}
				svc_info->ndp_id[i] = nan_event_data->ndp_id;
				/* Add peer to data ndp peer list */
				wl_cfgnan_data_add_peer(cfg, &ev_dp->peer_nmi);
#ifdef RTT_SUPPORT
				/* cancel any ongoing RTT session with peer
				 * as we donot support DP and RNG to same peer
				 */
				wl_cfgnan_clear_peer_ranging(cfg, &ev_dp->peer_nmi,
					RTT_GEO_SUSPN_PEER_NDP_TRIGGER);
#endif /* RTT_SUPPORT */
				ret = BCME_OK;
			}
#endif /* WL_NAN_DISC_CACHE */
		} else if (event_num == WL_NAN_EVENT_DATAPATH_ESTB) {
			*hal_event_id = GOOGLE_NAN_EVENT_DATA_CONFIRMATION;
			/* responder_ndi reported to HAL is always the
			 * remote side's NDI, whichever role we hold
			 */
			if (ev_dp->role == NAN_DP_ROLE_INITIATOR) {
				ret = memcpy_s(&nan_event_data->responder_ndi, ETHER_ADDR_LEN,
					&ev_dp->responder_ndi,
					ETHER_ADDR_LEN);
				if (ret != BCME_OK) {
					WL_ERR(("Failed to copy event's responder ndi\n"));
					goto fail;
				}
				WL_TRACE(("REMOTE_NDI: " MACDBG "\n",
					MAC2STRDBG(ev_dp->responder_ndi.octet)));
				WL_TRACE(("Initiator status %d\n", nan_event_data->status));
			} else {
				ret = memcpy_s(&nan_event_data->responder_ndi, ETHER_ADDR_LEN,
					&ev_dp->initiator_ndi,
					ETHER_ADDR_LEN);
				if (ret != BCME_OK) {
					WL_ERR(("Failed to copy event's responder ndi\n"));
					goto fail;
				}
				WL_TRACE(("REMOTE_NDI: " MACDBG "\n",
					MAC2STRDBG(ev_dp->initiator_ndi.octet)));
			}
			if (ev_dp->status == NAN_NDP_STATUS_ACCEPT) {
				nan_event_data->status = NAN_DP_REQUEST_ACCEPT;
				wl_cfgnan_data_set_peer_dp_state(cfg, &ev_dp->peer_nmi,
					NAN_PEER_DP_CONNECTED);
			} else if (ev_dp->status == NAN_NDP_STATUS_REJECT) {
				nan_event_data->status = NAN_DP_REQUEST_REJECT;
				/* Remove peer from data ndp peer list */
				wl_cfgnan_data_remove_peer(cfg, &ev_dp->peer_nmi);
#ifdef RTT_SUPPORT
				rng_inst = wl_cfgnan_check_for_ranging(cfg, &ev_dp->peer_nmi);
				if (rng_inst) {
					/* Trigger/Reset geofence RTT */
					wl_cfgnan_reset_geofence_ranging(cfg,
						rng_inst, RTT_SCHED_DP_REJECTED);
				}
#endif /* RTT_SUPPORT */
			} else {
				WL_ERR(("%s:Status code = %x not expected\n",
					__FUNCTION__, ev_dp->status));
				ret = BCME_ERROR;
				goto fail;
			}
			WL_TRACE(("Responder status %d\n", nan_event_data->status));
			wl_cfgnan_update_dp_mask(cfg, true, nan_event_data->ndp_id);
		} else if (event_num == WL_NAN_EVENT_DATAPATH_END) {
			/* Mapping to common struct between DHD and HAL */
			*hal_event_id = GOOGLE_NAN_EVENT_DATA_END;
			wl_cfgnan_update_dp_mask(cfg, false, nan_event_data->ndp_id);
#ifdef WL_NAN_DISC_CACHE
			if (ev_dp->role != NAN_DP_ROLE_INITIATOR) {
				/* Only at Responder side,
				 * If dp is ended,
				 * clear the resp ndp id from the svc info cache
				 */
				svc_info = wl_cfgnan_get_svc_inst(cfg, 0, nan_event_data->ndp_id);
				if (svc_info) {
					for (i = 0; i < NAN_MAX_SVC_INST; i++) {
						if (svc_info->ndp_id[i] == nan_event_data->ndp_id) {
							svc_info->ndp_id[i] = 0;
						}
					}
				} else {
					WL_DBG(("couldn't find entry for ndp id = %d\n",
						nan_event_data->ndp_id));
				}
			}
#endif /* WL_NAN_DISC_CACHE */
			/* Remove peer from data ndp peer list */
			wl_cfgnan_data_remove_peer(cfg, &ev_dp->peer_nmi);
#ifdef RTT_SUPPORT
			WL_INFORM_MEM(("DP_END for REMOTE_NMI: " MACDBG "\n",
				MAC2STRDBG(&ev_dp->peer_nmi)));
			rng_inst = wl_cfgnan_check_for_ranging(cfg, &ev_dp->peer_nmi);
			if (rng_inst) {
				/* Trigger/Reset geofence RTT */
				WL_INFORM_MEM(("sched geofence rtt from DP_END ctx: " MACDBG "\n",
					MAC2STRDBG(&rng_inst->peer_addr)));
				wl_cfgnan_reset_geofence_ranging(cfg, rng_inst,
					RTT_SCHED_DP_END);
			}
#endif /* RTT_SUPPORT */
		}
	} else {
		/* Follow though, not handling other IDs as of now */
		WL_DBG(("%s:ID = 0x%02x not supported\n", __FUNCTION__, xtlv->id));
	}
fail:
	/* NOTE(review): NAN_DBG_EXIT runs even when NAN_DBG_ENTER was
	 * skipped (non-DP_INFO xtlv) — harmless for debug macros, but
	 * confirm the macros are side-effect free.
	 */
	NAN_DBG_EXIT();
	return ret;
}
/* TRUE when distance lies inside the geofence band [egress, ingress],
 * inclusive at both limits (ingress is the upper bound, egress the lower).
 */
#define IN_GEOFENCE(ingress, egress, distance) (((distance) <= (ingress)) && \
	((distance) >= (egress)))
/* Strictly closer than the ingress limit (note '<' here vs '<=' in IN_GEOFENCE) */
#define IS_INGRESS_VAL(ingress, distance) ((distance) < (ingress))
/* Strictly farther than the egress limit (note '>' here vs '>=' in IN_GEOFENCE) */
#define IS_EGRESS_VAL(egress, distance) ((distance) > (egress))
  5923. static bool
  5924. wl_cfgnan_check_ranging_cond(nan_svc_info_t *svc_info, uint32 distance,
  5925. uint8 *ranging_ind, uint32 prev_distance)
  5926. {
  5927. uint8 svc_ind = svc_info->ranging_ind;
  5928. bool notify = FALSE;
  5929. bool range_rep_ev_once =
  5930. !!(svc_info->svc_range_status & SVC_RANGE_REP_EVENT_ONCE);
  5931. uint32 ingress_limit = svc_info->ingress_limit;
  5932. uint32 egress_limit = svc_info->egress_limit;
  5933. WL_DBG(("wl_cfgnan_check_ranging_cond: Checking the svc ranging cnd %d"
  5934. " distance %d prev_distance %d, range_rep_ev_once %d\n",
  5935. svc_ind, distance, prev_distance, range_rep_ev_once));
  5936. WL_DBG(("wl_cfgnan_check_ranging_cond: Checking the SVC ingress and"
  5937. " egress limits %d %d\n", ingress_limit, egress_limit));
  5938. if (svc_ind & NAN_RANGE_INDICATION_CONT) {
  5939. *ranging_ind = NAN_RANGE_INDICATION_CONT;
  5940. notify = TRUE;
  5941. WL_ERR(("\n%s :Svc has continous Ind %d\n",
  5942. __FUNCTION__, __LINE__));
  5943. goto done;
  5944. }
  5945. if (svc_ind == (NAN_RANGE_INDICATION_INGRESS |
  5946. NAN_RANGE_INDICATION_EGRESS)) {
  5947. if (IN_GEOFENCE(ingress_limit, egress_limit, distance)) {
  5948. /* if not already in geofence */
  5949. if ((range_rep_ev_once == FALSE) ||
  5950. (!IN_GEOFENCE(ingress_limit, egress_limit,
  5951. prev_distance))) {
  5952. notify = TRUE;
  5953. if (distance < ingress_limit) {
  5954. *ranging_ind = NAN_RANGE_INDICATION_INGRESS;
  5955. } else {
  5956. *ranging_ind = NAN_RANGE_INDICATION_EGRESS;
  5957. }
  5958. WL_ERR(("\n%s :Svc has geofence Ind %d res_ind %d\n",
  5959. __FUNCTION__, __LINE__, *ranging_ind));
  5960. }
  5961. }
  5962. goto done;
  5963. }
  5964. if (svc_ind == NAN_RANGE_INDICATION_INGRESS) {
  5965. if (IS_INGRESS_VAL(ingress_limit, distance)) {
  5966. if ((range_rep_ev_once == FALSE) ||
  5967. (prev_distance == INVALID_DISTANCE) ||
  5968. !IS_INGRESS_VAL(ingress_limit, prev_distance)) {
  5969. notify = TRUE;
  5970. *ranging_ind = NAN_RANGE_INDICATION_INGRESS;
  5971. WL_ERR(("\n%s :Svc has ingress Ind %d\n",
  5972. __FUNCTION__, __LINE__));
  5973. }
  5974. }
  5975. goto done;
  5976. }
  5977. if (svc_ind == NAN_RANGE_INDICATION_EGRESS) {
  5978. if (IS_EGRESS_VAL(egress_limit, distance)) {
  5979. if ((range_rep_ev_once == FALSE) ||
  5980. (prev_distance == INVALID_DISTANCE) ||
  5981. !IS_EGRESS_VAL(egress_limit, prev_distance)) {
  5982. notify = TRUE;
  5983. *ranging_ind = NAN_RANGE_INDICATION_EGRESS;
  5984. WL_ERR(("\n%s :Svc has egress Ind %d\n",
  5985. __FUNCTION__, __LINE__));
  5986. }
  5987. }
  5988. goto done;
  5989. }
  5990. done:
  5991. svc_info->svc_range_status |= SVC_RANGE_REP_EVENT_ONCE;
  5992. return notify;
  5993. }
  5994. static int
  5995. wl_cfgnan_event_disc_result(struct bcm_cfg80211 *cfg,
  5996. nan_event_data_t *nan_event_data)
  5997. {
  5998. int ret = BCME_OK;
  5999. #if (LINUX_VERSION_CODE > KERNEL_VERSION(3, 13, 0)) || defined(WL_VENDOR_EXT_SUPPORT)
  6000. ret = wl_cfgvendor_send_nan_event(cfg->wdev->wiphy, bcmcfg_to_prmry_ndev(cfg),
  6001. GOOGLE_NAN_EVENT_SUBSCRIBE_MATCH, nan_event_data);
  6002. if (ret != BCME_OK) {
  6003. WL_ERR(("Failed to send event to nan hal\n"));
  6004. }
  6005. #endif /* (LINUX_VERSION_CODE > KERNEL_VERSION(3, 13, 0)) || defined(WL_VENDOR_EXT_SUPPORT) */
  6006. return ret;
  6007. }
  6008. static int32
  6009. wl_cfgnan_notify_disc_with_ranging(struct bcm_cfg80211 *cfg,
  6010. nan_ranging_inst_t *rng_inst, nan_event_data_t *nan_event_data, uint32 distance)
  6011. {
  6012. nan_svc_info_t *svc_info;
  6013. bool notify_svc = FALSE;
  6014. nan_disc_result_cache *disc_res = cfg->nan_disc_cache;
  6015. uint8 ranging_ind = 0;
  6016. int ret = BCME_OK;
  6017. int i = 0, j = 0;
  6018. for (i = 0; i < MAX_SUBSCRIBES; i++) {
  6019. svc_info = rng_inst->svc_idx[i];
  6020. if (svc_info) {
  6021. if (nan_event_data->ranging_result_present) {
  6022. notify_svc = wl_cfgnan_check_ranging_cond(svc_info, distance,
  6023. &ranging_ind, rng_inst->prev_distance_mm);
  6024. nan_event_data->ranging_ind = ranging_ind;
  6025. } else {
  6026. /* Report only if ranging was needed */
  6027. notify_svc = svc_info->ranging_required;
  6028. }
  6029. WL_DBG(("wl_cfgnan_notify_disc_with_ranging: Ranging notify for"
  6030. " svc_id %d, notify %d and ind %d\n",
  6031. svc_info->svc_id, notify_svc, ranging_ind));
  6032. } else {
  6033. continue;
  6034. }
  6035. if (notify_svc) {
  6036. for (j = 0; j < NAN_MAX_CACHE_DISC_RESULT; j++) {
  6037. if (!memcmp(&disc_res[j].peer,
  6038. &(rng_inst->peer_addr), ETHER_ADDR_LEN) &&
  6039. (svc_info->svc_id == disc_res[j].sub_id)) {
  6040. ret = wl_nan_cache_to_event_data(&disc_res[j],
  6041. nan_event_data, cfg->osh);
  6042. ret = wl_cfgnan_event_disc_result(cfg, nan_event_data);
  6043. /* If its not match once, clear it as the FW indicates
  6044. * again.
  6045. */
  6046. if (!(svc_info->flags & WL_NAN_MATCH_ONCE)) {
  6047. wl_cfgnan_remove_disc_result(cfg, svc_info->svc_id);
  6048. }
  6049. }
  6050. }
  6051. }
  6052. }
  6053. WL_DBG(("notify_disc_with_ranging done ret %d\n", ret));
  6054. return ret;
  6055. }
  6056. #ifdef RTT_SUPPORT
  6057. static int32
  6058. wl_cfgnan_handle_directed_rtt_report(struct bcm_cfg80211 *cfg,
  6059. nan_ranging_inst_t *rng_inst, uint8 rng_id)
  6060. {
  6061. int ret = BCME_OK;
  6062. uint32 status;
  6063. dhd_pub_t *dhd = (struct dhd_pub *)(cfg->pub);
  6064. ret = wl_cfgnan_cancel_ranging(bcmcfg_to_prmry_ndev(cfg), cfg,
  6065. rng_id, NAN_RNG_TERM_FLAG_IMMEDIATE, &status);
  6066. if (unlikely(ret) || unlikely(status)) {
  6067. WL_ERR(("%s:nan range cancel failed ret = %d status = %d\n",
  6068. __FUNCTION__, ret, status));
  6069. }
  6070. dhd_rtt_handle_nan_rtt_session_end(dhd, &rng_inst->peer_addr);
  6071. wl_cfgnan_reset_geofence_ranging(cfg, rng_inst, RTT_SCHED_RNG_RPT_DIRECTED);
  6072. WL_DBG(("Ongoing ranging session is cancelled \n"));
  6073. return ret;
  6074. }
  6075. #endif /* RTT_SUPPORT */
  6076. static void
  6077. wl_cfgnan_disc_result_on_geofence_cancel(struct bcm_cfg80211 *cfg,
  6078. nan_ranging_inst_t *rng_inst)
  6079. {
  6080. nan_event_data_t *nan_event_data = NULL;
  6081. nan_event_data = MALLOCZ(cfg->osh, sizeof(*nan_event_data));
  6082. if (!nan_event_data) {
  6083. WL_ERR(("%s: memory allocation failed\n", __func__));
  6084. goto exit;
  6085. }
  6086. wl_cfgnan_notify_disc_with_ranging(cfg, rng_inst, nan_event_data, 0);
  6087. exit:
  6088. wl_cfgnan_clear_nan_event_data(cfg, nan_event_data);
  6089. return;
  6090. }
  6091. #ifdef RTT_SUPPORT
  6092. void
  6093. wl_cfgnan_process_range_report(struct bcm_cfg80211 *cfg,
  6094. wl_nan_ev_rng_rpt_ind_t *range_res)
  6095. {
  6096. nan_ranging_inst_t *rng_inst = NULL;
  6097. nan_event_data_t nan_event_data;
  6098. UNUSED_PARAMETER(nan_event_data);
  6099. rng_inst = wl_cfgnan_check_for_ranging(cfg, &range_res->peer_m_addr);
  6100. if (!rng_inst) {
  6101. WL_ERR(("wl_cfgnan_process_range_report: No ranging instance "
  6102. "but received RNG RPT event..check \n"));
  6103. goto exit;
  6104. }
  6105. #ifdef NAN_RTT_DBG
  6106. DUMP_NAN_RTT_INST(rng_inst);
  6107. DUMP_NAN_RTT_RPT(range_res);
  6108. #endif // endif
  6109. range_res->rng_id = rng_inst->range_id;
  6110. bzero(&nan_event_data, sizeof(nan_event_data));
  6111. nan_event_data.ranging_result_present = 1;
  6112. nan_event_data.range_measurement_cm = range_res->dist_mm;
  6113. (void)memcpy_s(&nan_event_data.remote_nmi, ETHER_ADDR_LEN,
  6114. &range_res->peer_m_addr, ETHER_ADDR_LEN);
  6115. nan_event_data.ranging_ind = range_res->indication;
  6116. if (rng_inst->range_type == RTT_TYPE_NAN_GEOFENCE) {
  6117. /* check in cache and event match to host */
  6118. wl_cfgnan_notify_disc_with_ranging(cfg, rng_inst, &nan_event_data,
  6119. range_res->dist_mm);
  6120. rng_inst->prev_distance_mm = range_res->dist_mm;
  6121. }
  6122. if (rng_inst->range_type == RTT_TYPE_NAN_DIRECTED) {
  6123. wl_cfgnan_handle_directed_rtt_report(cfg, rng_inst, range_res->rng_id);
  6124. }
  6125. exit:
  6126. return;
  6127. }
  6128. #endif /* RTT_SUPPORT */
  6129. static void
  6130. wl_nan_print_status(wl_nan_conf_status_t *nstatus)
  6131. {
  6132. printf("> enabled: %d\n", nstatus->enabled);
  6133. printf("> Current NMI: " MACDBG "\n", MAC2STRDBG(nstatus->nmi.octet));
  6134. printf("> Current cluster_id: " MACDBG "\n", MAC2STRDBG(nstatus->cid.octet));
  6135. switch (nstatus->role) {
  6136. case WL_NAN_ROLE_AUTO:
  6137. printf("> role: %s (%d)\n", "auto", nstatus->role);
  6138. break;
  6139. case WL_NAN_ROLE_NON_MASTER_NON_SYNC:
  6140. printf("> role: %s (%d)\n", "non-master-non-sync", nstatus->role);
  6141. break;
  6142. case WL_NAN_ROLE_NON_MASTER_SYNC:
  6143. printf("> role: %s (%d)\n", "non-master-sync", nstatus->role);
  6144. break;
  6145. case WL_NAN_ROLE_MASTER:
  6146. printf("> role: %s (%d)\n", "master", nstatus->role);
  6147. break;
  6148. case WL_NAN_ROLE_ANCHOR_MASTER:
  6149. printf("> role: %s (%d)\n", "anchor-master", nstatus->role);
  6150. break;
  6151. default:
  6152. printf("> role: %s (%d)\n", "undefined", nstatus->role);
  6153. break;
  6154. }
  6155. printf("> social channels: %d, %d\n",
  6156. nstatus->social_chans[0], nstatus->social_chans[1]);
  6157. printf("> master_rank: " NMRSTR "\n", NMR2STR(nstatus->mr));
  6158. printf("> amr : " NMRSTR "\n", NMR2STR(nstatus->amr));
  6159. printf("> hop_count: %d\n", nstatus->hop_count);
  6160. printf("> ambtt: %d\n", nstatus->ambtt);
  6161. }
  6162. static void
  6163. wl_cfgnan_clear_nan_event_data(struct bcm_cfg80211 *cfg,
  6164. nan_event_data_t *nan_event_data)
  6165. {
  6166. if (nan_event_data) {
  6167. if (nan_event_data->tx_match_filter.data) {
  6168. MFREE(cfg->osh, nan_event_data->tx_match_filter.data,
  6169. nan_event_data->tx_match_filter.dlen);
  6170. nan_event_data->tx_match_filter.data = NULL;
  6171. }
  6172. if (nan_event_data->rx_match_filter.data) {
  6173. MFREE(cfg->osh, nan_event_data->rx_match_filter.data,
  6174. nan_event_data->rx_match_filter.dlen);
  6175. nan_event_data->rx_match_filter.data = NULL;
  6176. }
  6177. if (nan_event_data->svc_info.data) {
  6178. MFREE(cfg->osh, nan_event_data->svc_info.data,
  6179. nan_event_data->svc_info.dlen);
  6180. nan_event_data->svc_info.data = NULL;
  6181. }
  6182. if (nan_event_data->sde_svc_info.data) {
  6183. MFREE(cfg->osh, nan_event_data->sde_svc_info.data,
  6184. nan_event_data->sde_svc_info.dlen);
  6185. nan_event_data->sde_svc_info.data = NULL;
  6186. }
  6187. MFREE(cfg->osh, nan_event_data, sizeof(*nan_event_data));
  6188. }
  6189. }
  6190. #ifdef RTT_SUPPORT
/*
 * Re-evaluate geofence RTT scheduling after a ranging-related event.
 *
 * If no geofence target remains, the ranging instance is cleared and any
 * pending retry timer cancelled. Otherwise the instance is reset (or
 * removed and re-created against the current geofence target), and RTT is
 * either scheduled on the work thread immediately or retried later over
 * the retry timer when the RTT state does not allow it right now.
 *
 * cfg          - driver cfg80211 context
 * rng_inst     - ranging instance that triggered this re-evaluation
 * sched_reason - RTT_SCHED_* reason code describing the caller's context
 */
void
wl_cfgnan_reset_geofence_ranging(struct bcm_cfg80211 *cfg,
	nan_ranging_inst_t * rng_inst, int sched_reason)
{
	dhd_pub_t *dhd = (struct dhd_pub *)(cfg->pub);
	u8 rtt_invalid_reason = RTT_STATE_VALID;
	rtt_geofence_target_info_t *geofence_target = NULL;
	rtt_status_info_t *rtt_status = GET_RTTSTATE(dhd);
	int8 cur_idx = DHD_RTT_INVALID_TARGET_INDEX;
	int8 index = DHD_RTT_INVALID_TARGET_INDEX;
	bool geofence_state = dhd_rtt_get_geofence_rtt_state(dhd);
	bool retry = FALSE;
	WL_INFORM_MEM(("wl_cfgnan_reset_geofence_ranging, sched_reason = %d, cur_idx = %d\n",
		sched_reason, rtt_status->geofence_cfg.cur_target_idx));
	cur_idx = dhd_rtt_get_geofence_cur_target_idx(dhd);
	/* No geofence target left: drop the ranging instance and stop retrying */
	if (cur_idx == -1) {
		WL_INFORM_MEM(("wl_cfgnan_reset_geofence_ranging, "
			"Removing Ranging Instance " MACDBG "\n",
			MAC2STRDBG(&(rng_inst->peer_addr))));
		bzero(rng_inst, sizeof(*rng_inst));
		/* Cancel pending retry timer if any */
		if (delayed_work_pending(&rtt_status->rtt_retry_timer)) {
			cancel_delayed_work(&rtt_status->rtt_retry_timer);
		}
		goto exit;
	}
	/* Get current geofencing target */
	geofence_target = dhd_rtt_get_geofence_current_target(dhd);
	/* get target index for cur ranging inst */
	dhd_rtt_get_geofence_target(dhd,
		&rng_inst->peer_addr, &index);
	if (cur_idx == index) {
		/* Incoming instance is the current target: reset it in place */
		rng_inst->range_type = RTT_TYPE_NAN_GEOFENCE;
		rng_inst->range_status = NAN_RANGING_REQUIRED;
		/* Keep prev distance across geofence report/retry reschedules
		 * so crossing detection still works; invalidate otherwise.
		 */
		if ((sched_reason != RTT_SCHED_RNG_RPT_GEOFENCE) &&
			(sched_reason != RTT_SCHED_RTT_RETRY_GEOFENCE)) {
			rng_inst->prev_distance_mm = INVALID_DISTANCE;
		}
	} else {
		if (index == DHD_RTT_INVALID_TARGET_INDEX) {
			/* Peer no longer a geofence target: remove its instance */
			WL_INFORM_MEM(("Removing Ranging Instance " MACDBG "\n",
				MAC2STRDBG(&(rng_inst->peer_addr))));
			bzero(rng_inst, sizeof(*rng_inst));
		} else {
			/* Still a target, just not the current one: reset it */
			rng_inst->range_type = RTT_TYPE_NAN_GEOFENCE;
			rng_inst->range_status = NAN_RANGING_REQUIRED;
			if ((sched_reason != RTT_SCHED_RNG_RPT_GEOFENCE) &&
				(sched_reason != RTT_SCHED_RTT_RETRY_GEOFENCE)) {
				rng_inst->prev_distance_mm = INVALID_DISTANCE;
			}
		}
		/* Create range inst if not present and reset explicitly */
		rng_inst = wl_cfgnan_get_ranging_inst(cfg,
			&geofence_target->peer_addr, NAN_RANGING_ROLE_INITIATOR);
	}
	/* Retry later if
	 * already geofence running
	 * or Directed RTT in progress
	 * or Invalid RTT state like
	 * NDP with Peer
	 */
	if ((geofence_state == TRUE) ||
		(!RTT_IS_STOPPED(rtt_status)) ||
		(rtt_invalid_reason != RTT_STATE_VALID)) {
		/* Not in valid RTT state, retry over a timer */
		retry = TRUE;
	}
	if (cur_idx == 0 && sched_reason == RTT_SCHED_RNG_RPT_GEOFENCE) {
		/* First Target again after all done, retry over a timer */
		retry = TRUE;
	}
	if (retry) {
		/* Move to first target and retry over a timer */
		WL_DBG(("Retry over a timer, cur_idx = %d\n",
			rtt_status->geofence_cfg.cur_target_idx));
		/* schedule proxd retry timer */
		schedule_delayed_work(&rtt_status->rtt_retry_timer,
			msecs_to_jiffies(DHD_RTT_RETRY_TIMER_INTERVAL_MS));
		goto exit;
	}
	/* schedule RTT */
	dhd_rtt_schedule_rtt_work_thread(dhd, sched_reason);
exit:
	return;
}
  6285. static bool
  6286. wl_check_range_role_concurrency(dhd_pub_t *dhd, nan_ranging_inst_t *rng_inst)
  6287. {
  6288. ASSERT(rng_inst);
  6289. if ((dhd_rtt_get_role_concurrency_state(dhd) == TRUE) &&
  6290. (rng_inst->num_svc_ctx > 0)) {
  6291. return TRUE;
  6292. } else {
  6293. return FALSE;
  6294. }
  6295. }
  6296. static void
  6297. wl_cfgnan_resolve_ranging_role_concurrecny(dhd_pub_t *dhd,
  6298. nan_ranging_inst_t *rng_inst)
  6299. {
  6300. /* Update rang_inst to initiator and resolve role concurrency */
  6301. rng_inst->range_role = NAN_RANGING_ROLE_INITIATOR;
  6302. dhd_rtt_set_role_concurrency_state(dhd, FALSE);
  6303. }
  6304. #endif /* RTT_SUPPORT */
  6305. s32
  6306. wl_cfgnan_notify_nan_status(struct bcm_cfg80211 *cfg,
  6307. bcm_struct_cfgdev *cfgdev, const wl_event_msg_t *event, void *event_data)
  6308. {
  6309. uint16 data_len;
  6310. uint32 event_num;
  6311. s32 event_type;
  6312. int hal_event_id = 0;
  6313. nan_event_data_t *nan_event_data = NULL;
  6314. nan_parse_event_ctx_t nan_event_ctx;
  6315. uint16 tlvs_offset = 0;
  6316. uint16 nan_opts_len = 0;
  6317. uint8 *tlv_buf;
  6318. s32 ret = BCME_OK;
  6319. bcm_xtlv_opts_t xtlv_opt = BCM_IOV_CMD_OPT_ALIGN32;
  6320. uint32 status;
  6321. nan_svc_info_t *svc;
  6322. UNUSED_PARAMETER(wl_nan_print_status);
  6323. UNUSED_PARAMETER(status);
  6324. NAN_DBG_ENTER();
  6325. NAN_MUTEX_LOCK();
  6326. if (!event || !event_data) {
  6327. WL_ERR(("event data is NULL\n"));
  6328. ret = -EINVAL;
  6329. goto exit;
  6330. }
  6331. event_type = ntoh32(event->event_type);
  6332. event_num = ntoh32(event->reason);
  6333. data_len = ntoh32(event->datalen);
  6334. if (NAN_INVALID_EVENT(event_num)) {
  6335. WL_ERR(("unsupported event, num: %d, event type: %d\n", event_num, event_type));
  6336. ret = -EINVAL;
  6337. goto exit;
  6338. }
  6339. WL_DBG((">> Nan Event Received: %s (num=%d, len=%d)\n",
  6340. nan_event_to_str(event_num), event_num, data_len));
  6341. #ifdef WL_NAN_DEBUG
  6342. prhex("nan_event_data:", event_data, data_len);
  6343. #endif /* WL_NAN_DEBUG */
  6344. if (!cfg->nan_init_state) {
  6345. WL_ERR(("nan is not in initialized state, dropping nan related events\n"));
  6346. ret = BCME_OK;
  6347. goto exit;
  6348. }
  6349. nan_event_data = MALLOCZ(cfg->osh, sizeof(*nan_event_data));
  6350. if (!nan_event_data) {
  6351. WL_ERR(("%s: memory allocation failed\n", __func__));
  6352. goto exit;
  6353. }
  6354. nan_event_ctx.cfg = cfg;
  6355. nan_event_ctx.nan_evt_data = nan_event_data;
  6356. /*
  6357. * send as preformatted hex string
  6358. * EVENT_NAN <event_type> <tlv_hex_string>
  6359. */
  6360. switch (event_num) {
  6361. case WL_NAN_EVENT_START:
  6362. case WL_NAN_EVENT_MERGE:
  6363. case WL_NAN_EVENT_ROLE: {
  6364. /* get nan status info as-is */
  6365. bcm_xtlv_t *xtlv = (bcm_xtlv_t *)event_data;
  6366. wl_nan_conf_status_t *nstatus = (wl_nan_conf_status_t *)xtlv->data;
  6367. WL_INFORM_MEM((">> Nan Mac Event Received: %s (num=%d, len=%d)\n",
  6368. nan_event_to_str(event_num), event_num, data_len));
  6369. WL_INFORM_MEM(("Nan Device Role %s\n", nan_role_to_str(nstatus->role)));
  6370. /* Mapping to common struct between DHD and HAL */
  6371. nan_event_data->enabled = nstatus->enabled;
  6372. ret = memcpy_s(&nan_event_data->local_nmi, ETHER_ADDR_LEN,
  6373. &nstatus->nmi, ETHER_ADDR_LEN);
  6374. if (ret != BCME_OK) {
  6375. WL_ERR(("Failed to copy nmi\n"));
  6376. goto exit;
  6377. }
  6378. ret = memcpy_s(&nan_event_data->clus_id, ETHER_ADDR_LEN,
  6379. &nstatus->cid, ETHER_ADDR_LEN);
  6380. if (ret != BCME_OK) {
  6381. WL_ERR(("Failed to copy cluster id\n"));
  6382. goto exit;
  6383. }
  6384. nan_event_data->nan_de_evt_type = event_num;
  6385. #ifdef WL_NAN_DEBUG
  6386. wl_nan_print_status(nstatus);
  6387. #endif /* WL_NAN_DEBUG */
  6388. if (event_num == WL_NAN_EVENT_START) {
  6389. OSL_SMP_WMB();
  6390. cfg->nancfg.nan_event_recvd = true;
  6391. OSL_SMP_WMB();
  6392. wake_up(&cfg->nancfg.nan_event_wait);
  6393. }
  6394. hal_event_id = GOOGLE_NAN_EVENT_DE_EVENT;
  6395. break;
  6396. }
  6397. case WL_NAN_EVENT_TERMINATED: {
  6398. bcm_xtlv_t *xtlv = (bcm_xtlv_t *)event_data;
  6399. wl_nan_ev_terminated_t *pev = (wl_nan_ev_terminated_t *)xtlv->data;
  6400. /* Mapping to common struct between DHD and HAL */
  6401. WL_TRACE(("Instance ID: %d\n", pev->instance_id));
  6402. nan_event_data->local_inst_id = pev->instance_id;
  6403. WL_TRACE(("Service Type: %d\n", pev->svctype));
  6404. #ifdef WL_NAN_DISC_CACHE
  6405. if (pev->svctype == NAN_SC_SUBSCRIBE) {
  6406. wl_cfgnan_remove_disc_result(cfg, pev->instance_id);
  6407. }
  6408. #endif /* WL_NAN_DISC_CACHE */
  6409. /* Mapping reason code of FW to status code of framework */
  6410. if (pev->reason == NAN_TERM_REASON_TIMEOUT ||
  6411. pev->reason == NAN_TERM_REASON_USER_REQ ||
  6412. pev->reason == NAN_TERM_REASON_COUNT_REACHED) {
  6413. nan_event_data->status = NAN_STATUS_SUCCESS;
  6414. ret = memcpy_s(nan_event_data->nan_reason,
  6415. sizeof(nan_event_data->nan_reason),
  6416. "NAN_STATUS_SUCCESS",
  6417. strlen("NAN_STATUS_SUCCESS"));
  6418. if (ret != BCME_OK) {
  6419. WL_ERR(("Failed to copy nan_reason\n"));
  6420. goto exit;
  6421. }
  6422. } else {
  6423. nan_event_data->status = NAN_STATUS_INTERNAL_FAILURE;
  6424. ret = memcpy_s(nan_event_data->nan_reason,
  6425. sizeof(nan_event_data->nan_reason),
  6426. "NAN_STATUS_INTERNAL_FAILURE",
  6427. strlen("NAN_STATUS_INTERNAL_FAILURE"));
  6428. if (ret != BCME_OK) {
  6429. WL_ERR(("Failed to copy nan_reason\n"));
  6430. goto exit;
  6431. }
  6432. }
  6433. if (pev->svctype == NAN_SC_SUBSCRIBE) {
  6434. hal_event_id = GOOGLE_NAN_EVENT_SUBSCRIBE_TERMINATED;
  6435. } else {
  6436. hal_event_id = GOOGLE_NAN_EVENT_PUBLISH_TERMINATED;
  6437. }
  6438. #ifdef WL_NAN_DISC_CACHE
  6439. if (pev->reason != NAN_TERM_REASON_USER_REQ) {
  6440. wl_cfgnan_clear_svc_ranging_inst(cfg, pev->instance_id);
  6441. /* terminate ranging sessions */
  6442. wl_cfgnan_terminate_ranging_sessions(bcmcfg_to_prmry_ndev(cfg),
  6443. cfg, pev->instance_id);
  6444. }
  6445. #endif /* WL_NAN_DISC_CACHE */
  6446. break;
  6447. }
  6448. case WL_NAN_EVENT_RECEIVE: {
  6449. nan_opts_len = data_len;
  6450. hal_event_id = GOOGLE_NAN_EVENT_FOLLOWUP;
  6451. xtlv_opt = BCM_IOV_CMD_OPT_ALIGN_NONE;
  6452. break;
  6453. }
  6454. case WL_NAN_EVENT_TXS: {
  6455. bcm_xtlv_t *xtlv = (bcm_xtlv_t *)event_data;
  6456. wl_nan_event_txs_t *txs = (wl_nan_event_txs_t *)xtlv->data;
  6457. wl_nan_event_sd_txs_t *txs_sd = NULL;
  6458. if (txs->status == WL_NAN_TXS_SUCCESS) {
  6459. WL_INFORM_MEM(("TXS success for type %d token %d\n",
  6460. txs->type, txs->host_seq));
  6461. nan_event_data->status = NAN_STATUS_SUCCESS;
  6462. ret = memcpy_s(nan_event_data->nan_reason,
  6463. sizeof(nan_event_data->nan_reason),
  6464. "NAN_STATUS_SUCCESS",
  6465. strlen("NAN_STATUS_SUCCESS"));
  6466. if (ret != BCME_OK) {
  6467. WL_ERR(("Failed to copy nan_reason\n"));
  6468. goto exit;
  6469. }
  6470. } else {
  6471. /* TODO : populate status based on reason codes
  6472. For now adding it as no ACK, so that app/framework can retry
  6473. */
  6474. WL_INFORM_MEM(("TXS failed for type %d status %d token %d\n",
  6475. txs->type, txs->status, txs->host_seq));
  6476. nan_event_data->status = NAN_STATUS_NO_OTA_ACK;
  6477. ret = memcpy_s(nan_event_data->nan_reason,
  6478. sizeof(nan_event_data->nan_reason),
  6479. "NAN_STATUS_NO_OTA_ACK",
  6480. strlen("NAN_STATUS_NO_OTA_ACK"));
  6481. if (ret != BCME_OK) {
  6482. WL_ERR(("Failed to copy nan_reason\n"));
  6483. goto exit;
  6484. }
  6485. }
  6486. nan_event_data->reason = txs->reason_code;
  6487. nan_event_data->token = txs->host_seq;
  6488. if (txs->type == WL_NAN_FRM_TYPE_FOLLOWUP) {
  6489. hal_event_id = GOOGLE_NAN_EVENT_TRANSMIT_FOLLOWUP_IND;
  6490. xtlv = (bcm_xtlv_t *)(txs->opt_tlvs);
  6491. if (txs->opt_tlvs_len && xtlv->id == WL_NAN_XTLV_SD_TXS) {
  6492. txs_sd = (wl_nan_event_sd_txs_t*)xtlv->data;
  6493. nan_event_data->local_inst_id = txs_sd->inst_id;
  6494. } else {
  6495. WL_ERR(("Invalid params in TX status for trasnmit followup"));
  6496. ret = -EINVAL;
  6497. goto exit;
  6498. }
  6499. } else { /* TODO: add for other frame types if required */
  6500. ret = -EINVAL;
  6501. goto exit;
  6502. }
  6503. break;
  6504. }
  6505. case WL_NAN_EVENT_DISCOVERY_RESULT: {
  6506. nan_opts_len = data_len;
  6507. hal_event_id = GOOGLE_NAN_EVENT_SUBSCRIBE_MATCH;
  6508. xtlv_opt = BCM_IOV_CMD_OPT_ALIGN_NONE;
  6509. break;
  6510. }
  6511. #ifdef WL_NAN_DISC_CACHE
  6512. case WL_NAN_EVENT_DISC_CACHE_TIMEOUT: {
  6513. bcm_xtlv_t *xtlv = (bcm_xtlv_t *)event_data;
  6514. wl_nan_ev_disc_cache_timeout_t *cache_data =
  6515. (wl_nan_ev_disc_cache_timeout_t *)xtlv->data;
  6516. wl_nan_disc_expired_cache_entry_t *cache_entry = NULL;
  6517. uint16 xtlv_len = xtlv->len;
  6518. uint8 entry_idx = 0;
  6519. if (xtlv->id == WL_NAN_XTLV_SD_DISC_CACHE_TIMEOUT) {
  6520. xtlv_len = xtlv_len -
  6521. OFFSETOF(wl_nan_ev_disc_cache_timeout_t, cache_exp_list);
  6522. while ((entry_idx < cache_data->count) &&
  6523. (xtlv_len >= sizeof(*cache_entry))) {
  6524. cache_entry = &cache_data->cache_exp_list[entry_idx];
  6525. /* Invalidate local cache info */
  6526. wl_cfgnan_remove_disc_result(cfg, cache_entry->l_sub_id);
  6527. xtlv_len = xtlv_len - sizeof(*cache_entry);
  6528. entry_idx++;
  6529. }
  6530. }
  6531. break;
  6532. }
  6533. case WL_NAN_EVENT_RNG_REQ_IND: {
  6534. wl_nan_ev_rng_req_ind_t *rng_ind;
  6535. bcm_xtlv_t *xtlv = (bcm_xtlv_t *)event_data;
  6536. nan_opts_len = data_len;
  6537. rng_ind = (wl_nan_ev_rng_req_ind_t *)xtlv->data;
  6538. xtlv_opt = BCM_IOV_CMD_OPT_ALIGN_NONE;
  6539. WL_INFORM_MEM(("Received WL_NAN_EVENT_RNG_REQ_IND range_id %d"
  6540. " peer:" MACDBG "\n", rng_ind->rng_id,
  6541. MAC2STRDBG(&rng_ind->peer_m_addr)));
  6542. #ifdef RTT_SUPPORT
  6543. ret = wl_cfgnan_handle_ranging_ind(cfg, rng_ind);
  6544. #endif /* RTT_SUPPORT */
  6545. /* no need to event to HAL */
  6546. goto exit;
  6547. }
  6548. case WL_NAN_EVENT_RNG_TERM_IND: {
  6549. bcm_xtlv_t *xtlv = (bcm_xtlv_t *)event_data;
  6550. dhd_pub_t *dhd = (struct dhd_pub *)(cfg->pub);
  6551. nan_ranging_inst_t *rng_inst;
  6552. wl_nan_ev_rng_term_ind_t *range_term = (wl_nan_ev_rng_term_ind_t *)xtlv->data;
  6553. #ifdef RTT_SUPPORT
  6554. int8 index = -1;
  6555. rtt_geofence_target_info_t* geofence_target;
  6556. #endif /* RTT_SUPPORT */
  6557. BCM_REFERENCE(dhd);
  6558. WL_INFORM_MEM(("Received WL_NAN_EVENT_RNG_TERM_IND peer: " MACDBG ", "
  6559. " Reason Code: %d\n", MAC2STRDBG(&range_term->peer_m_addr),
  6560. range_term->reason_code));
  6561. rng_inst = wl_cfgnan_check_for_ranging(cfg, &range_term->peer_m_addr);
  6562. if (rng_inst) {
  6563. #ifdef RTT_SUPPORT
  6564. if (rng_inst->range_type == RTT_TYPE_NAN_DIRECTED) {
  6565. dhd_rtt_handle_nan_rtt_session_end(dhd, &rng_inst->peer_addr);
  6566. } else if (rng_inst->range_type == RTT_TYPE_NAN_GEOFENCE) {
  6567. /* Set geofence RTT in progress state to false */
  6568. /*
  6569. * ToDo:
  6570. * Check for reason code,
  6571. * Accordingly, if we can attempt retry
  6572. */
  6573. geofence_target = dhd_rtt_get_geofence_target(dhd,
  6574. &rng_inst->peer_addr, &index);
  6575. if (geofence_target) {
  6576. dhd_rtt_remove_geofence_target(dhd,
  6577. &geofence_target->peer_addr);
  6578. }
  6579. WL_TRACE(("Reset the state on terminate\n"));
  6580. /* If the target was the geofence queue head */
  6581. dhd_rtt_set_geofence_rtt_state(dhd, FALSE);
  6582. }
  6583. if (rng_inst->range_role == NAN_RANGING_ROLE_RESPONDER &&
  6584. wl_check_range_role_concurrency(dhd, rng_inst)) {
  6585. /* Resolve role concurrency */
  6586. wl_cfgnan_resolve_ranging_role_concurrecny(dhd, rng_inst);
  6587. }
  6588. /* Dont abolish rng_inst if geofence rtt pending for peer */
  6589. wl_cfgnan_reset_geofence_ranging(cfg, rng_inst, RTT_SCHED_RNG_TERM);
  6590. #endif /* RTT_SUPPORT */
  6591. }
  6592. break;
  6593. }
  6594. #endif /* WL_NAN_DISC_CACHE */
  6595. /*
  6596. * Data path events data are received in common event struct,
  6597. * Handling all the events as part of one case, hence fall through is intentional
  6598. */
  6599. case WL_NAN_EVENT_PEER_DATAPATH_IND:
  6600. case WL_NAN_EVENT_DATAPATH_ESTB:
  6601. case WL_NAN_EVENT_DATAPATH_END: {
  6602. ret = wl_nan_dp_cmn_event_data(cfg, event_data, data_len,
  6603. &tlvs_offset, &nan_opts_len,
  6604. event_num, &hal_event_id, nan_event_data);
  6605. /* Avoiding optional param parsing for DP END Event */
  6606. if (event_num == WL_NAN_EVENT_DATAPATH_END) {
  6607. nan_opts_len = 0;
  6608. xtlv_opt = BCM_IOV_CMD_OPT_ALIGN_NONE;
  6609. }
  6610. if (unlikely(ret)) {
  6611. WL_ERR(("nan dp common event data parse failed\n"));
  6612. goto exit;
  6613. }
  6614. break;
  6615. }
  6616. default:
  6617. WL_ERR_RLMT(("WARNING: unimplemented NAN APP EVENT = %d\n", event_num));
  6618. ret = BCME_ERROR;
  6619. goto exit;
  6620. }
  6621. if (nan_opts_len) {
  6622. tlv_buf = (uint8 *)event_data + tlvs_offset;
  6623. /* Extract event data tlvs and pass their resp to cb fn */
  6624. ret = bcm_unpack_xtlv_buf((void *)&nan_event_ctx, (const uint8*)tlv_buf,
  6625. nan_opts_len, xtlv_opt, wl_cfgnan_set_vars_cbfn);
  6626. if (ret != BCME_OK) {
  6627. WL_ERR(("Failed to unpack tlv data, ret=%d\n", ret));
  6628. }
  6629. }
  6630. #ifdef WL_NAN_DISC_CACHE
  6631. if (hal_event_id == GOOGLE_NAN_EVENT_SUBSCRIBE_MATCH) {
  6632. #ifdef RTT_SUPPORT
  6633. u8 rtt_invalid_reason = RTT_STATE_VALID;
  6634. bool role_concur_state = 0;
  6635. dhd_pub_t *dhd = (struct dhd_pub *)(cfg->pub);
  6636. #endif /* RTT_SUPPORT */
  6637. u16 update_flags = 0;
  6638. WL_TRACE(("Cache disc res\n"));
  6639. ret = wl_cfgnan_cache_disc_result(cfg, nan_event_data, &update_flags);
  6640. if (ret) {
  6641. WL_ERR(("Failed to cache disc result ret %d\n", ret));
  6642. }
  6643. if (nan_event_data->sde_control_flag & NAN_SDE_CF_RANGING_REQUIRED) {
  6644. ret = wl_cfgnan_check_disc_result_for_ranging(cfg, nan_event_data);
  6645. if (ret == BCME_OK) {
  6646. #ifdef RTT_SUPPORT
  6647. rtt_invalid_reason = dhd_rtt_invalid_states
  6648. (bcmcfg_to_prmry_ndev(cfg), &nan_event_data->remote_nmi);
  6649. role_concur_state = dhd_rtt_get_role_concurrency_state(dhd);
  6650. /*
  6651. * If instant RTT not possible,
  6652. * send discovery result instantly like
  6653. * incase of invalid rtt state as
  6654. * NDP connected/connecting or role_concurrency
  6655. * on, otherwise, disc result will be posted
  6656. * on ranging report event
  6657. */
  6658. if (rtt_invalid_reason == RTT_STATE_VALID &&
  6659. role_concur_state == FALSE) {
  6660. /* Avoid sending disc result instantly */
  6661. goto exit;
  6662. }
  6663. #endif /* RTT_SUPPORT */
  6664. } else {
  6665. /* TODO: should we terminate service if ranging fails ? */
  6666. WL_INFORM_MEM(("Ranging failed or not required, " MACDBG
  6667. " sub_id:%d , pub_id:%d\n",
  6668. MAC2STRDBG(&nan_event_data->remote_nmi),
  6669. nan_event_data->sub_id, nan_event_data->pub_id));
  6670. }
  6671. } else {
  6672. nan_svc_info_t *svc_info = wl_cfgnan_get_svc_inst(cfg,
  6673. nan_event_data->sub_id, 0);
  6674. if (svc_info && svc_info->ranging_required &&
  6675. (update_flags & NAN_DISC_CACHE_PARAM_SDE_CONTROL)) {
  6676. wl_cfgnan_clear_svc_ranging_inst(cfg, nan_event_data->sub_id);
  6677. /* terminate ranging sessions for this svc, if any */
  6678. wl_cfgnan_terminate_ranging_sessions(bcmcfg_to_prmry_ndev(cfg),
  6679. cfg, nan_event_data->sub_id);
  6680. }
  6681. WL_DBG(("Ranging sessions terminated for svc update or not required\n"));
  6682. }
  6683. /*
  6684. * If tx match filter is present as part of active subscribe, keep same filter
  6685. * values in discovery results also.
  6686. */
  6687. if (nan_event_data->sub_id == nan_event_data->requestor_id) {
  6688. svc = wl_cfgnan_get_svc_inst(cfg, nan_event_data->sub_id, 0);
  6689. if (svc && svc->tx_match_filter_len) {
  6690. nan_event_data->tx_match_filter.dlen = svc->tx_match_filter_len;
  6691. nan_event_data->tx_match_filter.data =
  6692. MALLOCZ(cfg->osh, svc->tx_match_filter_len);
  6693. if (!nan_event_data->tx_match_filter.data) {
  6694. WL_ERR(("%s: tx_match_filter_data alloc failed\n",
  6695. __FUNCTION__));
  6696. nan_event_data->tx_match_filter.dlen = 0;
  6697. ret = -ENOMEM;
  6698. goto exit;
  6699. }
  6700. ret = memcpy_s(nan_event_data->tx_match_filter.data,
  6701. nan_event_data->tx_match_filter.dlen,
  6702. svc->tx_match_filter, svc->tx_match_filter_len);
  6703. if (ret != BCME_OK) {
  6704. WL_ERR(("Failed to copy tx match filter data\n"));
  6705. goto exit;
  6706. }
  6707. }
  6708. }
  6709. }
  6710. #endif /* WL_NAN_DISC_CACHE */
  6711. WL_TRACE(("Send up %s (%d) data to HAL, hal_event_id=%d\n",
  6712. nan_event_to_str(event_num), event_num, hal_event_id));
  6713. #if (LINUX_VERSION_CODE > KERNEL_VERSION(3, 13, 0)) || defined(WL_VENDOR_EXT_SUPPORT)
  6714. ret = wl_cfgvendor_send_nan_event(cfg->wdev->wiphy, bcmcfg_to_prmry_ndev(cfg),
  6715. hal_event_id, nan_event_data);
  6716. if (ret != BCME_OK) {
  6717. WL_ERR(("Failed to send event to nan hal, %s (%d)\n",
  6718. nan_event_to_str(event_num), event_num));
  6719. }
  6720. #endif /* (LINUX_VERSION_CODE > KERNEL_VERSION(3, 13, 0)) || defined(WL_VENDOR_EXT_SUPPORT) */
  6721. exit:
  6722. wl_cfgnan_clear_nan_event_data(cfg, nan_event_data);
  6723. NAN_MUTEX_UNLOCK();
  6724. NAN_DBG_EXIT();
  6725. return ret;
  6726. }
  6727. #ifdef WL_NAN_DISC_CACHE
  6728. static int
  6729. wl_cfgnan_cache_disc_result(struct bcm_cfg80211 *cfg, void * data,
  6730. u16 *disc_cache_update_flags)
  6731. {
  6732. nan_event_data_t* disc = (nan_event_data_t*)data;
  6733. int i, add_index = 0;
  6734. int ret = BCME_OK;
  6735. nan_disc_result_cache *disc_res = cfg->nan_disc_cache;
  6736. *disc_cache_update_flags = 0;
  6737. if (!cfg->nan_enable) {
  6738. WL_DBG(("nan not enabled"));
  6739. return BCME_NOTENABLED;
  6740. }
  6741. if (cfg->nan_disc_count == NAN_MAX_CACHE_DISC_RESULT) {
  6742. WL_DBG(("cache full"));
  6743. ret = BCME_NORESOURCE;
  6744. goto done;
  6745. }
  6746. for (i = 0; i < NAN_MAX_CACHE_DISC_RESULT; i++) {
  6747. if (!disc_res[i].valid) {
  6748. add_index = i;
  6749. continue;
  6750. }
  6751. if (!memcmp(&disc_res[i].peer, &disc->remote_nmi, ETHER_ADDR_LEN) &&
  6752. !memcmp(disc_res[i].svc_hash, disc->svc_name, WL_NAN_SVC_HASH_LEN)) {
  6753. WL_DBG(("cache entry already present, i = %d", i));
  6754. /* Update needed parameters here */
  6755. if (disc_res[i].sde_control_flag != disc->sde_control_flag) {
  6756. disc_res[i].sde_control_flag = disc->sde_control_flag;
  6757. *disc_cache_update_flags |= NAN_DISC_CACHE_PARAM_SDE_CONTROL;
  6758. }
  6759. ret = BCME_OK; /* entry already present */
  6760. goto done;
  6761. }
  6762. }
  6763. WL_DBG(("adding cache entry: add_index = %d\n", add_index));
  6764. disc_res[add_index].valid = 1;
  6765. disc_res[add_index].pub_id = disc->pub_id;
  6766. disc_res[add_index].sub_id = disc->sub_id;
  6767. disc_res[add_index].publish_rssi = disc->publish_rssi;
  6768. disc_res[add_index].peer_cipher_suite = disc->peer_cipher_suite;
  6769. disc_res[add_index].sde_control_flag = disc->sde_control_flag;
  6770. ret = memcpy_s(&disc_res[add_index].peer, ETHER_ADDR_LEN,
  6771. &disc->remote_nmi, ETHER_ADDR_LEN);
  6772. if (ret != BCME_OK) {
  6773. WL_ERR(("Failed to copy remote nmi\n"));
  6774. goto done;
  6775. }
  6776. ret = memcpy_s(disc_res[add_index].svc_hash, WL_NAN_SVC_HASH_LEN,
  6777. disc->svc_name, WL_NAN_SVC_HASH_LEN);
  6778. if (ret != BCME_OK) {
  6779. WL_ERR(("Failed to copy svc hash\n"));
  6780. goto done;
  6781. }
  6782. if (disc->svc_info.dlen && disc->svc_info.data) {
  6783. disc_res[add_index].svc_info.dlen = disc->svc_info.dlen;
  6784. disc_res[add_index].svc_info.data =
  6785. MALLOCZ(cfg->osh, disc_res[add_index].svc_info.dlen);
  6786. if (!disc_res[add_index].svc_info.data) {
  6787. WL_ERR(("%s: memory allocation failed\n", __FUNCTION__));
  6788. disc_res[add_index].svc_info.dlen = 0;
  6789. ret = BCME_NOMEM;
  6790. goto done;
  6791. }
  6792. ret = memcpy_s(disc_res[add_index].svc_info.data, disc_res[add_index].svc_info.dlen,
  6793. disc->svc_info.data, disc->svc_info.dlen);
  6794. if (ret != BCME_OK) {
  6795. WL_ERR(("Failed to copy svc info\n"));
  6796. goto done;
  6797. }
  6798. }
  6799. if (disc->tx_match_filter.dlen && disc->tx_match_filter.data) {
  6800. disc_res[add_index].tx_match_filter.dlen = disc->tx_match_filter.dlen;
  6801. disc_res[add_index].tx_match_filter.data =
  6802. MALLOCZ(cfg->osh, disc_res[add_index].tx_match_filter.dlen);
  6803. if (!disc_res[add_index].tx_match_filter.data) {
  6804. WL_ERR(("%s: memory allocation failed\n", __FUNCTION__));
  6805. disc_res[add_index].tx_match_filter.dlen = 0;
  6806. ret = BCME_NOMEM;
  6807. goto done;
  6808. }
  6809. ret = memcpy_s(disc_res[add_index].tx_match_filter.data,
  6810. disc_res[add_index].tx_match_filter.dlen,
  6811. disc->tx_match_filter.data, disc->tx_match_filter.dlen);
  6812. if (ret != BCME_OK) {
  6813. WL_ERR(("Failed to copy tx match filter\n"));
  6814. goto done;
  6815. }
  6816. }
  6817. cfg->nan_disc_count++;
  6818. WL_DBG(("cfg->nan_disc_count = %d\n", cfg->nan_disc_count));
  6819. done:
  6820. return ret;
  6821. }
/* Sending command to FW for clearing discovery cache info in FW */
static int
wl_cfgnan_clear_disc_cache(struct bcm_cfg80211 *cfg, wl_nan_instance_id_t sub_id)
{
	s32 ret = BCME_OK;
	uint16 nan_buf_size = NAN_IOCTL_BUF_SIZE;
	uint32 status;
	uint8 resp_buf[NAN_IOCTL_BUF_SIZE];
	uint8 buf[NAN_IOCTL_BUF_SIZE];
	bcm_iov_batch_buf_t *nan_buf;
	bcm_iov_batch_subcmd_t *sub_cmd;
	uint16 subcmd_len;
	/* Same src and dest len here */
	memset_s(buf, sizeof(buf), 0, sizeof(buf));
	/* Build a single-subcommand IOV batch in 'buf' */
	nan_buf = (bcm_iov_batch_buf_t*)buf;
	nan_buf->version = htol16(WL_NAN_IOV_BATCH_VERSION);
	nan_buf->count = 0;
	/* Track remaining space; the batch header is consumed up front */
	nan_buf_size -= OFFSETOF(bcm_iov_batch_buf_t, cmds[0]);
	sub_cmd = (bcm_iov_batch_subcmd_t *)(&nan_buf->cmds[0]);
	/* Verify the payload (just sub_id) fits and get the aligned subcmd size */
	ret = wl_cfg_nan_check_cmd_len(nan_buf_size,
		sizeof(sub_id), &subcmd_len);
	if (unlikely(ret)) {
		WL_ERR(("nan_sub_cmd check failed\n"));
		goto fail;
	}
	/* Fill the sub_command block */
	sub_cmd->id = htod16(WL_NAN_CMD_SD_DISC_CACHE_CLEAR);
	sub_cmd->len = sizeof(sub_cmd->u.options) + sizeof(sub_id);
	sub_cmd->u.options = htol32(BCM_XTLV_OPTION_ALIGN32);
	/* Data size len vs buffer len check is already done above.
	 * So, short buffer error is impossible.
	 */
	(void)memcpy_s(sub_cmd->data, (nan_buf_size - OFFSETOF(bcm_iov_batch_subcmd_t, data)),
		&sub_id, sizeof(sub_id));
	/* adjust iov data len to the end of last data record */
	nan_buf_size -= (subcmd_len);
	nan_buf->count++;
	nan_buf->is_set = true;
	/* Convert 'space remaining' back into the total bytes actually used */
	nan_buf_size = NAN_IOCTL_BUF_SIZE - nan_buf_size;
	/* Same src and dest len here */
	memset_s(resp_buf, sizeof(resp_buf), 0, sizeof(resp_buf));
	ret = wl_cfgnan_execute_ioctl(bcmcfg_to_prmry_ndev(cfg), cfg,
		nan_buf, nan_buf_size, &status,
		(void*)resp_buf, NAN_IOCTL_BUF_SIZE);
	/* Both the ioctl result and the firmware status must be zero */
	if (unlikely(ret) || unlikely(status)) {
		WL_ERR(("Disc cache clear handler failed ret %d status %d\n",
			ret, status));
		goto fail;
	}
	/* Fall through: success and failure share the same exit */
fail:
	return ret;
}
  6874. static int wl_cfgnan_remove_disc_result(struct bcm_cfg80211 *cfg,
  6875. uint8 local_subid)
  6876. {
  6877. int i;
  6878. int ret = BCME_NOTFOUND;
  6879. nan_disc_result_cache *disc_res = cfg->nan_disc_cache;
  6880. if (!cfg->nan_enable) {
  6881. WL_DBG(("nan not enabled\n"));
  6882. ret = BCME_NOTENABLED;
  6883. goto done;
  6884. }
  6885. for (i = 0; i < NAN_MAX_CACHE_DISC_RESULT; i++) {
  6886. if ((disc_res[i].valid) && (disc_res[i].sub_id == local_subid)) {
  6887. WL_TRACE(("make cache entry invalid\n"));
  6888. if (disc_res[i].tx_match_filter.data) {
  6889. MFREE(cfg->osh, disc_res[i].tx_match_filter.data,
  6890. disc_res[i].tx_match_filter.dlen);
  6891. }
  6892. if (disc_res[i].svc_info.data) {
  6893. MFREE(cfg->osh, disc_res[i].svc_info.data,
  6894. disc_res[i].svc_info.dlen);
  6895. }
  6896. memset_s(&disc_res[i], sizeof(disc_res[i]), 0, sizeof(disc_res[i]));
  6897. cfg->nan_disc_count--;
  6898. ret = BCME_OK;
  6899. }
  6900. }
  6901. WL_DBG(("couldn't find entry\n"));
  6902. done:
  6903. return ret;
  6904. }
  6905. static nan_disc_result_cache *
  6906. wl_cfgnan_get_disc_result(struct bcm_cfg80211 *cfg, uint8 remote_pubid,
  6907. struct ether_addr *peer)
  6908. {
  6909. int i;
  6910. nan_disc_result_cache *disc_res = cfg->nan_disc_cache;
  6911. if (remote_pubid) {
  6912. for (i = 0; i < NAN_MAX_CACHE_DISC_RESULT; i++) {
  6913. if ((disc_res[i].pub_id == remote_pubid) &&
  6914. !memcmp(&disc_res[i].peer, peer, ETHER_ADDR_LEN)) {
  6915. WL_DBG(("Found entry: i = %d\n", i));
  6916. return &disc_res[i];
  6917. }
  6918. }
  6919. } else {
  6920. for (i = 0; i < NAN_MAX_CACHE_DISC_RESULT; i++) {
  6921. if (!memcmp(&disc_res[i].peer, peer, ETHER_ADDR_LEN)) {
  6922. WL_DBG(("Found entry: %d\n", i));
  6923. return &disc_res[i];
  6924. }
  6925. }
  6926. }
  6927. return NULL;
  6928. }
  6929. #endif /* WL_NAN_DISC_CACHE */
  6930. static void
  6931. wl_cfgnan_update_dp_mask(struct bcm_cfg80211 *cfg, bool enable, u8 nan_dp_id)
  6932. {
  6933. #ifdef ARP_OFFLOAD_SUPPORT
  6934. dhd_pub_t *dhd = (struct dhd_pub *)(cfg->pub);
  6935. #endif /* ARP_OFFLOAD_SUPPORT */
  6936. /* As of now, we don't see a need to know which ndp is active.
  6937. * so just keep tracking of ndp via count. If we need to know
  6938. * the status of each ndp based on ndp id, we need to change
  6939. * this implementation to use a bit mask.
  6940. */
  6941. if (!dhd) {
  6942. WL_ERR(("dhd pub null!\n"));
  6943. return;
  6944. }
  6945. if (enable) {
  6946. /* On first NAN DP indication, disable ARP. */
  6947. #ifdef ARP_OFFLOAD_SUPPORT
  6948. if (!cfg->nan_dp_mask) {
  6949. dhd_arp_offload_set(dhd, 0);
  6950. dhd_arp_offload_enable(dhd, false);
  6951. }
  6952. #endif /* ARP_OFFLOAD_SUPPORT */
  6953. cfg->nan_dp_mask |= (0x1 << nan_dp_id);
  6954. } else {
  6955. cfg->nan_dp_mask &= ~(0x1 << nan_dp_id);
  6956. #ifdef ARP_OFFLOAD_SUPPORT
  6957. if (!cfg->nan_dp_mask) {
  6958. /* If NAN DP count becomes zero and if there
  6959. * are no conflicts, enable back ARP offload.
  6960. * As of now, the conflicting interfaces are AP
  6961. * and P2P. But NAN + P2P/AP concurrency is not
  6962. * supported.
  6963. */
  6964. dhd_arp_offload_set(dhd, dhd_arp_mode);
  6965. dhd_arp_offload_enable(dhd, true);
  6966. }
  6967. #endif /* ARP_OFFLOAD_SUPPORT */
  6968. }
  6969. WL_INFORM_MEM(("NAN_DP_MASK:0x%x\n", cfg->nan_dp_mask));
  6970. }
  6971. bool
  6972. wl_cfgnan_is_dp_active(struct net_device *ndev)
  6973. {
  6974. struct bcm_cfg80211 *cfg;
  6975. bool nan_dp;
  6976. if (!ndev || !ndev->ieee80211_ptr) {
  6977. WL_ERR(("ndev/wdev null\n"));
  6978. return false;
  6979. }
  6980. cfg = wiphy_priv(ndev->ieee80211_ptr->wiphy);
  6981. nan_dp = cfg->nan_dp_mask ? true : false;
  6982. WL_DBG(("NAN DP status:%d\n", nan_dp));
  6983. return nan_dp;
  6984. }
  6985. s32
  6986. wl_cfgnan_get_ndi_idx(struct bcm_cfg80211 *cfg)
  6987. {
  6988. int i;
  6989. for (i = 0; i < NAN_MAX_NDI; i++) {
  6990. if (!cfg->nancfg.ndi[i].in_use) {
  6991. /* Free interface, use it */
  6992. return i;
  6993. }
  6994. }
  6995. /* Don't have a free interface */
  6996. return WL_INVALID;
  6997. }
/*
 * Record interface-name bookkeeping for the NDI slot at 'idx': copy the name
 * (truncated to fit IFNAMSIZ with NUL termination), mark the slot in use and
 * not yet created.
 *
 * Returns -EINVAL on a NULL name or out-of-range idx.
 *
 * NOTE(review): on success this returns WL_INVALID unconditionally, and the
 * trailing comment looks copy-pasted from wl_cfgnan_get_ndi_idx(). It may be
 * intended to return idx or BCME_OK instead — confirm how callers use the
 * return value before changing it.
 */
s32
wl_cfgnan_add_ndi_data(struct bcm_cfg80211 *cfg, s32 idx, char *name)
{
	u16 len;
	if (!name || (idx < 0) || (idx >= NAN_MAX_NDI)) {
		return -EINVAL;
	}
	/* Ensure ifname string size <= IFNAMSIZ including null termination */
	len = MIN(strlen(name), (IFNAMSIZ - 1));
	strncpy(cfg->nancfg.ndi[idx].ifname, name, len);
	cfg->nancfg.ndi[idx].ifname[len] = '\0';
	cfg->nancfg.ndi[idx].in_use = true;
	/* Slot reserved; net_device creation is tracked separately */
	cfg->nancfg.ndi[idx].created = false;
	/* Don't have a free interface */
	return WL_INVALID;
}
  7014. s32
  7015. wl_cfgnan_del_ndi_data(struct bcm_cfg80211 *cfg, char *name)
  7016. {
  7017. u16 len;
  7018. int i;
  7019. if (!name) {
  7020. return -EINVAL;
  7021. }
  7022. len = MIN(strlen(name), IFNAMSIZ);
  7023. for (i = 0; i < NAN_MAX_NDI; i++) {
  7024. if (strncmp(cfg->nancfg.ndi[i].ifname, name, len) == 0) {
  7025. memset_s(&cfg->nancfg.ndi[i].ifname, IFNAMSIZ,
  7026. 0x0, IFNAMSIZ);
  7027. cfg->nancfg.ndi[i].in_use = false;
  7028. cfg->nancfg.ndi[i].created = false;
  7029. cfg->nancfg.ndi[i].nan_ndev = NULL;
  7030. return i;
  7031. }
  7032. }
  7033. return -EINVAL;
  7034. }
  7035. struct wl_ndi_data *
  7036. wl_cfgnan_get_ndi_data(struct bcm_cfg80211 *cfg, char *name)
  7037. {
  7038. u16 len;
  7039. int i;
  7040. if (!name) {
  7041. return NULL;
  7042. }
  7043. len = MIN(strlen(name), IFNAMSIZ);
  7044. for (i = 0; i < NAN_MAX_NDI; i++) {
  7045. if (strncmp(cfg->nancfg.ndi[i].ifname, name, len) == 0) {
  7046. return &cfg->nancfg.ndi[i];
  7047. }
  7048. }
  7049. return NULL;
  7050. }
  7051. s32
  7052. wl_cfgnan_delete_ndp(struct bcm_cfg80211 *cfg,
  7053. struct net_device *nan_ndev)
  7054. {
  7055. s32 ret = BCME_OK;
  7056. uint8 i = 0;
  7057. for (i = 0; i < NAN_MAX_NDI; i++) {
  7058. if (cfg->nancfg.ndi[i].in_use &&
  7059. cfg->nancfg.ndi[i].created &&
  7060. (cfg->nancfg.ndi[i].nan_ndev == nan_ndev)) {
  7061. WL_INFORM_MEM(("iface name: %s, cfg->nancfg.ndi[i].nan_ndev = %p"
  7062. " and nan_ndev = %p\n",
  7063. (char*)cfg->nancfg.ndi[i].ifname,
  7064. cfg->nancfg.ndi[i].nan_ndev, nan_ndev));
  7065. ret = _wl_cfg80211_del_if(cfg, nan_ndev, NULL,
  7066. (char*)cfg->nancfg.ndi[i].ifname);
  7067. if (ret) {
  7068. WL_ERR(("failed to del ndi [%d]\n", ret));
  7069. goto exit;
  7070. }
  7071. /* After successful delete of interface,
  7072. * clear up the ndi data
  7073. */
  7074. if (wl_cfgnan_del_ndi_data(cfg,
  7075. (char*)cfg->nancfg.ndi[i].ifname) < 0) {
  7076. WL_ERR(("Failed to find matching data for ndi:%s\n",
  7077. (char*)cfg->nancfg.ndi[i].ifname));
  7078. }
  7079. }
  7080. }
  7081. exit:
  7082. return ret;
  7083. }
  7084. #endif /* WL_NAN */