/*
 * Broadcom Dongle Host Driver (DHD), common DHD core.
 *
 * Portions of this code are copyright (c) 2020 Cypress Semiconductor Corporation
 *
 * Copyright (C) 1999-2020, Broadcom Corporation
 *
 * Unless you and Broadcom execute a separate written software license
 * agreement governing use of this software, this software is licensed to you
 * under the terms of the GNU General Public License version 2 (the "GPL"),
 * available at http://www.broadcom.com/licenses/GPLv2.php, with the
 * following added to such license:
 *
 * As a special exception, the copyright holders of this software give you
 * permission to link this software with independent modules, and to copy and
 * distribute the resulting executable under terms of your choice, provided that
 * you also meet, for each linked independent module, the terms and conditions of
 * the license of that module. An independent module is a module which is not
 * derived from this software. The special exception does not apply to any
 * modifications of the software.
 *
 * Notwithstanding the above, under no circumstances may you combine this
 * software in any way with any other Broadcom software provided under a license
 * other than the GPL, without Broadcom's express prior written consent.
 *
 *
 * <<Broadcom-WL-IPTag/Open:>>
 *
 * $Id: dhd_common.c 701858 2017-05-26 20:20:58Z $
 */
#include <typedefs.h>
#include <osl.h>
#include <epivers.h>
#include <bcmutils.h>
#include <bcmstdlib_s.h>
#include <bcmendian.h>
#include <dngl_stats.h>
#include <dhd.h>
#include <dhd_ip.h>
#include <bcmevent.h>
#include <dhdioctl.h>
#ifdef PCIE_FULL_DONGLE
#include <bcmmsgbuf.h>
#endif /* PCIE_FULL_DONGLE */
#ifdef SHOW_LOGTRACE
#include <event_log.h>
#endif /* SHOW_LOGTRACE */
#ifdef BCMPCIE
#include <dhd_flowring.h>
#endif // endif
#include <dhd_bus.h>
#include <dhd_proto.h>
#include <dhd_dbg.h>
#include <802.1d.h>
#include <dhd_debug.h>
#include <dhd_dbg_ring.h>
#include <dhd_mschdbg.h>
#include <msgtrace.h>
#ifdef WL_CFG80211
#include <wl_cfg80211.h>
#endif // endif
#if defined(OEM_ANDROID) && defined(PNO_SUPPORT)
#include <dhd_pno.h>
#endif /* OEM_ANDROID && PNO_SUPPORT */
#ifdef RTT_SUPPORT
#include <dhd_rtt.h>
#endif // endif
#ifdef DNGL_EVENT_SUPPORT
#include <dnglevent.h>
#endif // endif
#define htod32(i) (i)
#define htod16(i) (i)
#define dtoh32(i) (i)
#define dtoh16(i) (i)
#define htodchanspec(i) (i)
#define dtohchanspec(i) (i)
#ifdef PROP_TXSTATUS
#include <wlfc_proto.h>
#include <dhd_wlfc.h>
#endif // endif
#if defined(DHD_POST_EAPOL_M1_AFTER_ROAM_EVT)
#include <dhd_linux.h>
#endif // endif
#ifdef DHD_L2_FILTER
#include <dhd_l2_filter.h>
#endif /* DHD_L2_FILTER */
#ifdef DHD_PSTA
#include <dhd_psta.h>
#endif /* DHD_PSTA */
#ifdef DHD_WET
#include <dhd_wet.h>
#endif /* DHD_WET */
#ifdef DHD_LOG_DUMP
#include <dhd_dbg.h>
#ifdef DHD_PKT_LOGGING
#include <dhd_pktlog.h>
#endif /* DHD_PKT_LOGGING */
#endif /* DHD_LOG_DUMP */
#ifdef DHD_LOG_PRINT_RATE_LIMIT
int log_print_threshold = 0;
#endif /* DHD_LOG_PRINT_RATE_LIMIT */
int dhd_msg_level = DHD_ERROR_VAL | DHD_FWLOG_VAL | DHD_EVENT_VAL
/* For CUSTOMER_HW4 do not enable DHD_IOVAR_MEM_VAL by default */
#if !defined(BOARD_HIKEY)
	| DHD_IOVAR_MEM_VAL
#endif // endif
#ifndef OEM_ANDROID
	| DHD_MSGTRACE_VAL
#endif /* OEM_ANDROID */
	| DHD_PKT_MON_VAL;
#if defined(OEM_ANDROID) && defined(WL_WIRELESS_EXT)
#include <wl_iw.h>
#endif /* defined(OEM_ANDROID) && defined(WL_WIRELESS_EXT) */
#ifdef DHD_ULP
#include <dhd_ulp.h>
#endif /* DHD_ULP */
#ifdef DHD_DEBUG
#include <sdiovar.h>
#endif /* DHD_DEBUG */
#ifdef DHD_PCIE_NATIVE_RUNTIMEPM
#include <linux/pm_runtime.h>
#endif /* DHD_PCIE_NATIVE_RUNTIMEPM */
#ifdef SOFTAP
char fw_path2[MOD_PARAM_PATHLEN];
extern bool softap_enabled;
#endif // endif
#ifdef SHOW_LOGTRACE
#define BYTES_AHEAD_NUM 10	/* the address precedes the matched string in the map file by this many bytes */
#define READ_NUM_BYTES 1000	/* number of bytes to read from the map file per iteration */
#define GO_BACK_FILE_POS_NUM_BYTES 100	/* number of bytes to rewind the file position between reads */
static char *ramstart_str = " text_start";	/* map file string marking the ramstart address */
static char *rodata_start_str = " rodata_start";	/* map file string marking the rodata start address */
static char *rodata_end_str = " rodata_end";	/* map file string marking the rodata end address */
#define RAMSTART_BIT 0x01
#define RDSTART_BIT 0x02
#define RDEND_BIT 0x04
#define ALL_MAP_VAL (RAMSTART_BIT | RDSTART_BIT | RDEND_BIT)
#endif /* SHOW_LOGTRACE */
#ifdef SHOW_LOGTRACE
/* The fw file path is taken either from the module parameter at insmod
 * time or from a constant that differs per platform.
 */
extern char *st_str_file_path;
#endif /* SHOW_LOGTRACE */
#define DHD_TPUT_MAX_TX_PKTS_BATCH 1000
#ifdef EWP_EDL
typedef struct msg_hdr_edl {
	uint32 infobuf_ver;
	info_buf_payload_hdr_t pyld_hdr;
	msgtrace_hdr_t trace_hdr;
} msg_hdr_edl_t;
#endif /* EWP_EDL */
/* Last connection success/failure status */
uint32 dhd_conn_event;
uint32 dhd_conn_status;
uint32 dhd_conn_reason;
extern int dhd_iscan_request(void * dhdp, uint16 action);
extern void dhd_ind_scan_confirm(void *h, bool status);
extern int dhd_iscan_in_progress(void *h);
void dhd_iscan_lock(void);
void dhd_iscan_unlock(void);
extern int dhd_change_mtu(dhd_pub_t *dhd, int new_mtu, int ifidx);
#if defined(OEM_ANDROID) && !defined(AP) && defined(WLP2P)
extern int dhd_get_concurrent_capabilites(dhd_pub_t *dhd);
#endif // endif
extern int dhd_socram_dump(struct dhd_bus *bus);
extern void dhd_set_packet_filter(dhd_pub_t *dhd);
#ifdef DNGL_EVENT_SUPPORT
static void dngl_host_event_process(dhd_pub_t *dhdp, bcm_dngl_event_t *event,
	bcm_dngl_event_msg_t *dngl_event, size_t pktlen);
static int dngl_host_event(dhd_pub_t *dhdp, void *pktdata, bcm_dngl_event_msg_t *dngl_event,
	size_t pktlen);
#endif /* DNGL_EVENT_SUPPORT */
#ifdef WL_CFGVENDOR_SEND_HANG_EVENT
static void copy_hang_info_ioctl_timeout(dhd_pub_t *dhd, int ifidx, wl_ioctl_t *ioc);
#endif /* WL_CFGVENDOR_SEND_HANG_EVENT */
#define MAX_CHUNK_LEN 1408 /* 8 * 8 * 22 */
#if defined(OEM_ANDROID)
bool ap_cfg_running = FALSE;
bool ap_fw_loaded = FALSE;
#endif /* defined(OEM_ANDROID) */
/* Version string to report */
#ifdef DHD_DEBUG
#ifndef SRCBASE
#define SRCBASE "drivers/net/wireless/bcmdhd"
#endif // endif
#define DHD_COMPILED "\nCompiled in " SRCBASE
#endif /* DHD_DEBUG */
#define CHIPID_MISMATCH 8
#if defined(DHD_DEBUG)
const char dhd_version[] = "Dongle Host Driver, version " EPI_VERSION_STR
	DHD_COMPILED " on " __DATE__ " at " __TIME__;
#else
const char dhd_version[] = "\nDongle Host Driver, version " EPI_VERSION_STR "\nCompiled from ";
#endif // endif
char fw_version[FW_VER_STR_LEN] = "\0";
char clm_version[CLM_VER_STR_LEN] = "\0";
char bus_api_revision[BUS_API_REV_STR_LEN] = "\0";
void dhd_set_timer(void *bus, uint wdtick);
static char* ioctl2str(uint32 ioctl);
/* IOVar table */
enum {
	IOV_VERSION = 1,
	IOV_MSGLEVEL,
	IOV_BCMERRORSTR,
	IOV_BCMERROR,
	IOV_WDTICK,
	IOV_DUMP,
	IOV_CLEARCOUNTS,
	IOV_LOGDUMP,
	IOV_LOGCAL,
	IOV_LOGSTAMP,
	IOV_GPIOOB,
	IOV_IOCTLTIMEOUT,
	IOV_CONS,
	IOV_DCONSOLE_POLL,
#if defined(DHD_DEBUG)
	IOV_DHD_JOIN_TIMEOUT_DBG,
	IOV_SCAN_TIMEOUT,
	IOV_MEM_DEBUG,
#ifdef BCMPCIE
	IOV_FLOW_RING_DEBUG,
#endif /* BCMPCIE */
#endif /* defined(DHD_DEBUG) */
#ifdef PROP_TXSTATUS
	IOV_PROPTXSTATUS_ENABLE,
	IOV_PROPTXSTATUS_MODE,
	IOV_PROPTXSTATUS_OPT,
	IOV_PROPTXSTATUS_MODULE_IGNORE,
	IOV_PROPTXSTATUS_CREDIT_IGNORE,
	IOV_PROPTXSTATUS_TXSTATUS_IGNORE,
	IOV_PROPTXSTATUS_RXPKT_CHK,
#endif /* PROP_TXSTATUS */
	IOV_BUS_TYPE,
	IOV_CHANGEMTU,
	IOV_HOSTREORDER_FLOWS,
#ifdef DHDTCPACK_SUPPRESS
	IOV_TCPACK_SUPPRESS,
#endif /* DHDTCPACK_SUPPRESS */
	IOV_AP_ISOLATE,
#ifdef DHD_L2_FILTER
	IOV_DHCP_UNICAST,
	IOV_BLOCK_PING,
	IOV_PROXY_ARP,
	IOV_GRAT_ARP,
	IOV_BLOCK_TDLS,
#endif /* DHD_L2_FILTER */
	IOV_DHD_IE,
#ifdef DHD_PSTA
	IOV_PSTA,
#endif /* DHD_PSTA */
#ifdef DHD_WET
	IOV_WET,
	IOV_WET_HOST_IPV4,
	IOV_WET_HOST_MAC,
#endif /* DHD_WET */
	IOV_CFG80211_OPMODE,
	IOV_ASSERT_TYPE,
	IOV_LMTEST,
#ifdef DHD_MCAST_REGEN
	IOV_MCAST_REGEN_BSS_ENABLE,
#endif // endif
#ifdef SHOW_LOGTRACE
	IOV_DUMP_TRACE_LOG,
#endif /* SHOW_LOGTRACE */
	IOV_DONGLE_TRAP_TYPE,
	IOV_DONGLE_TRAP_INFO,
	IOV_BPADDR,
	IOV_DUMP_DONGLE, /**< dumps core registers and d11 memories */
#if defined(DHD_LOG_DUMP)
	IOV_LOG_DUMP,
#endif /* DHD_LOG_DUMP */
	IOV_TPUT_TEST,
	IOV_FIS_TRIGGER,
	IOV_DEBUG_BUF_DEST_STAT,
#ifdef DHD_DEBUG
	IOV_INDUCE_ERROR,
#endif /* DHD_DEBUG */
#ifdef WL_IFACE_MGMT_CONF
#ifdef WL_CFG80211
#ifdef WL_NANP2P
	IOV_CONC_DISC,
#endif /* WL_NANP2P */
#ifdef WL_IFACE_MGMT
	IOV_IFACE_POLICY,
#endif /* WL_IFACE_MGMT */
#endif /* WL_CFG80211 */
#endif /* WL_IFACE_MGMT_CONF */
	IOV_LAST
};
const bcm_iovar_t dhd_iovars[] = {
	/* name varid flags flags2 type minlen */
	{"version", IOV_VERSION, 0, 0, IOVT_BUFFER, sizeof(dhd_version)},
#ifdef DHD_DEBUG
	{"msglevel", IOV_MSGLEVEL, 0, 0, IOVT_UINT32, 0},
	{"mem_debug", IOV_MEM_DEBUG, 0, 0, IOVT_BUFFER, 0 },
#ifdef BCMPCIE
	{"flow_ring_debug", IOV_FLOW_RING_DEBUG, 0, 0, IOVT_BUFFER, 0 },
#endif /* BCMPCIE */
#endif /* DHD_DEBUG */
	{"bcmerrorstr", IOV_BCMERRORSTR, 0, 0, IOVT_BUFFER, BCME_STRLEN},
	{"bcmerror", IOV_BCMERROR, 0, 0, IOVT_INT8, 0},
	{"wdtick", IOV_WDTICK, 0, 0, IOVT_UINT32, 0},
	{"dump", IOV_DUMP, 0, 0, IOVT_BUFFER, DHD_IOCTL_MAXLEN},
	{"cons", IOV_CONS, 0, 0, IOVT_BUFFER, 0},
	{"dconpoll", IOV_DCONSOLE_POLL, 0, 0, IOVT_UINT32, 0},
	{"clearcounts", IOV_CLEARCOUNTS, 0, 0, IOVT_VOID, 0},
	{"gpioob", IOV_GPIOOB, 0, 0, IOVT_UINT32, 0},
	{"ioctl_timeout", IOV_IOCTLTIMEOUT, 0, 0, IOVT_UINT32, 0},
#ifdef PROP_TXSTATUS
	{"proptx", IOV_PROPTXSTATUS_ENABLE, 0, 0, IOVT_BOOL, 0 },
	/*
	 * Set the proptxstatus operation mode:
	 * 0 - Do not do any proptxstatus flow control
	 * 1 - Use implied credit from a packet status
	 * 2 - Use explicit credit
	 */
  319. {"ptxmode", IOV_PROPTXSTATUS_MODE, 0, 0, IOVT_UINT32, 0 },
  320. {"proptx_opt", IOV_PROPTXSTATUS_OPT, 0, 0, IOVT_UINT32, 0 },
  321. {"pmodule_ignore", IOV_PROPTXSTATUS_MODULE_IGNORE, 0, 0, IOVT_BOOL, 0 },
  322. {"pcredit_ignore", IOV_PROPTXSTATUS_CREDIT_IGNORE, 0, 0, IOVT_BOOL, 0 },
  323. {"ptxstatus_ignore", IOV_PROPTXSTATUS_TXSTATUS_IGNORE, 0, 0, IOVT_BOOL, 0 },
  324. {"rxpkt_chk", IOV_PROPTXSTATUS_RXPKT_CHK, 0, 0, IOVT_BOOL, 0 },
  325. #endif /* PROP_TXSTATUS */
  326. {"bustype", IOV_BUS_TYPE, 0, 0, IOVT_UINT32, 0},
  327. {"changemtu", IOV_CHANGEMTU, 0, 0, IOVT_UINT32, 0 },
  328. {"host_reorder_flows", IOV_HOSTREORDER_FLOWS, 0, 0, IOVT_BUFFER,
  329. (WLHOST_REORDERDATA_MAXFLOWS + 1) },
  330. #ifdef DHDTCPACK_SUPPRESS
  331. {"tcpack_suppress", IOV_TCPACK_SUPPRESS, 0, 0, IOVT_UINT8, 0 },
  332. #endif /* DHDTCPACK_SUPPRESS */
  333. #ifdef DHD_L2_FILTER
  334. {"dhcp_unicast", IOV_DHCP_UNICAST, (0), 0, IOVT_BOOL, 0 },
  335. #endif /* DHD_L2_FILTER */
  336. {"ap_isolate", IOV_AP_ISOLATE, (0), 0, IOVT_BOOL, 0},
  337. #ifdef DHD_L2_FILTER
  338. {"block_ping", IOV_BLOCK_PING, (0), 0, IOVT_BOOL, 0},
  339. {"proxy_arp", IOV_PROXY_ARP, (0), 0, IOVT_BOOL, 0},
  340. {"grat_arp", IOV_GRAT_ARP, (0), 0, IOVT_BOOL, 0},
  341. {"block_tdls", IOV_BLOCK_TDLS, (0), IOVT_BOOL, 0},
#endif /* DHD_L2_FILTER */
	{"dhd_ie", IOV_DHD_IE, (0), 0, IOVT_BUFFER, 0},
#ifdef DHD_PSTA
	/* PSTA/PSR Mode configuration. 0: DISABLED 1: PSTA 2: PSR */
	{"psta", IOV_PSTA, 0, 0, IOVT_UINT32, 0},
#endif /* DHD_PSTA */
#ifdef DHD_WET
	/* WET Mode configuration. 0: DISABLED 1: WET */
	{"wet", IOV_WET, 0, 0, IOVT_UINT32, 0},
	{"wet_host_ipv4", IOV_WET_HOST_IPV4, 0, 0, IOVT_UINT32, 0},
	{"wet_host_mac", IOV_WET_HOST_MAC, 0, 0, IOVT_BUFFER, 0},
#endif /* DHD_WET */
  354. {"op_mode", IOV_CFG80211_OPMODE, 0, 0, IOVT_UINT32, 0 },
  355. {"assert_type", IOV_ASSERT_TYPE, (0), 0, IOVT_UINT32, 0},
  356. {"lmtest", IOV_LMTEST, 0, 0, IOVT_UINT32, 0 },
  357. #ifdef DHD_MCAST_REGEN
  358. {"mcast_regen_bss_enable", IOV_MCAST_REGEN_BSS_ENABLE, 0, 0, IOVT_BOOL, 0},
  359. #endif // endif
  360. #ifdef SHOW_LOGTRACE
  361. {"dump_trace_buf", IOV_DUMP_TRACE_LOG, 0, 0, IOVT_BUFFER, sizeof(trace_buf_info_t) },
  362. #endif /* SHOW_LOGTRACE */
  363. {"trap_type", IOV_DONGLE_TRAP_TYPE, 0, 0, IOVT_UINT32, 0 },
  364. {"trap_info", IOV_DONGLE_TRAP_INFO, 0, 0, IOVT_BUFFER, sizeof(trap_t) },
  365. #ifdef DHD_DEBUG
  366. {"bpaddr", IOV_BPADDR, 0, 0, IOVT_BUFFER, sizeof(sdreg_t) },
  367. #endif /* DHD_DEBUG */
  368. {"dump_dongle", IOV_DUMP_DONGLE, 0, 0, IOVT_BUFFER,
  369. MAX(sizeof(dump_dongle_in_t), sizeof(dump_dongle_out_t)) },
  370. #if defined(DHD_LOG_DUMP)
  371. {"log_dump", IOV_LOG_DUMP, 0, 0, IOVT_UINT8, 0},
  372. #endif /* DHD_LOG_DUMP */
  373. #ifndef OEM_ANDROID
  374. {"tput_test", IOV_TPUT_TEST, 0, 0, IOVT_BUFFER, sizeof(tput_test_t)},
  375. #endif // endif
  376. {"debug_buf_dest_stat", IOV_DEBUG_BUF_DEST_STAT, 0, 0, IOVT_UINT32, 0 },
  377. #ifdef DHD_DEBUG
  378. {"induce_error", IOV_INDUCE_ERROR, (0), 0, IOVT_UINT16, 0 },
  379. #endif /* DHD_DEBUG */
  380. #ifdef WL_IFACE_MGMT_CONF
  381. #ifdef WL_CFG80211
  382. #ifdef WL_NANP2P
  383. {"conc_disc", IOV_CONC_DISC, (0), 0, IOVT_UINT16, 0 },
  384. #endif /* WL_NANP2P */
  385. #ifdef WL_IFACE_MGMT
  386. {"if_policy", IOV_IFACE_POLICY, (0), 0, IOVT_BUFFER, sizeof(iface_mgmt_data_t)},
  387. #endif /* WL_IFACE_MGMT */
  388. #endif /* WL_CFG80211 */
  389. #endif /* WL_IFACE_MGMT_CONF */
  390. {NULL, 0, 0, 0, 0, 0 }
  391. };
  392. #define DHD_IOVAR_BUF_SIZE 128
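/*
 * Check whether any fatal bus/dongle error has been recorded: dongle reset,
 * firmware trap, IOVAR/D3ACK timeout, livelock, pktid audit failure, interface
 * operation failure, scan timeout/busy, AXI error, PCIe link down or CTO
 * recovery. Returns TRUE if any such condition is set. A hypothetical caller
 * would typically bail out early, e.g.:
 *
 *	if (dhd_query_bus_erros(dhdp))
 *		return BCME_ERROR;
 */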
bool
dhd_query_bus_erros(dhd_pub_t *dhdp)
{
	bool ret = FALSE;
	if (dhdp->dongle_reset) {
		DHD_ERROR_RLMT(("%s: Dongle Reset occurred, cannot proceed\n",
			__FUNCTION__));
		ret = TRUE;
	}
	if (dhdp->dongle_trap_occured) {
		DHD_ERROR_RLMT(("%s: FW TRAP has occurred, cannot proceed\n",
			__FUNCTION__));
		ret = TRUE;
#ifdef OEM_ANDROID
		dhdp->hang_reason = HANG_REASON_DONGLE_TRAP;
		dhd_os_send_hang_message(dhdp);
#endif /* OEM_ANDROID */
	}
	if (dhdp->iovar_timeout_occured) {
		DHD_ERROR_RLMT(("%s: Resumed on timeout for previous IOVAR, cannot proceed\n",
			__FUNCTION__));
		ret = TRUE;
	}
#ifdef PCIE_FULL_DONGLE
	if (dhdp->d3ack_timeout_occured) {
		DHD_ERROR_RLMT(("%s: Resumed on timeout for previous D3ACK, cannot proceed\n",
			__FUNCTION__));
		ret = TRUE;
	}
	if (dhdp->livelock_occured) {
		DHD_ERROR_RLMT(("%s: LIVELOCK occurred for previous msg, cannot proceed\n",
			__FUNCTION__));
		ret = TRUE;
	}
	if (dhdp->pktid_audit_failed) {
		DHD_ERROR_RLMT(("%s: pktid_audit_failed, cannot proceed\n",
			__FUNCTION__));
		ret = TRUE;
	}
#endif /* PCIE_FULL_DONGLE */
	if (dhdp->iface_op_failed) {
		DHD_ERROR_RLMT(("%s: iface_op_failed, cannot proceed\n",
			__FUNCTION__));
		ret = TRUE;
	}
	if (dhdp->scan_timeout_occurred) {
		DHD_ERROR_RLMT(("%s: scan_timeout_occurred, cannot proceed\n",
			__FUNCTION__));
		ret = TRUE;
	}
	if (dhdp->scan_busy_occurred) {
		DHD_ERROR_RLMT(("%s: scan_busy_occurred, cannot proceed\n",
			__FUNCTION__));
		ret = TRUE;
	}
#ifdef DNGL_AXI_ERROR_LOGGING
	if (dhdp->axi_error) {
		DHD_ERROR_RLMT(("%s: AXI error occurred, cannot proceed\n",
			__FUNCTION__));
		ret = TRUE;
	}
#endif /* DNGL_AXI_ERROR_LOGGING */
	if (dhd_bus_get_linkdown(dhdp)) {
		DHD_ERROR_RLMT(("%s : PCIE Link down occurred, cannot proceed\n",
			__FUNCTION__));
		ret = TRUE;
	}
	if (dhd_bus_get_cto(dhdp)) {
		DHD_ERROR_RLMT(("%s : CTO Recovery reported, cannot proceed\n",
			__FUNCTION__));
		ret = TRUE;
	}
	return ret;
}
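/* Reset all of the error flags checked by dhd_query_bus_erros(). */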
void
dhd_clear_bus_errors(dhd_pub_t *dhdp)
{
	if (!dhdp)
		return;
	dhdp->dongle_reset = FALSE;
	dhdp->dongle_trap_occured = FALSE;
	dhdp->iovar_timeout_occured = FALSE;
#ifdef PCIE_FULL_DONGLE
	dhdp->d3ack_timeout_occured = FALSE;
	dhdp->livelock_occured = FALSE;
	dhdp->pktid_audit_failed = FALSE;
#endif // endif
	dhdp->iface_op_failed = FALSE;
	dhdp->scan_timeout_occurred = FALSE;
	dhdp->scan_busy_occurred = FALSE;
}
#ifdef DHD_SSSR_DUMP
/* This can be overwritten by module parameter defined in dhd_linux.c */
uint support_sssr_dump = TRUE;
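/*
 * Allocate (zero-initialized) the memory pool that holds the SSSR dump
 * buffers; dhd_sssr_dump_init() later carves this pool into the individual
 * per-core and digital buffers.
 */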
int
dhd_sssr_mempool_init(dhd_pub_t *dhd)
{
	dhd->sssr_mempool = (uint8 *) MALLOCZ(dhd->osh, DHD_SSSR_MEMPOOL_SIZE);
	if (dhd->sssr_mempool == NULL) {
		DHD_ERROR(("%s: MALLOC of sssr_mempool failed\n",
			__FUNCTION__));
		return BCME_ERROR;
	}
	return BCME_OK;
}
void
dhd_sssr_mempool_deinit(dhd_pub_t *dhd)
{
	if (dhd->sssr_mempool) {
		MFREE(dhd->osh, dhd->sssr_mempool, DHD_SSSR_MEMPOOL_SIZE);
		dhd->sssr_mempool = NULL;
	}
}
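/*
 * Log the SSSR register info reported by the firmware (PMU, chipcommon, ARM,
 * PCIe, VASIP and per-D11-core registers). Compiled to a no-op unless
 * DHD_PCIE_REG_ACCESS is defined.
 */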
void
dhd_dump_sssr_reg_info(sssr_reg_info_v1_t *sssr_reg_info)
{
#ifdef DHD_PCIE_REG_ACCESS
	int i, j;
	DHD_ERROR(("************** SSSR REG INFO start ****************\n"));
	DHD_ERROR(("pmu_regs\n"));
	DHD_ERROR(("pmuintmask0=0x%x pmuintmask1=0x%x resreqtimer=0x%x "
		"macresreqtimer=0x%x macresreqtimer1=0x%x\n",
		sssr_reg_info->pmu_regs.base_regs.pmuintmask0,
		sssr_reg_info->pmu_regs.base_regs.pmuintmask1,
		sssr_reg_info->pmu_regs.base_regs.resreqtimer,
		sssr_reg_info->pmu_regs.base_regs.macresreqtimer,
		sssr_reg_info->pmu_regs.base_regs.macresreqtimer1));
	DHD_ERROR(("chipcommon_regs\n"));
	DHD_ERROR(("intmask=0x%x powerctrl=0x%x clockcontrolstatus=0x%x powerctrl_mask=0x%x\n",
		sssr_reg_info->chipcommon_regs.base_regs.intmask,
		sssr_reg_info->chipcommon_regs.base_regs.powerctrl,
		sssr_reg_info->chipcommon_regs.base_regs.clockcontrolstatus,
		sssr_reg_info->chipcommon_regs.base_regs.powerctrl_mask));
	DHD_ERROR(("arm_regs\n"));
	DHD_ERROR(("clockcontrolstatus=0x%x clockcontrolstatus_val=0x%x"
		" resetctrl=0x%x itopoobb=0x%x\n",
		sssr_reg_info->arm_regs.base_regs.clockcontrolstatus,
		sssr_reg_info->arm_regs.base_regs.clockcontrolstatus_val,
		sssr_reg_info->arm_regs.wrapper_regs.resetctrl,
		sssr_reg_info->arm_regs.wrapper_regs.itopoobb));
	DHD_ERROR(("pcie_regs\n"));
	DHD_ERROR(("ltrstate=0x%x clockcontrolstatus=0x%x "
		"clockcontrolstatus_val=0x%x itopoobb=0x%x\n",
		sssr_reg_info->pcie_regs.base_regs.ltrstate,
		sssr_reg_info->pcie_regs.base_regs.clockcontrolstatus,
		sssr_reg_info->pcie_regs.base_regs.clockcontrolstatus_val,
		sssr_reg_info->pcie_regs.wrapper_regs.itopoobb));
	DHD_ERROR(("vasip_regs\n"));
	DHD_ERROR(("ioctrl=0x%x vasip_sr_addr=0x%x vasip_sr_size=0x%x\n",
		sssr_reg_info->vasip_regs.wrapper_regs.ioctrl,
		sssr_reg_info->vasip_regs.vasip_sr_addr,
		sssr_reg_info->vasip_regs.vasip_sr_size));
	for (i = 0; i < MAX_NUM_D11CORES; i++) {
		DHD_ERROR(("mac_regs core[%d]\n", i));
		DHD_ERROR(("xmtaddress=0x%x xmtdata=0x%x clockcontrolstatus=0x%x "
			"clockcontrolstatus_val=0x%x\n",
			sssr_reg_info->mac_regs[i].base_regs.xmtaddress,
			sssr_reg_info->mac_regs[i].base_regs.xmtdata,
			sssr_reg_info->mac_regs[i].base_regs.clockcontrolstatus,
			sssr_reg_info->mac_regs[i].base_regs.clockcontrolstatus_val));
		DHD_ERROR(("resetctrl=0x%x itopoobb=0x%x ioctrl=0x%x\n",
			sssr_reg_info->mac_regs[i].wrapper_regs.resetctrl,
			sssr_reg_info->mac_regs[i].wrapper_regs.itopoobb,
			sssr_reg_info->mac_regs[i].wrapper_regs.ioctrl));
		for (j = 0; j < SSSR_D11_RESET_SEQ_STEPS; j++) {
			DHD_ERROR(("ioctrl_resetseq_val[%d] 0x%x\n", j,
				sssr_reg_info->mac_regs[i].wrapper_regs.ioctrl_resetseq_val[j]));
		}
		DHD_ERROR(("sr_size=0x%x\n", sssr_reg_info->mac_regs[i].sr_size));
	}
	DHD_ERROR(("************** SSSR REG INFO end ****************\n"));
#endif /* DHD_PCIE_REG_ACCESS */
}
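/* Fetch the "sssr_reg_info" iovar from the firmware and log its contents. */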
int
dhd_get_sssr_reg_info(dhd_pub_t *dhd)
{
	int ret;
	/* get sssr_reg_info from firmware */
	memset((void *)&dhd->sssr_reg_info, 0, sizeof(dhd->sssr_reg_info));
	ret = dhd_iovar(dhd, 0, "sssr_reg_info", NULL, 0, (char *)&dhd->sssr_reg_info,
		sizeof(dhd->sssr_reg_info), FALSE);
	if (ret < 0) {
		DHD_ERROR(("%s: sssr_reg_info failed (error=%d)\n",
			__FUNCTION__, ret));
		return BCME_ERROR;
	}
	dhd_dump_sssr_reg_info(&dhd->sssr_reg_info);
	return BCME_OK;
}
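/*
 * Compute the total SSSR dump buffer size: the sum of all D11 core SR sizes
 * plus the VASIP SR size, doubled because dumps are captured both before and
 * after save/restore.
 */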
uint32
dhd_get_sssr_bufsize(dhd_pub_t *dhd)
{
	int i;
	uint32 sssr_bufsize = 0;
	/* Sum the SR sizes of all D11 cores */
	for (i = 0; i < MAX_NUM_D11CORES; i++) {
		sssr_bufsize += dhd->sssr_reg_info.mac_regs[i].sr_size;
	}
	sssr_bufsize += dhd->sssr_reg_info.vasip_regs.vasip_sr_size;
	/* Double the size as different dumps will be saved before and after SR */
	sssr_bufsize = 2 * sssr_bufsize;
	return sssr_bufsize;
}
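/*
 * Validate the firmware-reported SSSR register info (version, length and
 * required buffer size) and carve the preallocated mempool into per-D11-core
 * "before"/"after" buffers plus a digital (VASIP or dig_mem_info) buffer pair.
 */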
int
dhd_sssr_dump_init(dhd_pub_t *dhd)
{
	int i;
	uint32 sssr_bufsize;
	uint32 mempool_used = 0;
	dhd->sssr_inited = FALSE;
	if (!support_sssr_dump) {
		DHD_ERROR(("%s: sssr dump not inited as instructed by mod param\n", __FUNCTION__));
		return BCME_OK;
	}
	/* check if sssr mempool is allocated */
	if (dhd->sssr_mempool == NULL) {
		DHD_ERROR(("%s: sssr_mempool is not allocated\n",
			__FUNCTION__));
		return BCME_ERROR;
	}
	/* Get SSSR reg info */
	if (dhd_get_sssr_reg_info(dhd) != BCME_OK) {
		DHD_ERROR(("%s: dhd_get_sssr_reg_info failed\n", __FUNCTION__));
		return BCME_ERROR;
	}
	/* Validate structure version */
	if (dhd->sssr_reg_info.version > SSSR_REG_INFO_VER_1) {
		DHD_ERROR(("%s: dhd->sssr_reg_info.version (%d : %d) mismatch\n",
			__FUNCTION__, (int)dhd->sssr_reg_info.version, SSSR_REG_INFO_VER_1));
		return BCME_ERROR;
	}
	/* Validate structure length */
	if (dhd->sssr_reg_info.length < sizeof(sssr_reg_info_v0_t)) {
		DHD_ERROR(("%s: dhd->sssr_reg_info.length (%d : %d) mismatch\n",
			__FUNCTION__, (int)dhd->sssr_reg_info.length,
			(int)sizeof(sssr_reg_info_v0_t)));
		return BCME_ERROR;
	}
	/* validate fifo size */
	sssr_bufsize = dhd_get_sssr_bufsize(dhd);
	if (sssr_bufsize > DHD_SSSR_MEMPOOL_SIZE) {
		DHD_ERROR(("%s: sssr_bufsize(%d) is greater than sssr_mempool(%d)\n",
			__FUNCTION__, (int)sssr_bufsize, DHD_SSSR_MEMPOOL_SIZE));
		return BCME_ERROR;
	}
	/* init all pointers to NULL */
	for (i = 0; i < MAX_NUM_D11CORES; i++) {
		dhd->sssr_d11_before[i] = NULL;
		dhd->sssr_d11_after[i] = NULL;
	}
	dhd->sssr_dig_buf_before = NULL;
	dhd->sssr_dig_buf_after = NULL;
	/* Allocate memory */
	for (i = 0; i < MAX_NUM_D11CORES; i++) {
		if (dhd->sssr_reg_info.mac_regs[i].sr_size) {
			dhd->sssr_d11_before[i] = (uint32 *)(dhd->sssr_mempool + mempool_used);
			mempool_used += dhd->sssr_reg_info.mac_regs[i].sr_size;
			dhd->sssr_d11_after[i] = (uint32 *)(dhd->sssr_mempool + mempool_used);
			mempool_used += dhd->sssr_reg_info.mac_regs[i].sr_size;
		}
	}
	if (dhd->sssr_reg_info.vasip_regs.vasip_sr_size) {
		dhd->sssr_dig_buf_before = (uint32 *)(dhd->sssr_mempool + mempool_used);
		mempool_used += dhd->sssr_reg_info.vasip_regs.vasip_sr_size;
		dhd->sssr_dig_buf_after = (uint32 *)(dhd->sssr_mempool + mempool_used);
		mempool_used += dhd->sssr_reg_info.vasip_regs.vasip_sr_size;
	} else if ((dhd->sssr_reg_info.length > OFFSETOF(sssr_reg_info_v1_t, dig_mem_info)) &&
		dhd->sssr_reg_info.dig_mem_info.dig_sr_addr) {
		dhd->sssr_dig_buf_before = (uint32 *)(dhd->sssr_mempool + mempool_used);
		mempool_used += dhd->sssr_reg_info.dig_mem_info.dig_sr_size;
		dhd->sssr_dig_buf_after = (uint32 *)(dhd->sssr_mempool + mempool_used);
		mempool_used += dhd->sssr_reg_info.dig_mem_info.dig_sr_size;
	}
	dhd->sssr_inited = TRUE;
	return BCME_OK;
}
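/*
 * Mark SSSR dump support as uninitialized and drop all buffer pointers; the
 * underlying mempool itself is released by dhd_sssr_mempool_deinit().
 */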
void
dhd_sssr_dump_deinit(dhd_pub_t *dhd)
{
	int i;
	dhd->sssr_inited = FALSE;
	/* init all pointers to NULL */
	for (i = 0; i < MAX_NUM_D11CORES; i++) {
		dhd->sssr_d11_before[i] = NULL;
		dhd->sssr_d11_after[i] = NULL;
	}
	dhd->sssr_dig_buf_before = NULL;
	dhd->sssr_dig_buf_after = NULL;
	return;
}
  683. void
  684. dhd_sssr_print_filepath(dhd_pub_t *dhd, char *path)
  685. {
  686. bool print_info = FALSE;
  687. int dump_mode;
  688. if (!dhd || !path) {
  689. DHD_ERROR(("%s: dhd or memdump_path is NULL\n",
  690. __FUNCTION__));
  691. return;
  692. }
  693. if (!dhd->sssr_dump_collected) {
  694. /* SSSR dump is not collected */
  695. return;
  696. }
  697. dump_mode = dhd->sssr_dump_mode;
  698. if (bcmstrstr(path, "core_0_before")) {
  699. if (dhd->sssr_d11_outofreset[0] &&
  700. dump_mode == SSSR_DUMP_MODE_SSSR) {
  701. print_info = TRUE;
  702. }
  703. } else if (bcmstrstr(path, "core_0_after")) {
  704. if (dhd->sssr_d11_outofreset[0]) {
  705. print_info = TRUE;
  706. }
  707. } else if (bcmstrstr(path, "core_1_before")) {
  708. if (dhd->sssr_d11_outofreset[1] &&
  709. dump_mode == SSSR_DUMP_MODE_SSSR) {
  710. print_info = TRUE;
  711. }
  712. } else if (bcmstrstr(path, "core_1_after")) {
  713. if (dhd->sssr_d11_outofreset[1]) {
  714. print_info = TRUE;
  715. }
  716. } else {
  717. print_info = TRUE;
  718. }
  719. if (print_info) {
  720. DHD_ERROR(("%s: file_path = %s%s\n", __FUNCTION__,
  721. path, FILE_NAME_HAL_TAG));
  722. }
  723. }
  724. #endif /* DHD_SSSR_DUMP */
  725. #ifdef DHD_FW_COREDUMP
  726. void* dhd_get_fwdump_buf(dhd_pub_t *dhd_pub, uint32 length)
  727. {
  728. if (!dhd_pub->soc_ram) {
  729. #if defined(CONFIG_DHD_USE_STATIC_BUF) && defined(DHD_USE_STATIC_MEMDUMP)
  730. dhd_pub->soc_ram = (uint8*)DHD_OS_PREALLOC(dhd_pub,
  731. DHD_PREALLOC_MEMDUMP_RAM, length);
  732. #else
  733. dhd_pub->soc_ram = (uint8*) MALLOC(dhd_pub->osh, length);
  734. #endif /* CONFIG_DHD_USE_STATIC_BUF && DHD_USE_STATIC_MEMDUMP */
  735. }
  736. if (dhd_pub->soc_ram == NULL) {
  737. DHD_ERROR(("%s: Failed to allocate memory for fw crash snap shot.\n",
  738. __FUNCTION__));
  739. dhd_pub->soc_ram_length = 0;
  740. } else {
  741. memset(dhd_pub->soc_ram, 0, length);
  742. dhd_pub->soc_ram_length = length;
  743. }
  744. /* soc_ram free handled in dhd_{free,clear} */
  745. return dhd_pub->soc_ram;
  746. }
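/* Illustrative sketch (hypothetical, guarded out with #if 0): a typical caller
 * pattern for dhd_get_fwdump_buf() above. The buffer is allocated (or reused)
 * and then filled with the dongle RAM image; freeing is handled in
 * dhd_{free,clear} as noted in the function. 'ram_image' and 'ram_size' are
 * placeholder names for whatever the bus layer read from the dongle.
 */
#if 0
static int
example_save_socram(dhd_pub_t *dhdp, const uint8 *ram_image, uint32 ram_size)
{
	uint8 *dump = (uint8 *)dhd_get_fwdump_buf(dhdp, ram_size);

	if (dump == NULL) {
		return BCME_NOMEM;
	}
	/* keep a host-side snapshot of the dongle memory for later extraction */
	memcpy(dump, ram_image, ram_size);
	return BCME_OK;
}
#endif /* 0 */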
  747. #endif /* DHD_FW_COREDUMP */
748. /* Note to NDIS developers: the structure dhd_common is redundant;
749. * please do NOT merge it back from other branches!
750. */
  751. int
  752. dhd_common_socram_dump(dhd_pub_t *dhdp)
  753. {
  754. return dhd_socram_dump(dhdp->bus);
  755. }
  756. int
  757. dhd_dump(dhd_pub_t *dhdp, char *buf, int buflen)
  758. {
  759. struct bcmstrbuf b;
  760. struct bcmstrbuf *strbuf = &b;
  761. if (!dhdp || !dhdp->prot || !buf) {
  762. return BCME_ERROR;
  763. }
  764. bcm_binit(strbuf, buf, buflen);
  765. /* Base DHD info */
  766. bcm_bprintf(strbuf, "%s\n", dhd_version);
  767. bcm_bprintf(strbuf, "\n");
  768. bcm_bprintf(strbuf, "pub.up %d pub.txoff %d pub.busstate %d\n",
  769. dhdp->up, dhdp->txoff, dhdp->busstate);
  770. bcm_bprintf(strbuf, "pub.hdrlen %u pub.maxctl %u pub.rxsz %u\n",
  771. dhdp->hdrlen, dhdp->maxctl, dhdp->rxsz);
  772. bcm_bprintf(strbuf, "pub.iswl %d pub.drv_version %ld pub.mac "MACDBG"\n",
  773. dhdp->iswl, dhdp->drv_version, MAC2STRDBG(&dhdp->mac));
  774. bcm_bprintf(strbuf, "pub.bcmerror %d tickcnt %u\n", dhdp->bcmerror, dhdp->tickcnt);
  775. bcm_bprintf(strbuf, "dongle stats:\n");
  776. bcm_bprintf(strbuf, "tx_packets %lu tx_bytes %lu tx_errors %lu tx_dropped %lu\n",
  777. dhdp->dstats.tx_packets, dhdp->dstats.tx_bytes,
  778. dhdp->dstats.tx_errors, dhdp->dstats.tx_dropped);
  779. bcm_bprintf(strbuf, "rx_packets %lu rx_bytes %lu rx_errors %lu rx_dropped %lu\n",
  780. dhdp->dstats.rx_packets, dhdp->dstats.rx_bytes,
  781. dhdp->dstats.rx_errors, dhdp->dstats.rx_dropped);
  782. bcm_bprintf(strbuf, "multicast %lu\n", dhdp->dstats.multicast);
  783. bcm_bprintf(strbuf, "bus stats:\n");
  784. bcm_bprintf(strbuf, "tx_packets %lu tx_dropped %lu tx_multicast %lu tx_errors %lu\n",
  785. dhdp->tx_packets, dhdp->tx_dropped, dhdp->tx_multicast, dhdp->tx_errors);
  786. bcm_bprintf(strbuf, "tx_ctlpkts %lu tx_ctlerrs %lu\n",
  787. dhdp->tx_ctlpkts, dhdp->tx_ctlerrs);
  788. bcm_bprintf(strbuf, "rx_packets %lu rx_multicast %lu rx_errors %lu \n",
  789. dhdp->rx_packets, dhdp->rx_multicast, dhdp->rx_errors);
  790. bcm_bprintf(strbuf, "rx_ctlpkts %lu rx_ctlerrs %lu rx_dropped %lu\n",
  791. dhdp->rx_ctlpkts, dhdp->rx_ctlerrs, dhdp->rx_dropped);
  792. bcm_bprintf(strbuf, "rx_readahead_cnt %lu tx_realloc %lu\n",
  793. dhdp->rx_readahead_cnt, dhdp->tx_realloc);
  794. bcm_bprintf(strbuf, "tx_pktgetfail %lu rx_pktgetfail %lu\n",
  795. dhdp->tx_pktgetfail, dhdp->rx_pktgetfail);
  796. bcm_bprintf(strbuf, "tx_big_packets %lu\n",
  797. dhdp->tx_big_packets);
  798. bcm_bprintf(strbuf, "\n");
  799. #ifdef DMAMAP_STATS
  800. /* Add DMA MAP info */
  801. bcm_bprintf(strbuf, "DMA MAP stats: \n");
  802. bcm_bprintf(strbuf, "txdata: %lu size: %luK, rxdata: %lu size: %luK\n",
  803. dhdp->dma_stats.txdata, KB(dhdp->dma_stats.txdata_sz),
  804. dhdp->dma_stats.rxdata, KB(dhdp->dma_stats.rxdata_sz));
  805. #ifndef IOCTLRESP_USE_CONSTMEM
  806. bcm_bprintf(strbuf, "IOCTL RX: %lu size: %luK ,",
  807. dhdp->dma_stats.ioctl_rx, KB(dhdp->dma_stats.ioctl_rx_sz));
  808. #endif /* !IOCTLRESP_USE_CONSTMEM */
  809. bcm_bprintf(strbuf, "EVENT RX: %lu size: %luK, INFO RX: %lu size: %luK, "
  810. "TSBUF RX: %lu size %luK\n",
  811. dhdp->dma_stats.event_rx, KB(dhdp->dma_stats.event_rx_sz),
  812. dhdp->dma_stats.info_rx, KB(dhdp->dma_stats.info_rx_sz),
  813. dhdp->dma_stats.tsbuf_rx, KB(dhdp->dma_stats.tsbuf_rx_sz));
  814. bcm_bprintf(strbuf, "Total : %luK \n",
  815. KB(dhdp->dma_stats.txdata_sz + dhdp->dma_stats.rxdata_sz +
  816. dhdp->dma_stats.ioctl_rx_sz + dhdp->dma_stats.event_rx_sz +
  817. dhdp->dma_stats.tsbuf_rx_sz));
  818. #endif /* DMAMAP_STATS */
  819. bcm_bprintf(strbuf, "dhd_induce_error : %u\n", dhdp->dhd_induce_error);
  820. /* Add any prot info */
  821. dhd_prot_dump(dhdp, strbuf);
  822. bcm_bprintf(strbuf, "\n");
  823. /* Add any bus info */
  824. dhd_bus_dump(dhdp, strbuf);
  825. #if defined(DHD_LB_STATS)
  826. dhd_lb_stats_dump(dhdp, strbuf);
  827. #endif /* DHD_LB_STATS */
  828. #ifdef DHD_WET
  829. if (dhd_get_wet_mode(dhdp)) {
  830. bcm_bprintf(strbuf, "Wet Dump:\n");
  831. dhd_wet_dump(dhdp, strbuf);
  832. }
  833. #endif /* DHD_WET */
  834. /* return remaining buffer length */
  835. return (!strbuf->size ? BCME_BUFTOOSHORT : strbuf->size);
  836. }
  837. void
  838. dhd_dump_to_kernelog(dhd_pub_t *dhdp)
  839. {
  840. char buf[512];
  841. DHD_ERROR(("F/W version: %s\n", fw_version));
  842. bcm_bprintf_bypass = TRUE;
  843. dhd_dump(dhdp, buf, sizeof(buf));
  844. bcm_bprintf_bypass = FALSE;
  845. }
  846. int
  847. dhd_wl_ioctl_cmd(dhd_pub_t *dhd_pub, int cmd, void *arg, int len, uint8 set, int ifidx)
  848. {
  849. wl_ioctl_t ioc;
  850. ioc.cmd = cmd;
  851. ioc.buf = arg;
  852. ioc.len = len;
  853. ioc.set = set;
  854. return dhd_wl_ioctl(dhd_pub, ifidx, &ioc, arg, len);
  855. }
  856. int
  857. dhd_wl_ioctl_get_intiovar(dhd_pub_t *dhd_pub, char *name, uint *pval,
  858. int cmd, uint8 set, int ifidx)
  859. {
  860. char iovbuf[WLC_IOCTL_SMLEN];
  861. int ret = -1;
  862. memset(iovbuf, 0, sizeof(iovbuf));
  863. if (bcm_mkiovar(name, NULL, 0, iovbuf, sizeof(iovbuf))) {
  864. ret = dhd_wl_ioctl_cmd(dhd_pub, cmd, iovbuf, sizeof(iovbuf), set, ifidx);
  865. if (!ret) {
  866. *pval = ltoh32(*((uint*)iovbuf));
  867. } else {
  868. DHD_ERROR(("%s: get int iovar %s failed, ERR %d\n",
  869. __FUNCTION__, name, ret));
  870. }
  871. } else {
  872. DHD_ERROR(("%s: mkiovar %s failed\n",
  873. __FUNCTION__, name));
  874. }
  875. return ret;
  876. }
  877. int
  878. dhd_wl_ioctl_set_intiovar(dhd_pub_t *dhd_pub, char *name, uint val,
  879. int cmd, uint8 set, int ifidx)
  880. {
  881. char iovbuf[WLC_IOCTL_SMLEN];
  882. int ret = -1;
  883. int lval = htol32(val);
  884. uint len;
  885. len = bcm_mkiovar(name, (char*)&lval, sizeof(lval), iovbuf, sizeof(iovbuf));
  886. if (len) {
  887. ret = dhd_wl_ioctl_cmd(dhd_pub, cmd, iovbuf, len, set, ifidx);
  888. if (ret) {
  889. DHD_ERROR(("%s: set int iovar %s failed, ERR %d\n",
  890. __FUNCTION__, name, ret));
  891. }
  892. } else {
  893. DHD_ERROR(("%s: mkiovar %s failed\n",
  894. __FUNCTION__, name));
  895. }
  896. return ret;
  897. }
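/* Illustrative sketch (hypothetical, guarded out with #if 0): how the two int
 * iovar wrappers above are typically used together. The iovar name "lifetime"
 * is only an example; any firmware int iovar follows the same
 * set-then-read-back pattern on a given interface index.
 */
#if 0
static int
example_int_iovar_roundtrip(dhd_pub_t *dhd_pub)
{
	uint readback = 0;
	int ret;

	/* set the value on ifidx 0 through WLC_SET_VAR */
	ret = dhd_wl_ioctl_set_intiovar(dhd_pub, "lifetime", 100, WLC_SET_VAR, TRUE, 0);
	if (ret != BCME_OK) {
		return ret;
	}
	/* read it back through WLC_GET_VAR; the helper converts from LE for us */
	return dhd_wl_ioctl_get_intiovar(dhd_pub, "lifetime", &readback, WLC_GET_VAR, FALSE, 0);
}
#endif /* 0 */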
  898. static struct ioctl2str_s {
  899. uint32 ioctl;
  900. char *name;
  901. } ioctl2str_array[] = {
  902. {WLC_UP, "UP"},
  903. {WLC_DOWN, "DOWN"},
  904. {WLC_SET_PROMISC, "SET_PROMISC"},
  905. {WLC_SET_INFRA, "SET_INFRA"},
  906. {WLC_SET_AUTH, "SET_AUTH"},
  907. {WLC_SET_SSID, "SET_SSID"},
  908. {WLC_RESTART, "RESTART"},
  909. {WLC_SET_CHANNEL, "SET_CHANNEL"},
  910. {WLC_SET_RATE_PARAMS, "SET_RATE_PARAMS"},
  911. {WLC_SET_KEY, "SET_KEY"},
  912. {WLC_SCAN, "SCAN"},
  913. {WLC_DISASSOC, "DISASSOC"},
  914. {WLC_REASSOC, "REASSOC"},
  915. {WLC_SET_COUNTRY, "SET_COUNTRY"},
  916. {WLC_SET_WAKE, "SET_WAKE"},
  917. {WLC_SET_SCANSUPPRESS, "SET_SCANSUPPRESS"},
  918. {WLC_SCB_DEAUTHORIZE, "SCB_DEAUTHORIZE"},
  919. {WLC_SET_WSEC, "SET_WSEC"},
  920. {WLC_SET_INTERFERENCE_MODE, "SET_INTERFERENCE_MODE"},
  921. {WLC_SET_RADAR, "SET_RADAR"},
  922. {0, NULL}
  923. };
  924. static char *
  925. ioctl2str(uint32 ioctl)
  926. {
  927. struct ioctl2str_s *p = ioctl2str_array;
  928. while (p->name != NULL) {
  929. if (p->ioctl == ioctl) {
  930. return p->name;
  931. }
  932. p++;
  933. }
  934. return "";
  935. }
  936. /**
  937. * @param ioc IO control struct, members are partially used by this function.
  938. * @param buf [inout] Contains parameters to send to dongle, contains dongle response on return.
  939. * @param len Maximum number of bytes that dongle is allowed to write into 'buf'.
  940. */
  941. int
  942. dhd_wl_ioctl(dhd_pub_t *dhd_pub, int ifidx, wl_ioctl_t *ioc, void *buf, int len)
  943. {
  944. int ret = BCME_ERROR;
  945. unsigned long flags;
  946. #ifdef DUMP_IOCTL_IOV_LIST
  947. dhd_iov_li_t *iov_li;
  948. #endif /* DUMP_IOCTL_IOV_LIST */
  949. #ifdef DHD_PCIE_NATIVE_RUNTIMEPM
  950. DHD_OS_WAKE_LOCK(dhd_pub);
  951. if (pm_runtime_get_sync(dhd_bus_to_dev(dhd_pub->bus)) < 0) {
  952. DHD_RPM(("%s: pm_runtime_get_sync error. \n", __FUNCTION__));
  953. DHD_OS_WAKE_UNLOCK(dhd_pub);
  954. return BCME_ERROR;
  955. }
  956. #endif /* DHD_PCIE_NATIVE_RUNTIMEPM */
  957. #ifdef KEEPIF_ON_DEVICE_RESET
  958. if (ioc->cmd == WLC_GET_VAR) {
  959. dbus_config_t config;
  960. config.general_param = 0;
  961. if (buf) {
  962. if (!strcmp(buf, "wowl_activate")) {
963. /* becomes 1 (TRUE) after the decrement below */
  964. config.general_param = 2;
  965. } else if (!strcmp(buf, "wowl_clear")) {
966. /* becomes 0 (FALSE) after the decrement below */
  967. config.general_param = 1;
  968. }
  969. }
  970. if (config.general_param) {
  971. config.config_id = DBUS_CONFIG_ID_KEEPIF_ON_DEVRESET;
  972. config.general_param--;
  973. dbus_set_config(dhd_pub->dbus, &config);
  974. }
  975. }
  976. #endif /* KEEPIF_ON_DEVICE_RESET */
  977. if (dhd_os_proto_block(dhd_pub))
  978. {
  979. #ifdef DHD_LOG_DUMP
  980. int slen, val, lval, min_len;
  981. char *msg, tmp[64];
  982. /* WLC_GET_VAR */
  983. if (ioc->cmd == WLC_GET_VAR && buf) {
  984. min_len = MIN(sizeof(tmp) - 1, strlen(buf));
  985. memset(tmp, 0, sizeof(tmp));
  986. bcopy(buf, tmp, min_len);
  987. tmp[min_len] = '\0';
  988. }
  989. #endif /* DHD_LOG_DUMP */
  990. #ifdef DHD_DISCONNECT_TRACE
  991. if ((WLC_DISASSOC == ioc->cmd) || (WLC_DOWN == ioc->cmd) ||
  992. (WLC_DISASSOC_MYAP == ioc->cmd)) {
  993. DHD_ERROR(("IOCTL Disconnect WiFi: %d\n", ioc->cmd));
  994. }
995. #endif /* DHD_DISCONNECT_TRACE */
996. /* logging of iovars that are sent to the dongle, ./dhd msglevel +iovar */
  997. if (ioc->set == TRUE) {
  998. char *pars = (char *)buf; // points at user buffer
  999. if (ioc->cmd == WLC_SET_VAR && buf) {
  1000. DHD_DNGL_IOVAR_SET(("iovar:%d: set %s", ifidx, pars));
  1001. if (ioc->len > 1 + sizeof(uint32)) {
  1002. // skip iovar name:
  1003. pars += strnlen(pars, ioc->len - 1 - sizeof(uint32));
  1004. pars++; // skip NULL character
  1005. }
  1006. } else {
  1007. DHD_DNGL_IOVAR_SET(("ioctl:%d: set %d %s",
  1008. ifidx, ioc->cmd, ioctl2str(ioc->cmd)));
  1009. }
  1010. if (pars != NULL) {
  1011. DHD_DNGL_IOVAR_SET((" 0x%x\n", *(uint32*)pars));
  1012. } else {
  1013. DHD_DNGL_IOVAR_SET((" NULL\n"));
  1014. }
  1015. }
  1016. DHD_LINUX_GENERAL_LOCK(dhd_pub, flags);
  1017. if (DHD_BUS_CHECK_DOWN_OR_DOWN_IN_PROGRESS(dhd_pub)) {
  1018. DHD_INFO(("%s: returning as busstate=%d\n",
  1019. __FUNCTION__, dhd_pub->busstate));
  1020. DHD_LINUX_GENERAL_UNLOCK(dhd_pub, flags);
  1021. dhd_os_proto_unblock(dhd_pub);
  1022. return -ENODEV;
  1023. }
  1024. DHD_BUS_BUSY_SET_IN_IOVAR(dhd_pub);
  1025. DHD_LINUX_GENERAL_UNLOCK(dhd_pub, flags);
  1026. #ifdef DHD_PCIE_RUNTIMEPM
  1027. dhdpcie_runtime_bus_wake(dhd_pub, TRUE, dhd_wl_ioctl);
  1028. #endif /* DHD_PCIE_RUNTIMEPM */
  1029. DHD_LINUX_GENERAL_LOCK(dhd_pub, flags);
  1030. if (DHD_BUS_CHECK_SUSPEND_OR_SUSPEND_IN_PROGRESS(dhd_pub)) {
  1031. DHD_ERROR(("%s: bus is in suspend(%d) or suspending(0x%x) state!!\n",
  1032. __FUNCTION__, dhd_pub->busstate, dhd_pub->dhd_bus_busy_state));
  1033. DHD_BUS_BUSY_CLEAR_IN_IOVAR(dhd_pub);
  1034. dhd_os_busbusy_wake(dhd_pub);
  1035. DHD_LINUX_GENERAL_UNLOCK(dhd_pub, flags);
  1036. dhd_os_proto_unblock(dhd_pub);
  1037. return -ENODEV;
  1038. }
  1039. DHD_LINUX_GENERAL_UNLOCK(dhd_pub, flags);
  1040. #ifdef DUMP_IOCTL_IOV_LIST
  1041. if (ioc->cmd != WLC_GET_MAGIC && ioc->cmd != WLC_GET_VERSION && buf) {
  1042. if (!(iov_li = MALLOC(dhd_pub->osh, sizeof(*iov_li)))) {
  1043. DHD_ERROR(("iovar dump list item allocation Failed\n"));
  1044. } else {
  1045. iov_li->cmd = ioc->cmd;
  1046. if (buf)
  1047. bcopy((char *)buf, iov_li->buff, strlen((char *)buf)+1);
  1048. dhd_iov_li_append(dhd_pub, &dhd_pub->dump_iovlist_head,
  1049. &iov_li->list);
  1050. }
  1051. }
  1052. #endif /* DUMP_IOCTL_IOV_LIST */
  1053. ret = dhd_prot_ioctl(dhd_pub, ifidx, ioc, buf, len);
  1054. #ifdef DUMP_IOCTL_IOV_LIST
  1055. if (ret == -ETIMEDOUT) {
  1056. DHD_ERROR(("Last %d issued commands: Latest one is at bottom.\n",
  1057. IOV_LIST_MAX_LEN));
  1058. dhd_iov_li_print(&dhd_pub->dump_iovlist_head);
  1059. }
  1060. #endif /* DUMP_IOCTL_IOV_LIST */
  1061. #ifdef WL_CFGVENDOR_SEND_HANG_EVENT
  1062. if (ret == -ETIMEDOUT) {
  1063. copy_hang_info_ioctl_timeout(dhd_pub, ifidx, ioc);
  1064. }
  1065. #endif /* WL_CFGVENDOR_SEND_HANG_EVENT */
  1066. #ifdef DHD_LOG_DUMP
  1067. if ((ioc->cmd == WLC_GET_VAR || ioc->cmd == WLC_SET_VAR) &&
  1068. buf != NULL) {
  1069. if (buf) {
  1070. lval = 0;
  1071. slen = strlen(buf) + 1;
  1072. msg = (char*)buf;
  1073. if (len >= slen + sizeof(lval)) {
  1074. if (ioc->cmd == WLC_GET_VAR) {
  1075. msg = tmp;
  1076. lval = *(int*)buf;
  1077. } else {
  1078. min_len = MIN(ioc->len - slen, sizeof(int));
  1079. bcopy((msg + slen), &lval, min_len);
  1080. }
  1081. if (!strncmp(msg, "cur_etheraddr",
  1082. strlen("cur_etheraddr"))) {
  1083. lval = 0;
  1084. }
  1085. }
  1086. DHD_IOVAR_MEM((
  1087. "%s: cmd: %d, msg: %s val: 0x%x,"
  1088. " len: %d, set: %d, txn-id: %d\n",
  1089. ioc->cmd == WLC_GET_VAR ?
  1090. "WLC_GET_VAR" : "WLC_SET_VAR",
  1091. ioc->cmd, msg, lval, ioc->len, ioc->set,
  1092. dhd_prot_get_ioctl_trans_id(dhd_pub)));
  1093. } else {
  1094. DHD_IOVAR_MEM(("%s: cmd: %d, len: %d, set: %d, txn-id: %d\n",
  1095. ioc->cmd == WLC_GET_VAR ? "WLC_GET_VAR" : "WLC_SET_VAR",
  1096. ioc->cmd, ioc->len, ioc->set,
  1097. dhd_prot_get_ioctl_trans_id(dhd_pub)));
  1098. }
  1099. } else {
  1100. slen = ioc->len;
  1101. if (buf != NULL && slen != 0) {
  1102. if (slen >= 4) {
  1103. val = *(int*)buf;
  1104. } else if (slen >= 2) {
  1105. val = *(short*)buf;
  1106. } else {
  1107. val = *(char*)buf;
  1108. }
  1109. /* Do not dump for WLC_GET_MAGIC and WLC_GET_VERSION */
  1110. if (ioc->cmd != WLC_GET_MAGIC && ioc->cmd != WLC_GET_VERSION)
  1111. DHD_IOVAR_MEM(("WLC_IOCTL: cmd: %d, val: %d, len: %d, "
  1112. "set: %d\n", ioc->cmd, val, ioc->len, ioc->set));
  1113. } else {
  1114. DHD_IOVAR_MEM(("WLC_IOCTL: cmd: %d, buf is NULL\n", ioc->cmd));
  1115. }
  1116. }
  1117. #endif /* DHD_LOG_DUMP */
  1118. #if defined(OEM_ANDROID)
  1119. if (ret && dhd_pub->up) {
1120. /* Send hang event only if dhd_open() succeeded */
  1121. dhd_os_check_hang(dhd_pub, ifidx, ret);
  1122. }
  1123. if (ret == -ETIMEDOUT && !dhd_pub->up) {
  1124. DHD_ERROR(("%s: 'resumed on timeout' error is "
  1125. "occurred before the interface does not"
  1126. " bring up\n", __FUNCTION__));
  1127. }
  1128. #endif /* defined(OEM_ANDROID) */
  1129. DHD_LINUX_GENERAL_LOCK(dhd_pub, flags);
  1130. DHD_BUS_BUSY_CLEAR_IN_IOVAR(dhd_pub);
  1131. dhd_os_busbusy_wake(dhd_pub);
  1132. DHD_LINUX_GENERAL_UNLOCK(dhd_pub, flags);
  1133. dhd_os_proto_unblock(dhd_pub);
  1134. }
  1135. #ifdef DHD_PCIE_NATIVE_RUNTIMEPM
  1136. pm_runtime_mark_last_busy(dhd_bus_to_dev(dhd_pub->bus));
  1137. pm_runtime_put_autosuspend(dhd_bus_to_dev(dhd_pub->bus));
  1138. DHD_OS_WAKE_UNLOCK(dhd_pub);
  1139. #endif /* DHD_PCIE_NATIVE_RUNTIMEPM */
  1140. return ret;
  1141. }
  1142. uint wl_get_port_num(wl_io_pport_t *io_pport)
  1143. {
  1144. return 0;
  1145. }
  1146. /* Get bssidx from iovar params
  1147. * Input: dhd_pub - pointer to dhd_pub_t
  1148. * params - IOVAR params
  1149. * Output: idx - BSS index
1150. * val - pointer to the IOVAR arguments
  1151. */
  1152. static int
  1153. dhd_iovar_parse_bssidx(dhd_pub_t *dhd_pub, const char *params, uint32 *idx, const char **val)
  1154. {
  1155. char *prefix = "bsscfg:";
  1156. uint32 bssidx;
  1157. if (!(strncmp(params, prefix, strlen(prefix)))) {
  1158. /* per bss setting should be prefixed with 'bsscfg:' */
  1159. const char *p = params + strlen(prefix);
  1160. /* Skip Name */
  1161. while (*p != '\0')
  1162. p++;
1163. /* skip the NUL terminator */
  1164. p = p + 1;
  1165. bcopy(p, &bssidx, sizeof(uint32));
  1166. /* Get corresponding dhd index */
  1167. bssidx = dhd_bssidx2idx(dhd_pub, htod32(bssidx));
  1168. if (bssidx >= DHD_MAX_IFS) {
  1169. DHD_ERROR(("%s Wrong bssidx provided\n", __FUNCTION__));
  1170. return BCME_ERROR;
  1171. }
  1172. /* skip bss idx */
  1173. p += sizeof(uint32);
  1174. *val = p;
  1175. *idx = bssidx;
  1176. } else {
  1177. DHD_ERROR(("%s: bad parameter for per bss iovar\n", __FUNCTION__));
  1178. return BCME_ERROR;
  1179. }
  1180. return BCME_OK;
  1181. }
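/* Illustrative sketch (hypothetical, guarded out with #if 0): the buffer
 * layout dhd_iovar_parse_bssidx() above expects, i.e.
 * "bsscfg:<name>\0<4-byte bssidx, dongle-endian><value>". Real callers build
 * this via the wl/dhd utility code; the builder below only documents the
 * layout and is not part of the driver.
 */
#if 0
static int
example_build_bsscfg_iovar(char *buf, uint buflen, const char *name,
	uint32 bssidx, const void *val, uint vallen)
{
	const char *prefix = "bsscfg:";
	uint need = (uint)(strlen(prefix) + strlen(name) + 1 + sizeof(uint32) + vallen);
	char *p = buf;

	if (buflen < need) {
		return BCME_BUFTOOSHORT;
	}
	/* "bsscfg:" prefix, then the iovar name including its NUL terminator */
	memcpy(p, prefix, strlen(prefix));
	p += strlen(prefix);
	memcpy(p, name, strlen(name) + 1);
	p += strlen(name) + 1;
	/* 4-byte BSS index in dongle (little-endian) order */
	bssidx = htod32(bssidx);
	memcpy(p, &bssidx, sizeof(bssidx));
	p += sizeof(bssidx);
	/* finally the iovar value itself */
	memcpy(p, val, vallen);
	return (int)need;
}
#endif /* 0 */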
  1182. #if defined(DHD_DEBUG) && defined(BCMDHDUSB)
  1183. /* USB Device console input function */
  1184. int dhd_bus_console_in(dhd_pub_t *dhd, uchar *msg, uint msglen)
  1185. {
  1186. DHD_TRACE(("%s \n", __FUNCTION__));
  1187. return dhd_iovar(dhd, 0, "cons", msg, msglen, NULL, 0, TRUE);
  1188. }
  1189. #endif /* DHD_DEBUG && BCMDHDUSB */
  1190. #ifdef DHD_DEBUG
  1191. int
  1192. dhd_mem_debug(dhd_pub_t *dhd, uchar *msg, uint msglen)
  1193. {
  1194. unsigned long int_arg = 0;
  1195. char *p;
  1196. char *end_ptr = NULL;
  1197. dhd_dbg_mwli_t *mw_li;
  1198. dll_t *item, *next;
1199. /* check if mwalloc, mwquery or mwfree was supplied an argument after a space */
  1200. p = bcmstrstr((char *)msg, " ");
  1201. if (p != NULL) {
1202. /* convert the space to NUL so it acts as a separator for the firmware */
  1203. *p = '\0';
  1204. /* store the argument in int_arg */
  1205. int_arg = bcm_strtoul(p+1, &end_ptr, 10);
  1206. }
  1207. if (!p && !strcmp(msg, "query")) {
1208. /* let's query the list internally */
  1209. if (dll_empty(dll_head_p(&dhd->mw_list_head))) {
  1210. DHD_ERROR(("memwaste list is empty, call mwalloc < size > to allocate\n"));
  1211. } else {
  1212. for (item = dll_head_p(&dhd->mw_list_head);
  1213. !dll_end(&dhd->mw_list_head, item); item = next) {
  1214. next = dll_next_p(item);
  1215. mw_li = (dhd_dbg_mwli_t *)CONTAINEROF(item, dhd_dbg_mwli_t, list);
  1216. DHD_ERROR(("item: <id=%d, size=%d>\n", mw_li->id, mw_li->size));
  1217. }
  1218. }
  1219. } else if (p && end_ptr && (*end_ptr == '\0') && !strcmp(msg, "alloc")) {
  1220. int32 alloc_handle;
1221. /* convert size from KB to bytes and append as a 32-bit integer */
  1222. *((int32 *)(p+1)) = int_arg*1024;
  1223. *(p+1+sizeof(int32)) = '\0';
  1224. /* recalculated length -> 5 bytes for "alloc" + 4 bytes for size +
1225. * 1 byte for the NUL character
  1226. */
  1227. msglen = strlen(msg) + sizeof(int32) + 1;
  1228. if (dhd_wl_ioctl_cmd(dhd, WLC_GET_VAR, msg, msglen+1, FALSE, 0) < 0) {
  1229. DHD_ERROR(("IOCTL failed for memdebug alloc\n"));
  1230. }
1231. /* handle returned by the dongle, basically the address of the allocated unit */
  1232. alloc_handle = *((int32 *)msg);
  1233. /* add a node in the list with tuple <id, handle, size> */
  1234. if (alloc_handle == 0) {
  1235. DHD_ERROR(("Reuqested size could not be allocated\n"));
  1236. } else if (!(mw_li = MALLOC(dhd->osh, sizeof(*mw_li)))) {
  1237. DHD_ERROR(("mw list item allocation Failed\n"));
  1238. } else {
  1239. mw_li->id = dhd->mw_id++;
  1240. mw_li->handle = alloc_handle;
  1241. mw_li->size = int_arg;
  1242. /* append the node in the list */
  1243. dll_append(&dhd->mw_list_head, &mw_li->list);
  1244. }
  1245. } else if (p && end_ptr && (*end_ptr == '\0') && !strcmp(msg, "free")) {
  1246. /* inform dongle to free wasted chunk */
  1247. int handle = 0;
  1248. int size = 0;
  1249. for (item = dll_head_p(&dhd->mw_list_head);
  1250. !dll_end(&dhd->mw_list_head, item); item = next) {
  1251. next = dll_next_p(item);
  1252. mw_li = (dhd_dbg_mwli_t *)CONTAINEROF(item, dhd_dbg_mwli_t, list);
  1253. if (mw_li->id == (int)int_arg) {
  1254. handle = mw_li->handle;
  1255. size = mw_li->size;
  1256. dll_delete(item);
  1257. MFREE(dhd->osh, mw_li, sizeof(*mw_li));
  1258. if (dll_empty(dll_head_p(&dhd->mw_list_head))) {
  1259. /* reset the id */
  1260. dhd->mw_id = 0;
  1261. }
  1262. }
  1263. }
  1264. if (handle) {
  1265. int len;
  1266. /* append the free handle and the chunk size in first 8 bytes
  1267. * after the command and null character
  1268. */
  1269. *((int32 *)(p+1)) = handle;
  1270. *((int32 *)((p+1)+sizeof(int32))) = size;
  1271. /* append null as terminator */
  1272. *(p+1+2*sizeof(int32)) = '\0';
1273. /* recalculated length -> 4 bytes for "free" + 8 bytes for handle and size
1274. * + 1 byte for the NUL character
  1275. */
  1276. len = strlen(msg) + 2*sizeof(int32) + 1;
  1277. /* send iovar to free the chunk */
  1278. if (dhd_wl_ioctl_cmd(dhd, WLC_GET_VAR, msg, len, FALSE, 0) < 0) {
  1279. DHD_ERROR(("IOCTL failed for memdebug free\n"));
  1280. }
  1281. } else {
  1282. DHD_ERROR(("specified id does not exist\n"));
  1283. }
  1284. } else {
  1285. /* for all the wrong argument formats */
  1286. return BCME_BADARG;
  1287. }
  1288. return 0;
  1289. }
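/* Illustrative sketch (hypothetical, guarded out with #if 0): the three
 * command forms accepted by dhd_mem_debug() above. "alloc" takes a size in
 * KB, "free" takes an <id> as printed by "query". The buffers are oversized
 * because the function rewrites the message in place before sending it to
 * the dongle.
 */
#if 0
static void
example_mem_debug_commands(dhd_pub_t *dhd)
{
	char cmd_query[64] = "query";   /* list <id, size> of outstanding chunks */
	char cmd_alloc[64] = "alloc 4"; /* waste 4 KB of dongle memory */
	char cmd_free[64]  = "free 0";  /* free the chunk with id 0 */

	(void)dhd_mem_debug(dhd, (uchar *)cmd_query, (uint)strlen(cmd_query));
	(void)dhd_mem_debug(dhd, (uchar *)cmd_alloc, (uint)strlen(cmd_alloc));
	(void)dhd_mem_debug(dhd, (uchar *)cmd_free, (uint)strlen(cmd_free));
}
#endif /* 0 */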
  1290. extern void
  1291. dhd_mw_list_delete(dhd_pub_t *dhd, dll_t *list_head)
  1292. {
  1293. dll_t *item;
  1294. dhd_dbg_mwli_t *mw_li;
  1295. while (!(dll_empty(list_head))) {
  1296. item = dll_head_p(list_head);
  1297. mw_li = (dhd_dbg_mwli_t *)CONTAINEROF(item, dhd_dbg_mwli_t, list);
  1298. dll_delete(item);
  1299. MFREE(dhd->osh, mw_li, sizeof(*mw_li));
  1300. }
  1301. }
  1302. #ifdef BCMPCIE
  1303. int
  1304. dhd_flow_ring_debug(dhd_pub_t *dhd, char *msg, uint msglen)
  1305. {
  1306. flow_ring_table_t *flow_ring_table;
  1307. char *cmd;
  1308. char *end_ptr = NULL;
  1309. uint8 prio;
  1310. uint16 flowid;
  1311. int i;
  1312. int ret = 0;
  1313. cmd = bcmstrstr(msg, " ");
  1314. BCM_REFERENCE(prio);
  1315. if (cmd != NULL) {
1316. /* NUL-terminate the command token so string operations can be used */
  1317. *cmd = '\0';
  1318. } else {
  1319. DHD_ERROR(("missing: create/delete args\n"));
  1320. return BCME_ERROR;
  1321. }
  1322. if (cmd && !strcmp(msg, "create")) {
  1323. /* extract <"source address", "destination address", "priority"> */
  1324. uint8 sa[ETHER_ADDR_LEN], da[ETHER_ADDR_LEN];
  1325. BCM_REFERENCE(sa);
  1326. BCM_REFERENCE(da);
  1327. msg = msg + strlen("create") + 1;
  1328. /* fill ethernet source address */
  1329. for (i = 0; i < ETHER_ADDR_LEN; i++) {
  1330. sa[i] = (uint8)bcm_strtoul(msg, &end_ptr, 16);
  1331. if (*end_ptr == ':') {
  1332. msg = (end_ptr + 1);
  1333. } else if (i != 5) {
  1334. DHD_ERROR(("not a valid source mac addr\n"));
  1335. return BCME_ERROR;
  1336. }
  1337. }
  1338. if (*end_ptr != ' ') {
  1339. DHD_ERROR(("missing: destiantion mac id\n"));
  1340. return BCME_ERROR;
  1341. } else {
  1342. /* skip space */
  1343. msg = end_ptr + 1;
  1344. }
  1345. /* fill ethernet destination address */
  1346. for (i = 0; i < ETHER_ADDR_LEN; i++) {
  1347. da[i] = (uint8)bcm_strtoul(msg, &end_ptr, 16);
  1348. if (*end_ptr == ':') {
  1349. msg = (end_ptr + 1);
  1350. } else if (i != 5) {
  1351. DHD_ERROR(("not a valid destination mac addr\n"));
  1352. return BCME_ERROR;
  1353. }
  1354. }
  1355. if (*end_ptr != ' ') {
  1356. DHD_ERROR(("missing: priority\n"));
  1357. return BCME_ERROR;
  1358. } else {
  1359. msg = end_ptr + 1;
  1360. }
  1361. /* parse priority */
  1362. prio = (uint8)bcm_strtoul(msg, &end_ptr, 10);
  1363. if (prio > MAXPRIO) {
  1364. DHD_ERROR(("%s: invalid priority. Must be between 0-7 inclusive\n",
  1365. __FUNCTION__));
  1366. return BCME_ERROR;
  1367. }
  1368. if (*end_ptr != '\0') {
  1369. DHD_ERROR(("msg not truncated with NULL character\n"));
  1370. return BCME_ERROR;
  1371. }
  1372. ret = dhd_flowid_debug_create(dhd, 0, prio, (char *)sa, (char *)da, &flowid);
  1373. if (ret != BCME_OK) {
  1374. DHD_ERROR(("%s: flowring creation failed ret: %d\n", __FUNCTION__, ret));
  1375. return BCME_ERROR;
  1376. }
  1377. return BCME_OK;
  1378. } else if (cmd && !strcmp(msg, "delete")) {
  1379. msg = msg + strlen("delete") + 1;
  1380. /* parse flowid */
  1381. flowid = (uint16)bcm_strtoul(msg, &end_ptr, 10);
  1382. if (*end_ptr != '\0') {
  1383. DHD_ERROR(("msg not truncated with NULL character\n"));
  1384. return BCME_ERROR;
  1385. }
1386. /* Find flowid from ifidx 0 since this IOVAR creates flowrings with ifidx 0 */
  1387. if (dhd_flowid_find_by_ifidx(dhd, 0, flowid) != BCME_OK)
  1388. {
  1389. DHD_ERROR(("%s : Deleting not created flowid: %u\n", __FUNCTION__, flowid));
  1390. return BCME_ERROR;
  1391. }
  1392. flow_ring_table = (flow_ring_table_t *)dhd->flow_ring_table;
  1393. ret = dhd_bus_flow_ring_delete_request(dhd->bus, (void *)&flow_ring_table[flowid]);
  1394. if (ret != BCME_OK) {
  1395. DHD_ERROR(("%s: flowring deletion failed ret: %d\n", __FUNCTION__, ret));
  1396. return BCME_ERROR;
  1397. }
  1398. return BCME_OK;
  1399. }
  1400. DHD_ERROR(("%s: neither create nor delete\n", __FUNCTION__));
  1401. return BCME_ERROR;
  1402. }
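/* Illustrative sketch (hypothetical, guarded out with #if 0): the two message
 * formats parsed by dhd_flow_ring_debug() above. MAC addresses are
 * colon-separated hex, the priority is decimal 0-7, and "delete" takes the
 * decimal flowid. The addresses and flowid below are placeholders.
 */
#if 0
static void
example_flow_ring_debug(dhd_pub_t *dhd)
{
	char create_cmd[] = "create 00:11:22:33:44:55 66:77:88:99:aa:bb 3";
	char delete_cmd[] = "delete 16";

	(void)dhd_flow_ring_debug(dhd, create_cmd, (uint)strlen(create_cmd));
	(void)dhd_flow_ring_debug(dhd, delete_cmd, (uint)strlen(delete_cmd));
}
#endif /* 0 */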
  1403. #endif /* BCMPCIE */
  1404. #endif /* DHD_DEBUG */
  1405. static int
  1406. dhd_doiovar(dhd_pub_t *dhd_pub, const bcm_iovar_t *vi, uint32 actionid, const char *name,
  1407. void *params, int plen, void *arg, int len, int val_size)
  1408. {
  1409. int bcmerror = 0;
  1410. int32 int_val = 0;
  1411. uint32 dhd_ver_len, bus_api_rev_len;
  1412. DHD_TRACE(("%s: Enter\n", __FUNCTION__));
  1413. DHD_TRACE(("%s: actionid = %d; name %s\n", __FUNCTION__, actionid, name));
  1414. if ((bcmerror = bcm_iovar_lencheck(vi, arg, len, IOV_ISSET(actionid))) != 0)
  1415. goto exit;
  1416. if (plen >= (int)sizeof(int_val))
  1417. bcopy(params, &int_val, sizeof(int_val));
  1418. switch (actionid) {
  1419. case IOV_GVAL(IOV_VERSION):
  1420. /* Need to have checked buffer length */
  1421. dhd_ver_len = strlen(dhd_version);
  1422. bus_api_rev_len = strlen(bus_api_revision);
  1423. if (dhd_ver_len)
  1424. bcm_strncpy_s((char*)arg, dhd_ver_len, dhd_version, dhd_ver_len);
  1425. if (bus_api_rev_len)
  1426. bcm_strncat_s((char*)arg + dhd_ver_len, bus_api_rev_len, bus_api_revision,
  1427. bus_api_rev_len);
  1428. break;
  1429. case IOV_GVAL(IOV_MSGLEVEL):
  1430. int_val = (int32)dhd_msg_level;
  1431. bcopy(&int_val, arg, val_size);
  1432. break;
  1433. case IOV_SVAL(IOV_MSGLEVEL):
  1434. #ifdef WL_CFG80211
  1435. /* Enable DHD and WL logs in oneshot */
  1436. if (int_val & DHD_WL_VAL2)
  1437. wl_cfg80211_enable_trace(TRUE, int_val & (~DHD_WL_VAL2));
  1438. else if (int_val & DHD_WL_VAL)
  1439. wl_cfg80211_enable_trace(FALSE, WL_DBG_DBG);
  1440. if (!(int_val & DHD_WL_VAL2))
  1441. #endif /* WL_CFG80211 */
  1442. dhd_msg_level = int_val;
  1443. break;
  1444. case IOV_GVAL(IOV_BCMERRORSTR):
  1445. bcm_strncpy_s((char *)arg, len, bcmerrorstr(dhd_pub->bcmerror), BCME_STRLEN);
  1446. ((char *)arg)[BCME_STRLEN - 1] = 0x00;
  1447. break;
  1448. case IOV_GVAL(IOV_BCMERROR):
  1449. int_val = (int32)dhd_pub->bcmerror;
  1450. bcopy(&int_val, arg, val_size);
  1451. break;
  1452. case IOV_GVAL(IOV_WDTICK):
  1453. int_val = (int32)dhd_watchdog_ms;
  1454. bcopy(&int_val, arg, val_size);
  1455. break;
  1456. case IOV_SVAL(IOV_WDTICK):
  1457. if (!dhd_pub->up) {
  1458. bcmerror = BCME_NOTUP;
  1459. break;
  1460. }
  1461. dhd_watchdog_ms = (uint)int_val;
  1462. dhd_os_wd_timer(dhd_pub, (uint)int_val);
  1463. break;
  1464. case IOV_GVAL(IOV_DUMP):
  1465. if (dhd_dump(dhd_pub, arg, len) <= 0)
  1466. bcmerror = BCME_ERROR;
  1467. else
  1468. bcmerror = BCME_OK;
  1469. break;
  1470. case IOV_GVAL(IOV_DCONSOLE_POLL):
  1471. int_val = (int32)dhd_pub->dhd_console_ms;
  1472. bcopy(&int_val, arg, val_size);
  1473. break;
  1474. case IOV_SVAL(IOV_DCONSOLE_POLL):
  1475. dhd_pub->dhd_console_ms = (uint)int_val;
  1476. break;
  1477. #if defined(DHD_DEBUG)
  1478. case IOV_SVAL(IOV_CONS):
  1479. if (len > 0)
  1480. bcmerror = dhd_bus_console_in(dhd_pub, arg, len - 1);
  1481. break;
  1482. #endif /* DHD_DEBUG */
  1483. case IOV_SVAL(IOV_CLEARCOUNTS):
  1484. dhd_pub->tx_packets = dhd_pub->rx_packets = 0;
  1485. dhd_pub->tx_errors = dhd_pub->rx_errors = 0;
  1486. dhd_pub->tx_ctlpkts = dhd_pub->rx_ctlpkts = 0;
  1487. dhd_pub->tx_ctlerrs = dhd_pub->rx_ctlerrs = 0;
  1488. dhd_pub->tx_dropped = 0;
  1489. dhd_pub->rx_dropped = 0;
  1490. dhd_pub->tx_pktgetfail = 0;
  1491. dhd_pub->rx_pktgetfail = 0;
  1492. dhd_pub->rx_readahead_cnt = 0;
  1493. dhd_pub->tx_realloc = 0;
  1494. dhd_pub->wd_dpc_sched = 0;
  1495. dhd_pub->tx_big_packets = 0;
  1496. memset(&dhd_pub->dstats, 0, sizeof(dhd_pub->dstats));
  1497. dhd_bus_clearcounts(dhd_pub);
  1498. #ifdef PROP_TXSTATUS
  1499. /* clear proptxstatus related counters */
  1500. dhd_wlfc_clear_counts(dhd_pub);
  1501. #endif /* PROP_TXSTATUS */
  1502. #if defined(DHD_LB_STATS)
  1503. DHD_LB_STATS_RESET(dhd_pub);
  1504. #endif /* DHD_LB_STATS */
  1505. break;
  1506. case IOV_GVAL(IOV_IOCTLTIMEOUT): {
  1507. int_val = (int32)dhd_os_get_ioctl_resp_timeout();
  1508. bcopy(&int_val, arg, sizeof(int_val));
  1509. break;
  1510. }
  1511. case IOV_SVAL(IOV_IOCTLTIMEOUT): {
  1512. if (int_val <= 0)
  1513. bcmerror = BCME_BADARG;
  1514. else
  1515. dhd_os_set_ioctl_resp_timeout((unsigned int)int_val);
  1516. break;
  1517. }
  1518. #ifdef PROP_TXSTATUS
  1519. case IOV_GVAL(IOV_PROPTXSTATUS_ENABLE): {
  1520. bool wlfc_enab = FALSE;
  1521. bcmerror = dhd_wlfc_get_enable(dhd_pub, &wlfc_enab);
  1522. if (bcmerror != BCME_OK)
  1523. goto exit;
  1524. int_val = wlfc_enab ? 1 : 0;
  1525. bcopy(&int_val, arg, val_size);
  1526. break;
  1527. }
  1528. case IOV_SVAL(IOV_PROPTXSTATUS_ENABLE): {
  1529. bool wlfc_enab = FALSE;
  1530. bcmerror = dhd_wlfc_get_enable(dhd_pub, &wlfc_enab);
  1531. if (bcmerror != BCME_OK)
  1532. goto exit;
  1533. /* wlfc is already set as desired */
  1534. if (wlfc_enab == (int_val == 0 ? FALSE : TRUE))
  1535. goto exit;
  1536. if (int_val == TRUE)
  1537. bcmerror = dhd_wlfc_init(dhd_pub);
  1538. else
  1539. bcmerror = dhd_wlfc_deinit(dhd_pub);
  1540. break;
  1541. }
  1542. case IOV_GVAL(IOV_PROPTXSTATUS_MODE):
  1543. bcmerror = dhd_wlfc_get_mode(dhd_pub, &int_val);
  1544. if (bcmerror != BCME_OK)
  1545. goto exit;
  1546. bcopy(&int_val, arg, val_size);
  1547. break;
  1548. case IOV_SVAL(IOV_PROPTXSTATUS_MODE):
  1549. dhd_wlfc_set_mode(dhd_pub, int_val);
  1550. break;
  1551. case IOV_GVAL(IOV_PROPTXSTATUS_MODULE_IGNORE):
  1552. bcmerror = dhd_wlfc_get_module_ignore(dhd_pub, &int_val);
  1553. if (bcmerror != BCME_OK)
  1554. goto exit;
  1555. bcopy(&int_val, arg, val_size);
  1556. break;
  1557. case IOV_SVAL(IOV_PROPTXSTATUS_MODULE_IGNORE):
  1558. dhd_wlfc_set_module_ignore(dhd_pub, int_val);
  1559. break;
  1560. case IOV_GVAL(IOV_PROPTXSTATUS_CREDIT_IGNORE):
  1561. bcmerror = dhd_wlfc_get_credit_ignore(dhd_pub, &int_val);
  1562. if (bcmerror != BCME_OK)
  1563. goto exit;
  1564. bcopy(&int_val, arg, val_size);
  1565. break;
  1566. case IOV_SVAL(IOV_PROPTXSTATUS_CREDIT_IGNORE):
  1567. dhd_wlfc_set_credit_ignore(dhd_pub, int_val);
  1568. break;
  1569. case IOV_GVAL(IOV_PROPTXSTATUS_TXSTATUS_IGNORE):
  1570. bcmerror = dhd_wlfc_get_txstatus_ignore(dhd_pub, &int_val);
  1571. if (bcmerror != BCME_OK)
  1572. goto exit;
  1573. bcopy(&int_val, arg, val_size);
  1574. break;
  1575. case IOV_SVAL(IOV_PROPTXSTATUS_TXSTATUS_IGNORE):
  1576. dhd_wlfc_set_txstatus_ignore(dhd_pub, int_val);
  1577. break;
  1578. case IOV_GVAL(IOV_PROPTXSTATUS_RXPKT_CHK):
  1579. bcmerror = dhd_wlfc_get_rxpkt_chk(dhd_pub, &int_val);
  1580. if (bcmerror != BCME_OK)
  1581. goto exit;
  1582. bcopy(&int_val, arg, val_size);
  1583. break;
  1584. case IOV_SVAL(IOV_PROPTXSTATUS_RXPKT_CHK):
  1585. dhd_wlfc_set_rxpkt_chk(dhd_pub, int_val);
  1586. break;
  1587. #endif /* PROP_TXSTATUS */
  1588. case IOV_GVAL(IOV_BUS_TYPE):
1589. /* The dhd application queries the driver to check whether the bus is USB, SDIO, or PCIe. */
  1590. #ifdef BCMDHDUSB
  1591. int_val = BUS_TYPE_USB;
  1592. #endif // endif
  1593. #ifdef BCMSDIO
  1594. int_val = BUS_TYPE_SDIO;
  1595. #endif // endif
  1596. #ifdef PCIE_FULL_DONGLE
  1597. int_val = BUS_TYPE_PCIE;
  1598. #endif // endif
  1599. bcopy(&int_val, arg, val_size);
  1600. break;
  1601. case IOV_SVAL(IOV_CHANGEMTU):
  1602. int_val &= 0xffff;
  1603. bcmerror = dhd_change_mtu(dhd_pub, int_val, 0);
  1604. break;
  1605. case IOV_GVAL(IOV_HOSTREORDER_FLOWS):
  1606. {
  1607. uint i = 0;
  1608. uint8 *ptr = (uint8 *)arg;
  1609. uint8 count = 0;
  1610. ptr++;
  1611. for (i = 0; i < WLHOST_REORDERDATA_MAXFLOWS; i++) {
  1612. if (dhd_pub->reorder_bufs[i] != NULL) {
  1613. *ptr = dhd_pub->reorder_bufs[i]->flow_id;
  1614. ptr++;
  1615. count++;
  1616. }
  1617. }
  1618. ptr = (uint8 *)arg;
  1619. *ptr = count;
  1620. break;
  1621. }
  1622. #ifdef DHDTCPACK_SUPPRESS
  1623. case IOV_GVAL(IOV_TCPACK_SUPPRESS): {
  1624. int_val = (uint32)dhd_pub->tcpack_sup_mode;
  1625. bcopy(&int_val, arg, val_size);
  1626. break;
  1627. }
  1628. case IOV_SVAL(IOV_TCPACK_SUPPRESS): {
  1629. bcmerror = dhd_tcpack_suppress_set(dhd_pub, (uint8)int_val);
  1630. break;
  1631. }
  1632. #endif /* DHDTCPACK_SUPPRESS */
  1633. #ifdef DHD_L2_FILTER
  1634. case IOV_GVAL(IOV_DHCP_UNICAST): {
  1635. uint32 bssidx;
  1636. const char *val;
  1637. if (dhd_iovar_parse_bssidx(dhd_pub, name, &bssidx, &val) != BCME_OK) {
  1638. DHD_ERROR(("%s: IOV_DHCP_UNICAST: bad parameterand name = %s\n",
  1639. __FUNCTION__, name));
  1640. bcmerror = BCME_BADARG;
  1641. break;
  1642. }
  1643. int_val = dhd_get_dhcp_unicast_status(dhd_pub, bssidx);
  1644. memcpy(arg, &int_val, val_size);
  1645. break;
  1646. }
  1647. case IOV_SVAL(IOV_DHCP_UNICAST): {
  1648. uint32 bssidx;
  1649. const char *val;
  1650. if (dhd_iovar_parse_bssidx(dhd_pub, name, &bssidx, &val) != BCME_OK) {
  1651. DHD_ERROR(("%s: IOV_DHCP_UNICAST: bad parameterand name = %s\n",
  1652. __FUNCTION__, name));
  1653. bcmerror = BCME_BADARG;
  1654. break;
  1655. }
  1656. memcpy(&int_val, val, sizeof(int_val));
  1657. bcmerror = dhd_set_dhcp_unicast_status(dhd_pub, bssidx, int_val ? 1 : 0);
  1658. break;
  1659. }
  1660. case IOV_GVAL(IOV_BLOCK_PING): {
  1661. uint32 bssidx;
  1662. const char *val;
  1663. if (dhd_iovar_parse_bssidx(dhd_pub, name, &bssidx, &val) != BCME_OK) {
  1664. DHD_ERROR(("%s: IOV_BLOCK_PING: bad parameter\n", __FUNCTION__));
  1665. bcmerror = BCME_BADARG;
  1666. break;
  1667. }
  1668. int_val = dhd_get_block_ping_status(dhd_pub, bssidx);
  1669. memcpy(arg, &int_val, val_size);
  1670. break;
  1671. }
  1672. case IOV_SVAL(IOV_BLOCK_PING): {
  1673. uint32 bssidx;
  1674. const char *val;
  1675. if (dhd_iovar_parse_bssidx(dhd_pub, name, &bssidx, &val) != BCME_OK) {
  1676. DHD_ERROR(("%s: IOV_BLOCK_PING: bad parameter\n", __FUNCTION__));
  1677. bcmerror = BCME_BADARG;
  1678. break;
  1679. }
  1680. memcpy(&int_val, val, sizeof(int_val));
  1681. bcmerror = dhd_set_block_ping_status(dhd_pub, bssidx, int_val ? 1 : 0);
  1682. break;
  1683. }
  1684. case IOV_GVAL(IOV_PROXY_ARP): {
  1685. uint32 bssidx;
  1686. const char *val;
  1687. if (dhd_iovar_parse_bssidx(dhd_pub, name, &bssidx, &val) != BCME_OK) {
  1688. DHD_ERROR(("%s: IOV_PROXY_ARP: bad parameter\n", __FUNCTION__));
  1689. bcmerror = BCME_BADARG;
  1690. break;
  1691. }
  1692. int_val = dhd_get_parp_status(dhd_pub, bssidx);
  1693. bcopy(&int_val, arg, val_size);
  1694. break;
  1695. }
  1696. case IOV_SVAL(IOV_PROXY_ARP): {
  1697. uint32 bssidx;
  1698. const char *val;
  1699. if (dhd_iovar_parse_bssidx(dhd_pub, name, &bssidx, &val) != BCME_OK) {
  1700. DHD_ERROR(("%s: IOV_PROXY_ARP: bad parameter\n", __FUNCTION__));
  1701. bcmerror = BCME_BADARG;
  1702. break;
  1703. }
  1704. bcopy(val, &int_val, sizeof(int_val));
1705. /* Issue an iovar request to WL to update the proxy arp capability bit
  1706. * in the Extended Capability IE of beacons/probe responses.
  1707. */
  1708. bcmerror = dhd_iovar(dhd_pub, bssidx, "proxy_arp_advertise", val, sizeof(int_val),
  1709. NULL, 0, TRUE);
  1710. if (bcmerror == BCME_OK) {
  1711. dhd_set_parp_status(dhd_pub, bssidx, int_val ? 1 : 0);
  1712. }
  1713. break;
  1714. }
  1715. case IOV_GVAL(IOV_GRAT_ARP): {
  1716. uint32 bssidx;
  1717. const char *val;
  1718. if (dhd_iovar_parse_bssidx(dhd_pub, name, &bssidx, &val) != BCME_OK) {
  1719. DHD_ERROR(("%s: IOV_GRAT_ARP: bad parameter\n", __FUNCTION__));
  1720. bcmerror = BCME_BADARG;
  1721. break;
  1722. }
  1723. int_val = dhd_get_grat_arp_status(dhd_pub, bssidx);
  1724. memcpy(arg, &int_val, val_size);
  1725. break;
  1726. }
  1727. case IOV_SVAL(IOV_GRAT_ARP): {
  1728. uint32 bssidx;
  1729. const char *val;
  1730. if (dhd_iovar_parse_bssidx(dhd_pub, name, &bssidx, &val) != BCME_OK) {
  1731. DHD_ERROR(("%s: IOV_GRAT_ARP: bad parameter\n", __FUNCTION__));
  1732. bcmerror = BCME_BADARG;
  1733. break;
  1734. }
  1735. memcpy(&int_val, val, sizeof(int_val));
  1736. bcmerror = dhd_set_grat_arp_status(dhd_pub, bssidx, int_val ? 1 : 0);
  1737. break;
  1738. }
  1739. case IOV_GVAL(IOV_BLOCK_TDLS): {
  1740. uint32 bssidx;
  1741. const char *val;
  1742. if (dhd_iovar_parse_bssidx(dhd_pub, name, &bssidx, &val) != BCME_OK) {
  1743. DHD_ERROR(("%s: IOV_BLOCK_TDLS: bad parameter\n", __FUNCTION__));
  1744. bcmerror = BCME_BADARG;
  1745. break;
  1746. }
  1747. int_val = dhd_get_block_tdls_status(dhd_pub, bssidx);
  1748. memcpy(arg, &int_val, val_size);
  1749. break;
  1750. }
  1751. case IOV_SVAL(IOV_BLOCK_TDLS): {
  1752. uint32 bssidx;
  1753. const char *val;
  1754. if (dhd_iovar_parse_bssidx(dhd_pub, name, &bssidx, &val) != BCME_OK) {
  1755. DHD_ERROR(("%s: IOV_BLOCK_TDLS: bad parameter\n", __FUNCTION__));
  1756. bcmerror = BCME_BADARG;
  1757. break;
  1758. }
  1759. memcpy(&int_val, val, sizeof(int_val));
  1760. bcmerror = dhd_set_block_tdls_status(dhd_pub, bssidx, int_val ? 1 : 0);
  1761. break;
  1762. }
  1763. #endif /* DHD_L2_FILTER */
  1764. case IOV_SVAL(IOV_DHD_IE): {
  1765. uint32 bssidx;
  1766. const char *val;
  1767. if (dhd_iovar_parse_bssidx(dhd_pub, name, &bssidx, &val) != BCME_OK) {
  1768. DHD_ERROR(("%s: dhd ie: bad parameter\n", __FUNCTION__));
  1769. bcmerror = BCME_BADARG;
  1770. break;
  1771. }
  1772. break;
  1773. }
  1774. case IOV_GVAL(IOV_AP_ISOLATE): {
  1775. uint32 bssidx;
  1776. const char *val;
  1777. if (dhd_iovar_parse_bssidx(dhd_pub, name, &bssidx, &val) != BCME_OK) {
  1778. DHD_ERROR(("%s: ap isoalate: bad parameter\n", __FUNCTION__));
  1779. bcmerror = BCME_BADARG;
  1780. break;
  1781. }
  1782. int_val = dhd_get_ap_isolate(dhd_pub, bssidx);
  1783. bcopy(&int_val, arg, val_size);
  1784. break;
  1785. }
  1786. case IOV_SVAL(IOV_AP_ISOLATE): {
  1787. uint32 bssidx;
  1788. const char *val;
  1789. if (dhd_iovar_parse_bssidx(dhd_pub, name, &bssidx, &val) != BCME_OK) {
  1790. DHD_ERROR(("%s: ap isolate: bad parameter\n", __FUNCTION__));
  1791. bcmerror = BCME_BADARG;
  1792. break;
  1793. }
  1794. ASSERT(val);
  1795. bcopy(val, &int_val, sizeof(uint32));
  1796. dhd_set_ap_isolate(dhd_pub, bssidx, int_val);
  1797. break;
  1798. }
  1799. #ifdef DHD_PSTA
  1800. case IOV_GVAL(IOV_PSTA): {
  1801. int_val = dhd_get_psta_mode(dhd_pub);
  1802. bcopy(&int_val, arg, val_size);
  1803. break;
  1804. }
  1805. case IOV_SVAL(IOV_PSTA): {
  1806. if (int_val >= DHD_MODE_PSTA_DISABLED && int_val <= DHD_MODE_PSR) {
  1807. dhd_set_psta_mode(dhd_pub, int_val);
  1808. } else {
  1809. bcmerror = BCME_RANGE;
  1810. }
  1811. break;
  1812. }
  1813. #endif /* DHD_PSTA */
  1814. #ifdef DHD_WET
  1815. case IOV_GVAL(IOV_WET):
  1816. int_val = dhd_get_wet_mode(dhd_pub);
  1817. bcopy(&int_val, arg, val_size);
  1818. break;
  1819. case IOV_SVAL(IOV_WET):
  1820. if (int_val == 0 || int_val == 1) {
  1821. dhd_set_wet_mode(dhd_pub, int_val);
  1822. /* Delete the WET DB when disabled */
  1823. if (!int_val) {
  1824. dhd_wet_sta_delete_list(dhd_pub);
  1825. }
  1826. } else {
  1827. bcmerror = BCME_RANGE;
  1828. }
  1829. break;
  1830. case IOV_SVAL(IOV_WET_HOST_IPV4):
  1831. dhd_set_wet_host_ipv4(dhd_pub, params, plen);
  1832. break;
  1833. case IOV_SVAL(IOV_WET_HOST_MAC):
  1834. dhd_set_wet_host_mac(dhd_pub, params, plen);
  1835. break;
  1836. #endif /* DHD_WET */
  1837. #ifdef DHD_MCAST_REGEN
  1838. case IOV_GVAL(IOV_MCAST_REGEN_BSS_ENABLE): {
  1839. uint32 bssidx;
  1840. const char *val;
  1841. if (dhd_iovar_parse_bssidx(dhd_pub, (char *)name, &bssidx, &val) != BCME_OK) {
  1842. DHD_ERROR(("%s: mcast_regen_bss_enable: bad parameter\n", __FUNCTION__));
  1843. bcmerror = BCME_BADARG;
  1844. break;
  1845. }
  1846. int_val = dhd_get_mcast_regen_bss_enable(dhd_pub, bssidx);
  1847. bcopy(&int_val, arg, val_size);
  1848. break;
  1849. }
  1850. case IOV_SVAL(IOV_MCAST_REGEN_BSS_ENABLE): {
  1851. uint32 bssidx;
  1852. const char *val;
  1853. if (dhd_iovar_parse_bssidx(dhd_pub, (char *)name, &bssidx, &val) != BCME_OK) {
  1854. DHD_ERROR(("%s: mcast_regen_bss_enable: bad parameter\n", __FUNCTION__));
  1855. bcmerror = BCME_BADARG;
  1856. break;
  1857. }
  1858. ASSERT(val);
  1859. bcopy(val, &int_val, sizeof(uint32));
  1860. dhd_set_mcast_regen_bss_enable(dhd_pub, bssidx, int_val);
  1861. break;
  1862. }
  1863. #endif /* DHD_MCAST_REGEN */
  1864. case IOV_GVAL(IOV_CFG80211_OPMODE): {
  1865. int_val = (int32)dhd_pub->op_mode;
  1866. bcopy(&int_val, arg, sizeof(int_val));
  1867. break;
  1868. }
  1869. case IOV_SVAL(IOV_CFG80211_OPMODE): {
  1870. if (int_val <= 0)
  1871. bcmerror = BCME_BADARG;
  1872. else
  1873. dhd_pub->op_mode = int_val;
  1874. break;
  1875. }
  1876. case IOV_GVAL(IOV_ASSERT_TYPE):
  1877. int_val = g_assert_type;
  1878. bcopy(&int_val, arg, val_size);
  1879. break;
  1880. case IOV_SVAL(IOV_ASSERT_TYPE):
  1881. g_assert_type = (uint32)int_val;
  1882. break;
  1883. #if !defined(MACOSX_DHD)
  1884. case IOV_GVAL(IOV_LMTEST): {
  1885. *(uint32 *)arg = (uint32)lmtest;
  1886. break;
  1887. }
  1888. case IOV_SVAL(IOV_LMTEST): {
  1889. uint32 val = *(uint32 *)arg;
  1890. if (val > 50)
  1891. bcmerror = BCME_BADARG;
  1892. else {
  1893. lmtest = (uint)val;
  1894. DHD_ERROR(("%s: lmtest %s\n",
  1895. __FUNCTION__, (lmtest == FALSE)? "OFF" : "ON"));
  1896. }
  1897. break;
  1898. }
  1899. #endif // endif
  1900. #ifdef SHOW_LOGTRACE
  1901. case IOV_GVAL(IOV_DUMP_TRACE_LOG): {
  1902. trace_buf_info_t *trace_buf_info = (trace_buf_info_t *)arg;
  1903. dhd_dbg_ring_t *dbg_verbose_ring = NULL;
  1904. dbg_verbose_ring = dhd_dbg_get_ring_from_ring_id(dhd_pub, FW_VERBOSE_RING_ID);
  1905. if (dbg_verbose_ring == NULL) {
  1906. DHD_ERROR(("dbg_verbose_ring is NULL\n"));
  1907. bcmerror = BCME_UNSUPPORTED;
  1908. break;
  1909. }
  1910. if (trace_buf_info != NULL) {
  1911. bzero(trace_buf_info, sizeof(trace_buf_info_t));
  1912. dhd_dbg_read_ring_into_trace_buf(dbg_verbose_ring, trace_buf_info);
  1913. } else {
  1914. DHD_ERROR(("%s: arg is NULL\n", __FUNCTION__));
  1915. bcmerror = BCME_NOMEM;
  1916. }
  1917. break;
  1918. }
  1919. #endif /* SHOW_LOGTRACE */
  1920. case IOV_GVAL(IOV_DONGLE_TRAP_TYPE):
  1921. if (dhd_pub->dongle_trap_occured)
  1922. int_val = ltoh32(dhd_pub->last_trap_info.type);
  1923. else
  1924. int_val = 0;
  1925. bcopy(&int_val, arg, val_size);
  1926. break;
  1927. case IOV_GVAL(IOV_DONGLE_TRAP_INFO):
  1928. {
  1929. struct bcmstrbuf strbuf;
  1930. bcm_binit(&strbuf, arg, len);
  1931. if (dhd_pub->dongle_trap_occured == FALSE) {
  1932. bcm_bprintf(&strbuf, "no trap recorded\n");
  1933. break;
  1934. }
  1935. dhd_bus_dump_trap_info(dhd_pub->bus, &strbuf);
  1936. break;
  1937. }
  1938. #ifdef DHD_DEBUG
  1939. #if defined(BCMSDIO) || defined(BCMPCIE)
  1940. case IOV_GVAL(IOV_BPADDR):
  1941. {
  1942. sdreg_t sdreg;
  1943. uint32 addr, size;
  1944. memcpy(&sdreg, params, sizeof(sdreg));
  1945. addr = sdreg.offset;
  1946. size = sdreg.func;
  1947. bcmerror = dhd_bus_readwrite_bp_addr(dhd_pub, addr, size,
  1948. (uint *)&int_val, TRUE);
  1949. memcpy(arg, &int_val, sizeof(int32));
  1950. break;
  1951. }
  1952. case IOV_SVAL(IOV_BPADDR):
  1953. {
  1954. sdreg_t sdreg;
  1955. uint32 addr, size;
  1956. memcpy(&sdreg, params, sizeof(sdreg));
  1957. addr = sdreg.offset;
  1958. size = sdreg.func;
  1959. bcmerror = dhd_bus_readwrite_bp_addr(dhd_pub, addr, size,
  1960. (uint *)&sdreg.value,
  1961. FALSE);
  1962. break;
  1963. }
  1964. #endif /* BCMSDIO || BCMPCIE */
  1965. #ifdef BCMPCIE
  1966. case IOV_SVAL(IOV_FLOW_RING_DEBUG):
  1967. {
  1968. bcmerror = dhd_flow_ring_debug(dhd_pub, arg, len);
  1969. break;
  1970. }
  1971. #endif /* BCMPCIE */
  1972. case IOV_SVAL(IOV_MEM_DEBUG):
  1973. if (len > 0) {
  1974. bcmerror = dhd_mem_debug(dhd_pub, arg, len - 1);
  1975. }
  1976. break;
  1977. #endif /* DHD_DEBUG */
  1978. #if defined(DHD_LOG_DUMP)
  1979. case IOV_GVAL(IOV_LOG_DUMP):
  1980. {
  1981. dhd_prot_debug_info_print(dhd_pub);
  1982. dhd_log_dump_trigger(dhd_pub, CMD_DEFAULT);
  1983. break;
  1984. }
  1985. #endif /* DHD_LOG_DUMP */
  1986. #ifndef OEM_ANDROID
  1987. case IOV_GVAL(IOV_TPUT_TEST):
  1988. {
  1989. tput_test_t *tput_data = NULL;
  1990. if (params && plen >= sizeof(tput_test_t)) {
  1991. tput_data = (tput_test_t *)params;
  1992. bcmerror = dhd_tput_test(dhd_pub, tput_data);
  1993. } else {
  1994. DHD_ERROR(("%s: tput test - no input params ! \n", __FUNCTION__));
  1995. bcmerror = BCME_BADARG;
  1996. }
  1997. break;
  1998. }
  1999. #endif /* OEM_ANDROID */
  2000. case IOV_GVAL(IOV_DEBUG_BUF_DEST_STAT):
  2001. {
  2002. if (dhd_pub->debug_buf_dest_support) {
  2003. debug_buf_dest_stat_t *debug_buf_dest_stat =
  2004. (debug_buf_dest_stat_t *)arg;
  2005. memcpy(debug_buf_dest_stat, dhd_pub->debug_buf_dest_stat,
  2006. sizeof(dhd_pub->debug_buf_dest_stat));
  2007. } else {
  2008. bcmerror = BCME_DISABLED;
  2009. }
  2010. break;
  2011. }
  2012. #ifdef DHD_DEBUG
  2013. case IOV_SVAL(IOV_INDUCE_ERROR): {
  2014. if (int_val >= DHD_INDUCE_ERROR_MAX) {
  2015. DHD_ERROR(("%s: Invalid command : %u\n", __FUNCTION__, (uint16)int_val));
  2016. } else {
  2017. dhd_pub->dhd_induce_error = (uint16)int_val;
  2018. }
  2019. break;
  2020. }
  2021. #endif /* DHD_DEBUG */
  2022. #ifdef WL_IFACE_MGMT_CONF
  2023. #ifdef WL_CFG80211
  2024. #ifdef WL_NANP2P
  2025. case IOV_GVAL(IOV_CONC_DISC): {
  2026. int_val = wl_cfg80211_get_iface_conc_disc(
  2027. dhd_linux_get_primary_netdev(dhd_pub));
  2028. bcopy(&int_val, arg, sizeof(int_val));
  2029. break;
  2030. }
  2031. case IOV_SVAL(IOV_CONC_DISC): {
  2032. bcmerror = wl_cfg80211_set_iface_conc_disc(
  2033. dhd_linux_get_primary_netdev(dhd_pub), (uint8)int_val);
  2034. break;
  2035. }
  2036. #endif /* WL_NANP2P */
  2037. #ifdef WL_IFACE_MGMT
  2038. case IOV_GVAL(IOV_IFACE_POLICY): {
  2039. int_val = wl_cfg80211_get_iface_policy(
  2040. dhd_linux_get_primary_netdev(dhd_pub));
  2041. bcopy(&int_val, arg, sizeof(int_val));
  2042. break;
  2043. }
  2044. case IOV_SVAL(IOV_IFACE_POLICY): {
  2045. bcmerror = wl_cfg80211_set_iface_policy(
  2046. dhd_linux_get_primary_netdev(dhd_pub),
  2047. arg, len);
  2048. break;
  2049. }
  2050. #endif /* WL_IFACE_MGMT */
  2051. #endif /* WL_CFG80211 */
  2052. #endif /* WL_IFACE_MGMT_CONF */
  2053. default:
  2054. bcmerror = BCME_UNSUPPORTED;
  2055. break;
  2056. }
  2057. exit:
  2058. DHD_TRACE(("%s: actionid %d, bcmerror %d\n", __FUNCTION__, actionid, bcmerror));
  2059. return bcmerror;
  2060. }
  2061. /* Store the status of a connection attempt for later retrieval by an iovar */
  2062. void
  2063. dhd_store_conn_status(uint32 event, uint32 status, uint32 reason)
  2064. {
  2065. /* Do not overwrite a WLC_E_PRUNE with a WLC_E_SET_SSID
  2066. * because an encryption/rsn mismatch results in both events, and
  2067. * the important information is in the WLC_E_PRUNE.
  2068. */
  2069. if (!(event == WLC_E_SET_SSID && status == WLC_E_STATUS_FAIL &&
  2070. dhd_conn_event == WLC_E_PRUNE)) {
  2071. dhd_conn_event = event;
  2072. dhd_conn_status = status;
  2073. dhd_conn_reason = reason;
  2074. }
  2075. }
  2076. bool
  2077. dhd_prec_enq(dhd_pub_t *dhdp, struct pktq *q, void *pkt, int prec)
  2078. {
  2079. void *p;
  2080. int eprec = -1; /* precedence to evict from */
  2081. bool discard_oldest;
  2082. /* Fast case, precedence queue is not full and we are also not
  2083. * exceeding total queue length
  2084. */
  2085. if (!pktqprec_full(q, prec) && !pktq_full(q)) {
  2086. pktq_penq(q, prec, pkt);
  2087. return TRUE;
  2088. }
  2089. /* Determine precedence from which to evict packet, if any */
  2090. if (pktqprec_full(q, prec))
  2091. eprec = prec;
  2092. else if (pktq_full(q)) {
  2093. p = pktq_peek_tail(q, &eprec);
  2094. ASSERT(p);
  2095. if (eprec > prec || eprec < 0)
  2096. return FALSE;
  2097. }
  2098. /* Evict if needed */
  2099. if (eprec >= 0) {
  2100. /* Detect queueing to unconfigured precedence */
  2101. ASSERT(!pktqprec_empty(q, eprec));
  2102. discard_oldest = AC_BITMAP_TST(dhdp->wme_dp, eprec);
  2103. if (eprec == prec && !discard_oldest)
  2104. return FALSE; /* refuse newer (incoming) packet */
  2105. /* Evict packet according to discard policy */
  2106. p = discard_oldest ? pktq_pdeq(q, eprec) : pktq_pdeq_tail(q, eprec);
  2107. ASSERT(p);
  2108. #ifdef DHDTCPACK_SUPPRESS
  2109. if (dhd_tcpack_check_xmit(dhdp, p) == BCME_ERROR) {
  2110. DHD_ERROR(("%s %d: tcpack_suppress ERROR!!! Stop using it\n",
  2111. __FUNCTION__, __LINE__));
  2112. dhd_tcpack_suppress_set(dhdp, TCPACK_SUP_OFF);
  2113. }
  2114. #endif /* DHDTCPACK_SUPPRESS */
  2115. PKTFREE(dhdp->osh, p, TRUE);
  2116. }
  2117. /* Enqueue */
  2118. p = pktq_penq(q, prec, pkt);
  2119. ASSERT(p);
  2120. return TRUE;
  2121. }
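/* Illustrative sketch (hypothetical, guarded out with #if 0): how a TX path
 * typically reacts to dhd_prec_enq() above. A FALSE return means the new
 * packet was refused (the queue is full and no packet of equal or lower
 * precedence may be evicted under the discard policy), so the caller drops
 * it itself. 'txq' and the accounting below are placeholder names.
 */
#if 0
static void
example_tx_enqueue(dhd_pub_t *dhdp, struct pktq *txq, void *pkt, int prec)
{
	if (!dhd_prec_enq(dhdp, txq, pkt, prec)) {
		/* refused: free the new packet and account the drop */
		PKTFREE(dhdp->osh, pkt, TRUE);
		dhdp->tx_dropped++;
	}
}
#endif /* 0 */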
  2122. /*
2123. * Function to drop the proper pkts from a queue:
2124. * If any pkt in the queue is non-fragmented, drop only the first non-fragmented pkt.
2125. * If all pkts in the queue are fragmented, find and drop one complete set of fragmented pkts.
2126. * If no pkts match the two cases above, drop the first pkt anyway.
  2127. */
  2128. bool
  2129. dhd_prec_drop_pkts(dhd_pub_t *dhdp, struct pktq *pq, int prec, f_droppkt_t fn)
  2130. {
  2131. struct pktq_prec *q = NULL;
  2132. void *p, *prev = NULL, *next = NULL, *first = NULL, *last = NULL, *prev_first = NULL;
  2133. pkt_frag_t frag_info;
  2134. ASSERT(dhdp && pq);
  2135. ASSERT(prec >= 0 && prec < pq->num_prec);
  2136. q = &pq->q[prec];
  2137. p = q->head;
  2138. if (p == NULL)
  2139. return FALSE;
  2140. while (p) {
  2141. frag_info = pkt_frag_info(dhdp->osh, p);
  2142. if (frag_info == DHD_PKT_FRAG_NONE) {
  2143. break;
  2144. } else if (frag_info == DHD_PKT_FRAG_FIRST) {
  2145. if (first) {
  2146. /* No last frag pkt, use prev as last */
  2147. last = prev;
  2148. break;
  2149. } else {
  2150. first = p;
  2151. prev_first = prev;
  2152. }
  2153. } else if (frag_info == DHD_PKT_FRAG_LAST) {
  2154. if (first) {
  2155. last = p;
  2156. break;
  2157. }
  2158. }
  2159. prev = p;
  2160. p = PKTLINK(p);
  2161. }
  2162. if ((p == NULL) || ((frag_info != DHD_PKT_FRAG_NONE) && !(first && last))) {
  2163. /* Not found matching pkts, use oldest */
  2164. prev = NULL;
  2165. p = q->head;
  2166. frag_info = 0;
  2167. }
  2168. if (frag_info == DHD_PKT_FRAG_NONE) {
  2169. first = last = p;
  2170. prev_first = prev;
  2171. }
  2172. p = first;
  2173. while (p) {
  2174. next = PKTLINK(p);
  2175. q->n_pkts--;
  2176. pq->n_pkts_tot--;
  2177. #ifdef WL_TXQ_STALL
  2178. q->dequeue_count++;
  2179. #endif // endif
  2180. PKTSETLINK(p, NULL);
  2181. if (fn)
  2182. fn(dhdp, prec, p, TRUE);
  2183. if (p == last)
  2184. break;
  2185. p = next;
  2186. }
  2187. if (prev_first == NULL) {
  2188. if ((q->head = next) == NULL)
  2189. q->tail = NULL;
  2190. } else {
  2191. PKTSETLINK(prev_first, next);
  2192. if (!next)
  2193. q->tail = prev_first;
  2194. }
  2195. return TRUE;
  2196. }
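/* Illustrative sketch (hypothetical, guarded out with #if 0): using
 * dhd_prec_drop_pkts() above with a drop callback. The callback signature is
 * inferred from the call site inside the function (dhdp, prec, pkt, in-queue
 * flag); the names and the accounting here are placeholders. One call frees
 * either one non-fragmented packet or one complete fragment train from the
 * given precedence.
 */
#if 0
static void
example_drop_cb(dhd_pub_t *dhdp, int prec, void *p, bool bPktInQ)
{
	/* account for and free the evicted packet */
	dhdp->tx_dropped++;
	PKTFREE(dhdp->osh, p, TRUE);
}

static void
example_make_room(dhd_pub_t *dhdp, struct pktq *pq, int prec)
{
	(void)dhd_prec_drop_pkts(dhdp, pq, prec, example_drop_cb);
}
#endif /* 0 */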
  2197. static int
  2198. dhd_iovar_op(dhd_pub_t *dhd_pub, const char *name,
  2199. void *params, int plen, void *arg, int len, bool set)
  2200. {
  2201. int bcmerror = 0;
  2202. int val_size;
  2203. const bcm_iovar_t *vi = NULL;
  2204. uint32 actionid;
  2205. DHD_TRACE(("%s: Enter\n", __FUNCTION__));
  2206. ASSERT(name);
  2207. ASSERT(len >= 0);
  2208. /* Get MUST have return space */
  2209. ASSERT(set || (arg && len));
  2210. /* Set does NOT take qualifiers */
  2211. ASSERT(!set || (!params && !plen));
  2212. if ((vi = bcm_iovar_lookup(dhd_iovars, name)) == NULL) {
  2213. bcmerror = BCME_UNSUPPORTED;
  2214. goto exit;
  2215. }
  2216. DHD_CTL(("%s: %s %s, len %d plen %d\n", __FUNCTION__,
  2217. name, (set ? "set" : "get"), len, plen));
  2218. /* set up 'params' pointer in case this is a set command so that
  2219. * the convenience int and bool code can be common to set and get
  2220. */
  2221. if (params == NULL) {
  2222. params = arg;
  2223. plen = len;
  2224. }
  2225. if (vi->type == IOVT_VOID)
  2226. val_size = 0;
  2227. else if (vi->type == IOVT_BUFFER)
  2228. val_size = len;
  2229. else
  2230. /* all other types are integer sized */
  2231. val_size = sizeof(int);
  2232. actionid = set ? IOV_SVAL(vi->varid) : IOV_GVAL(vi->varid);
  2233. bcmerror = dhd_doiovar(dhd_pub, vi, actionid, name, params, plen, arg, len, val_size);
  2234. exit:
  2235. return bcmerror;
  2236. }
  2237. int
  2238. dhd_ioctl(dhd_pub_t * dhd_pub, dhd_ioctl_t *ioc, void *buf, uint buflen)
  2239. {
  2240. int bcmerror = 0;
  2241. unsigned long flags;
  2242. DHD_TRACE(("%s: Enter\n", __FUNCTION__));
  2243. if (!buf) {
  2244. return BCME_BADARG;
  2245. }
  2246. dhd_os_dhdiovar_lock(dhd_pub);
  2247. switch (ioc->cmd) {
  2248. case DHD_GET_MAGIC:
  2249. if (buflen < sizeof(int))
  2250. bcmerror = BCME_BUFTOOSHORT;
  2251. else
  2252. *(int*)buf = DHD_IOCTL_MAGIC;
  2253. break;
  2254. case DHD_GET_VERSION:
  2255. if (buflen < sizeof(int))
  2256. bcmerror = BCME_BUFTOOSHORT;
  2257. else
  2258. *(int*)buf = DHD_IOCTL_VERSION;
  2259. break;
  2260. case DHD_GET_VAR:
  2261. case DHD_SET_VAR:
  2262. {
  2263. char *arg;
  2264. uint arglen;
  2265. DHD_LINUX_GENERAL_LOCK(dhd_pub, flags);
  2266. if (DHD_BUS_CHECK_DOWN_OR_DOWN_IN_PROGRESS(dhd_pub) &&
  2267. bcmstricmp((char *)buf, "devreset")) {
2268. /* On platforms like FC19, the FW download is done via IOCTL,
2269. * so do not return an error for IOCTLs fired before the FW
2270. * download is done.
2271. */
  2272. if (dhd_fw_download_status(dhd_pub) == FW_DOWNLOAD_DONE) {
  2273. DHD_ERROR(("%s: returning as busstate=%d\n",
  2274. __FUNCTION__, dhd_pub->busstate));
  2275. DHD_LINUX_GENERAL_UNLOCK(dhd_pub, flags);
  2276. dhd_os_dhdiovar_unlock(dhd_pub);
  2277. return -ENODEV;
  2278. }
  2279. }
  2280. DHD_BUS_BUSY_SET_IN_DHD_IOVAR(dhd_pub);
  2281. DHD_LINUX_GENERAL_UNLOCK(dhd_pub, flags);
  2282. #ifdef DHD_PCIE_RUNTIMEPM
  2283. dhdpcie_runtime_bus_wake(dhd_pub, TRUE, dhd_ioctl);
  2284. #endif /* DHD_PCIE_RUNTIMEPM */
  2285. DHD_LINUX_GENERAL_LOCK(dhd_pub, flags);
  2286. if (DHD_BUS_CHECK_SUSPEND_OR_SUSPEND_IN_PROGRESS(dhd_pub)) {
2287. /* If Suspend/Resume is being tested via the pcie_suspend IOVAR,
2288. * continue to execute that IOVAR and return from here for all
2289. * other IOVARs; pciecfgreg, devreset and sdio_suspend are also
2290. * allowed to go through.
2291. */
  2292. if (bcmstricmp((char *)buf, "pcie_suspend") &&
  2293. bcmstricmp((char *)buf, "pciecfgreg") &&
  2294. bcmstricmp((char *)buf, "devreset") &&
  2295. bcmstricmp((char *)buf, "sdio_suspend")) {
2296. DHD_ERROR(("%s: bus is in suspend(%d) "
  2297. "or suspending(0x%x) state\n",
  2298. __FUNCTION__, dhd_pub->busstate,
  2299. dhd_pub->dhd_bus_busy_state));
  2300. DHD_BUS_BUSY_CLEAR_IN_DHD_IOVAR(dhd_pub);
  2301. dhd_os_busbusy_wake(dhd_pub);
  2302. DHD_LINUX_GENERAL_UNLOCK(dhd_pub, flags);
  2303. dhd_os_dhdiovar_unlock(dhd_pub);
  2304. return -ENODEV;
  2305. }
  2306. }
2307. /* During the devreset ioctl we call dhdpcie_advertise_bus_cleanup,
2308. * which waits a fixed time for all busy contexts to finish and
2309. * calls ASSERT on timeout. Since DHD_BUS_BUSY_SET_IN_DHD_IOVAR was
2310. * set for this ioctl, clear the IOVAR busy state here to avoid that
2311. * ASSERT. The "devreset" ioctl is not used on production platforms;
2312. * it is only used in FC19 setups.
2313. */
  2314. if (!bcmstricmp((char *)buf, "devreset") ||
  2315. #ifdef BCMPCIE
  2316. (dhd_bus_is_multibp_capable(dhd_pub->bus) &&
  2317. !bcmstricmp((char *)buf, "dwnldstate")) ||
  2318. #endif /* BCMPCIE */
  2319. FALSE)
  2320. {
  2321. DHD_BUS_BUSY_CLEAR_IN_DHD_IOVAR(dhd_pub);
  2322. }
  2323. DHD_LINUX_GENERAL_UNLOCK(dhd_pub, flags);
  2324. /* scan past the name to any arguments */
  2325. for (arg = buf, arglen = buflen; *arg && arglen; arg++, arglen--)
  2326. ;
  2327. if (*arg) {
  2328. bcmerror = BCME_BUFTOOSHORT;
  2329. goto unlock_exit;
  2330. }
  2331. /* account for the NUL terminator */
  2332. arg++, arglen--;
  2333. /* call with the appropriate arguments */
  2334. if (ioc->cmd == DHD_GET_VAR) {
  2335. bcmerror = dhd_iovar_op(dhd_pub, buf, arg, arglen,
  2336. buf, buflen, IOV_GET);
  2337. } else {
  2338. bcmerror = dhd_iovar_op(dhd_pub, buf, NULL, 0,
  2339. arg, arglen, IOV_SET);
  2340. }
  2341. if (bcmerror != BCME_UNSUPPORTED) {
  2342. goto unlock_exit;
  2343. }
  2344. /* not in generic table, try protocol module */
  2345. if (ioc->cmd == DHD_GET_VAR) {
  2346. bcmerror = dhd_prot_iovar_op(dhd_pub, buf, arg,
  2347. arglen, buf, buflen, IOV_GET);
  2348. } else {
  2349. bcmerror = dhd_prot_iovar_op(dhd_pub, buf,
  2350. NULL, 0, arg, arglen, IOV_SET);
  2351. }
  2352. if (bcmerror != BCME_UNSUPPORTED) {
  2353. goto unlock_exit;
  2354. }
  2355. /* if still not found, try bus module */
  2356. if (ioc->cmd == DHD_GET_VAR) {
  2357. bcmerror = dhd_bus_iovar_op(dhd_pub, buf,
  2358. arg, arglen, buf, buflen, IOV_GET);
  2359. } else {
  2360. bcmerror = dhd_bus_iovar_op(dhd_pub, buf,
  2361. NULL, 0, arg, arglen, IOV_SET);
  2362. }
  2363. if (bcmerror != BCME_UNSUPPORTED) {
  2364. goto unlock_exit;
  2365. }
  2366. }
  2367. goto unlock_exit;
  2368. default:
  2369. bcmerror = BCME_UNSUPPORTED;
  2370. }
  2371. dhd_os_dhdiovar_unlock(dhd_pub);
  2372. return bcmerror;
  2373. unlock_exit:
  2374. DHD_LINUX_GENERAL_LOCK(dhd_pub, flags);
  2375. DHD_BUS_BUSY_CLEAR_IN_DHD_IOVAR(dhd_pub);
  2376. dhd_os_busbusy_wake(dhd_pub);
  2377. DHD_LINUX_GENERAL_UNLOCK(dhd_pub, flags);
  2378. dhd_os_dhdiovar_unlock(dhd_pub);
  2379. return bcmerror;
  2380. }
  2381. #ifdef SHOW_EVENTS
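/*
 * wl_show_host_event() is a debug-only pretty printer: it converts the
 * event header fields to host byte order and emits one DHD_EVENT() line
 * per event type; when byte-level logging is enabled it also hex-dumps
 * any appended event data.
 */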
  2382. static void
  2383. wl_show_host_event(dhd_pub_t *dhd_pub, wl_event_msg_t *event, void *event_data,
  2384. void *raw_event_ptr, char *eventmask)
  2385. {
  2386. uint i, status, reason;
  2387. bool group = FALSE, flush_txq = FALSE, link = FALSE;
  2388. bool host_data = FALSE; /* prints event data after the case when set */
  2389. const char *auth_str;
  2390. const char *event_name;
  2391. uchar *buf;
  2392. char err_msg[256], eabuf[ETHER_ADDR_STR_LEN];
  2393. uint event_type, flags, auth_type, datalen;
  2394. event_type = ntoh32(event->event_type);
  2395. flags = ntoh16(event->flags);
  2396. status = ntoh32(event->status);
  2397. reason = ntoh32(event->reason);
  2398. BCM_REFERENCE(reason);
  2399. auth_type = ntoh32(event->auth_type);
  2400. datalen = ntoh32(event->datalen);
  2401. /* debug dump of event messages */
  2402. snprintf(eabuf, sizeof(eabuf), MACDBG, MAC2STRDBG(event->addr.octet));
  2403. event_name = bcmevent_get_name(event_type);
  2404. BCM_REFERENCE(event_name);
  2405. if (flags & WLC_EVENT_MSG_LINK)
  2406. link = TRUE;
  2407. if (flags & WLC_EVENT_MSG_GROUP)
  2408. group = TRUE;
  2409. if (flags & WLC_EVENT_MSG_FLUSHTXQ)
  2410. flush_txq = TRUE;
  2411. switch (event_type) {
  2412. case WLC_E_START:
  2413. case WLC_E_DEAUTH:
  2414. case WLC_E_DISASSOC:
  2415. DHD_EVENT(("MACEVENT: %s, MAC %s\n", event_name, eabuf));
  2416. break;
  2417. case WLC_E_ASSOC_IND:
  2418. case WLC_E_REASSOC_IND:
  2419. DHD_EVENT(("MACEVENT: %s, MAC %s\n", event_name, eabuf));
  2420. break;
  2421. case WLC_E_ASSOC:
  2422. case WLC_E_REASSOC:
  2423. if (status == WLC_E_STATUS_SUCCESS) {
  2424. DHD_EVENT(("MACEVENT: %s, MAC %s, SUCCESS\n", event_name, eabuf));
  2425. } else if (status == WLC_E_STATUS_TIMEOUT) {
  2426. DHD_EVENT(("MACEVENT: %s, MAC %s, TIMEOUT\n", event_name, eabuf));
  2427. } else if (status == WLC_E_STATUS_FAIL) {
  2428. DHD_EVENT(("MACEVENT: %s, MAC %s, FAILURE, status %d reason %d\n",
  2429. event_name, eabuf, (int)status, (int)reason));
  2430. } else {
  2431. DHD_EVENT(("MACEVENT: %s, MAC %s, unexpected status %d\n",
  2432. event_name, eabuf, (int)status));
  2433. }
  2434. break;
  2435. case WLC_E_DEAUTH_IND:
  2436. case WLC_E_DISASSOC_IND:
  2437. DHD_EVENT(("MACEVENT: %s, MAC %s, reason %d\n", event_name, eabuf, (int)reason));
  2438. break;
  2439. case WLC_E_AUTH:
  2440. case WLC_E_AUTH_IND:
  2441. if (auth_type == DOT11_OPEN_SYSTEM)
  2442. auth_str = "Open System";
  2443. else if (auth_type == DOT11_SHARED_KEY)
  2444. auth_str = "Shared Key";
  2445. else if (auth_type == DOT11_SAE)
  2446. auth_str = "SAE";
  2447. else {
  2448. snprintf(err_msg, sizeof(err_msg), "AUTH unknown: %d", (int)auth_type);
  2449. auth_str = err_msg;
  2450. }
  2451. if (event_type == WLC_E_AUTH_IND) {
  2452. DHD_EVENT(("MACEVENT: %s, MAC %s, %s\n", event_name, eabuf, auth_str));
  2453. } else if (status == WLC_E_STATUS_SUCCESS) {
  2454. DHD_EVENT(("MACEVENT: %s, MAC %s, %s, SUCCESS\n",
  2455. event_name, eabuf, auth_str));
  2456. } else if (status == WLC_E_STATUS_TIMEOUT) {
  2457. DHD_EVENT(("MACEVENT: %s, MAC %s, %s, TIMEOUT\n",
  2458. event_name, eabuf, auth_str));
  2459. } else if (status == WLC_E_STATUS_FAIL) {
  2460. DHD_EVENT(("MACEVENT: %s, MAC %s, %s, FAILURE, status %d reason %d\n",
  2461. event_name, eabuf, auth_str, (int)status, (int)reason));
  2462. } else if (status == WLC_E_STATUS_NO_ACK) {
  2463. DHD_EVENT(("MACEVENT: %s, MAC %s, %s, NOACK\n",
  2464. event_name, eabuf, auth_str));
  2465. } else {
  2466. DHD_EVENT(("MACEVENT: %s, MAC %s, %s, status %d reason %d\n",
  2467. event_name, eabuf, auth_str, (int)status, (int)reason));
  2468. }
  2469. BCM_REFERENCE(auth_str);
  2470. break;
  2471. case WLC_E_JOIN:
  2472. case WLC_E_ROAM:
  2473. case WLC_E_SET_SSID:
  2474. if (status == WLC_E_STATUS_SUCCESS) {
  2475. DHD_EVENT(("MACEVENT: %s, MAC %s\n", event_name, eabuf));
  2476. } else {
  2477. if (status == WLC_E_STATUS_FAIL) {
  2478. DHD_EVENT(("MACEVENT: %s, failed status %d\n", event_name, status));
  2479. } else if (status == WLC_E_STATUS_NO_NETWORKS) {
  2480. DHD_EVENT(("MACEVENT: %s, no networks found\n", event_name));
  2481. } else {
  2482. DHD_EVENT(("MACEVENT: %s, unexpected status %d\n",
  2483. event_name, (int)status));
  2484. }
  2485. }
  2486. break;
  2487. case WLC_E_BEACON_RX:
  2488. if (status == WLC_E_STATUS_SUCCESS) {
  2489. DHD_EVENT(("MACEVENT: %s, SUCCESS\n", event_name));
  2490. } else if (status == WLC_E_STATUS_FAIL) {
  2491. DHD_EVENT(("MACEVENT: %s, FAIL\n", event_name));
  2492. } else {
  2493. DHD_EVENT(("MACEVENT: %s, status %d\n", event_name, status));
  2494. }
  2495. break;
  2496. case WLC_E_LINK:
  2497. DHD_EVENT(("MACEVENT: %s %s flags:0x%x status:%d\n",
  2498. event_name, link?"UP":"DOWN", flags, status));
  2499. BCM_REFERENCE(link);
  2500. break;
  2501. case WLC_E_MIC_ERROR:
  2502. DHD_EVENT(("MACEVENT: %s, MAC %s, Group %d, Flush %d\n",
  2503. event_name, eabuf, group, flush_txq));
  2504. BCM_REFERENCE(group);
  2505. BCM_REFERENCE(flush_txq);
  2506. break;
  2507. case WLC_E_ICV_ERROR:
  2508. case WLC_E_UNICAST_DECODE_ERROR:
  2509. case WLC_E_MULTICAST_DECODE_ERROR:
  2510. DHD_EVENT(("MACEVENT: %s, MAC %s\n",
  2511. event_name, eabuf));
  2512. break;
  2513. case WLC_E_TXFAIL:
  2514. DHD_EVENT(("MACEVENT: %s, RA %s status %d\n", event_name, eabuf, status));
  2515. break;
  2516. case WLC_E_ASSOC_REQ_IE:
  2517. case WLC_E_ASSOC_RESP_IE:
  2518. case WLC_E_PMKID_CACHE:
  2519. DHD_EVENT(("MACEVENT: %s\n", event_name));
  2520. break;
  2521. case WLC_E_SCAN_COMPLETE:
  2522. DHD_EVENT(("MACEVENT: %s\n", event_name));
  2523. break;
  2524. case WLC_E_IND_DOS_STATUS:
  2525. DHD_EVENT(("MACEVENT: %s\n", event_name));
  2526. break;
  2527. case WLC_E_RSSI_LQM:
  2528. case WLC_E_PFN_NET_FOUND:
  2529. case WLC_E_PFN_NET_LOST:
  2530. case WLC_E_PFN_SCAN_COMPLETE:
  2531. case WLC_E_PFN_SCAN_NONE:
  2532. case WLC_E_PFN_SCAN_ALLGONE:
  2533. case WLC_E_PFN_GSCAN_FULL_RESULT:
  2534. case WLC_E_PFN_SSID_EXT:
  2535. DHD_EVENT(("PNOEVENT: %s\n", event_name));
  2536. break;
  2537. case WLC_E_PFN_SCAN_BACKOFF:
  2538. case WLC_E_PFN_BSSID_SCAN_BACKOFF:
  2539. DHD_EVENT(("PNOEVENT: %s, status %d, reason %d\n",
  2540. event_name, (int)status, (int)reason));
  2541. break;
  2542. case WLC_E_PSK_SUP:
  2543. case WLC_E_PRUNE:
  2544. DHD_EVENT(("MACEVENT: %s, status %d, reason %d\n",
  2545. event_name, (int)status, (int)reason));
  2546. break;
  2547. #ifdef WIFI_ACT_FRAME
  2548. case WLC_E_ACTION_FRAME:
  2549. DHD_TRACE(("MACEVENT: %s Bssid %s\n", event_name, eabuf));
  2550. break;
  2551. #endif /* WIFI_ACT_FRAME */
  2552. #ifdef SHOW_LOGTRACE
  2553. case WLC_E_TRACE:
  2554. {
  2555. dhd_dbg_trace_evnt_handler(dhd_pub, event_data, raw_event_ptr, datalen);
  2556. break;
  2557. }
  2558. #endif /* SHOW_LOGTRACE */
  2559. case WLC_E_RSSI:
  2560. DHD_EVENT(("MACEVENT: %s %d\n", event_name, ntoh32(*((int *)event_data))));
  2561. break;
  2562. case WLC_E_SERVICE_FOUND:
  2563. case WLC_E_P2PO_ADD_DEVICE:
  2564. case WLC_E_P2PO_DEL_DEVICE:
  2565. DHD_EVENT(("MACEVENT: %s, MAC %s\n", event_name, eabuf));
  2566. break;
  2567. #ifdef BT_WIFI_HANDOBER
  2568. case WLC_E_BT_WIFI_HANDOVER_REQ:
  2569. DHD_EVENT(("MACEVENT: %s, MAC %s\n", event_name, eabuf));
  2570. break;
  2571. #endif // endif
  2572. case WLC_E_CCA_CHAN_QUAL:
  2573. if (datalen) {
  2574. cca_chan_qual_event_t *cca_event = (cca_chan_qual_event_t *)event_data;
  2575. if (cca_event->id == WL_CHAN_QUAL_FULLPM_CCA) {
  2576. cca_only_chan_qual_event_t *cca_only_event =
  2577. (cca_only_chan_qual_event_t *)cca_event;
  2578. BCM_REFERENCE(cca_only_event);
  2579. DHD_EVENT((
  2580. "MACEVENT: %s %d, MAC %s, status %d, reason %d, auth %d,"
  2581. " channel 0x%02x\n",
  2582. event_name, event_type, eabuf, (int)status,
  2583. (int)reason, (int)auth_type, cca_event->chanspec));
  2584. DHD_EVENT((
  2585. "\tTOTAL (dur %dms me %dms notme %dms interf %dms"
  2586. " ts 0x%08x)\n",
  2587. cca_only_event->cca_busy_ext.duration,
  2588. cca_only_event->cca_busy_ext.congest_ibss,
  2589. cca_only_event->cca_busy_ext.congest_obss,
  2590. cca_only_event->cca_busy_ext.interference,
  2591. cca_only_event->cca_busy_ext.timestamp));
  2592. DHD_EVENT((
  2593. "\t !PM (dur %dms me %dms notme %dms interf %dms)\n",
  2594. cca_only_event->cca_busy_nopm.duration,
  2595. cca_only_event->cca_busy_nopm.congest_ibss,
  2596. cca_only_event->cca_busy_nopm.congest_obss,
  2597. cca_only_event->cca_busy_nopm.interference));
  2598. DHD_EVENT((
  2599. "\t PM (dur %dms me %dms notme %dms interf %dms)\n",
  2600. cca_only_event->cca_busy_pm.duration,
  2601. cca_only_event->cca_busy_pm.congest_ibss,
  2602. cca_only_event->cca_busy_pm.congest_obss,
  2603. cca_only_event->cca_busy_pm.interference));
  2604. } else if (cca_event->id == WL_CHAN_QUAL_FULL_CCA) {
  2605. DHD_EVENT((
  2606. "MACEVENT: %s %d, MAC %s, status %d, reason %d, auth %d,"
  2607. " channel 0x%02x (dur %dms ibss %dms obss %dms interf %dms"
  2608. " ts 0x%08x)\n",
  2609. event_name, event_type, eabuf, (int)status,
  2610. (int)reason, (int)auth_type, cca_event->chanspec,
  2611. cca_event->cca_busy_ext.duration,
  2612. cca_event->cca_busy_ext.congest_ibss,
  2613. cca_event->cca_busy_ext.congest_obss,
  2614. cca_event->cca_busy_ext.interference,
  2615. cca_event->cca_busy_ext.timestamp));
  2616. } else if (cca_event->id == WL_CHAN_QUAL_CCA) {
  2617. DHD_EVENT((
  2618. "MACEVENT: %s %d, MAC %s, status %d, reason %d, auth %d,"
  2619. " channel 0x%02x (dur %dms busy %dms ts 0x%08x)\n",
  2620. event_name, event_type, eabuf, (int)status,
  2621. (int)reason, (int)auth_type, cca_event->chanspec,
  2622. cca_event->cca_busy.duration,
  2623. cca_event->cca_busy.congest,
  2624. cca_event->cca_busy.timestamp));
  2625. } else if ((cca_event->id == WL_CHAN_QUAL_NF) ||
  2626. (cca_event->id == WL_CHAN_QUAL_NF_LTE)) {
  2627. DHD_EVENT((
  2628. "MACEVENT: %s %d, MAC %s, status %d, reason %d, auth %d,"
  2629. " channel 0x%02x (NF[%d] %ddB)\n",
  2630. event_name, event_type, eabuf, (int)status,
  2631. (int)reason, (int)auth_type, cca_event->chanspec,
  2632. cca_event->id, cca_event->noise));
  2633. } else {
  2634. DHD_EVENT((
  2635. "MACEVENT: %s %d, MAC %s, status %d, reason %d, auth %d,"
  2636. " channel 0x%02x (unknown ID %d)\n",
  2637. event_name, event_type, eabuf, (int)status,
  2638. (int)reason, (int)auth_type, cca_event->chanspec,
  2639. cca_event->id));
  2640. }
  2641. }
  2642. break;
  2643. case WLC_E_ESCAN_RESULT:
  2644. {
  2645. wl_escan_result_v2_t *escan_result =
  2646. (wl_escan_result_v2_t *)event_data;
  2647. BCM_REFERENCE(escan_result);
  2648. #ifdef OEM_ANDROID
  2649. if ((status == WLC_E_STATUS_SUCCESS) || (status == WLC_E_STATUS_ABORT)) {
  2650. DHD_EVENT(("MACEVENT: %s %d, status %d sync-id %u\n",
  2651. event_name, event_type, (int)status,
  2652. dtoh16(escan_result->sync_id)));
  2653. } else {
  2654. DHD_TRACE(("MACEVENT: %s %d, MAC %s, status %d \n",
  2655. event_name, event_type, eabuf, (int)status));
  2656. }
  2657. #else
  2658. DHD_EVENT(("MACEVENT: %s %d, MAC %s, status %d sync-id %u\n",
  2659. event_name, event_type, eabuf, (int)status, dtoh16(escan_result->sync_id)));
  2660. #endif // endif
  2661. break;
  2662. }
  2663. case WLC_E_IF:
  2664. {
  2665. struct wl_event_data_if *ifevent = (struct wl_event_data_if *)event_data;
  2666. BCM_REFERENCE(ifevent);
  2667. DHD_EVENT(("MACEVENT: %s, opcode:0x%d ifidx:%d role:%d\n",
  2668. event_name, ifevent->opcode, ifevent->ifidx, ifevent->role));
  2669. break;
  2670. }
  2671. #ifdef SHOW_LOGTRACE
  2672. case WLC_E_MSCH:
  2673. {
  2674. wl_mschdbg_event_handler(dhd_pub, raw_event_ptr, reason, event_data, datalen);
  2675. break;
  2676. }
  2677. #endif /* SHOW_LOGTRACE */
  2678. case WLC_E_PSK_AUTH:
  2679. DHD_EVENT(("MACEVENT: %s, RA %s status %d Reason:%d\n",
  2680. event_name, eabuf, status, reason));
  2681. break;
  2682. case WLC_E_AGGR_EVENT:
  2683. {
  2684. event_aggr_data_t *aggrbuf = event_data;
  2685. int j = 0, len = 0;
  2686. uint8 *data = aggrbuf->data;
  2687. DHD_EVENT(("MACEVENT: %s, num of events %d total len %d sub events: ",
  2688. event_name, aggrbuf->num_events, aggrbuf->len));
  2689. for (j = 0; j < aggrbuf->num_events; j++)
  2690. {
  2691. wl_event_msg_t * sub_event = (wl_event_msg_t *)data;
  2692. if (len > aggrbuf->len) {
  2693. DHD_ERROR(("%s: Aggr events corrupted!",
  2694. __FUNCTION__));
  2695. break;
  2696. }
  2697. DHD_EVENT(("\n Event type: %d ", ntoh32(sub_event->event_type)));
  2698. len += ALIGN_SIZE((ntoh32(sub_event->datalen) +
  2699. sizeof(wl_event_msg_t)), sizeof(uint64));
  2700. buf = (uchar *)(data + sizeof(wl_event_msg_t));
  2701. BCM_REFERENCE(buf);
  2702. DHD_EVENT((" data (%d) : ", ntoh32(sub_event->datalen)));
  2703. for (i = 0; i < ntoh32(sub_event->datalen); i++) {
  2704. DHD_EVENT((" 0x%02x ", buf[i]));
  2705. }
  2706. data = aggrbuf->data + len;
  2707. }
  2708. DHD_EVENT(("\n"));
  2709. }
  2710. break;
  2711. case WLC_E_NAN_CRITICAL:
  2712. {
  2713. DHD_LOG_MEM(("MACEVENT: %s, type:%d\n", event_name, reason));
  2714. break;
  2715. }
  2716. case WLC_E_NAN_NON_CRITICAL:
  2717. {
  2718. DHD_TRACE(("MACEVENT: %s, type:%d\n", event_name, reason));
  2719. break;
  2720. }
  2721. case WLC_E_PROXD:
  2722. {
  2723. wl_proxd_event_t *proxd = (wl_proxd_event_t*)event_data;
  2724. DHD_LOG_MEM(("MACEVENT: %s, event:%d, status:%d\n",
  2725. event_name, proxd->type, reason));
  2726. break;
  2727. }
  2728. case WLC_E_RPSNOA:
  2729. {
  2730. rpsnoa_stats_t *stat = event_data;
  2731. if (datalen == sizeof(*stat)) {
  2732. DHD_EVENT(("MACEVENT: %s, band %s, status %d, pps %d\n", event_name,
  2733. (stat->band == WLC_BAND_2G) ? "2G":"5G",
  2734. stat->state, stat->last_pps));
  2735. }
  2736. break;
  2737. }
  2738. case WLC_E_PHY_CAL:
  2739. {
  2740. DHD_EVENT(("MACEVENT: %s, reason:%d\n", event_name, reason));
  2741. break;
  2742. }
  2743. case WLC_E_WA_LQM:
  2744. {
  2745. wl_event_wa_lqm_t *event_wa_lqm = (wl_event_wa_lqm_t *)event_data;
  2746. bcm_xtlv_t *subevent;
  2747. wl_event_wa_lqm_basic_t *elqm_basic;
  2748. if ((event_wa_lqm->ver != WL_EVENT_WA_LQM_VER) ||
  2749. (event_wa_lqm->len < sizeof(wl_event_wa_lqm_t) + BCM_XTLV_HDR_SIZE)) {
  2750. DHD_ERROR(("MACEVENT: %s invalid (ver=%d len=%d)\n",
  2751. event_name, event_wa_lqm->ver, event_wa_lqm->len));
  2752. break;
  2753. }
  2754. subevent = (bcm_xtlv_t *)event_wa_lqm->subevent;
  2755. if ((subevent->id != WL_EVENT_WA_LQM_BASIC) ||
  2756. (subevent->len < sizeof(wl_event_wa_lqm_basic_t))) {
  2757. DHD_ERROR(("MACEVENT: %s invalid sub-type (id=%d len=%d)\n",
  2758. event_name, subevent->id, subevent->len));
  2759. break;
  2760. }
  2761. elqm_basic = (wl_event_wa_lqm_basic_t *)subevent->data;
  2762. BCM_REFERENCE(elqm_basic);
  2763. DHD_EVENT(("MACEVENT: %s (RSSI=%d SNR=%d TxRate=%d RxRate=%d)\n",
  2764. event_name, elqm_basic->rssi, elqm_basic->snr,
  2765. elqm_basic->tx_rate, elqm_basic->rx_rate));
  2766. break;
  2767. }
  2768. default:
  2769. DHD_INFO(("MACEVENT: %s %d, MAC %s, status %d, reason %d, auth %d\n",
  2770. event_name, event_type, eabuf, (int)status, (int)reason,
  2771. (int)auth_type));
  2772. break;
  2773. }
  2774. /* show any appended data if message level is set to bytes or host_data is set */
  2775. if ((DHD_BYTES_ON() || (host_data == TRUE)) && DHD_EVENT_ON() && datalen) {
  2776. buf = (uchar *) event_data;
  2777. BCM_REFERENCE(buf);
  2778. DHD_EVENT((" data (%d) : ", datalen));
  2779. for (i = 0; i < datalen; i++) {
  2780. DHD_EVENT((" 0x%02x ", buf[i]));
  2781. }
  2782. DHD_EVENT(("\n"));
  2783. }
  2784. } /* wl_show_host_event */
  2785. #endif /* SHOW_EVENTS */
  2786. #ifdef DNGL_EVENT_SUPPORT
  2787. /* Check whether packet is a BRCM dngl event pkt. If it is, process event data. */
  2788. int
  2789. dngl_host_event(dhd_pub_t *dhdp, void *pktdata, bcm_dngl_event_msg_t *dngl_event, size_t pktlen)
  2790. {
  2791. bcm_dngl_event_t *pvt_data = (bcm_dngl_event_t *)pktdata;
  2792. dngl_host_event_process(dhdp, pvt_data, dngl_event, pktlen);
  2793. return BCME_OK;
  2794. }
  2795. #ifdef PARSE_DONGLE_HOST_EVENT
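/*
 * Lookup tables mapping dongle health-check ids to readable names.
 * dhd_print_dongle_hck_id() walks a table until the terminating
 * {0, NULL} entry and logs the matching name, if any.
 */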
  2796. typedef struct hck_id_to_str_s {
  2797. uint32 id;
  2798. char *name;
  2799. } hck_id_to_str_t;
  2800. hck_id_to_str_t hck_sw_id_to_str[] = {
  2801. {WL_HC_DD_PCIE, "WL_HC_DD_PCIE"},
  2802. {WL_HC_DD_RX_DMA_STALL, "WL_HC_DD_RX_DMA_STALL"},
  2803. {WL_HC_DD_RX_STALL, "WL_HC_DD_RX_STALL"},
  2804. {WL_HC_DD_TX_STALL, "WL_HC_DD_TX_STALL"},
  2805. {WL_HC_DD_SCAN_STALL, "WL_HC_DD_SCAN_STALL"},
  2806. {WL_HC_DD_PHY, "WL_HC_DD_PHY"},
  2807. {WL_HC_DD_REINIT, "WL_HC_DD_REINIT"},
  2808. {WL_HC_DD_TXQ_STALL, "WL_HC_DD_TXQ_STALL"},
  2809. {0, NULL}
  2810. };
  2811. hck_id_to_str_t hck_pcie_module_to_str[] = {
  2812. {HEALTH_CHECK_PCIEDEV_INDUCED_IND, "PCIEDEV_INDUCED_IND"},
  2813. {HEALTH_CHECK_PCIEDEV_H2D_DMA_IND, "PCIEDEV_H2D_DMA_IND"},
  2814. {HEALTH_CHECK_PCIEDEV_D2H_DMA_IND, "PCIEDEV_D2H_DMA_IND"},
  2815. {HEALTH_CHECK_PCIEDEV_IOCTL_STALL_IND, "PCIEDEV_IOCTL_STALL_IND"},
  2816. {HEALTH_CHECK_PCIEDEV_D3ACK_STALL_IND, "PCIEDEV_D3ACK_STALL_IND"},
  2817. {HEALTH_CHECK_PCIEDEV_NODS_IND, "PCIEDEV_NODS_IND"},
  2818. {HEALTH_CHECK_PCIEDEV_LINKSPEED_FALLBACK_IND, "PCIEDEV_LINKSPEED_FALLBACK_IND"},
  2819. {HEALTH_CHECK_PCIEDEV_DSACK_STALL_IND, "PCIEDEV_DSACK_STALL_IND"},
  2820. {0, NULL}
  2821. };
  2822. static void
  2823. dhd_print_dongle_hck_id(uint32 id, hck_id_to_str_t *hck)
  2824. {
  2825. while (hck->name != NULL) {
  2826. if (hck->id == id) {
  2827. DHD_ERROR(("DONGLE_HCK_EVENT: %s\n", hck->name));
  2828. return;
  2829. }
  2830. hck++;
  2831. }
  2832. }
  2833. #endif /* PARSE_DONGLE_HOST_EVENT */
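/*
 * dngl_host_event_process() parses a dongle-generated event: it checks
 * the advertised data length and message version, then decodes
 * DNGL_E_SOCRAM_IND payloads - either an assert record (function name,
 * line number, caller address) or a health-check TLV that is further
 * decoded per top-level module (PCIe vs. WL software health checks).
 * When DHD_FW_COREDUMP is enabled and memdump is enabled, a SOCRAM dump
 * is triggered afterwards; otherwise an urgent debug event is sent up.
 */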
  2834. void
  2835. dngl_host_event_process(dhd_pub_t *dhdp, bcm_dngl_event_t *event,
  2836. bcm_dngl_event_msg_t *dngl_event, size_t pktlen)
  2837. {
  2838. uint8 *p = (uint8 *)(event + 1);
  2839. uint16 type = ntoh16_ua((void *)&dngl_event->event_type);
  2840. uint16 datalen = ntoh16_ua((void *)&dngl_event->datalen);
  2841. uint16 version = ntoh16_ua((void *)&dngl_event->version);
  2842. DHD_EVENT(("VERSION:%d, EVENT TYPE:%d, DATALEN:%d\n", version, type, datalen));
  2843. if (datalen > (pktlen - sizeof(bcm_dngl_event_t) + ETHER_TYPE_LEN)) {
  2844. return;
  2845. }
  2846. if (version != BCM_DNGL_EVENT_MSG_VERSION) {
  2847. DHD_ERROR(("%s:version mismatch:%d:%d\n", __FUNCTION__,
  2848. version, BCM_DNGL_EVENT_MSG_VERSION));
  2849. return;
  2850. }
  2851. switch (type) {
  2852. case DNGL_E_SOCRAM_IND:
  2853. {
  2854. bcm_dngl_socramind_t *socramind_ptr = (bcm_dngl_socramind_t *)p;
  2855. uint16 tag = ltoh32(socramind_ptr->tag);
  2856. uint16 taglen = ltoh32(socramind_ptr->length);
  2857. p = (uint8 *)socramind_ptr->value;
  2858. DHD_EVENT(("Tag:%d Len:%d Datalen:%d\n", tag, taglen, datalen));
  2859. switch (tag) {
  2860. case SOCRAM_IND_ASSERT_TAG:
  2861. {
2862. /*
2863. * The payload consists of:
2864. * - a NUL-terminated function name, padded to a 32-bit boundary
2865. * - the line number (32 bits)
2866. * - the caller address (32 bits)
2867. */
  2868. char *fnname = (char *)p;
  2869. if (datalen < (ROUNDUP(strlen(fnname) + 1, sizeof(uint32)) +
  2870. sizeof(uint32) * 2)) {
  2871. DHD_ERROR(("Wrong length:%d\n", datalen));
  2872. return;
  2873. }
  2874. DHD_EVENT(("ASSRT Function:%s ", p));
  2875. p += ROUNDUP(strlen(p) + 1, sizeof(uint32));
  2876. DHD_EVENT(("Line:%d ", *(uint32 *)p));
  2877. p += sizeof(uint32);
  2878. DHD_EVENT(("Caller Addr:0x%x\n", *(uint32 *)p));
  2879. #ifdef PARSE_DONGLE_HOST_EVENT
  2880. DHD_ERROR(("DONGLE_HCK_EVENT: SOCRAM_IND_ASSERT_TAG\n"));
  2881. #endif /* PARSE_DONGLE_HOST_EVENT */
  2882. break;
  2883. }
  2884. case SOCRAM_IND_TAG_HEALTH_CHECK:
  2885. {
  2886. bcm_dngl_healthcheck_t *dngl_hc = (bcm_dngl_healthcheck_t *)p;
  2887. DHD_EVENT(("SOCRAM_IND_HEALTHCHECK_TAG:%d Len:%d datalen:%d\n",
  2888. ltoh32(dngl_hc->top_module_tag),
  2889. ltoh32(dngl_hc->top_module_len),
  2890. datalen));
  2891. if (DHD_EVENT_ON()) {
  2892. prhex("HEALTHCHECK", p, MIN(ltoh32(dngl_hc->top_module_len)
  2893. + BCM_XTLV_HDR_SIZE, datalen));
  2894. }
  2895. #ifdef DHD_LOG_DUMP
  2896. memset(dhdp->health_chk_event_data, 0, HEALTH_CHK_BUF_SIZE);
  2897. memcpy(dhdp->health_chk_event_data, p,
  2898. MIN(ltoh32(dngl_hc->top_module_len),
  2899. HEALTH_CHK_BUF_SIZE));
  2900. #endif /* DHD_LOG_DUMP */
  2901. p = (uint8 *)dngl_hc->value;
  2902. switch (ltoh32(dngl_hc->top_module_tag)) {
  2903. case HEALTH_CHECK_TOP_LEVEL_MODULE_PCIEDEV_RTE:
  2904. {
  2905. bcm_dngl_pcie_hc_t *pcie_hc;
  2906. pcie_hc = (bcm_dngl_pcie_hc_t *)p;
  2907. BCM_REFERENCE(pcie_hc);
  2908. if (ltoh32(dngl_hc->top_module_len) <
  2909. sizeof(bcm_dngl_pcie_hc_t)) {
  2910. DHD_ERROR(("Wrong length:%d\n",
  2911. ltoh32(dngl_hc->top_module_len)));
  2912. return;
  2913. }
  2914. DHD_EVENT(("%d:PCIE HC error:%d flag:0x%x,"
  2915. " control:0x%x\n",
  2916. ltoh32(pcie_hc->version),
  2917. ltoh32(pcie_hc->pcie_err_ind_type),
  2918. ltoh32(pcie_hc->pcie_flag),
  2919. ltoh32(pcie_hc->pcie_control_reg)));
  2920. #ifdef PARSE_DONGLE_HOST_EVENT
  2921. dhd_print_dongle_hck_id(
  2922. ltoh32(pcie_hc->pcie_err_ind_type),
  2923. hck_pcie_module_to_str);
  2924. #endif /* PARSE_DONGLE_HOST_EVENT */
  2925. break;
  2926. }
  2927. #ifdef HCHK_COMMON_SW_EVENT
  2928. case HCHK_SW_ENTITY_WL_PRIMARY:
  2929. case HCHK_SW_ENTITY_WL_SECONDARY:
  2930. {
  2931. bcm_xtlv_t *wl_hc = (bcm_xtlv_t*)p;
  2932. if (ltoh32(dngl_hc->top_module_len) <
  2933. sizeof(bcm_xtlv_t)) {
  2934. DHD_ERROR(("WL SW HC Wrong length:%d\n",
  2935. ltoh32(dngl_hc->top_module_len)));
  2936. return;
  2937. }
  2938. BCM_REFERENCE(wl_hc);
  2939. DHD_EVENT(("WL SW HC type %d len %d\n",
  2940. ltoh16(wl_hc->id), ltoh16(wl_hc->len)));
  2941. #ifdef PARSE_DONGLE_HOST_EVENT
  2942. dhd_print_dongle_hck_id(ltoh16(wl_hc->id),
  2943. hck_sw_id_to_str);
  2944. #endif /* PARSE_DONGLE_HOST_EVENT */
  2945. break;
  2946. }
  2947. #endif /* HCHK_COMMON_SW_EVENT */
  2948. default:
  2949. {
  2950. DHD_ERROR(("%s:Unknown module TAG:%d\n",
  2951. __FUNCTION__,
  2952. ltoh32(dngl_hc->top_module_tag)));
  2953. break;
  2954. }
  2955. }
  2956. break;
  2957. }
  2958. default:
  2959. DHD_ERROR(("%s:Unknown TAG\n", __FUNCTION__));
  2960. if (p && DHD_EVENT_ON()) {
  2961. prhex("SOCRAMIND", p, taglen);
  2962. }
  2963. break;
  2964. }
  2965. break;
  2966. }
  2967. default:
  2968. DHD_ERROR(("%s:Unknown DNGL Event Type:%d\n", __FUNCTION__, type));
  2969. if (p && DHD_EVENT_ON()) {
  2970. prhex("SOCRAMIND", p, datalen);
  2971. }
  2972. break;
  2973. }
  2974. #ifdef DHD_FW_COREDUMP
  2975. if (dhdp->memdump_enabled) {
  2976. dhdp->memdump_type = DUMP_TYPE_DONGLE_HOST_EVENT;
  2977. if (dhd_socram_dump(dhdp->bus)) {
  2978. DHD_ERROR(("%s: socram dump failed\n", __FUNCTION__));
  2979. }
  2980. }
  2981. #else
  2982. dhd_dbg_send_urgent_evt(dhdp, p, datalen);
  2983. #endif /* DHD_FW_COREDUMP */
  2984. }
  2985. #endif /* DNGL_EVENT_SUPPORT */
2986. /* Stub for now. Will become a real function once the shim
2987. * is integrated into Android, Linux, etc.
2988. */
  2989. int
  2990. wl_event_process_default(wl_event_msg_t *event, struct wl_evt_pport *evt_pport)
  2991. {
  2992. return BCME_OK;
  2993. }
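/*
 * wl_event_process() verifies that the packet is a BRCM event frame,
 * copies the event header out in host byte order, packages the call
 * parameters into a wl_evt_pport_t and hands it to
 * wl_event_process_default() (currently the stub above).
 */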
  2994. int
  2995. wl_event_process(dhd_pub_t *dhd_pub, int *ifidx, void *pktdata,
  2996. uint pktlen, void **data_ptr, void *raw_event)
  2997. {
  2998. wl_evt_pport_t evt_pport;
  2999. wl_event_msg_t event;
  3000. bcm_event_msg_u_t evu;
  3001. int ret;
  3002. /* make sure it is a BRCM event pkt and record event data */
  3003. ret = wl_host_event_get_data(pktdata, pktlen, &evu);
  3004. if (ret != BCME_OK) {
  3005. return ret;
  3006. }
  3007. memcpy(&event, &evu.event, sizeof(wl_event_msg_t));
  3008. /* convert event from network order to host order */
  3009. wl_event_to_host_order(&event);
  3010. /* record event params to evt_pport */
  3011. evt_pport.dhd_pub = dhd_pub;
  3012. evt_pport.ifidx = ifidx;
  3013. evt_pport.pktdata = pktdata;
  3014. evt_pport.data_ptr = data_ptr;
  3015. evt_pport.raw_event = raw_event;
  3016. evt_pport.data_len = pktlen;
  3017. ret = wl_event_process_default(&event, &evt_pport);
  3018. return ret;
  3019. } /* wl_event_process */
  3020. /* Check whether packet is a BRCM event pkt. If it is, record event data. */
  3021. int
  3022. wl_host_event_get_data(void *pktdata, uint pktlen, bcm_event_msg_u_t *evu)
  3023. {
  3024. int ret;
  3025. ret = is_wlc_event_frame(pktdata, pktlen, 0, evu);
  3026. if (ret != BCME_OK) {
  3027. DHD_ERROR(("%s: Invalid event frame, err = %d\n",
  3028. __FUNCTION__, ret));
  3029. }
  3030. return ret;
  3031. }
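/*
 * wl_process_host_event() is the central event dispatcher: it separates
 * dongle events (BCMILCP_BCM_SUBTYPE_DNGLEVENT) from regular BRCM events,
 * then switches on the event type to update driver state (interface
 * add/del/change, link up/down, station add/delete, flow-ring teardown,
 * PNO/RTT/NAN hooks) before pushing the event up to the external
 * supplicant/authenticator via dhd_event().
 */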
  3032. int
  3033. wl_process_host_event(dhd_pub_t *dhd_pub, int *ifidx, void *pktdata, uint pktlen,
  3034. wl_event_msg_t *event, void **data_ptr, void *raw_event)
  3035. {
  3036. bcm_event_t *pvt_data = (bcm_event_t *)pktdata;
  3037. bcm_event_msg_u_t evu;
  3038. uint8 *event_data;
  3039. uint32 type, status, datalen, reason;
  3040. uint16 flags;
  3041. uint evlen;
  3042. int ret;
  3043. uint16 usr_subtype;
  3044. #ifdef DHD_POST_EAPOL_M1_AFTER_ROAM_EVT
  3045. dhd_if_t *ifp = NULL;
  3046. #endif /* DHD_POST_EAPOL_M1_AFTER_ROAM_EVT */
  3047. ret = wl_host_event_get_data(pktdata, pktlen, &evu);
  3048. if (ret != BCME_OK) {
  3049. return ret;
  3050. }
  3051. usr_subtype = ntoh16_ua((void *)&pvt_data->bcm_hdr.usr_subtype);
  3052. switch (usr_subtype) {
  3053. case BCMILCP_BCM_SUBTYPE_EVENT:
  3054. memcpy(event, &evu.event, sizeof(wl_event_msg_t));
  3055. *data_ptr = &pvt_data[1];
  3056. break;
  3057. case BCMILCP_BCM_SUBTYPE_DNGLEVENT:
  3058. #ifdef DNGL_EVENT_SUPPORT
3059. /* If it is a DNGL event, process it first */
  3060. if (dngl_host_event(dhd_pub, pktdata, &evu.dngl_event, pktlen) == BCME_OK) {
3061. /*
3062. * Return an error on purpose so that the DNGL event is not also
3063. * processed as a BRCM event
3064. */
  3065. return BCME_ERROR;
  3066. }
  3067. #endif /* DNGL_EVENT_SUPPORT */
  3068. return BCME_NOTFOUND;
  3069. default:
  3070. return BCME_NOTFOUND;
  3071. }
  3072. /* start wl_event_msg process */
  3073. event_data = *data_ptr;
  3074. type = ntoh32_ua((void *)&event->event_type);
  3075. flags = ntoh16_ua((void *)&event->flags);
  3076. status = ntoh32_ua((void *)&event->status);
  3077. reason = ntoh32_ua((void *)&event->reason);
  3078. datalen = ntoh32_ua((void *)&event->datalen);
  3079. evlen = datalen + sizeof(bcm_event_t);
  3080. switch (type) {
  3081. #ifdef PROP_TXSTATUS
  3082. case WLC_E_FIFO_CREDIT_MAP:
  3083. dhd_wlfc_enable(dhd_pub);
  3084. dhd_wlfc_FIFOcreditmap_event(dhd_pub, event_data);
  3085. WLFC_DBGMESG(("WLC_E_FIFO_CREDIT_MAP:(AC0,AC1,AC2,AC3),(BC_MC),(OTHER): "
  3086. "(%d,%d,%d,%d),(%d),(%d)\n", event_data[0], event_data[1],
  3087. event_data[2],
  3088. event_data[3], event_data[4], event_data[5]));
  3089. break;
  3090. case WLC_E_BCMC_CREDIT_SUPPORT:
  3091. dhd_wlfc_BCMCCredit_support_event(dhd_pub);
  3092. break;
  3093. #ifdef LIMIT_BORROW
  3094. case WLC_E_ALLOW_CREDIT_BORROW:
  3095. dhd_wlfc_disable_credit_borrow_event(dhd_pub, event_data);
  3096. break;
  3097. #endif /* LIMIT_BORROW */
  3098. #endif /* PROP_TXSTATUS */
  3099. case WLC_E_ULP:
  3100. #ifdef DHD_ULP
  3101. {
  3102. wl_ulp_event_t *ulp_evt = (wl_ulp_event_t *)event_data;
  3103. /* Flush and disable console messages */
  3104. if (ulp_evt->ulp_dongle_action == WL_ULP_DISABLE_CONSOLE) {
  3105. #ifdef DHD_ULP_NOT_USED
  3106. dhd_bus_ulp_disable_console(dhd_pub);
  3107. #endif /* DHD_ULP_NOT_USED */
  3108. }
  3109. if (ulp_evt->ulp_dongle_action == WL_ULP_UCODE_DOWNLOAD) {
  3110. dhd_bus_ucode_download(dhd_pub->bus);
  3111. }
  3112. }
  3113. #endif /* DHD_ULP */
  3114. break;
  3115. case WLC_E_TDLS_PEER_EVENT:
  3116. #if defined(WLTDLS) && defined(PCIE_FULL_DONGLE)
  3117. {
  3118. dhd_tdls_event_handler(dhd_pub, event);
  3119. }
  3120. #endif // endif
  3121. break;
  3122. case WLC_E_IF:
  3123. {
  3124. struct wl_event_data_if *ifevent = (struct wl_event_data_if *)event_data;
  3125. /* Ignore the event if NOIF is set */
  3126. if (ifevent->reserved & WLC_E_IF_FLAGS_BSSCFG_NOIF) {
  3127. DHD_ERROR(("WLC_E_IF: NO_IF set, event Ignored\r\n"));
  3128. return (BCME_UNSUPPORTED);
  3129. }
  3130. #ifdef PCIE_FULL_DONGLE
  3131. dhd_update_interface_flow_info(dhd_pub, ifevent->ifidx,
  3132. ifevent->opcode, ifevent->role);
  3133. #endif // endif
  3134. #ifdef PROP_TXSTATUS
  3135. {
  3136. uint8* ea = pvt_data->eth.ether_dhost;
3137. WLFC_DBGMESG(("WLC_E_IF: idx:%d, action:%s, iftype:%s, ["MACDBG"]\n",
  3138. ifevent->ifidx,
  3139. ((ifevent->opcode == WLC_E_IF_ADD) ? "ADD":"DEL"),
  3140. ((ifevent->role == 0) ? "STA":"AP "),
  3141. MAC2STRDBG(ea)));
  3142. (void)ea;
  3143. if (ifevent->opcode == WLC_E_IF_CHANGE)
  3144. dhd_wlfc_interface_event(dhd_pub,
  3145. eWLFC_MAC_ENTRY_ACTION_UPDATE,
  3146. ifevent->ifidx, ifevent->role, ea);
  3147. else
  3148. dhd_wlfc_interface_event(dhd_pub,
  3149. ((ifevent->opcode == WLC_E_IF_ADD) ?
  3150. eWLFC_MAC_ENTRY_ACTION_ADD : eWLFC_MAC_ENTRY_ACTION_DEL),
  3151. ifevent->ifidx, ifevent->role, ea);
3152. /* DHD has already created the primary interface (ifidx 0) by default */
  3153. if (ifevent->ifidx == 0)
  3154. break;
  3155. }
  3156. #endif /* PROP_TXSTATUS */
  3157. if (ifevent->ifidx > 0 && ifevent->ifidx < DHD_MAX_IFS) {
  3158. if (ifevent->opcode == WLC_E_IF_ADD) {
  3159. if (dhd_event_ifadd(dhd_pub->info, ifevent, event->ifname,
  3160. event->addr.octet)) {
  3161. DHD_ERROR(("%s: dhd_event_ifadd failed ifidx: %d %s\n",
  3162. __FUNCTION__, ifevent->ifidx, event->ifname));
  3163. return (BCME_ERROR);
  3164. }
  3165. } else if (ifevent->opcode == WLC_E_IF_DEL) {
  3166. #ifdef PCIE_FULL_DONGLE
  3167. /* Delete flowrings unconditionally for i/f delete */
  3168. dhd_flow_rings_delete(dhd_pub, (uint8)dhd_ifname2idx(dhd_pub->info,
  3169. event->ifname));
  3170. #endif /* PCIE_FULL_DONGLE */
  3171. dhd_event_ifdel(dhd_pub->info, ifevent, event->ifname,
  3172. event->addr.octet);
  3173. } else if (ifevent->opcode == WLC_E_IF_CHANGE) {
  3174. #ifdef WL_CFG80211
  3175. dhd_event_ifchange(dhd_pub->info, ifevent, event->ifname,
  3176. event->addr.octet);
  3177. #endif /* WL_CFG80211 */
  3178. }
  3179. } else {
  3180. #if !defined(PROP_TXSTATUS) && !defined(PCIE_FULL_DONGLE) && defined(WL_CFG80211)
  3181. DHD_INFO(("%s: Invalid ifidx %d for %s\n",
  3182. __FUNCTION__, ifevent->ifidx, event->ifname));
  3183. #endif /* !PROP_TXSTATUS && !PCIE_FULL_DONGLE && WL_CFG80211 */
  3184. }
  3185. /* send up the if event: btamp user needs it */
  3186. *ifidx = dhd_ifname2idx(dhd_pub->info, event->ifname);
  3187. /* push up to external supp/auth */
  3188. dhd_event(dhd_pub->info, (char *)pvt_data, evlen, *ifidx);
  3189. break;
  3190. }
  3191. case WLC_E_NDIS_LINK:
  3192. break;
  3193. case WLC_E_PFN_NET_FOUND:
  3194. case WLC_E_PFN_SCAN_ALLGONE: /* share with WLC_E_PFN_BSSID_NET_LOST */
  3195. case WLC_E_PFN_NET_LOST:
  3196. break;
  3197. #if defined(OEM_ANDROID) && defined(PNO_SUPPORT)
  3198. case WLC_E_PFN_BSSID_NET_FOUND:
  3199. case WLC_E_PFN_BEST_BATCHING:
  3200. dhd_pno_event_handler(dhd_pub, event, (void *)event_data);
  3201. break;
  3202. #endif /* #if defined(OEM_ANDROID) && defined(PNO_SUPPORT) */
  3203. #if defined(RTT_SUPPORT)
  3204. case WLC_E_PROXD:
  3205. #ifndef WL_CFG80211
  3206. dhd_rtt_event_handler(dhd_pub, event, (void *)event_data);
  3207. #endif /* WL_CFG80211 */
  3208. break;
  3209. #endif /* RTT_SUPPORT */
  3210. /* These are what external supplicant/authenticator wants */
  3211. case WLC_E_ASSOC_IND:
  3212. case WLC_E_AUTH_IND:
  3213. case WLC_E_REASSOC_IND:
  3214. dhd_findadd_sta(dhd_pub,
  3215. dhd_ifname2idx(dhd_pub->info, event->ifname),
  3216. &event->addr.octet);
  3217. break;
  3218. #if defined(DHD_FW_COREDUMP)
  3219. case WLC_E_PSM_WATCHDOG:
  3220. DHD_ERROR(("%s: WLC_E_PSM_WATCHDOG event received : \n", __FUNCTION__));
  3221. if (dhd_socram_dump(dhd_pub->bus) != BCME_OK) {
  3222. DHD_ERROR(("%s: socram dump ERROR : \n", __FUNCTION__));
  3223. }
  3224. break;
  3225. #endif // endif
  3226. case WLC_E_NATOE_NFCT:
  3227. #ifdef WL_NATOE
  3228. DHD_EVENT(("%s: WLC_E_NATOE_NFCT event received \n", __FUNCTION__));
  3229. dhd_natoe_ct_event(dhd_pub, event_data);
  3230. #endif /* WL_NATOE */
  3231. break;
  3232. #ifdef WL_NAN
  3233. case WLC_E_SLOTTED_BSS_PEER_OP:
  3234. DHD_EVENT(("%s: WLC_E_SLOTTED_BSS_PEER_OP event received for peer: "
  3235. "" MACDBG ", status = %d\n",
  3236. __FUNCTION__, MAC2STRDBG(event->addr.octet), status));
  3237. if (status == WLC_E_STATUS_SLOTTED_PEER_ADD) {
  3238. dhd_findadd_sta(dhd_pub, dhd_ifname2idx(dhd_pub->info,
  3239. event->ifname), &event->addr.octet);
  3240. } else if (status == WLC_E_STATUS_SLOTTED_PEER_DEL) {
  3241. uint8 ifindex = (uint8)dhd_ifname2idx(dhd_pub->info, event->ifname);
  3242. BCM_REFERENCE(ifindex);
  3243. dhd_del_sta(dhd_pub, dhd_ifname2idx(dhd_pub->info,
  3244. event->ifname), &event->addr.octet);
  3245. #ifdef PCIE_FULL_DONGLE
  3246. dhd_flow_rings_delete_for_peer(dhd_pub, ifindex,
  3247. (char *)&event->addr.octet[0]);
  3248. #endif // endif
  3249. } else {
  3250. DHD_ERROR(("%s: WLC_E_SLOTTED_BSS_PEER_OP: Status is not expected = %d\n",
  3251. __FUNCTION__, status));
  3252. }
  3253. break;
  3254. #endif /* WL_NAN */
  3255. #ifdef DHD_POST_EAPOL_M1_AFTER_ROAM_EVT
  3256. case WLC_E_REASSOC:
  3257. ifp = dhd_get_ifp(dhd_pub, event->ifidx);
  3258. if (!ifp)
  3259. break;
  3260. /* Consider STA role only since roam is disabled on P2P GC.
  3261. * Drop EAPOL M1 frame only if roam is done to same BSS.
  3262. */
  3263. if ((status == WLC_E_STATUS_SUCCESS) &&
  3264. IS_STA_IFACE(ndev_to_wdev(ifp->net)) &&
  3265. wl_cfg80211_is_event_from_connected_bssid(ifp->net, event, event->ifidx)) {
  3266. ifp->recv_reassoc_evt = TRUE;
  3267. }
  3268. break;
  3269. #endif /* DHD_POST_EAPOL_M1_AFTER_ROAM_EVT */
  3270. case WLC_E_LINK:
  3271. #ifdef PCIE_FULL_DONGLE
  3272. if (dhd_update_interface_link_status(dhd_pub, (uint8)dhd_ifname2idx(dhd_pub->info,
  3273. event->ifname), (uint8)flags) != BCME_OK) {
  3274. DHD_ERROR(("%s: dhd_update_interface_link_status Failed.\n",
  3275. __FUNCTION__));
  3276. break;
  3277. }
  3278. if (!flags) {
  3279. DHD_ERROR(("%s: Deleting all STA from assoc list and flowrings.\n",
  3280. __FUNCTION__));
  3281. /* Delete all sta and flowrings */
  3282. dhd_del_all_sta(dhd_pub, dhd_ifname2idx(dhd_pub->info, event->ifname));
  3283. dhd_flow_rings_delete(dhd_pub, (uint8)dhd_ifname2idx(dhd_pub->info,
  3284. event->ifname));
  3285. }
  3286. #endif /* PCIE_FULL_DONGLE */
  3287. /* fall through */
  3288. case WLC_E_DEAUTH:
  3289. case WLC_E_DEAUTH_IND:
  3290. case WLC_E_DISASSOC:
  3291. case WLC_E_DISASSOC_IND:
  3292. #ifdef PCIE_FULL_DONGLE
  3293. if (type != WLC_E_LINK) {
  3294. uint8 ifindex = (uint8)dhd_ifname2idx(dhd_pub->info, event->ifname);
  3295. uint8 role = dhd_flow_rings_ifindex2role(dhd_pub, ifindex);
  3296. uint8 del_sta = TRUE;
  3297. #ifdef WL_CFG80211
  3298. if (role == WLC_E_IF_ROLE_STA &&
  3299. !wl_cfg80211_is_roam_offload(dhd_idx2net(dhd_pub, ifindex)) &&
  3300. !wl_cfg80211_is_event_from_connected_bssid(
  3301. dhd_idx2net(dhd_pub, ifindex), event, *ifidx)) {
  3302. del_sta = FALSE;
  3303. }
  3304. #endif /* WL_CFG80211 */
  3305. DHD_EVENT(("%s: Link event %d, flags %x, status %x, role %d, del_sta %d\n",
  3306. __FUNCTION__, type, flags, status, role, del_sta));
  3307. if (del_sta) {
  3308. DHD_EVENT(("%s: Deleting STA " MACDBG "\n",
  3309. __FUNCTION__, MAC2STRDBG(event->addr.octet)));
  3310. dhd_del_sta(dhd_pub, dhd_ifname2idx(dhd_pub->info,
  3311. event->ifname), &event->addr.octet);
  3312. /* Delete all flowrings for STA and P2P Client */
  3313. if (role == WLC_E_IF_ROLE_STA || role == WLC_E_IF_ROLE_P2P_CLIENT) {
  3314. dhd_flow_rings_delete(dhd_pub, ifindex);
  3315. } else {
  3316. dhd_flow_rings_delete_for_peer(dhd_pub, ifindex,
  3317. (char *)&event->addr.octet[0]);
  3318. }
  3319. }
  3320. }
  3321. #endif /* PCIE_FULL_DONGLE */
  3322. #ifdef DHD_POST_EAPOL_M1_AFTER_ROAM_EVT
  3323. /* fall through */
  3324. ifp = dhd_get_ifp(dhd_pub, event->ifidx);
  3325. if (ifp) {
  3326. ifp->recv_reassoc_evt = FALSE;
  3327. ifp->post_roam_evt = FALSE;
  3328. }
  3329. #endif /* DHD_POST_EAPOL_M1_AFTER_ROAM_EVT */
  3330. /* fall through */
  3331. default:
  3332. *ifidx = dhd_ifname2idx(dhd_pub->info, event->ifname);
  3333. /* push up to external supp/auth */
  3334. dhd_event(dhd_pub->info, (char *)pvt_data, evlen, *ifidx);
  3335. DHD_TRACE(("%s: MAC event %d, flags %x, status %x\n",
  3336. __FUNCTION__, type, flags, status));
  3337. BCM_REFERENCE(flags);
  3338. BCM_REFERENCE(status);
  3339. BCM_REFERENCE(reason);
  3340. break;
  3341. }
  3342. #if defined(STBAP)
3343. /* For routers, EAPD acts on these events.
3344. * Overwrite the interface name so that the event is pushed
3345. * to the host with its registered interface name.
3346. */
  3347. memcpy(pvt_data->event.ifname, dhd_ifname(dhd_pub, *ifidx), IFNAMSIZ);
  3348. #endif // endif
  3349. #ifdef DHD_STATUS_LOGGING
  3350. if (dhd_pub->statlog) {
  3351. dhd_statlog_process_event(dhd_pub, type, *ifidx,
  3352. status, reason, flags);
  3353. }
  3354. #endif /* DHD_STATUS_LOGGING */
  3355. #ifdef SHOW_EVENTS
  3356. if (DHD_FWLOG_ON() || DHD_EVENT_ON()) {
  3357. wl_show_host_event(dhd_pub, event,
  3358. (void *)event_data, raw_event, dhd_pub->enable_log);
  3359. }
  3360. #endif /* SHOW_EVENTS */
  3361. return (BCME_OK);
  3362. } /* wl_process_host_event */
  3363. int
  3364. wl_host_event(dhd_pub_t *dhd_pub, int *ifidx, void *pktdata, uint pktlen,
  3365. wl_event_msg_t *event, void **data_ptr, void *raw_event)
  3366. {
  3367. return wl_process_host_event(dhd_pub, ifidx, pktdata, pktlen, event, data_ptr,
  3368. raw_event);
  3369. }
  3370. void
  3371. dhd_print_buf(void *pbuf, int len, int bytes_per_line)
  3372. {
  3373. #ifdef DHD_DEBUG
  3374. int i, j = 0;
  3375. unsigned char *buf = pbuf;
  3376. if (bytes_per_line == 0) {
  3377. bytes_per_line = len;
  3378. }
  3379. for (i = 0; i < len; i++) {
  3380. printf("%2.2x", *buf++);
  3381. j++;
  3382. if (j == bytes_per_line) {
  3383. printf("\n");
  3384. j = 0;
  3385. } else {
  3386. printf(":");
  3387. }
  3388. }
  3389. printf("\n");
  3390. #endif /* DHD_DEBUG */
  3391. }
  3392. #ifndef strtoul
  3393. #define strtoul(nptr, endptr, base) bcm_strtoul((nptr), (endptr), (base))
  3394. #endif // endif
  3395. #if defined(PKT_FILTER_SUPPORT) || defined(DHD_PKT_LOGGING)
3396. /* Convert a user-supplied hex string (0x...) into binary bytes; returns the byte count */
  3397. int
  3398. wl_pattern_atoh(char *src, char *dst)
  3399. {
  3400. int i;
  3401. if (strncmp(src, "0x", 2) != 0 &&
  3402. strncmp(src, "0X", 2) != 0) {
  3403. DHD_ERROR(("Mask invalid format. Needs to start with 0x\n"));
  3404. return -1;
  3405. }
  3406. src = src + 2; /* Skip past 0x */
  3407. if (strlen(src) % 2 != 0) {
  3408. DHD_ERROR(("Mask invalid format. Needs to be of even length\n"));
  3409. return -1;
  3410. }
  3411. for (i = 0; *src != '\0'; i++) {
  3412. char num[3];
  3413. bcm_strncpy_s(num, sizeof(num), src, 2);
  3414. num[2] = '\0';
  3415. dst[i] = (uint8)strtoul(num, NULL, 16);
  3416. src += 2;
  3417. }
  3418. return i;
  3419. }
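/*
 * Example (illustrative): wl_pattern_atoh("0x00ff", dst) stores
 * {0x00, 0xff} into dst and returns 2. pattern_atoh_len() below performs
 * the same conversion but refuses to write more than 'len' bytes.
 */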
  3420. int
  3421. pattern_atoh_len(char *src, char *dst, int len)
  3422. {
  3423. int i;
  3424. if (strncmp(src, "0x", HD_PREFIX_SIZE) != 0 &&
  3425. strncmp(src, "0X", HD_PREFIX_SIZE) != 0) {
  3426. DHD_ERROR(("Mask invalid format. Needs to start with 0x\n"));
  3427. return -1;
  3428. }
  3429. src = src + HD_PREFIX_SIZE; /* Skip past 0x */
  3430. if (strlen(src) % HD_BYTE_SIZE != 0) {
  3431. DHD_ERROR(("Mask invalid format. Needs to be of even length\n"));
  3432. return -1;
  3433. }
  3434. for (i = 0; *src != '\0'; i++) {
  3435. char num[HD_BYTE_SIZE + 1];
  3436. if (i > len - 1) {
  3437. DHD_ERROR(("pattern not in range, idx: %d len: %d\n", i, len));
  3438. return -1;
  3439. }
  3440. bcm_strncpy_s(num, sizeof(num), src, HD_BYTE_SIZE);
  3441. num[HD_BYTE_SIZE] = '\0';
  3442. dst[i] = (uint8)strtoul(num, NULL, 16);
  3443. src += HD_BYTE_SIZE;
  3444. }
  3445. return i;
  3446. }
  3447. #endif /* PKT_FILTER_SUPPORT || DHD_PKT_LOGGING */
  3448. #ifdef PKT_FILTER_SUPPORT
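/*
 * dhd_pktfilter_offload_enable() builds a "pkt_filter_enable" iovar
 * buffer (the name string followed by a wl_pkt_filter_enable_t holding
 * the filter id and enable flag) and issues it via WLC_SET_VAR. If the
 * first attempt fails it re-installs the filters with
 * dhd_set_packet_filter() and retries once, then programs
 * "pkt_filter_mode" with the requested master mode.
 */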
  3449. void
  3450. dhd_pktfilter_offload_enable(dhd_pub_t * dhd, char *arg, int enable, int master_mode)
  3451. {
  3452. char *argv[8];
  3453. int i = 0;
  3454. const char *str;
  3455. int buf_len;
  3456. int str_len;
  3457. char *arg_save = 0, *arg_org = 0;
  3458. int rc;
  3459. char buf[32] = {0};
  3460. wl_pkt_filter_enable_t enable_parm;
  3461. wl_pkt_filter_enable_t * pkt_filterp;
  3462. if (!arg)
  3463. return;
  3464. if (!(arg_save = MALLOC(dhd->osh, strlen(arg) + 1))) {
  3465. DHD_ERROR(("%s: malloc failed\n", __FUNCTION__));
  3466. goto fail;
  3467. }
  3468. arg_org = arg_save;
  3469. memcpy(arg_save, arg, strlen(arg) + 1);
  3470. argv[i] = bcmstrtok(&arg_save, " ", 0);
  3471. i = 0;
  3472. if (argv[i] == NULL) {
  3473. DHD_ERROR(("No args provided\n"));
  3474. goto fail;
  3475. }
  3476. str = "pkt_filter_enable";
  3477. str_len = strlen(str);
  3478. bcm_strncpy_s(buf, sizeof(buf) - 1, str, sizeof(buf) - 1);
  3479. buf[ sizeof(buf) - 1 ] = '\0';
  3480. buf_len = str_len + 1;
  3481. pkt_filterp = (wl_pkt_filter_enable_t *)(buf + str_len + 1);
  3482. /* Parse packet filter id. */
  3483. enable_parm.id = htod32(strtoul(argv[i], NULL, 0));
  3484. /* Parse enable/disable value. */
  3485. enable_parm.enable = htod32(enable);
  3486. buf_len += sizeof(enable_parm);
  3487. memcpy((char *)pkt_filterp,
  3488. &enable_parm,
  3489. sizeof(enable_parm));
  3490. /* Enable/disable the specified filter. */
  3491. rc = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, buf, buf_len, TRUE, 0);
  3492. rc = rc >= 0 ? 0 : rc;
  3493. if (rc) {
  3494. DHD_TRACE(("%s: failed to add pktfilter %s, retcode = %d\n",
  3495. __FUNCTION__, arg, rc));
  3496. dhd_set_packet_filter(dhd);
  3497. rc = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, buf, buf_len, TRUE, 0);
  3498. rc = rc >= 0 ? 0 : rc;
  3499. if (rc) {
  3500. DHD_TRACE_HW4(("%s: 2nd retry failed to add pktfilter %s, retcode = %d\n",
  3501. __FUNCTION__, arg, rc));
  3502. } else {
  3503. DHD_TRACE_HW4(("%s: 2nd retry successfully added pktfilter %s\n",
  3504. __FUNCTION__, arg));
  3505. }
  3506. }
  3507. else
  3508. DHD_TRACE(("%s: successfully added pktfilter %s\n",
  3509. __FUNCTION__, arg));
3510. /* Control the master mode */
  3511. rc = dhd_wl_ioctl_set_intiovar(dhd, "pkt_filter_mode",
  3512. master_mode, WLC_SET_VAR, TRUE, 0);
  3513. rc = rc >= 0 ? 0 : rc;
  3514. if (rc)
3515. DHD_TRACE(("%s: failed to set pkt_filter_mode %d, retcode = %d\n",
3516. __FUNCTION__, master_mode, rc));
  3517. fail:
  3518. if (arg_org)
  3519. MFREE(dhd->osh, arg_org, strlen(arg) + 1);
  3520. }
  3521. /* Packet filter section: extended filters have named offsets, add table here */
  3522. typedef struct {
  3523. char *name;
  3524. uint16 base;
  3525. } wl_pfbase_t;
  3526. static wl_pfbase_t basenames[] = { WL_PKT_FILTER_BASE_NAMES };
  3527. static int
  3528. wl_pkt_filter_base_parse(char *name)
  3529. {
  3530. uint i;
  3531. char *bname, *uname;
  3532. for (i = 0; i < ARRAYSIZE(basenames); i++) {
  3533. bname = basenames[i].name;
  3534. for (uname = name; *uname; bname++, uname++) {
  3535. if (*bname != bcm_toupper(*uname)) {
  3536. break;
  3537. }
  3538. }
  3539. if (!*uname && !*bname) {
  3540. break;
  3541. }
  3542. }
  3543. if (i < ARRAYSIZE(basenames)) {
  3544. return basenames[i].base;
  3545. } else {
  3546. return -1;
  3547. }
  3548. }
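/*
 * dhd_pktfilter_offload_set() parses a space-separated filter spec of
 * the form "<id> <polarity> <type> ..." into a "pkt_filter_add" iovar.
 * Type 0/1 filters take "<offset> <mask> <pattern>", where mask and
 * pattern must be the same length - an illustrative spec would be
 * "100 0 0 0 0xFFFFFF 0x01005E". Type 2/6 filters take a list of
 * "[base:]offset mask pattern" elements, where 'base' may be one of the
 * named offsets resolved by wl_pkt_filter_base_parse() above.
 */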
  3549. void
  3550. dhd_pktfilter_offload_set(dhd_pub_t * dhd, char *arg)
  3551. {
  3552. const char *str;
  3553. wl_pkt_filter_t pkt_filter;
  3554. wl_pkt_filter_t *pkt_filterp;
  3555. int buf_len;
  3556. int str_len;
  3557. int rc = -1;
  3558. uint32 mask_size;
  3559. uint32 pattern_size;
  3560. char *argv[MAXPKT_ARG] = {0}, * buf = 0;
  3561. int i = 0;
  3562. char *arg_save = 0, *arg_org = 0;
  3563. if (!arg)
  3564. return;
  3565. if (!(arg_save = MALLOC(dhd->osh, strlen(arg) + 1))) {
  3566. DHD_ERROR(("%s: malloc failed\n", __FUNCTION__));
  3567. goto fail;
  3568. }
  3569. arg_org = arg_save;
  3570. if (!(buf = MALLOC(dhd->osh, MAX_PKTFLT_BUF_SIZE))) {
  3571. DHD_ERROR(("%s: malloc failed\n", __FUNCTION__));
  3572. goto fail;
  3573. }
  3574. memset(buf, 0, MAX_PKTFLT_BUF_SIZE);
  3575. memcpy(arg_save, arg, strlen(arg) + 1);
  3576. if (strlen(arg) > MAX_PKTFLT_BUF_SIZE) {
3577. DHD_ERROR(("Not enough buffer %d < %d\n", (int)strlen(arg), (int)MAX_PKTFLT_BUF_SIZE));
  3578. goto fail;
  3579. }
  3580. argv[i] = bcmstrtok(&arg_save, " ", 0);
  3581. while (argv[i++]) {
  3582. if (i >= MAXPKT_ARG) {
  3583. DHD_ERROR(("Invalid args provided\n"));
  3584. goto fail;
  3585. }
  3586. argv[i] = bcmstrtok(&arg_save, " ", 0);
  3587. }
  3588. i = 0;
  3589. if (argv[i] == NULL) {
  3590. DHD_ERROR(("No args provided\n"));
  3591. goto fail;
  3592. }
  3593. str = "pkt_filter_add";
  3594. str_len = strlen(str);
  3595. bcm_strncpy_s(buf, MAX_PKTFLT_BUF_SIZE, str, str_len);
  3596. buf[ str_len ] = '\0';
  3597. buf_len = str_len + 1;
  3598. pkt_filterp = (wl_pkt_filter_t *) (buf + str_len + 1);
  3599. /* Parse packet filter id. */
  3600. pkt_filter.id = htod32(strtoul(argv[i], NULL, 0));
  3601. if (argv[++i] == NULL) {
  3602. DHD_ERROR(("Polarity not provided\n"));
  3603. goto fail;
  3604. }
  3605. /* Parse filter polarity. */
  3606. pkt_filter.negate_match = htod32(strtoul(argv[i], NULL, 0));
  3607. if (argv[++i] == NULL) {
  3608. DHD_ERROR(("Filter type not provided\n"));
  3609. goto fail;
  3610. }
  3611. /* Parse filter type. */
  3612. pkt_filter.type = htod32(strtoul(argv[i], NULL, 0));
  3613. if ((pkt_filter.type == 0) || (pkt_filter.type == 1)) {
  3614. if (argv[++i] == NULL) {
  3615. DHD_ERROR(("Offset not provided\n"));
  3616. goto fail;
  3617. }
  3618. /* Parse pattern filter offset. */
  3619. pkt_filter.u.pattern.offset = htod32(strtoul(argv[i], NULL, 0));
  3620. if (argv[++i] == NULL) {
  3621. DHD_ERROR(("Bitmask not provided\n"));
  3622. goto fail;
  3623. }
  3624. /* Parse pattern filter mask. */
  3625. rc = wl_pattern_atoh(argv[i],
  3626. (char *) pkt_filterp->u.pattern.mask_and_pattern);
  3627. if (rc == -1) {
  3628. DHD_ERROR(("Rejecting: %s\n", argv[i]));
  3629. goto fail;
  3630. }
  3631. mask_size = htod32(rc);
  3632. if (argv[++i] == NULL) {
  3633. DHD_ERROR(("Pattern not provided\n"));
  3634. goto fail;
  3635. }
  3636. /* Parse pattern filter pattern. */
  3637. rc = wl_pattern_atoh(argv[i],
  3638. (char *) &pkt_filterp->u.pattern.mask_and_pattern[mask_size]);
  3639. if (rc == -1) {
  3640. DHD_ERROR(("Rejecting: %s\n", argv[i]));
  3641. goto fail;
  3642. }
  3643. pattern_size = htod32(rc);
  3644. if (mask_size != pattern_size) {
  3645. DHD_ERROR(("Mask and pattern not the same size\n"));
  3646. goto fail;
  3647. }
  3648. pkt_filter.u.pattern.size_bytes = mask_size;
  3649. buf_len += WL_PKT_FILTER_FIXED_LEN;
  3650. buf_len += (WL_PKT_FILTER_PATTERN_FIXED_LEN + 2 * mask_size);
3651. /* Filter attributes are set in a local variable (pkt_filter), and
3652. * then memcpy'ed into the ioctl buffer (pkt_filterp) since there is no
3653. * guarantee that the buffer is properly aligned.
3654. */
  3655. memcpy((char *)pkt_filterp,
  3656. &pkt_filter,
  3657. WL_PKT_FILTER_FIXED_LEN + WL_PKT_FILTER_PATTERN_FIXED_LEN);
  3658. } else if ((pkt_filter.type == 2) || (pkt_filter.type == 6)) {
  3659. int list_cnt = 0;
  3660. char *endptr = NULL;
  3661. wl_pkt_filter_pattern_listel_t *pf_el =
  3662. (wl_pkt_filter_pattern_listel_t *)&pkt_filterp->u.patlist.patterns[0];
  3663. while (argv[++i] != NULL) {
  3664. /* Check valid buffer size. */
  3665. if ((buf_len + MAX_PKTFLT_FIXED_BUF_SIZE) > MAX_PKTFLT_BUF_SIZE) {
  3666. DHD_ERROR(("buffer over length MAX_PKTFLT_FIXED_BUF_SIZE\n"));
  3667. goto fail;
  3668. }
  3669. /* Parse pattern filter base and offset. */
  3670. if (bcm_isdigit(*argv[i])) {
  3671. /* Numeric base */
  3672. rc = strtoul(argv[i], &endptr, 0);
  3673. } else {
  3674. endptr = strchr(argv[i], ':');
  3675. if (endptr) {
  3676. *endptr = '\0';
  3677. rc = wl_pkt_filter_base_parse(argv[i]);
  3678. if (rc == -1) {
  3679. printf("Invalid base %s\n", argv[i]);
  3680. goto fail;
  3681. }
  3682. *endptr = ':';
  3683. }
  3684. }
  3685. if (endptr == NULL) {
  3686. printf("Invalid [base:]offset format: %s\n", argv[i]);
  3687. goto fail;
  3688. }
  3689. if (*endptr == ':') {
  3690. pf_el->base_offs = htod16(rc);
  3691. rc = strtoul(endptr + 1, &endptr, 0);
  3692. } else {
  3693. /* Must have had a numeric offset only */
  3694. pf_el->base_offs = htod16(0);
  3695. }
  3696. if (*endptr) {
  3697. printf("Invalid [base:]offset format: %s\n", argv[i]);
  3698. goto fail;
  3699. }
  3700. if (rc > 0x0000FFFF) {
  3701. printf("Offset too large\n");
  3702. goto fail;
  3703. }
  3704. pf_el->rel_offs = htod16(rc);
  3705. /* Clear match_flag (may be set in parsing which follows) */
  3706. pf_el->match_flags = htod16(0);
  3707. /* Parse pattern filter mask and pattern directly into ioctl buffer */
  3708. if (argv[++i] == NULL) {
  3709. printf("Bitmask not provided\n");
  3710. goto fail;
  3711. }
  3712. rc = wl_pattern_atoh(argv[i], (char*)pf_el->mask_and_data);
  3713. if ((rc == -1) || (rc > MAX_PKTFLT_FIXED_PATTERN_SIZE)) {
  3714. printf("Rejecting: %s\n", argv[i]);
  3715. goto fail;
  3716. }
  3717. mask_size = htod16(rc);
  3718. if (argv[++i] == NULL) {
  3719. printf("Pattern not provided\n");
  3720. goto fail;
  3721. }
  3722. if (*argv[i] == '!') {
  3723. pf_el->match_flags =
  3724. htod16(WL_PKT_FILTER_MFLAG_NEG);
  3725. (argv[i])++;
  3726. }
  3727. if (*argv[i] == '\0') {
  3728. printf("Pattern not provided\n");
  3729. goto fail;
  3730. }
  3731. rc = wl_pattern_atoh(argv[i], (char*)&pf_el->mask_and_data[rc]);
  3732. if ((rc == -1) || (rc > MAX_PKTFLT_FIXED_PATTERN_SIZE)) {
  3733. printf("Rejecting: %s\n", argv[i]);
  3734. goto fail;
  3735. }
  3736. pattern_size = htod16(rc);
  3737. if (mask_size != pattern_size) {
  3738. printf("Mask and pattern not the same size\n");
  3739. goto fail;
  3740. }
  3741. pf_el->size_bytes = mask_size;
  3742. /* Account for the size of this pattern element */
  3743. buf_len += WL_PKT_FILTER_PATTERN_LISTEL_FIXED_LEN + 2 * rc;
  3744. /* Move to next element location in ioctl buffer */
  3745. pf_el = (wl_pkt_filter_pattern_listel_t*)
  3746. ((uint8*)pf_el + WL_PKT_FILTER_PATTERN_LISTEL_FIXED_LEN + 2 * rc);
  3747. /* Count list element */
  3748. list_cnt++;
  3749. }
  3750. /* Account for initial fixed size, and copy initial fixed fields */
  3751. buf_len += WL_PKT_FILTER_FIXED_LEN + WL_PKT_FILTER_PATTERN_LIST_FIXED_LEN;
  3752. if (buf_len > MAX_PKTFLT_BUF_SIZE) {
  3753. DHD_ERROR(("buffer over length MAX_PKTFLT_BUF_SIZE\n"));
  3754. goto fail;
  3755. }
  3756. /* Update list count and total size */
  3757. pkt_filter.u.patlist.list_cnt = list_cnt;
  3758. pkt_filter.u.patlist.PAD1[0] = 0;
  3759. pkt_filter.u.patlist.totsize = buf + buf_len - (char*)pkt_filterp;
  3760. pkt_filter.u.patlist.totsize -= WL_PKT_FILTER_FIXED_LEN;
  3761. memcpy((char *)pkt_filterp, &pkt_filter,
  3762. WL_PKT_FILTER_FIXED_LEN + WL_PKT_FILTER_PATTERN_LIST_FIXED_LEN);
  3763. } else {
  3764. DHD_ERROR(("Invalid filter type %d\n", pkt_filter.type));
  3765. goto fail;
  3766. }
  3767. rc = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, buf, buf_len, TRUE, 0);
  3768. rc = rc >= 0 ? 0 : rc;
  3769. if (rc)
  3770. DHD_TRACE(("%s: failed to add pktfilter %s, retcode = %d\n",
  3771. __FUNCTION__, arg, rc));
  3772. else
  3773. DHD_TRACE(("%s: successfully added pktfilter %s\n",
  3774. __FUNCTION__, arg));
  3775. fail:
  3776. if (arg_org)
  3777. MFREE(dhd->osh, arg_org, strlen(arg) + 1);
  3778. if (buf)
  3779. MFREE(dhd->osh, buf, MAX_PKTFLT_BUF_SIZE);
  3780. }
  3781. void
  3782. dhd_pktfilter_offload_delete(dhd_pub_t *dhd, int id)
  3783. {
  3784. int ret;
  3785. ret = dhd_wl_ioctl_set_intiovar(dhd, "pkt_filter_delete",
  3786. id, WLC_SET_VAR, TRUE, 0);
  3787. if (ret < 0) {
  3788. DHD_ERROR(("%s: Failed to delete filter ID:%d, ret=%d\n",
  3789. __FUNCTION__, id, ret));
  3790. }
  3791. }
  3792. #endif /* PKT_FILTER_SUPPORT */
  3793. /* ========================== */
  3794. /* ==== ARP OFFLOAD SUPPORT = */
  3795. /* ========================== */
  3796. #ifdef ARP_OFFLOAD_SUPPORT
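/*
 * ARP offload helpers: "arp_ol" selects the offload mode, "arpoe" turns
 * the offload agent on or off, and the "arp_hostip" / "arp_hostip_clear"
 * / "arp_table_clear" iovars maintain the host IP and ARP tables in the
 * dongle. For arp_version 1 firmware the per-interface index is ignored
 * (forced to 0) by the helpers below.
 */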
  3797. void
  3798. dhd_arp_offload_set(dhd_pub_t * dhd, int arp_mode)
  3799. {
  3800. int retcode;
  3801. retcode = dhd_wl_ioctl_set_intiovar(dhd, "arp_ol",
  3802. arp_mode, WLC_SET_VAR, TRUE, 0);
  3803. retcode = retcode >= 0 ? 0 : retcode;
  3804. if (retcode)
  3805. DHD_TRACE(("%s: failed to set ARP offload mode to 0x%x, retcode = %d\n",
  3806. __FUNCTION__, arp_mode, retcode));
  3807. else
  3808. DHD_TRACE(("%s: successfully set ARP offload mode to 0x%x\n",
  3809. __FUNCTION__, arp_mode));
  3810. }
  3811. void
  3812. dhd_arp_offload_enable(dhd_pub_t * dhd, int arp_enable)
  3813. {
  3814. int retcode;
  3815. #ifdef WL_CFG80211
3816. /* Do not enable ARP offload when non-STA (virtual) interfaces are active */
  3817. if (arp_enable &&
  3818. (wl_cfg80211_check_vif_in_use(dhd_linux_get_primary_netdev(dhd)))) {
  3819. DHD_TRACE(("%s: Virtual interfaces active, ignore arp offload request \n",
  3820. __FUNCTION__));
  3821. return;
  3822. }
  3823. #endif /* WL_CFG80211 */
  3824. retcode = dhd_wl_ioctl_set_intiovar(dhd, "arpoe",
  3825. arp_enable, WLC_SET_VAR, TRUE, 0);
  3826. retcode = retcode >= 0 ? 0 : retcode;
  3827. if (retcode)
  3828. DHD_TRACE(("%s: failed to enabe ARP offload to %d, retcode = %d\n",
  3829. __FUNCTION__, arp_enable, retcode));
  3830. else
  3831. DHD_TRACE(("%s: successfully enabed ARP offload to %d\n",
  3832. __FUNCTION__, arp_enable));
  3833. if (arp_enable) {
  3834. uint32 version;
  3835. retcode = dhd_wl_ioctl_get_intiovar(dhd, "arp_version",
  3836. &version, WLC_GET_VAR, FALSE, 0);
  3837. if (retcode) {
  3838. DHD_INFO(("%s: fail to get version (maybe version 1:retcode = %d\n",
  3839. __FUNCTION__, retcode));
  3840. dhd->arp_version = 1;
  3841. }
  3842. else {
  3843. DHD_INFO(("%s: ARP Version= %x\n", __FUNCTION__, version));
  3844. dhd->arp_version = version;
  3845. }
  3846. }
  3847. }
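/*
 * Illustrative call sequence (sketch only, not taken from the original file):
 * a typical suspend path programs the host IP first, then enables the offload
 * engine and selects the agent mode, e.g.
 *
 *   dhd_arp_offload_add_ip(dhd, ipv4_addr, ifidx);
 *   dhd_arp_offload_enable(dhd, TRUE);
 *   dhd_arp_offload_set(dhd, arp_mode);   // arp_ol mode bits are build-specific
 *
 * The exact ordering and the arp_ol bit values are driver/OS specific; this
 * only shows how the helpers in this block relate to each other.
 */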
  3848. void
  3849. dhd_aoe_arp_clr(dhd_pub_t *dhd, int idx)
  3850. {
  3851. int ret = 0;
  3852. if (dhd == NULL) return;
  3853. if (dhd->arp_version == 1)
  3854. idx = 0;
  3855. ret = dhd_iovar(dhd, idx, "arp_table_clear", NULL, 0, NULL, 0, TRUE);
  3856. if (ret < 0)
  3857. DHD_ERROR(("%s failed code %d\n", __FUNCTION__, ret));
  3858. }
  3859. void
  3860. dhd_aoe_hostip_clr(dhd_pub_t *dhd, int idx)
  3861. {
  3862. int ret = 0;
  3863. if (dhd == NULL) return;
  3864. if (dhd->arp_version == 1)
  3865. idx = 0;
  3866. ret = dhd_iovar(dhd, idx, "arp_hostip_clear", NULL, 0, NULL, 0, TRUE);
  3867. if (ret < 0)
  3868. DHD_ERROR(("%s failed code %d\n", __FUNCTION__, ret));
  3869. }
  3870. void
  3871. dhd_arp_offload_add_ip(dhd_pub_t *dhd, uint32 ipaddr, int idx)
  3872. {
  3873. int ret;
  3874. if (dhd == NULL) return;
  3875. if (dhd->arp_version == 1)
  3876. idx = 0;
  3877. ret = dhd_iovar(dhd, idx, "arp_hostip", (char *)&ipaddr, sizeof(ipaddr),
  3878. NULL, 0, TRUE);
  3879. if (ret)
  3880. DHD_TRACE(("%s: ARP ip addr add failed, ret = %d\n", __FUNCTION__, ret));
  3881. else
  3882. DHD_TRACE(("%s: sARP H ipaddr entry added \n",
  3883. __FUNCTION__));
  3884. }
  3885. int
  3886. dhd_arp_get_arp_hostip_table(dhd_pub_t *dhd, void *buf, int buflen, int idx)
  3887. {
  3888. int ret, i;
  3889. uint32 *ptr32 = buf;
  3890. bool clr_bottom = FALSE;
  3891. if (!buf)
  3892. return -1;
  3893. if (dhd == NULL) return -1;
  3894. if (dhd->arp_version == 1)
  3895. idx = 0;
  3896. ret = dhd_iovar(dhd, idx, "arp_hostip", NULL, 0, (char *)buf, buflen,
  3897. FALSE);
  3898. if (ret) {
  3899. DHD_TRACE(("%s: ioctl WLC_GET_VAR error %d\n",
  3900. __FUNCTION__, ret));
  3901. return -1;
  3902. }
3903. /* Clean up the buffer: zero every entry after the first empty slot */
  3904. for (i = 0; i < MAX_IPV4_ENTRIES; i++) {
  3905. if (!clr_bottom) {
  3906. if (*ptr32 == 0)
  3907. clr_bottom = TRUE;
  3908. } else {
  3909. *ptr32 = 0;
  3910. }
  3911. ptr32++;
  3912. }
  3913. return 0;
  3914. }
  3915. #endif /* ARP_OFFLOAD_SUPPORT */
  3916. /*
  3917. * Neighbor Discovery Offload: enable NDO feature
  3918. * Called by ipv6 event handler when interface comes up/goes down
  3919. */
  3920. int
  3921. dhd_ndo_enable(dhd_pub_t * dhd, int ndo_enable)
  3922. {
  3923. int retcode;
  3924. if (dhd == NULL)
  3925. return -1;
  3926. #if defined(WL_CFG80211) && defined(WL_NAN)
  3927. if (wl_cfgnan_is_dp_active(dhd_linux_get_primary_netdev(dhd))) {
  3928. /* If nan dp is active, skip NDO */
  3929. DHD_INFO(("Active NAN DP, skip NDO\n"));
  3930. return 0;
  3931. }
  3932. #endif /* WL_CFG80211 && WL_NAN */
  3933. #ifdef WL_CFG80211
  3934. if (dhd->op_mode & DHD_FLAG_HOSTAP_MODE) {
  3935. /* NDO disable on STA+SOFTAP mode */
  3936. ndo_enable = FALSE;
  3937. }
  3938. #endif /* WL_CFG80211 */
  3939. retcode = dhd_wl_ioctl_set_intiovar(dhd, "ndoe",
  3940. ndo_enable, WLC_SET_VAR, TRUE, 0);
  3941. if (retcode)
  3942. DHD_ERROR(("%s: failed to enabe ndo to %d, retcode = %d\n",
  3943. __FUNCTION__, ndo_enable, retcode));
  3944. else
  3945. DHD_TRACE(("%s: successfully enabed ndo offload to %d\n",
  3946. __FUNCTION__, ndo_enable));
  3947. return retcode;
  3948. }
  3949. /*
3950. * Neighbor Discovery Offload: add a host IPv6 address
3951. * Called by ipv6 event handler when interface comes up
  3952. */
  3953. int
  3954. dhd_ndo_add_ip(dhd_pub_t *dhd, char* ipv6addr, int idx)
  3955. {
  3956. int iov_len = 0;
  3957. char iovbuf[DHD_IOVAR_BUF_SIZE];
  3958. int retcode;
  3959. if (dhd == NULL)
  3960. return -1;
  3961. iov_len = bcm_mkiovar("nd_hostip", (char *)ipv6addr,
  3962. IPV6_ADDR_LEN, iovbuf, sizeof(iovbuf));
  3963. if (!iov_len) {
  3964. DHD_ERROR(("%s: Insufficient iovar buffer size %zu \n",
  3965. __FUNCTION__, sizeof(iovbuf)));
  3966. return -1;
  3967. }
  3968. retcode = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, iov_len, TRUE, idx);
  3969. if (retcode)
  3970. DHD_ERROR(("%s: ndo ip addr add failed, retcode = %d\n",
  3971. __FUNCTION__, retcode));
  3972. else
  3973. DHD_TRACE(("%s: ndo ipaddr entry added \n",
  3974. __FUNCTION__));
  3975. return retcode;
  3976. }
  3977. #ifdef REVERSE_AIFSN
  3978. int
  3979. check_reverse_aifsn_condition(dhd_pub_t *dhd, struct net_device *ndev)
  3980. {
  3981. int iov_len = 0;
  3982. char iovbuf[DHD_IOVAR_BUF_SIZE];
  3983. edcf_acparam_t *ac_params = NULL;
  3984. int retcode;
  3985. u8 aci, aifsn;
  3986. int ifidx;
  3987. if (dhd == NULL)
  3988. return -1;
  3989. ifidx = dhd_net2idx(dhd->info, ndev);
  3990. if (ifidx == DHD_BAD_IF)
  3991. return -1;
  3992. dhd->aifsn_reverse = FALSE;
  3993. strcpy(iovbuf, "wme_ac_sta");
  3994. iov_len = sizeof(iovbuf);
  3995. retcode = dhd_wl_ioctl_cmd(dhd, WLC_GET_VAR, iovbuf, iov_len, FALSE, ifidx);
  3996. if (retcode) {
  3997. DHD_ERROR(("%s: could not get wme_ac_sta params(%d)\n\n",
  3998. __FUNCTION__, retcode));
  3999. return -1;
  4000. }
  4001. ac_params = (edcf_acparam_t *)iovbuf;
  4002. for (aci = 0; aci < AC_COUNT; aci++) {
  4003. aifsn = ac_params[aci].ACI & EDCF_AIFSN_MASK;
  4004. if (aci == AC_VI && aifsn == 10) {
  4005. DHD_ERROR(("[%s] Reverse AIFSN for AC_VI:10 \n", __FUNCTION__));
  4006. dhd->aifsn_reverse = TRUE;
  4007. break;
  4008. }
  4009. }
  4010. return 0;
  4011. }
  4012. #endif /* REVERSE_AIFSN */
  4013. /*
4014. * Neighbor Discovery Offload: clear host IPv6 addresses
4015. * Called by ipv6 event handler when interface goes down
  4016. */
  4017. int
  4018. dhd_ndo_remove_ip(dhd_pub_t *dhd, int idx)
  4019. {
  4020. int iov_len = 0;
  4021. char iovbuf[DHD_IOVAR_BUF_SIZE];
  4022. int retcode;
  4023. if (dhd == NULL)
  4024. return -1;
  4025. iov_len = bcm_mkiovar("nd_hostip_clear", NULL,
  4026. 0, iovbuf, sizeof(iovbuf));
  4027. if (!iov_len) {
  4028. DHD_ERROR(("%s: Insufficient iovar buffer size %zu \n",
  4029. __FUNCTION__, sizeof(iovbuf)));
  4030. return -1;
  4031. }
  4032. retcode = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, iov_len, TRUE, idx);
  4033. if (retcode)
  4034. DHD_ERROR(("%s: ndo ip addr remove failed, retcode = %d\n",
  4035. __FUNCTION__, retcode));
  4036. else
  4037. DHD_TRACE(("%s: ndo ipaddr entry removed \n",
  4038. __FUNCTION__));
  4039. return retcode;
  4040. }
  4041. /* Enhanced ND offload */
  4042. uint16
  4043. dhd_ndo_get_version(dhd_pub_t *dhdp)
  4044. {
  4045. char iovbuf[DHD_IOVAR_BUF_SIZE];
  4046. wl_nd_hostip_t ndo_get_ver;
  4047. int iov_len;
  4048. int retcode;
  4049. uint16 ver = 0;
  4050. if (dhdp == NULL) {
  4051. return BCME_ERROR;
  4052. }
  4053. memset(&iovbuf, 0, sizeof(iovbuf));
  4054. ndo_get_ver.version = htod16(WL_ND_HOSTIP_IOV_VER);
  4055. ndo_get_ver.op_type = htod16(WL_ND_HOSTIP_OP_VER);
  4056. ndo_get_ver.length = htod32(WL_ND_HOSTIP_FIXED_LEN + sizeof(uint16));
  4057. ndo_get_ver.u.version = 0;
  4058. iov_len = bcm_mkiovar("nd_hostip", (char *)&ndo_get_ver,
  4059. WL_ND_HOSTIP_FIXED_LEN + sizeof(uint16), iovbuf, sizeof(iovbuf));
  4060. if (!iov_len) {
  4061. DHD_ERROR(("%s: Insufficient iovar buffer size %zu \n",
  4062. __FUNCTION__, sizeof(iovbuf)));
  4063. return BCME_ERROR;
  4064. }
  4065. retcode = dhd_wl_ioctl_cmd(dhdp, WLC_GET_VAR, iovbuf, iov_len, FALSE, 0);
  4066. if (retcode) {
  4067. DHD_ERROR(("%s: failed, retcode = %d\n", __FUNCTION__, retcode));
  4068. /* ver iovar not supported. NDO version is 0 */
  4069. ver = 0;
  4070. } else {
  4071. wl_nd_hostip_t *ndo_ver_ret = (wl_nd_hostip_t *)iovbuf;
  4072. if ((dtoh16(ndo_ver_ret->version) == WL_ND_HOSTIP_IOV_VER) &&
  4073. (dtoh16(ndo_ver_ret->op_type) == WL_ND_HOSTIP_OP_VER) &&
  4074. (dtoh32(ndo_ver_ret->length) == WL_ND_HOSTIP_FIXED_LEN
  4075. + sizeof(uint16))) {
  4076. /* nd_hostip iovar version */
  4077. ver = dtoh16(ndo_ver_ret->u.version);
  4078. }
  4079. DHD_TRACE(("%s: successfully get version: %d\n", __FUNCTION__, ver));
  4080. }
  4081. return ver;
  4082. }
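/*
 * Note: dhd_ndo_get_version() returning 0 simply means the firmware does not
 * support the versioned "nd_hostip" interface (legacy ND offload). Callers of
 * dhd_ndo_add_ip_with_type()/dhd_ndo_remove_ip_by_*() below are expected to
 * check this first and fall back to dhd_ndo_add_ip()/dhd_ndo_remove_ip() on
 * version 0. (Caller behaviour described here is an assumption based on the
 * API shape, not something stated elsewhere in this file.)
 */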
  4083. int
  4084. dhd_ndo_add_ip_with_type(dhd_pub_t *dhdp, char *ipv6addr, uint8 type, int idx)
  4085. {
  4086. char iovbuf[DHD_IOVAR_BUF_SIZE];
  4087. wl_nd_hostip_t ndo_add_addr;
  4088. int iov_len;
  4089. int retcode;
  4090. if (dhdp == NULL || ipv6addr == 0) {
  4091. return BCME_ERROR;
  4092. }
  4093. /* wl_nd_hostip_t fixed param */
  4094. ndo_add_addr.version = htod16(WL_ND_HOSTIP_IOV_VER);
  4095. ndo_add_addr.op_type = htod16(WL_ND_HOSTIP_OP_ADD);
  4096. ndo_add_addr.length = htod32(WL_ND_HOSTIP_WITH_ADDR_LEN);
  4097. /* wl_nd_host_ip_addr_t param for add */
  4098. memcpy(&ndo_add_addr.u.host_ip.ip_addr, ipv6addr, IPV6_ADDR_LEN);
  4099. ndo_add_addr.u.host_ip.type = type;
  4100. iov_len = bcm_mkiovar("nd_hostip", (char *)&ndo_add_addr,
  4101. WL_ND_HOSTIP_WITH_ADDR_LEN, iovbuf, sizeof(iovbuf));
  4102. if (!iov_len) {
  4103. DHD_ERROR(("%s: Insufficient iovar buffer size %zu \n",
  4104. __FUNCTION__, sizeof(iovbuf)));
  4105. return BCME_ERROR;
  4106. }
  4107. retcode = dhd_wl_ioctl_cmd(dhdp, WLC_SET_VAR, iovbuf, iov_len, TRUE, idx);
  4108. if (retcode) {
  4109. DHD_ERROR(("%s: failed, retcode = %d\n", __FUNCTION__, retcode));
  4110. #ifdef NDO_CONFIG_SUPPORT
  4111. if (retcode == BCME_NORESOURCE) {
  4112. /* number of host ip addr exceeds FW capacity, Deactivate ND offload */
  4113. DHD_INFO(("%s: Host IP count exceed device capacity,"
  4114. "ND offload deactivated\n", __FUNCTION__));
  4115. dhdp->ndo_host_ip_overflow = TRUE;
  4116. dhd_ndo_enable(dhdp, FALSE);
  4117. }
  4118. #endif /* NDO_CONFIG_SUPPORT */
  4119. } else {
  4120. DHD_TRACE(("%s: successfully added: %d\n", __FUNCTION__, retcode));
  4121. }
  4122. return retcode;
  4123. }
  4124. int
  4125. dhd_ndo_remove_ip_by_addr(dhd_pub_t *dhdp, char *ipv6addr, int idx)
  4126. {
  4127. char iovbuf[DHD_IOVAR_BUF_SIZE];
  4128. wl_nd_hostip_t ndo_del_addr;
  4129. int iov_len;
  4130. int retcode;
  4131. if (dhdp == NULL || ipv6addr == 0) {
  4132. return BCME_ERROR;
  4133. }
  4134. /* wl_nd_hostip_t fixed param */
  4135. ndo_del_addr.version = htod16(WL_ND_HOSTIP_IOV_VER);
  4136. ndo_del_addr.op_type = htod16(WL_ND_HOSTIP_OP_DEL);
  4137. ndo_del_addr.length = htod32(WL_ND_HOSTIP_WITH_ADDR_LEN);
  4138. /* wl_nd_host_ip_addr_t param for del */
  4139. memcpy(&ndo_del_addr.u.host_ip.ip_addr, ipv6addr, IPV6_ADDR_LEN);
  4140. ndo_del_addr.u.host_ip.type = 0; /* don't care */
  4141. iov_len = bcm_mkiovar("nd_hostip", (char *)&ndo_del_addr,
  4142. WL_ND_HOSTIP_WITH_ADDR_LEN, iovbuf, sizeof(iovbuf));
  4143. if (!iov_len) {
  4144. DHD_ERROR(("%s: Insufficient iovar buffer size %zu \n",
  4145. __FUNCTION__, sizeof(iovbuf)));
  4146. return BCME_ERROR;
  4147. }
  4148. retcode = dhd_wl_ioctl_cmd(dhdp, WLC_SET_VAR, iovbuf, iov_len, TRUE, idx);
  4149. if (retcode) {
  4150. DHD_ERROR(("%s: failed, retcode = %d\n", __FUNCTION__, retcode));
  4151. } else {
  4152. DHD_TRACE(("%s: successfully removed: %d\n", __FUNCTION__, retcode));
  4153. }
  4154. return retcode;
  4155. }
  4156. int
  4157. dhd_ndo_remove_ip_by_type(dhd_pub_t *dhdp, uint8 type, int idx)
  4158. {
  4159. char iovbuf[DHD_IOVAR_BUF_SIZE];
  4160. wl_nd_hostip_t ndo_del_addr;
  4161. int iov_len;
  4162. int retcode;
  4163. if (dhdp == NULL) {
  4164. return BCME_ERROR;
  4165. }
  4166. /* wl_nd_hostip_t fixed param */
  4167. ndo_del_addr.version = htod16(WL_ND_HOSTIP_IOV_VER);
  4168. if (type == WL_ND_IPV6_ADDR_TYPE_UNICAST) {
  4169. ndo_del_addr.op_type = htod16(WL_ND_HOSTIP_OP_DEL_UC);
  4170. } else if (type == WL_ND_IPV6_ADDR_TYPE_ANYCAST) {
  4171. ndo_del_addr.op_type = htod16(WL_ND_HOSTIP_OP_DEL_AC);
  4172. } else {
  4173. return BCME_BADARG;
  4174. }
  4175. ndo_del_addr.length = htod32(WL_ND_HOSTIP_FIXED_LEN);
  4176. iov_len = bcm_mkiovar("nd_hostip", (char *)&ndo_del_addr, WL_ND_HOSTIP_FIXED_LEN,
  4177. iovbuf, sizeof(iovbuf));
  4178. if (!iov_len) {
  4179. DHD_ERROR(("%s: Insufficient iovar buffer size %zu \n",
  4180. __FUNCTION__, sizeof(iovbuf)));
  4181. return BCME_ERROR;
  4182. }
  4183. retcode = dhd_wl_ioctl_cmd(dhdp, WLC_SET_VAR, iovbuf, iov_len, TRUE, idx);
  4184. if (retcode) {
  4185. DHD_ERROR(("%s: failed, retcode = %d\n", __FUNCTION__, retcode));
  4186. } else {
  4187. DHD_TRACE(("%s: successfully removed: %d\n", __FUNCTION__, retcode));
  4188. }
  4189. return retcode;
  4190. }
  4191. int
  4192. dhd_ndo_unsolicited_na_filter_enable(dhd_pub_t *dhdp, int enable)
  4193. {
  4194. char iovbuf[DHD_IOVAR_BUF_SIZE];
  4195. int iov_len;
  4196. int retcode;
  4197. if (dhdp == NULL) {
  4198. return BCME_ERROR;
  4199. }
  4200. iov_len = bcm_mkiovar("nd_unsolicited_na_filter", (char *)&enable, sizeof(int),
  4201. iovbuf, sizeof(iovbuf));
  4202. if (!iov_len) {
  4203. DHD_ERROR(("%s: Insufficient iovar buffer size %zu \n",
  4204. __FUNCTION__, sizeof(iovbuf)));
  4205. return BCME_ERROR;
  4206. }
  4207. retcode = dhd_wl_ioctl_cmd(dhdp, WLC_SET_VAR, iovbuf, iov_len, TRUE, 0);
  4208. if (retcode)
  4209. DHD_ERROR(("%s: failed to enable Unsolicited NA filter to %d, retcode = %d\n",
  4210. __FUNCTION__, enable, retcode));
  4211. else {
  4212. DHD_TRACE(("%s: successfully enabled Unsolicited NA filter to %d\n",
  4213. __FUNCTION__, enable));
  4214. }
  4215. return retcode;
  4216. }
  4217. #ifdef SIMPLE_ISCAN
  4218. uint iscan_thread_id = 0;
  4219. iscan_buf_t * iscan_chain = 0;
  4220. iscan_buf_t *
  4221. dhd_iscan_allocate_buf(dhd_pub_t *dhd, iscan_buf_t **iscanbuf)
  4222. {
  4223. iscan_buf_t *iscanbuf_alloc = 0;
  4224. iscan_buf_t *iscanbuf_head;
  4225. DHD_ISCAN(("%s: Entered\n", __FUNCTION__));
  4226. dhd_iscan_lock();
  4227. iscanbuf_alloc = (iscan_buf_t*)MALLOC(dhd->osh, sizeof(iscan_buf_t));
  4228. if (iscanbuf_alloc == NULL)
  4229. goto fail;
  4230. iscanbuf_alloc->next = NULL;
  4231. iscanbuf_head = *iscanbuf;
  4232. DHD_ISCAN(("%s: addr of allocated node = 0x%X"
  4233. "addr of iscanbuf_head = 0x%X dhd = 0x%X\n",
  4234. __FUNCTION__, iscanbuf_alloc, iscanbuf_head, dhd));
  4235. if (iscanbuf_head == NULL) {
  4236. *iscanbuf = iscanbuf_alloc;
  4237. DHD_ISCAN(("%s: Head is allocated\n", __FUNCTION__));
  4238. goto fail;
  4239. }
  4240. while (iscanbuf_head->next)
  4241. iscanbuf_head = iscanbuf_head->next;
  4242. iscanbuf_head->next = iscanbuf_alloc;
  4243. fail:
  4244. dhd_iscan_unlock();
  4245. return iscanbuf_alloc;
  4246. }
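/*
 * Note on the control flow above: the "fail" label is also reached on the
 * successful first allocation (when the chain head was empty), so reaching it
 * does not by itself indicate an error; the caller must test the returned
 * pointer instead. The chain is a simple singly linked list protected by
 * dhd_iscan_lock()/dhd_iscan_unlock().
 */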
  4247. void
  4248. dhd_iscan_free_buf(void *dhdp, iscan_buf_t *iscan_delete)
  4249. {
  4250. iscan_buf_t *iscanbuf_free = 0;
  4251. iscan_buf_t *iscanbuf_prv = 0;
  4252. iscan_buf_t *iscanbuf_cur;
  4253. dhd_pub_t *dhd = dhd_bus_pub(dhdp);
  4254. DHD_ISCAN(("%s: Entered\n", __FUNCTION__));
  4255. dhd_iscan_lock();
  4256. iscanbuf_cur = iscan_chain;
  4257. /* If iscan_delete is null then delete the entire
  4258. * chain or else delete specific one provided
  4259. */
  4260. if (!iscan_delete) {
  4261. while (iscanbuf_cur) {
  4262. iscanbuf_free = iscanbuf_cur;
  4263. iscanbuf_cur = iscanbuf_cur->next;
  4264. iscanbuf_free->next = 0;
  4265. MFREE(dhd->osh, iscanbuf_free, sizeof(iscan_buf_t));
  4266. }
  4267. iscan_chain = 0;
  4268. } else {
  4269. while (iscanbuf_cur) {
  4270. if (iscanbuf_cur == iscan_delete)
  4271. break;
  4272. iscanbuf_prv = iscanbuf_cur;
  4273. iscanbuf_cur = iscanbuf_cur->next;
  4274. }
  4275. if (iscanbuf_prv)
  4276. iscanbuf_prv->next = iscan_delete->next;
  4277. iscan_delete->next = 0;
  4278. MFREE(dhd->osh, iscan_delete, sizeof(iscan_buf_t));
  4279. if (!iscanbuf_prv)
  4280. iscan_chain = 0;
  4281. }
  4282. dhd_iscan_unlock();
  4283. }
  4284. iscan_buf_t *
  4285. dhd_iscan_result_buf(void)
  4286. {
  4287. return iscan_chain;
  4288. }
  4289. int
  4290. dhd_iscan_issue_request(void * dhdp, wl_iscan_params_t *pParams, uint32 size)
  4291. {
  4292. int rc = -1;
  4293. dhd_pub_t *dhd = dhd_bus_pub(dhdp);
4294. char *buf = NULL; /* must start NULL: cleanUp tests it even when nothing was allocated */
  4295. char iovar[] = "iscan";
  4296. uint32 allocSize = 0;
  4297. wl_ioctl_t ioctl;
  4298. int len;
  4299. if (pParams) {
  4300. allocSize = (size + strlen(iovar) + 1);
  4301. if ((allocSize < size) || (allocSize < strlen(iovar)))
  4302. {
  4303. DHD_ERROR(("%s: overflow - allocation size too large %d < %d + %d!\n",
  4304. __FUNCTION__, allocSize, size, strlen(iovar)));
  4305. goto cleanUp;
  4306. }
  4307. buf = MALLOC(dhd->osh, allocSize);
  4308. if (buf == NULL)
  4309. {
  4310. DHD_ERROR(("%s: malloc of size %d failed!\n", __FUNCTION__, allocSize));
  4311. goto cleanUp;
  4312. }
  4313. ioctl.cmd = WLC_SET_VAR;
  4314. len = bcm_mkiovar(iovar, (char *)pParams, size, buf, allocSize);
  4315. if (len == 0) {
  4316. rc = BCME_BUFTOOSHORT;
  4317. goto cleanUp;
  4318. }
  4319. rc = dhd_wl_ioctl(dhd, 0, &ioctl, buf, len);
  4320. }
  4321. cleanUp:
  4322. if (buf) {
  4323. MFREE(dhd->osh, buf, allocSize);
  4324. }
  4325. return rc;
  4326. }
  4327. static int
  4328. dhd_iscan_get_partial_result(void *dhdp, uint *scan_count)
  4329. {
  4330. wl_iscan_results_t *list_buf;
  4331. wl_iscan_results_t list;
  4332. wl_scan_results_t *results;
  4333. iscan_buf_t *iscan_cur;
  4334. int status = -1;
  4335. dhd_pub_t *dhd = dhd_bus_pub(dhdp);
  4336. int rc;
  4337. wl_ioctl_t ioctl;
  4338. int len;
  4339. DHD_ISCAN(("%s: Enter\n", __FUNCTION__));
  4340. iscan_cur = dhd_iscan_allocate_buf(dhd, &iscan_chain);
  4341. if (!iscan_cur) {
  4342. DHD_ERROR(("%s: Failed to allocate node\n", __FUNCTION__));
  4343. dhd_iscan_free_buf(dhdp, 0);
  4344. dhd_iscan_request(dhdp, WL_SCAN_ACTION_ABORT);
  4345. dhd_ind_scan_confirm(dhdp, FALSE);
  4346. goto fail;
  4347. }
  4348. dhd_iscan_lock();
  4349. memset(iscan_cur->iscan_buf, 0, WLC_IW_ISCAN_MAXLEN);
  4350. list_buf = (wl_iscan_results_t*)iscan_cur->iscan_buf;
  4351. results = &list_buf->results;
  4352. results->buflen = WL_ISCAN_RESULTS_FIXED_SIZE;
  4353. results->version = 0;
  4354. results->count = 0;
  4355. memset(&list, 0, sizeof(list));
  4356. list.results.buflen = htod32(WLC_IW_ISCAN_MAXLEN);
  4357. len = bcm_mkiovar("iscanresults", (char *)&list, WL_ISCAN_RESULTS_FIXED_SIZE,
  4358. iscan_cur->iscan_buf, WLC_IW_ISCAN_MAXLEN);
  4359. if (len == 0) {
  4360. dhd_iscan_free_buf(dhdp, 0);
  4361. dhd_iscan_request(dhdp, WL_SCAN_ACTION_ABORT);
  4362. dhd_ind_scan_confirm(dhdp, FALSE);
  4363. status = BCME_BUFTOOSHORT;
  4364. goto fail;
  4365. }
  4366. ioctl.cmd = WLC_GET_VAR;
  4367. ioctl.set = FALSE;
  4368. rc = dhd_wl_ioctl(dhd, 0, &ioctl, iscan_cur->iscan_buf, WLC_IW_ISCAN_MAXLEN);
  4369. results->buflen = dtoh32(results->buflen);
  4370. results->version = dtoh32(results->version);
  4371. *scan_count = results->count = dtoh32(results->count);
  4372. status = dtoh32(list_buf->status);
  4373. DHD_ISCAN(("%s: Got %d resuls status = (%x)\n", __FUNCTION__, results->count, status));
  4374. dhd_iscan_unlock();
  4375. if (!(*scan_count)) {
  4376. /* TODO: race condition when FLUSH already called */
  4377. dhd_iscan_free_buf(dhdp, 0);
  4378. }
  4379. fail:
  4380. return status;
  4381. }
  4382. #endif /* SIMPLE_ISCAN */
  4383. /*
  4384. * returns = TRUE if associated, FALSE if not associated
  4385. */
  4386. bool dhd_is_associated(dhd_pub_t *dhd, uint8 ifidx, int *retval)
  4387. {
  4388. char bssid[6], zbuf[6];
  4389. int ret = -1;
  4390. bzero(bssid, 6);
  4391. bzero(zbuf, 6);
  4392. ret = dhd_wl_ioctl_cmd(dhd, WLC_GET_BSSID, (char *)&bssid,
  4393. ETHER_ADDR_LEN, FALSE, ifidx);
  4394. DHD_TRACE((" %s WLC_GET_BSSID ioctl res = %d\n", __FUNCTION__, ret));
  4395. if (ret == BCME_NOTASSOCIATED) {
  4396. DHD_TRACE(("%s: not associated! res:%d\n", __FUNCTION__, ret));
  4397. }
  4398. if (retval)
  4399. *retval = ret;
  4400. if (ret < 0)
  4401. return FALSE;
  4402. if ((memcmp(bssid, zbuf, ETHER_ADDR_LEN) == 0)) {
  4403. DHD_TRACE(("%s: WLC_GET_BSSID ioctl returned zero bssid\n", __FUNCTION__));
  4404. return FALSE;
  4405. }
  4406. return TRUE;
  4407. }
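/*
 * Minimal usage sketch (illustrative only): callers that also need the raw
 * ioctl status can pass a pointer for it, e.g.
 *
 *   int err;
 *   if (!dhd_is_associated(dhd, 0, &err))
 *       DHD_TRACE(("not associated, WLC_GET_BSSID err=%d\n", err));
 *
 * TRUE is returned only when WLC_GET_BSSID succeeds and yields a non-zero
 * BSSID.
 */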
  4408. /* Function to estimate possible DTIM_SKIP value */
  4409. #if defined(OEM_ANDROID) && defined(BCMPCIE)
  4410. int
  4411. dhd_get_suspend_bcn_li_dtim(dhd_pub_t *dhd, int *dtim_period, int *bcn_interval)
  4412. {
4413. int bcn_li_dtim = 1; /* default: no DTIM skip */
  4414. int ret = -1;
  4415. int allowed_skip_dtim_cnt = 0;
  4416. if (dhd->disable_dtim_in_suspend) {
  4417. DHD_ERROR(("%s Disable bcn_li_dtim in suspend\n", __FUNCTION__));
  4418. bcn_li_dtim = 0;
  4419. return bcn_li_dtim;
  4420. }
  4421. /* Check if associated */
  4422. if (dhd_is_associated(dhd, 0, NULL) == FALSE) {
  4423. DHD_TRACE(("%s NOT assoc ret %d\n", __FUNCTION__, ret));
  4424. return bcn_li_dtim;
  4425. }
  4426. if (dtim_period == NULL || bcn_interval == NULL)
  4427. return bcn_li_dtim;
  4428. /* read associated AP beacon interval */
  4429. if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_GET_BCNPRD,
  4430. bcn_interval, sizeof(*bcn_interval), FALSE, 0)) < 0) {
  4431. DHD_ERROR(("%s get beacon failed code %d\n", __FUNCTION__, ret));
  4432. return bcn_li_dtim;
  4433. }
  4434. /* read associated AP dtim setup */
  4435. if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_GET_DTIMPRD,
  4436. dtim_period, sizeof(*dtim_period), FALSE, 0)) < 0) {
  4437. DHD_ERROR(("%s failed code %d\n", __FUNCTION__, ret));
  4438. return bcn_li_dtim;
  4439. }
4440. /* if not associated just return */
  4441. if (*dtim_period == 0) {
  4442. return bcn_li_dtim;
  4443. }
  4444. if (dhd->max_dtim_enable) {
  4445. bcn_li_dtim =
  4446. (int) (MAX_DTIM_ALLOWED_INTERVAL / ((*dtim_period) * (*bcn_interval)));
  4447. if (bcn_li_dtim == 0) {
  4448. bcn_li_dtim = 1;
  4449. }
  4450. } else {
4451. /* attempt to use platform defined dtim skip interval */
  4452. bcn_li_dtim = dhd->suspend_bcn_li_dtim;
  4453. /* check if sta listen interval fits into AP dtim */
  4454. if (*dtim_period > CUSTOM_LISTEN_INTERVAL) {
4455. /* AP DTIM too big for our Listen Interval : no dtim skipping */
  4456. bcn_li_dtim = NO_DTIM_SKIP;
  4457. DHD_ERROR(("%s DTIM=%d > Listen=%d : too big ...\n",
  4458. __FUNCTION__, *dtim_period, CUSTOM_LISTEN_INTERVAL));
  4459. return bcn_li_dtim;
  4460. }
  4461. if (((*dtim_period) * (*bcn_interval) * bcn_li_dtim) > MAX_DTIM_ALLOWED_INTERVAL) {
  4462. allowed_skip_dtim_cnt =
  4463. MAX_DTIM_ALLOWED_INTERVAL / ((*dtim_period) * (*bcn_interval));
  4464. bcn_li_dtim =
  4465. (allowed_skip_dtim_cnt != 0) ? allowed_skip_dtim_cnt : NO_DTIM_SKIP;
  4466. }
  4467. if ((bcn_li_dtim * (*dtim_period)) > CUSTOM_LISTEN_INTERVAL) {
  4468. /* Round up dtim_skip to fit into STAs Listen Interval */
  4469. bcn_li_dtim = (int)(CUSTOM_LISTEN_INTERVAL / *dtim_period);
  4470. DHD_TRACE(("%s agjust dtim_skip as %d\n", __FUNCTION__, bcn_li_dtim));
  4471. }
  4472. }
  4473. DHD_ERROR(("%s beacon=%d bcn_li_dtim=%d DTIM=%d Listen=%d\n",
  4474. __FUNCTION__, *bcn_interval, bcn_li_dtim, *dtim_period, CUSTOM_LISTEN_INTERVAL));
  4475. return bcn_li_dtim;
  4476. }
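/*
 * Worked example (illustrative numbers only; the real CUSTOM_LISTEN_INTERVAL
 * and MAX_DTIM_ALLOWED_INTERVAL come from the build configuration):
 * with *bcn_interval = 100, *dtim_period = 2, suspend_bcn_li_dtim = 3,
 * CUSTOM_LISTEN_INTERVAL = 10 and MAX_DTIM_ALLOWED_INTERVAL = 900,
 * 2 * 100 * 3 = 600 <= 900 and 3 * 2 = 6 <= 10, so bcn_li_dtim stays 3 and
 * the STA wakes on every third DTIM while suspended. With max_dtim_enable
 * set, the same inputs give bcn_li_dtim = 900 / (2 * 100) = 4.
 */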
  4477. #else /* OEM_ANDROID && BCMPCIE */
  4478. int
  4479. dhd_get_suspend_bcn_li_dtim(dhd_pub_t *dhd)
  4480. {
4481. int bcn_li_dtim = 1; /* default: no DTIM skip */
  4482. int ret = -1;
  4483. int dtim_period = 0;
  4484. int ap_beacon = 0;
  4485. int allowed_skip_dtim_cnt = 0;
  4486. if (dhd->disable_dtim_in_suspend) {
  4487. DHD_ERROR(("%s Disable bcn_li_dtim in suspend\n", __FUNCTION__));
  4488. bcn_li_dtim = 0;
  4489. goto exit;
  4490. }
  4491. /* Check if associated */
  4492. if (dhd_is_associated(dhd, 0, NULL) == FALSE) {
  4493. DHD_TRACE(("%s NOT assoc ret %d\n", __FUNCTION__, ret));
  4494. goto exit;
  4495. }
  4496. /* read associated AP beacon interval */
  4497. if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_GET_BCNPRD,
  4498. &ap_beacon, sizeof(ap_beacon), FALSE, 0)) < 0) {
  4499. DHD_ERROR(("%s get beacon failed code %d\n", __FUNCTION__, ret));
  4500. goto exit;
  4501. }
  4502. /* read associated ap's dtim setup */
  4503. if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_GET_DTIMPRD,
  4504. &dtim_period, sizeof(dtim_period), FALSE, 0)) < 0) {
  4505. DHD_ERROR(("%s failed code %d\n", __FUNCTION__, ret));
  4506. goto exit;
  4507. }
4508. /* if not associated just exit */
  4509. if (dtim_period == 0) {
  4510. goto exit;
  4511. }
  4512. if (dhd->max_dtim_enable) {
  4513. bcn_li_dtim =
  4514. (int) (MAX_DTIM_ALLOWED_INTERVAL / (ap_beacon * dtim_period));
  4515. if (bcn_li_dtim == 0) {
  4516. bcn_li_dtim = 1;
  4517. }
  4518. } else {
4519. /* attempt to use platform defined dtim skip interval */
  4520. bcn_li_dtim = dhd->suspend_bcn_li_dtim;
  4521. /* check if sta listen interval fits into AP dtim */
  4522. if (dtim_period > CUSTOM_LISTEN_INTERVAL) {
4523. /* AP DTIM too big for our Listen Interval : no dtim skipping */
  4524. bcn_li_dtim = NO_DTIM_SKIP;
  4525. DHD_ERROR(("%s DTIM=%d > Listen=%d : too big ...\n",
  4526. __FUNCTION__, dtim_period, CUSTOM_LISTEN_INTERVAL));
  4527. goto exit;
  4528. }
  4529. if ((dtim_period * ap_beacon * bcn_li_dtim) > MAX_DTIM_ALLOWED_INTERVAL) {
  4530. allowed_skip_dtim_cnt =
  4531. MAX_DTIM_ALLOWED_INTERVAL / (dtim_period * ap_beacon);
  4532. bcn_li_dtim =
  4533. (allowed_skip_dtim_cnt != 0) ? allowed_skip_dtim_cnt : NO_DTIM_SKIP;
  4534. }
  4535. if ((bcn_li_dtim * dtim_period) > CUSTOM_LISTEN_INTERVAL) {
  4536. /* Round up dtim_skip to fit into STAs Listen Interval */
  4537. bcn_li_dtim = (int)(CUSTOM_LISTEN_INTERVAL / dtim_period);
  4538. DHD_TRACE(("%s agjust dtim_skip as %d\n", __FUNCTION__, bcn_li_dtim));
  4539. }
  4540. }
  4541. DHD_ERROR(("%s beacon=%d bcn_li_dtim=%d DTIM=%d Listen=%d\n",
  4542. __FUNCTION__, ap_beacon, bcn_li_dtim, dtim_period, CUSTOM_LISTEN_INTERVAL));
  4543. exit:
  4544. return bcn_li_dtim;
  4545. }
  4546. #endif /* OEM_ANDROID && BCMPCIE */
  4547. #ifdef CONFIG_SILENT_ROAM
  4548. int
  4549. dhd_sroam_set_mon(dhd_pub_t *dhd, bool set)
  4550. {
  4551. int ret = BCME_OK;
  4552. wlc_sroam_t *psroam;
  4553. wlc_sroam_info_t *sroam;
  4554. uint sroamlen = sizeof(*sroam) + SROAM_HDRLEN;
  4555. /* Check if associated */
  4556. if (dhd_is_associated(dhd, 0, NULL) == FALSE) {
  4557. DHD_TRACE(("%s NOT assoc\n", __FUNCTION__));
  4558. return ret;
  4559. }
  4560. if (set && (dhd->op_mode &
  4561. (DHD_FLAG_HOSTAP_MODE | DHD_FLAG_P2P_GC_MODE | DHD_FLAG_P2P_GO_MODE))) {
  4562. DHD_INFO((" Failed to set sroam %d, op_mode 0x%04x\n", set, dhd->op_mode));
  4563. return ret;
  4564. }
  4565. if (!dhd->sroam_turn_on) {
  4566. DHD_INFO((" Failed to set sroam %d, sroam turn %d\n", set, dhd->sroam_turn_on));
  4567. return ret;
  4568. }
  4569. psroam = (wlc_sroam_t *)MALLOCZ(dhd->osh, sroamlen);
  4570. if (!psroam) {
  4571. DHD_ERROR(("%s Fail to malloc buffer\n", __FUNCTION__));
  4572. return BCME_NOMEM;
  4573. }
  4574. ret = dhd_iovar(dhd, 0, "sroam", NULL, 0, (char *)psroam, sroamlen, FALSE);
  4575. if (ret < 0) {
  4576. DHD_ERROR(("%s Failed to Get sroam %d\n", __FUNCTION__, ret));
  4577. goto done;
  4578. }
  4579. if (psroam->ver != WLC_SILENT_ROAM_CUR_VER) {
  4580. ret = BCME_VERSION;
  4581. goto done;
  4582. }
  4583. sroam = (wlc_sroam_info_t *)psroam->data;
  4584. sroam->sroam_on = set;
  4585. DHD_INFO((" Silent roam monitor mode %s\n", set ? "On" : "Off"));
  4586. ret = dhd_iovar(dhd, 0, "sroam", (char *)psroam, sroamlen, NULL, 0, TRUE);
  4587. if (ret < 0) {
  4588. DHD_ERROR(("%s Failed to Set sroam %d\n", __FUNCTION__, ret));
  4589. }
  4590. done:
  4591. if (psroam) {
  4592. MFREE(dhd->osh, psroam, sroamlen);
  4593. }
  4594. return ret;
  4595. }
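/*
 * The function above follows a get/modify/set pattern on the "sroam" iovar:
 * it reads the current wlc_sroam_t blob, verifies WLC_SILENT_ROAM_CUR_VER,
 * flips only the sroam_on field and writes the whole blob back, so any other
 * firmware-maintained fields in the structure are preserved.
 */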
  4596. #endif /* CONFIG_SILENT_ROAM */
  4597. /* Check if the mode supports STA MODE */
  4598. bool dhd_support_sta_mode(dhd_pub_t *dhd)
  4599. {
  4600. #ifdef WL_CFG80211
  4601. if (!(dhd->op_mode & DHD_FLAG_STA_MODE))
  4602. return FALSE;
  4603. else
  4604. #endif /* WL_CFG80211 */
  4605. return TRUE;
  4606. }
  4607. #if defined(KEEP_ALIVE)
  4608. int dhd_keep_alive_onoff(dhd_pub_t *dhd)
  4609. {
  4610. char buf[32] = {0};
  4611. const char *str;
  4612. wl_mkeep_alive_pkt_t mkeep_alive_pkt = {0, 0, 0, 0, 0, {0}};
  4613. wl_mkeep_alive_pkt_t *mkeep_alive_pktp;
  4614. int buf_len;
  4615. int str_len;
  4616. int res = -1;
  4617. if (!dhd_support_sta_mode(dhd))
  4618. return res;
  4619. DHD_TRACE(("%s execution\n", __FUNCTION__));
  4620. str = "mkeep_alive";
  4621. str_len = strlen(str);
  4622. strncpy(buf, str, sizeof(buf) - 1);
  4623. buf[ sizeof(buf) - 1 ] = '\0';
  4624. mkeep_alive_pktp = (wl_mkeep_alive_pkt_t *) (buf + str_len + 1);
  4625. mkeep_alive_pkt.period_msec = CUSTOM_KEEP_ALIVE_SETTING;
  4626. buf_len = str_len + 1;
  4627. mkeep_alive_pkt.version = htod16(WL_MKEEP_ALIVE_VERSION);
  4628. mkeep_alive_pkt.length = htod16(WL_MKEEP_ALIVE_FIXED_LEN);
  4629. /* Setup keep alive zero for null packet generation */
  4630. mkeep_alive_pkt.keep_alive_id = 0;
  4631. mkeep_alive_pkt.len_bytes = 0;
  4632. buf_len += WL_MKEEP_ALIVE_FIXED_LEN;
  4633. bzero(mkeep_alive_pkt.data, sizeof(mkeep_alive_pkt.data));
  4634. /* Keep-alive attributes are set in local variable (mkeep_alive_pkt), and
  4635. * then memcpy'ed into buffer (mkeep_alive_pktp) since there is no
  4636. * guarantee that the buffer is properly aligned.
  4637. */
  4638. memcpy((char *)mkeep_alive_pktp, &mkeep_alive_pkt, WL_MKEEP_ALIVE_FIXED_LEN);
  4639. res = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, buf, buf_len, TRUE, 0);
  4640. return res;
  4641. }
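/*
 * Resulting iovar buffer layout for the null keep-alive programmed above
 * (descriptive note based on the code in this function):
 *
 *   "mkeep_alive\0" | wl_mkeep_alive_pkt_t { version, length,
 *                     period_msec = CUSTOM_KEEP_ALIVE_SETTING,
 *                     keep_alive_id = 0, len_bytes = 0 }
 *
 * A zero len_bytes requests null (no-payload) keep-alive frames at the
 * configured period, as the "null packet generation" comment above notes.
 */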
  4642. #endif /* defined(KEEP_ALIVE) */
  4643. #if defined(OEM_ANDROID)
  4644. #define CSCAN_TLV_TYPE_SSID_IE 'S'
  4645. /*
  4646. * SSIDs list parsing from cscan tlv list
  4647. */
  4648. int
  4649. wl_parse_ssid_list_tlv(char** list_str, wlc_ssid_ext_t* ssid, int max, int *bytes_left)
  4650. {
  4651. char* str;
  4652. int idx = 0;
  4653. uint8 len;
  4654. if ((list_str == NULL) || (*list_str == NULL) || (*bytes_left < 0)) {
  4655. DHD_ERROR(("%s error paramters\n", __FUNCTION__));
  4656. return BCME_BADARG;
  4657. }
  4658. str = *list_str;
  4659. while (*bytes_left > 0) {
  4660. if (str[0] != CSCAN_TLV_TYPE_SSID_IE) {
  4661. *list_str = str;
  4662. DHD_TRACE(("nssid=%d left_parse=%d %d\n", idx, *bytes_left, str[0]));
  4663. return idx;
  4664. }
  4665. if (idx >= max) {
  4666. DHD_ERROR(("%s number of SSIDs more than %d\n", __FUNCTION__, idx));
  4667. return BCME_BADARG;
  4668. }
  4669. /* Get proper CSCAN_TLV_TYPE_SSID_IE */
  4670. *bytes_left -= 1;
  4671. if (*bytes_left == 0) {
  4672. DHD_ERROR(("%s no length field.\n", __FUNCTION__));
  4673. return BCME_BADARG;
  4674. }
  4675. str += 1;
  4676. ssid[idx].rssi_thresh = 0;
  4677. ssid[idx].flags = 0;
  4678. len = str[0];
  4679. if (len == 0) {
  4680. /* Broadcast SSID */
  4681. ssid[idx].SSID_len = 0;
  4682. memset((char*)ssid[idx].SSID, 0x0, DOT11_MAX_SSID_LEN);
  4683. *bytes_left -= 1;
  4684. str += 1;
  4685. DHD_TRACE(("BROADCAST SCAN left=%d\n", *bytes_left));
  4686. } else if (len <= DOT11_MAX_SSID_LEN) {
  4687. /* Get proper SSID size */
  4688. ssid[idx].SSID_len = len;
  4689. *bytes_left -= 1;
  4690. /* Get SSID */
  4691. if (ssid[idx].SSID_len > *bytes_left) {
  4692. DHD_ERROR(("%s out of memory range len=%d but left=%d\n",
  4693. __FUNCTION__, ssid[idx].SSID_len, *bytes_left));
  4694. return BCME_BADARG;
  4695. }
  4696. str += 1;
  4697. memcpy((char*)ssid[idx].SSID, str, ssid[idx].SSID_len);
  4698. *bytes_left -= ssid[idx].SSID_len;
  4699. str += ssid[idx].SSID_len;
  4700. ssid[idx].hidden = TRUE;
  4701. DHD_TRACE(("%s :size=%d left=%d\n",
  4702. (char*)ssid[idx].SSID, ssid[idx].SSID_len, *bytes_left));
  4703. } else {
  4704. DHD_ERROR(("### SSID size more than %d\n", str[0]));
  4705. return BCME_BADARG;
  4706. }
  4707. idx++;
  4708. }
  4709. *list_str = str;
  4710. return idx;
  4711. }
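/*
 * CSCAN SSID TLV format consumed by the parser above (derived from the code):
 *
 *   'S' <len:1> <SSID bytes:len>   len == 0 means broadcast scan,
 *                                  len must be <= DOT11_MAX_SSID_LEN
 *
 * Entries repeat until a byte other than CSCAN_TLV_TYPE_SSID_IE is seen, and
 * the function returns the number of SSIDs parsed.
 */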
  4712. /* Android ComboSCAN support */
  4713. /*
  4714. * data parsing from ComboScan tlv list
  4715. */
  4716. int
  4717. wl_iw_parse_data_tlv(char** list_str, void *dst, int dst_size, const char token,
  4718. int input_size, int *bytes_left)
  4719. {
  4720. char* str;
  4721. uint16 short_temp;
  4722. uint32 int_temp;
  4723. if ((list_str == NULL) || (*list_str == NULL) ||(bytes_left == NULL) || (*bytes_left < 0)) {
  4724. DHD_ERROR(("%s error paramters\n", __FUNCTION__));
  4725. return -1;
  4726. }
  4727. str = *list_str;
  4728. /* Clean all dest bytes */
  4729. memset(dst, 0, dst_size);
  4730. if (*bytes_left > 0) {
  4731. if (str[0] != token) {
  4732. DHD_TRACE(("%s NOT Type=%d get=%d left_parse=%d \n",
  4733. __FUNCTION__, token, str[0], *bytes_left));
  4734. return -1;
  4735. }
  4736. *bytes_left -= 1;
  4737. str += 1;
  4738. if (input_size == 1) {
  4739. memcpy(dst, str, input_size);
  4740. }
  4741. else if (input_size == 2) {
  4742. memcpy(dst, (char *)htod16(memcpy(&short_temp, str, input_size)),
  4743. input_size);
  4744. }
  4745. else if (input_size == 4) {
  4746. memcpy(dst, (char *)htod32(memcpy(&int_temp, str, input_size)),
  4747. input_size);
  4748. }
  4749. *bytes_left -= input_size;
  4750. str += input_size;
  4751. *list_str = str;
  4752. return 1;
  4753. }
  4754. return 1;
  4755. }
  4756. /*
  4757. * channel list parsing from cscan tlv list
  4758. */
  4759. int
  4760. wl_iw_parse_channel_list_tlv(char** list_str, uint16* channel_list,
  4761. int channel_num, int *bytes_left)
  4762. {
  4763. char* str;
  4764. int idx = 0;
  4765. if ((list_str == NULL) || (*list_str == NULL) ||(bytes_left == NULL) || (*bytes_left < 0)) {
  4766. DHD_ERROR(("%s error paramters\n", __FUNCTION__));
  4767. return -1;
  4768. }
  4769. str = *list_str;
  4770. while (*bytes_left > 0) {
  4771. if (str[0] != CSCAN_TLV_TYPE_CHANNEL_IE) {
  4772. *list_str = str;
  4773. DHD_TRACE(("End channel=%d left_parse=%d %d\n", idx, *bytes_left, str[0]));
  4774. return idx;
  4775. }
  4776. /* Get proper CSCAN_TLV_TYPE_CHANNEL_IE */
  4777. *bytes_left -= 1;
  4778. str += 1;
  4779. if (str[0] == 0) {
  4780. /* All channels */
  4781. channel_list[idx] = 0x0;
  4782. }
  4783. else {
  4784. channel_list[idx] = (uint16)str[0];
  4785. DHD_TRACE(("%s channel=%d \n", __FUNCTION__, channel_list[idx]));
  4786. }
  4787. *bytes_left -= 1;
  4788. str += 1;
  4789. if (idx++ > 255) {
  4790. DHD_ERROR(("%s Too many channels \n", __FUNCTION__));
  4791. return -1;
  4792. }
  4793. }
  4794. *list_str = str;
  4795. return idx;
  4796. }
  4797. /* Parse a comma-separated list from list_str into ssid array, starting
  4798. * at index idx. Max specifies size of the ssid array. Parses ssids
4799. * and returns the updated idx; if the returned idx >= max, not all SSIDs fit and
4800. * the excess have not been copied. Returns -1 on an empty string or an over-long SSID.
  4801. */
  4802. int
  4803. wl_iw_parse_ssid_list(char** list_str, wlc_ssid_t* ssid, int idx, int max)
  4804. {
  4805. char* str, *ptr;
  4806. if ((list_str == NULL) || (*list_str == NULL))
  4807. return -1;
  4808. for (str = *list_str; str != NULL; str = ptr) {
  4809. /* check for next TAG */
  4810. if (!strncmp(str, GET_CHANNEL, strlen(GET_CHANNEL))) {
  4811. *list_str = str + strlen(GET_CHANNEL);
  4812. return idx;
  4813. }
  4814. if ((ptr = strchr(str, ',')) != NULL) {
  4815. *ptr++ = '\0';
  4816. }
  4817. if (strlen(str) > DOT11_MAX_SSID_LEN) {
  4818. DHD_ERROR(("ssid <%s> exceeds %d\n", str, DOT11_MAX_SSID_LEN));
  4819. return -1;
  4820. }
  4821. if (strlen(str) == 0)
  4822. ssid[idx].SSID_len = 0;
  4823. if (idx < max) {
  4824. bzero(ssid[idx].SSID, sizeof(ssid[idx].SSID));
  4825. strncpy((char*)ssid[idx].SSID, str, sizeof(ssid[idx].SSID) - 1);
  4826. ssid[idx].SSID_len = strlen(str);
  4827. }
  4828. idx++;
  4829. }
  4830. return idx;
  4831. }
  4832. /*
  4833. * Parse channel list from iwpriv CSCAN
  4834. */
  4835. int
  4836. wl_iw_parse_channel_list(char** list_str, uint16* channel_list, int channel_num)
  4837. {
  4838. int num;
  4839. int val;
  4840. char* str;
  4841. char* endptr = NULL;
  4842. if ((list_str == NULL)||(*list_str == NULL))
  4843. return -1;
  4844. str = *list_str;
  4845. num = 0;
  4846. while (strncmp(str, GET_NPROBE, strlen(GET_NPROBE))) {
  4847. val = (int)strtoul(str, &endptr, 0);
  4848. if (endptr == str) {
  4849. printf("could not parse channel number starting at"
  4850. " substring \"%s\" in list:\n%s\n",
  4851. str, *list_str);
  4852. return -1;
  4853. }
  4854. str = endptr + strspn(endptr, " ,");
  4855. if (num == channel_num) {
  4856. DHD_ERROR(("too many channels (more than %d) in channel list:\n%s\n",
  4857. channel_num, *list_str));
  4858. return -1;
  4859. }
  4860. channel_list[num++] = (uint16)val;
  4861. }
  4862. *list_str = str;
  4863. return num;
  4864. }
  4865. #endif /* defined(OEM_ANDROID) */
  4866. /* Given filename and download type, returns a buffer pointer and length
  4867. * for download to f/w. Type can be FW or NVRAM.
  4868. *
  4869. */
  4870. int dhd_get_download_buffer(dhd_pub_t *dhd, char *file_path, download_type_t component,
  4871. char ** buffer, int *length)
  4872. {
  4873. int ret = BCME_ERROR;
  4874. int len = 0;
  4875. int file_len;
  4876. void *image = NULL;
  4877. uint8 *buf = NULL;
  4878. /* Point to cache if available. */
  4879. /* No Valid cache found on this call */
  4880. if (!len) {
  4881. file_len = *length;
  4882. *length = 0;
  4883. if (file_path) {
  4884. image = dhd_os_open_image1(dhd, file_path);
  4885. if (image == NULL) {
  4886. goto err;
  4887. }
  4888. }
  4889. buf = MALLOCZ(dhd->osh, file_len);
  4890. if (buf == NULL) {
  4891. DHD_ERROR(("%s: Failed to allocate memory %d bytes\n",
  4892. __FUNCTION__, file_len));
  4893. goto err;
  4894. }
  4895. /* Download image */
  4896. len = dhd_os_get_image_block((char *)buf, file_len, image);
  4897. if ((len <= 0 || len > file_len)) {
  4898. MFREE(dhd->osh, buf, file_len);
  4899. goto err;
  4900. }
  4901. }
  4902. ret = BCME_OK;
  4903. *length = len;
  4904. *buffer = (char *)buf;
  4905. /* Cache if first call. */
  4906. err:
  4907. if (image)
  4908. dhd_os_close_image1(dhd, image);
  4909. return ret;
  4910. }
  4911. int
  4912. dhd_download_2_dongle(dhd_pub_t *dhd, char *iovar, uint16 flag, uint16 dload_type,
  4913. unsigned char *dload_buf, int len)
  4914. {
  4915. struct wl_dload_data *dload_ptr = (struct wl_dload_data *)dload_buf;
  4916. int err = 0;
  4917. int dload_data_offset;
  4918. static char iovar_buf[WLC_IOCTL_MEDLEN];
  4919. int iovar_len;
  4920. memset(iovar_buf, 0, sizeof(iovar_buf));
  4921. dload_data_offset = OFFSETOF(wl_dload_data_t, data);
  4922. dload_ptr->flag = (DLOAD_HANDLER_VER << DLOAD_FLAG_VER_SHIFT) | flag;
  4923. dload_ptr->dload_type = dload_type;
  4924. dload_ptr->len = htod32(len - dload_data_offset);
  4925. dload_ptr->crc = 0;
  4926. len = ROUNDUP(len, 8);
  4927. iovar_len = bcm_mkiovar(iovar, (char *)dload_buf,
  4928. (uint)len, iovar_buf, sizeof(iovar_buf));
  4929. if (iovar_len == 0) {
  4930. DHD_ERROR(("%s: insufficient buffer space passed to bcm_mkiovar for '%s' \n",
  4931. __FUNCTION__, iovar));
  4932. return BCME_BUFTOOSHORT;
  4933. }
  4934. err = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovar_buf,
  4935. iovar_len, IOV_SET, 0);
  4936. return err;
  4937. }
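/*
 * dhd_download_2_dongle() wraps one chunk in a wl_dload_data_t header before
 * issuing the iovar: flag carries DLOAD_HANDLER_VER plus the caller's
 * DL_BEGIN/DL_END bits, dload_type identifies the blob (e.g. DL_TYPE_CLM),
 * len covers only the payload after the header, and crc is left at 0.
 */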
  4938. int
  4939. dhd_download_blob(dhd_pub_t *dhd, unsigned char *buf,
  4940. uint32 len, char *iovar)
  4941. {
  4942. int chunk_len;
  4943. int size2alloc;
  4944. unsigned char *new_buf;
  4945. int err = 0, data_offset;
  4946. uint16 dl_flag = DL_BEGIN;
  4947. data_offset = OFFSETOF(wl_dload_data_t, data);
  4948. size2alloc = data_offset + MAX_CHUNK_LEN;
  4949. size2alloc = ROUNDUP(size2alloc, 8);
  4950. if ((new_buf = (unsigned char *)MALLOCZ(dhd->osh, size2alloc)) != NULL) {
  4951. do {
  4952. chunk_len = dhd_os_get_image_block((char *)(new_buf + data_offset),
  4953. MAX_CHUNK_LEN, buf);
  4954. if (chunk_len < 0) {
  4955. DHD_ERROR(("%s: dhd_os_get_image_block failed (%d)\n",
  4956. __FUNCTION__, chunk_len));
  4957. err = BCME_ERROR;
  4958. goto exit;
  4959. }
  4960. if (len - chunk_len == 0)
  4961. dl_flag |= DL_END;
  4962. err = dhd_download_2_dongle(dhd, iovar, dl_flag, DL_TYPE_CLM,
  4963. new_buf, data_offset + chunk_len);
  4964. dl_flag &= ~DL_BEGIN;
  4965. len = len - chunk_len;
  4966. } while ((len > 0) && (err == 0));
  4967. } else {
  4968. err = BCME_NOMEM;
  4969. }
  4970. exit:
  4971. if (new_buf) {
  4972. MFREE(dhd->osh, new_buf, size2alloc);
  4973. }
  4974. return err;
  4975. }
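/*
 * Chunking behaviour of dhd_download_blob() (descriptive note): the first
 * chunk is sent with DL_BEGIN, the chunk that exhausts 'len' also gets
 * DL_END, and DL_BEGIN is cleared after the first iteration. A blob smaller
 * than MAX_CHUNK_LEN is therefore delivered in a single message carrying both
 * DL_BEGIN and DL_END.
 */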
  4976. int
  4977. dhd_apply_default_txcap(dhd_pub_t *dhd, char *path)
  4978. {
  4979. return 0;
  4980. }
  4981. int
  4982. dhd_check_current_clm_data(dhd_pub_t *dhd)
  4983. {
  4984. char iovbuf[WLC_IOCTL_SMLEN];
  4985. wl_country_t *cspec;
  4986. int err = BCME_OK;
  4987. memset(iovbuf, 0, sizeof(iovbuf));
  4988. err = bcm_mkiovar("country", NULL, 0, iovbuf, sizeof(iovbuf));
  4989. if (err == 0) {
  4990. err = BCME_BUFTOOSHORT;
  4991. DHD_ERROR(("%s: bcm_mkiovar failed.", __FUNCTION__));
  4992. return err;
  4993. }
  4994. err = dhd_wl_ioctl_cmd(dhd, WLC_GET_VAR, iovbuf, sizeof(iovbuf), FALSE, 0);
  4995. if (err) {
  4996. DHD_ERROR(("%s: country code get failed\n", __FUNCTION__));
  4997. return err;
  4998. }
  4999. cspec = (wl_country_t *)iovbuf;
  5000. if ((strncmp(cspec->ccode, WL_CCODE_NULL_COUNTRY, WLC_CNTRY_BUF_SZ)) == 0) {
  5001. DHD_ERROR(("%s: ----- This FW is not included CLM data -----\n",
  5002. __FUNCTION__));
  5003. return FALSE;
  5004. }
  5005. DHD_ERROR(("%s: ----- This FW is included CLM data -----\n",
  5006. __FUNCTION__));
  5007. return TRUE;
  5008. }
  5009. int
  5010. dhd_apply_default_clm(dhd_pub_t *dhd, char *clm_path)
  5011. {
  5012. char *clm_blob_path;
  5013. int len;
  5014. char *memblock = NULL;
  5015. int err = BCME_OK;
  5016. char iovbuf[WLC_IOCTL_SMLEN];
  5017. int status = FALSE;
  5018. if (clm_path[0] != '\0') {
  5019. if (strlen(clm_path) > MOD_PARAM_PATHLEN) {
  5020. DHD_ERROR(("clm path exceeds max len\n"));
  5021. return BCME_ERROR;
  5022. }
  5023. clm_blob_path = clm_path;
  5024. DHD_TRACE(("clm path from module param:%s\n", clm_path));
  5025. } else {
  5026. clm_blob_path = VENDOR_PATH CONFIG_BCMDHD_CLM_PATH;
  5027. }
  5028. /* If CLM blob file is found on the filesystem, download the file.
  5029. * After CLM file download or If the blob file is not present,
  5030. * validate the country code before proceeding with the initialization.
  5031. * If country code is not valid, fail the initialization.
  5032. */
  5033. memblock = dhd_os_open_image1(dhd, (char *)clm_blob_path);
  5034. if (memblock == NULL) {
  5035. #if defined(DHD_BLOB_EXISTENCE_CHECK)
  5036. if (dhd->is_blob) {
  5037. err = BCME_ERROR;
  5038. } else {
  5039. status = dhd_check_current_clm_data(dhd);
  5040. if (status == TRUE) {
  5041. err = BCME_OK;
  5042. } else {
  5043. err = status;
  5044. }
  5045. }
  5046. #endif /* DHD_BLOB_EXISTENCE_CHECK */
  5047. goto exit;
  5048. }
  5049. len = dhd_os_get_image_size(memblock);
  5050. if ((len > 0) && (len < MAX_CLM_BUF_SIZE) && memblock) {
  5051. status = dhd_check_current_clm_data(dhd);
  5052. if (status == TRUE) {
  5053. #if defined(DHD_BLOB_EXISTENCE_CHECK)
  5054. if (dhd->op_mode != DHD_FLAG_MFG_MODE) {
  5055. if (dhd->is_blob) {
  5056. err = BCME_ERROR;
  5057. }
  5058. goto exit;
  5059. }
  5060. #else
  5061. DHD_ERROR(("%s: CLM already exist in F/W, "
  5062. "new CLM data will be added to the end of existing CLM data!\n",
  5063. __FUNCTION__));
  5064. #endif /* DHD_BLOB_EXISTENCE_CHECK */
  5065. } else if (status != FALSE) {
  5066. err = status;
  5067. goto exit;
  5068. }
  5069. /* Found blob file. Download the file */
  5070. DHD_TRACE(("clm file download from %s \n", clm_blob_path));
  5071. err = dhd_download_blob(dhd, (unsigned char*)memblock, len, "clmload");
  5072. if (err) {
  5073. DHD_ERROR(("%s: CLM download failed err=%d\n", __FUNCTION__, err));
  5074. /* Retrieve clmload_status and print */
  5075. memset(iovbuf, 0, sizeof(iovbuf));
  5076. len = bcm_mkiovar("clmload_status", NULL, 0, iovbuf, sizeof(iovbuf));
  5077. if (len == 0) {
  5078. err = BCME_BUFTOOSHORT;
  5079. goto exit;
  5080. }
  5081. err = dhd_wl_ioctl_cmd(dhd, WLC_GET_VAR, iovbuf, sizeof(iovbuf), FALSE, 0);
  5082. if (err) {
  5083. DHD_ERROR(("%s: clmload_status get failed err=%d \n",
  5084. __FUNCTION__, err));
  5085. } else {
  5086. DHD_ERROR(("%s: clmload_status: %d \n",
  5087. __FUNCTION__, *((int *)iovbuf)));
  5088. if (*((int *)iovbuf) == CHIPID_MISMATCH) {
  5089. DHD_ERROR(("Chip ID mismatch error \n"));
  5090. }
  5091. }
  5092. err = BCME_ERROR;
  5093. goto exit;
  5094. } else {
  5095. DHD_INFO(("%s: CLM download succeeded \n", __FUNCTION__));
  5096. }
  5097. } else {
  5098. DHD_INFO(("Skipping the clm download. len:%d memblk:%p \n", len, memblock));
  5099. }
  5100. /* Verify country code */
  5101. status = dhd_check_current_clm_data(dhd);
  5102. if (status != TRUE) {
  5103. /* Country code not initialized or CLM download not proper */
  5104. DHD_ERROR(("country code not initialized\n"));
  5105. err = status;
  5106. }
  5107. exit:
  5108. if (memblock) {
  5109. dhd_os_close_image1(dhd, memblock);
  5110. }
  5111. return err;
  5112. }
  5113. void dhd_free_download_buffer(dhd_pub_t *dhd, void *buffer, int length)
  5114. {
  5115. MFREE(dhd->osh, buffer, length);
  5116. }
  5117. #ifdef SHOW_LOGTRACE
  5118. int
  5119. dhd_parse_logstrs_file(osl_t *osh, char *raw_fmts, int logstrs_size,
  5120. dhd_event_log_t *event_log)
  5121. {
  5122. uint32 *lognums = NULL;
  5123. char *logstrs = NULL;
  5124. logstr_trailer_t *trailer = NULL;
  5125. int ram_index = 0;
  5126. char **fmts = NULL;
  5127. int num_fmts = 0;
  5128. bool match_fail = TRUE;
  5129. int32 i = 0;
  5130. uint8 *pfw_id = NULL;
  5131. uint32 fwid = 0;
  5132. void *file = NULL;
  5133. int file_len = 0;
  5134. char fwid_str[FWID_STR_LEN];
  5135. uint32 hdr_logstrs_size = 0;
  5136. /* Read last three words in the logstrs.bin file */
  5137. trailer = (logstr_trailer_t *) (raw_fmts + logstrs_size -
  5138. sizeof(logstr_trailer_t));
  5139. if (trailer->log_magic == LOGSTRS_MAGIC) {
  5140. /*
  5141. * logstrs.bin has a header.
  5142. */
  5143. if (trailer->version == 1) {
  5144. logstr_header_v1_t *hdr_v1 = (logstr_header_v1_t *) (raw_fmts +
  5145. logstrs_size - sizeof(logstr_header_v1_t));
  5146. DHD_INFO(("%s: logstr header version = %u\n",
  5147. __FUNCTION__, hdr_v1->version));
  5148. num_fmts = hdr_v1->rom_logstrs_offset / sizeof(uint32);
  5149. ram_index = (hdr_v1->ram_lognums_offset -
  5150. hdr_v1->rom_lognums_offset) / sizeof(uint32);
  5151. lognums = (uint32 *) &raw_fmts[hdr_v1->rom_lognums_offset];
  5152. logstrs = (char *) &raw_fmts[hdr_v1->rom_logstrs_offset];
  5153. hdr_logstrs_size = hdr_v1->logstrs_size;
  5154. } else if (trailer->version == 2) {
  5155. logstr_header_t *hdr = (logstr_header_t *) (raw_fmts + logstrs_size -
  5156. sizeof(logstr_header_t));
  5157. DHD_INFO(("%s: logstr header version = %u; flags = %x\n",
  5158. __FUNCTION__, hdr->trailer.version, hdr->trailer.flags));
  5159. /* For ver. 2 of the header, need to match fwid of
  5160. * both logstrs.bin and fw bin
  5161. */
  5162. /* read the FWID from fw bin */
  5163. file = dhd_os_open_image1(NULL, st_str_file_path);
  5164. if (!file) {
  5165. DHD_ERROR(("%s: cannot open fw file !\n", __FUNCTION__));
  5166. goto error;
  5167. }
  5168. file_len = dhd_os_get_image_size(file);
  5169. if (file_len <= 0) {
  5170. DHD_ERROR(("%s: bad fw file length !\n", __FUNCTION__));
  5171. goto error;
  5172. }
  5173. /* fwid is at the end of fw bin in string format */
  5174. if (dhd_os_seek_file(file, file_len - (sizeof(fwid_str) - 1)) < 0) {
  5175. DHD_ERROR(("%s: can't seek file \n", __FUNCTION__));
  5176. goto error;
  5177. }
  5178. memset(fwid_str, 0, sizeof(fwid_str));
  5179. if (dhd_os_get_image_block(fwid_str, sizeof(fwid_str) - 1, file) <= 0) {
  5180. DHD_ERROR(("%s: read fw file failed !\n", __FUNCTION__));
  5181. goto error;
  5182. }
  5183. pfw_id = (uint8 *)bcmstrnstr(fwid_str, sizeof(fwid_str) - 1,
  5184. FWID_STR_1, strlen(FWID_STR_1));
  5185. if (!pfw_id) {
  5186. pfw_id = (uint8 *)bcmstrnstr(fwid_str, sizeof(fwid_str) - 1,
  5187. FWID_STR_2, strlen(FWID_STR_2));
  5188. if (!pfw_id) {
  5189. DHD_ERROR(("%s: could not find id in FW bin!\n",
  5190. __FUNCTION__));
  5191. goto error;
  5192. }
  5193. }
  5194. /* search for the '-' in the fw id str, after which the
  5195. * actual 4 byte fw id is present
  5196. */
  5197. while (pfw_id && *pfw_id != '-') {
  5198. ++pfw_id;
  5199. }
  5200. ++pfw_id;
  5201. fwid = bcm_strtoul((char *)pfw_id, NULL, 16);
  5202. /* check if fw id in logstrs.bin matches the fw one */
  5203. if (hdr->trailer.fw_id != fwid) {
  5204. DHD_ERROR(("%s: logstr id does not match FW!"
  5205. "logstrs_fwid:0x%x, rtecdc_fwid:0x%x\n",
  5206. __FUNCTION__, hdr->trailer.fw_id, fwid));
  5207. goto error;
  5208. }
  5209. match_fail = FALSE;
  5210. num_fmts = hdr->rom_logstrs_offset / sizeof(uint32);
  5211. ram_index = (hdr->ram_lognums_offset -
  5212. hdr->rom_lognums_offset) / sizeof(uint32);
  5213. lognums = (uint32 *) &raw_fmts[hdr->rom_lognums_offset];
  5214. logstrs = (char *) &raw_fmts[hdr->rom_logstrs_offset];
  5215. hdr_logstrs_size = hdr->logstrs_size;
  5216. error:
  5217. if (file) {
  5218. dhd_os_close_image1(NULL, file);
  5219. }
  5220. if (match_fail) {
  5221. return BCME_DECERR;
  5222. }
  5223. } else {
  5224. DHD_ERROR(("%s: Invalid logstr version %u\n", __FUNCTION__,
  5225. trailer->version));
  5226. return BCME_ERROR;
  5227. }
  5228. if (logstrs_size != hdr_logstrs_size) {
  5229. DHD_ERROR(("%s: bad logstrs_size %d\n", __FUNCTION__, hdr_logstrs_size));
  5230. return BCME_ERROR;
  5231. }
  5232. } else {
  5233. /*
  5234. * Legacy logstrs.bin format without header.
  5235. */
  5236. num_fmts = *((uint32 *) (raw_fmts)) / sizeof(uint32);
  5237. /* Legacy RAM-only logstrs.bin format:
  5238. * - RAM 'lognums' section
  5239. * - RAM 'logstrs' section.
  5240. *
  5241. * 'lognums' is an array of indexes for the strings in the
  5242. * 'logstrs' section. The first uint32 is an index to the
  5243. * start of 'logstrs'. Therefore, if this index is divided
  5244. * by 'sizeof(uint32)' it provides the number of logstr
  5245. * entries.
  5246. */
  5247. ram_index = 0;
  5248. lognums = (uint32 *) raw_fmts;
  5249. logstrs = (char *) &raw_fmts[num_fmts << 2];
  5250. }
  5251. if (num_fmts)
  5252. fmts = MALLOC(osh, num_fmts * sizeof(char *));
  5253. if (fmts == NULL) {
  5254. DHD_ERROR(("%s: Failed to allocate fmts memory\n", __FUNCTION__));
  5255. return BCME_ERROR;
  5256. }
  5257. event_log->fmts_size = num_fmts * sizeof(char *);
  5258. for (i = 0; i < num_fmts; i++) {
  5259. /* ROM lognums index into logstrs using 'rom_logstrs_offset' as a base
  5260. * (they are 0-indexed relative to 'rom_logstrs_offset').
  5261. *
  5262. * RAM lognums are already indexed to point to the correct RAM logstrs (they
  5263. * are 0-indexed relative to the start of the logstrs.bin file).
  5264. */
  5265. if (i == ram_index) {
  5266. logstrs = raw_fmts;
  5267. }
  5268. fmts[i] = &logstrs[lognums[i]];
  5269. }
  5270. event_log->fmts = fmts;
  5271. event_log->raw_fmts_size = logstrs_size;
  5272. event_log->raw_fmts = raw_fmts;
  5273. event_log->num_fmts = num_fmts;
  5274. return BCME_OK;
  5275. } /* dhd_parse_logstrs_file */
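/*
 * Note on the fmts[] construction above: entries below ram_index are ROM
 * format strings, whose lognums are offsets relative to rom_logstrs_offset,
 * so 'logstrs' initially points at that section. From ram_index onward the
 * lognums are offsets from the start of logstrs.bin, which is why the base
 * pointer is switched back to raw_fmts at that index.
 */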
  5276. int dhd_parse_map_file(osl_t *osh, void *file, uint32 *ramstart, uint32 *rodata_start,
  5277. uint32 *rodata_end)
  5278. {
  5279. char *raw_fmts = NULL, *raw_fmts_loc = NULL;
  5280. uint32 read_size = READ_NUM_BYTES;
  5281. int error = 0;
  5282. char * cptr = NULL;
  5283. char c;
  5284. uint8 count = 0;
  5285. *ramstart = 0;
  5286. *rodata_start = 0;
  5287. *rodata_end = 0;
  5288. /* Allocate 1 byte more than read_size to terminate it with NULL */
  5289. raw_fmts = MALLOCZ(osh, read_size + 1);
  5290. if (raw_fmts == NULL) {
  5291. DHD_ERROR(("%s: Failed to allocate raw_fmts memory \n", __FUNCTION__));
  5292. goto fail;
  5293. }
  5294. /* read ram start, rodata_start and rodata_end values from map file */
  5295. while (count != ALL_MAP_VAL)
  5296. {
  5297. error = dhd_os_read_file(file, raw_fmts, read_size);
  5298. if (error < 0) {
  5299. DHD_ERROR(("%s: map file read failed err:%d \n", __FUNCTION__,
  5300. error));
  5301. goto fail;
  5302. }
  5303. /* End raw_fmts with NULL as strstr expects NULL terminated strings */
  5304. raw_fmts[read_size] = '\0';
  5305. /* Get ramstart address */
  5306. raw_fmts_loc = raw_fmts;
  5307. if (!(count & RAMSTART_BIT) &&
  5308. (cptr = bcmstrnstr(raw_fmts_loc, read_size, ramstart_str,
  5309. strlen(ramstart_str)))) {
  5310. cptr = cptr - BYTES_AHEAD_NUM;
  5311. sscanf(cptr, "%x %c text_start", ramstart, &c);
  5312. count |= RAMSTART_BIT;
  5313. }
  5314. /* Get ram rodata start address */
  5315. raw_fmts_loc = raw_fmts;
  5316. if (!(count & RDSTART_BIT) &&
  5317. (cptr = bcmstrnstr(raw_fmts_loc, read_size, rodata_start_str,
  5318. strlen(rodata_start_str)))) {
  5319. cptr = cptr - BYTES_AHEAD_NUM;
  5320. sscanf(cptr, "%x %c rodata_start", rodata_start, &c);
  5321. count |= RDSTART_BIT;
  5322. }
  5323. /* Get ram rodata end address */
  5324. raw_fmts_loc = raw_fmts;
  5325. if (!(count & RDEND_BIT) &&
  5326. (cptr = bcmstrnstr(raw_fmts_loc, read_size, rodata_end_str,
  5327. strlen(rodata_end_str)))) {
  5328. cptr = cptr - BYTES_AHEAD_NUM;
  5329. sscanf(cptr, "%x %c rodata_end", rodata_end, &c);
  5330. count |= RDEND_BIT;
  5331. }
  5332. if (error < (int)read_size) {
  5333. /*
5334. * Because we seek the file position back by GO_BACK_FILE_POS_NUM_BYTES
5335. * bytes after every chunk, a string that straddles two reads is not
5336. * missed rather than re-read at EOF. So if the number of bytes read
5337. * is less than read_size we have reached EOF and should not read
5338. * any further.
  5339. */
  5340. break;
  5341. }
  5342. memset(raw_fmts, 0, read_size);
  5343. /*
5344. * Seek back by a predefined number of bytes so that we won't miss
5345. * the string and address even if they are split across the next read.
  5346. */
  5347. dhd_os_seek_file(file, -GO_BACK_FILE_POS_NUM_BYTES);
  5348. }
  5349. fail:
  5350. if (raw_fmts) {
  5351. MFREE(osh, raw_fmts, read_size + 1);
  5352. raw_fmts = NULL;
  5353. }
  5354. if (count == ALL_MAP_VAL) {
  5355. return BCME_OK;
  5356. }
  5357. else {
  5358. DHD_ERROR(("%s: readmap error 0X%x \n", __FUNCTION__,
  5359. count));
  5360. return BCME_ERROR;
  5361. }
  5362. } /* dhd_parse_map_file */
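/*
 * dhd_parse_map_file() expects the firmware .map file to contain lines of the
 * form "<hex addr> <type-char> text_start", "... rodata_start" and
 * "... rodata_end" (see the sscanf formats above). Parsing succeeds only when
 * all three markers are found, i.e. when 'count' has RAMSTART_BIT,
 * RDSTART_BIT and RDEND_BIT set (ALL_MAP_VAL).
 */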
  5363. #ifdef PCIE_FULL_DONGLE
  5364. int
  5365. dhd_event_logtrace_infobuf_pkt_process(dhd_pub_t *dhdp, void *pktbuf,
  5366. dhd_event_log_t *event_data)
  5367. {
  5368. uint32 infobuf_version;
  5369. info_buf_payload_hdr_t *payload_hdr_ptr;
  5370. uint16 payload_hdr_type;
  5371. uint16 payload_hdr_length;
  5372. DHD_TRACE(("%s:Enter\n", __FUNCTION__));
  5373. if (PKTLEN(dhdp->osh, pktbuf) < sizeof(uint32)) {
  5374. DHD_ERROR(("%s: infobuf too small for version field\n",
  5375. __FUNCTION__));
  5376. goto exit;
  5377. }
  5378. infobuf_version = *((uint32 *)PKTDATA(dhdp->osh, pktbuf));
  5379. PKTPULL(dhdp->osh, pktbuf, sizeof(uint32));
  5380. if (infobuf_version != PCIE_INFOBUF_V1) {
  5381. DHD_ERROR(("%s: infobuf version %d is not PCIE_INFOBUF_V1\n",
  5382. __FUNCTION__, infobuf_version));
  5383. goto exit;
  5384. }
  5385. /* Version 1 infobuf has a single type/length (and then value) field */
  5386. if (PKTLEN(dhdp->osh, pktbuf) < sizeof(info_buf_payload_hdr_t)) {
  5387. DHD_ERROR(("%s: infobuf too small for v1 type/length fields\n",
  5388. __FUNCTION__));
  5389. goto exit;
  5390. }
  5391. /* Process/parse the common info payload header (type/length) */
  5392. payload_hdr_ptr = (info_buf_payload_hdr_t *)PKTDATA(dhdp->osh, pktbuf);
  5393. payload_hdr_type = ltoh16(payload_hdr_ptr->type);
  5394. payload_hdr_length = ltoh16(payload_hdr_ptr->length);
  5395. if (payload_hdr_type != PCIE_INFOBUF_V1_TYPE_LOGTRACE) {
  5396. DHD_ERROR(("%s: payload_hdr_type %d is not V1_TYPE_LOGTRACE\n",
  5397. __FUNCTION__, payload_hdr_type));
  5398. goto exit;
  5399. }
  5400. PKTPULL(dhdp->osh, pktbuf, sizeof(info_buf_payload_hdr_t));
  5401. /* Validate that the specified length isn't bigger than the
  5402. * provided data.
  5403. */
  5404. if (payload_hdr_length > PKTLEN(dhdp->osh, pktbuf)) {
  5405. DHD_ERROR(("%s: infobuf logtrace length is bigger"
  5406. " than actual buffer data\n", __FUNCTION__));
  5407. goto exit;
  5408. }
  5409. dhd_dbg_trace_evnt_handler(dhdp, PKTDATA(dhdp->osh, pktbuf),
  5410. event_data, payload_hdr_length);
  5411. return BCME_OK;
  5412. exit:
  5413. return BCME_ERROR;
  5414. } /* dhd_event_logtrace_infobuf_pkt_process */
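/*
 * For reference, a minimal sketch of the v1 infobuf layout consumed above,
 * derived from the parsing order in dhd_event_logtrace_infobuf_pkt_process:
 *
 *   | infobuf_version (uint32, must be PCIE_INFOBUF_V1)               |
 *   | info_buf_payload_hdr_t { type (uint16), length (uint16) }       |
 *   | 'length' bytes of logtrace payload passed to                    |
 *   |   dhd_dbg_trace_evnt_handler()                                  |
 */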
  5415. #endif /* PCIE_FULL_DONGLE */
  5416. #endif /* SHOW_LOGTRACE */
  5417. #if defined(WLTDLS) && defined(PCIE_FULL_DONGLE)
5418. /* Handle TDLS events in dhd_common.c */
  5420. int dhd_tdls_event_handler(dhd_pub_t *dhd_pub, wl_event_msg_t *event)
  5421. {
  5422. int ret = BCME_OK;
  5423. #if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__)
  5424. #pragma GCC diagnostic push
  5425. #pragma GCC diagnostic ignored "-Wcast-qual"
  5426. #endif // endif
  5427. ret = dhd_tdls_update_peer_info(dhd_pub, event);
  5428. #if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__)
  5429. #pragma GCC diagnostic pop
  5430. #endif // endif
  5431. return ret;
  5432. }
  5433. int dhd_free_tdls_peer_list(dhd_pub_t *dhd_pub)
  5434. {
  5435. tdls_peer_node_t *cur = NULL, *prev = NULL;
  5436. if (!dhd_pub)
  5437. return BCME_ERROR;
  5438. cur = dhd_pub->peer_tbl.node;
  5439. if ((dhd_pub->peer_tbl.node == NULL) && !dhd_pub->peer_tbl.tdls_peer_count)
  5440. return BCME_ERROR;
  5441. while (cur != NULL) {
  5442. prev = cur;
  5443. cur = cur->next;
  5444. MFREE(dhd_pub->osh, prev, sizeof(tdls_peer_node_t));
  5445. }
  5446. dhd_pub->peer_tbl.tdls_peer_count = 0;
  5447. dhd_pub->peer_tbl.node = NULL;
  5448. return BCME_OK;
  5449. }
  5450. #endif /* #if defined(WLTDLS) && defined(PCIE_FULL_DONGLE) */
  5451. /* pretty hex print a contiguous buffer
  5452. * based on the debug level specified
  5453. */
  5454. void
  5455. dhd_prhex(const char *msg, volatile uchar *buf, uint nbytes, uint8 dbg_level)
  5456. {
  5457. char line[128], *p;
  5458. int len = sizeof(line);
  5459. int nchar;
  5460. uint i;
  5461. if (msg && (msg[0] != '\0')) {
  5462. if (dbg_level == DHD_ERROR_VAL)
  5463. DHD_ERROR(("%s:\n", msg));
  5464. else if (dbg_level == DHD_INFO_VAL)
  5465. DHD_INFO(("%s:\n", msg));
  5466. else if (dbg_level == DHD_TRACE_VAL)
  5467. DHD_TRACE(("%s:\n", msg));
  5468. }
  5469. p = line;
  5470. for (i = 0; i < nbytes; i++) {
  5471. if (i % 16 == 0) {
  5472. nchar = snprintf(p, len, " %04x: ", i); /* line prefix */
  5473. p += nchar;
  5474. len -= nchar;
  5475. }
  5476. if (len > 0) {
  5477. nchar = snprintf(p, len, "%02x ", buf[i]);
  5478. p += nchar;
  5479. len -= nchar;
  5480. }
  5481. if (i % 16 == 15) {
  5482. /* flush line */
  5483. if (dbg_level == DHD_ERROR_VAL)
  5484. DHD_ERROR(("%s:\n", line));
  5485. else if (dbg_level == DHD_INFO_VAL)
  5486. DHD_INFO(("%s:\n", line));
  5487. else if (dbg_level == DHD_TRACE_VAL)
  5488. DHD_TRACE(("%s:\n", line));
  5489. p = line;
  5490. len = sizeof(line);
  5491. }
  5492. }
  5493. /* flush last partial line */
  5494. if (p != line) {
  5495. if (dbg_level == DHD_ERROR_VAL)
  5496. DHD_ERROR(("%s:\n", line));
  5497. else if (dbg_level == DHD_INFO_VAL)
  5498. DHD_INFO(("%s:\n", line));
  5499. else if (dbg_level == DHD_TRACE_VAL)
  5500. DHD_TRACE(("%s:\n", line));
  5501. }
  5502. }
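/*
 * Illustrative sketch (not part of the driver): with the snprintf formats
 * above, a call such as
 *
 *   dhd_prhex("rx hdr", (volatile uchar *)pktdata, 20, DHD_ERROR_VAL);
 *
 * (where 'pktdata' is a hypothetical buffer pointer) would print lines of
 * the form below, 16 bytes per line, example bytes only:
 *
 *    0000: de ad be ef 00 01 02 03 04 05 06 07 08 09 0a 0b
 *    0010: 0c 0d 0e 0f
 */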
  5503. #ifndef OEM_ANDROID
  5504. int
  5505. dhd_tput_test(dhd_pub_t *dhd, tput_test_t *tput_data)
  5506. {
  5507. struct ether_header ether_hdr;
  5508. tput_pkt_t tput_pkt;
  5509. void *pkt = NULL;
  5510. uint8 *pktdata = NULL;
  5511. uint32 pktsize = 0;
  5512. uint64 total_size = 0;
  5513. uint32 *crc = 0;
  5514. uint32 pktid = 0;
  5515. uint32 total_num_tx_pkts = 0;
  5516. int err = 0, err_exit = 0;
  5517. uint32 i = 0;
  5518. uint64 time_taken = 0;
  5519. int max_txbufs = 0;
  5520. uint32 n_batches = 0;
  5521. uint32 n_remain = 0;
  5522. uint8 tput_pkt_hdr_size = 0;
  5523. bool batch_cnt = FALSE;
  5524. bool tx_stop_pkt = FALSE;
  5525. if (tput_data->version != TPUT_TEST_T_VER ||
  5526. tput_data->length != TPUT_TEST_T_LEN) {
  5527. DHD_ERROR(("%s: wrong structure ver/len! \n", __FUNCTION__));
  5528. err_exit = BCME_BADARG;
  5529. goto exit_error;
  5530. }
  5531. if (dhd->tput_data.tput_test_running) {
  5532. DHD_ERROR(("%s: tput test already running ! \n", __FUNCTION__));
  5533. err_exit = BCME_BUSY;
  5534. goto exit_error;
  5535. }
  5536. #ifdef PCIE_FULL_DONGLE
  5537. /*
  5538. * 100 bytes to accommodate ether header and tput header. As of today
  5539. * both occupy 30 bytes. Rest is reserved.
  5540. */
  5541. if ((tput_data->payload_size > TPUT_TEST_MAX_PAYLOAD) ||
  5542. (tput_data->payload_size > (DHD_FLOWRING_RX_BUFPOST_PKTSZ - 100))) {
  5543. DHD_ERROR(("%s: payload size is too large! max_payload=%u rx_bufpost_size=%u\n",
  5544. __FUNCTION__, TPUT_TEST_MAX_PAYLOAD,
  5545. (DHD_FLOWRING_RX_BUFPOST_PKTSZ - 100)));
  5546. err_exit = BCME_BUFTOOLONG;
  5547. goto exit_error;
  5548. }
  5549. #endif // endif
  5550. max_txbufs = dhd_get_max_txbufs(dhd);
  5551. max_txbufs = MIN(max_txbufs, DHD_TPUT_MAX_TX_PKTS_BATCH);
  5552. if (!(tput_data->num_pkts > 0)) {
  5553. DHD_ERROR(("%s: invalid num_pkts: %d to tx\n",
  5554. __FUNCTION__, tput_data->num_pkts));
  5555. err_exit = BCME_ERROR;
  5556. goto exit_error;
  5557. }
  5558. memset(&dhd->tput_data, 0, sizeof(dhd->tput_data));
  5559. memcpy(&dhd->tput_data, tput_data, sizeof(*tput_data));
  5560. dhd->tput_data.pkts_bad = dhd->tput_data.pkts_good = 0;
  5561. dhd->tput_data.pkts_cmpl = 0;
  5562. dhd->tput_start_ts = dhd->tput_stop_ts = 0;
  5563. if (tput_data->flags & TPUT_TEST_USE_ETHERNET_HDR) {
  5564. pktsize = sizeof(ether_hdr) + sizeof(tput_pkt_t) +
  5565. (tput_data->payload_size - 12);
  5566. } else {
  5567. pktsize = sizeof(tput_pkt_t) +
  5568. (tput_data->payload_size - 12);
  5569. }
  5570. tput_pkt_hdr_size = (uint8)((uint8 *)&tput_pkt.crc32 -
  5571. (uint8 *)&tput_pkt.mac_sta);
  5572. /* mark the tput test as started */
  5573. dhd->tput_data.tput_test_running = TRUE;
  5574. if (tput_data->direction == TPUT_DIR_TX) {
  5575. /* for ethernet header */
  5576. memcpy(ether_hdr.ether_shost, tput_data->mac_sta, ETHER_ADDR_LEN);
  5577. memcpy(ether_hdr.ether_dhost, tput_data->mac_ap, ETHER_ADDR_LEN);
  5578. ether_hdr.ether_type = hton16(ETHER_TYPE_IP);
  5579. /* fill in the tput pkt */
  5580. memset(&tput_pkt, 0, sizeof(tput_pkt));
  5581. memcpy(tput_pkt.mac_ap, tput_data->mac_ap, ETHER_ADDR_LEN);
  5582. memcpy(tput_pkt.mac_sta, tput_data->mac_sta, ETHER_ADDR_LEN);
  5583. tput_pkt.pkt_type = hton16(TPUT_PKT_TYPE_NORMAL);
  5584. tput_pkt.num_pkts = hton32(tput_data->num_pkts);
  5585. if (tput_data->num_pkts > (uint32)max_txbufs) {
  5586. n_batches = tput_data->num_pkts / max_txbufs;
  5587. n_remain = tput_data->num_pkts % max_txbufs;
  5588. } else {
  5589. n_batches = 0;
  5590. n_remain = tput_data->num_pkts;
  5591. }
  5592. DHD_ERROR(("%s: num_pkts: %u n_batches: %u n_remain: %u\n",
  5593. __FUNCTION__, tput_data->num_pkts, n_batches, n_remain));
  5594. do {
  5595. /* reset before every batch */
  5596. dhd->batch_tx_pkts_cmpl = 0;
  5597. if (n_batches) {
  5598. dhd->batch_tx_num_pkts = max_txbufs;
  5599. --n_batches;
  5600. } else if (n_remain) {
  5601. dhd->batch_tx_num_pkts = n_remain;
  5602. n_remain = 0;
  5603. } else {
  5604. DHD_ERROR(("Invalid. This should not hit\n"));
  5605. }
  5606. dhd->tput_start_ts = OSL_SYSUPTIME_US();
  5607. for (i = 0; (i < dhd->batch_tx_num_pkts) || (tx_stop_pkt); ++i) {
  5608. pkt = PKTGET(dhd->osh, pktsize, TRUE);
  5609. if (!pkt) {
  5610. dhd->tput_data.tput_test_running = FALSE;
  5611. DHD_ERROR(("%s: PKTGET fails ! Not enough Tx buffers\n",
  5612. __FUNCTION__));
  5613. DHD_ERROR(("%s: pkts_good:%u; pkts_bad:%u; pkts_cmpl:%u\n",
  5614. __FUNCTION__, dhd->tput_data.pkts_good,
  5615. dhd->tput_data.pkts_bad, dhd->tput_data.pkts_cmpl));
  5616. err_exit = BCME_NOMEM;
  5617. goto exit_error;
  5618. }
  5619. pktdata = PKTDATA(dhd->osh, pkt);
  5620. PKTSETLEN(dhd->osh, pkt, pktsize);
  5621. memset(pktdata, 0, pktsize);
  5622. if (tput_data->flags & TPUT_TEST_USE_ETHERNET_HDR) {
  5623. memcpy(pktdata, &ether_hdr, sizeof(ether_hdr));
  5624. pktdata += sizeof(ether_hdr);
  5625. }
  5626. /* send stop pkt as last pkt */
  5627. if (tx_stop_pkt) {
  5628. tput_pkt.pkt_type = hton16(TPUT_PKT_TYPE_STOP);
  5629. tx_stop_pkt = FALSE;
  5630. } else
  5631. tput_pkt.pkt_type = hton16(TPUT_PKT_TYPE_NORMAL);
  5632. tput_pkt.pkt_id = hton32(pktid++);
  5633. tput_pkt.crc32 = 0;
  5634. memcpy(pktdata, &tput_pkt, sizeof(tput_pkt));
  5635. /* compute crc32 over the pkt-id, num-pkts and data fields */
  5636. crc = (uint32 *)(pktdata + tput_pkt_hdr_size);
  5637. *crc = hton32(hndcrc32(pktdata + tput_pkt_hdr_size + 4,
  5638. 8 + (tput_data->payload_size - 12),
  5639. CRC32_INIT_VALUE));
  5640. err = dhd_sendpkt(dhd, 0, pkt);
  5641. if (err != BCME_OK) {
  5642. DHD_INFO(("%s: send pkt (id = %u) fails (err = %d) ! \n",
  5643. __FUNCTION__, pktid, err));
  5644. dhd->tput_data.pkts_bad++;
  5645. }
  5646. total_num_tx_pkts++;
  5647. if ((total_num_tx_pkts == tput_data->num_pkts) && (!tx_stop_pkt)) {
  5648. tx_stop_pkt = TRUE;
  5649. }
  5650. }
  5651. DHD_INFO(("%s: TX done, wait for completion...\n", __FUNCTION__));
  5652. if (!dhd_os_tput_test_wait(dhd, NULL,
  5653. TPUT_TEST_WAIT_TIMEOUT_DEFAULT)) {
  5654. dhd->tput_stop_ts = OSL_SYSUPTIME_US();
  5655. dhd->tput_data.tput_test_running = FALSE;
  5656. DHD_ERROR(("%s: TX completion timeout !"
  5657. " Total Tx pkts (including STOP) = %u; pkts cmpl = %u; \n",
  5658. __FUNCTION__, total_num_tx_pkts, dhd->batch_tx_pkts_cmpl));
  5659. err_exit = BCME_ERROR;
  5660. goto exit_error;
  5661. }
  5662. if (dhd->tput_start_ts && dhd->tput_stop_ts &&
  5663. (dhd->tput_stop_ts > dhd->tput_start_ts)) {
  5664. time_taken += dhd->tput_stop_ts - dhd->tput_start_ts;
  5665. } else {
  5666. dhd->tput_data.tput_test_running = FALSE;
  5667. DHD_ERROR(("%s: bad timestamp while cal tx batch time\n",
  5668. __FUNCTION__));
  5669. err_exit = BCME_ERROR;
  5670. goto exit_error;
  5671. }
  5672. if (n_batches || n_remain) {
  5673. batch_cnt = TRUE;
  5674. } else {
  5675. batch_cnt = FALSE;
  5676. }
  5677. } while (batch_cnt);
  5678. } else {
  5679. /* TPUT_DIR_RX */
  5680. DHD_INFO(("%s: waiting for RX completion... \n", __FUNCTION__));
  5681. if (!dhd_os_tput_test_wait(dhd, NULL, tput_data->timeout_ms)) {
  5682. DHD_ERROR(("%s: RX completion timeout ! \n", __FUNCTION__));
  5683. dhd->tput_stop_ts = OSL_SYSUPTIME_US();
  5684. }
  5685. }
  5686. /* calculate the throughput in bits per sec */
  5687. if ((dhd->tput_start_ts && dhd->tput_stop_ts &&
  5688. (dhd->tput_stop_ts > dhd->tput_start_ts)) || (time_taken)) {
  5689. if (!time_taken) {
  5690. time_taken = dhd->tput_stop_ts - dhd->tput_start_ts;
  5691. }
  5692. time_taken = DIV_U64_BY_U32(time_taken, MSEC_PER_SEC); /* convert to ms */
  5693. dhd->tput_data.time_ms = time_taken;
  5694. if (time_taken) {
  5695. total_size = (pktsize * dhd->tput_data.pkts_cmpl * 8);
  5696. dhd->tput_data.tput_bps = DIV_U64_BY_U64(total_size, time_taken);
  5697. /* convert from ms to seconds */
  5698. dhd->tput_data.tput_bps = dhd->tput_data.tput_bps * MSEC_PER_SEC;
  5699. }
  5700. } else {
  5701. DHD_ERROR(("%s: bad timestamp !\n", __FUNCTION__));
  5702. }
  5703. DHD_INFO(("%s: DONE. tput = %llu bps, time = %llu ms\n", __FUNCTION__,
  5704. dhd->tput_data.tput_bps, dhd->tput_data.time_ms));
  5705. memcpy(tput_data, &dhd->tput_data, sizeof(dhd->tput_data));
  5706. dhd->tput_data.tput_test_running = FALSE;
  5707. err_exit = BCME_OK;
  5708. exit_error:
  5709. DHD_ERROR(("%s: pkts_good = %u; pkts_bad = %u; pkts_cmpl = %u\n",
  5710. __FUNCTION__, dhd->tput_data.pkts_good,
  5711. dhd->tput_data.pkts_bad, dhd->tput_data.pkts_cmpl));
  5712. return err_exit;
  5713. }
  5714. void
  5715. dhd_tput_test_rx(dhd_pub_t *dhd, void *pkt)
  5716. {
  5717. uint8 *pktdata = NULL;
  5718. tput_pkt_t *tput_pkt = NULL;
  5719. uint32 crc = 0;
  5720. uint8 tput_pkt_hdr_size = 0;
  5721. pktdata = PKTDATA(dhd->osh, pkt);
  5722. if (dhd->tput_data.flags & TPUT_TEST_USE_ETHERNET_HDR)
  5723. pktdata += sizeof(struct ether_header);
  5724. tput_pkt = (tput_pkt_t *)pktdata;
  5725. /* record the timestamp of the first packet received */
  5726. if (dhd->tput_data.pkts_cmpl == 0) {
  5727. dhd->tput_start_ts = OSL_SYSUPTIME_US();
  5728. }
  5729. if (ntoh16(tput_pkt->pkt_type) != TPUT_PKT_TYPE_STOP &&
  5730. dhd->tput_data.pkts_cmpl <= dhd->tput_data.num_pkts) {
  5731. dhd->tput_data.pkts_cmpl++;
  5732. }
  5733. /* drop rx packets received beyond the specified # */
  5734. if (dhd->tput_data.pkts_cmpl > dhd->tput_data.num_pkts)
  5735. return;
  5736. DHD_TRACE(("%s: Rx tput test pkt, id = %u ; type = %u\n", __FUNCTION__,
  5737. ntoh32(tput_pkt->pkt_id), ntoh16(tput_pkt->pkt_type)));
  5738. /* discard if mac addr of AP/STA does not match the specified ones */
  5739. if ((memcmp(tput_pkt->mac_ap, dhd->tput_data.mac_ap,
  5740. ETHER_ADDR_LEN) != 0) ||
  5741. (memcmp(tput_pkt->mac_sta, dhd->tput_data.mac_sta,
  5742. ETHER_ADDR_LEN) != 0)) {
  5743. dhd->tput_data.pkts_bad++;
  5744. DHD_INFO(("%s: dropping tput pkt with id %u due to bad AP/STA mac !\n",
  5745. __FUNCTION__, ntoh32(tput_pkt->pkt_id)));
  5746. return;
  5747. }
  5748. tput_pkt_hdr_size = (uint8)((uint8 *)&tput_pkt->crc32 -
  5749. (uint8 *)&tput_pkt->mac_sta);
  5750. pktdata += tput_pkt_hdr_size + 4;
  5751. crc = hndcrc32(pktdata, 8 + (dhd->tput_data.payload_size - 12),
  5752. CRC32_INIT_VALUE);
  5753. if (crc != ntoh32(tput_pkt->crc32)) {
  5754. DHD_INFO(("%s: dropping tput pkt with id %u due to bad CRC !\n",
  5755. __FUNCTION__, ntoh32(tput_pkt->pkt_id)));
  5756. dhd->tput_data.pkts_bad++;
  5757. return;
  5758. }
  5759. if (ntoh16(tput_pkt->pkt_type) != TPUT_PKT_TYPE_STOP)
  5760. dhd->tput_data.pkts_good++;
  5761. /* if we have received the stop packet or all the # of pkts, we're done */
  5762. if (ntoh16(tput_pkt->pkt_type) == TPUT_PKT_TYPE_STOP ||
  5763. dhd->tput_data.pkts_cmpl == dhd->tput_data.num_pkts) {
  5764. dhd->tput_stop_ts = OSL_SYSUPTIME_US();
  5765. dhd_os_tput_test_wake(dhd);
  5766. }
  5767. }
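/*
 * For reference, a sketch of the tput test packet layout assumed by both the
 * TX path (dhd_tput_test) and the RX path above. Offsets are inferred from
 * tput_pkt_hdr_size and the hndcrc32() arguments; the exact tput_pkt_t
 * definition lives elsewhere:
 *
 *   | header: mac_sta .. up to crc32 (tput_pkt_hdr_size bytes)        |
 *   | crc32 (4 bytes)                                                 |
 *   | 8 bytes (pkt_id and num_pkts, per the TX-path comment)          |
 *   | remaining payload, (payload_size - 12) bytes                    |
 *
 * The CRC is computed over everything after the crc32 field, i.e.
 * 8 + (payload_size - 12) bytes starting at hdr + tput_pkt_hdr_size + 4.
 */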
  5768. #endif /* OEM_ANDROID */
  5769. #ifdef DUMP_IOCTL_IOV_LIST
  5770. void
  5771. dhd_iov_li_append(dhd_pub_t *dhd, dll_t *list_head, dll_t *node)
  5772. {
  5773. dll_t *item;
  5774. dhd_iov_li_t *iov_li;
  5775. dhd->dump_iovlist_len++;
  5776. if (dhd->dump_iovlist_len == IOV_LIST_MAX_LEN+1) {
  5777. item = dll_head_p(list_head);
  5778. iov_li = (dhd_iov_li_t *)CONTAINEROF(item, dhd_iov_li_t, list);
  5779. dll_delete(item);
  5780. MFREE(dhd->osh, iov_li, sizeof(*iov_li));
  5781. dhd->dump_iovlist_len--;
  5782. }
  5783. dll_append(list_head, node);
  5784. }
  5785. void
  5786. dhd_iov_li_print(dll_t *list_head)
  5787. {
  5788. dhd_iov_li_t *iov_li;
  5789. dll_t *item, *next;
  5790. uint8 index = 0;
  5791. for (item = dll_head_p(list_head); !dll_end(list_head, item); item = next) {
  5792. next = dll_next_p(item);
  5793. iov_li = (dhd_iov_li_t *)CONTAINEROF(item, dhd_iov_li_t, list);
  5794. DHD_ERROR(("%d:cmd_name = %s, cmd = %d.\n", ++index, iov_li->buff, iov_li->cmd));
  5795. }
  5796. }
  5797. void
  5798. dhd_iov_li_delete(dhd_pub_t *dhd, dll_t *list_head)
  5799. {
  5800. dll_t *item;
  5801. dhd_iov_li_t *iov_li;
  5802. while (!(dll_empty(list_head))) {
  5803. item = dll_head_p(list_head);
  5804. iov_li = (dhd_iov_li_t *)CONTAINEROF(item, dhd_iov_li_t, list);
  5805. dll_delete(item);
  5806. MFREE(dhd->osh, iov_li, sizeof(*iov_li));
  5807. }
  5808. }
  5809. #endif /* DUMP_IOCTL_IOV_LIST */
5810. /* configurations of ecounters to be enabled by default in FW */
  5811. static ecounters_cfg_t ecounters_cfg_tbl[] = {
  5812. /* Global ecounters */
  5813. {ECOUNTERS_STATS_TYPES_FLAG_GLOBAL, 0x0, WL_IFSTATS_XTLV_BUS_PCIE},
  5814. // {ECOUNTERS_STATS_TYPES_FLAG_GLOBAL, 0x0, WL_IFSTATS_XTLV_TX_AMPDU_STATS},
  5815. // {ECOUNTERS_STATS_TYPES_FLAG_GLOBAL, 0x0, WL_IFSTATS_XTLV_RX_AMPDU_STATS},
  5816. /* Slice specific ecounters */
  5817. {ECOUNTERS_STATS_TYPES_FLAG_SLICE, 0x0, WL_SLICESTATS_XTLV_PERIODIC_STATE},
  5818. {ECOUNTERS_STATS_TYPES_FLAG_SLICE, 0x1, WL_SLICESTATS_XTLV_PERIODIC_STATE},
  5819. {ECOUNTERS_STATS_TYPES_FLAG_SLICE, 0x1, WL_IFSTATS_XTLV_WL_SLICE_BTCOEX},
  5820. /* Interface specific ecounters */
  5821. {ECOUNTERS_STATS_TYPES_FLAG_IFACE, 0x0, WL_IFSTATS_XTLV_IF_PERIODIC_STATE},
  5822. {ECOUNTERS_STATS_TYPES_FLAG_IFACE, 0x0, WL_IFSTATS_XTLV_GENERIC},
  5823. {ECOUNTERS_STATS_TYPES_FLAG_IFACE, 0x0, WL_IFSTATS_XTLV_INFRA_SPECIFIC},
  5824. {ECOUNTERS_STATS_TYPES_FLAG_IFACE, 0x0, WL_IFSTATS_XTLV_MGT_CNT},
  5825. /* secondary interface */
  5826. };
  5827. static event_ecounters_cfg_t event_ecounters_cfg_tbl[] = {
  5828. /* Interface specific event ecounters */
  5829. {WLC_E_DEAUTH_IND, ECOUNTERS_STATS_TYPES_FLAG_IFACE, 0x0, WL_IFSTATS_XTLV_IF_EVENT_STATS},
  5830. };
  5831. /* Accepts an argument to -s, -g or -f and creates an XTLV */
  5832. int
  5833. dhd_create_ecounters_params(dhd_pub_t *dhd, uint16 type, uint16 if_slice_idx,
  5834. uint16 stats_rep, uint8 **xtlv)
  5835. {
  5836. uint8 *req_xtlv = NULL;
  5837. ecounters_stats_types_report_req_t *req;
  5838. bcm_xtlvbuf_t xtlvbuf, container_xtlvbuf;
  5839. ecountersv2_xtlv_list_elt_t temp;
  5840. uint16 xtlv_len = 0, total_len = 0;
  5841. int rc = BCME_OK;
  5842. /* fill in the stat type XTLV. For now there is no explicit TLV for the stat type. */
  5843. temp.id = stats_rep;
  5844. temp.len = 0;
  5845. /* Hence len/data = 0/NULL */
  5846. xtlv_len += temp.len + BCM_XTLV_HDR_SIZE;
  5847. /* Total length of the container */
  5848. total_len = BCM_XTLV_HDR_SIZE +
  5849. OFFSETOF(ecounters_stats_types_report_req_t, stats_types_req) + xtlv_len;
  5850. /* Now allocate a structure for the entire request */
  5851. if ((req_xtlv = (uint8 *)MALLOCZ(dhd->osh, total_len)) == NULL) {
  5852. rc = BCME_NOMEM;
  5853. goto fail;
  5854. }
  5855. /* container XTLV context */
  5856. bcm_xtlv_buf_init(&container_xtlvbuf, (uint8 *)req_xtlv, total_len,
  5857. BCM_XTLV_OPTION_ALIGN32);
  5858. /* Fill other XTLVs in the container. Leave space for XTLV headers */
  5859. req = (ecounters_stats_types_report_req_t *)(req_xtlv + BCM_XTLV_HDR_SIZE);
  5860. req->flags = type;
  5861. if (type == ECOUNTERS_STATS_TYPES_FLAG_SLICE) {
  5862. req->slice_mask = 0x1 << if_slice_idx;
  5863. } else if (type == ECOUNTERS_STATS_TYPES_FLAG_IFACE) {
  5864. req->if_index = if_slice_idx;
  5865. }
  5866. /* Fill remaining XTLVs */
  5867. bcm_xtlv_buf_init(&xtlvbuf, (uint8*) req->stats_types_req, xtlv_len,
  5868. BCM_XTLV_OPTION_ALIGN32);
  5869. if (bcm_xtlv_put_data(&xtlvbuf, temp.id, NULL, temp.len)) {
  5870. DHD_ERROR(("Error creating XTLV for requested stats type = %d\n", temp.id));
  5871. rc = BCME_ERROR;
  5872. goto fail;
  5873. }
  5874. /* fill the top level container and get done with the XTLV container */
  5875. rc = bcm_xtlv_put_data(&container_xtlvbuf, WL_ECOUNTERS_XTLV_REPORT_REQ, NULL,
  5876. bcm_xtlv_buf_len(&xtlvbuf) + OFFSETOF(ecounters_stats_types_report_req_t,
  5877. stats_types_req));
  5878. if (rc) {
  5879. DHD_ERROR(("Error creating parent XTLV for type = %d\n", req->flags));
  5880. goto fail;
  5881. }
  5882. fail:
  5883. if (rc && req_xtlv) {
  5884. MFREE(dhd->osh, req_xtlv, total_len);
  5885. req_xtlv = NULL;
  5886. }
  5887. /* update the xtlv pointer */
  5888. *xtlv = req_xtlv;
  5889. return rc;
  5890. }
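/*
 * A minimal sketch of the request buffer produced above (lengths taken from
 * the bcm_xtlv_buf_init()/bcm_xtlv_put_data() calls; everything is aligned
 * per BCM_XTLV_OPTION_ALIGN32):
 *
 *   | XTLV hdr: WL_ECOUNTERS_XTLV_REPORT_REQ                          |
 *   |   ecounters_stats_types_report_req_t fixed part                 |
 *   |     (flags, slice_mask or if_index)                             |
 *   |   XTLV hdr: id = stats_rep, len = 0 (no payload for now)        |
 */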
  5891. int
  5892. dhd_get_preserve_log_numbers(dhd_pub_t *dhd, uint32 *logset_mask)
  5893. {
  5894. wl_el_set_type_t logset_type, logset_op;
  5895. int ret = BCME_ERROR;
  5896. int i = 0, err = 0;
  5897. if (!dhd || !logset_mask)
  5898. return BCME_BADARG;
  5899. *logset_mask = 0;
  5900. memset(&logset_type, 0, sizeof(logset_type));
  5901. memset(&logset_op, 0, sizeof(logset_op));
  5902. logset_type.version = htod16(EVENT_LOG_SET_TYPE_CURRENT_VERSION);
  5903. logset_type.len = htod16(sizeof(wl_el_set_type_t));
  5904. for (i = 0; i < dhd->event_log_max_sets; i++) {
  5905. logset_type.set = i;
  5906. err = dhd_iovar(dhd, 0, "event_log_set_type", (char *)&logset_type,
  5907. sizeof(logset_type), (char *)&logset_op, sizeof(logset_op), FALSE);
  5908. /* the iovar may return 'unsupported' error if a log set number is not present
  5909. * in the fw, so we should not return on error !
  5910. */
  5911. if (err == BCME_OK &&
  5912. logset_op.type == EVENT_LOG_SET_TYPE_PRSRV) {
  5913. *logset_mask |= 0x01u << i;
  5914. ret = BCME_OK;
  5915. DHD_ERROR(("[INIT] logset:%d is preserve/chatty\n", i));
  5916. }
  5917. }
  5918. return ret;
  5919. }
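/*
 * Example: if the event_log_set_type iovar reports sets 1 and 3 as
 * EVENT_LOG_SET_TYPE_PRSRV, the loop above returns BCME_OK with
 * *logset_mask == ((0x01u << 1) | (0x01u << 3)) == 0x0A.
 */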
  5920. static int
  5921. dhd_ecounter_autoconfig(dhd_pub_t *dhd)
  5922. {
  5923. int rc = BCME_OK;
  5924. uint32 buf;
  5925. rc = dhd_iovar(dhd, 0, "ecounters_autoconfig", NULL, 0, (char *)&buf, sizeof(buf), FALSE);
  5926. if (rc != BCME_OK) {
  5927. if (rc != BCME_UNSUPPORTED) {
5928. DHD_ERROR(("%s Ecounter autoconfig in fw failed : %d\n", __FUNCTION__, rc));
5929. rc = BCME_OK;
  5930. } else {
  5931. DHD_ERROR(("%s Ecounter autoconfig in FW not supported\n", __FUNCTION__));
  5932. }
  5933. }
  5934. return rc;
  5935. }
  5936. int
  5937. dhd_ecounter_configure(dhd_pub_t *dhd, bool enable)
  5938. {
  5939. int rc = BCME_OK;
  5940. if (enable) {
  5941. if (dhd_ecounter_autoconfig(dhd) != BCME_OK) {
  5942. if ((rc = dhd_start_ecounters(dhd)) != BCME_OK) {
  5943. DHD_ERROR(("%s Ecounters start failed\n", __FUNCTION__));
  5944. } else if ((rc = dhd_start_event_ecounters(dhd)) != BCME_OK) {
  5945. DHD_ERROR(("%s Event_Ecounters start failed\n", __FUNCTION__));
  5946. }
  5947. }
  5948. } else {
  5949. if ((rc = dhd_stop_ecounters(dhd)) != BCME_OK) {
  5950. DHD_ERROR(("%s Ecounters stop failed\n", __FUNCTION__));
  5951. } else if ((rc = dhd_stop_event_ecounters(dhd)) != BCME_OK) {
  5952. DHD_ERROR(("%s Event_Ecounters stop failed\n", __FUNCTION__));
  5953. }
  5954. }
  5955. return rc;
  5956. }
  5957. int
  5958. dhd_start_ecounters(dhd_pub_t *dhd)
  5959. {
  5960. uint8 i = 0;
  5961. uint8 *start_ptr;
  5962. int rc = BCME_OK;
  5963. bcm_xtlv_t *elt;
  5964. ecounters_config_request_v2_t *req = NULL;
  5965. ecountersv2_processed_xtlv_list_elt *list_elt, *tail = NULL;
  5966. ecountersv2_processed_xtlv_list_elt *processed_containers_list = NULL;
  5967. uint16 total_processed_containers_len = 0;
  5968. for (i = 0; i < ARRAYSIZE(ecounters_cfg_tbl); i++) {
  5969. ecounters_cfg_t *ecounter_stat = &ecounters_cfg_tbl[i];
  5970. if ((list_elt = (ecountersv2_processed_xtlv_list_elt *)
  5971. MALLOCZ(dhd->osh, sizeof(*list_elt))) == NULL) {
  5972. DHD_ERROR(("Ecounters v2: No memory to process\n"));
  5973. goto fail;
  5974. }
  5975. rc = dhd_create_ecounters_params(dhd, ecounter_stat->type,
  5976. ecounter_stat->if_slice_idx, ecounter_stat->stats_rep, &list_elt->data);
  5977. if (rc) {
  5978. DHD_ERROR(("Ecounters v2: Could not process: stat: %d return code: %d\n",
  5979. ecounter_stat->stats_rep, rc));
5980. /* Free this element and go to fail to release memory allocated in
5981. * previous iterations. Note that list_elt->data is populated by
5982. * dhd_create_ecounters_params(), which also frees it on failure.
5983. */
  5984. MFREE(dhd->osh, list_elt, sizeof(*list_elt));
  5985. list_elt = NULL;
  5986. goto fail;
  5987. }
  5988. elt = (bcm_xtlv_t *) list_elt->data;
  5989. /* Put the elements in the order they are processed */
  5990. if (processed_containers_list == NULL) {
  5991. processed_containers_list = list_elt;
  5992. } else {
  5993. tail->next = list_elt;
  5994. }
  5995. tail = list_elt;
  5996. /* Size of the XTLV returned */
  5997. total_processed_containers_len += BCM_XTLV_LEN(elt) + BCM_XTLV_HDR_SIZE;
  5998. }
5999. /* Now create the ecounters config request with the total length */
  6000. req = (ecounters_config_request_v2_t *)MALLOCZ(dhd->osh, sizeof(*req) +
  6001. total_processed_containers_len);
  6002. if (req == NULL) {
  6003. rc = BCME_NOMEM;
  6004. goto fail;
  6005. }
  6006. req->version = ECOUNTERS_VERSION_2;
  6007. req->logset = EVENT_LOG_SET_ECOUNTERS;
  6008. req->reporting_period = ECOUNTERS_DEFAULT_PERIOD;
  6009. req->num_reports = ECOUNTERS_NUM_REPORTS;
  6010. req->len = total_processed_containers_len +
  6011. OFFSETOF(ecounters_config_request_v2_t, ecounters_xtlvs);
  6012. /* Copy config */
  6013. start_ptr = req->ecounters_xtlvs;
  6014. /* Now go element by element in the list */
  6015. while (processed_containers_list) {
  6016. list_elt = processed_containers_list;
  6017. elt = (bcm_xtlv_t *)list_elt->data;
  6018. memcpy(start_ptr, list_elt->data, BCM_XTLV_LEN(elt) + BCM_XTLV_HDR_SIZE);
  6019. start_ptr += (size_t)(BCM_XTLV_LEN(elt) + BCM_XTLV_HDR_SIZE);
  6020. processed_containers_list = processed_containers_list->next;
  6021. /* Free allocated memories */
  6022. MFREE(dhd->osh, elt, elt->len + BCM_XTLV_HDR_SIZE);
  6023. MFREE(dhd->osh, list_elt, sizeof(*list_elt));
  6024. }
  6025. if ((rc = dhd_iovar(dhd, 0, "ecounters", (char *)req, req->len, NULL, 0, TRUE)) < 0) {
  6026. DHD_ERROR(("failed to start ecounters\n"));
  6027. }
  6028. fail:
  6029. if (req) {
  6030. MFREE(dhd->osh, req, sizeof(*req) + total_processed_containers_len);
  6031. }
  6032. /* Now go element by element in the list */
  6033. while (processed_containers_list) {
  6034. list_elt = processed_containers_list;
  6035. elt = (bcm_xtlv_t *)list_elt->data;
  6036. processed_containers_list = processed_containers_list->next;
  6037. /* Free allocated memories */
  6038. MFREE(dhd->osh, elt, elt->len + BCM_XTLV_HDR_SIZE);
  6039. MFREE(dhd->osh, list_elt, sizeof(*list_elt));
  6040. }
  6041. return rc;
  6042. }
  6043. int
  6044. dhd_stop_ecounters(dhd_pub_t *dhd)
  6045. {
  6046. int rc = BCME_OK;
  6047. ecounters_config_request_v2_t *req;
6048. /* Now create the ecounters config request with the total length */
  6049. req = (ecounters_config_request_v2_t *)MALLOCZ(dhd->osh, sizeof(*req));
  6050. if (req == NULL) {
  6051. rc = BCME_NOMEM;
  6052. goto fail;
  6053. }
  6054. req->version = ECOUNTERS_VERSION_2;
  6055. req->len = OFFSETOF(ecounters_config_request_v2_t, ecounters_xtlvs);
  6056. if ((rc = dhd_iovar(dhd, 0, "ecounters", (char *)req, req->len, NULL, 0, TRUE)) < 0) {
  6057. DHD_ERROR(("failed to stop ecounters\n"));
  6058. }
  6059. fail:
  6060. if (req) {
  6061. MFREE(dhd->osh, req, sizeof(*req));
  6062. }
  6063. return rc;
  6064. }
  6065. /* configured event_id_array for event ecounters */
  6066. typedef struct event_id_array {
  6067. uint8 event_id;
  6068. uint8 str_idx;
  6069. } event_id_array_t;
  6070. /* get event id array only from event_ecounters_cfg_tbl[] */
  6071. static inline int __dhd_event_ecounters_get_event_id_array(event_id_array_t *event_array)
  6072. {
  6073. uint8 i;
  6074. uint8 idx = 0;
  6075. int32 prev_evt_id = -1;
  6076. for (i = 0; i < (uint8)ARRAYSIZE(event_ecounters_cfg_tbl); i++) {
  6077. if (prev_evt_id != event_ecounters_cfg_tbl[i].event_id) {
  6078. if (prev_evt_id >= 0)
  6079. idx++;
  6080. event_array[idx].event_id = event_ecounters_cfg_tbl[i].event_id;
  6081. event_array[idx].str_idx = i;
  6082. }
  6083. prev_evt_id = event_ecounters_cfg_tbl[i].event_id;
  6084. }
  6085. return idx;
  6086. }
6087. /* Each event id is limited to this many XTLVs per request: the number of wl_ifstats_xtlv_id entries (31) times 2 interfaces */
  6088. #define ECNTRS_MAX_XTLV_NUM (31 * 2)
  6089. int
  6090. dhd_start_event_ecounters(dhd_pub_t *dhd)
  6091. {
  6092. uint8 i, j = 0;
  6093. uint8 event_id_cnt = 0;
  6094. uint16 processed_containers_len = 0;
  6095. uint16 max_xtlv_len = 0;
  6096. int rc = BCME_OK;
  6097. uint8 *ptr;
  6098. uint8 *data;
  6099. event_id_array_t *id_array;
  6100. bcm_xtlv_t *elt = NULL;
  6101. event_ecounters_config_request_v2_t *req = NULL;
  6102. id_array = (event_id_array_t *)MALLOCZ(dhd->osh, sizeof(event_id_array_t) *
  6103. ARRAYSIZE(event_ecounters_cfg_tbl));
  6104. if (id_array == NULL) {
  6105. rc = BCME_NOMEM;
  6106. goto fail;
  6107. }
  6108. event_id_cnt = __dhd_event_ecounters_get_event_id_array(id_array);
  6109. max_xtlv_len = ((BCM_XTLV_HDR_SIZE +
  6110. OFFSETOF(event_ecounters_config_request_v2_t, ecounters_xtlvs)) *
  6111. ECNTRS_MAX_XTLV_NUM);
  6112. /* Now create ecounters config request with max allowed length */
6113. req = (event_ecounters_config_request_v2_t *)MALLOCZ(dhd->osh,
6114. sizeof(*req) + max_xtlv_len);
  6115. if (req == NULL) {
  6116. rc = BCME_NOMEM;
  6117. goto fail;
  6118. }
  6119. for (i = 0; i <= event_id_cnt; i++) {
  6120. /* req initialization by event id */
  6121. req->version = ECOUNTERS_VERSION_2;
  6122. req->logset = EVENT_LOG_SET_ECOUNTERS;
  6123. req->event_id = id_array[i].event_id;
  6124. req->flags = EVENT_ECOUNTERS_FLAGS_ADD;
  6125. req->len = 0;
  6126. processed_containers_len = 0;
  6127. /* Copy config */
  6128. ptr = req->ecounters_xtlvs;
  6129. for (j = id_array[i].str_idx; j < (uint8)ARRAYSIZE(event_ecounters_cfg_tbl); j++) {
  6130. event_ecounters_cfg_t *event_ecounter_stat = &event_ecounters_cfg_tbl[j];
  6131. if (id_array[i].event_id != event_ecounter_stat->event_id)
  6132. break;
  6133. rc = dhd_create_ecounters_params(dhd, event_ecounter_stat->type,
  6134. event_ecounter_stat->if_slice_idx, event_ecounter_stat->stats_rep,
  6135. &data);
  6136. if (rc) {
  6137. DHD_ERROR(("%s: Could not process: stat: %d return code: %d\n",
  6138. __FUNCTION__, event_ecounter_stat->stats_rep, rc));
  6139. goto fail;
  6140. }
  6141. elt = (bcm_xtlv_t *)data;
  6142. memcpy(ptr, elt, BCM_XTLV_LEN(elt) + BCM_XTLV_HDR_SIZE);
  6143. ptr += (size_t)(BCM_XTLV_LEN(elt) + BCM_XTLV_HDR_SIZE);
  6144. processed_containers_len += BCM_XTLV_LEN(elt) + BCM_XTLV_HDR_SIZE;
6145. /* Free memory allocated by dhd_create_ecounters_params */
  6146. MFREE(dhd->osh, elt, elt->len + BCM_XTLV_HDR_SIZE);
  6147. if (processed_containers_len > max_xtlv_len) {
  6148. DHD_ERROR(("%s XTLV NUM IS OVERFLOWED THAN ALLOWED!!\n",
  6149. __FUNCTION__));
  6150. rc = BCME_BADLEN;
  6151. goto fail;
  6152. }
  6153. }
  6154. req->len = processed_containers_len +
  6155. OFFSETOF(event_ecounters_config_request_v2_t, ecounters_xtlvs);
  6156. DHD_INFO(("%s req version %d logset %d event_id %d flags %d len %d\n",
  6157. __FUNCTION__, req->version, req->logset, req->event_id,
  6158. req->flags, req->len));
  6159. rc = dhd_iovar(dhd, 0, "event_ecounters", (char *)req, req->len, NULL, 0, TRUE);
  6160. if (rc < 0) {
  6161. DHD_ERROR(("failed to start event_ecounters(event id %d) with rc %d\n",
  6162. req->event_id, rc));
  6163. goto fail;
  6164. }
  6165. }
  6166. fail:
  6167. /* Free allocated memories */
  6168. if (req) {
6169. MFREE(dhd->osh, req, sizeof(*req) + max_xtlv_len);
  6170. }
  6171. if (id_array) {
  6172. MFREE(dhd->osh, id_array, sizeof(event_id_array_t) *
  6173. ARRAYSIZE(event_ecounters_cfg_tbl));
  6174. }
  6175. return rc;
  6176. }
  6177. int
  6178. dhd_stop_event_ecounters(dhd_pub_t *dhd)
  6179. {
  6180. int rc = BCME_OK;
  6181. event_ecounters_config_request_v2_t *req;
6182. /* Now create the ecounters config request with the total length */
  6183. req = (event_ecounters_config_request_v2_t *)MALLOCZ(dhd->osh, sizeof(*req));
  6184. if (req == NULL) {
  6185. rc = BCME_NOMEM;
  6186. goto fail;
  6187. }
  6188. req->version = ECOUNTERS_VERSION_2;
  6189. req->flags = EVENT_ECOUNTERS_FLAGS_DEL_ALL;
  6190. req->len = OFFSETOF(event_ecounters_config_request_v2_t, ecounters_xtlvs);
  6191. if ((rc = dhd_iovar(dhd, 0, "event_ecounters", (char *)req, req->len, NULL, 0, TRUE)) < 0) {
  6192. DHD_ERROR(("failed to stop event_ecounters\n"));
  6193. }
  6194. fail:
  6195. if (req) {
  6196. MFREE(dhd->osh, req, sizeof(*req));
  6197. }
  6198. return rc;
  6199. }
  6200. #ifdef DHD_LOG_DUMP
  6201. int
  6202. dhd_dump_debug_ring(dhd_pub_t *dhdp, void *ring_ptr, const void *user_buf,
  6203. log_dump_section_hdr_t *sec_hdr,
  6204. char *text_hdr, int buflen, uint32 sec_type)
  6205. {
  6206. uint32 rlen = 0;
  6207. uint32 data_len = 0;
  6208. void *data = NULL;
  6209. unsigned long flags = 0;
  6210. int ret = 0;
  6211. dhd_dbg_ring_t *ring = (dhd_dbg_ring_t *)ring_ptr;
  6212. int pos = 0;
  6213. int fpos_sechdr = 0;
  6214. if (!dhdp || !ring || !user_buf || !sec_hdr || !text_hdr) {
  6215. return BCME_BADARG;
  6216. }
  6217. /* do not allow further writes to the ring
  6218. * till we flush it
  6219. */
  6220. DHD_DBG_RING_LOCK(ring->lock, flags);
  6221. ring->state = RING_SUSPEND;
  6222. DHD_DBG_RING_UNLOCK(ring->lock, flags);
  6223. if (dhdp->concise_dbg_buf) {
  6224. /* re-use concise debug buffer temporarily
  6225. * to pull ring data, to write
  6226. * record by record to file
  6227. */
  6228. data_len = CONCISE_DUMP_BUFLEN;
  6229. data = dhdp->concise_dbg_buf;
  6230. ret = dhd_export_debug_data(text_hdr, NULL, user_buf, strlen(text_hdr), &pos);
  6231. /* write the section header now with zero length,
  6232. * once the correct length is found out, update
  6233. * it later
  6234. */
  6235. fpos_sechdr = pos;
  6236. sec_hdr->type = sec_type;
  6237. sec_hdr->length = 0;
  6238. ret = dhd_export_debug_data((char *)sec_hdr, NULL, user_buf,
  6239. sizeof(*sec_hdr), &pos);
  6240. do {
  6241. rlen = dhd_dbg_ring_pull_single(ring, data, data_len, TRUE);
  6242. if (rlen > 0) {
  6243. /* write the log */
  6244. ret = dhd_export_debug_data(data, NULL, user_buf, rlen, &pos);
  6245. }
  6246. DHD_DBGIF(("%s: rlen : %d\n", __FUNCTION__, rlen));
  6247. } while ((rlen > 0));
  6248. /* now update the section header length in the file */
  6249. /* Complete ring size is dumped by HAL, hence updating length to ring size */
  6250. sec_hdr->length = ring->ring_size;
  6251. ret = dhd_export_debug_data((char *)sec_hdr, NULL, user_buf,
  6252. sizeof(*sec_hdr), &fpos_sechdr);
  6253. } else {
  6254. DHD_ERROR(("%s: No concise buffer available !\n", __FUNCTION__));
  6255. }
  6256. DHD_DBG_RING_LOCK(ring->lock, flags);
  6257. ring->state = RING_ACTIVE;
  6258. /* Resetting both read and write pointer,
  6259. * since all items are read.
  6260. */
  6261. ring->rp = ring->wp = 0;
  6262. DHD_DBG_RING_UNLOCK(ring->lock, flags);
  6263. return ret;
  6264. }
  6265. int
  6266. dhd_log_dump_ring_to_file(dhd_pub_t *dhdp, void *ring_ptr, void *file,
  6267. unsigned long *file_posn, log_dump_section_hdr_t *sec_hdr,
  6268. char *text_hdr, uint32 sec_type)
  6269. {
  6270. uint32 rlen = 0;
  6271. uint32 data_len = 0, total_len = 0;
  6272. void *data = NULL;
  6273. unsigned long fpos_sechdr = 0;
  6274. unsigned long flags = 0;
  6275. int ret = 0;
  6276. dhd_dbg_ring_t *ring = (dhd_dbg_ring_t *)ring_ptr;
  6277. if (!dhdp || !ring || !file || !sec_hdr ||
  6278. !file_posn || !text_hdr)
  6279. return BCME_BADARG;
  6280. /* do not allow further writes to the ring
  6281. * till we flush it
  6282. */
  6283. DHD_DBG_RING_LOCK(ring->lock, flags);
  6284. ring->state = RING_SUSPEND;
  6285. DHD_DBG_RING_UNLOCK(ring->lock, flags);
  6286. if (dhdp->concise_dbg_buf) {
  6287. /* re-use concise debug buffer temporarily
  6288. * to pull ring data, to write
  6289. * record by record to file
  6290. */
  6291. data_len = CONCISE_DUMP_BUFLEN;
  6292. data = dhdp->concise_dbg_buf;
  6293. dhd_os_write_file_posn(file, file_posn, text_hdr,
  6294. strlen(text_hdr));
  6295. /* write the section header now with zero length,
  6296. * once the correct length is found out, update
  6297. * it later
  6298. */
  6299. dhd_init_sec_hdr(sec_hdr);
  6300. fpos_sechdr = *file_posn;
  6301. sec_hdr->type = sec_type;
  6302. sec_hdr->length = 0;
  6303. dhd_os_write_file_posn(file, file_posn, (char *)sec_hdr,
  6304. sizeof(*sec_hdr));
  6305. do {
  6306. rlen = dhd_dbg_ring_pull_single(ring, data, data_len, TRUE);
  6307. if (rlen > 0) {
  6308. /* write the log */
  6309. ret = dhd_os_write_file_posn(file, file_posn, data, rlen);
  6310. if (ret < 0) {
  6311. DHD_ERROR(("%s: write file error !\n", __FUNCTION__));
  6312. DHD_DBG_RING_LOCK(ring->lock, flags);
  6313. ring->state = RING_ACTIVE;
  6314. DHD_DBG_RING_UNLOCK(ring->lock, flags);
  6315. return BCME_ERROR;
  6316. }
  6317. }
  6318. total_len += rlen;
  6319. } while (rlen > 0);
  6320. /* now update the section header length in the file */
  6321. sec_hdr->length = total_len;
  6322. dhd_os_write_file_posn(file, &fpos_sechdr, (char *)sec_hdr, sizeof(*sec_hdr));
  6323. } else {
  6324. DHD_ERROR(("%s: No concise buffer available !\n", __FUNCTION__));
  6325. }
  6326. DHD_DBG_RING_LOCK(ring->lock, flags);
  6327. ring->state = RING_ACTIVE;
  6328. /* Resetting both read and write pointer,
  6329. * since all items are read.
  6330. */
  6331. ring->rp = ring->wp = 0;
  6332. DHD_DBG_RING_UNLOCK(ring->lock, flags);
  6333. return BCME_OK;
  6334. }
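/*
 * For reference, the per-ring section emitted above has the following on-file
 * layout; the section header is written twice, first with length 0 and then
 * rewritten at fpos_sechdr once the total length is known:
 *
 *   | text_hdr string | log_dump_section_hdr_t | ring records (total_len bytes) |
 */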
  6335. /* logdump cookie */
  6336. #define MAX_LOGUDMP_COOKIE_CNT 10u
  6337. #define LOGDUMP_COOKIE_STR_LEN 50u
  6338. int
  6339. dhd_logdump_cookie_init(dhd_pub_t *dhdp, uint8 *buf, uint32 buf_size)
  6340. {
  6341. uint32 ring_size;
  6342. if (!dhdp || !buf) {
  6343. DHD_ERROR(("INVALID PTR: dhdp:%p buf:%p\n", dhdp, buf));
  6344. return BCME_ERROR;
  6345. }
  6346. ring_size = dhd_ring_get_hdr_size() + LOGDUMP_COOKIE_STR_LEN * MAX_LOGUDMP_COOKIE_CNT;
  6347. if (buf_size < ring_size) {
  6348. DHD_ERROR(("BUF SIZE IS TO SHORT: req:%d buf_size:%d\n",
  6349. ring_size, buf_size));
  6350. return BCME_ERROR;
  6351. }
  6352. dhdp->logdump_cookie = dhd_ring_init(dhdp, buf, buf_size,
  6353. LOGDUMP_COOKIE_STR_LEN, MAX_LOGUDMP_COOKIE_CNT,
  6354. DHD_RING_TYPE_FIXED);
  6355. if (!dhdp->logdump_cookie) {
  6356. DHD_ERROR(("FAIL TO INIT COOKIE RING\n"));
  6357. return BCME_ERROR;
  6358. }
  6359. return BCME_OK;
  6360. }
  6361. void
  6362. dhd_logdump_cookie_deinit(dhd_pub_t *dhdp)
  6363. {
  6364. if (!dhdp) {
  6365. return;
  6366. }
  6367. if (dhdp->logdump_cookie) {
  6368. dhd_ring_deinit(dhdp, dhdp->logdump_cookie);
  6369. }
  6370. return;
  6371. }
  6372. void
  6373. dhd_logdump_cookie_save(dhd_pub_t *dhdp, char *cookie, char *type)
  6374. {
  6375. char *ptr;
  6376. if (!dhdp || !cookie || !type || !dhdp->logdump_cookie) {
  6377. DHD_ERROR(("%s: At least one buffer ptr is NULL dhdp=%p cookie=%p"
  6378. " type = %p, cookie_cfg:%p\n", __FUNCTION__,
  6379. dhdp, cookie, type, dhdp?dhdp->logdump_cookie: NULL));
  6380. return;
  6381. }
  6382. ptr = (char *)dhd_ring_get_empty(dhdp->logdump_cookie);
  6383. if (ptr == NULL) {
  6384. DHD_ERROR(("%s : Skip to save due to locking\n", __FUNCTION__));
  6385. return;
  6386. }
  6387. scnprintf(ptr, LOGDUMP_COOKIE_STR_LEN, "%s: %s\n", type, cookie);
  6388. return;
  6389. }
  6390. int
  6391. dhd_logdump_cookie_get(dhd_pub_t *dhdp, char *ret_cookie, uint32 buf_size)
  6392. {
  6393. char *ptr;
  6394. if (!dhdp || !ret_cookie || !dhdp->logdump_cookie) {
  6395. DHD_ERROR(("%s: At least one buffer ptr is NULL dhdp=%p"
  6396. "cookie=%p cookie_cfg:%p\n", __FUNCTION__,
  6397. dhdp, ret_cookie, dhdp?dhdp->logdump_cookie: NULL));
  6398. return BCME_ERROR;
  6399. }
  6400. ptr = (char *)dhd_ring_get_first(dhdp->logdump_cookie);
  6401. if (ptr == NULL) {
  6402. DHD_ERROR(("%s : Skip to save due to locking\n", __FUNCTION__));
  6403. return BCME_ERROR;
  6404. }
  6405. memcpy(ret_cookie, ptr, MIN(buf_size, strlen(ptr)));
  6406. dhd_ring_free_first(dhdp->logdump_cookie);
  6407. return BCME_OK;
  6408. }
  6409. int
  6410. dhd_logdump_cookie_count(dhd_pub_t *dhdp)
  6411. {
  6412. if (!dhdp || !dhdp->logdump_cookie) {
  6413. DHD_ERROR(("%s: At least one buffer ptr is NULL dhdp=%p cookie=%p\n",
  6414. __FUNCTION__, dhdp, dhdp?dhdp->logdump_cookie: NULL));
  6415. return 0;
  6416. }
  6417. return dhd_ring_get_cur_size(dhdp->logdump_cookie);
  6418. }
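#if 0
/*
 * Illustrative sketch only (never compiled): shows how the logdump cookie ring
 * above is typically exercised. The cookie string "example-cookie-1234" and the
 * "MEMDUMP" tag are made-up example values, not driver-defined constants.
 */
static void
dhd_logdump_cookie_example(dhd_pub_t *dhdp)
{
	char cookie_buf[LOGDUMP_COOKIE_STR_LEN];

	/* record a cookie string against a dump type */
	dhd_logdump_cookie_save(dhdp, "example-cookie-1234", "MEMDUMP");

	/* later, drain the ring oldest-first */
	while (dhd_logdump_cookie_count(dhdp) > 0) {
		memset(cookie_buf, 0, sizeof(cookie_buf));
		if (dhd_logdump_cookie_get(dhdp, cookie_buf, sizeof(cookie_buf)) != BCME_OK) {
			break;
		}
		DHD_ERROR(("cookie: %s", cookie_buf));
	}
}
#endif /* 0 - illustrative sketch */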
  6419. static inline int
  6420. __dhd_log_dump_cookie_to_file(
  6421. dhd_pub_t *dhdp, void *fp, const void *user_buf, unsigned long *f_pos,
  6422. char *buf, uint32 buf_size)
  6423. {
  6424. uint32 remain = buf_size;
  6425. int ret = BCME_ERROR;
  6426. char tmp_buf[LOGDUMP_COOKIE_STR_LEN];
  6427. log_dump_section_hdr_t sec_hdr;
  6428. uint32 read_idx;
  6429. uint32 write_idx;
  6430. read_idx = dhd_ring_get_read_idx(dhdp->logdump_cookie);
  6431. write_idx = dhd_ring_get_write_idx(dhdp->logdump_cookie);
  6432. while (dhd_logdump_cookie_count(dhdp) > 0) {
  6433. memset(tmp_buf, 0, sizeof(tmp_buf));
  6434. ret = dhd_logdump_cookie_get(dhdp, tmp_buf, LOGDUMP_COOKIE_STR_LEN);
  6435. if (ret != BCME_OK) {
  6436. return ret;
  6437. }
  6438. remain -= scnprintf(&buf[buf_size - remain], remain, "%s", tmp_buf);
  6439. }
  6440. dhd_ring_set_read_idx(dhdp->logdump_cookie, read_idx);
  6441. dhd_ring_set_write_idx(dhdp->logdump_cookie, write_idx);
  6442. ret = dhd_export_debug_data(COOKIE_LOG_HDR, fp, user_buf, strlen(COOKIE_LOG_HDR), f_pos);
  6443. if (ret < 0) {
  6444. DHD_ERROR(("%s : Write file Error for cookie hdr\n", __FUNCTION__));
  6445. return ret;
  6446. }
  6447. sec_hdr.magic = LOG_DUMP_MAGIC;
  6448. sec_hdr.timestamp = local_clock();
  6449. sec_hdr.type = LOG_DUMP_SECTION_COOKIE;
  6450. sec_hdr.length = buf_size - remain;
  6451. ret = dhd_export_debug_data((char *)&sec_hdr, fp, user_buf, sizeof(sec_hdr), f_pos);
  6452. if (ret < 0) {
  6453. DHD_ERROR(("%s : Write file Error for section hdr\n", __FUNCTION__));
  6454. return ret;
  6455. }
  6456. ret = dhd_export_debug_data(buf, fp, user_buf, sec_hdr.length, f_pos);
  6457. if (ret < 0) {
  6458. DHD_ERROR(("%s : Write file Error for cookie data\n", __FUNCTION__));
  6459. }
  6460. return ret;
  6461. }
  6462. uint32
  6463. dhd_log_dump_cookie_len(dhd_pub_t *dhdp)
  6464. {
  6465. int len = 0;
  6466. char tmp_buf[LOGDUMP_COOKIE_STR_LEN];
  6467. log_dump_section_hdr_t sec_hdr;
  6468. char *buf = NULL;
  6469. int ret = BCME_ERROR;
  6470. uint32 buf_size = MAX_LOGUDMP_COOKIE_CNT * LOGDUMP_COOKIE_STR_LEN;
  6471. uint32 read_idx;
  6472. uint32 write_idx;
  6473. uint32 remain;
  6474. remain = buf_size;
  6475. if (!dhdp || !dhdp->logdump_cookie) {
  6476. DHD_ERROR(("%s At least one ptr is NULL "
  6477. "dhdp = %p cookie %p\n",
  6478. __FUNCTION__, dhdp, dhdp?dhdp->logdump_cookie:NULL));
  6479. goto exit;
  6480. }
  6481. buf = (char *)MALLOCZ(dhdp->osh, buf_size);
  6482. if (!buf) {
  6483. DHD_ERROR(("%s Fail to malloc buffer\n", __FUNCTION__));
  6484. goto exit;
  6485. }
  6486. read_idx = dhd_ring_get_read_idx(dhdp->logdump_cookie);
  6487. write_idx = dhd_ring_get_write_idx(dhdp->logdump_cookie);
  6488. while (dhd_logdump_cookie_count(dhdp) > 0) {
  6489. memset(tmp_buf, 0, sizeof(tmp_buf));
  6490. ret = dhd_logdump_cookie_get(dhdp, tmp_buf, LOGDUMP_COOKIE_STR_LEN);
  6491. if (ret != BCME_OK) {
  6492. goto exit;
  6493. }
  6494. remain -= (uint32)strlen(tmp_buf);
  6495. }
  6496. dhd_ring_set_read_idx(dhdp->logdump_cookie, read_idx);
  6497. dhd_ring_set_write_idx(dhdp->logdump_cookie, write_idx);
  6498. len += strlen(COOKIE_LOG_HDR);
  6499. len += sizeof(sec_hdr);
  6500. len += (buf_size - remain);
  6501. exit:
  6502. if (buf)
  6503. MFREE(dhdp->osh, buf, buf_size);
  6504. return len;
  6505. }
  6506. int
  6507. dhd_log_dump_cookie(dhd_pub_t *dhdp, const void *user_buf)
  6508. {
  6509. int ret = BCME_ERROR;
  6510. char tmp_buf[LOGDUMP_COOKIE_STR_LEN];
  6511. log_dump_section_hdr_t sec_hdr;
  6512. char *buf = NULL;
  6513. uint32 buf_size = MAX_LOGUDMP_COOKIE_CNT * LOGDUMP_COOKIE_STR_LEN;
  6514. int pos = 0;
  6515. uint32 read_idx;
  6516. uint32 write_idx;
  6517. uint32 remain;
  6518. remain = buf_size;
  6519. if (!dhdp || !dhdp->logdump_cookie) {
  6520. DHD_ERROR(("%s At least one ptr is NULL "
  6521. "dhdp = %p cookie %p\n",
  6522. __FUNCTION__, dhdp, dhdp?dhdp->logdump_cookie:NULL));
  6523. goto exit;
  6524. }
  6525. buf = (char *)MALLOCZ(dhdp->osh, buf_size);
  6526. if (!buf) {
  6527. DHD_ERROR(("%s Fail to malloc buffer\n", __FUNCTION__));
  6528. goto exit;
  6529. }
  6530. read_idx = dhd_ring_get_read_idx(dhdp->logdump_cookie);
  6531. write_idx = dhd_ring_get_write_idx(dhdp->logdump_cookie);
  6532. while (dhd_logdump_cookie_count(dhdp) > 0) {
  6533. memset(tmp_buf, 0, sizeof(tmp_buf));
  6534. ret = dhd_logdump_cookie_get(dhdp, tmp_buf, LOGDUMP_COOKIE_STR_LEN);
  6535. if (ret != BCME_OK) {
  6536. goto exit;
  6537. }
  6538. remain -= scnprintf(&buf[buf_size - remain], remain, "%s", tmp_buf);
  6539. }
  6540. dhd_ring_set_read_idx(dhdp->logdump_cookie, read_idx);
  6541. dhd_ring_set_write_idx(dhdp->logdump_cookie, write_idx);
  6542. ret = dhd_export_debug_data(COOKIE_LOG_HDR, NULL, user_buf, strlen(COOKIE_LOG_HDR), &pos);
  6543. sec_hdr.magic = LOG_DUMP_MAGIC;
  6544. sec_hdr.timestamp = local_clock();
  6545. sec_hdr.type = LOG_DUMP_SECTION_COOKIE;
  6546. sec_hdr.length = buf_size - remain;
  6547. ret = dhd_export_debug_data((char *)&sec_hdr, NULL, user_buf, sizeof(sec_hdr), &pos);
  6548. ret = dhd_export_debug_data(buf, NULL, user_buf, sec_hdr.length, &pos);
  6549. exit:
  6550. if (buf)
  6551. MFREE(dhdp->osh, buf, buf_size);
  6552. return ret;
  6553. }
  6554. int
  6555. dhd_log_dump_cookie_to_file(dhd_pub_t *dhdp, void *fp, const void *user_buf, unsigned long *f_pos)
  6556. {
  6557. char *buf;
  6558. int ret = BCME_ERROR;
  6559. uint32 buf_size = MAX_LOGUDMP_COOKIE_CNT * LOGDUMP_COOKIE_STR_LEN;
  6560. if (!dhdp || !dhdp->logdump_cookie || (!fp && !user_buf) || !f_pos) {
  6561. DHD_ERROR(("%s At least one ptr is NULL "
  6562. "dhdp = %p cookie %p fp = %p f_pos = %p\n",
  6563. __FUNCTION__, dhdp, dhdp?dhdp->logdump_cookie:NULL, fp, f_pos));
  6564. return ret;
  6565. }
  6566. buf = (char *)MALLOCZ(dhdp->osh, buf_size);
  6567. if (!buf) {
  6568. DHD_ERROR(("%s Fail to malloc buffer\n", __FUNCTION__));
  6569. return ret;
  6570. }
  6571. ret = __dhd_log_dump_cookie_to_file(dhdp, fp, user_buf, f_pos, buf, buf_size);
  6572. MFREE(dhdp->osh, buf, buf_size);
  6573. return ret;
  6574. }
  6575. #endif /* DHD_LOG_DUMP */
  6576. #ifdef DHD_LOG_DUMP
  6577. #define DEBUG_DUMP_TRIGGER_INTERVAL_SEC 4
  6578. void
  6579. dhd_log_dump_trigger(dhd_pub_t *dhdp, int subcmd)
  6580. {
  6581. #if defined(DHD_DUMP_FILE_WRITE_FROM_KERNEL)
  6582. log_dump_type_t *flush_type;
  6583. #endif /* DHD_DUMP_FILE_WRITE_FROM_KERNEL */
  6584. uint64 current_time_sec;
  6585. if (!dhdp) {
  6586. DHD_ERROR(("dhdp is NULL !\n"));
  6587. return;
  6588. }
  6589. if (subcmd >= CMD_MAX || subcmd < CMD_DEFAULT) {
  6590. DHD_ERROR(("%s : Invalid subcmd \n", __FUNCTION__));
  6591. return;
  6592. }
  6593. current_time_sec = DIV_U64_BY_U32(OSL_LOCALTIME_NS(), NSEC_PER_SEC);
  6594. DHD_ERROR(("%s: current_time_sec=%lld debug_dump_time_sec=%lld interval=%d\n",
  6595. __FUNCTION__, current_time_sec, dhdp->debug_dump_time_sec,
  6596. DEBUG_DUMP_TRIGGER_INTERVAL_SEC));
  6597. if ((current_time_sec - dhdp->debug_dump_time_sec) < DEBUG_DUMP_TRIGGER_INTERVAL_SEC) {
  6598. DHD_ERROR(("%s : Last debug dump triggered(%lld) within %d seconds, so SKIP\n",
  6599. __FUNCTION__, dhdp->debug_dump_time_sec, DEBUG_DUMP_TRIGGER_INTERVAL_SEC));
  6600. return;
  6601. }
  6602. clear_debug_dump_time(dhdp->debug_dump_time_str);
  6603. #ifdef DHD_PCIE_RUNTIMEPM
  6604. /* wake up RPM if SYSDUMP is triggered */
  6605. dhdpcie_runtime_bus_wake(dhdp, TRUE, __builtin_return_address(0));
  6606. #endif /* DHD_PCIE_RUNTIMEPM */
  6607. /* */
  6608. dhdp->debug_dump_subcmd = subcmd;
  6609. dhdp->debug_dump_time_sec = DIV_U64_BY_U32(OSL_LOCALTIME_NS(), NSEC_PER_SEC);
  6610. #if defined(DHD_DUMP_FILE_WRITE_FROM_KERNEL)
  6611. /* flush_type is freed at do_dhd_log_dump function */
  6612. flush_type = MALLOCZ(dhdp->osh, sizeof(log_dump_type_t));
  6613. if (flush_type) {
  6614. *flush_type = DLD_BUF_TYPE_ALL;
  6615. dhd_schedule_log_dump(dhdp, flush_type);
  6616. } else {
  6617. DHD_ERROR(("%s Fail to malloc flush_type\n", __FUNCTION__));
  6618. return;
  6619. }
  6620. #endif /* DHD_DUMP_FILE_WRITE_FROM_KERNEL */
  6621. /* Inside dhd_mem_dump, event notification will be sent to HAL and
  6622. * from other context DHD pushes memdump, debug_dump and pktlog dump
  6623. * to HAL and HAL will write into file
  6624. */
  6625. #if (defined(BCMPCIE) || defined(BCMSDIO)) && defined(DHD_FW_COREDUMP)
  6626. dhdp->memdump_type = DUMP_TYPE_BY_SYSDUMP;
  6627. dhd_bus_mem_dump(dhdp);
6628. #endif /* (BCMPCIE || BCMSDIO) && DHD_FW_COREDUMP */
  6629. #if defined(DHD_PKT_LOGGING) && defined(DHD_DUMP_FILE_WRITE_FROM_KERNEL)
  6630. dhd_schedule_pktlog_dump(dhdp);
  6631. #endif /* DHD_PKT_LOGGING && DHD_DUMP_FILE_WRITE_FROM_KERNEL */
  6632. }
  6633. #endif /* DHD_LOG_DUMP */
  6634. #ifdef EWP_EDL
6635. /* For now the EDL ring memory is allocated with DMA_ALLOC_CONSISTENT,
6636. * because on hikey, DMA-mapping preallocated memory fails with an
6637. * 'out of space in SWIOTLB' error.
6638. */
  6639. int
  6640. dhd_edl_mem_init(dhd_pub_t *dhd)
  6641. {
  6642. int ret = 0;
  6643. memset(&dhd->edl_ring_mem, 0, sizeof(dhd->edl_ring_mem));
  6644. ret = dhd_dma_buf_alloc(dhd, &dhd->edl_ring_mem, DHD_EDL_RING_SIZE);
  6645. if (ret != BCME_OK) {
  6646. DHD_ERROR(("%s: alloc of edl_ring_mem failed\n",
  6647. __FUNCTION__));
  6648. return BCME_ERROR;
  6649. }
  6650. return BCME_OK;
  6651. }
6652. /* NOTE: dhd_edl_mem_deinit need NOT be called explicitly, because the dma_buf
6653. * for EDL is freed by 'dhd_prot_detach_edl_rings' during de-init.
6654. */
  6655. void
  6656. dhd_edl_mem_deinit(dhd_pub_t *dhd)
  6657. {
  6658. if (dhd->edl_ring_mem.va != NULL)
  6659. dhd_dma_buf_free(dhd, &dhd->edl_ring_mem);
  6660. }
  6661. int
  6662. dhd_event_logtrace_process_edl(dhd_pub_t *dhdp, uint8 *data,
  6663. void *evt_decode_data)
  6664. {
  6665. msg_hdr_edl_t *msg = NULL;
  6666. cmn_msg_hdr_t *cmn_msg_hdr = NULL;
  6667. uint8 *buf = NULL;
  6668. if (!data || !dhdp || !evt_decode_data) {
  6669. DHD_ERROR(("%s: invalid args ! \n", __FUNCTION__));
  6670. return BCME_ERROR;
  6671. }
  6672. /* format of data in each work item in the EDL ring:
  6673. * |cmn_msg_hdr_t |payload (var len)|cmn_msg_hdr_t|
  6674. * payload = |infobuf_ver(u32)|info_buf_payload_hdr_t|msgtrace_hdr_t|<var len data>|
  6675. */
  6676. cmn_msg_hdr = (cmn_msg_hdr_t *)data;
  6677. msg = (msg_hdr_edl_t *)(data + sizeof(cmn_msg_hdr_t));
  6678. buf = (uint8 *)msg;
  6679. /* validate the fields */
  6680. if (ltoh32(msg->infobuf_ver) != PCIE_INFOBUF_V1) {
  6681. DHD_ERROR(("%s: Skipping msg with invalid infobuf ver (0x%x)"
  6682. " expected (0x%x)\n", __FUNCTION__,
  6683. msg->infobuf_ver, PCIE_INFOBUF_V1));
  6684. return BCME_VERSION;
  6685. }
  6686. /* in EDL, the request_id field of cmn_msg_hdr is overloaded to carry payload length */
  6687. if (sizeof(info_buf_payload_hdr_t) > cmn_msg_hdr->request_id) {
  6688. DHD_ERROR(("%s: infobuf too small for v1 type/length fields\n",
  6689. __FUNCTION__));
  6690. return BCME_BUFTOOLONG;
  6691. }
  6692. if (ltoh16(msg->pyld_hdr.type) != PCIE_INFOBUF_V1_TYPE_LOGTRACE) {
  6693. DHD_ERROR(("%s: payload_hdr_type %d is not V1_TYPE_LOGTRACE\n",
  6694. __FUNCTION__, ltoh16(msg->pyld_hdr.type)));
  6695. return BCME_BADOPTION;
  6696. }
  6697. if (ltoh16(msg->pyld_hdr.length) > cmn_msg_hdr->request_id) {
  6698. DHD_ERROR(("%s: infobuf logtrace length %u is bigger"
  6699. " than available buffer size %u\n", __FUNCTION__,
  6700. ltoh16(msg->pyld_hdr.length), cmn_msg_hdr->request_id));
  6701. return BCME_BADLEN;
  6702. }
  6703. /* dhd_dbg_trace_evnt_handler expects the data to start from msgtrace_hdr_t */
  6704. buf += sizeof(msg->infobuf_ver) + sizeof(msg->pyld_hdr);
  6705. dhd_dbg_trace_evnt_handler(dhdp, buf, evt_decode_data,
  6706. ltoh16(msg->pyld_hdr.length));
  6707. /* check 'dhdp->logtrace_pkt_sendup' and if true alloc an skb
  6708. * copy the event data to the skb and send it up the stack
  6709. */
  6710. #ifdef BCMPCIE
  6711. if (dhdp->logtrace_pkt_sendup) {
  6712. DHD_INFO(("%s: send up event log, len %u bytes\n", __FUNCTION__,
  6713. (uint32)(ltoh16(msg->pyld_hdr.length) +
  6714. sizeof(info_buf_payload_hdr_t) + 4)));
  6715. dhd_sendup_info_buf(dhdp, (uint8 *)msg);
  6716. }
  6717. #endif /* BCMPCIE */
  6718. return BCME_OK;
  6719. }
  6720. #endif /* EWP_EDL */
  6721. #if defined(SHOW_LOGTRACE)
  6722. int
  6723. dhd_print_fw_ver_from_file(dhd_pub_t *dhdp, char *fwpath)
  6724. {
  6725. void *file = NULL;
  6726. int size = 0;
  6727. char buf[FW_VER_STR_LEN];
  6728. char *str = NULL;
  6729. int ret = BCME_OK;
  6730. if (!fwpath)
  6731. return BCME_BADARG;
  6732. file = dhd_os_open_image1(dhdp, fwpath);
  6733. if (!file) {
  6734. ret = BCME_ERROR;
  6735. goto exit;
  6736. }
  6737. size = dhd_os_get_image_size(file);
  6738. if (!size) {
  6739. ret = BCME_ERROR;
  6740. goto exit;
  6741. }
  6742. /* seek to the last 'X' bytes in the file */
  6743. if (dhd_os_seek_file(file, size - FW_VER_STR_LEN) != BCME_OK) {
  6744. ret = BCME_ERROR;
  6745. goto exit;
  6746. }
  6747. /* read the last 'X' bytes of the file to a buffer */
  6748. memset(buf, 0, FW_VER_STR_LEN);
  6749. if (dhd_os_get_image_block(buf, FW_VER_STR_LEN - 1, file) < 0) {
  6750. ret = BCME_ERROR;
  6751. goto exit;
  6752. }
  6753. /* search for 'Version' in the buffer */
  6754. str = bcmstrnstr(buf, FW_VER_STR_LEN, FW_VER_STR, strlen(FW_VER_STR));
  6755. if (!str) {
  6756. ret = BCME_ERROR;
  6757. goto exit;
  6758. }
6759. /* walk back in the buffer to the start of the printable version string */
  6760. while (str != buf &&
  6761. (*str >= ' ' && *str <= '~')) {
  6762. --str;
  6763. }
  6764. /* reverse the final decrement, so that str is pointing
  6765. * to the first ascii character in the buffer
  6766. */
  6767. ++str;
  6768. if (strlen(str) > (FW_VER_STR_LEN - 1)) {
  6769. ret = BCME_BADLEN;
  6770. goto exit;
  6771. }
  6772. DHD_ERROR(("FW version in file '%s': %s\n", fwpath, str));
  6773. /* copy to global variable, so that in case FW load fails, the
  6774. * core capture logs will contain FW version read from the file
  6775. */
  6776. memset(fw_version, 0, FW_VER_STR_LEN);
  6777. strlcpy(fw_version, str, FW_VER_STR_LEN);
  6778. exit:
  6779. if (file)
  6780. dhd_os_close_image1(dhdp, file);
  6781. return ret;
  6782. }
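/*
 * Illustrative sketch (not part of the driver): the scan above expects a
 * printable version string somewhere in the last FW_VER_STR_LEN bytes of the
 * firmware image, e.g. (made-up example text):
 *
 *   "... Version: 13.10.246 (example build string) ..."
 *
 * The code walks back from the FW_VER_STR match to the first non-printable
 * byte, so fw_version captures the whole printable line rather than only the
 * text starting at the match.
 */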
  6783. #endif // endif
#ifdef WL_CFGVENDOR_SEND_HANG_EVENT
/* Ignore compiler warnings due to -Werror=cast-qual */
#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__)
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wcast-qual"
#endif /* STRICT_GCC_WARNINGS && __GNUC__ */
static void
copy_hang_info_ioctl_timeout(dhd_pub_t *dhd, int ifidx, wl_ioctl_t *ioc)
{
	int remain_len;
	int i;
	int *cnt;
	char *dest;
	int bytes_written;
	uint32 ioc_dwlen = 0;

	if (!dhd || !dhd->hang_info) {
		DHD_ERROR(("%s dhd=%p hang_info=%p\n",
			__FUNCTION__, dhd, (dhd ? dhd->hang_info : NULL)));
		return;
	}

	cnt = &dhd->hang_info_cnt;
	dest = dhd->hang_info;

	memset(dest, 0, VENDOR_SEND_HANG_EXT_INFO_LEN);
	(*cnt) = 0;
	bytes_written = 0;
	remain_len = VENDOR_SEND_HANG_EXT_INFO_LEN - bytes_written;

	get_debug_dump_time(dhd->debug_dump_time_hang_str);
	copy_debug_dump_time(dhd->debug_dump_time_str, dhd->debug_dump_time_hang_str);

	bytes_written += scnprintf(&dest[bytes_written], remain_len, "%d %d %s %d %d %d %d %d %d ",
		HANG_REASON_IOCTL_RESP_TIMEOUT, VENDOR_SEND_HANG_EXT_INFO_VER,
		dhd->debug_dump_time_hang_str,
		ifidx, ioc->cmd, ioc->len, ioc->set, ioc->used, ioc->needed);
	(*cnt) = HANG_FIELD_IOCTL_RESP_TIMEOUT_CNT;

	clear_debug_dump_time(dhd->debug_dump_time_hang_str);

	/* Access ioc->buf only if ioc->len holds at least one uint32 (4 bytes) */
	ioc_dwlen = (uint32)(ioc->len / sizeof(uint32));
	if (ioc_dwlen > 0) {
		const uint32 *ioc_buf = (const uint32 *)ioc->buf;

		remain_len = VENDOR_SEND_HANG_EXT_INFO_LEN - bytes_written;
		bytes_written += scnprintf(&dest[bytes_written], remain_len,
			"%08x", *(uint32 *)(ioc_buf++));
		(*cnt)++;
		if ((*cnt) >= HANG_FIELD_CNT_MAX) {
			return;
		}

		for (i = 1; i < ioc_dwlen && *cnt <= HANG_FIELD_CNT_MAX;
			i++, (*cnt)++) {
			remain_len = VENDOR_SEND_HANG_EXT_INFO_LEN - bytes_written;
			bytes_written += scnprintf(&dest[bytes_written], remain_len, "%c%08x",
				HANG_RAW_DEL, *(uint32 *)(ioc_buf++));
		}
	}

	DHD_INFO(("%s hang info len: %d data: %s\n",
		__FUNCTION__, (int)strlen(dhd->hang_info), dhd->hang_info));
}
#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__)
#pragma GCC diagnostic pop
#endif /* STRICT_GCC_WARNINGS && __GNUC__ */
#endif /* WL_CFGVENDOR_SEND_HANG_EVENT */
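#if 0
/*
 * Illustrative sketch only (not built): the bounded-append pattern used by
 * copy_hang_info_ioctl_timeout() above, shown with standard snprintf().
 * One difference is assumed here: kernel scnprintf() returns the number of
 * bytes actually written, while snprintf() returns the would-be length, so
 * the sketch clamps the return value before advancing bytes_written.
 */
#include <stdio.h>
#include <string.h>

static void
example_bounded_append(char *dest, int dest_len, const unsigned int *words, int nwords)
{
	int bytes_written = 0;
	int remain_len;
	int n;
	int i;

	memset(dest, 0, dest_len);
	for (i = 0; i < nwords; i++) {
		remain_len = dest_len - bytes_written;
		if (remain_len <= 1)
			break;	/* no room left beyond the terminating NUL */
		/* ' ' plays the role of the HANG_RAW_DEL delimiter here */
		n = snprintf(&dest[bytes_written], remain_len,
			(i == 0) ? "%08x" : " %08x", words[i]);
		if (n < 0)
			break;
		bytes_written += (n < remain_len) ? n : (remain_len - 1);
	}
}
#endif /* illustrative sketch */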
#if defined(DHD_H2D_LOG_TIME_SYNC)
/*
 * Helper function:
 * Syncs dongle console message timestamps with the host printk clock.
 */
void dhd_h2d_log_time_sync(dhd_pub_t *dhd)
{
	uint64 ts;

	/*
	 * local_clock() returns time in nanoseconds.
	 * The dongle understands only millisecond timestamps.
	 */
	ts = local_clock();
	/* Nanoseconds to milliseconds */
	do_div(ts, 1000000);

	if (dhd_wl_ioctl_set_intiovar(dhd, "rte_timesync", ts, WLC_SET_VAR, TRUE, 0)) {
		DHD_ERROR(("%s rte_timesync **** FAILED ****\n", __FUNCTION__));
		/* Stop host-dongle console time syncing */
		dhd->dhd_rte_time_sync_ms = 0;
	}
}
#endif /* DHD_H2D_LOG_TIME_SYNC */
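#if 0
/*
 * Illustrative sketch only (not built): the nanosecond-to-millisecond
 * conversion performed above with the Linux do_div() macro, written as
 * plain C. do_div() divides the 64-bit value in place and yields the
 * remainder; outside the kernel an ordinary 64-bit division suffices.
 */
#include <stdint.h>

static uint64_t
example_ns_to_ms(uint64_t ts_ns)
{
	return ts_ns / 1000000ULL;	/* 1 ms == 1,000,000 ns */
}
#endif /* illustrative sketch */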
#if defined(DISABLE_HE_ENAB) || defined(CUSTOM_CONTROL_HE_ENAB)
int
dhd_control_he_enab(dhd_pub_t * dhd, uint8 he_enab)
{
	int ret = BCME_OK;
	bcm_xtlv_t *pxtlv = NULL;
	uint8 mybuf[DHD_IOVAR_BUF_SIZE];
	uint16 mybuf_len = sizeof(mybuf);

	pxtlv = (bcm_xtlv_t *)mybuf;

	ret = bcm_pack_xtlv_entry((uint8**)&pxtlv, &mybuf_len, WL_HE_CMD_ENAB, sizeof(he_enab),
		&he_enab, BCM_XTLV_OPTION_ALIGN32);
	if (ret != BCME_OK) {
		/* report the packing error before mapping it to -EINVAL */
		DHD_ERROR(("%s failed to pack he enab, err: %s\n",
			__FUNCTION__, bcmerrorstr(ret)));
		return -EINVAL;
	}

	ret = dhd_iovar(dhd, 0, "he", (char *)&mybuf, sizeof(mybuf), NULL, 0, TRUE);
	if (ret < 0) {
		DHD_ERROR(("%s he_enab (%d) set failed, err: %s\n",
			__FUNCTION__, he_enab, bcmerrorstr(ret)));
	} else {
		DHD_ERROR(("%s he_enab (%d) set successfully\n", __FUNCTION__, he_enab));
	}

	return ret;
}
#endif /* DISABLE_HE_ENAB || CUSTOM_CONTROL_HE_ENAB */
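#if 0
/*
 * Illustrative sketch only (not built): a generic type-length-value pack
 * with 32-bit padding, to illustrate the buffer/length bookkeeping that
 * bcm_pack_xtlv_entry() is used for above. The real bcm_xtlv wire format
 * (header width, endianness, alignment rules) is defined by the Broadcom
 * headers and is not reproduced here; treat this layout as an assumption.
 */
#include <stdint.h>
#include <string.h>

static int
example_pack_tlv32(uint8_t **buf, uint16_t *buflen,
	uint16_t type, uint16_t len, const void *data)
{
	uint16_t padded = (uint16_t)((len + 3u) & ~3u);	/* pad value to 32 bits */
	uint8_t *p = *buf;

	if ((unsigned int)*buflen < 4u + (unsigned int)padded)
		return -1;			/* not enough room left */

	/* 16-bit type and length (host byte order assumed), then padded value */
	memcpy(p, &type, sizeof(type));
	memcpy(p + 2, &len, sizeof(len));
	memset(p + 4, 0, padded);
	memcpy(p + 4, data, len);

	*buf = p + 4 + padded;			/* advance past this entry */
	*buflen = (uint16_t)(*buflen - (4u + padded));
	return 0;
}
#endif /* illustrative sketch */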