/**
 * @file definition of host message ring functionality
 * Provides type definitions and function prototypes used to link the
 * DHD OS, bus, and protocol modules.
 *
 * Portions of this code are copyright (c) 2020 Cypress Semiconductor Corporation
 *
 * Copyright (C) 1999-2020, Broadcom Corporation
 *
 * Unless you and Broadcom execute a separate written software license
 * agreement governing use of this software, this software is licensed to you
 * under the terms of the GNU General Public License version 2 (the "GPL"),
 * available at http://www.broadcom.com/licenses/GPLv2.php, with the
 * following added to such license:
 *
 * As a special exception, the copyright holders of this software give you
 * permission to link this software with independent modules, and to copy and
 * distribute the resulting executable under terms of your choice, provided that
 * you also meet, for each linked independent module, the terms and conditions of
 * the license of that module. An independent module is a module which is not
 * derived from this software. The special exception does not apply to any
 * modifications of the software.
 *
 * Notwithstanding the above, under no circumstances may you combine this
 * software in any way with any other Broadcom software provided under a license
 * other than the GPL, without Broadcom's express prior written consent.
 *
 *
 * <<Broadcom-WL-IPTag/Open:>>
 *
 * $Id: dhd_msgbuf.c 701962 2017-05-30 06:13:15Z $
 */
#include <typedefs.h>
#include <osl.h>
#include <bcmutils.h>
#include <bcmmsgbuf.h>
#include <bcmendian.h>
#include <bcmstdlib_s.h>
#include <dngl_stats.h>
#include <dhd.h>
#include <dhd_proto.h>
#include <dhd_bus.h>
#include <dhd_dbg.h>
#include <siutils.h>
#include <dhd_debug.h>
#include <dhd_flowring.h>
#include <pcie_core.h>
#include <bcmpcie.h>
#include <dhd_pcie.h>
#if defined(DHD_LB)
#include <linux/cpu.h>
#include <bcm_ring.h>
#define DHD_LB_WORKQ_SZ (8192)
#define DHD_LB_WORKQ_SYNC (16)
#define DHD_LB_WORK_SCHED (DHD_LB_WORKQ_SYNC * 2)
#endif /* DHD_LB */
#include <etd.h>
#include <hnd_debug.h>
#include <bcmtlv.h>
#include <hnd_armtrap.h>
#include <dnglevent.h>
#ifdef DHD_PKT_LOGGING
#include <dhd_pktlog.h>
#include <dhd_linux_pktdump.h>
#endif /* DHD_PKT_LOGGING */
#ifdef DHD_EWPR_VER2
#include <dhd_bitpack.h>
#endif /* DHD_EWPR_VER2 */
extern char dhd_version[];
extern char fw_version[];
/**
 * Host configures a soft doorbell for d2h rings by specifying a 32bit host
 * address where a value must be written. Host may also configure interrupt
 * coalescing on this soft doorbell.
 * Use Case: Hosts with network processors may register with the dongle the
 * network processor's thread wakeup register and a value corresponding to the
 * core/thread context. Dongle will issue a write transaction <address,value>
 * to the PCIE RC, which will need to be routed to the mapped register space
 * by the host.
 */
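/*
 * Illustrative sketch only (kept out of the build): one way a host could
 * describe such a soft doorbell. The structure and field names below are
 * hypothetical; the actual layout is defined by the dongle interface headers
 * (e.g. bcmpcie.h), not by this sketch.
 */
#if 0
typedef struct host_soft_doorbell_example {
	uint32 haddr_low;	/* low 32 bits of the host address the dongle writes to */
	uint32 haddr_high;	/* high 32 bits of the host address */
	uint32 value;		/* value identifying the core/thread context to wake */
} host_soft_doorbell_example_t;

static void
example_fill_soft_doorbell(host_soft_doorbell_example_t *db,
	uint64 wakeup_reg_pa, uint32 thread_ctx)
{
	/* Point the doorbell at the network processor's wakeup register and
	 * encode which thread context should be woken when the dongle rings it.
	 */
	db->haddr_low = (uint32)(wakeup_reg_pa & 0xffffffff);
	db->haddr_high = (uint32)(wakeup_reg_pa >> 32);
	db->value = thread_ctx;
}
#endif /* 0 */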
/* #define DHD_D2H_SOFT_DOORBELL_SUPPORT */
/* Dependency Check */
#if defined(IOCTLRESP_USE_CONSTMEM) && defined(DHD_USE_STATIC_CTRLBUF)
#error "DHD_USE_STATIC_CTRLBUF is NOT working with DHD_USE_OSLPKT_FOR_RESPBUF"
#endif /* IOCTLRESP_USE_CONSTMEM && DHD_USE_STATIC_CTRLBUF */
#define RETRIES 2 /* # of retries to retrieve matching ioctl response */
#define DEFAULT_RX_BUFFERS_TO_POST 256
#define RXBUFPOST_THRESHOLD 32
#define RX_BUF_BURST 32 /* Rx buffers for MSDU Data */
#define DHD_STOP_QUEUE_THRESHOLD 200
#define DHD_START_QUEUE_THRESHOLD 100
#define RX_DMA_OFFSET 8 /* Mem2mem DMA inserts an extra 8 bytes */
#define IOCT_RETBUF_SIZE (RX_DMA_OFFSET + WLC_IOCTL_MAXLEN)
/* flags for ioctl pending status */
#define MSGBUF_IOCTL_ACK_PENDING (1<<0)
#define MSGBUF_IOCTL_RESP_PENDING (1<<1)
#define DHD_IOCTL_REQ_PKTBUFSZ 2048
#define MSGBUF_IOCTL_MAX_RQSTLEN (DHD_IOCTL_REQ_PKTBUFSZ - H2DRING_CTRL_SUB_ITEMSIZE)
#define DMA_ALIGN_LEN 4
#define DMA_D2H_SCRATCH_BUF_LEN 8
#define DMA_XFER_LEN_LIMIT 0x400000
#ifdef BCM_HOST_BUF
#ifndef DMA_HOST_BUFFER_LEN
#define DMA_HOST_BUFFER_LEN 0x200000
#endif // endif
#endif /* BCM_HOST_BUF */
#define DHD_FLOWRING_IOCTL_BUFPOST_PKTSZ 8192
#define DHD_FLOWRING_DEFAULT_NITEMS_POSTED_H2D 1
#define DHD_FLOWRING_MAX_EVENTBUF_POST 32
#define DHD_FLOWRING_MAX_IOCTLRESPBUF_POST 8
#define DHD_H2D_INFORING_MAX_BUF_POST 32
#define DHD_MAX_TSBUF_POST 8
#define DHD_PROT_FUNCS 43
/* Length of buffer in host for bus throughput measurement */
#define DHD_BUS_TPUT_BUF_LEN 2048
#define TXP_FLUSH_NITEMS
/* optimization to write "n" tx items at a time to ring */
#define TXP_FLUSH_MAX_ITEMS_FLUSH_CNT 48
#define RING_NAME_MAX_LENGTH 24
#define CTRLSUB_HOSTTS_MEESAGE_SIZE 1024
/* Giving room before ioctl_trans_id rolls over. */
#define BUFFER_BEFORE_ROLLOVER 300
/* 512K memory + 32K registers */
#define SNAPSHOT_UPLOAD_BUF_SIZE ((512 + 32) * 1024)
struct msgbuf_ring; /* ring context for common and flow rings */
/**
 * PCIE D2H DMA Complete Sync Modes
 *
 * Firmware may interrupt the host before the D2H Mem2Mem DMA completes into
 * Host system memory. A WAR using one of 4 approaches is needed:
 * 1. Dongle places a modulo-253 seqnum in the last word of each D2H message.
 * 2. XOR Checksum, with epoch# in each work item. Dongle builds an XOR checksum
 *    and writes it in the last word of each work item. Each work item carries a
 *    seqnum = sequence number % 253.
 * 3. Read Barrier: Dongle does a host memory read access prior to posting an
 *    interrupt, ensuring that the D2H data transfer has indeed completed.
 * 4. Dongle DMA's all indices after producing items in the D2H ring, flushing
 *    ring contents before the indices.
 *
 * Host does not need to sync for DMA completion with option #3 or #4, and a
 * noop sync callback (see dhd_prot_d2h_sync_none) may be bound.
 *
 * Dongle advertizes host side sync mechanism requirements.
 */
#define PCIE_D2H_SYNC_WAIT_TRIES (512U)
#define PCIE_D2H_SYNC_NUM_OF_STEPS (5U)
#define PCIE_D2H_SYNC_DELAY (100UL) /* in terms of usecs */
#define HWA_DB_TYPE_RXPOST (0x0050)
#define HWA_DB_TYPE_TXCPLT (0x0060)
#define HWA_DB_TYPE_RXCPLT (0x0170)
#define HWA_DB_INDEX_VALUE(val) ((uint32)(val) << 16)
#define HWA_ENAB_BITMAP_RXPOST (1U << 0) /* 1A */
#define HWA_ENAB_BITMAP_RXCPLT (1U << 1) /* 2B */
#define HWA_ENAB_BITMAP_TXCPLT (1U << 2) /* 4B */
/**
 * Custom callback attached based upon D2H DMA Sync mode advertized by dongle.
 *
 * On success: return cmn_msg_hdr_t::msg_type
 * On failure: return 0 (invalid msg_type)
 */
typedef uint8 (* d2h_sync_cb_t)(dhd_pub_t *dhd, struct msgbuf_ring *ring,
	volatile cmn_msg_hdr_t *msg, int msglen);
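/*
 * Illustrative sketch only (kept out of the build): the general shape of a
 * d2h_sync_cb_t. For the "no sync needed" modes (#3/#4 above) the callback can
 * simply return the message type; for the seqnum mode (#1) it would poll until
 * the last word of the work item matches the ring's expected modulo-253
 * sequence number. ring_expected_seqnum() is a hypothetical helper, not a
 * driver API.
 */
#if 0
static uint8
example_d2h_sync_none(dhd_pub_t *dhd, struct msgbuf_ring *ring,
	volatile cmn_msg_hdr_t *msg, int msglen)
{
	/* DMA is known to be complete before the interrupt: trust the header as-is */
	return msg->msg_type;
}

static uint8
example_d2h_sync_seqnum(dhd_pub_t *dhd, struct msgbuf_ring *ring,
	volatile cmn_msg_hdr_t *msg, int msglen)
{
	uint32 tries;
	uint32 expected = ring_expected_seqnum(ring); /* hypothetical helper */
	volatile uint32 *last_word =
		(volatile uint32 *)((volatile uint8 *)msg + msglen - sizeof(uint32));

	for (tries = 0; tries < PCIE_D2H_SYNC_WAIT_TRIES; tries++) {
		if (*last_word == expected)
			return msg->msg_type; /* work item fully landed in host memory */
		OSL_DELAY(PCIE_D2H_SYNC_DELAY);
	}
	return 0; /* invalid msg_type: caller treats this as a sync failure */
}
#endif /* 0 */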
/**
 * Custom callback attached based upon D2H DMA Sync mode advertized by dongle.
 * For EDL messages.
 *
 * On success: return cmn_msg_hdr_t::msg_type
 * On failure: return 0 (invalid msg_type)
 */
#ifdef EWP_EDL
typedef int (* d2h_edl_sync_cb_t)(dhd_pub_t *dhd, struct msgbuf_ring *ring,
	volatile cmn_msg_hdr_t *msg);
#endif /* EWP_EDL */
  175. /*
  176. * +----------------------------------------------------------------------------
  177. *
  178. * RingIds and FlowId are not equivalent as ringids include D2H rings whereas
  179. * flowids do not.
  180. *
  181. * Dongle advertizes the max H2D rings, as max_sub_queues = 'N' which includes
  182. * the H2D common rings as well as the (N-BCMPCIE_H2D_COMMON_MSGRINGS) flowrings
  183. *
  184. * Here is a sample mapping for (based on PCIE Full Dongle Rev5) where,
  185. * BCMPCIE_H2D_COMMON_MSGRINGS = 2, i.e. 2 H2D common rings,
  186. * BCMPCIE_COMMON_MSGRINGS = 5, i.e. include 3 D2H common rings.
  187. *
  188. * H2D Control Submit RingId = 0 FlowId = 0 reserved never allocated
  189. * H2D RxPost Submit RingId = 1 FlowId = 1 reserved never allocated
  190. *
  191. * D2H Control Complete RingId = 2
  192. * D2H Transmit Complete RingId = 3
  193. * D2H Receive Complete RingId = 4
  194. *
  195. * H2D TxPost FLOWRING RingId = 5 FlowId = 2 (1st flowring)
  196. * H2D TxPost FLOWRING RingId = 6 FlowId = 3 (2nd flowring)
  197. * H2D TxPost FLOWRING RingId = 5 + (N-1) FlowId = (N-1) (Nth flowring)
  198. *
  199. * When TxPost FlowId(s) are allocated, the FlowIds [0..FLOWID_RESERVED) are
  200. * unused, where FLOWID_RESERVED is BCMPCIE_H2D_COMMON_MSGRINGS.
  201. *
  202. * Example: when a system supports 4 bc/mc and 128 uc flowrings, with
  203. * BCMPCIE_H2D_COMMON_MSGRINGS = 2, and BCMPCIE_H2D_COMMON_MSGRINGS = 5, and the
  204. * FlowId values would be in the range [2..133] and the corresponding
  205. * RingId values would be in the range [5..136].
  206. *
  207. * The FlowId allocator may choose to allocate FlowIds:
  208. * bc/mc (per virtual interface) in one consecutive range [2..(2+VIFS))
  209. * X# of uc flowids in consecutive ranges (per station Id), where X is the
  210. * number of access categories (e.g. 4 uc flowids per station).
  211. *
  212. * CAUTION:
  213. * When DMA indices array feature is used, RingId=5, corresponding to the 0th
  214. * FLOWRING, will actually use the FlowId as the index into the H2D DMA indices
  215. * array, since the FlowId truly represents the index in the H2D DMA indices array.
  216. *
  217. * Likewise, in the D2H direction, the RingId - BCMPCIE_H2D_COMMON_MSGRINGS,
  218. * will represent the index in the D2H DMA indices array.
  219. *
  220. * +----------------------------------------------------------------------------
  221. */
  222. /* First TxPost Flowring Id */
  223. #define DHD_FLOWRING_START_FLOWID BCMPCIE_H2D_COMMON_MSGRINGS
  224. /* Determine whether a ringid belongs to a TxPost flowring */
  225. #define DHD_IS_FLOWRING(ringid, max_flow_rings) \
  226. ((ringid) >= BCMPCIE_COMMON_MSGRINGS && \
  227. (ringid) < ((max_flow_rings) + BCMPCIE_COMMON_MSGRINGS))
  228. /* Convert a H2D TxPost FlowId to a MsgBuf RingId */
  229. #define DHD_FLOWID_TO_RINGID(flowid) \
  230. (BCMPCIE_COMMON_MSGRINGS + ((flowid) - BCMPCIE_H2D_COMMON_MSGRINGS))
  231. /* Convert a MsgBuf RingId to a H2D TxPost FlowId */
  232. #define DHD_RINGID_TO_FLOWID(ringid) \
  233. (BCMPCIE_H2D_COMMON_MSGRINGS + ((ringid) - BCMPCIE_COMMON_MSGRINGS))
  234. /* Convert a H2D MsgBuf RingId to an offset index into the H2D DMA indices array
  235. * This may be used for the H2D DMA WR index array or H2D DMA RD index array or
  236. * any array of H2D rings.
  237. */
  238. #define DHD_H2D_RING_OFFSET(ringid) \
  239. (((ringid) >= BCMPCIE_COMMON_MSGRINGS) ? DHD_RINGID_TO_FLOWID(ringid) : (ringid))
  240. /* Convert a H2D MsgBuf Flowring Id to an offset index into the H2D DMA indices array
  241. * This may be used for IFRM.
  242. */
  243. #define DHD_H2D_FRM_FLOW_RING_OFFSET(ringid) \
  244. ((ringid) - BCMPCIE_COMMON_MSGRINGS)
  245. /* Convert a D2H MsgBuf RingId to an offset index into the D2H DMA indices array
  246. * This may be used for the D2H DMA WR index array or D2H DMA RD index array or
  247. * any array of D2H rings.
  248. * d2h debug ring is located at the end, i.e. after all the tx flow rings and h2d debug ring
  249. * max_h2d_rings: total number of h2d rings
  250. */
  251. #define DHD_D2H_RING_OFFSET(ringid, max_h2d_rings) \
  252. ((ringid) > (max_h2d_rings) ? \
  253. ((ringid) - max_h2d_rings) : \
  254. ((ringid) - BCMPCIE_H2D_COMMON_MSGRINGS))
  255. /* Convert a D2H DMA Indices Offset to a RingId */
  256. #define DHD_D2H_RINGID(offset) \
  257. ((offset) + BCMPCIE_H2D_COMMON_MSGRINGS)
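  /*
   * Illustrative expansions of the conversion macros above (sketch only, assuming
   * BCMPCIE_H2D_COMMON_MSGRINGS == 2 and BCMPCIE_COMMON_MSGRINGS == 5, as in the
   * sample mapping described earlier):
   *
   *	DHD_FLOWID_TO_RINGID(2)    == 5 + (2 - 2) == 5   (1st TxPost flowring)
   *	DHD_RINGID_TO_FLOWID(6)    == 2 + (6 - 5) == 3   (2nd TxPost flowring)
   *	DHD_H2D_RING_OFFSET(1)     == 1                  (H2D RxPost common ring)
   *	DHD_H2D_RING_OFFSET(7)     == 2 + (7 - 5) == 4   (flowring: offset is its FlowId)
   *	DHD_D2H_RING_OFFSET(3, 16) == 3 - 2 == 1         (D2H Tx Complete common ring)
   */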
  258. #define DHD_DMAH_NULL ((void*)NULL)
  259. /*
  260. * Pad a DMA-able buffer by an additional cacheline. If the end of the DMA-able
  261. * buffer does not occupy the entire cacheline, and another object is placed
  262. * following the DMA-able buffer, data corruption may occur if the DMA-able
  263. * buffer is DMAed into (e.g. D2H direction) when HW cache coherency
  264. * is not available.
  265. */
  266. #if defined(L1_CACHE_BYTES)
  267. #define DHD_DMA_PAD (L1_CACHE_BYTES)
  268. #else
  269. #define DHD_DMA_PAD (128)
  270. #endif // endif
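  /*
   * Worked example (illustrative): with DHD_DMA_PAD == 128, a requested buffer of
   * 1000 bytes gets 128 - (1000 % 128) == 24 bytes of padding, i.e. 1024 bytes
   * are allocated; see dhd_dma_buf_alloc() later in this file for the actual
   * padding computation.
   */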
  271. /*
  272. * +----------------------------------------------------------------------------
  273. * Flowring Pool
  274. *
  275. * Unlike common rings, which are attached very early on (dhd_prot_attach),
  276. * flowrings are dynamically instantiated. Moreover, flowrings may require a
  277. * larger DMA-able buffer. To avoid issues with fragmented cache coherent
  278. * DMA-able memory, a pre-allocated pool of msgbuf_ring_t is allocated once.
  279. * The DMA-able buffers are attached to these pre-allocated msgbuf_ring.
  280. *
  281. * Each DMA-able buffer may be allocated independently, or may be carved out
  282. * of a single large contiguous region that is registered with the protocol
  283. * layer into flowrings_dma_buf. On a 64bit platform, this contiguous region
  284. * may not span 0x00000000FFFFFFFF (avoid dongle side 64bit ptr arithmetic).
  285. *
  286. * No flowring pool action is performed in dhd_prot_attach(), as the number
  287. * of h2d rings is not yet known.
  288. *
  289. * In dhd_prot_init(), the dongle advertized number of h2d rings is used to
  290. * determine the number of flowrings required, and a pool of msgbuf_rings are
  291. * allocated and a DMA-able buffer (carved or allocated) is attached.
  292. * See: dhd_prot_flowrings_pool_attach()
  293. *
  294. * A flowring msgbuf_ring object may be fetched from this pool during flowring
  295. * creation, using the flowid. Likewise, flowrings may be freed back into the
  296. * pool on flowring deletion.
  297. * See: dhd_prot_flowrings_pool_fetch(), dhd_prot_flowrings_pool_release()
  298. *
  299. * In dhd_prot_detach(), the flowring pool is detached. The DMA-able buffers
  300. * are detached (returned back to the carved region or freed), and the pool of
  301. * msgbuf_ring and any objects allocated against it are freed.
  302. * See: dhd_prot_flowrings_pool_detach()
  303. *
  304. * In dhd_prot_reset(), the flowring pool is simply reset by returning it to a
  305. * state as-if upon an attach. All DMA-able buffers are retained.
  306. * Following a dhd_prot_reset(), in a subsequent dhd_prot_init(), the flowring
  307. * pool attach will notice that the pool persists and continue to use it. This
  308. * will avoid the case of a fragmented DMA-able region.
  309. *
  310. * +----------------------------------------------------------------------------
  311. */
  312. /* Conversion of a flowid to a flowring pool index */
  313. #define DHD_FLOWRINGS_POOL_OFFSET(flowid) \
  314. ((flowid) - BCMPCIE_H2D_COMMON_MSGRINGS)
  315. /* Fetch the msgbuf_ring_t from the flowring pool given a flowid */
  316. #define DHD_RING_IN_FLOWRINGS_POOL(prot, flowid) \
  317. (msgbuf_ring_t*)((prot)->h2d_flowrings_pool) + \
  318. DHD_FLOWRINGS_POOL_OFFSET(flowid)
  319. /* Traverse each flowring in the flowring pool, assigning ring and flowid */
  320. #define FOREACH_RING_IN_FLOWRINGS_POOL(prot, ring, flowid, total_flowrings) \
  321. for ((flowid) = DHD_FLOWRING_START_FLOWID, \
  322. (ring) = DHD_RING_IN_FLOWRINGS_POOL(prot, flowid); \
  323. (flowid) < ((total_flowrings) + DHD_FLOWRING_START_FLOWID); \
  324. (ring)++, (flowid)++)
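  /*
   * Usage sketch for the pool iterator above (illustrative only; assumes a
   * dhd_prot_t *prot whose pool holds max_flowrings entries):
   *
   *	uint16 flowid;
   *	msgbuf_ring_t *ring;
   *	FOREACH_RING_IN_FLOWRINGS_POOL(prot, ring, flowid, max_flowrings) {
   *		dhd_prot_ring_reset(dhd, ring);
   *	}
   */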
  325. /* Used in loopback tests */
  326. typedef struct dhd_dmaxfer {
  327. dhd_dma_buf_t srcmem;
  328. dhd_dma_buf_t dstmem;
  329. uint32 srcdelay;
  330. uint32 destdelay;
  331. uint32 len;
  332. bool in_progress;
  333. uint64 start_usec;
  334. uint64 time_taken;
  335. uint32 d11_lpbk;
  336. int status;
  337. } dhd_dmaxfer_t;
  338. /**
  339. * msgbuf_ring : This object manages the host side ring that includes a DMA-able
  340. * buffer, the WR and RD indices, ring parameters such as the max number of items
  341. * and the length of each item, and other miscellaneous runtime state.
  342. * A msgbuf_ring may be used to represent a H2D or D2H common ring or a
  343. * H2D TxPost ring as specified in the PCIE FullDongle Spec.
  344. * Ring parameters are conveyed to the dongle, which maintains its own peer end
  345. * ring state. Depending on whether the DMA Indices feature is supported, the
  346. * host will update the WR/RD index in the DMA indices array in host memory or
  347. * directly in dongle memory.
  348. */
  349. typedef struct msgbuf_ring {
  350. bool inited;
  351. uint16 idx; /* ring id */
  352. uint16 rd; /* read index */
  353. uint16 curr_rd; /* read index for debug */
  354. uint16 wr; /* write index */
  355. uint16 max_items; /* maximum number of items in ring */
  356. uint16 item_len; /* length of each item in the ring */
  357. sh_addr_t base_addr; /* LITTLE ENDIAN formatted: base address */
  358. dhd_dma_buf_t dma_buf; /* DMA-able buffer: pa, va, len, dmah, secdma */
  359. uint32 seqnum; /* next expected item's sequence number */
  360. #ifdef TXP_FLUSH_NITEMS
  361. void *start_addr;
  362. /* # of messages on ring not yet announced to dongle */
  363. uint16 pend_items_count;
  364. #endif /* TXP_FLUSH_NITEMS */
  365. uint8 ring_type;
  366. uint16 hwa_db_type; /* hwa type non-zero for Data path rings */
  367. uint8 n_completion_ids;
  368. bool create_pending;
  369. uint16 create_req_id;
  370. uint8 current_phase;
  371. uint16 compeltion_ring_ids[MAX_COMPLETION_RING_IDS_ASSOCIATED];
  372. uchar name[RING_NAME_MAX_LENGTH];
  373. uint32 ring_mem_allocated;
  374. void *ring_lock;
  375. } msgbuf_ring_t;
  376. #define DHD_RING_BGN_VA(ring) ((ring)->dma_buf.va)
  377. #define DHD_RING_END_VA(ring) \
  378. ((uint8 *)(DHD_RING_BGN_VA((ring))) + \
  379. (((ring)->max_items - 1) * (ring)->item_len))
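  /*
   * Example (illustrative): for a ring with max_items == 256 and item_len == 16,
   * DHD_RING_END_VA(ring) == DHD_RING_BGN_VA(ring) + (255 * 16), i.e. the start
   * address of the last item in the DMA-able buffer, not one past the buffer end.
   */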
  380. /* This can be overwritten by module parameter defined in dhd_linux.c
  381. * or by dhd iovar h2d_max_txpost.
  382. */
  383. int h2d_max_txpost = H2DRING_TXPOST_MAX_ITEM;
  384. /** DHD protocol handle. Is an opaque type to other DHD software layers. */
  385. typedef struct dhd_prot {
  386. osl_t *osh; /* OSL handle */
  387. uint16 rxbufpost_sz;
  388. uint16 rxbufpost;
  389. uint16 max_rxbufpost;
  390. uint16 max_eventbufpost;
  391. uint16 max_ioctlrespbufpost;
  392. uint16 max_tsbufpost;
  393. uint16 max_infobufpost;
  394. uint16 infobufpost;
  395. uint16 cur_event_bufs_posted;
  396. uint16 cur_ioctlresp_bufs_posted;
  397. uint16 cur_ts_bufs_posted;
  398. /* Flow control mechanism based on active transmits pending */
  399. osl_atomic_t active_tx_count; /* increments/decrements on every packet tx/tx_status */
  400. uint16 h2d_max_txpost;
  401. uint16 txp_threshold; /* optimization to write "n" tx items at a time to ring */
  402. /* MsgBuf Ring info: has a dhd_dma_buf that is dynamically allocated */
  403. msgbuf_ring_t h2dring_ctrl_subn; /* H2D ctrl message submission ring */
  404. msgbuf_ring_t h2dring_rxp_subn; /* H2D RxBuf post ring */
  405. msgbuf_ring_t d2hring_ctrl_cpln; /* D2H ctrl completion ring */
  406. msgbuf_ring_t d2hring_tx_cpln; /* D2H Tx complete message ring */
  407. msgbuf_ring_t d2hring_rx_cpln; /* D2H Rx complete message ring */
  408. msgbuf_ring_t *h2dring_info_subn; /* H2D info submission ring */
  409. msgbuf_ring_t *d2hring_info_cpln; /* D2H info completion ring */
  410. msgbuf_ring_t *d2hring_edl; /* D2H Enhanced Debug Lane (EDL) ring */
  411. msgbuf_ring_t *h2d_flowrings_pool; /* Pool of preallocated flowrings */
  412. dhd_dma_buf_t flowrings_dma_buf; /* Contiguous DMA buffer for flowrings */
  413. uint16 h2d_rings_total; /* total H2D (common rings + flowrings) */
  414. uint32 rx_dataoffset;
  415. dhd_mb_ring_t mb_ring_fn; /* called when dongle needs to be notified of new msg */
  416. dhd_mb_ring_2_t mb_2_ring_fn; /* called when dongle needs to be notified of new msg */
  417. /* ioctl related resources */
  418. uint8 ioctl_state;
  419. int16 ioctl_status; /* status returned from dongle */
  420. uint16 ioctl_resplen;
  421. dhd_ioctl_recieved_status_t ioctl_received;
  422. uint curr_ioctl_cmd;
  423. dhd_dma_buf_t retbuf; /* For holding ioctl response */
  424. dhd_dma_buf_t ioctbuf; /* For holding ioctl request */
  425. dhd_dma_buf_t d2h_dma_scratch_buf; /* For holding d2h scratch */
  426. /* DMA-able arrays for holding WR and RD indices */
  427. uint32 rw_index_sz; /* Size of a RD or WR index in dongle */
  428. dhd_dma_buf_t h2d_dma_indx_wr_buf; /* Array of H2D WR indices */
  429. dhd_dma_buf_t h2d_dma_indx_rd_buf; /* Array of H2D RD indices */
  430. dhd_dma_buf_t d2h_dma_indx_wr_buf; /* Array of D2H WR indices */
  431. dhd_dma_buf_t d2h_dma_indx_rd_buf; /* Array of D2H RD indices */
  432. dhd_dma_buf_t h2d_ifrm_indx_wr_buf; /* Array of H2D WR indices for ifrm */
  433. dhd_dma_buf_t host_bus_throughput_buf; /* bus throughput measure buffer */
  434. dhd_dma_buf_t *flowring_buf; /* pool of flow ring buf */
  435. uint32 flowring_num;
  436. d2h_sync_cb_t d2h_sync_cb; /* Sync on D2H DMA done: SEQNUM or XORCSUM */
  437. #ifdef EWP_EDL
  438. d2h_edl_sync_cb_t d2h_edl_sync_cb; /* Sync on EDL D2H DMA done: SEQNUM or XORCSUM */
  439. #endif /* EWP_EDL */
  440. ulong d2h_sync_wait_max; /* max number of wait loops to receive one msg */
  441. ulong d2h_sync_wait_tot; /* total wait loops */
  442. dhd_dmaxfer_t dmaxfer; /* for test/DMA loopback */
  443. uint16 ioctl_seq_no;
  444. uint16 data_seq_no;
  445. uint16 ioctl_trans_id;
  446. void *pktid_ctrl_map; /* a pktid maps to a packet and its metadata */
  447. void *pktid_rx_map; /* pktid map for rx path */
  448. void *pktid_tx_map; /* pktid map for tx path */
  449. bool metadata_dbg;
  450. void *pktid_map_handle_ioctl;
  451. #ifdef DHD_MAP_PKTID_LOGGING
  452. void *pktid_dma_map; /* pktid map for DMA MAP */
  453. void *pktid_dma_unmap; /* pktid map for DMA UNMAP */
  454. #endif /* DHD_MAP_PKTID_LOGGING */
  455. uint32 pktid_depleted_cnt; /* pktid depleted count */
  456. /* netif tx queue stop count */
  457. uint8 pktid_txq_stop_cnt;
  458. /* netif tx queue start count */
  459. uint8 pktid_txq_start_cnt;
  460. uint64 ioctl_fillup_time; /* timestamp for ioctl fillup */
  461. uint64 ioctl_ack_time; /* timestamp for ioctl ack */
  462. uint64 ioctl_cmplt_time; /* timestamp for ioctl completion */
  463. /* Applications/utilities can read tx and rx metadata using IOVARs */
  464. uint16 rx_metadata_offset;
  465. uint16 tx_metadata_offset;
  466. #if defined(DHD_D2H_SOFT_DOORBELL_SUPPORT)
  467. /* Host's soft doorbell configuration */
  468. bcmpcie_soft_doorbell_t soft_doorbell[BCMPCIE_D2H_COMMON_MSGRINGS];
  469. #endif /* DHD_D2H_SOFT_DOORBELL_SUPPORT */
  470. /* Work Queues to be used by the producer and the consumer, and threshold
  471. * when the WRITE index must be synced to consumer's workq
  472. */
  473. #if defined(DHD_LB_TXC)
  474. uint32 tx_compl_prod_sync ____cacheline_aligned;
  475. bcm_workq_t tx_compl_prod, tx_compl_cons;
  476. #endif /* DHD_LB_TXC */
  477. #if defined(DHD_LB_RXC)
  478. uint32 rx_compl_prod_sync ____cacheline_aligned;
  479. bcm_workq_t rx_compl_prod, rx_compl_cons;
  480. #endif /* DHD_LB_RXC */
  481. dhd_dma_buf_t fw_trap_buf; /* firmware trap buffer */
  482. uint32 host_ipc_version; /* Host supported IPC rev */
  483. uint32 device_ipc_version; /* FW supported IPC rev */
  484. uint32 active_ipc_version; /* Host advertised IPC rev */
  485. dhd_dma_buf_t hostts_req_buf; /* For holding host timestamp request buf */
  486. bool hostts_req_buf_inuse;
  487. bool rx_ts_log_enabled;
  488. bool tx_ts_log_enabled;
  489. bool no_retry;
  490. bool no_aggr;
  491. bool fixed_rate;
  492. dhd_dma_buf_t host_scb_buf; /* scb host offload buffer */
  493. #ifdef DHD_HP2P
  494. msgbuf_ring_t *d2hring_hp2p_txcpl; /* D2H HPP Tx completion ring */
  495. msgbuf_ring_t *d2hring_hp2p_rxcpl; /* D2H HPP Rx completion ring */
  496. #endif /* DHD_HP2P */
  497. bool no_tx_resource;
  498. } dhd_prot_t;
  499. #ifdef DHD_EWPR_VER2
  500. #define HANG_INFO_BASE64_BUFFER_SIZE 640
  501. #endif // endif
  502. #ifdef DHD_DUMP_PCIE_RINGS
  503. static
  504. int dhd_ring_write(dhd_pub_t *dhd, msgbuf_ring_t *ring, void *file,
  505. const void *user_buf, unsigned long *file_posn);
  506. #ifdef EWP_EDL
  507. static
  508. int dhd_edl_ring_hdr_write(dhd_pub_t *dhd, msgbuf_ring_t *ring, void *file, const void *user_buf,
  509. unsigned long *file_posn);
  510. #endif /* EWP_EDL */
  511. #endif /* DHD_DUMP_PCIE_RINGS */
  512. extern bool dhd_timesync_delay_post_bufs(dhd_pub_t *dhdp);
  513. extern void dhd_schedule_dmaxfer_free(dhd_pub_t* dhdp, dmaxref_mem_map_t *dmmap);
  514. /* Convert a dmaaddr_t to a base_addr with htol operations */
  515. static INLINE void dhd_base_addr_htolpa(sh_addr_t *base_addr, dmaaddr_t pa);
  516. /* APIs for managing a DMA-able buffer */
  517. static int dhd_dma_buf_audit(dhd_pub_t *dhd, dhd_dma_buf_t *dma_buf);
  518. static void dhd_dma_buf_reset(dhd_pub_t *dhd, dhd_dma_buf_t *dma_buf);
  519. /* msgbuf ring management */
  520. static int dhd_prot_ring_attach(dhd_pub_t *dhd, msgbuf_ring_t *ring,
  521. const char *name, uint16 max_items, uint16 len_item, uint16 ringid);
  522. static void dhd_prot_ring_init(dhd_pub_t *dhd, msgbuf_ring_t *ring);
  523. static void dhd_prot_ring_reset(dhd_pub_t *dhd, msgbuf_ring_t *ring);
  524. static void dhd_prot_ring_detach(dhd_pub_t *dhd, msgbuf_ring_t *ring);
  525. static void dhd_prot_process_fw_timestamp(dhd_pub_t *dhd, void* buf);
  526. /* Pool of pre-allocated msgbuf_ring_t with DMA-able buffers for Flowrings */
  527. static int dhd_prot_flowrings_pool_attach(dhd_pub_t *dhd);
  528. static void dhd_prot_flowrings_pool_reset(dhd_pub_t *dhd);
  529. static void dhd_prot_flowrings_pool_detach(dhd_pub_t *dhd);
  530. /* Fetch and Release a flowring msgbuf_ring from flowring pool */
  531. static msgbuf_ring_t *dhd_prot_flowrings_pool_fetch(dhd_pub_t *dhd,
  532. uint16 flowid);
  533. /* see also dhd_prot_flowrings_pool_release() in dhd_prot.h */
  534. /* Producer: Allocate space in a msgbuf ring */
  535. static void* dhd_prot_alloc_ring_space(dhd_pub_t *dhd, msgbuf_ring_t *ring,
  536. uint16 nitems, uint16 *alloced, bool exactly_nitems);
  537. static void* dhd_prot_get_ring_space(msgbuf_ring_t *ring, uint16 nitems,
  538. uint16 *alloced, bool exactly_nitems);
  539. /* Consumer: Determine the location where the next message may be consumed */
  540. static uint8* dhd_prot_get_read_addr(dhd_pub_t *dhd, msgbuf_ring_t *ring,
  541. uint32 *available_len);
  542. /* Producer (WR index update) or Consumer (RD index update) indication */
  543. static void dhd_prot_ring_write_complete(dhd_pub_t *dhd, msgbuf_ring_t *ring,
  544. void *p, uint16 len);
  545. static void dhd_prot_upd_read_idx(dhd_pub_t *dhd, msgbuf_ring_t *ring);
  546. static INLINE int dhd_prot_dma_indx_alloc(dhd_pub_t *dhd, uint8 type,
  547. dhd_dma_buf_t *dma_buf, uint32 bufsz);
  548. /* Set/Get a RD or WR index in the array of indices */
  549. /* See also: dhd_prot_dma_indx_init() */
  550. void dhd_prot_dma_indx_set(dhd_pub_t *dhd, uint16 new_index, uint8 type,
  551. uint16 ringid);
  552. static uint16 dhd_prot_dma_indx_get(dhd_pub_t *dhd, uint8 type, uint16 ringid);
  553. /* Locate a packet given a pktid */
  554. static INLINE void *dhd_prot_packet_get(dhd_pub_t *dhd, uint32 pktid, uint8 pkttype,
  555. bool free_pktid);
  556. /* Locate a packet given a PktId and free it. */
  557. static INLINE void dhd_prot_packet_free(dhd_pub_t *dhd, void *pkt, uint8 pkttype, bool send);
  558. static int dhd_msgbuf_query_ioctl(dhd_pub_t *dhd, int ifidx, uint cmd,
  559. void *buf, uint len, uint8 action);
  560. static int dhd_msgbuf_set_ioctl(dhd_pub_t *dhd, int ifidx, uint cmd,
  561. void *buf, uint len, uint8 action);
  562. static int dhd_msgbuf_wait_ioctl_cmplt(dhd_pub_t *dhd, uint32 len, void *buf);
  563. static int dhd_fillup_ioct_reqst(dhd_pub_t *dhd, uint16 len, uint cmd,
  564. void *buf, int ifidx);
  565. /* Post buffers for Rx, control ioctl response and events */
  566. static uint16 dhd_msgbuf_rxbuf_post_ctrlpath(dhd_pub_t *dhd, uint8 msgid, uint32 max_to_post);
  567. static void dhd_msgbuf_rxbuf_post_ioctlresp_bufs(dhd_pub_t *pub);
  568. static void dhd_msgbuf_rxbuf_post_event_bufs(dhd_pub_t *pub);
  569. static void dhd_msgbuf_rxbuf_post(dhd_pub_t *dhd, bool use_rsv_pktid);
  570. static int dhd_prot_rxbuf_post(dhd_pub_t *dhd, uint16 count, bool use_rsv_pktid);
  571. static int dhd_msgbuf_rxbuf_post_ts_bufs(dhd_pub_t *pub);
  572. static void dhd_prot_return_rxbuf(dhd_pub_t *dhd, uint32 pktid, uint32 rxcnt);
  573. /* D2H Message handling */
  574. static int dhd_prot_process_msgtype(dhd_pub_t *dhd, msgbuf_ring_t *ring, uint8 *buf, uint32 len);
  575. /* D2H Message handlers */
  576. static void dhd_prot_noop(dhd_pub_t *dhd, void *msg);
  577. static void dhd_prot_txstatus_process(dhd_pub_t *dhd, void *msg);
  578. static void dhd_prot_ioctcmplt_process(dhd_pub_t *dhd, void *msg);
  579. static void dhd_prot_ioctack_process(dhd_pub_t *dhd, void *msg);
  580. static void dhd_prot_ringstatus_process(dhd_pub_t *dhd, void *msg);
  581. static void dhd_prot_genstatus_process(dhd_pub_t *dhd, void *msg);
  582. static void dhd_prot_event_process(dhd_pub_t *dhd, void *msg);
  583. /* Loopback test with dongle */
  584. static void dmaxfer_free_dmaaddr(dhd_pub_t *dhd, dhd_dmaxfer_t *dma);
  585. static int dmaxfer_prepare_dmaaddr(dhd_pub_t *dhd, uint len, uint srcdelay,
  586. uint destdelay, dhd_dmaxfer_t *dma);
  587. static void dhd_msgbuf_dmaxfer_process(dhd_pub_t *dhd, void *msg);
  588. /* Flowring management communication with dongle */
  589. static void dhd_prot_flow_ring_create_response_process(dhd_pub_t *dhd, void *msg);
  590. static void dhd_prot_flow_ring_delete_response_process(dhd_pub_t *dhd, void *msg);
  591. static void dhd_prot_flow_ring_flush_response_process(dhd_pub_t *dhd, void *msg);
  592. static void dhd_prot_process_flow_ring_resume_response(dhd_pub_t *dhd, void* msg);
  593. static void dhd_prot_process_flow_ring_suspend_response(dhd_pub_t *dhd, void* msg);
  594. /* Monitor Mode */
  595. #ifdef WL_MONITOR
  596. extern bool dhd_monitor_enabled(dhd_pub_t *dhd, int ifidx);
  597. extern void dhd_rx_mon_pkt(dhd_pub_t *dhdp, host_rxbuf_cmpl_t* msg, void *pkt, int ifidx);
  598. #endif /* WL_MONITOR */
  599. /* Configure a soft doorbell per D2H ring */
  600. static void dhd_msgbuf_ring_config_d2h_soft_doorbell(dhd_pub_t *dhd);
  601. static void dhd_prot_process_d2h_ring_config_complete(dhd_pub_t *dhd, void *msg);
  602. static void dhd_prot_process_d2h_ring_create_complete(dhd_pub_t *dhd, void *buf);
  603. static void dhd_prot_process_h2d_ring_create_complete(dhd_pub_t *dhd, void *buf);
  604. static void dhd_prot_process_infobuf_complete(dhd_pub_t *dhd, void* buf);
  605. static void dhd_prot_process_d2h_mb_data(dhd_pub_t *dhd, void* buf);
  606. static void dhd_prot_detach_info_rings(dhd_pub_t *dhd);
  607. #ifdef DHD_HP2P
  608. static void dhd_prot_detach_hp2p_rings(dhd_pub_t *dhd);
  609. #endif /* DHD_HP2P */
  610. #ifdef EWP_EDL
  611. static void dhd_prot_detach_edl_rings(dhd_pub_t *dhd);
  612. #endif // endif
  613. static void dhd_prot_process_d2h_host_ts_complete(dhd_pub_t *dhd, void* buf);
  614. static void dhd_prot_process_snapshot_complete(dhd_pub_t *dhd, void *buf);
  615. #ifdef DHD_HP2P
  616. static void dhd_update_hp2p_rxstats(dhd_pub_t *dhd, host_rxbuf_cmpl_t *rxstatus);
  617. static void dhd_update_hp2p_txstats(dhd_pub_t *dhd, host_txbuf_cmpl_t *txstatus);
  618. static void dhd_calc_hp2p_burst(dhd_pub_t *dhd, msgbuf_ring_t *ring, uint16 flowid);
  619. static void dhd_update_hp2p_txdesc(dhd_pub_t *dhd, host_txbuf_post_t *txdesc);
  620. #endif // endif
  621. typedef void (*dhd_msgbuf_func_t)(dhd_pub_t *dhd, void *msg);
  622. /** callback functions for messages generated by the dongle */
  623. #define MSG_TYPE_INVALID 0
  624. static dhd_msgbuf_func_t table_lookup[DHD_PROT_FUNCS] = {
  625. dhd_prot_noop, /* 0 is MSG_TYPE_INVALID */
  626. dhd_prot_genstatus_process, /* MSG_TYPE_GEN_STATUS */
  627. dhd_prot_ringstatus_process, /* MSG_TYPE_RING_STATUS */
  628. NULL,
  629. dhd_prot_flow_ring_create_response_process, /* MSG_TYPE_FLOW_RING_CREATE_CMPLT */
  630. NULL,
  631. dhd_prot_flow_ring_delete_response_process, /* MSG_TYPE_FLOW_RING_DELETE_CMPLT */
  632. NULL,
  633. dhd_prot_flow_ring_flush_response_process, /* MSG_TYPE_FLOW_RING_FLUSH_CMPLT */
  634. NULL,
  635. dhd_prot_ioctack_process, /* MSG_TYPE_IOCTLPTR_REQ_ACK */
  636. NULL,
  637. dhd_prot_ioctcmplt_process, /* MSG_TYPE_IOCTL_CMPLT */
  638. NULL,
  639. dhd_prot_event_process, /* MSG_TYPE_WL_EVENT */
  640. NULL,
  641. dhd_prot_txstatus_process, /* MSG_TYPE_TX_STATUS */
  642. NULL,
  643. NULL, /* MSG_TYPE_RX_CMPLT use dedicated handler */
  644. NULL,
  645. dhd_msgbuf_dmaxfer_process, /* MSG_TYPE_LPBK_DMAXFER_CMPLT */
  646. NULL, /* MSG_TYPE_FLOW_RING_RESUME */
  647. dhd_prot_process_flow_ring_resume_response, /* MSG_TYPE_FLOW_RING_RESUME_CMPLT */
  648. NULL, /* MSG_TYPE_FLOW_RING_SUSPEND */
  649. dhd_prot_process_flow_ring_suspend_response, /* MSG_TYPE_FLOW_RING_SUSPEND_CMPLT */
  650. NULL, /* MSG_TYPE_INFO_BUF_POST */
  651. dhd_prot_process_infobuf_complete, /* MSG_TYPE_INFO_BUF_CMPLT */
  652. NULL, /* MSG_TYPE_H2D_RING_CREATE */
  653. NULL, /* MSG_TYPE_D2H_RING_CREATE */
  654. dhd_prot_process_h2d_ring_create_complete, /* MSG_TYPE_H2D_RING_CREATE_CMPLT */
  655. dhd_prot_process_d2h_ring_create_complete, /* MSG_TYPE_D2H_RING_CREATE_CMPLT */
  656. NULL, /* MSG_TYPE_H2D_RING_CONFIG */
  657. NULL, /* MSG_TYPE_D2H_RING_CONFIG */
  658. NULL, /* MSG_TYPE_H2D_RING_CONFIG_CMPLT */
  659. dhd_prot_process_d2h_ring_config_complete, /* MSG_TYPE_D2H_RING_CONFIG_CMPLT */
  660. NULL, /* MSG_TYPE_H2D_MAILBOX_DATA */
  661. dhd_prot_process_d2h_mb_data, /* MSG_TYPE_D2H_MAILBOX_DATA */
  662. NULL, /* MSG_TYPE_TIMSTAMP_BUFPOST */
  663. NULL, /* MSG_TYPE_HOSTTIMSTAMP */
  664. dhd_prot_process_d2h_host_ts_complete, /* MSG_TYPE_HOSTTIMSTAMP_CMPLT */
  665. dhd_prot_process_fw_timestamp, /* MSG_TYPE_FIRMWARE_TIMESTAMP */
  666. NULL, /* MSG_TYPE_SNAPSHOT_UPLOAD */
  667. dhd_prot_process_snapshot_complete, /* MSG_TYPE_SNAPSHOT_CMPLT */
  668. };
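  /*
   * Dispatch sketch (illustrative only, not the exact driver flow): once the
   * bound d2h_sync_cb validates a work item and returns its msg_type, the
   * corresponding handler could be invoked roughly as follows (assuming msg_type
   * has already been checked against DHD_PROT_FUNCS):
   *
   *	uint8 msg_type = dhd->prot->d2h_sync_cb(dhd, ring, msg, ring->item_len);
   *	if (msg_type < DHD_PROT_FUNCS && table_lookup[msg_type] != NULL) {
   *		table_lookup[msg_type](dhd, (void *)msg);
   *	}
   */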
  669. #ifdef DHD_RX_CHAINING
  670. #define PKT_CTF_CHAINABLE(dhd, ifidx, evh, prio, h_sa, h_da, h_prio) \
  671. (dhd_wet_chainable(dhd) && \
  672. dhd_rx_pkt_chainable((dhd), (ifidx)) && \
  673. !ETHER_ISNULLDEST(((struct ether_header *)(evh))->ether_dhost) && \
  674. !ETHER_ISMULTI(((struct ether_header *)(evh))->ether_dhost) && \
  675. !eacmp((h_da), ((struct ether_header *)(evh))->ether_dhost) && \
  676. !eacmp((h_sa), ((struct ether_header *)(evh))->ether_shost) && \
  677. ((h_prio) == (prio)) && (dhd_ctf_hotbrc_check((dhd), (evh), (ifidx))) && \
  678. ((((struct ether_header *)(evh))->ether_type == HTON16(ETHER_TYPE_IP)) || \
  679. (((struct ether_header *)(evh))->ether_type == HTON16(ETHER_TYPE_IPV6))))
  680. static INLINE void BCMFASTPATH dhd_rxchain_reset(rxchain_info_t *rxchain);
  681. static void BCMFASTPATH dhd_rxchain_frame(dhd_pub_t *dhd, void *pkt, uint ifidx);
  682. static void BCMFASTPATH dhd_rxchain_commit(dhd_pub_t *dhd);
  683. #define DHD_PKT_CTF_MAX_CHAIN_LEN 64
  684. #endif /* DHD_RX_CHAINING */
  685. #define DHD_LPBKDTDUMP_ON() (dhd_msg_level & DHD_LPBKDTDUMP_VAL)
  686. static void dhd_prot_h2d_sync_init(dhd_pub_t *dhd);
  687. bool
  688. dhd_prot_is_cmpl_ring_empty(dhd_pub_t *dhd, void *prot_info)
  689. {
  690. msgbuf_ring_t *flow_ring = (msgbuf_ring_t *)prot_info;
  691. uint16 rd, wr;
  692. bool ret;
  693. if (dhd->dma_d2h_ring_upd_support) {
  694. wr = flow_ring->wr;
  695. } else {
  696. dhd_bus_cmn_readshared(dhd->bus, &wr, RING_WR_UPD, flow_ring->idx);
  697. }
  698. if (dhd->dma_h2d_ring_upd_support) {
  699. rd = flow_ring->rd;
  700. } else {
  701. dhd_bus_cmn_readshared(dhd->bus, &rd, RING_RD_UPD, flow_ring->idx);
  702. }
  703. ret = (wr == rd) ? TRUE : FALSE;
  704. return ret;
  705. }
  706. void
  707. dhd_prot_dump_ring_ptrs(void *prot_info)
  708. {
  709. msgbuf_ring_t *ring = (msgbuf_ring_t *)prot_info;
  710. DHD_ERROR(("%s curr_rd: %d rd: %d wr: %d \n", __FUNCTION__,
  711. ring->curr_rd, ring->rd, ring->wr));
  712. }
  713. uint16
  714. dhd_prot_get_h2d_max_txpost(dhd_pub_t *dhd)
  715. {
  716. return (uint16)h2d_max_txpost;
  717. }
  718. void
  719. dhd_prot_set_h2d_max_txpost(dhd_pub_t *dhd, uint16 max_txpost)
  720. {
  721. h2d_max_txpost = max_txpost;
  722. }
  723. /**
  724. * D2H DMA to completion callback handlers. Based on the mode advertised by the
  725. * dongle through the PCIE shared region, the appropriate callback will be
  726. * registered in the proto layer to be invoked prior to processing any message
  727. * from a D2H DMA ring. If the dongle uses a read barrier or another mode that
  728. * does not require host participation, then a noop callback handler will be
  729. * bound that simply returns the msg_type.
  730. */
  731. static void dhd_prot_d2h_sync_livelock(dhd_pub_t *dhd, uint32 msg_seqnum, msgbuf_ring_t *ring,
  732. uint32 tries, volatile uchar *msg, int msglen);
  733. static uint8 dhd_prot_d2h_sync_seqnum(dhd_pub_t *dhd, msgbuf_ring_t *ring,
  734. volatile cmn_msg_hdr_t *msg, int msglen);
  735. static uint8 dhd_prot_d2h_sync_xorcsum(dhd_pub_t *dhd, msgbuf_ring_t *ring,
  736. volatile cmn_msg_hdr_t *msg, int msglen);
  737. static uint8 dhd_prot_d2h_sync_none(dhd_pub_t *dhd, msgbuf_ring_t *ring,
  738. volatile cmn_msg_hdr_t *msg, int msglen);
  739. static void dhd_prot_d2h_sync_init(dhd_pub_t *dhd);
  740. static int dhd_send_d2h_ringcreate(dhd_pub_t *dhd, msgbuf_ring_t *ring_to_create,
  741. uint16 ring_type, uint32 id);
  742. static int dhd_send_h2d_ringcreate(dhd_pub_t *dhd, msgbuf_ring_t *ring_to_create,
  743. uint8 type, uint32 id);
  744. /**
  745. * dhd_prot_d2h_sync_livelock - when the host determines that a DMA transfer has
  746. * not completed, a livelock condition occurs. Host will avert this livelock by
  747. * dropping this message and moving to the next. This dropped message can lead
  748. * to a packet leak, or even something disastrous in the case the dropped
  749. * message happens to be a control response.
  750. * Here we will log this condition. One may choose to reboot the dongle.
  751. *
  752. */
  753. static void
  754. dhd_prot_d2h_sync_livelock(dhd_pub_t *dhd, uint32 msg_seqnum, msgbuf_ring_t *ring, uint32 tries,
  755. volatile uchar *msg, int msglen)
  756. {
  757. uint32 ring_seqnum = ring->seqnum;
  758. if (dhd_query_bus_erros(dhd)) {
  759. return;
  760. }
  761. DHD_ERROR((
  762. "LIVELOCK DHD<%p> ring<%s> msg_seqnum<%u> ring_seqnum<%u:%u> tries<%u> max<%lu>"
  763. " tot<%lu> dma_buf va<%p> msg<%p> curr_rd<%d> rd<%d> wr<%d>\n",
  764. dhd, ring->name, msg_seqnum, ring_seqnum, ring_seqnum% D2H_EPOCH_MODULO, tries,
  765. dhd->prot->d2h_sync_wait_max, dhd->prot->d2h_sync_wait_tot,
  766. ring->dma_buf.va, msg, ring->curr_rd, ring->rd, ring->wr));
  767. dhd_prhex("D2H MsgBuf Failure", msg, msglen, DHD_ERROR_VAL);
  768. /* Try to resume if already suspended or suspend in progress */
  769. #ifdef DHD_PCIE_RUNTIMEPM
  770. dhdpcie_runtime_bus_wake(dhd, CAN_SLEEP(), __builtin_return_address(0));
  771. #endif /* DHD_PCIE_RUNTIMEPM */
  772. /* Skip if still in suspended or suspend in progress */
  773. if (DHD_BUS_CHECK_SUSPEND_OR_ANY_SUSPEND_IN_PROGRESS(dhd)) {
  774. DHD_ERROR(("%s: bus is in suspend(%d) or suspending(0x%x) state, so skip\n",
  775. __FUNCTION__, dhd->busstate, dhd->dhd_bus_busy_state));
  776. goto exit;
  777. }
  778. dhd_bus_dump_console_buffer(dhd->bus);
  779. dhd_prot_debug_info_print(dhd);
  780. #ifdef DHD_FW_COREDUMP
  781. if (dhd->memdump_enabled) {
  782. /* collect core dump */
  783. dhd->memdump_type = DUMP_TYPE_BY_LIVELOCK;
  784. dhd_bus_mem_dump(dhd);
  785. }
  786. #endif /* DHD_FW_COREDUMP */
  787. exit:
  788. dhd_schedule_reset(dhd);
  789. #ifdef OEM_ANDROID
  790. #ifdef SUPPORT_LINKDOWN_RECOVERY
  791. #ifdef CONFIG_ARCH_MSM
  792. dhd->bus->no_cfg_restore = 1;
  793. #endif /* CONFIG_ARCH_MSM */
  794. dhd->hang_reason = HANG_REASON_MSGBUF_LIVELOCK;
  795. dhd_os_send_hang_message(dhd);
  796. #endif /* SUPPORT_LINKDOWN_RECOVERY */
  797. #endif /* OEM_ANDROID */
  798. dhd->livelock_occured = TRUE;
  799. }
  800. /**
  801. * dhd_prot_d2h_sync_seqnum - Sync on a D2H DMA completion using the SEQNUM
  802. * mode. Sequence number is always in the last word of a message.
  803. */
  804. static uint8 BCMFASTPATH
  805. dhd_prot_d2h_sync_seqnum(dhd_pub_t *dhd, msgbuf_ring_t *ring,
  806. volatile cmn_msg_hdr_t *msg, int msglen)
  807. {
  808. uint32 tries;
  809. uint32 ring_seqnum = ring->seqnum % D2H_EPOCH_MODULO;
  810. int num_words = msglen / sizeof(uint32); /* num of 32bit words */
  811. volatile uint32 *marker = (volatile uint32 *)msg + (num_words - 1); /* last word */
  812. dhd_prot_t *prot = dhd->prot;
  813. uint32 msg_seqnum;
  814. uint32 step = 0;
  815. uint32 delay = PCIE_D2H_SYNC_DELAY;
  816. uint32 total_tries = 0;
  817. ASSERT(msglen == ring->item_len);
  818. BCM_REFERENCE(delay);
  819. /*
  820. * For retries we have to make some sort of stepper algorithm.
  821. * We see that every time when the Dongle comes out of the D3
  822. * Cold state, the first D2H mem2mem DMA takes more time to
  823. * complete, leading to livelock issues.
  824. *
  825. * Case 1 - Apart from the Host CPU, some other bus master is
  826. * accessing the DDR port, probably a page close to the ring,
  827. * so PCIE does not get a chance to update the memory.
  828. * Solution - Increase the number of tries.
  829. *
  830. * Case 2 - The 50usec delay given by the Host CPU is not
  831. * sufficient for the PCIe RC to start its work.
  832. * In this case the breathing time of 50usec given by
  833. * the Host CPU is not sufficient.
  834. * Solution: Increase the delay in a stepper fashion.
  835. * This is done to ensure that no unwanted extra delay is
  836. * introduced in normal conditions.
  837. */
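	/*
	 * Illustrative worst-case bound (assuming the defaults above and ignoring
	 * cache-invalidate/relax overhead): each step polls PCIE_D2H_SYNC_WAIT_TRIES
	 * (512) times with a per-poll delay of PCIE_D2H_SYNC_DELAY * step, so the
	 * cumulative delay before a livelock is declared is roughly
	 * 512 * 100us * (1 + 2 + 3 + 4 + 5) ~= 768 ms.
	 */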
  838. for (step = 1; step <= PCIE_D2H_SYNC_NUM_OF_STEPS; step++) {
  839. for (tries = 0; tries < PCIE_D2H_SYNC_WAIT_TRIES; tries++) {
  840. msg_seqnum = *marker;
  841. if (ltoh32(msg_seqnum) == ring_seqnum) { /* dma up to last word done */
  842. ring->seqnum++; /* next expected sequence number */
  843. /* Check for LIVELOCK induce flag, which is set by firing
  844. * dhd iovar to induce LIVELOCK error. If flag is set,
  845. * MSG_TYPE_INVALID is returned, which results in a LIVELOCK error.
  846. */
  847. if (dhd->dhd_induce_error != DHD_INDUCE_LIVELOCK) {
  848. goto dma_completed;
  849. }
  850. }
  851. total_tries = (uint32)(((step-1) * PCIE_D2H_SYNC_WAIT_TRIES) + tries);
  852. if (total_tries > prot->d2h_sync_wait_max)
  853. prot->d2h_sync_wait_max = total_tries;
  854. OSL_CACHE_INV(msg, msglen); /* invalidate and try again */
  855. OSL_CPU_RELAX(); /* CPU relax for msg_seqnum value to update */
  856. OSL_DELAY(delay * step); /* Add stepper delay */
  857. } /* for PCIE_D2H_SYNC_WAIT_TRIES */
  858. } /* for PCIE_D2H_SYNC_NUM_OF_STEPS */
  859. dhd_prot_d2h_sync_livelock(dhd, msg_seqnum, ring, total_tries,
  860. (volatile uchar *) msg, msglen);
  861. ring->seqnum++; /* skip this message ... leak of a pktid */
  862. return MSG_TYPE_INVALID; /* invalid msg_type 0 -> noop callback */
  863. dma_completed:
  864. prot->d2h_sync_wait_tot += tries;
  865. return msg->msg_type;
  866. }
  867. /**
  868. * dhd_prot_d2h_sync_xorcsum - Sync on a D2H DMA completion using the XORCSUM
  869. * mode. The xorcsum is placed in the last word of a message. Dongle will also
  870. * place a seqnum in the epoch field of the cmn_msg_hdr.
  871. */
  872. static uint8 BCMFASTPATH
  873. dhd_prot_d2h_sync_xorcsum(dhd_pub_t *dhd, msgbuf_ring_t *ring,
  874. volatile cmn_msg_hdr_t *msg, int msglen)
  875. {
  876. uint32 tries;
  877. uint32 prot_checksum = 0; /* computed checksum */
  878. int num_words = msglen / sizeof(uint32); /* num of 32bit words */
  879. uint8 ring_seqnum = ring->seqnum % D2H_EPOCH_MODULO;
  880. dhd_prot_t *prot = dhd->prot;
  881. uint32 step = 0;
  882. uint32 delay = PCIE_D2H_SYNC_DELAY;
  883. uint32 total_tries = 0;
  884. ASSERT(msglen == ring->item_len);
  885. BCM_REFERENCE(delay);
  886. /*
  887. * For retries we have to make some sort of stepper algorithm.
  888. * We see that every time when the Dongle comes out of the D3
  889. * Cold state, the first D2H mem2mem DMA takes more time to
  890. * complete, leading to livelock issues.
  891. *
  892. * Case 1 - Apart from the Host CPU, some other bus master is
  893. * accessing the DDR port, probably a page close to the ring,
  894. * so PCIE does not get a chance to update the memory.
  895. * Solution - Increase the number of tries.
  896. *
  897. * Case 2 - The 50usec delay given by the Host CPU is not
  898. * sufficient for the PCIe RC to start its work.
  899. * In this case the breathing time of 50usec given by
  900. * the Host CPU is not sufficient.
  901. * Solution: Increase the delay in a stepper fashion.
  902. * This is done to ensure that no unwanted extra delay is
  903. * introduced in normal conditions.
  904. */
  905. for (step = 1; step <= PCIE_D2H_SYNC_NUM_OF_STEPS; step++) {
  906. for (tries = 0; tries < PCIE_D2H_SYNC_WAIT_TRIES; tries++) {
  907. * First verify if the seqnumber has been updated;
  908. * if yes, then only check the xorcsum.
  909. * Once both seqnum and xorcsum are proper, it means the
  910. * complete message has arrived.
  911. */
  912. if (msg->epoch == ring_seqnum) {
  913. prot_checksum = bcm_compute_xor32((volatile uint32 *)msg,
  914. num_words);
  915. if (prot_checksum == 0U) { /* checksum is OK */
  916. ring->seqnum++; /* next expected sequence number */
  917. /* Check for LIVELOCK induce flag, which is set by firing
  918. * dhd iovar to induce LIVELOCK error. If flag is set,
  919. * MSG_TYPE_INVALID is returned, which results in a
  920. * LIVELOCK error.
  921. */
  922. if (dhd->dhd_induce_error != DHD_INDUCE_LIVELOCK) {
  923. goto dma_completed;
  924. }
  925. }
  926. }
  927. total_tries = ((step-1) * PCIE_D2H_SYNC_WAIT_TRIES) + tries;
  928. if (total_tries > prot->d2h_sync_wait_max)
  929. prot->d2h_sync_wait_max = total_tries;
  930. OSL_CACHE_INV(msg, msglen); /* invalidate and try again */
  931. OSL_CPU_RELAX(); /* CPU relax for msg_seqnum value to update */
  932. OSL_DELAY(delay * step); /* Add stepper delay */
  933. } /* for PCIE_D2H_SYNC_WAIT_TRIES */
  934. } /* for PCIE_D2H_SYNC_NUM_OF_STEPS */
  935. DHD_ERROR(("%s: prot_checksum = 0x%x\n", __FUNCTION__, prot_checksum));
  936. dhd_prot_d2h_sync_livelock(dhd, msg->epoch, ring, total_tries,
  937. (volatile uchar *) msg, msglen);
  938. ring->seqnum++; /* skip this message ... leak of a pktid */
  939. return MSG_TYPE_INVALID; /* invalid msg_type 0 -> noop callback */
  940. dma_completed:
  941. prot->d2h_sync_wait_tot += tries;
  942. return msg->msg_type;
  943. }
  944. /**
  945. * dhd_prot_d2h_sync_none - Dongle ensures that the DMA will complete and the host
  946. * need not try to sync. This noop sync handler will be bound when the dongle
  947. * advertises that neither the SEQNUM nor XORCSUM mode of DMA sync is required.
  948. */
  949. static uint8 BCMFASTPATH
  950. dhd_prot_d2h_sync_none(dhd_pub_t *dhd, msgbuf_ring_t *ring,
  951. volatile cmn_msg_hdr_t *msg, int msglen)
  952. {
  953. /* Check for LIVELOCK induce flag, which is set by firing
  954. * dhd iovar to induce LIVELOCK error. If flag is set,
  955. * MSG_TYPE_INVALID is returned, which results in a LIVELOCK error.
  956. */
  957. if (dhd->dhd_induce_error == DHD_INDUCE_LIVELOCK) {
  958. DHD_ERROR(("%s: Inducing livelock\n", __FUNCTION__));
  959. return MSG_TYPE_INVALID;
  960. } else {
  961. return msg->msg_type;
  962. }
  963. }
  964. #ifdef EWP_EDL
  965. /**
  966. * dhd_prot_d2h_sync_edl - Sync on a D2H DMA completion by validating the cmn_msg_hdr_t
  967. * header values at both the beginning and end of the payload.
  968. * The cmn_msg_hdr_t is placed at the start and end of the payload
  969. * in each work item in the EDL ring.
  970. * Dongle will place a seqnum inside the cmn_msg_hdr_t 'epoch' field
  971. * and the length of the payload in the 'request_id' field.
  972. * Structure of each work item in the EDL ring:
  973. * | cmn_msg_hdr_t | payload (var len) | cmn_msg_hdr_t |
  974. * NOTE: - it was felt that calculating xorcsum for the entire payload (max length of 1648 bytes) is
  975. * too costly on the dongle side and might take up too many ARM cycles,
  976. * hence the xorcsum sync method is not being used for EDL ring.
  977. */
  978. static int
  979. BCMFASTPATH(dhd_prot_d2h_sync_edl)(dhd_pub_t *dhd, msgbuf_ring_t *ring,
  980. volatile cmn_msg_hdr_t *msg)
  981. {
  982. uint32 tries;
  983. int msglen = 0, len = 0;
  984. uint32 ring_seqnum = ring->seqnum % D2H_EPOCH_MODULO;
  985. dhd_prot_t *prot = dhd->prot;
  986. uint32 step = 0;
  987. uint32 delay = PCIE_D2H_SYNC_DELAY;
  988. uint32 total_tries = 0;
  989. volatile cmn_msg_hdr_t *trailer = NULL;
  990. volatile uint8 *buf = NULL;
  991. bool valid_msg = FALSE;
  992. BCM_REFERENCE(delay);
  993. /*
  994. * For retries we have to make some sort of stepper algorithm.
  995. * We see that every time when the Dongle comes out of the D3
  996. * Cold state, the first D2H mem2mem DMA takes more time to
  997. * complete, leading to livelock issues.
  998. *
  999. * Case 1 - Apart from the Host CPU, some other bus master is
  1000. * accessing the DDR port, probably a page close to the ring,
  1001. * so PCIE does not get a chance to update the memory.
  1002. * Solution - Increase the number of tries.
  1003. *
  1004. * Case 2 - The 50usec delay given by the Host CPU is not
  1005. * sufficient for the PCIe RC to start its work.
  1006. * In this case the breathing time of 50usec given by
  1007. * the Host CPU is not sufficient.
  1008. * Solution: Increase the delay in a stepper fashion.
  1009. * This is done to ensure that no unwanted extra delay is
  1010. * introduced in normal conditions.
  1011. */
  1012. for (step = 1; step <= PCIE_D2H_SYNC_NUM_OF_STEPS; step++) {
  1013. for (tries = 0; tries < PCIE_D2H_SYNC_WAIT_TRIES; tries++) {
  1014. /* First verify if the seqnumber has been updated,
  1015. * if yes, only then validate the header and trailer.
  1016. * Once seqnum, header and trailer have been validated, it means
  1017. * that the complete message has arrived.
  1018. */
  1019. valid_msg = FALSE;
  1020. if (msg->epoch == ring_seqnum &&
  1021. msg->msg_type == MSG_TYPE_INFO_PYLD &&
  1022. msg->request_id > 0 &&
  1023. msg->request_id <= ring->item_len) {
  1024. /* proceed to check trailer only if header is valid */
  1025. buf = (volatile uint8 *)msg;
  1026. msglen = sizeof(cmn_msg_hdr_t) + msg->request_id;
  1027. buf += msglen;
  1028. if (msglen + sizeof(cmn_msg_hdr_t) <= ring->item_len) {
  1029. trailer = (volatile cmn_msg_hdr_t *)buf;
  1030. valid_msg = (trailer->epoch == ring_seqnum) &&
  1031. (trailer->msg_type == msg->msg_type) &&
  1032. (trailer->request_id == msg->request_id);
  1033. if (!valid_msg) {
  1034. DHD_TRACE(("%s:invalid trailer! seqnum=%u;reqid=%u"
  1035. " expected, seqnum=%u; reqid=%u. Retrying... \n",
  1036. __FUNCTION__, trailer->epoch, trailer->request_id,
  1037. msg->epoch, msg->request_id));
  1038. }
  1039. } else {
  1040. DHD_TRACE(("%s: invalid payload length (%u)! Retrying.. \n",
  1041. __FUNCTION__, msg->request_id));
  1042. }
  1043. if (valid_msg) {
  1044. /* data is OK */
  1045. ring->seqnum++; /* next expected sequence number */
  1046. if (dhd->dhd_induce_error != DHD_INDUCE_LIVELOCK) {
  1047. goto dma_completed;
  1048. }
  1049. }
  1050. } else {
  1051. DHD_TRACE(("%s: wrong hdr, seqnum expected %u, got %u."
  1052. " msg_type=0x%x, request_id=%u."
  1053. " Retrying...\n",
  1054. __FUNCTION__, ring_seqnum, msg->epoch,
  1055. msg->msg_type, msg->request_id));
  1056. }
  1057. total_tries = ((step-1) * PCIE_D2H_SYNC_WAIT_TRIES) + tries;
  1058. if (total_tries > prot->d2h_sync_wait_max)
  1059. prot->d2h_sync_wait_max = total_tries;
  1060. OSL_CACHE_INV(msg, msglen); /* invalidate and try again */
  1061. OSL_CPU_RELAX(); /* CPU relax for msg_seqnum value to update */
  1062. OSL_DELAY(delay * step); /* Add stepper delay */
  1063. } /* for PCIE_D2H_SYNC_WAIT_TRIES */
  1064. } /* for PCIE_D2H_SYNC_NUM_OF_STEPS */
  1065. DHD_ERROR(("%s: EDL header check fails !\n", __FUNCTION__));
  1066. DHD_ERROR(("%s: header: seqnum=%u; expected-seqnum=%u"
  1067. " msgtype=0x%x; expected-msgtype=0x%x"
  1068. " length=%u; expected-max-length=%u", __FUNCTION__,
  1069. msg->epoch, ring_seqnum, msg->msg_type, MSG_TYPE_INFO_PYLD,
  1070. msg->request_id, ring->item_len));
  1071. dhd_prhex("msg header bytes: ", (volatile uchar *)msg, sizeof(*msg), DHD_ERROR_VAL);
  1072. if (trailer && msglen > 0 &&
  1073. (msglen + sizeof(cmn_msg_hdr_t)) <= ring->item_len) {
  1074. DHD_ERROR(("%s: trailer: seqnum=%u; expected-seqnum=%u"
  1075. " msgtype=0x%x; expected-msgtype=0x%x"
  1076. " length=%u; expected-length=%u", __FUNCTION__,
  1077. trailer->epoch, ring_seqnum, trailer->msg_type, MSG_TYPE_INFO_PYLD,
  1078. trailer->request_id, msg->request_id));
  1079. dhd_prhex("msg trailer bytes: ", (volatile uchar *)trailer,
  1080. sizeof(*trailer), DHD_ERROR_VAL);
  1081. }
  1082. if ((msglen + sizeof(cmn_msg_hdr_t)) <= ring->item_len)
  1083. len = msglen + sizeof(cmn_msg_hdr_t);
  1084. else
  1085. len = ring->item_len;
  1086. dhd_prot_d2h_sync_livelock(dhd, msg->epoch, ring, total_tries,
  1087. (volatile uchar *) msg, len);
  1088. ring->seqnum++; /* skip this message */
  1089. return BCME_ERROR; /* invalid msg_type 0 -> noop callback */
  1090. dma_completed:
  1091. DHD_TRACE(("%s: EDL header check pass, seqnum=%u; reqid=%u\n", __FUNCTION__,
  1092. msg->epoch, msg->request_id));
  1093. prot->d2h_sync_wait_tot += tries;
  1094. return BCME_OK;
  1095. }
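  /*
   * EDL work item layout example (illustrative): for a payload whose length
   * (request_id) is 100 bytes, the trailer cmn_msg_hdr_t starts at offset
   * sizeof(cmn_msg_hdr_t) + 100 from the start of the work item, and the whole
   * item occupies 2 * sizeof(cmn_msg_hdr_t) + 100 bytes, which must fit within
   * ring->item_len for the sync check above to accept it.
   */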
  1096. /**
  1097. * dhd_prot_d2h_sync_edl_none - Dongle ensures that the DMA will complete and the host
  1098. * need not try to sync. This noop sync handler will be bound when the dongle
  1099. * advertises that neither the SEQNUM nor XORCSUM mode of DMA sync is required.
  1100. */
  1101. static int BCMFASTPATH
  1102. dhd_prot_d2h_sync_edl_none(dhd_pub_t *dhd, msgbuf_ring_t *ring,
  1103. volatile cmn_msg_hdr_t *msg)
  1104. {
  1105. /* Check for LIVELOCK induce flag, which is set by firing
  1106. * dhd iovar to induce LIVELOCK error. If flag is set,
  1107. * MSG_TYPE_INVALID is returned, which results in a LIVELOCK error.
  1108. */
  1109. if (dhd->dhd_induce_error == DHD_INDUCE_LIVELOCK) {
  1110. DHD_ERROR(("%s: Inducing livelock\n", __FUNCTION__));
  1111. return BCME_ERROR;
  1112. } else {
  1113. if (msg->msg_type == MSG_TYPE_INFO_PYLD)
  1114. return BCME_OK;
  1115. else
  1116. return msg->msg_type;
  1117. }
  1118. }
  1119. #endif /* EWP_EDL */
  1120. INLINE void
  1121. dhd_wakeup_ioctl_event(dhd_pub_t *dhd, dhd_ioctl_recieved_status_t reason)
  1122. {
  1123. /* To synchronize with the previous memory operations call wmb() */
  1124. OSL_SMP_WMB();
  1125. dhd->prot->ioctl_received = reason;
  1126. /* Call another wmb() to make sure the updated event value is visible before waking up the waiter */
  1127. OSL_SMP_WMB();
  1128. dhd_os_ioctl_resp_wake(dhd);
  1129. }
  1130. /**
  1131. * dhd_prot_d2h_sync_init - Setup the host side DMA sync mode based on what
  1132. * dongle advertizes.
  1133. */
  1134. static void
  1135. dhd_prot_d2h_sync_init(dhd_pub_t *dhd)
  1136. {
  1137. dhd_prot_t *prot = dhd->prot;
  1138. prot->d2h_sync_wait_max = 0UL;
  1139. prot->d2h_sync_wait_tot = 0UL;
  1140. prot->d2hring_ctrl_cpln.seqnum = D2H_EPOCH_INIT_VAL;
  1141. prot->d2hring_ctrl_cpln.current_phase = BCMPCIE_CMNHDR_PHASE_BIT_INIT;
  1142. prot->d2hring_tx_cpln.seqnum = D2H_EPOCH_INIT_VAL;
  1143. prot->d2hring_tx_cpln.current_phase = BCMPCIE_CMNHDR_PHASE_BIT_INIT;
  1144. prot->d2hring_rx_cpln.seqnum = D2H_EPOCH_INIT_VAL;
  1145. prot->d2hring_rx_cpln.current_phase = BCMPCIE_CMNHDR_PHASE_BIT_INIT;
  1146. if (HWA_ACTIVE(dhd)) {
  1147. prot->d2hring_tx_cpln.hwa_db_type =
  1148. (dhd->bus->hwa_enab_bmap & HWA_ENAB_BITMAP_TXCPLT) ? HWA_DB_TYPE_TXCPLT : 0;
  1149. prot->d2hring_rx_cpln.hwa_db_type =
  1150. (dhd->bus->hwa_enab_bmap & HWA_ENAB_BITMAP_RXCPLT) ? HWA_DB_TYPE_RXCPLT : 0;
  1151. DHD_ERROR(("%s: TXCPLT hwa_db_type:0x%x RXCPLT hwa_db_type:0x%x\n",
  1152. __FUNCTION__, prot->d2hring_tx_cpln.hwa_db_type,
  1153. prot->d2hring_rx_cpln.hwa_db_type));
  1154. }
  1155. if (dhd->d2h_sync_mode & PCIE_SHARED_D2H_SYNC_SEQNUM) {
  1156. prot->d2h_sync_cb = dhd_prot_d2h_sync_seqnum;
  1157. #ifdef EWP_EDL
  1158. prot->d2h_edl_sync_cb = dhd_prot_d2h_sync_edl;
  1159. #endif /* EWP_EDL */
  1160. DHD_ERROR(("%s(): D2H sync mechanism is SEQNUM \r\n", __FUNCTION__));
  1161. } else if (dhd->d2h_sync_mode & PCIE_SHARED_D2H_SYNC_XORCSUM) {
  1162. prot->d2h_sync_cb = dhd_prot_d2h_sync_xorcsum;
  1163. #ifdef EWP_EDL
  1164. prot->d2h_edl_sync_cb = dhd_prot_d2h_sync_edl;
  1165. #endif /* EWP_EDL */
  1166. DHD_ERROR(("%s(): D2H sync mechanism is XORCSUM \r\n", __FUNCTION__));
  1167. } else {
  1168. prot->d2h_sync_cb = dhd_prot_d2h_sync_none;
  1169. #ifdef EWP_EDL
  1170. prot->d2h_edl_sync_cb = dhd_prot_d2h_sync_edl_none;
  1171. #endif /* EWP_EDL */
  1172. DHD_ERROR(("%s(): D2H sync mechanism is NONE \r\n", __FUNCTION__));
  1173. }
  1174. }
  1175. /**
  1176. * dhd_prot_h2d_sync_init - Per H2D common ring, setup the msgbuf ring seqnum
  1177. */
  1178. static void
  1179. dhd_prot_h2d_sync_init(dhd_pub_t *dhd)
  1180. {
  1181. dhd_prot_t *prot = dhd->prot;
  1182. prot->h2dring_rxp_subn.seqnum = H2D_EPOCH_INIT_VAL;
  1183. if (HWA_ACTIVE(dhd)) {
  1184. prot->h2dring_rxp_subn.hwa_db_type =
  1185. (dhd->bus->hwa_enab_bmap & HWA_ENAB_BITMAP_RXPOST) ? HWA_DB_TYPE_RXPOST : 0;
  1186. DHD_ERROR(("%s: RXPOST hwa_db_type:0x%x\n",
  1187. __FUNCTION__, prot->h2dring_rxp_subn.hwa_db_type));
  1188. }
  1189. prot->h2dring_rxp_subn.current_phase = 0;
  1190. prot->h2dring_ctrl_subn.seqnum = H2D_EPOCH_INIT_VAL;
  1191. prot->h2dring_ctrl_subn.current_phase = 0;
  1192. }
  1193. /* +----------------- End of PCIE DHD H2D DMA SYNC ------------------------+ */
  1194. /*
  1195. * +---------------------------------------------------------------------------+
  1196. * PCIE DMA-able buffer. Sets up a dhd_dma_buf_t object, which includes the
  1197. * virtual and physical address, the buffer length and the DMA handler.
  1198. * A secdma handler is also included in the dhd_dma_buf object.
  1199. * +---------------------------------------------------------------------------+
  1200. */
  1201. static INLINE void
  1202. dhd_base_addr_htolpa(sh_addr_t *base_addr, dmaaddr_t pa)
  1203. {
  1204. base_addr->low_addr = htol32(PHYSADDRLO(pa));
  1205. base_addr->high_addr = htol32(PHYSADDRHI(pa));
  1206. }
  1207. /**
  1208. * dhd_dma_buf_audit - Any audits on a DHD DMA Buffer.
  1209. */
  1210. static int
  1211. dhd_dma_buf_audit(dhd_pub_t *dhd, dhd_dma_buf_t *dma_buf)
  1212. {
  1213. uint32 pa_lowaddr, end; /* dongle uses 32bit ptr arithmetic */
  1214. ASSERT(dma_buf);
  1215. pa_lowaddr = PHYSADDRLO(dma_buf->pa);
  1216. ASSERT(PHYSADDRLO(dma_buf->pa) || PHYSADDRHI(dma_buf->pa));
  1217. ASSERT(ISALIGNED(pa_lowaddr, DMA_ALIGN_LEN));
  1218. ASSERT(dma_buf->len != 0);
  1219. /* test 32bit offset arithmetic over dma buffer for loss of carry-over */
  1220. end = (pa_lowaddr + dma_buf->len); /* end address */
  1221. if ((end & 0xFFFFFFFF) < (pa_lowaddr & 0xFFFFFFFF)) { /* exclude carryover */
  1222. DHD_ERROR(("%s: dma_buf %x len %d spans dongle 32bit ptr arithmetic\n",
  1223. __FUNCTION__, pa_lowaddr, dma_buf->len));
  1224. return BCME_ERROR;
  1225. }
  1226. return BCME_OK;
  1227. }
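  /*
   * Example (illustrative): with pa_lowaddr == 0xFFFFF000 and len == 0x2000, the
   * 32-bit end address wraps to 0x00001000, which is less than pa_lowaddr, so the
   * buffer would straddle the dongle's 32-bit pointer arithmetic and the audit
   * above returns BCME_ERROR.
   */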
  1228. /**
  1229. * dhd_dma_buf_alloc - Allocate a cache coherent DMA-able buffer.
  1230. * returns BCME_OK=0 on success
  1231. * returns non-zero negative error value on failure.
  1232. */
  1233. int
  1234. dhd_dma_buf_alloc(dhd_pub_t *dhd, dhd_dma_buf_t *dma_buf, uint32 buf_len)
  1235. {
  1236. uint32 dma_pad = 0;
  1237. osl_t *osh = dhd->osh;
  1238. uint16 dma_align = DMA_ALIGN_LEN;
  1239. uint32 rem = 0;
  1240. ASSERT(dma_buf != NULL);
  1241. ASSERT(dma_buf->va == NULL);
  1242. ASSERT(dma_buf->len == 0);
  1243. /* Pad the buffer length to align to cacheline size. */
  1244. rem = (buf_len % DHD_DMA_PAD);
  1245. dma_pad = rem ? (DHD_DMA_PAD - rem) : 0;
  1246. dma_buf->va = DMA_ALLOC_CONSISTENT(osh, buf_len + dma_pad,
  1247. dma_align, &dma_buf->_alloced, &dma_buf->pa, &dma_buf->dmah);
  1248. if (dma_buf->va == NULL) {
  1249. DHD_ERROR(("%s: buf_len %d, no memory available\n",
  1250. __FUNCTION__, buf_len));
  1251. return BCME_NOMEM;
  1252. }
  1253. dma_buf->len = buf_len; /* not including padded len */
  1254. if (dhd_dma_buf_audit(dhd, dma_buf) != BCME_OK) { /* audit dma buf */
  1255. dhd_dma_buf_free(dhd, dma_buf);
  1256. return BCME_ERROR;
  1257. }
  1258. dhd_dma_buf_reset(dhd, dma_buf); /* zero out and cache flush */
  1259. return BCME_OK;
  1260. }
  1261. /**
  1262. * dhd_dma_buf_reset - Reset a cache coherent DMA-able buffer.
  1263. */
  1264. static void
  1265. dhd_dma_buf_reset(dhd_pub_t *dhd, dhd_dma_buf_t *dma_buf)
  1266. {
  1267. if ((dma_buf == NULL) || (dma_buf->va == NULL))
  1268. return;
  1269. (void)dhd_dma_buf_audit(dhd, dma_buf);
  1270. /* Zero out the entire buffer and cache flush */
  1271. memset((void*)dma_buf->va, 0, dma_buf->len);
  1272. OSL_CACHE_FLUSH((void *)dma_buf->va, dma_buf->len);
  1273. }
  1274. /**
  1275. * dhd_dma_buf_free - Free a DMA-able buffer that was previously allocated using
  1276. * dhd_dma_buf_alloc().
  1277. */
  1278. void
  1279. dhd_dma_buf_free(dhd_pub_t *dhd, dhd_dma_buf_t *dma_buf)
  1280. {
  1281. osl_t *osh = dhd->osh;
  1282. ASSERT(dma_buf);
  1283. if (dma_buf->va == NULL)
  1284. return; /* Allow for free invocation, when alloc failed */
  1285. /* DEBUG: dhd_dma_buf_reset(dhd, dma_buf) */
  1286. (void)dhd_dma_buf_audit(dhd, dma_buf);
  1287. /* dma buffer may have been padded at allocation */
  1288. DMA_FREE_CONSISTENT(osh, dma_buf->va, dma_buf->_alloced,
  1289. dma_buf->pa, dma_buf->dmah);
  1290. memset(dma_buf, 0, sizeof(dhd_dma_buf_t));
  1291. }
  1292. /**
  1293. * dhd_dma_buf_init - Initialize a dhd_dma_buf with specified values.
  1294. * Do not use dhd_dma_buf_init to zero out a dhd_dma_buf_t object. Use memset 0.
  1295. */
  1296. void
  1297. dhd_dma_buf_init(dhd_pub_t *dhd, void *dhd_dma_buf,
  1298. void *va, uint32 len, dmaaddr_t pa, void *dmah, void *secdma)
  1299. {
  1300. dhd_dma_buf_t *dma_buf;
  1301. ASSERT(dhd_dma_buf);
  1302. dma_buf = (dhd_dma_buf_t *)dhd_dma_buf;
  1303. dma_buf->va = va;
  1304. dma_buf->len = len;
  1305. dma_buf->pa = pa;
  1306. dma_buf->dmah = dmah;
  1307. dma_buf->secdma = secdma;
  1308. /* Audit user defined configuration */
  1309. (void)dhd_dma_buf_audit(dhd, dma_buf);
  1310. }
  1311. /* +------------------ End of PCIE DHD DMA BUF ADT ------------------------+ */
  1312. /*
  1313. * +---------------------------------------------------------------------------+
  1314. * DHD_MAP_PKTID_LOGGING
  1315. * Logging of PKTID and DMA map/unmap information, used for debugging SMMU
  1316. * fault issues on customer platforms.
  1317. * +---------------------------------------------------------------------------+
  1318. */
  1319. #ifdef DHD_MAP_PKTID_LOGGING
  1320. typedef struct dhd_pktid_log_item {
  1321. dmaaddr_t pa; /* DMA bus address */
  1322. uint64 ts_nsec; /* Timestamp: nsec */
  1323. uint32 size; /* DMA map/unmap size */
  1324. uint32 pktid; /* Packet ID */
  1325. uint8 pkttype; /* Packet Type */
  1326. uint8 rsvd[7]; /* Reserved for future use */
  1327. } dhd_pktid_log_item_t;
  1328. typedef struct dhd_pktid_log {
  1329. uint32 items; /* number of total items */
  1330. uint32 index; /* index of pktid_log_item */
  1331. dhd_pktid_log_item_t map[0]; /* metadata storage */
  1332. } dhd_pktid_log_t;
  1333. typedef void * dhd_pktid_log_handle_t; /* opaque handle to pktid log */
  1334. #define MAX_PKTID_LOG (2048)
  1335. #define DHD_PKTID_LOG_ITEM_SZ (sizeof(dhd_pktid_log_item_t))
  1336. #define DHD_PKTID_LOG_SZ(items) (uint32)((sizeof(dhd_pktid_log_t)) + \
  1337. ((DHD_PKTID_LOG_ITEM_SZ) * (items)))
  1338. #define DHD_PKTID_LOG_INIT(dhd, hdl) dhd_pktid_logging_init((dhd), (hdl))
  1339. #define DHD_PKTID_LOG_FINI(dhd, hdl) dhd_pktid_logging_fini((dhd), (hdl))
  1340. #define DHD_PKTID_LOG(dhd, hdl, pa, pktid, len, pkttype) \
  1341. dhd_pktid_logging((dhd), (hdl), (pa), (pktid), (len), (pkttype))
  1342. #define DHD_PKTID_LOG_DUMP(dhd) dhd_pktid_logging_dump((dhd))
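  /*
   * Usage sketch (illustrative, hypothetical call site): log a DMA map event for
   * a tx packet right after its PKTID is allocated, using the handle stored in
   * dhd->prot:
   *
   *	DHD_PKTID_LOG(dhd, dhd->prot->pktid_dma_map, pa, pktid, pktlen, PKTTYPE_DATA_TX);
   */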
  1343. static dhd_pktid_log_handle_t *
  1344. dhd_pktid_logging_init(dhd_pub_t *dhd, uint32 num_items)
  1345. {
  1346. dhd_pktid_log_t *log;
  1347. uint32 log_size;
  1348. log_size = DHD_PKTID_LOG_SZ(num_items);
  1349. log = (dhd_pktid_log_t *)MALLOCZ(dhd->osh, log_size);
  1350. if (log == NULL) {
  1351. DHD_ERROR(("%s: MALLOC failed for size %d\n",
  1352. __FUNCTION__, log_size));
  1353. return (dhd_pktid_log_handle_t *)NULL;
  1354. }
  1355. log->items = num_items;
  1356. log->index = 0;
  1357. return (dhd_pktid_log_handle_t *)log; /* opaque handle */
  1358. }
  1359. static void
  1360. dhd_pktid_logging_fini(dhd_pub_t *dhd, dhd_pktid_log_handle_t *handle)
  1361. {
  1362. dhd_pktid_log_t *log;
  1363. uint32 log_size;
  1364. if (handle == NULL) {
  1365. DHD_ERROR(("%s: handle is NULL\n", __FUNCTION__));
  1366. return;
  1367. }
  1368. log = (dhd_pktid_log_t *)handle;
  1369. log_size = DHD_PKTID_LOG_SZ(log->items);
  1370. MFREE(dhd->osh, handle, log_size);
  1371. }
  1372. static void
  1373. dhd_pktid_logging(dhd_pub_t *dhd, dhd_pktid_log_handle_t *handle, dmaaddr_t pa,
  1374. uint32 pktid, uint32 len, uint8 pkttype)
  1375. {
  1376. dhd_pktid_log_t *log;
  1377. uint32 idx;
  1378. if (handle == NULL) {
  1379. DHD_ERROR(("%s: handle is NULL\n", __FUNCTION__));
  1380. return;
  1381. }
  1382. log = (dhd_pktid_log_t *)handle;
  1383. idx = log->index;
  1384. log->map[idx].ts_nsec = OSL_LOCALTIME_NS();
  1385. log->map[idx].pa = pa;
  1386. log->map[idx].pktid = pktid;
  1387. log->map[idx].size = len;
  1388. log->map[idx].pkttype = pkttype;
  1389. log->index = (idx + 1) % (log->items); /* update index */
  1390. }
  1391. void
  1392. dhd_pktid_logging_dump(dhd_pub_t *dhd)
  1393. {
  1394. dhd_prot_t *prot = dhd->prot;
  1395. dhd_pktid_log_t *map_log, *unmap_log;
  1396. uint64 ts_sec, ts_usec;
  1397. if (prot == NULL) {
  1398. DHD_ERROR(("%s: prot is NULL\n", __FUNCTION__));
  1399. return;
  1400. }
  1401. map_log = (dhd_pktid_log_t *)(prot->pktid_dma_map);
  1402. unmap_log = (dhd_pktid_log_t *)(prot->pktid_dma_unmap);
  1403. OSL_GET_LOCALTIME(&ts_sec, &ts_usec);
  1404. if (map_log && unmap_log) {
  1405. DHD_ERROR(("%s: map_idx=%d unmap_idx=%d "
  1406. "current time=[%5lu.%06lu]\n", __FUNCTION__,
  1407. map_log->index, unmap_log->index,
  1408. (unsigned long)ts_sec, (unsigned long)ts_usec));
  1409. DHD_ERROR(("%s: pktid_map_log(pa)=0x%llx size=%d, "
  1410. "pktid_unmap_log(pa)=0x%llx size=%d\n", __FUNCTION__,
  1411. (uint64)__virt_to_phys((ulong)(map_log->map)),
  1412. (uint32)(DHD_PKTID_LOG_ITEM_SZ * map_log->items),
  1413. (uint64)__virt_to_phys((ulong)(unmap_log->map)),
  1414. (uint32)(DHD_PKTID_LOG_ITEM_SZ * unmap_log->items)));
  1415. }
  1416. }
  1417. #endif /* DHD_MAP_PKTID_LOGGING */
  1418. /* +----------------- End of DHD_MAP_PKTID_LOGGING -----------------------+ */
  1419. /*
  1420. * +---------------------------------------------------------------------------+
  1421. * PktId Map: Provides a native packet pointer to unique 32bit PktId mapping.
1422. * Its main purpose is to save memory on the dongle, though it has other uses as well.
1423. * The packet id map also includes storage for some packet parameters that
1424. * may be saved. A native packet pointer along with the parameters may be saved,
  1425. * and a unique 32bit pkt id will be returned. Later, the saved packet pointer
  1426. * and the metadata may be retrieved using the previously allocated packet id.
  1427. * +---------------------------------------------------------------------------+
  1428. */
  1429. #define DHD_PCIE_PKTID
  1430. #define MAX_CTRL_PKTID (1024) /* Maximum number of pktids supported */
  1431. #define MAX_RX_PKTID (1024)
  1432. #define MAX_TX_PKTID (3072 * 12)
  1433. /* On Router, the pktptr serves as a pktid. */
  1434. #if defined(PROP_TXSTATUS) && !defined(DHD_PCIE_PKTID)
  1435. #error "PKTIDMAP must be supported with PROP_TXSTATUS/WLFC"
  1436. #endif // endif
  1437. /* Enum for marking the buffer color based on usage */
  1438. typedef enum dhd_pkttype {
  1439. PKTTYPE_DATA_TX = 0,
  1440. PKTTYPE_DATA_RX,
  1441. PKTTYPE_IOCTL_RX,
  1442. PKTTYPE_EVENT_RX,
  1443. PKTTYPE_INFO_RX,
1444. /* dhd_prot_pkt_free does no pkttype check, e.g. when a pktid was reserved but no space was available */
  1445. PKTTYPE_NO_CHECK,
  1446. PKTTYPE_TSBUF_RX
  1447. } dhd_pkttype_t;
  1448. #define DHD_PKTID_MIN_AVAIL_COUNT 512U
  1449. #define DHD_PKTID_DEPLETED_MAX_COUNT (DHD_PKTID_MIN_AVAIL_COUNT * 2U)
  1450. #define DHD_PKTID_INVALID (0U)
  1451. #define DHD_IOCTL_REQ_PKTID (0xFFFE)
  1452. #define DHD_FAKE_PKTID (0xFACE)
  1453. #define DHD_H2D_DBGRING_REQ_PKTID 0xFFFD
  1454. #define DHD_D2H_DBGRING_REQ_PKTID 0xFFFC
  1455. #define DHD_H2D_HOSTTS_REQ_PKTID 0xFFFB
  1456. #define DHD_H2D_BTLOGRING_REQ_PKTID 0xFFFA
  1457. #define DHD_D2H_BTLOGRING_REQ_PKTID 0xFFF9
  1458. #define DHD_H2D_SNAPSHOT_UPLOAD_REQ_PKTID 0xFFF8
  1459. #ifdef DHD_HP2P
  1460. #define DHD_D2H_HPPRING_TXREQ_PKTID 0xFFF7
  1461. #define DHD_D2H_HPPRING_RXREQ_PKTID 0xFFF6
  1462. #endif /* DHD_HP2P */
  1463. #define IS_FLOWRING(ring) \
  1464. ((strncmp(ring->name, "h2dflr", sizeof("h2dflr"))) == (0))
  1465. typedef void * dhd_pktid_map_handle_t; /* opaque handle to a pktid map */
  1466. /* Construct a packet id mapping table, returning an opaque map handle */
  1467. static dhd_pktid_map_handle_t *dhd_pktid_map_init(dhd_pub_t *dhd, uint32 num_items);
  1468. /* Destroy a packet id mapping table, freeing all packets active in the table */
  1469. static void dhd_pktid_map_fini(dhd_pub_t *dhd, dhd_pktid_map_handle_t *map);
  1470. #define DHD_NATIVE_TO_PKTID_INIT(dhd, items) dhd_pktid_map_init((dhd), (items))
  1471. #define DHD_NATIVE_TO_PKTID_RESET(dhd, map) dhd_pktid_map_reset((dhd), (map))
  1472. #define DHD_NATIVE_TO_PKTID_FINI(dhd, map) dhd_pktid_map_fini((dhd), (map))
  1473. #define DHD_NATIVE_TO_PKTID_FINI_IOCTL(osh, map) dhd_pktid_map_fini_ioctl((osh), (map))
  1474. #ifdef MACOSX_DHD
  1475. #undef DHD_PCIE_PKTID
  1476. #define DHD_PCIE_PKTID 1
  1477. #endif /* MACOSX_DHD */
  1478. #if defined(DHD_PCIE_PKTID)
  1479. #if defined(MACOSX_DHD)
  1480. #define IOCTLRESP_USE_CONSTMEM
  1481. static void free_ioctl_return_buffer(dhd_pub_t *dhd, dhd_dma_buf_t *retbuf);
  1482. static int alloc_ioctl_return_buffer(dhd_pub_t *dhd, dhd_dma_buf_t *retbuf);
  1483. #endif // endif
  1484. /* Determine number of pktids that are available */
  1485. static INLINE uint32 dhd_pktid_map_avail_cnt(dhd_pktid_map_handle_t *handle);
  1486. /* Allocate a unique pktid against which a pkt and some metadata is saved */
  1487. static INLINE uint32 dhd_pktid_map_reserve(dhd_pub_t *dhd, dhd_pktid_map_handle_t *handle,
  1488. void *pkt, dhd_pkttype_t pkttype);
  1489. static INLINE void dhd_pktid_map_save(dhd_pub_t *dhd, dhd_pktid_map_handle_t *handle,
  1490. void *pkt, uint32 nkey, dmaaddr_t pa, uint32 len, uint8 dma,
  1491. void *dmah, void *secdma, dhd_pkttype_t pkttype);
  1492. static uint32 dhd_pktid_map_alloc(dhd_pub_t *dhd, dhd_pktid_map_handle_t *map,
  1493. void *pkt, dmaaddr_t pa, uint32 len, uint8 dma,
  1494. void *dmah, void *secdma, dhd_pkttype_t pkttype);
  1495. /* Return an allocated pktid, retrieving previously saved pkt and metadata */
  1496. static void *dhd_pktid_map_free(dhd_pub_t *dhd, dhd_pktid_map_handle_t *map,
  1497. uint32 id, dmaaddr_t *pa, uint32 *len, void **dmah,
  1498. void **secdma, dhd_pkttype_t pkttype, bool rsv_locker);
  1499. /*
  1500. * DHD_PKTID_AUDIT_ENABLED: Audit of PktIds in DHD for duplicate alloc and frees
  1501. *
  1502. * DHD_PKTID_AUDIT_MAP: Audit the LIFO or FIFO PktIdMap allocator
  1503. * DHD_PKTID_AUDIT_RING: Audit the pktid during producer/consumer ring operation
  1504. *
  1505. * CAUTION: When DHD_PKTID_AUDIT_ENABLED is defined,
1506. * only one of DHD_PKTID_AUDIT_MAP or DHD_PKTID_AUDIT_RING may be selected.
  1507. */
  1508. #if defined(DHD_PKTID_AUDIT_ENABLED)
  1509. #define USE_DHD_PKTID_AUDIT_LOCK 1
  1510. /* Audit the pktidmap allocator */
  1511. /* #define DHD_PKTID_AUDIT_MAP */
  1512. /* Audit the pktid during production/consumption of workitems */
  1513. #define DHD_PKTID_AUDIT_RING
  1514. #if defined(DHD_PKTID_AUDIT_MAP) && defined(DHD_PKTID_AUDIT_RING)
1515. #error "Only one of MAP or RING audit may be enabled at a time."
  1516. #endif /* DHD_PKTID_AUDIT_MAP && DHD_PKTID_AUDIT_RING */
  1517. #define DHD_DUPLICATE_ALLOC 1
  1518. #define DHD_DUPLICATE_FREE 2
  1519. #define DHD_TEST_IS_ALLOC 3
  1520. #define DHD_TEST_IS_FREE 4
  1521. typedef enum dhd_pktid_map_type {
  1522. DHD_PKTID_MAP_TYPE_CTRL = 1,
  1523. DHD_PKTID_MAP_TYPE_TX,
  1524. DHD_PKTID_MAP_TYPE_RX,
  1525. DHD_PKTID_MAP_TYPE_UNKNOWN
  1526. } dhd_pktid_map_type_t;
  1527. #ifdef USE_DHD_PKTID_AUDIT_LOCK
  1528. #define DHD_PKTID_AUDIT_LOCK_INIT(osh) dhd_os_spin_lock_init(osh)
  1529. #define DHD_PKTID_AUDIT_LOCK_DEINIT(osh, lock) dhd_os_spin_lock_deinit(osh, lock)
  1530. #define DHD_PKTID_AUDIT_LOCK(lock) dhd_os_spin_lock(lock)
  1531. #define DHD_PKTID_AUDIT_UNLOCK(lock, flags) dhd_os_spin_unlock(lock, flags)
  1532. #else
  1533. #define DHD_PKTID_AUDIT_LOCK_INIT(osh) (void *)(1)
  1534. #define DHD_PKTID_AUDIT_LOCK_DEINIT(osh, lock) do { /* noop */ } while (0)
  1535. #define DHD_PKTID_AUDIT_LOCK(lock) 0
  1536. #define DHD_PKTID_AUDIT_UNLOCK(lock, flags) do { /* noop */ } while (0)
  1537. #endif /* !USE_DHD_PKTID_AUDIT_LOCK */
  1538. #endif /* DHD_PKTID_AUDIT_ENABLED */
  1539. #define USE_DHD_PKTID_LOCK 1
  1540. #ifdef USE_DHD_PKTID_LOCK
  1541. #define DHD_PKTID_LOCK_INIT(osh) dhd_os_spin_lock_init(osh)
  1542. #define DHD_PKTID_LOCK_DEINIT(osh, lock) dhd_os_spin_lock_deinit(osh, lock)
  1543. #define DHD_PKTID_LOCK(lock, flags) (flags) = dhd_os_spin_lock(lock)
  1544. #define DHD_PKTID_UNLOCK(lock, flags) dhd_os_spin_unlock(lock, flags)
  1545. #else
  1546. #define DHD_PKTID_LOCK_INIT(osh) (void *)(1)
  1547. #define DHD_PKTID_LOCK_DEINIT(osh, lock) \
  1548. do { \
  1549. BCM_REFERENCE(osh); \
  1550. BCM_REFERENCE(lock); \
  1551. } while (0)
  1552. #define DHD_PKTID_LOCK(lock) 0
  1553. #define DHD_PKTID_UNLOCK(lock, flags) \
  1554. do { \
  1555. BCM_REFERENCE(lock); \
  1556. BCM_REFERENCE(flags); \
  1557. } while (0)
  1558. #endif /* !USE_DHD_PKTID_LOCK */
  1559. typedef enum dhd_locker_state {
  1560. LOCKER_IS_FREE,
  1561. LOCKER_IS_BUSY,
  1562. LOCKER_IS_RSVD
  1563. } dhd_locker_state_t;
  1564. /* Packet metadata saved in packet id mapper */
  1565. typedef struct dhd_pktid_item {
  1566. dhd_locker_state_t state; /* tag a locker to be free, busy or reserved */
  1567. uint8 dir; /* dma map direction (Tx=flush or Rx=invalidate) */
  1568. dhd_pkttype_t pkttype; /* pktlists are maintained based on pkttype */
  1569. uint16 len; /* length of mapped packet's buffer */
  1570. void *pkt; /* opaque native pointer to a packet */
  1571. dmaaddr_t pa; /* physical address of mapped packet's buffer */
  1572. void *dmah; /* handle to OS specific DMA map */
  1573. void *secdma;
  1574. } dhd_pktid_item_t;
  1575. typedef uint32 dhd_pktid_key_t;
  1576. typedef struct dhd_pktid_map {
  1577. uint32 items; /* total items in map */
  1578. uint32 avail; /* total available items */
  1579. int failures; /* lockers unavailable count */
  1580. /* Spinlock to protect dhd_pktid_map in process/tasklet context */
  1581. void *pktid_lock; /* Used when USE_DHD_PKTID_LOCK is defined */
  1582. #if defined(DHD_PKTID_AUDIT_ENABLED)
  1583. void *pktid_audit_lock;
  1584. struct bcm_mwbmap *pktid_audit; /* multi word bitmap based audit */
  1585. #endif /* DHD_PKTID_AUDIT_ENABLED */
  1586. dhd_pktid_key_t *keys; /* map_items +1 unique pkt ids */
  1587. dhd_pktid_item_t lockers[0]; /* metadata storage */
  1588. } dhd_pktid_map_t;
  1589. /*
  1590. * PktId (Locker) #0 is never allocated and is considered invalid.
  1591. *
  1592. * On request for a pktid, a value DHD_PKTID_INVALID must be treated as a
  1593. * depleted pktid pool and must not be used by the caller.
  1594. *
  1595. * Likewise, a caller must never free a pktid of value DHD_PKTID_INVALID.
  1596. */
  1597. #define DHD_PKTID_FREE_LOCKER (FALSE)
  1598. #define DHD_PKTID_RSV_LOCKER (TRUE)
  1599. #define DHD_PKTID_ITEM_SZ (sizeof(dhd_pktid_item_t))
  1600. #define DHD_PKIDMAP_ITEMS(items) (items)
  1601. #define DHD_PKTID_MAP_SZ(items) (sizeof(dhd_pktid_map_t) + \
  1602. (DHD_PKTID_ITEM_SZ * ((items) + 1)))
  1603. #define DHD_PKTIDMAP_KEYS_SZ(items) (sizeof(dhd_pktid_key_t) * ((items) + 1))
  1604. #define DHD_NATIVE_TO_PKTID_RESET_IOCTL(dhd, map) dhd_pktid_map_reset_ioctl((dhd), (map))
  1605. /* Convert a packet to a pktid, and save pkt pointer in busy locker */
  1606. #define DHD_NATIVE_TO_PKTID_RSV(dhd, map, pkt, pkttype) \
  1607. dhd_pktid_map_reserve((dhd), (map), (pkt), (pkttype))
  1608. /* Reuse a previously reserved locker to save packet params */
  1609. #define DHD_NATIVE_TO_PKTID_SAVE(dhd, map, pkt, nkey, pa, len, dir, dmah, secdma, pkttype) \
  1610. dhd_pktid_map_save((dhd), (map), (void *)(pkt), (nkey), (pa), (uint32)(len), \
  1611. (uint8)(dir), (void *)(dmah), (void *)(secdma), \
  1612. (dhd_pkttype_t)(pkttype))
  1613. /* Convert a packet to a pktid, and save packet params in locker */
  1614. #define DHD_NATIVE_TO_PKTID(dhd, map, pkt, pa, len, dir, dmah, secdma, pkttype) \
  1615. dhd_pktid_map_alloc((dhd), (map), (void *)(pkt), (pa), (uint32)(len), \
  1616. (uint8)(dir), (void *)(dmah), (void *)(secdma), \
  1617. (dhd_pkttype_t)(pkttype))
  1618. /* Convert pktid to a packet, and free the locker */
  1619. #define DHD_PKTID_TO_NATIVE(dhd, map, pktid, pa, len, dmah, secdma, pkttype) \
  1620. dhd_pktid_map_free((dhd), (map), (uint32)(pktid), \
  1621. (dmaaddr_t *)&(pa), (uint32 *)&(len), (void **)&(dmah), \
  1622. (void **)&(secdma), (dhd_pkttype_t)(pkttype), DHD_PKTID_FREE_LOCKER)
  1623. /* Convert the pktid to a packet, empty locker, but keep it reserved */
  1624. #define DHD_PKTID_TO_NATIVE_RSV(dhd, map, pktid, pa, len, dmah, secdma, pkttype) \
  1625. dhd_pktid_map_free((dhd), (map), (uint32)(pktid), \
  1626. (dmaaddr_t *)&(pa), (uint32 *)&(len), (void **)&(dmah), \
  1627. (void **)&(secdma), (dhd_pkttype_t)(pkttype), DHD_PKTID_RSV_LOCKER)
  1628. #define DHD_PKTID_AVAIL(map) dhd_pktid_map_avail_cnt(map)
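/*
 * Illustrative sketch (not part of the driver): the usual round trip through
 * the conversion macros above. On the post path a native packet and its DMA
 * parameters are parked in a locker and a pktid is returned; on the completion
 * path the pktid is converted back and the locker is released. The locals
 * (pkt, pa, len, dir, dmah, secdma) are hypothetical.
 *
 *     pktid = DHD_NATIVE_TO_PKTID(dhd, prot->pktid_tx_map, pkt, pa, len,
 *             dir, dmah, secdma, PKTTYPE_DATA_TX);
 *     if (pktid == DHD_PKTID_INVALID)
 *             (pool depleted: drop or retry the post)
 *     ...
 *     pkt = DHD_PKTID_TO_NATIVE(dhd, prot->pktid_tx_map, pktid, pa, len,
 *             dmah, secdma, PKTTYPE_DATA_TX);
 */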
  1629. #if defined(DHD_PKTID_AUDIT_ENABLED)
  1630. static int
  1631. dhd_get_pktid_map_type(dhd_pub_t *dhd, dhd_pktid_map_t *pktid_map)
  1632. {
  1633. dhd_prot_t *prot = dhd->prot;
  1634. int pktid_map_type;
  1635. if (pktid_map == prot->pktid_ctrl_map) {
  1636. pktid_map_type = DHD_PKTID_MAP_TYPE_CTRL;
  1637. } else if (pktid_map == prot->pktid_tx_map) {
  1638. pktid_map_type = DHD_PKTID_MAP_TYPE_TX;
  1639. } else if (pktid_map == prot->pktid_rx_map) {
  1640. pktid_map_type = DHD_PKTID_MAP_TYPE_RX;
  1641. } else {
  1642. pktid_map_type = DHD_PKTID_MAP_TYPE_UNKNOWN;
  1643. }
  1644. return pktid_map_type;
  1645. }
  1646. /**
  1647. * __dhd_pktid_audit - Use the mwbmap to audit validity of a pktid.
  1648. */
  1649. static int
  1650. __dhd_pktid_audit(dhd_pub_t *dhd, dhd_pktid_map_t *pktid_map, uint32 pktid,
  1651. const int test_for, const char *errmsg)
  1652. {
  1653. #define DHD_PKT_AUDIT_STR "ERROR: %16s Host PktId Audit: "
  1654. struct bcm_mwbmap *handle;
  1655. uint32 flags;
  1656. bool ignore_audit;
  1657. int error = BCME_OK;
  1658. if (pktid_map == (dhd_pktid_map_t *)NULL) {
  1659. DHD_ERROR((DHD_PKT_AUDIT_STR "Pkt id map NULL\n", errmsg));
  1660. return BCME_OK;
  1661. }
  1662. flags = DHD_PKTID_AUDIT_LOCK(pktid_map->pktid_audit_lock);
  1663. handle = pktid_map->pktid_audit;
  1664. if (handle == (struct bcm_mwbmap *)NULL) {
  1665. DHD_ERROR((DHD_PKT_AUDIT_STR "Handle NULL\n", errmsg));
  1666. goto out;
  1667. }
  1668. /* Exclude special pktids from audit */
1669. ignore_audit = (pktid == DHD_IOCTL_REQ_PKTID) || (pktid == DHD_FAKE_PKTID);
  1670. if (ignore_audit) {
  1671. goto out;
  1672. }
  1673. if ((pktid == DHD_PKTID_INVALID) || (pktid > pktid_map->items)) {
  1674. DHD_ERROR((DHD_PKT_AUDIT_STR "PktId<%d> invalid\n", errmsg, pktid));
  1675. error = BCME_ERROR;
  1676. goto out;
  1677. }
  1678. /* Perform audit */
  1679. switch (test_for) {
  1680. case DHD_DUPLICATE_ALLOC:
  1681. if (!bcm_mwbmap_isfree(handle, pktid)) {
  1682. DHD_ERROR((DHD_PKT_AUDIT_STR "PktId<%d> alloc duplicate\n",
  1683. errmsg, pktid));
  1684. error = BCME_ERROR;
  1685. } else {
  1686. bcm_mwbmap_force(handle, pktid);
  1687. }
  1688. break;
  1689. case DHD_DUPLICATE_FREE:
  1690. if (bcm_mwbmap_isfree(handle, pktid)) {
  1691. DHD_ERROR((DHD_PKT_AUDIT_STR "PktId<%d> free duplicate\n",
  1692. errmsg, pktid));
  1693. error = BCME_ERROR;
  1694. } else {
  1695. bcm_mwbmap_free(handle, pktid);
  1696. }
  1697. break;
  1698. case DHD_TEST_IS_ALLOC:
  1699. if (bcm_mwbmap_isfree(handle, pktid)) {
  1700. DHD_ERROR((DHD_PKT_AUDIT_STR "PktId<%d> is not allocated\n",
  1701. errmsg, pktid));
  1702. error = BCME_ERROR;
  1703. }
  1704. break;
  1705. case DHD_TEST_IS_FREE:
  1706. if (!bcm_mwbmap_isfree(handle, pktid)) {
1707. DHD_ERROR((DHD_PKT_AUDIT_STR "PktId<%d> is not free\n",
  1708. errmsg, pktid));
  1709. error = BCME_ERROR;
  1710. }
  1711. break;
  1712. default:
  1713. DHD_ERROR(("%s: Invalid test case: %d\n", __FUNCTION__, test_for));
  1714. error = BCME_ERROR;
  1715. break;
  1716. }
  1717. out:
  1718. DHD_PKTID_AUDIT_UNLOCK(pktid_map->pktid_audit_lock, flags);
  1719. if (error != BCME_OK) {
  1720. dhd->pktid_audit_failed = TRUE;
  1721. }
  1722. return error;
  1723. }
  1724. static int
  1725. dhd_pktid_audit(dhd_pub_t *dhd, dhd_pktid_map_t *pktid_map, uint32 pktid,
  1726. const int test_for, const char *errmsg)
  1727. {
  1728. int ret = BCME_OK;
  1729. ret = __dhd_pktid_audit(dhd, pktid_map, pktid, test_for, errmsg);
  1730. if (ret == BCME_ERROR) {
  1731. DHD_ERROR(("%s: Got Pkt Id Audit failure: PKTID<%d> PKTID MAP TYPE<%d>\n",
  1732. __FUNCTION__, pktid, dhd_get_pktid_map_type(dhd, pktid_map)));
  1733. dhd_pktid_error_handler(dhd);
  1734. }
  1735. return ret;
  1736. }
  1737. #define DHD_PKTID_AUDIT(dhdp, map, pktid, test_for) \
  1738. dhd_pktid_audit((dhdp), (dhd_pktid_map_t *)(map), (pktid), (test_for), __FUNCTION__)
  1739. static int
  1740. dhd_pktid_audit_ring_debug(dhd_pub_t *dhdp, dhd_pktid_map_t *map, uint32 pktid,
  1741. const int test_for, void *msg, uint32 msg_len, const char *func)
  1742. {
  1743. int ret = BCME_OK;
  1744. if (dhd_query_bus_erros(dhdp)) {
  1745. return BCME_ERROR;
  1746. }
  1747. ret = __dhd_pktid_audit(dhdp, map, pktid, test_for, func);
  1748. if (ret == BCME_ERROR) {
  1749. DHD_ERROR(("%s: Got Pkt Id Audit failure: PKTID<%d> PKTID MAP TYPE<%d>\n",
  1750. __FUNCTION__, pktid, dhd_get_pktid_map_type(dhdp, map)));
  1751. prhex(func, (uchar *)msg, msg_len);
  1752. dhd_pktid_error_handler(dhdp);
  1753. }
  1754. return ret;
  1755. }
  1756. #define DHD_PKTID_AUDIT_RING_DEBUG(dhdp, map, pktid, test_for, msg, msg_len) \
  1757. dhd_pktid_audit_ring_debug((dhdp), (dhd_pktid_map_t *)(map), \
  1758. (pktid), (test_for), msg, msg_len, __FUNCTION__)
  1759. #endif /* DHD_PKTID_AUDIT_ENABLED */
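/*
 * Illustrative sketch (not part of the driver): how the audit hooks above are
 * invoked on the fast path when DHD_PKTID_AUDIT_ENABLED is defined. Producer
 * code audits for a duplicate allocation before posting a work item, and the
 * completion path audits for a duplicate free before releasing the pktid;
 * msg/msg_len name the hypothetical work item being validated.
 *
 *     DHD_PKTID_AUDIT_RING_DEBUG(dhd, prot->pktid_tx_map, pktid,
 *             DHD_DUPLICATE_ALLOC, msg, msg_len);
 *     ...
 *     DHD_PKTID_AUDIT_RING_DEBUG(dhd, prot->pktid_tx_map, pktid,
 *             DHD_DUPLICATE_FREE, msg, msg_len);
 */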
  1760. /**
  1761. * +---------------------------------------------------------------------------+
  1762. * Packet to Packet Id mapper using a <numbered_key, locker> paradigm.
  1763. *
  1764. * dhd_pktid_map manages a set of unique Packet Ids range[1..MAX_xxx_PKTID].
  1765. *
  1766. * dhd_pktid_map_alloc() may be used to save some packet metadata, and a unique
  1767. * packet id is returned. This unique packet id may be used to retrieve the
  1768. * previously saved packet metadata, using dhd_pktid_map_free(). On invocation
  1769. * of dhd_pktid_map_free(), the unique packet id is essentially freed. A
  1770. * subsequent call to dhd_pktid_map_alloc() may reuse this packet id.
  1771. *
  1772. * Implementation Note:
  1773. * Convert this into a <key,locker> abstraction and place into bcmutils !
  1774. * Locker abstraction should treat contents as opaque storage, and a
1775. * callback should be registered to handle busy lockers in the destructor.
  1776. *
  1777. * +---------------------------------------------------------------------------+
  1778. */
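/*
 * Illustrative sketch (not part of the driver): lifecycle of one pktid map as
 * the protocol layer drives it. dhd_prot_attach() creates a map per traffic
 * class (ctrl/rx/tx); every pktid handed out afterwards comes from, and
 * returns to, that map until it is destroyed on detach.
 *
 *     map = DHD_NATIVE_TO_PKTID_INIT(dhd, MAX_TX_PKTID);
 *     if (map == NULL)
 *             (attach fails, typically with BCME_NOMEM)
 *     ...
 *     DHD_NATIVE_TO_PKTID_FINI(dhd, map);   (also frees any pending packets)
 */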
  1779. /** Allocate and initialize a mapper of num_items <numbered_key, locker> */
  1780. static dhd_pktid_map_handle_t *
  1781. dhd_pktid_map_init(dhd_pub_t *dhd, uint32 num_items)
  1782. {
  1783. void* osh;
  1784. uint32 nkey;
  1785. dhd_pktid_map_t *map;
  1786. uint32 dhd_pktid_map_sz;
  1787. uint32 map_items;
  1788. uint32 map_keys_sz;
  1789. osh = dhd->osh;
  1790. dhd_pktid_map_sz = DHD_PKTID_MAP_SZ(num_items);
  1791. map = (dhd_pktid_map_t *)VMALLOC(osh, dhd_pktid_map_sz);
  1792. if (map == NULL) {
1793. DHD_ERROR(("%s:%d: VMALLOC failed for size %d\n",
  1794. __FUNCTION__, __LINE__, dhd_pktid_map_sz));
  1795. return (dhd_pktid_map_handle_t *)NULL;
  1796. }
  1797. map->items = num_items;
  1798. map->avail = num_items;
  1799. map_items = DHD_PKIDMAP_ITEMS(map->items);
  1800. map_keys_sz = DHD_PKTIDMAP_KEYS_SZ(map->items);
  1801. /* Initialize the lock that protects this structure */
  1802. map->pktid_lock = DHD_PKTID_LOCK_INIT(osh);
  1803. if (map->pktid_lock == NULL) {
  1804. DHD_ERROR(("%s:%d: Lock init failed \r\n", __FUNCTION__, __LINE__));
  1805. goto error;
  1806. }
  1807. map->keys = (dhd_pktid_key_t *)MALLOC(osh, map_keys_sz);
  1808. if (map->keys == NULL) {
  1809. DHD_ERROR(("%s:%d: MALLOC failed for map->keys size %d\n",
  1810. __FUNCTION__, __LINE__, map_keys_sz));
  1811. goto error;
  1812. }
  1813. #if defined(DHD_PKTID_AUDIT_ENABLED)
  1814. /* Incarnate a hierarchical multiword bitmap for auditing pktid allocator */
  1815. map->pktid_audit = bcm_mwbmap_init(osh, map_items + 1);
  1816. if (map->pktid_audit == (struct bcm_mwbmap *)NULL) {
  1817. DHD_ERROR(("%s:%d: pktid_audit init failed\r\n", __FUNCTION__, __LINE__));
  1818. goto error;
  1819. } else {
  1820. DHD_ERROR(("%s:%d: pktid_audit init succeeded %d\n",
  1821. __FUNCTION__, __LINE__, map_items + 1));
  1822. }
  1823. map->pktid_audit_lock = DHD_PKTID_AUDIT_LOCK_INIT(osh);
  1824. #endif /* DHD_PKTID_AUDIT_ENABLED */
  1825. for (nkey = 1; nkey <= map_items; nkey++) { /* locker #0 is reserved */
  1826. map->keys[nkey] = nkey; /* populate with unique keys */
  1827. map->lockers[nkey].state = LOCKER_IS_FREE;
  1828. map->lockers[nkey].pkt = NULL; /* bzero: redundant */
  1829. map->lockers[nkey].len = 0;
  1830. }
  1831. /* Reserve pktid #0, i.e. DHD_PKTID_INVALID to be inuse */
  1832. map->lockers[DHD_PKTID_INVALID].state = LOCKER_IS_BUSY; /* tag locker #0 as inuse */
  1833. map->lockers[DHD_PKTID_INVALID].pkt = NULL; /* bzero: redundant */
  1834. map->lockers[DHD_PKTID_INVALID].len = 0;
  1835. #if defined(DHD_PKTID_AUDIT_ENABLED)
  1836. /* do not use dhd_pktid_audit() here, use bcm_mwbmap_force directly */
  1837. bcm_mwbmap_force(map->pktid_audit, DHD_PKTID_INVALID);
  1838. #endif /* DHD_PKTID_AUDIT_ENABLED */
  1839. return (dhd_pktid_map_handle_t *)map; /* opaque handle */
  1840. error:
  1841. if (map) {
  1842. #if defined(DHD_PKTID_AUDIT_ENABLED)
  1843. if (map->pktid_audit != (struct bcm_mwbmap *)NULL) {
  1844. bcm_mwbmap_fini(osh, map->pktid_audit); /* Destruct pktid_audit */
  1845. map->pktid_audit = (struct bcm_mwbmap *)NULL;
  1846. if (map->pktid_audit_lock)
  1847. DHD_PKTID_AUDIT_LOCK_DEINIT(osh, map->pktid_audit_lock);
  1848. }
  1849. #endif /* DHD_PKTID_AUDIT_ENABLED */
  1850. if (map->keys) {
  1851. MFREE(osh, map->keys, map_keys_sz);
  1852. }
  1853. if (map->pktid_lock) {
  1854. DHD_PKTID_LOCK_DEINIT(osh, map->pktid_lock);
  1855. }
  1856. VMFREE(osh, map, dhd_pktid_map_sz);
  1857. }
  1858. return (dhd_pktid_map_handle_t *)NULL;
  1859. }
  1860. /**
  1861. * Retrieve all allocated keys and free all <numbered_key, locker>.
1862. * Freeing implies: unmapping the buffers and freeing the native packet.
  1863. * This could have been a callback registered with the pktid mapper.
  1864. */
  1865. static void
  1866. dhd_pktid_map_reset(dhd_pub_t *dhd, dhd_pktid_map_handle_t *handle)
  1867. {
  1868. void *osh;
  1869. uint32 nkey;
  1870. dhd_pktid_map_t *map;
  1871. dhd_pktid_item_t *locker;
  1872. uint32 map_items;
  1873. unsigned long flags;
  1874. bool data_tx = FALSE;
  1875. map = (dhd_pktid_map_t *)handle;
  1876. DHD_PKTID_LOCK(map->pktid_lock, flags);
  1877. osh = dhd->osh;
  1878. map_items = DHD_PKIDMAP_ITEMS(map->items);
  1879. /* skip reserved KEY #0, and start from 1 */
  1880. for (nkey = 1; nkey <= map_items; nkey++) {
  1881. if (map->lockers[nkey].state == LOCKER_IS_BUSY) {
  1882. locker = &map->lockers[nkey];
  1883. locker->state = LOCKER_IS_FREE;
  1884. data_tx = (locker->pkttype == PKTTYPE_DATA_TX);
  1885. if (data_tx) {
  1886. OSL_ATOMIC_DEC(dhd->osh, &dhd->prot->active_tx_count);
  1887. }
  1888. #ifdef DHD_PKTID_AUDIT_RING
  1889. DHD_PKTID_AUDIT(dhd, map, nkey, DHD_DUPLICATE_FREE); /* duplicate frees */
  1890. #endif /* DHD_PKTID_AUDIT_RING */
  1891. #ifdef DHD_MAP_PKTID_LOGGING
  1892. DHD_PKTID_LOG(dhd, dhd->prot->pktid_dma_unmap,
  1893. locker->pa, nkey, locker->len,
  1894. locker->pkttype);
  1895. #endif /* DHD_MAP_PKTID_LOGGING */
  1896. {
  1897. if (SECURE_DMA_ENAB(dhd->osh))
  1898. SECURE_DMA_UNMAP(osh, locker->pa,
  1899. locker->len, locker->dir, 0,
  1900. locker->dmah, locker->secdma, 0);
  1901. else
  1902. DMA_UNMAP(osh, locker->pa, locker->len,
  1903. locker->dir, 0, locker->dmah);
  1904. }
  1905. dhd_prot_packet_free(dhd, (ulong*)locker->pkt,
  1906. locker->pkttype, data_tx);
  1907. }
  1908. else {
  1909. #ifdef DHD_PKTID_AUDIT_RING
  1910. DHD_PKTID_AUDIT(dhd, map, nkey, DHD_TEST_IS_FREE);
  1911. #endif /* DHD_PKTID_AUDIT_RING */
  1912. }
  1913. map->keys[nkey] = nkey; /* populate with unique keys */
  1914. }
  1915. map->avail = map_items;
  1916. memset(&map->lockers[1], 0, sizeof(dhd_pktid_item_t) * map_items);
  1917. DHD_PKTID_UNLOCK(map->pktid_lock, flags);
  1918. }
  1919. #ifdef IOCTLRESP_USE_CONSTMEM
  1920. /** Called in detach scenario. Releasing IOCTL buffers. */
  1921. static void
  1922. dhd_pktid_map_reset_ioctl(dhd_pub_t *dhd, dhd_pktid_map_handle_t *handle)
  1923. {
  1924. uint32 nkey;
  1925. dhd_pktid_map_t *map;
  1926. dhd_pktid_item_t *locker;
  1927. uint32 map_items;
  1928. unsigned long flags;
  1929. map = (dhd_pktid_map_t *)handle;
  1930. DHD_PKTID_LOCK(map->pktid_lock, flags);
  1931. map_items = DHD_PKIDMAP_ITEMS(map->items);
  1932. /* skip reserved KEY #0, and start from 1 */
  1933. for (nkey = 1; nkey <= map_items; nkey++) {
  1934. if (map->lockers[nkey].state == LOCKER_IS_BUSY) {
  1935. dhd_dma_buf_t retbuf;
  1936. #ifdef DHD_PKTID_AUDIT_RING
  1937. DHD_PKTID_AUDIT(dhd, map, nkey, DHD_DUPLICATE_FREE); /* duplicate frees */
  1938. #endif /* DHD_PKTID_AUDIT_RING */
  1939. locker = &map->lockers[nkey];
  1940. retbuf.va = locker->pkt;
  1941. retbuf.len = locker->len;
  1942. retbuf.pa = locker->pa;
  1943. retbuf.dmah = locker->dmah;
  1944. retbuf.secdma = locker->secdma;
  1945. free_ioctl_return_buffer(dhd, &retbuf);
  1946. }
  1947. else {
  1948. #ifdef DHD_PKTID_AUDIT_RING
  1949. DHD_PKTID_AUDIT(dhd, map, nkey, DHD_TEST_IS_FREE);
  1950. #endif /* DHD_PKTID_AUDIT_RING */
  1951. }
  1952. map->keys[nkey] = nkey; /* populate with unique keys */
  1953. }
  1954. map->avail = map_items;
  1955. memset(&map->lockers[1], 0, sizeof(dhd_pktid_item_t) * map_items);
  1956. DHD_PKTID_UNLOCK(map->pktid_lock, flags);
  1957. }
  1958. #endif /* IOCTLRESP_USE_CONSTMEM */
  1959. /**
  1960. * Free the pktid map.
  1961. */
  1962. static void
  1963. dhd_pktid_map_fini(dhd_pub_t *dhd, dhd_pktid_map_handle_t *handle)
  1964. {
  1965. dhd_pktid_map_t *map;
  1966. uint32 dhd_pktid_map_sz;
  1967. uint32 map_keys_sz;
  1968. if (handle == NULL)
  1969. return;
  1970. /* Free any pending packets */
  1971. dhd_pktid_map_reset(dhd, handle);
  1972. map = (dhd_pktid_map_t *)handle;
  1973. dhd_pktid_map_sz = DHD_PKTID_MAP_SZ(map->items);
  1974. map_keys_sz = DHD_PKTIDMAP_KEYS_SZ(map->items);
  1975. DHD_PKTID_LOCK_DEINIT(dhd->osh, map->pktid_lock);
  1976. #if defined(DHD_PKTID_AUDIT_ENABLED)
  1977. if (map->pktid_audit != (struct bcm_mwbmap *)NULL) {
  1978. bcm_mwbmap_fini(dhd->osh, map->pktid_audit); /* Destruct pktid_audit */
  1979. map->pktid_audit = (struct bcm_mwbmap *)NULL;
  1980. if (map->pktid_audit_lock) {
  1981. DHD_PKTID_AUDIT_LOCK_DEINIT(dhd->osh, map->pktid_audit_lock);
  1982. }
  1983. }
  1984. #endif /* DHD_PKTID_AUDIT_ENABLED */
  1985. MFREE(dhd->osh, map->keys, map_keys_sz);
  1986. VMFREE(dhd->osh, handle, dhd_pktid_map_sz);
  1987. }
  1988. #ifdef IOCTLRESP_USE_CONSTMEM
  1989. static void
  1990. dhd_pktid_map_fini_ioctl(dhd_pub_t *dhd, dhd_pktid_map_handle_t *handle)
  1991. {
  1992. dhd_pktid_map_t *map;
  1993. uint32 dhd_pktid_map_sz;
  1994. uint32 map_keys_sz;
  1995. if (handle == NULL)
  1996. return;
  1997. /* Free any pending packets */
  1998. dhd_pktid_map_reset_ioctl(dhd, handle);
  1999. map = (dhd_pktid_map_t *)handle;
  2000. dhd_pktid_map_sz = DHD_PKTID_MAP_SZ(map->items);
  2001. map_keys_sz = DHD_PKTIDMAP_KEYS_SZ(map->items);
  2002. DHD_PKTID_LOCK_DEINIT(dhd->osh, map->pktid_lock);
  2003. #if defined(DHD_PKTID_AUDIT_ENABLED)
  2004. if (map->pktid_audit != (struct bcm_mwbmap *)NULL) {
  2005. bcm_mwbmap_fini(dhd->osh, map->pktid_audit); /* Destruct pktid_audit */
  2006. map->pktid_audit = (struct bcm_mwbmap *)NULL;
  2007. if (map->pktid_audit_lock) {
  2008. DHD_PKTID_AUDIT_LOCK_DEINIT(dhd->osh, map->pktid_audit_lock);
  2009. }
  2010. }
  2011. #endif /* DHD_PKTID_AUDIT_ENABLED */
  2012. MFREE(dhd->osh, map->keys, map_keys_sz);
  2013. VMFREE(dhd->osh, handle, dhd_pktid_map_sz);
  2014. }
  2015. #endif /* IOCTLRESP_USE_CONSTMEM */
  2016. /** Get the pktid free count */
  2017. static INLINE uint32 BCMFASTPATH
  2018. dhd_pktid_map_avail_cnt(dhd_pktid_map_handle_t *handle)
  2019. {
  2020. dhd_pktid_map_t *map;
  2021. uint32 avail;
  2022. unsigned long flags;
  2023. ASSERT(handle != NULL);
  2024. map = (dhd_pktid_map_t *)handle;
  2025. DHD_PKTID_LOCK(map->pktid_lock, flags);
  2026. avail = map->avail;
  2027. DHD_PKTID_UNLOCK(map->pktid_lock, flags);
  2028. return avail;
  2029. }
  2030. /**
  2031. * dhd_pktid_map_reserve - reserve a unique numbered key. Reserved locker is not
  2032. * yet populated. Invoke the pktid save api to populate the packet parameters
2033. * into the locker. This function is not reentrant; serialization is the caller's
2034. * responsibility. The caller must treat a returned value of DHD_PKTID_INVALID as
  2035. * a failure case, implying a depleted pool of pktids.
  2036. */
  2037. static INLINE uint32
  2038. dhd_pktid_map_reserve(dhd_pub_t *dhd, dhd_pktid_map_handle_t *handle,
  2039. void *pkt, dhd_pkttype_t pkttype)
  2040. {
  2041. uint32 nkey;
  2042. dhd_pktid_map_t *map;
  2043. dhd_pktid_item_t *locker;
  2044. unsigned long flags;
  2045. ASSERT(handle != NULL);
  2046. map = (dhd_pktid_map_t *)handle;
  2047. DHD_PKTID_LOCK(map->pktid_lock, flags);
  2048. if ((int)(map->avail) <= 0) { /* no more pktids to allocate */
  2049. map->failures++;
  2050. DHD_INFO(("%s:%d: failed, no free keys\n", __FUNCTION__, __LINE__));
  2051. DHD_PKTID_UNLOCK(map->pktid_lock, flags);
  2052. return DHD_PKTID_INVALID; /* failed alloc request */
  2053. }
  2054. ASSERT(map->avail <= map->items);
  2055. nkey = map->keys[map->avail]; /* fetch a free locker, pop stack */
  2056. if ((map->avail > map->items) || (nkey > map->items)) {
  2057. map->failures++;
  2058. DHD_ERROR(("%s:%d: failed to allocate a new pktid,"
  2059. " map->avail<%u>, nkey<%u>, pkttype<%u>\n",
  2060. __FUNCTION__, __LINE__, map->avail, nkey,
  2061. pkttype));
  2062. DHD_PKTID_UNLOCK(map->pktid_lock, flags);
  2063. return DHD_PKTID_INVALID; /* failed alloc request */
  2064. }
  2065. locker = &map->lockers[nkey]; /* save packet metadata in locker */
  2066. map->avail--;
  2067. locker->pkt = pkt; /* pkt is saved, other params not yet saved. */
  2068. locker->len = 0;
  2069. locker->state = LOCKER_IS_BUSY; /* reserve this locker */
  2070. DHD_PKTID_UNLOCK(map->pktid_lock, flags);
  2071. ASSERT(nkey != DHD_PKTID_INVALID);
  2072. return nkey; /* return locker's numbered key */
  2073. }
  2074. /*
  2075. * dhd_pktid_map_save - Save a packet's parameters into a locker
  2076. * corresponding to a previously reserved unique numbered key.
  2077. */
  2078. static INLINE void
  2079. dhd_pktid_map_save(dhd_pub_t *dhd, dhd_pktid_map_handle_t *handle, void *pkt,
  2080. uint32 nkey, dmaaddr_t pa, uint32 len, uint8 dir, void *dmah, void *secdma,
  2081. dhd_pkttype_t pkttype)
  2082. {
  2083. dhd_pktid_map_t *map;
  2084. dhd_pktid_item_t *locker;
  2085. unsigned long flags;
  2086. ASSERT(handle != NULL);
  2087. map = (dhd_pktid_map_t *)handle;
  2088. DHD_PKTID_LOCK(map->pktid_lock, flags);
  2089. if ((nkey == DHD_PKTID_INVALID) || (nkey > DHD_PKIDMAP_ITEMS(map->items))) {
  2090. DHD_ERROR(("%s:%d: Error! saving invalid pktid<%u> pkttype<%u>\n",
  2091. __FUNCTION__, __LINE__, nkey, pkttype));
  2092. DHD_PKTID_UNLOCK(map->pktid_lock, flags);
  2093. #ifdef DHD_FW_COREDUMP
  2094. if (dhd->memdump_enabled) {
  2095. /* collect core dump */
  2096. dhd->memdump_type = DUMP_TYPE_PKTID_INVALID;
  2097. dhd_bus_mem_dump(dhd);
  2098. }
  2099. #else
  2100. ASSERT(0);
  2101. #endif /* DHD_FW_COREDUMP */
  2102. return;
  2103. }
  2104. locker = &map->lockers[nkey];
  2105. ASSERT(((locker->state == LOCKER_IS_BUSY) && (locker->pkt == pkt)) ||
  2106. ((locker->state == LOCKER_IS_RSVD) && (locker->pkt == NULL)));
  2107. /* store contents in locker */
  2108. locker->dir = dir;
  2109. locker->pa = pa;
  2110. locker->len = (uint16)len; /* 16bit len */
2111. locker->dmah = dmah; /* handle to OS specific DMA map */
  2112. locker->secdma = secdma;
  2113. locker->pkttype = pkttype;
  2114. locker->pkt = pkt;
  2115. locker->state = LOCKER_IS_BUSY; /* make this locker busy */
  2116. #ifdef DHD_MAP_PKTID_LOGGING
  2117. DHD_PKTID_LOG(dhd, dhd->prot->pktid_dma_map, pa, nkey, len, pkttype);
  2118. #endif /* DHD_MAP_PKTID_LOGGING */
  2119. DHD_PKTID_UNLOCK(map->pktid_lock, flags);
  2120. }
  2121. /**
  2122. * dhd_pktid_map_alloc - Allocate a unique numbered key and save the packet
  2123. * contents into the corresponding locker. Return the numbered key.
  2124. */
  2125. static uint32 BCMFASTPATH
  2126. dhd_pktid_map_alloc(dhd_pub_t *dhd, dhd_pktid_map_handle_t *handle, void *pkt,
  2127. dmaaddr_t pa, uint32 len, uint8 dir, void *dmah, void *secdma,
  2128. dhd_pkttype_t pkttype)
  2129. {
  2130. uint32 nkey;
  2131. nkey = dhd_pktid_map_reserve(dhd, handle, pkt, pkttype);
  2132. if (nkey != DHD_PKTID_INVALID) {
  2133. dhd_pktid_map_save(dhd, handle, pkt, nkey, pa,
  2134. len, dir, dmah, secdma, pkttype);
  2135. }
  2136. return nkey;
  2137. }
  2138. /**
  2139. * dhd_pktid_map_free - Given a numbered key, return the locker contents.
2140. * dhd_pktid_map_free() is not reentrant; serialization is the caller's responsibility.
2141. * The caller may not free a pktid of value DHD_PKTID_INVALID or an arbitrary pktid
  2142. * value. Only a previously allocated pktid may be freed.
  2143. */
  2144. static void * BCMFASTPATH
  2145. dhd_pktid_map_free(dhd_pub_t *dhd, dhd_pktid_map_handle_t *handle, uint32 nkey,
  2146. dmaaddr_t *pa, uint32 *len, void **dmah, void **secdma, dhd_pkttype_t pkttype,
  2147. bool rsv_locker)
  2148. {
  2149. dhd_pktid_map_t *map;
  2150. dhd_pktid_item_t *locker;
  2151. void * pkt;
  2152. unsigned long long locker_addr;
  2153. unsigned long flags;
  2154. ASSERT(handle != NULL);
  2155. map = (dhd_pktid_map_t *)handle;
  2156. DHD_PKTID_LOCK(map->pktid_lock, flags);
  2157. if ((nkey == DHD_PKTID_INVALID) || (nkey > DHD_PKIDMAP_ITEMS(map->items))) {
  2158. DHD_ERROR(("%s:%d: Error! Try to free invalid pktid<%u>, pkttype<%d>\n",
  2159. __FUNCTION__, __LINE__, nkey, pkttype));
  2160. DHD_PKTID_UNLOCK(map->pktid_lock, flags);
  2161. #ifdef DHD_FW_COREDUMP
  2162. if (dhd->memdump_enabled) {
  2163. /* collect core dump */
  2164. dhd->memdump_type = DUMP_TYPE_PKTID_INVALID;
  2165. dhd_bus_mem_dump(dhd);
  2166. }
  2167. #else
  2168. ASSERT(0);
  2169. #endif /* DHD_FW_COREDUMP */
  2170. return NULL;
  2171. }
  2172. locker = &map->lockers[nkey];
  2173. #if defined(DHD_PKTID_AUDIT_MAP)
  2174. DHD_PKTID_AUDIT(dhd, map, nkey, DHD_DUPLICATE_FREE); /* Audit duplicate FREE */
  2175. #endif /* DHD_PKTID_AUDIT_MAP */
  2176. /* Debug check for cloned numbered key */
  2177. if (locker->state == LOCKER_IS_FREE) {
  2178. DHD_ERROR(("%s:%d: Error! freeing already freed invalid pktid<%u>\n",
  2179. __FUNCTION__, __LINE__, nkey));
  2180. DHD_PKTID_UNLOCK(map->pktid_lock, flags);
  2181. #ifdef DHD_FW_COREDUMP
  2182. if (dhd->memdump_enabled) {
  2183. /* collect core dump */
  2184. dhd->memdump_type = DUMP_TYPE_PKTID_INVALID;
  2185. dhd_bus_mem_dump(dhd);
  2186. }
  2187. #else
  2188. ASSERT(0);
  2189. #endif /* DHD_FW_COREDUMP */
  2190. return NULL;
  2191. }
2192. /* Check the colour of the buffer, i.e. the buffer posted for TX
2193. * should be freed on TX completion. Similarly, the buffer posted for
2194. * IOCTL should be freed on IOCTL completion, etc.
  2195. */
  2196. if ((pkttype != PKTTYPE_NO_CHECK) && (locker->pkttype != pkttype)) {
  2197. DHD_ERROR(("%s:%d: Error! Invalid Buffer Free for pktid<%u> \n",
  2198. __FUNCTION__, __LINE__, nkey));
  2199. #ifdef BCMDMA64OSL
  2200. PHYSADDRTOULONG(locker->pa, locker_addr);
  2201. #else
  2202. locker_addr = PHYSADDRLO(locker->pa);
  2203. #endif /* BCMDMA64OSL */
  2204. DHD_ERROR(("%s:%d: locker->state <%d>, locker->pkttype <%d>,"
  2205. "pkttype <%d> locker->pa <0x%llx> \n",
  2206. __FUNCTION__, __LINE__, locker->state, locker->pkttype,
  2207. pkttype, locker_addr));
  2208. DHD_PKTID_UNLOCK(map->pktid_lock, flags);
  2209. #ifdef DHD_FW_COREDUMP
  2210. if (dhd->memdump_enabled) {
  2211. /* collect core dump */
  2212. dhd->memdump_type = DUMP_TYPE_PKTID_INVALID;
  2213. dhd_bus_mem_dump(dhd);
  2214. }
  2215. #else
  2216. ASSERT(0);
  2217. #endif /* DHD_FW_COREDUMP */
  2218. return NULL;
  2219. }
  2220. if (rsv_locker == DHD_PKTID_FREE_LOCKER) {
  2221. map->avail++;
  2222. map->keys[map->avail] = nkey; /* make this numbered key available */
  2223. locker->state = LOCKER_IS_FREE; /* open and free Locker */
  2224. } else {
  2225. /* pktid will be reused, but the locker does not have a valid pkt */
  2226. locker->state = LOCKER_IS_RSVD;
  2227. }
  2228. #if defined(DHD_PKTID_AUDIT_MAP)
  2229. DHD_PKTID_AUDIT(dhd, map, nkey, DHD_TEST_IS_FREE);
  2230. #endif /* DHD_PKTID_AUDIT_MAP */
  2231. #ifdef DHD_MAP_PKTID_LOGGING
  2232. DHD_PKTID_LOG(dhd, dhd->prot->pktid_dma_unmap, locker->pa, nkey,
  2233. (uint32)locker->len, pkttype);
  2234. #endif /* DHD_MAP_PKTID_LOGGING */
  2235. *pa = locker->pa; /* return contents of locker */
  2236. *len = (uint32)locker->len;
  2237. *dmah = locker->dmah;
  2238. *secdma = locker->secdma;
  2239. pkt = locker->pkt;
  2240. locker->pkt = NULL; /* Clear pkt */
  2241. locker->len = 0;
  2242. DHD_PKTID_UNLOCK(map->pktid_lock, flags);
  2243. return pkt;
  2244. }
  2245. #else /* ! DHD_PCIE_PKTID */
  2246. typedef struct pktlist {
  2247. PKT_LIST *tx_pkt_list; /* list for tx packets */
  2248. PKT_LIST *rx_pkt_list; /* list for rx packets */
  2249. PKT_LIST *ctrl_pkt_list; /* list for ioctl/event buf post */
  2250. } pktlists_t;
  2251. /*
2252. * Given that each workitem only carries a 32bit pktid, only 32bit hosts can use
2253. * a one-to-one mapping between a 32bit pktptr and a 32bit pktid.
  2254. *
  2255. * - When PKTIDMAP is not used, DHD_NATIVE_TO_PKTID variants will never fail.
  2256. * - Neither DHD_NATIVE_TO_PKTID nor DHD_PKTID_TO_NATIVE need to be protected by
  2257. * a lock.
  2258. * - Hence DHD_PKTID_INVALID is not defined when DHD_PCIE_PKTID is undefined.
  2259. */
  2260. #define DHD_PKTID32(pktptr32) ((uint32)(pktptr32))
  2261. #define DHD_PKTPTR32(pktid32) ((void *)(pktid32))
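/*
 * Illustrative sketch (not part of the driver): with DHD_PCIE_PKTID undefined
 * the "mapping" is just an identity cast on a 32bit host, so a round trip
 * never fails and never consumes a locker:
 *
 *     uint32 pktid = DHD_PKTID32(pkt);        (pointer reinterpreted as id)
 *     void *same_pkt = DHD_PKTPTR32(pktid);   (id reinterpreted as pointer)
 */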
  2262. static INLINE uint32 dhd_native_to_pktid(dhd_pktid_map_handle_t *map, void *pktptr32,
  2263. dmaaddr_t pa, uint32 dma_len, void *dmah, void *secdma,
  2264. dhd_pkttype_t pkttype);
  2265. static INLINE void * dhd_pktid_to_native(dhd_pktid_map_handle_t *map, uint32 pktid32,
  2266. dmaaddr_t *pa, uint32 *dma_len, void **dmah, void **secdma,
  2267. dhd_pkttype_t pkttype);
  2268. static dhd_pktid_map_handle_t *
  2269. dhd_pktid_map_init(dhd_pub_t *dhd, uint32 num_items)
  2270. {
  2271. osl_t *osh = dhd->osh;
  2272. pktlists_t *handle = NULL;
  2273. if ((handle = (pktlists_t *) MALLOCZ(osh, sizeof(pktlists_t))) == NULL) {
  2274. DHD_ERROR(("%s:%d: MALLOC failed for lists allocation, size=%d\n",
  2275. __FUNCTION__, __LINE__, sizeof(pktlists_t)));
  2276. goto error_done;
  2277. }
  2278. if ((handle->tx_pkt_list = (PKT_LIST *) MALLOC(osh, sizeof(PKT_LIST))) == NULL) {
  2279. DHD_ERROR(("%s:%d: MALLOC failed for list allocation, size=%d\n",
  2280. __FUNCTION__, __LINE__, sizeof(PKT_LIST)));
  2281. goto error;
  2282. }
  2283. if ((handle->rx_pkt_list = (PKT_LIST *) MALLOC(osh, sizeof(PKT_LIST))) == NULL) {
  2284. DHD_ERROR(("%s:%d: MALLOC failed for list allocation, size=%d\n",
  2285. __FUNCTION__, __LINE__, sizeof(PKT_LIST)));
  2286. goto error;
  2287. }
  2288. if ((handle->ctrl_pkt_list = (PKT_LIST *) MALLOC(osh, sizeof(PKT_LIST))) == NULL) {
  2289. DHD_ERROR(("%s:%d: MALLOC failed for list allocation, size=%d\n",
  2290. __FUNCTION__, __LINE__, sizeof(PKT_LIST)));
  2291. goto error;
  2292. }
  2293. PKTLIST_INIT(handle->tx_pkt_list);
  2294. PKTLIST_INIT(handle->rx_pkt_list);
  2295. PKTLIST_INIT(handle->ctrl_pkt_list);
  2296. return (dhd_pktid_map_handle_t *) handle;
  2297. error:
  2298. if (handle->ctrl_pkt_list) {
  2299. MFREE(osh, handle->ctrl_pkt_list, sizeof(PKT_LIST));
  2300. }
  2301. if (handle->rx_pkt_list) {
  2302. MFREE(osh, handle->rx_pkt_list, sizeof(PKT_LIST));
  2303. }
  2304. if (handle->tx_pkt_list) {
  2305. MFREE(osh, handle->tx_pkt_list, sizeof(PKT_LIST));
  2306. }
  2307. if (handle) {
  2308. MFREE(osh, handle, sizeof(pktlists_t));
  2309. }
  2310. error_done:
  2311. return (dhd_pktid_map_handle_t *)NULL;
  2312. }
  2313. static void
  2314. dhd_pktid_map_reset(dhd_pub_t *dhd, pktlists_t *handle)
  2315. {
  2316. osl_t *osh = dhd->osh;
  2317. if (handle->ctrl_pkt_list) {
  2318. PKTLIST_FINI(handle->ctrl_pkt_list);
  2319. MFREE(osh, handle->ctrl_pkt_list, sizeof(PKT_LIST));
  2320. }
  2321. if (handle->rx_pkt_list) {
  2322. PKTLIST_FINI(handle->rx_pkt_list);
  2323. MFREE(osh, handle->rx_pkt_list, sizeof(PKT_LIST));
  2324. }
  2325. if (handle->tx_pkt_list) {
  2326. PKTLIST_FINI(handle->tx_pkt_list);
  2327. MFREE(osh, handle->tx_pkt_list, sizeof(PKT_LIST));
  2328. }
  2329. }
  2330. static void
  2331. dhd_pktid_map_fini(dhd_pub_t *dhd, dhd_pktid_map_handle_t *map)
  2332. {
  2333. osl_t *osh = dhd->osh;
  2334. pktlists_t *handle = (pktlists_t *) map;
  2335. ASSERT(handle != NULL);
  2336. if (handle == (pktlists_t *)NULL) {
  2337. return;
  2338. }
  2339. dhd_pktid_map_reset(dhd, handle);
  2340. if (handle) {
  2341. MFREE(osh, handle, sizeof(pktlists_t));
  2342. }
  2343. }
  2344. /** Save dma parameters into the packet's pkttag and convert a pktptr to pktid */
  2345. static INLINE uint32
  2346. dhd_native_to_pktid(dhd_pktid_map_handle_t *map, void *pktptr32,
  2347. dmaaddr_t pa, uint32 dma_len, void *dmah, void *secdma,
  2348. dhd_pkttype_t pkttype)
  2349. {
  2350. pktlists_t *handle = (pktlists_t *) map;
  2351. ASSERT(pktptr32 != NULL);
  2352. DHD_PKT_SET_DMA_LEN(pktptr32, dma_len);
  2353. DHD_PKT_SET_DMAH(pktptr32, dmah);
  2354. DHD_PKT_SET_PA(pktptr32, pa);
  2355. DHD_PKT_SET_SECDMA(pktptr32, secdma);
  2356. if (pkttype == PKTTYPE_DATA_TX) {
  2357. PKTLIST_ENQ(handle->tx_pkt_list, pktptr32);
  2358. } else if (pkttype == PKTTYPE_DATA_RX) {
  2359. PKTLIST_ENQ(handle->rx_pkt_list, pktptr32);
  2360. } else {
  2361. PKTLIST_ENQ(handle->ctrl_pkt_list, pktptr32);
  2362. }
  2363. return DHD_PKTID32(pktptr32);
  2364. }
  2365. /** Convert a pktid to pktptr and retrieve saved dma parameters from packet */
  2366. static INLINE void *
  2367. dhd_pktid_to_native(dhd_pktid_map_handle_t *map, uint32 pktid32,
  2368. dmaaddr_t *pa, uint32 *dma_len, void **dmah, void **secdma,
  2369. dhd_pkttype_t pkttype)
  2370. {
  2371. pktlists_t *handle = (pktlists_t *) map;
  2372. void *pktptr32;
  2373. ASSERT(pktid32 != 0U);
  2374. pktptr32 = DHD_PKTPTR32(pktid32);
  2375. *dma_len = DHD_PKT_GET_DMA_LEN(pktptr32);
  2376. *dmah = DHD_PKT_GET_DMAH(pktptr32);
  2377. *pa = DHD_PKT_GET_PA(pktptr32);
  2378. *secdma = DHD_PKT_GET_SECDMA(pktptr32);
  2379. if (pkttype == PKTTYPE_DATA_TX) {
  2380. PKTLIST_UNLINK(handle->tx_pkt_list, pktptr32);
  2381. } else if (pkttype == PKTTYPE_DATA_RX) {
  2382. PKTLIST_UNLINK(handle->rx_pkt_list, pktptr32);
  2383. } else {
  2384. PKTLIST_UNLINK(handle->ctrl_pkt_list, pktptr32);
  2385. }
  2386. return pktptr32;
  2387. }
  2388. #define DHD_NATIVE_TO_PKTID_RSV(dhd, map, pkt, pkttype) DHD_PKTID32(pkt)
  2389. #define DHD_NATIVE_TO_PKTID_SAVE(dhd, map, pkt, nkey, pa, len, dma_dir, dmah, secdma, pkttype) \
  2390. ({ BCM_REFERENCE(dhd); BCM_REFERENCE(nkey); BCM_REFERENCE(dma_dir); \
  2391. dhd_native_to_pktid((dhd_pktid_map_handle_t *) map, (pkt), (pa), (len), \
  2392. (dmah), (secdma), (dhd_pkttype_t)(pkttype)); \
  2393. })
  2394. #define DHD_NATIVE_TO_PKTID(dhd, map, pkt, pa, len, dma_dir, dmah, secdma, pkttype) \
  2395. ({ BCM_REFERENCE(dhd); BCM_REFERENCE(dma_dir); \
  2396. dhd_native_to_pktid((dhd_pktid_map_handle_t *) map, (pkt), (pa), (len), \
  2397. (dmah), (secdma), (dhd_pkttype_t)(pkttype)); \
  2398. })
  2399. #define DHD_PKTID_TO_NATIVE(dhd, map, pktid, pa, len, dmah, secdma, pkttype) \
  2400. ({ BCM_REFERENCE(dhd); BCM_REFERENCE(pkttype); \
  2401. dhd_pktid_to_native((dhd_pktid_map_handle_t *) map, (uint32)(pktid), \
  2402. (dmaaddr_t *)&(pa), (uint32 *)&(len), (void **)&(dmah), \
  2403. (void **)&secdma, (dhd_pkttype_t)(pkttype)); \
  2404. })
  2405. #define DHD_PKTID_AVAIL(map) (~0)
  2406. #endif /* ! DHD_PCIE_PKTID */
  2407. /* +------------------ End of PCIE DHD PKTID MAPPER -----------------------+ */
  2408. /**
  2409. * The PCIE FD protocol layer is constructed in two phases:
  2410. * Phase 1. dhd_prot_attach()
  2411. * Phase 2. dhd_prot_init()
  2412. *
  2413. * dhd_prot_attach() - Allocates a dhd_prot_t object and resets all its fields.
2414. * All common rings are also attached (msgbuf_ring_t objects are allocated
  2415. * with DMA-able buffers).
  2416. * All dhd_dma_buf_t objects are also allocated here.
  2417. *
2418. * As dhd_prot_attach is invoked before the pcie_shared object is read, any
2419. * initialization of objects that requires information advertized by the dongle
2420. * may not be performed here.
2421. * E.g. the number of TxPost flowrings is not known at this point, nor do
2422. * we know which form of D2H DMA sync mechanism is advertized by the dongle, or
  2423. * whether the dongle supports DMA-ing of WR/RD indices for the H2D and/or D2H
  2424. * rings (common + flow).
  2425. *
  2426. * dhd_prot_init() is invoked after the bus layer has fetched the information
  2427. * advertized by the dongle in the pcie_shared_t.
  2428. */
  2429. int
  2430. dhd_prot_attach(dhd_pub_t *dhd)
  2431. {
  2432. osl_t *osh = dhd->osh;
  2433. dhd_prot_t *prot;
2434. /* FW is going to DMA extended trap data,
2435. * so allocate a buffer for the maximum extended trap data.
  2436. */
  2437. uint32 trap_buf_len = BCMPCIE_EXT_TRAP_DATA_MAXLEN;
  2438. /* Allocate prot structure */
  2439. if (!(prot = (dhd_prot_t *)DHD_OS_PREALLOC(dhd, DHD_PREALLOC_PROT,
  2440. sizeof(dhd_prot_t)))) {
  2441. DHD_ERROR(("%s: kmalloc failed\n", __FUNCTION__));
  2442. goto fail;
  2443. }
  2444. memset(prot, 0, sizeof(*prot));
  2445. prot->osh = osh;
  2446. dhd->prot = prot;
  2447. /* DMAing ring completes supported? FALSE by default */
  2448. dhd->dma_d2h_ring_upd_support = FALSE;
  2449. dhd->dma_h2d_ring_upd_support = FALSE;
  2450. dhd->dma_ring_upd_overwrite = FALSE;
  2451. dhd->hwa_inited = 0;
  2452. dhd->idma_inited = 0;
  2453. dhd->ifrm_inited = 0;
  2454. dhd->dar_inited = 0;
  2455. /* Common Ring Allocations */
  2456. /* Ring 0: H2D Control Submission */
  2457. if (dhd_prot_ring_attach(dhd, &prot->h2dring_ctrl_subn, "h2dctrl",
  2458. H2DRING_CTRL_SUB_MAX_ITEM, H2DRING_CTRL_SUB_ITEMSIZE,
  2459. BCMPCIE_H2D_MSGRING_CONTROL_SUBMIT) != BCME_OK) {
  2460. DHD_ERROR(("%s: dhd_prot_ring_attach H2D Ctrl Submission failed\n",
  2461. __FUNCTION__));
  2462. goto fail;
  2463. }
  2464. /* Ring 1: H2D Receive Buffer Post */
  2465. if (dhd_prot_ring_attach(dhd, &prot->h2dring_rxp_subn, "h2drxp",
  2466. H2DRING_RXPOST_MAX_ITEM, H2DRING_RXPOST_ITEMSIZE,
  2467. BCMPCIE_H2D_MSGRING_RXPOST_SUBMIT) != BCME_OK) {
  2468. DHD_ERROR(("%s: dhd_prot_ring_attach H2D RxPost failed\n",
  2469. __FUNCTION__));
  2470. goto fail;
  2471. }
  2472. /* Ring 2: D2H Control Completion */
  2473. if (dhd_prot_ring_attach(dhd, &prot->d2hring_ctrl_cpln, "d2hctrl",
  2474. D2HRING_CTRL_CMPLT_MAX_ITEM, D2HRING_CTRL_CMPLT_ITEMSIZE,
  2475. BCMPCIE_D2H_MSGRING_CONTROL_COMPLETE) != BCME_OK) {
  2476. DHD_ERROR(("%s: dhd_prot_ring_attach D2H Ctrl Completion failed\n",
  2477. __FUNCTION__));
  2478. goto fail;
  2479. }
  2480. /* Ring 3: D2H Transmit Complete */
  2481. if (dhd_prot_ring_attach(dhd, &prot->d2hring_tx_cpln, "d2htxcpl",
  2482. D2HRING_TXCMPLT_MAX_ITEM, D2HRING_TXCMPLT_ITEMSIZE,
  2483. BCMPCIE_D2H_MSGRING_TX_COMPLETE) != BCME_OK) {
  2484. DHD_ERROR(("%s: dhd_prot_ring_attach D2H Tx Completion failed\n",
  2485. __FUNCTION__));
  2486. goto fail;
  2487. }
  2488. /* Ring 4: D2H Receive Complete */
  2489. if (dhd_prot_ring_attach(dhd, &prot->d2hring_rx_cpln, "d2hrxcpl",
  2490. D2HRING_RXCMPLT_MAX_ITEM, D2HRING_RXCMPLT_ITEMSIZE,
  2491. BCMPCIE_D2H_MSGRING_RX_COMPLETE) != BCME_OK) {
  2492. DHD_ERROR(("%s: dhd_prot_ring_attach D2H Rx Completion failed\n",
  2493. __FUNCTION__));
  2494. goto fail;
  2495. }
  2496. /*
2497. * Max number of flowrings is not yet known. msgbuf_ring_t objects with DMA-able
2498. * buffers for flowrings will be instantiated in dhd_prot_init().
2499. * See dhd_prot_flowrings_pool_attach().
  2500. */
  2501. /* ioctl response buffer */
  2502. if (dhd_dma_buf_alloc(dhd, &prot->retbuf, IOCT_RETBUF_SIZE)) {
  2503. goto fail;
  2504. }
  2505. /* IOCTL request buffer */
  2506. if (dhd_dma_buf_alloc(dhd, &prot->ioctbuf, IOCT_RETBUF_SIZE)) {
  2507. goto fail;
  2508. }
2509. /* Host TS request buffer; one buffer for now */
  2510. if (dhd_dma_buf_alloc(dhd, &prot->hostts_req_buf, CTRLSUB_HOSTTS_MEESAGE_SIZE)) {
  2511. goto fail;
  2512. }
  2513. prot->hostts_req_buf_inuse = FALSE;
  2514. /* Scratch buffer for dma rx offset */
  2515. #ifdef BCM_HOST_BUF
  2516. if (dhd_dma_buf_alloc(dhd, &prot->d2h_dma_scratch_buf,
  2517. ROUNDUP(DMA_D2H_SCRATCH_BUF_LEN, 16) + DMA_HOST_BUFFER_LEN)) {
  2518. #else
  2519. if (dhd_dma_buf_alloc(dhd, &prot->d2h_dma_scratch_buf, DMA_D2H_SCRATCH_BUF_LEN)) {
  2520. #endif /* BCM_HOST_BUF */
  2521. goto fail;
  2522. }
  2523. /* scratch buffer bus throughput measurement */
  2524. if (dhd_dma_buf_alloc(dhd, &prot->host_bus_throughput_buf, DHD_BUS_TPUT_BUF_LEN)) {
  2525. goto fail;
  2526. }
  2527. #ifdef DHD_RX_CHAINING
  2528. dhd_rxchain_reset(&prot->rxchain);
  2529. #endif // endif
  2530. prot->pktid_ctrl_map = DHD_NATIVE_TO_PKTID_INIT(dhd, MAX_CTRL_PKTID);
  2531. if (prot->pktid_ctrl_map == NULL) {
  2532. goto fail;
  2533. }
  2534. prot->pktid_rx_map = DHD_NATIVE_TO_PKTID_INIT(dhd, MAX_RX_PKTID);
  2535. if (prot->pktid_rx_map == NULL)
  2536. goto fail;
  2537. prot->pktid_tx_map = DHD_NATIVE_TO_PKTID_INIT(dhd, MAX_TX_PKTID);
  2538. if (prot->pktid_tx_map == NULL)
  2539. goto fail;
  2540. #ifdef IOCTLRESP_USE_CONSTMEM
  2541. prot->pktid_map_handle_ioctl = DHD_NATIVE_TO_PKTID_INIT(dhd,
  2542. DHD_FLOWRING_MAX_IOCTLRESPBUF_POST);
  2543. if (prot->pktid_map_handle_ioctl == NULL) {
  2544. goto fail;
  2545. }
  2546. #endif /* IOCTLRESP_USE_CONSTMEM */
  2547. #ifdef DHD_MAP_PKTID_LOGGING
  2548. prot->pktid_dma_map = DHD_PKTID_LOG_INIT(dhd, MAX_PKTID_LOG);
  2549. if (prot->pktid_dma_map == NULL) {
  2550. DHD_ERROR(("%s: failed to allocate pktid_dma_map\n",
  2551. __FUNCTION__));
  2552. }
  2553. prot->pktid_dma_unmap = DHD_PKTID_LOG_INIT(dhd, MAX_PKTID_LOG);
  2554. if (prot->pktid_dma_unmap == NULL) {
  2555. DHD_ERROR(("%s: failed to allocate pktid_dma_unmap\n",
  2556. __FUNCTION__));
  2557. }
  2558. #endif /* DHD_MAP_PKTID_LOGGING */
  2559. /* Initialize the work queues to be used by the Load Balancing logic */
  2560. #if defined(DHD_LB_TXC)
  2561. {
  2562. void *buffer;
  2563. buffer = MALLOC(dhd->osh, sizeof(void*) * DHD_LB_WORKQ_SZ);
  2564. if (buffer == NULL) {
2565. DHD_ERROR(("%s: failed to allocate TXC work buffer\n", __FUNCTION__));
  2566. goto fail;
  2567. }
  2568. bcm_workq_init(&prot->tx_compl_prod, &prot->tx_compl_cons,
  2569. buffer, DHD_LB_WORKQ_SZ);
  2570. prot->tx_compl_prod_sync = 0;
  2571. DHD_INFO(("%s: created tx_compl_workq <%p,%d>\n",
  2572. __FUNCTION__, buffer, DHD_LB_WORKQ_SZ));
  2573. }
  2574. #endif /* DHD_LB_TXC */
  2575. #if defined(DHD_LB_RXC)
  2576. {
  2577. void *buffer;
  2578. buffer = MALLOC(dhd->osh, sizeof(void*) * DHD_LB_WORKQ_SZ);
  2579. if (buffer == NULL) {
  2580. DHD_ERROR(("%s: failed to allocate RXC work buffer\n", __FUNCTION__));
  2581. goto fail;
  2582. }
  2583. bcm_workq_init(&prot->rx_compl_prod, &prot->rx_compl_cons,
  2584. buffer, DHD_LB_WORKQ_SZ);
  2585. prot->rx_compl_prod_sync = 0;
  2586. DHD_INFO(("%s: created rx_compl_workq <%p,%d>\n",
  2587. __FUNCTION__, buffer, DHD_LB_WORKQ_SZ));
  2588. }
  2589. #endif /* DHD_LB_RXC */
  2590. /* Initialize trap buffer */
  2591. if (dhd_dma_buf_alloc(dhd, &dhd->prot->fw_trap_buf, trap_buf_len)) {
2592. DHD_ERROR(("%s: dhd_init_trap_buffer failed\n", __FUNCTION__));
  2593. goto fail;
  2594. }
  2595. return BCME_OK;
  2596. fail:
  2597. if (prot) {
  2598. /* Free up all allocated memories */
  2599. dhd_prot_detach(dhd);
  2600. }
  2601. return BCME_NOMEM;
  2602. } /* dhd_prot_attach */
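/*
 * Illustrative sketch (not part of the driver): the two-phase bring-up
 * described above, as the bus layer is expected to drive it. Resources that
 * do not depend on dongle-advertized information are created in phase 1;
 * everything that does (flowring pool, DMA index support, host capabilities)
 * is deferred to phase 2.
 *
 *     if (dhd_prot_attach(dhd) != BCME_OK)       (phase 1, before FW handshake)
 *             (bail out; the fail path already invoked dhd_prot_detach())
 *     ... bus layer fetches pcie_shared_t from the dongle ...
 *     if (dhd_prot_init(dhd) != BCME_OK)         (phase 2, after FW handshake)
 *             (tear down via dhd_prot_detach())
 */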
  2603. static int
  2604. dhd_alloc_host_scbs(dhd_pub_t *dhd)
  2605. {
  2606. int ret = BCME_OK;
  2607. sh_addr_t base_addr;
  2608. dhd_prot_t *prot = dhd->prot;
  2609. uint32 host_scb_size = 0;
  2610. if (dhd->hscb_enable) {
  2611. /* read number of bytes to allocate from F/W */
  2612. dhd_bus_cmn_readshared(dhd->bus, &host_scb_size, HOST_SCB_ADDR, 0);
  2613. if (host_scb_size) {
  2614. /* alloc array of host scbs */
  2615. ret = dhd_dma_buf_alloc(dhd, &prot->host_scb_buf, host_scb_size);
  2616. /* write host scb address to F/W */
  2617. if (ret == BCME_OK) {
  2618. dhd_base_addr_htolpa(&base_addr, prot->host_scb_buf.pa);
  2619. dhd_bus_cmn_writeshared(dhd->bus, &base_addr, sizeof(base_addr),
  2620. HOST_SCB_ADDR, 0);
  2621. } else {
  2622. DHD_TRACE(("dhd_alloc_host_scbs: dhd_dma_buf_alloc error\n"));
  2623. }
  2624. } else {
  2625. DHD_TRACE(("dhd_alloc_host_scbs: host_scb_size is 0.\n"));
  2626. }
  2627. } else {
  2628. DHD_TRACE(("dhd_alloc_host_scbs: Host scb not supported in F/W.\n"));
  2629. }
  2630. return ret;
  2631. }
  2632. void
  2633. dhd_set_host_cap(dhd_pub_t *dhd)
  2634. {
  2635. uint32 data = 0;
  2636. dhd_prot_t *prot = dhd->prot;
  2637. if (dhd->bus->api.fw_rev >= PCIE_SHARED_VERSION_6) {
  2638. if (dhd->h2d_phase_supported) {
  2639. data |= HOSTCAP_H2D_VALID_PHASE;
  2640. if (dhd->force_dongletrap_on_bad_h2d_phase)
  2641. data |= HOSTCAP_H2D_ENABLE_TRAP_ON_BADPHASE;
  2642. }
  2643. if (prot->host_ipc_version > prot->device_ipc_version)
  2644. prot->active_ipc_version = prot->device_ipc_version;
  2645. else
  2646. prot->active_ipc_version = prot->host_ipc_version;
  2647. data |= prot->active_ipc_version;
  2648. if (dhdpcie_bus_get_pcie_hostready_supported(dhd->bus)) {
  2649. DHD_INFO(("Advertise Hostready Capability\n"));
  2650. data |= HOSTCAP_H2D_ENABLE_HOSTRDY;
  2651. }
  2652. {
  2653. /* Disable DS altogether */
  2654. data |= HOSTCAP_DS_NO_OOB_DW;
  2655. dhdpcie_bus_enab_pcie_dw(dhd->bus, DEVICE_WAKE_NONE);
  2656. }
  2657. /* Indicate support for extended trap data */
  2658. data |= HOSTCAP_EXTENDED_TRAP_DATA;
  2659. /* Indicate support for TX status metadata */
  2660. if (dhd->pcie_txs_metadata_enable != 0)
  2661. data |= HOSTCAP_TXSTATUS_METADATA;
  2662. /* Enable fast delete ring in firmware if supported */
  2663. if (dhd->fast_delete_ring_support) {
  2664. data |= HOSTCAP_FAST_DELETE_RING;
  2665. }
  2666. if (dhdpcie_bus_get_pcie_hwa_supported(dhd->bus)) {
  2667. DHD_ERROR(("HWA inited\n"));
  2668. /* TODO: Is hostcap needed? */
  2669. dhd->hwa_inited = TRUE;
  2670. }
  2671. if (dhdpcie_bus_get_pcie_idma_supported(dhd->bus)) {
  2672. DHD_ERROR(("IDMA inited\n"));
  2673. data |= HOSTCAP_H2D_IDMA;
  2674. dhd->idma_inited = TRUE;
  2675. }
  2676. if (dhdpcie_bus_get_pcie_ifrm_supported(dhd->bus)) {
  2677. DHD_ERROR(("IFRM Inited\n"));
  2678. data |= HOSTCAP_H2D_IFRM;
  2679. dhd->ifrm_inited = TRUE;
  2680. dhd->dma_h2d_ring_upd_support = FALSE;
  2681. dhd_prot_dma_indx_free(dhd);
  2682. }
  2683. if (dhdpcie_bus_get_pcie_dar_supported(dhd->bus)) {
  2684. DHD_ERROR(("DAR doorbell Use\n"));
  2685. data |= HOSTCAP_H2D_DAR;
  2686. dhd->dar_inited = TRUE;
  2687. }
  2688. data |= HOSTCAP_UR_FW_NO_TRAP;
  2689. if (dhd->hscb_enable) {
  2690. data |= HOSTCAP_HSCB;
  2691. }
  2692. #ifdef EWP_EDL
  2693. if (dhd->dongle_edl_support) {
  2694. data |= HOSTCAP_EDL_RING;
  2695. DHD_ERROR(("Enable EDL host cap\n"));
  2696. } else {
  2697. DHD_ERROR(("DO NOT SET EDL host cap\n"));
  2698. }
  2699. #endif /* EWP_EDL */
  2700. #ifdef DHD_HP2P
  2701. if (dhd->hp2p_capable) {
  2702. data |= HOSTCAP_PKT_TIMESTAMP;
  2703. data |= HOSTCAP_PKT_HP2P;
  2704. DHD_ERROR(("Enable HP2P in host cap\n"));
  2705. } else {
  2706. DHD_ERROR(("HP2P not enabled in host cap\n"));
  2707. }
  2708. #endif // endif
  2709. #ifdef DHD_DB0TS
  2710. if (dhd->db0ts_capable) {
  2711. data |= HOSTCAP_DB0_TIMESTAMP;
  2712. DHD_ERROR(("Enable DB0 TS in host cap\n"));
  2713. } else {
  2714. DHD_ERROR(("DB0 TS not enabled in host cap\n"));
  2715. }
  2716. #endif /* DHD_DB0TS */
  2717. if (dhd->extdtxs_in_txcpl) {
  2718. DHD_ERROR(("Enable hostcap: EXTD TXS in txcpl\n"));
  2719. data |= HOSTCAP_PKT_TXSTATUS;
  2720. }
  2721. else {
  2722. DHD_ERROR(("Enable hostcap: EXTD TXS in txcpl\n"));
  2723. }
  2724. DHD_INFO(("%s:Active Ver:%d, Host Ver:%d, FW Ver:%d\n",
  2725. __FUNCTION__,
  2726. prot->active_ipc_version, prot->host_ipc_version,
  2727. prot->device_ipc_version));
  2728. dhd_bus_cmn_writeshared(dhd->bus, &data, sizeof(uint32), HOST_API_VERSION, 0);
  2729. dhd_bus_cmn_writeshared(dhd->bus, &prot->fw_trap_buf.pa,
  2730. sizeof(prot->fw_trap_buf.pa), DNGL_TO_HOST_TRAP_ADDR, 0);
  2731. }
  2732. }
  2733. /**
  2734. * dhd_prot_init - second stage of dhd_prot_attach. Now that the dongle has
2735. * completed its initialization of the pcie_shared structure, we may now fetch
2736. * the dongle advertised features and adjust the protocol layer accordingly.
  2737. *
  2738. * dhd_prot_init() may be invoked again after a dhd_prot_reset().
  2739. */
  2740. int
  2741. dhd_prot_init(dhd_pub_t *dhd)
  2742. {
  2743. sh_addr_t base_addr;
  2744. dhd_prot_t *prot = dhd->prot;
  2745. int ret = 0;
  2746. uint32 idmacontrol;
  2747. uint32 waitcount = 0;
  2748. #ifdef WL_MONITOR
  2749. dhd->monitor_enable = FALSE;
  2750. #endif /* WL_MONITOR */
  2751. /**
2752. * A user-defined value can be assigned to the global variable h2d_max_txpost via
2753. * 1. the DHD IOVAR h2d_max_txpost, issued before firmware download, or
2754. * 2. the module parameter h2d_max_txpost.
2755. * prot->h2d_max_txpost defaults to H2DRING_TXPOST_MAX_ITEM
2756. * if the user has not set a value by one of the above methods.
  2757. */
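/* Example (illustrative only): passing "h2d_max_txpost=512" on the module
 * command line, or issuing the h2d_max_txpost IOVAR before firmware download,
 * would override the H2DRING_TXPOST_MAX_ITEM default.
 */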
  2758. prot->h2d_max_txpost = (uint16)h2d_max_txpost;
  2759. DHD_ERROR(("%s:%d: h2d_max_txpost = %d\n", __FUNCTION__, __LINE__, prot->h2d_max_txpost));
  2760. /* Read max rx packets supported by dongle */
  2761. dhd_bus_cmn_readshared(dhd->bus, &prot->max_rxbufpost, MAX_HOST_RXBUFS, 0);
  2762. if (prot->max_rxbufpost == 0) {
  2763. /* This would happen if the dongle firmware is not */
  2764. /* using the latest shared structure template */
  2765. prot->max_rxbufpost = DEFAULT_RX_BUFFERS_TO_POST;
  2766. }
  2767. DHD_ERROR(("%s:%d: MAX_RXBUFPOST = %d\n", __FUNCTION__, __LINE__, prot->max_rxbufpost));
2768. /* Initialize fields individually; a blanket bzero() would blow away the DMA pointers. */
  2769. prot->max_eventbufpost = DHD_FLOWRING_MAX_EVENTBUF_POST;
  2770. prot->max_ioctlrespbufpost = DHD_FLOWRING_MAX_IOCTLRESPBUF_POST;
  2771. prot->max_infobufpost = DHD_H2D_INFORING_MAX_BUF_POST;
  2772. prot->max_tsbufpost = DHD_MAX_TSBUF_POST;
  2773. prot->cur_ioctlresp_bufs_posted = 0;
  2774. OSL_ATOMIC_INIT(dhd->osh, &prot->active_tx_count);
  2775. prot->data_seq_no = 0;
  2776. prot->ioctl_seq_no = 0;
  2777. prot->rxbufpost = 0;
  2778. prot->cur_event_bufs_posted = 0;
  2779. prot->ioctl_state = 0;
  2780. prot->curr_ioctl_cmd = 0;
  2781. prot->cur_ts_bufs_posted = 0;
  2782. prot->infobufpost = 0;
  2783. prot->dmaxfer.srcmem.va = NULL;
  2784. prot->dmaxfer.dstmem.va = NULL;
  2785. prot->dmaxfer.in_progress = FALSE;
  2786. prot->metadata_dbg = FALSE;
  2787. prot->rx_metadata_offset = 0;
  2788. prot->tx_metadata_offset = 0;
  2789. prot->txp_threshold = TXP_FLUSH_MAX_ITEMS_FLUSH_CNT;
  2790. /* To catch any rollover issues fast, starting with higher ioctl_trans_id */
  2791. prot->ioctl_trans_id = MAXBITVAL(NBITS(prot->ioctl_trans_id)) - BUFFER_BEFORE_ROLLOVER;
  2792. prot->ioctl_state = 0;
  2793. prot->ioctl_status = 0;
  2794. prot->ioctl_resplen = 0;
  2795. prot->ioctl_received = IOCTL_WAIT;
  2796. /* Initialize Common MsgBuf Rings */
  2797. prot->device_ipc_version = dhd->bus->api.fw_rev;
  2798. prot->host_ipc_version = PCIE_SHARED_VERSION;
  2799. prot->no_tx_resource = FALSE;
  2800. /* Init the host API version */
  2801. dhd_set_host_cap(dhd);
  2802. /* alloc and configure scb host address for dongle */
  2803. if ((ret = dhd_alloc_host_scbs(dhd))) {
  2804. return ret;
  2805. }
  2806. /* Register the interrupt function upfront */
  2807. /* remove corerev checks in data path */
  2808. /* do this after host/fw negotiation for DAR */
  2809. prot->mb_ring_fn = dhd_bus_get_mbintr_fn(dhd->bus);
  2810. prot->mb_2_ring_fn = dhd_bus_get_mbintr_2_fn(dhd->bus);
  2811. dhd->bus->_dar_war = (dhd->bus->sih->buscorerev < 64) ? TRUE : FALSE;
  2812. dhd_prot_ring_init(dhd, &prot->h2dring_ctrl_subn);
  2813. dhd_prot_ring_init(dhd, &prot->h2dring_rxp_subn);
  2814. dhd_prot_ring_init(dhd, &prot->d2hring_ctrl_cpln);
2815. /* Make it compatible with pre-rev7 firmware */
  2816. if (prot->active_ipc_version < PCIE_SHARED_VERSION_7) {
  2817. prot->d2hring_tx_cpln.item_len =
  2818. D2HRING_TXCMPLT_ITEMSIZE_PREREV7;
  2819. prot->d2hring_rx_cpln.item_len =
  2820. D2HRING_RXCMPLT_ITEMSIZE_PREREV7;
  2821. }
  2822. dhd_prot_ring_init(dhd, &prot->d2hring_tx_cpln);
  2823. dhd_prot_ring_init(dhd, &prot->d2hring_rx_cpln);
  2824. dhd_prot_d2h_sync_init(dhd);
  2825. dhd_prot_h2d_sync_init(dhd);
  2826. /* init the scratch buffer */
  2827. dhd_base_addr_htolpa(&base_addr, prot->d2h_dma_scratch_buf.pa);
  2828. dhd_bus_cmn_writeshared(dhd->bus, &base_addr, sizeof(base_addr),
  2829. D2H_DMA_SCRATCH_BUF, 0);
  2830. dhd_bus_cmn_writeshared(dhd->bus, &prot->d2h_dma_scratch_buf.len,
  2831. sizeof(prot->d2h_dma_scratch_buf.len), D2H_DMA_SCRATCH_BUF_LEN, 0);
  2832. /* If supported by the host, indicate the memory block
  2833. * for completion writes / submission reads to shared space
  2834. */
  2835. if (dhd->dma_d2h_ring_upd_support) {
  2836. dhd_base_addr_htolpa(&base_addr, prot->d2h_dma_indx_wr_buf.pa);
  2837. dhd_bus_cmn_writeshared(dhd->bus, &base_addr, sizeof(base_addr),
  2838. D2H_DMA_INDX_WR_BUF, 0);
  2839. dhd_base_addr_htolpa(&base_addr, prot->h2d_dma_indx_rd_buf.pa);
  2840. dhd_bus_cmn_writeshared(dhd->bus, &base_addr, sizeof(base_addr),
  2841. H2D_DMA_INDX_RD_BUF, 0);
  2842. }
  2843. if (dhd->dma_h2d_ring_upd_support || IDMA_ENAB(dhd)) {
  2844. dhd_base_addr_htolpa(&base_addr, prot->h2d_dma_indx_wr_buf.pa);
  2845. dhd_bus_cmn_writeshared(dhd->bus, &base_addr, sizeof(base_addr),
  2846. H2D_DMA_INDX_WR_BUF, 0);
  2847. dhd_base_addr_htolpa(&base_addr, prot->d2h_dma_indx_rd_buf.pa);
  2848. dhd_bus_cmn_writeshared(dhd->bus, &base_addr, sizeof(base_addr),
  2849. D2H_DMA_INDX_RD_BUF, 0);
  2850. }
  2851. /* Signal to the dongle that common ring init is complete */
  2852. if (dhd->hostrdy_after_init)
  2853. dhd_bus_hostready(dhd->bus);
  2854. /*
2855. * If the DMA-able buffers for flowrings need to come from a specific
2856. * contiguous memory region, then set up prot->flowrings_dma_buf here.
  2857. * dhd_prot_flowrings_pool_attach() will carve out DMA-able buffers from
  2858. * this contiguous memory region, for each of the flowrings.
  2859. */
  2860. /* Pre-allocate pool of msgbuf_ring for flowrings */
  2861. if (dhd_prot_flowrings_pool_attach(dhd) != BCME_OK) {
  2862. return BCME_ERROR;
  2863. }
  2864. /* If IFRM is enabled, wait for FW to setup the DMA channel */
  2865. if (IFRM_ENAB(dhd)) {
  2866. dhd_base_addr_htolpa(&base_addr, prot->h2d_ifrm_indx_wr_buf.pa);
  2867. dhd_bus_cmn_writeshared(dhd->bus, &base_addr, sizeof(base_addr),
  2868. H2D_IFRM_INDX_WR_BUF, 0);
  2869. }
2870. /* If IDMA is enabled and inited, wait for FW to set up the IDMA descriptors.
2871. * Wait just before configuring the doorbell.
  2872. */
  2873. #define IDMA_ENABLE_WAIT 10
  2874. if (IDMA_ACTIVE(dhd)) {
2875. /* Wait for the idma_en bit in the IDMAControl register to be set */
2876. /* Loop until idma_en is set or the wait count expires */
  2877. uint buscorerev = dhd->bus->sih->buscorerev;
  2878. idmacontrol = si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx,
  2879. IDMAControl(buscorerev), 0, 0);
  2880. while (!(idmacontrol & PCIE_IDMA_MODE_EN(buscorerev)) &&
  2881. (waitcount++ < IDMA_ENABLE_WAIT)) {
  2882. DHD_ERROR(("iDMA not enabled yet,waiting 1 ms c=%d IDMAControl = %08x\n",
  2883. waitcount, idmacontrol));
  2884. OSL_DELAY(1000); /* 1ms as its onetime only */
  2885. idmacontrol = si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx,
  2886. IDMAControl(buscorerev), 0, 0);
  2887. }
  2888. if (waitcount < IDMA_ENABLE_WAIT) {
  2889. DHD_ERROR(("iDMA enabled PCIEControl = %08x\n", idmacontrol));
  2890. } else {
  2891. DHD_ERROR(("Error: wait for iDMA timed out wait=%d IDMAControl = %08x\n",
  2892. waitcount, idmacontrol));
  2893. return BCME_ERROR;
  2894. }
  2895. }
  2896. /* Host should configure soft doorbells if needed ... here */
  2897. /* Post to dongle host configured soft doorbells */
  2898. dhd_msgbuf_ring_config_d2h_soft_doorbell(dhd);
  2899. dhd_msgbuf_rxbuf_post_ioctlresp_bufs(dhd);
  2900. dhd_msgbuf_rxbuf_post_event_bufs(dhd);
  2901. prot->no_retry = FALSE;
  2902. prot->no_aggr = FALSE;
  2903. prot->fixed_rate = FALSE;
  2904. /*
  2905. * Note that any communication with the Dongle should be added
2906. * below this point. Any other host data structure initialization that
2907. * needs to happen before the DPC starts executing should be done
2908. * before this point.
2909. * Because once we start sending H2D requests to the Dongle, the Dongle
2910. * may respond immediately, so the DPC context that handles the
  2911. * D2H response could preempt the context in which dhd_prot_init is running.
  2912. * We want to ensure that all the Host part of dhd_prot_init is
  2913. * done before that.
  2914. */
2915. /* See if info rings can be created; info rings should be created
2916. * only if the dongle does not support EDL
  2917. */
  2918. #ifdef EWP_EDL
  2919. if (dhd->bus->api.fw_rev >= PCIE_SHARED_VERSION_6 && !dhd->dongle_edl_support)
  2920. #else
  2921. if (dhd->bus->api.fw_rev >= PCIE_SHARED_VERSION_6)
  2922. #endif /* EWP_EDL */
  2923. {
  2924. if ((ret = dhd_prot_init_info_rings(dhd)) != BCME_OK) {
2925. /* For now log and proceed; further cleanup action may be necessary
2926. * when we have more clarity.
  2927. */
  2928. DHD_ERROR(("%s Info rings couldn't be created: Err Code%d",
  2929. __FUNCTION__, ret));
  2930. }
  2931. }
  2932. #ifdef EWP_EDL
  2933. /* Create Enhanced Debug Lane rings (EDL) if dongle supports it */
  2934. if (dhd->dongle_edl_support) {
  2935. if ((ret = dhd_prot_init_edl_rings(dhd)) != BCME_OK) {
  2936. DHD_ERROR(("%s EDL rings couldn't be created: Err Code%d",
  2937. __FUNCTION__, ret));
  2938. }
  2939. }
  2940. #endif /* EWP_EDL */
  2941. #ifdef DHD_HP2P
  2942. /* create HPP txcmpl/rxcmpl rings */
  2943. if (dhd->bus->api.fw_rev >= PCIE_SHARED_VERSION_7 && dhd->hp2p_capable) {
  2944. if ((ret = dhd_prot_init_hp2p_rings(dhd)) != BCME_OK) {
2945. /* For now log and proceed; further cleanup action may be necessary
2946. * when we have more clarity.
  2947. */
  2948. DHD_ERROR(("%s HP2P rings couldn't be created: Err Code%d",
  2949. __FUNCTION__, ret));
  2950. }
  2951. }
  2952. #endif /* DHD_HP2P */
  2953. return BCME_OK;
  2954. } /* dhd_prot_init */
  2955. /**
  2956. * dhd_prot_detach - PCIE FD protocol layer destructor.
  2957. * Unlink, frees allocated protocol memory (including dhd_prot)
  2958. */
  2959. void dhd_prot_detach(dhd_pub_t *dhd)
  2960. {
  2961. dhd_prot_t *prot = dhd->prot;
  2962. /* Stop the protocol module */
  2963. if (prot) {
  2964. /* free up all DMA-able buffers allocated during prot attach/init */
  2965. dhd_dma_buf_free(dhd, &prot->d2h_dma_scratch_buf);
  2966. dhd_dma_buf_free(dhd, &prot->retbuf);
  2967. dhd_dma_buf_free(dhd, &prot->ioctbuf);
  2968. dhd_dma_buf_free(dhd, &prot->host_bus_throughput_buf);
  2969. dhd_dma_buf_free(dhd, &prot->hostts_req_buf);
  2970. dhd_dma_buf_free(dhd, &prot->fw_trap_buf);
  2971. dhd_dma_buf_free(dhd, &prot->host_scb_buf);
  2972. /* DMA-able buffers for DMAing H2D/D2H WR/RD indices */
  2973. dhd_dma_buf_free(dhd, &prot->h2d_dma_indx_wr_buf);
  2974. dhd_dma_buf_free(dhd, &prot->h2d_dma_indx_rd_buf);
  2975. dhd_dma_buf_free(dhd, &prot->d2h_dma_indx_wr_buf);
  2976. dhd_dma_buf_free(dhd, &prot->d2h_dma_indx_rd_buf);
  2977. dhd_dma_buf_free(dhd, &prot->h2d_ifrm_indx_wr_buf);
  2978. /* Common MsgBuf Rings */
  2979. dhd_prot_ring_detach(dhd, &prot->h2dring_ctrl_subn);
  2980. dhd_prot_ring_detach(dhd, &prot->h2dring_rxp_subn);
  2981. dhd_prot_ring_detach(dhd, &prot->d2hring_ctrl_cpln);
  2982. dhd_prot_ring_detach(dhd, &prot->d2hring_tx_cpln);
  2983. dhd_prot_ring_detach(dhd, &prot->d2hring_rx_cpln);
  2984. /* Detach each DMA-able buffer and free the pool of msgbuf_ring_t */
  2985. dhd_prot_flowrings_pool_detach(dhd);
  2986. /* detach info rings */
  2987. dhd_prot_detach_info_rings(dhd);
  2988. #ifdef EWP_EDL
  2989. dhd_prot_detach_edl_rings(dhd);
  2990. #endif // endif
  2991. #ifdef DHD_HP2P
  2992. /* detach HPP rings */
  2993. dhd_prot_detach_hp2p_rings(dhd);
  2994. #endif /* DHD_HP2P */
  2995. /* if IOCTLRESP_USE_CONSTMEM is defined IOCTL PKTs use pktid_map_handle_ioctl
  2996. * handler and PKT memory is allocated using alloc_ioctl_return_buffer(), Otherwise
  2997. * they will be part of pktid_ctrl_map handler and PKT memory is allocated using
2998. * PKTGET_STATIC (if DHD_USE_STATIC_CTRLBUF is defined) OR PKTGET.
2999. * Similarly, for freeing PKT buffers DHD_NATIVE_TO_PKTID_FINI will be used,
3000. * which calls PKTFREE_STATIC (if DHD_USE_STATIC_CTRLBUF is defined) OR PKTFREE.
  3001. * Else if IOCTLRESP_USE_CONSTMEM is defined IOCTL PKTs will be freed using
  3002. * DHD_NATIVE_TO_PKTID_FINI_IOCTL which calls free_ioctl_return_buffer.
  3003. */
  3004. DHD_NATIVE_TO_PKTID_FINI(dhd, prot->pktid_ctrl_map);
  3005. DHD_NATIVE_TO_PKTID_FINI(dhd, prot->pktid_rx_map);
  3006. DHD_NATIVE_TO_PKTID_FINI(dhd, prot->pktid_tx_map);
  3007. #ifdef IOCTLRESP_USE_CONSTMEM
  3008. DHD_NATIVE_TO_PKTID_FINI_IOCTL(dhd, prot->pktid_map_handle_ioctl);
  3009. #endif // endif
  3010. #ifdef DHD_MAP_PKTID_LOGGING
  3011. DHD_PKTID_LOG_FINI(dhd, prot->pktid_dma_map);
  3012. DHD_PKTID_LOG_FINI(dhd, prot->pktid_dma_unmap);
  3013. #endif /* DHD_MAP_PKTID_LOGGING */
  3014. #if defined(DHD_LB_TXC)
  3015. if (prot->tx_compl_prod.buffer)
  3016. MFREE(dhd->osh, prot->tx_compl_prod.buffer,
  3017. sizeof(void*) * DHD_LB_WORKQ_SZ);
  3018. #endif /* DHD_LB_TXC */
  3019. #if defined(DHD_LB_RXC)
  3020. if (prot->rx_compl_prod.buffer)
  3021. MFREE(dhd->osh, prot->rx_compl_prod.buffer,
  3022. sizeof(void*) * DHD_LB_WORKQ_SZ);
  3023. #endif /* DHD_LB_RXC */
  3024. DHD_OS_PREFREE(dhd, dhd->prot, sizeof(dhd_prot_t));
  3025. dhd->prot = NULL;
  3026. }
  3027. } /* dhd_prot_detach */
  3028. /**
  3029. * dhd_prot_reset - Reset the protocol layer without freeing any objects.
  3030. * This may be invoked to soft reboot the dongle, without having to
  3031. * detach and attach the entire protocol layer.
  3032. *
  3033. * After dhd_prot_reset(), dhd_prot_init() may be invoked
3034. * without going through a dhd_prot_attach() phase.
  3035. */
  3036. void
  3037. dhd_prot_reset(dhd_pub_t *dhd)
  3038. {
  3039. struct dhd_prot *prot = dhd->prot;
  3040. DHD_TRACE(("%s\n", __FUNCTION__));
  3041. if (prot == NULL) {
  3042. return;
  3043. }
  3044. dhd_prot_flowrings_pool_reset(dhd);
  3045. /* Reset Common MsgBuf Rings */
  3046. dhd_prot_ring_reset(dhd, &prot->h2dring_ctrl_subn);
  3047. dhd_prot_ring_reset(dhd, &prot->h2dring_rxp_subn);
  3048. dhd_prot_ring_reset(dhd, &prot->d2hring_ctrl_cpln);
  3049. dhd_prot_ring_reset(dhd, &prot->d2hring_tx_cpln);
  3050. dhd_prot_ring_reset(dhd, &prot->d2hring_rx_cpln);
  3051. /* Reset info rings */
  3052. if (prot->h2dring_info_subn) {
  3053. dhd_prot_ring_reset(dhd, prot->h2dring_info_subn);
  3054. }
  3055. if (prot->d2hring_info_cpln) {
  3056. dhd_prot_ring_reset(dhd, prot->d2hring_info_cpln);
  3057. }
  3058. #ifdef EWP_EDL
  3059. if (prot->d2hring_edl) {
  3060. dhd_prot_ring_reset(dhd, prot->d2hring_edl);
  3061. }
  3062. #endif /* EWP_EDL */
  3063. /* Reset all DMA-able buffers allocated during prot attach */
  3064. dhd_dma_buf_reset(dhd, &prot->d2h_dma_scratch_buf);
  3065. dhd_dma_buf_reset(dhd, &prot->retbuf);
  3066. dhd_dma_buf_reset(dhd, &prot->ioctbuf);
  3067. dhd_dma_buf_reset(dhd, &prot->host_bus_throughput_buf);
  3068. dhd_dma_buf_reset(dhd, &prot->hostts_req_buf);
  3069. dhd_dma_buf_reset(dhd, &prot->fw_trap_buf);
  3070. dhd_dma_buf_reset(dhd, &prot->host_scb_buf);
  3071. dhd_dma_buf_reset(dhd, &prot->h2d_ifrm_indx_wr_buf);
3072. /* Reset all DMA-able buffers for DMAing H2D/D2H WR/RD indices */
  3073. dhd_dma_buf_reset(dhd, &prot->h2d_dma_indx_rd_buf);
  3074. dhd_dma_buf_reset(dhd, &prot->h2d_dma_indx_wr_buf);
  3075. dhd_dma_buf_reset(dhd, &prot->d2h_dma_indx_rd_buf);
  3076. dhd_dma_buf_reset(dhd, &prot->d2h_dma_indx_wr_buf);
  3077. prot->rx_metadata_offset = 0;
  3078. prot->tx_metadata_offset = 0;
  3079. prot->rxbufpost = 0;
  3080. prot->cur_event_bufs_posted = 0;
  3081. prot->cur_ioctlresp_bufs_posted = 0;
  3082. OSL_ATOMIC_INIT(dhd->osh, &prot->active_tx_count);
  3083. prot->data_seq_no = 0;
  3084. prot->ioctl_seq_no = 0;
  3085. prot->ioctl_state = 0;
  3086. prot->curr_ioctl_cmd = 0;
  3087. prot->ioctl_received = IOCTL_WAIT;
  3088. /* To catch any rollover issues fast, starting with higher ioctl_trans_id */
  3089. prot->ioctl_trans_id = MAXBITVAL(NBITS(prot->ioctl_trans_id)) - BUFFER_BEFORE_ROLLOVER;
3090. /* dhd_flow_rings_init is invoked from dhd_bus_start,
3091. * so flow rings must be deleted when the bus is stopped
3092. */
  3093. if (dhd->flow_rings_inited) {
  3094. dhd_flow_rings_deinit(dhd);
  3095. }
  3096. #ifdef DHD_HP2P
  3097. if (prot->d2hring_hp2p_txcpl) {
  3098. dhd_prot_ring_reset(dhd, prot->d2hring_hp2p_txcpl);
  3099. }
  3100. if (prot->d2hring_hp2p_rxcpl) {
  3101. dhd_prot_ring_reset(dhd, prot->d2hring_hp2p_rxcpl);
  3102. }
  3103. #endif /* DHD_HP2P */
  3104. /* Reset PKTID map */
  3105. DHD_NATIVE_TO_PKTID_RESET(dhd, prot->pktid_ctrl_map);
  3106. DHD_NATIVE_TO_PKTID_RESET(dhd, prot->pktid_rx_map);
  3107. DHD_NATIVE_TO_PKTID_RESET(dhd, prot->pktid_tx_map);
  3108. #ifdef IOCTLRESP_USE_CONSTMEM
  3109. DHD_NATIVE_TO_PKTID_RESET_IOCTL(dhd, prot->pktid_map_handle_ioctl);
  3110. #endif /* IOCTLRESP_USE_CONSTMEM */
  3111. #ifdef DMAMAP_STATS
  3112. dhd->dma_stats.txdata = dhd->dma_stats.txdata_sz = 0;
  3113. dhd->dma_stats.rxdata = dhd->dma_stats.rxdata_sz = 0;
  3114. #ifndef IOCTLRESP_USE_CONSTMEM
  3115. dhd->dma_stats.ioctl_rx = dhd->dma_stats.ioctl_rx_sz = 0;
  3116. #endif /* IOCTLRESP_USE_CONSTMEM */
  3117. dhd->dma_stats.event_rx = dhd->dma_stats.event_rx_sz = 0;
  3118. dhd->dma_stats.info_rx = dhd->dma_stats.info_rx_sz = 0;
  3119. dhd->dma_stats.tsbuf_rx = dhd->dma_stats.tsbuf_rx_sz = 0;
  3120. #endif /* DMAMAP_STATS */
  3121. } /* dhd_prot_reset */
  3122. #if defined(DHD_LB_RXP)
  3123. #define DHD_LB_DISPATCH_RX_PROCESS(dhdp) dhd_lb_dispatch_rx_process(dhdp)
  3124. #else /* !DHD_LB_RXP */
  3125. #define DHD_LB_DISPATCH_RX_PROCESS(dhdp) do { /* noop */ } while (0)
  3126. #endif /* !DHD_LB_RXP */
  3127. #if defined(DHD_LB_RXC)
  3128. #define DHD_LB_DISPATCH_RX_COMPL(dhdp) dhd_lb_dispatch_rx_compl(dhdp)
  3129. #else /* !DHD_LB_RXC */
  3130. #define DHD_LB_DISPATCH_RX_COMPL(dhdp) do { /* noop */ } while (0)
  3131. #endif /* !DHD_LB_RXC */
  3132. #if defined(DHD_LB_TXC)
  3133. #define DHD_LB_DISPATCH_TX_COMPL(dhdp) dhd_lb_dispatch_tx_compl(dhdp)
  3134. #else /* !DHD_LB_TXC */
  3135. #define DHD_LB_DISPATCH_TX_COMPL(dhdp) do { /* noop */ } while (0)
  3136. #endif /* !DHD_LB_TXC */
  3137. #if defined(DHD_LB)
  3138. /* DHD load balancing: deferral of work to another online CPU */
  3139. /* DHD_LB_TXC DHD_LB_RXC DHD_LB_RXP dispatchers, in dhd_linux.c */
  3140. extern void dhd_lb_tx_compl_dispatch(dhd_pub_t *dhdp);
  3141. extern void dhd_lb_rx_compl_dispatch(dhd_pub_t *dhdp);
  3142. extern void dhd_lb_rx_napi_dispatch(dhd_pub_t *dhdp);
  3143. extern void dhd_lb_rx_pkt_enqueue(dhd_pub_t *dhdp, void *pkt, int ifidx);
  3144. #if defined(DHD_LB_RXP)
  3145. /**
3146. * dhd_lb_dispatch_rx_process - load balance by dispatching Rx processing work
3147. * to other CPU cores
  3148. */
  3149. static INLINE void
  3150. dhd_lb_dispatch_rx_process(dhd_pub_t *dhdp)
  3151. {
  3152. dhd_lb_rx_napi_dispatch(dhdp); /* dispatch rx_process_napi */
  3153. }
  3154. #endif /* DHD_LB_RXP */
  3155. #if defined(DHD_LB_TXC)
  3156. /**
3157. * dhd_lb_dispatch_tx_compl - load balance by dispatching Tx completion work
3158. * to other CPU cores
  3159. */
  3160. static INLINE void
  3161. dhd_lb_dispatch_tx_compl(dhd_pub_t *dhdp, uint16 ring_idx)
  3162. {
  3163. bcm_workq_prod_sync(&dhdp->prot->tx_compl_prod); /* flush WR index */
  3164. dhd_lb_tx_compl_dispatch(dhdp); /* dispatch tx_compl_tasklet */
  3165. }
  3166. /**
3167. * DHD load balanced tx completion tasklet handler that performs the
3168. * freeing of packets on the selected CPU. Packet pointers are delivered to
  3169. * this tasklet via the tx complete workq.
  3170. */
  3171. void
  3172. dhd_lb_tx_compl_handler(unsigned long data)
  3173. {
  3174. int elem_ix;
  3175. void *pkt, **elem;
  3176. dmaaddr_t pa;
  3177. uint32 pa_len;
  3178. dhd_pub_t *dhd = (dhd_pub_t *)data;
  3179. dhd_prot_t *prot = dhd->prot;
  3180. bcm_workq_t *workq = &prot->tx_compl_cons;
  3181. uint32 count = 0;
  3182. int curr_cpu;
  3183. curr_cpu = get_cpu();
  3184. put_cpu();
  3185. DHD_LB_STATS_TXC_PERCPU_CNT_INCR(dhd);
  3186. while (1) {
  3187. elem_ix = bcm_ring_cons(WORKQ_RING(workq), DHD_LB_WORKQ_SZ);
  3188. if (elem_ix == BCM_RING_EMPTY) {
  3189. break;
  3190. }
  3191. elem = WORKQ_ELEMENT(void *, workq, elem_ix);
  3192. pkt = *elem;
  3193. DHD_INFO(("%s: tx_compl_cons pkt<%p>\n", __FUNCTION__, pkt));
  3194. OSL_PREFETCH(PKTTAG(pkt));
  3195. OSL_PREFETCH(pkt);
  3196. pa = DHD_PKTTAG_PA((dhd_pkttag_fr_t *)PKTTAG(pkt));
  3197. pa_len = DHD_PKTTAG_PA_LEN((dhd_pkttag_fr_t *)PKTTAG(pkt));
  3198. DMA_UNMAP(dhd->osh, pa, pa_len, DMA_RX, 0, 0);
  3199. #if defined(BCMPCIE)
  3200. dhd_txcomplete(dhd, pkt, true);
  3201. #ifdef DHD_4WAYM4_FAIL_DISCONNECT
  3202. dhd_eap_txcomplete(dhd, pkt, TRUE, txstatus->cmn_hdr.if_id);
  3203. #endif /* DHD_4WAYM4_FAIL_DISCONNECT */
  3204. #endif // endif
  3205. PKTFREE(dhd->osh, pkt, TRUE);
  3206. count++;
  3207. }
  3208. /* smp_wmb(); */
  3209. bcm_workq_cons_sync(workq);
  3210. DHD_LB_STATS_UPDATE_TXC_HISTO(dhd, count);
  3211. }
  3212. #endif /* DHD_LB_TXC */
  3213. #if defined(DHD_LB_RXC)
  3214. /**
3215. * dhd_lb_dispatch_rx_compl - load balance by dispatching rx completion work
3216. * to other CPU cores
  3217. */
  3218. static INLINE void
  3219. dhd_lb_dispatch_rx_compl(dhd_pub_t *dhdp)
  3220. {
  3221. dhd_prot_t *prot = dhdp->prot;
3222. /* Schedule the tasklet only if we have to */
  3223. if (prot->rxbufpost <= (prot->max_rxbufpost - RXBUFPOST_THRESHOLD)) {
  3224. /* flush WR index */
  3225. bcm_workq_prod_sync(&dhdp->prot->rx_compl_prod);
  3226. dhd_lb_rx_compl_dispatch(dhdp); /* dispatch rx_compl_tasklet */
  3227. }
  3228. }
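/**
 * DHD load balanced rx completion tasklet handler. Re-posts rx buffers to the
 * dongle, re-using the pktids delivered via the rx completion workq, and then
 * syncs the workq consumer index.
 */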
  3229. void
  3230. dhd_lb_rx_compl_handler(unsigned long data)
  3231. {
  3232. dhd_pub_t *dhd = (dhd_pub_t *)data;
  3233. bcm_workq_t *workq = &dhd->prot->rx_compl_cons;
  3234. DHD_LB_STATS_RXC_PERCPU_CNT_INCR(dhd);
  3235. dhd_msgbuf_rxbuf_post(dhd, TRUE); /* re-use pktids */
  3236. bcm_workq_cons_sync(workq);
  3237. }
  3238. #endif /* DHD_LB_RXC */
  3239. #endif /* DHD_LB */
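/** Record the rx data offset to be applied when processing rx completions */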
  3240. void
  3241. dhd_prot_rx_dataoffset(dhd_pub_t *dhd, uint32 rx_offset)
  3242. {
  3243. dhd_prot_t *prot = dhd->prot;
  3244. prot->rx_dataoffset = rx_offset;
  3245. }
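/**
 * dhd_check_create_info_rings - allocate and attach (if not already present)
 * the debug/info submit and completion rings. Their ring ids are placed just
 * past the dynamic submission rings so they do not collide with tx flowrings.
 * On re-entry after a dhd_prot_reset() the previously attached rings are reused.
 */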
  3246. static int
  3247. dhd_check_create_info_rings(dhd_pub_t *dhd)
  3248. {
  3249. dhd_prot_t *prot = dhd->prot;
  3250. int ret = BCME_ERROR;
  3251. uint16 ringid;
  3252. {
  3253. /* dongle may increase max_submission_rings so keep
  3254. * ringid at end of dynamic rings
  3255. */
  3256. ringid = dhd->bus->max_tx_flowrings +
  3257. (dhd->bus->max_submission_rings - dhd->bus->max_tx_flowrings) +
  3258. BCMPCIE_H2D_COMMON_MSGRINGS;
  3259. }
  3260. if (prot->d2hring_info_cpln) {
  3261. /* for d2hring re-entry case, clear inited flag */
  3262. prot->d2hring_info_cpln->inited = FALSE;
  3263. }
  3264. if (prot->h2dring_info_subn && prot->d2hring_info_cpln) {
3265. return BCME_OK; /* dhd_prot_init re-entry after a dhd_prot_reset */
  3266. }
  3267. if (prot->h2dring_info_subn == NULL) {
  3268. prot->h2dring_info_subn = MALLOCZ(prot->osh, sizeof(msgbuf_ring_t));
  3269. if (prot->h2dring_info_subn == NULL) {
  3270. DHD_ERROR(("%s: couldn't alloc memory for h2dring_info_subn\n",
  3271. __FUNCTION__));
  3272. return BCME_NOMEM;
  3273. }
  3274. DHD_INFO(("%s: about to create debug submit ring\n", __FUNCTION__));
  3275. ret = dhd_prot_ring_attach(dhd, prot->h2dring_info_subn, "h2dinfo",
  3276. H2DRING_DYNAMIC_INFO_MAX_ITEM, H2DRING_INFO_BUFPOST_ITEMSIZE,
  3277. ringid);
  3278. if (ret != BCME_OK) {
  3279. DHD_ERROR(("%s: couldn't alloc resources for dbg submit ring\n",
  3280. __FUNCTION__));
  3281. goto err;
  3282. }
  3283. }
  3284. if (prot->d2hring_info_cpln == NULL) {
  3285. prot->d2hring_info_cpln = MALLOCZ(prot->osh, sizeof(msgbuf_ring_t));
  3286. if (prot->d2hring_info_cpln == NULL) {
  3287. DHD_ERROR(("%s: couldn't alloc memory for h2dring_info_subn\n",
  3288. __FUNCTION__));
  3289. return BCME_NOMEM;
  3290. }
  3291. /* create the debug info completion ring next to debug info submit ring
  3292. * ringid = id next to debug info submit ring
  3293. */
  3294. ringid = ringid + 1;
  3295. DHD_INFO(("%s: about to create debug cpl ring\n", __FUNCTION__));
  3296. ret = dhd_prot_ring_attach(dhd, prot->d2hring_info_cpln, "d2hinfo",
  3297. D2HRING_DYNAMIC_INFO_MAX_ITEM, D2HRING_INFO_BUFCMPLT_ITEMSIZE,
  3298. ringid);
  3299. if (ret != BCME_OK) {
  3300. DHD_ERROR(("%s: couldn't alloc resources for dbg cpl ring\n",
  3301. __FUNCTION__));
  3302. dhd_prot_ring_detach(dhd, prot->h2dring_info_subn);
  3303. goto err;
  3304. }
  3305. }
  3306. return ret;
  3307. err:
  3308. MFREE(prot->osh, prot->h2dring_info_subn, sizeof(msgbuf_ring_t));
  3309. prot->h2dring_info_subn = NULL;
  3310. if (prot->d2hring_info_cpln) {
  3311. MFREE(prot->osh, prot->d2hring_info_cpln, sizeof(msgbuf_ring_t));
  3312. prot->d2hring_info_cpln = NULL;
  3313. }
  3314. return ret;
  3315. } /* dhd_check_create_info_rings */
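/**
 * dhd_prot_init_info_rings - create the debug/info rings in the dongle by
 * sending the d2h and h2d ring-create requests, and initialize the epoch and
 * phase state for both rings.
 */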
  3316. int
  3317. dhd_prot_init_info_rings(dhd_pub_t *dhd)
  3318. {
  3319. dhd_prot_t *prot = dhd->prot;
  3320. int ret = BCME_OK;
  3321. if ((ret = dhd_check_create_info_rings(dhd)) != BCME_OK) {
  3322. DHD_ERROR(("%s: info rings aren't created! \n",
  3323. __FUNCTION__));
  3324. return ret;
  3325. }
  3326. if ((prot->d2hring_info_cpln->inited) || (prot->d2hring_info_cpln->create_pending)) {
  3327. DHD_INFO(("Info completion ring was created!\n"));
  3328. return ret;
  3329. }
  3330. DHD_TRACE(("trying to send create d2h info ring: id %d\n", prot->d2hring_info_cpln->idx));
  3331. ret = dhd_send_d2h_ringcreate(dhd, prot->d2hring_info_cpln,
  3332. BCMPCIE_D2H_RING_TYPE_DBGBUF_CPL, DHD_D2H_DBGRING_REQ_PKTID);
  3333. if (ret != BCME_OK)
  3334. return ret;
  3335. prot->h2dring_info_subn->seqnum = H2D_EPOCH_INIT_VAL;
  3336. prot->h2dring_info_subn->current_phase = 0;
  3337. prot->d2hring_info_cpln->seqnum = D2H_EPOCH_INIT_VAL;
  3338. prot->d2hring_info_cpln->current_phase = BCMPCIE_CMNHDR_PHASE_BIT_INIT;
  3339. DHD_TRACE(("trying to send create h2d info ring id %d\n", prot->h2dring_info_subn->idx));
  3340. prot->h2dring_info_subn->n_completion_ids = 1;
  3341. prot->h2dring_info_subn->compeltion_ring_ids[0] = prot->d2hring_info_cpln->idx;
  3342. ret = dhd_send_h2d_ringcreate(dhd, prot->h2dring_info_subn,
  3343. BCMPCIE_H2D_RING_TYPE_DBGBUF_SUBMIT, DHD_H2D_DBGRING_REQ_PKTID);
3344. /* Note that there is no way to delete a d2h or h2d ring in case either create fails,
3345. * so we cannot clean up if one ring was created while the other failed
3346. */
  3347. return ret;
  3348. } /* dhd_prot_init_info_rings */
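/** Detach and free the debug/info submit and completion rings, if allocated */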
  3349. static void
  3350. dhd_prot_detach_info_rings(dhd_pub_t *dhd)
  3351. {
  3352. if (dhd->prot->h2dring_info_subn) {
  3353. dhd_prot_ring_detach(dhd, dhd->prot->h2dring_info_subn);
  3354. MFREE(dhd->prot->osh, dhd->prot->h2dring_info_subn, sizeof(msgbuf_ring_t));
  3355. dhd->prot->h2dring_info_subn = NULL;
  3356. }
  3357. if (dhd->prot->d2hring_info_cpln) {
  3358. dhd_prot_ring_detach(dhd, dhd->prot->d2hring_info_cpln);
  3359. MFREE(dhd->prot->osh, dhd->prot->d2hring_info_cpln, sizeof(msgbuf_ring_t));
  3360. dhd->prot->d2hring_info_cpln = NULL;
  3361. }
  3362. }
  3363. #ifdef DHD_HP2P
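/**
 * dhd_check_create_hp2p_rings - allocate and attach (if not already present)
 * the HP2P tx and rx completion rings, using the last two dynamic ring indices.
 * On re-entry the existing rings are reused with their inited flag cleared.
 */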
  3364. static int
  3365. dhd_check_create_hp2p_rings(dhd_pub_t *dhd)
  3366. {
  3367. dhd_prot_t *prot = dhd->prot;
  3368. int ret = BCME_ERROR;
  3369. uint16 ringid;
  3370. /* Last 2 dynamic ring indices are used by hp2p rings */
  3371. ringid = dhd->bus->max_submission_rings + dhd->bus->max_completion_rings - 2;
  3372. if (prot->d2hring_hp2p_txcpl == NULL) {
  3373. prot->d2hring_hp2p_txcpl = MALLOCZ(prot->osh, sizeof(msgbuf_ring_t));
  3374. if (prot->d2hring_hp2p_txcpl == NULL) {
  3375. DHD_ERROR(("%s: couldn't alloc memory for d2hring_hp2p_txcpl\n",
  3376. __FUNCTION__));
  3377. return BCME_NOMEM;
  3378. }
  3379. DHD_INFO(("%s: about to create hp2p txcpl ring\n", __FUNCTION__));
  3380. ret = dhd_prot_ring_attach(dhd, prot->d2hring_hp2p_txcpl, "d2hhp2p_txcpl",
  3381. dhd_bus_get_hp2p_ring_max_size(dhd->bus, TRUE), D2HRING_TXCMPLT_ITEMSIZE,
  3382. ringid);
  3383. if (ret != BCME_OK) {
  3384. DHD_ERROR(("%s: couldn't alloc resources for hp2p txcpl ring\n",
  3385. __FUNCTION__));
  3386. goto err2;
  3387. }
  3388. } else {
  3389. /* for re-entry case, clear inited flag */
  3390. prot->d2hring_hp2p_txcpl->inited = FALSE;
  3391. }
  3392. if (prot->d2hring_hp2p_rxcpl == NULL) {
  3393. prot->d2hring_hp2p_rxcpl = MALLOCZ(prot->osh, sizeof(msgbuf_ring_t));
  3394. if (prot->d2hring_hp2p_rxcpl == NULL) {
  3395. DHD_ERROR(("%s: couldn't alloc memory for d2hring_hp2p_rxcpl\n",
  3396. __FUNCTION__));
  3397. return BCME_NOMEM;
  3398. }
  3399. /* create the hp2p rx completion ring next to hp2p tx compl ring
  3400. * ringid = id next to hp2p tx compl ring
  3401. */
  3402. ringid = ringid + 1;
  3403. DHD_INFO(("%s: about to create hp2p rxcpl ring\n", __FUNCTION__));
  3404. ret = dhd_prot_ring_attach(dhd, prot->d2hring_hp2p_rxcpl, "d2hhp2p_rxcpl",
  3405. dhd_bus_get_hp2p_ring_max_size(dhd->bus, FALSE), D2HRING_RXCMPLT_ITEMSIZE,
  3406. ringid);
  3407. if (ret != BCME_OK) {
  3408. DHD_ERROR(("%s: couldn't alloc resources for hp2p rxcpl ring\n",
  3409. __FUNCTION__));
  3410. goto err1;
  3411. }
  3412. } else {
  3413. /* for re-entry case, clear inited flag */
  3414. prot->d2hring_hp2p_rxcpl->inited = FALSE;
  3415. }
  3416. return ret;
  3417. err1:
  3418. MFREE(prot->osh, prot->d2hring_hp2p_rxcpl, sizeof(msgbuf_ring_t));
  3419. prot->d2hring_hp2p_rxcpl = NULL;
  3420. err2:
  3421. MFREE(prot->osh, prot->d2hring_hp2p_txcpl, sizeof(msgbuf_ring_t));
  3422. prot->d2hring_hp2p_txcpl = NULL;
  3423. return ret;
  3424. } /* dhd_check_create_hp2p_rings */
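/**
 * dhd_prot_init_hp2p_rings - create the HP2P tx/rx completion rings in the
 * dongle by sending d2h ring-create requests, and initialize their epoch and
 * phase state.
 */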
  3425. int
  3426. dhd_prot_init_hp2p_rings(dhd_pub_t *dhd)
  3427. {
  3428. dhd_prot_t *prot = dhd->prot;
  3429. int ret = BCME_OK;
  3430. dhd->hp2p_ring_active = FALSE;
  3431. if ((ret = dhd_check_create_hp2p_rings(dhd)) != BCME_OK) {
  3432. DHD_ERROR(("%s: hp2p rings aren't created! \n",
  3433. __FUNCTION__));
  3434. return ret;
  3435. }
  3436. if ((prot->d2hring_hp2p_txcpl->inited) || (prot->d2hring_hp2p_txcpl->create_pending)) {
  3437. DHD_INFO(("hp2p tx completion ring was created!\n"));
  3438. return ret;
  3439. }
  3440. DHD_TRACE(("trying to send create d2h hp2p txcpl ring: id %d\n",
  3441. prot->d2hring_hp2p_txcpl->idx));
  3442. ret = dhd_send_d2h_ringcreate(dhd, prot->d2hring_hp2p_txcpl,
  3443. BCMPCIE_D2H_RING_TYPE_HPP_TX_CPL, DHD_D2H_HPPRING_TXREQ_PKTID);
  3444. if (ret != BCME_OK)
  3445. return ret;
  3446. prot->d2hring_hp2p_txcpl->seqnum = D2H_EPOCH_INIT_VAL;
  3447. prot->d2hring_hp2p_txcpl->current_phase = BCMPCIE_CMNHDR_PHASE_BIT_INIT;
  3448. if ((prot->d2hring_hp2p_rxcpl->inited) || (prot->d2hring_hp2p_rxcpl->create_pending)) {
  3449. DHD_INFO(("hp2p rx completion ring was created!\n"));
  3450. return ret;
  3451. }
  3452. DHD_TRACE(("trying to send create d2h hp2p rxcpl ring: id %d\n",
  3453. prot->d2hring_hp2p_rxcpl->idx));
  3454. ret = dhd_send_d2h_ringcreate(dhd, prot->d2hring_hp2p_rxcpl,
  3455. BCMPCIE_D2H_RING_TYPE_HPP_RX_CPL, DHD_D2H_HPPRING_RXREQ_PKTID);
  3456. if (ret != BCME_OK)
  3457. return ret;
  3458. prot->d2hring_hp2p_rxcpl->seqnum = D2H_EPOCH_INIT_VAL;
  3459. prot->d2hring_hp2p_rxcpl->current_phase = BCMPCIE_CMNHDR_PHASE_BIT_INIT;
3460. /* Note that there is no way to delete a d2h or h2d ring in case either create fails,
3461. * so we cannot clean up if one ring was created while the other failed
3462. */
  3463. return BCME_OK;
  3464. } /* dhd_prot_init_hp2p_rings */
  3465. static void
  3466. dhd_prot_detach_hp2p_rings(dhd_pub_t *dhd)
  3467. {
  3468. if (dhd->prot->d2hring_hp2p_txcpl) {
  3469. dhd_prot_ring_detach(dhd, dhd->prot->d2hring_hp2p_txcpl);
  3470. MFREE(dhd->prot->osh, dhd->prot->d2hring_hp2p_txcpl, sizeof(msgbuf_ring_t));
  3471. dhd->prot->d2hring_hp2p_txcpl = NULL;
  3472. }
  3473. if (dhd->prot->d2hring_hp2p_rxcpl) {
  3474. dhd_prot_ring_detach(dhd, dhd->prot->d2hring_hp2p_rxcpl);
  3475. MFREE(dhd->prot->osh, dhd->prot->d2hring_hp2p_rxcpl, sizeof(msgbuf_ring_t));
  3476. dhd->prot->d2hring_hp2p_rxcpl = NULL;
  3477. }
  3478. }
  3479. #endif /* DHD_HP2P */
  3480. #ifdef EWP_EDL
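/**
 * dhd_check_create_edl_rings - allocate and attach (if not already present) the
 * single D2H EDL completion ring. It reuses the ring id that the info
 * completion ring would otherwise occupy, since EDL and info rings are mutually
 * exclusive.
 */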
  3481. static int
  3482. dhd_check_create_edl_rings(dhd_pub_t *dhd)
  3483. {
  3484. dhd_prot_t *prot = dhd->prot;
  3485. int ret = BCME_ERROR;
  3486. uint16 ringid;
  3487. {
  3488. /* dongle may increase max_submission_rings so keep
  3489. * ringid at end of dynamic rings (re-use info ring cpl ring id)
  3490. */
  3491. ringid = dhd->bus->max_tx_flowrings +
  3492. (dhd->bus->max_submission_rings - dhd->bus->max_tx_flowrings) +
  3493. BCMPCIE_H2D_COMMON_MSGRINGS + 1;
  3494. }
  3495. if (prot->d2hring_edl) {
  3496. prot->d2hring_edl->inited = FALSE;
3497. return BCME_OK; /* dhd_prot_init re-entry after a dhd_prot_reset */
  3498. }
  3499. if (prot->d2hring_edl == NULL) {
  3500. prot->d2hring_edl = MALLOCZ(prot->osh, sizeof(msgbuf_ring_t));
  3501. if (prot->d2hring_edl == NULL) {
  3502. DHD_ERROR(("%s: couldn't alloc memory for d2hring_edl\n",
  3503. __FUNCTION__));
  3504. return BCME_NOMEM;
  3505. }
  3506. DHD_ERROR(("%s: about to create EDL ring, ringid: %u \n", __FUNCTION__,
  3507. ringid));
  3508. ret = dhd_prot_ring_attach(dhd, prot->d2hring_edl, "d2hring_edl",
  3509. D2HRING_EDL_MAX_ITEM, D2HRING_EDL_ITEMSIZE,
  3510. ringid);
  3511. if (ret != BCME_OK) {
  3512. DHD_ERROR(("%s: couldn't alloc resources for EDL ring\n",
  3513. __FUNCTION__));
  3514. goto err;
  3515. }
  3516. }
  3517. return ret;
  3518. err:
  3519. MFREE(prot->osh, prot->d2hring_edl, sizeof(msgbuf_ring_t));
  3520. prot->d2hring_edl = NULL;
  3521. return ret;
3522. } /* dhd_check_create_edl_rings */
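/**
 * dhd_prot_init_edl_rings - create the EDL completion ring in the dongle by
 * sending a d2h ring-create request, and initialize its epoch and phase state.
 */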
  3523. int
  3524. dhd_prot_init_edl_rings(dhd_pub_t *dhd)
  3525. {
  3526. dhd_prot_t *prot = dhd->prot;
  3527. int ret = BCME_ERROR;
  3528. if ((ret = dhd_check_create_edl_rings(dhd)) != BCME_OK) {
  3529. DHD_ERROR(("%s: EDL rings aren't created! \n",
  3530. __FUNCTION__));
  3531. return ret;
  3532. }
  3533. if ((prot->d2hring_edl->inited) || (prot->d2hring_edl->create_pending)) {
  3534. DHD_INFO(("EDL completion ring was created!\n"));
  3535. return ret;
  3536. }
  3537. DHD_ERROR(("trying to send create d2h edl ring: idx %d\n", prot->d2hring_edl->idx));
  3538. ret = dhd_send_d2h_ringcreate(dhd, prot->d2hring_edl,
  3539. BCMPCIE_D2H_RING_TYPE_EDL, DHD_D2H_DBGRING_REQ_PKTID);
  3540. if (ret != BCME_OK)
  3541. return ret;
  3542. prot->d2hring_edl->seqnum = D2H_EPOCH_INIT_VAL;
  3543. prot->d2hring_edl->current_phase = BCMPCIE_CMNHDR_PHASE_BIT_INIT;
  3544. return BCME_OK;
3545. } /* dhd_prot_init_edl_rings */
  3546. static void
  3547. dhd_prot_detach_edl_rings(dhd_pub_t *dhd)
  3548. {
  3549. if (dhd->prot->d2hring_edl) {
  3550. dhd_prot_ring_detach(dhd, dhd->prot->d2hring_edl);
  3551. MFREE(dhd->prot->osh, dhd->prot->d2hring_edl, sizeof(msgbuf_ring_t));
  3552. dhd->prot->d2hring_edl = NULL;
  3553. }
  3554. }
  3555. #endif /* EWP_EDL */
  3556. /**
  3557. * Initialize protocol: sync w/dongle state.
  3558. * Sets dongle media info (iswl, drv_version, mac address).
  3559. */
  3560. int dhd_sync_with_dongle(dhd_pub_t *dhd)
  3561. {
  3562. int ret = 0;
  3563. wlc_rev_info_t revinfo;
  3564. char buf[128];
  3565. dhd_prot_t *prot = dhd->prot;
  3566. DHD_TRACE(("%s: Enter\n", __FUNCTION__));
  3567. dhd_os_set_ioctl_resp_timeout(IOCTL_RESP_TIMEOUT);
  3568. /* Post ts buffer after shim layer is attached */
  3569. ret = dhd_msgbuf_rxbuf_post_ts_bufs(dhd);
  3570. #ifndef OEM_ANDROID
  3571. /* Get the device MAC address */
  3572. memset(buf, 0, sizeof(buf));
  3573. strncpy(buf, "cur_etheraddr", sizeof(buf) - 1);
  3574. ret = dhd_wl_ioctl_cmd(dhd, WLC_GET_VAR, buf, sizeof(buf), FALSE, 0);
  3575. if (ret < 0) {
  3576. DHD_ERROR(("%s: GET iovar cur_etheraddr FAILED\n", __FUNCTION__));
  3577. goto done;
  3578. }
  3579. memcpy(dhd->mac.octet, buf, ETHER_ADDR_LEN);
  3580. if (dhd_msg_level & DHD_INFO_VAL) {
  3581. bcm_print_bytes("CUR_ETHERADDR ", (uchar *)buf, ETHER_ADDR_LEN);
  3582. }
  3583. #endif /* OEM_ANDROID */
  3584. #ifdef DHD_FW_COREDUMP
  3585. /* Check the memdump capability */
  3586. dhd_get_memdump_info(dhd);
  3587. #endif /* DHD_FW_COREDUMP */
  3588. #ifdef BCMASSERT_LOG
  3589. dhd_get_assert_info(dhd);
  3590. #endif /* BCMASSERT_LOG */
  3591. /* Get the device rev info */
  3592. memset(&revinfo, 0, sizeof(revinfo));
  3593. ret = dhd_wl_ioctl_cmd(dhd, WLC_GET_REVINFO, &revinfo, sizeof(revinfo), FALSE, 0);
  3594. if (ret < 0) {
  3595. DHD_ERROR(("%s: GET revinfo FAILED\n", __FUNCTION__));
  3596. goto done;
  3597. }
  3598. DHD_ERROR(("%s: GET_REVINFO device 0x%x, vendor 0x%x, chipnum 0x%x\n", __FUNCTION__,
  3599. revinfo.deviceid, revinfo.vendorid, revinfo.chipnum));
  3600. /* Get the RxBuf post size */
  3601. memset(buf, 0, sizeof(buf));
  3602. bcm_mkiovar("rxbufpost_sz", NULL, 0, buf, sizeof(buf));
  3603. ret = dhd_wl_ioctl_cmd(dhd, WLC_GET_VAR, buf, sizeof(buf), FALSE, 0);
  3604. if (ret < 0) {
  3605. DHD_ERROR(("%s: GET RxBuf post FAILED, default to %d\n",
  3606. __FUNCTION__, DHD_FLOWRING_RX_BUFPOST_PKTSZ));
  3607. prot->rxbufpost_sz = DHD_FLOWRING_RX_BUFPOST_PKTSZ;
  3608. } else {
  3609. memcpy_s(&(prot->rxbufpost_sz), sizeof(prot->rxbufpost_sz), buf, sizeof(uint16));
  3610. if (prot->rxbufpost_sz > DHD_FLOWRING_RX_BUFPOST_PKTSZ_MAX) {
  3611. DHD_ERROR(("%s: Invalid RxBuf post size : %d, default to %d\n",
  3612. __FUNCTION__, prot->rxbufpost_sz, DHD_FLOWRING_RX_BUFPOST_PKTSZ));
  3613. prot->rxbufpost_sz = DHD_FLOWRING_RX_BUFPOST_PKTSZ;
  3614. } else {
  3615. DHD_ERROR(("%s: RxBuf Post : %d\n", __FUNCTION__, prot->rxbufpost_sz));
  3616. }
  3617. }
  3618. /* Post buffers for packet reception */
  3619. dhd_msgbuf_rxbuf_post(dhd, FALSE); /* alloc pkt ids */
  3620. DHD_SSSR_DUMP_INIT(dhd);
  3621. dhd_process_cid_mac(dhd, TRUE);
  3622. ret = dhd_preinit_ioctls(dhd);
  3623. dhd_process_cid_mac(dhd, FALSE);
  3624. #if defined(DHD_H2D_LOG_TIME_SYNC)
  3625. #ifdef DHD_HP2P
  3626. if (FW_SUPPORTED(dhd, h2dlogts) || dhd->hp2p_capable) {
  3627. if (dhd->hp2p_enable) {
  3628. dhd->dhd_rte_time_sync_ms = DHD_H2D_LOG_TIME_STAMP_MATCH / 40;
  3629. } else {
  3630. dhd->dhd_rte_time_sync_ms = DHD_H2D_LOG_TIME_STAMP_MATCH;
  3631. }
  3632. #else
  3633. if (FW_SUPPORTED(dhd, h2dlogts)) {
  3634. dhd->dhd_rte_time_sync_ms = DHD_H2D_LOG_TIME_STAMP_MATCH;
  3635. #endif // endif
  3636. dhd->bus->dhd_rte_time_sync_count = OSL_SYSUPTIME_US();
  3637. /* This is during initialization. */
  3638. dhd_h2d_log_time_sync(dhd);
  3639. } else {
  3640. dhd->dhd_rte_time_sync_ms = 0;
  3641. }
  3642. #endif /* DHD_H2D_LOG_TIME_SYNC || DHD_HP2P */
  3643. /* Always assumes wl for now */
  3644. dhd->iswl = TRUE;
  3645. done:
  3646. return ret;
  3647. } /* dhd_sync_with_dongle */
  3648. #define DHD_DBG_SHOW_METADATA 0
  3649. #if DHD_DBG_SHOW_METADATA
  3650. static void BCMFASTPATH
  3651. dhd_prot_print_metadata(dhd_pub_t *dhd, void *ptr, int len)
  3652. {
  3653. uint8 tlv_t;
  3654. uint8 tlv_l;
  3655. uint8 *tlv_v = (uint8 *)ptr;
  3656. if (len <= BCMPCIE_D2H_METADATA_HDRLEN)
  3657. return;
  3658. len -= BCMPCIE_D2H_METADATA_HDRLEN;
  3659. tlv_v += BCMPCIE_D2H_METADATA_HDRLEN;
  3660. while (len > TLV_HDR_LEN) {
  3661. tlv_t = tlv_v[TLV_TAG_OFF];
  3662. tlv_l = tlv_v[TLV_LEN_OFF];
  3663. len -= TLV_HDR_LEN;
  3664. tlv_v += TLV_HDR_LEN;
  3665. if (len < tlv_l)
  3666. break;
  3667. if ((tlv_t == 0) || (tlv_t == WLFC_CTL_TYPE_FILLER))
  3668. break;
  3669. switch (tlv_t) {
  3670. case WLFC_CTL_TYPE_TXSTATUS: {
  3671. uint32 txs;
  3672. memcpy(&txs, tlv_v, sizeof(uint32));
  3673. if (tlv_l < (sizeof(wl_txstatus_additional_info_t) + sizeof(uint32))) {
  3674. printf("METADATA TX_STATUS: %08x\n", txs);
  3675. } else {
  3676. wl_txstatus_additional_info_t tx_add_info;
  3677. memcpy(&tx_add_info, tlv_v + sizeof(uint32),
  3678. sizeof(wl_txstatus_additional_info_t));
  3679. printf("METADATA TX_STATUS: %08x WLFCTS[%04x | %08x - %08x - %08x]"
  3680. " rate = %08x tries = %d - %d\n", txs,
  3681. tx_add_info.seq, tx_add_info.entry_ts,
  3682. tx_add_info.enq_ts, tx_add_info.last_ts,
  3683. tx_add_info.rspec, tx_add_info.rts_cnt,
  3684. tx_add_info.tx_cnt);
  3685. }
  3686. } break;
  3687. case WLFC_CTL_TYPE_RSSI: {
  3688. if (tlv_l == 1)
  3689. printf("METADATA RX_RSSI: rssi = %d\n", *tlv_v);
  3690. else
  3691. printf("METADATA RX_RSSI[%04x]: rssi = %d snr = %d\n",
  3692. (*(tlv_v + 3) << 8) | *(tlv_v + 2),
  3693. (int8)(*tlv_v), *(tlv_v + 1));
  3694. } break;
  3695. case WLFC_CTL_TYPE_FIFO_CREDITBACK:
  3696. bcm_print_bytes("METADATA FIFO_CREDITBACK", tlv_v, tlv_l);
  3697. break;
  3698. case WLFC_CTL_TYPE_TX_ENTRY_STAMP:
  3699. bcm_print_bytes("METADATA TX_ENTRY", tlv_v, tlv_l);
  3700. break;
  3701. case WLFC_CTL_TYPE_RX_STAMP: {
  3702. struct {
  3703. uint32 rspec;
  3704. uint32 bus_time;
  3705. uint32 wlan_time;
  3706. } rx_tmstamp;
  3707. memcpy(&rx_tmstamp, tlv_v, sizeof(rx_tmstamp));
  3708. printf("METADATA RX TIMESTMAP: WLFCTS[%08x - %08x] rate = %08x\n",
  3709. rx_tmstamp.wlan_time, rx_tmstamp.bus_time, rx_tmstamp.rspec);
  3710. } break;
  3711. case WLFC_CTL_TYPE_TRANS_ID:
  3712. bcm_print_bytes("METADATA TRANS_ID", tlv_v, tlv_l);
  3713. break;
  3714. case WLFC_CTL_TYPE_COMP_TXSTATUS:
  3715. bcm_print_bytes("METADATA COMP_TXSTATUS", tlv_v, tlv_l);
  3716. break;
  3717. default:
  3718. bcm_print_bytes("METADATA UNKNOWN", tlv_v, tlv_l);
  3719. break;
  3720. }
  3721. len -= tlv_l;
  3722. tlv_v += tlv_l;
  3723. }
  3724. }
  3725. #endif /* DHD_DBG_SHOW_METADATA */
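/**
 * dhd_prot_packet_free - free a host packet. Control-path receive buffers
 * (ioctl, event, info and timestamp packets) are returned via the static
 * control buffer pool when DHD_USE_STATIC_CTRLBUF is defined; all other
 * packets are freed back to the OS packet pool.
 */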
  3726. static INLINE void BCMFASTPATH
  3727. dhd_prot_packet_free(dhd_pub_t *dhd, void *pkt, uint8 pkttype, bool send)
  3728. {
  3729. if (pkt) {
  3730. if (pkttype == PKTTYPE_IOCTL_RX ||
  3731. pkttype == PKTTYPE_EVENT_RX ||
  3732. pkttype == PKTTYPE_INFO_RX ||
  3733. pkttype == PKTTYPE_TSBUF_RX) {
  3734. #ifdef DHD_USE_STATIC_CTRLBUF
  3735. PKTFREE_STATIC(dhd->osh, pkt, send);
  3736. #else
  3737. PKTFREE(dhd->osh, pkt, send);
  3738. #endif /* DHD_USE_STATIC_CTRLBUF */
  3739. } else {
  3740. PKTFREE(dhd->osh, pkt, send);
  3741. }
  3742. }
  3743. }
  3744. /**
  3745. * dhd_prot_packet_get should be called only for items having pktid_ctrl_map handle
  3746. * and all the bottom most functions like dhd_pktid_map_free hold separate DHD_PKTID_LOCK
  3747. * to ensure thread safety, so no need to hold any locks for this function
  3748. */
  3749. static INLINE void * BCMFASTPATH
  3750. dhd_prot_packet_get(dhd_pub_t *dhd, uint32 pktid, uint8 pkttype, bool free_pktid)
  3751. {
  3752. void *PKTBUF;
  3753. dmaaddr_t pa;
  3754. uint32 len;
  3755. void *dmah;
  3756. void *secdma;
  3757. #ifdef DHD_PCIE_PKTID
  3758. if (free_pktid) {
  3759. PKTBUF = DHD_PKTID_TO_NATIVE(dhd, dhd->prot->pktid_ctrl_map,
  3760. pktid, pa, len, dmah, secdma, pkttype);
  3761. } else {
  3762. PKTBUF = DHD_PKTID_TO_NATIVE_RSV(dhd, dhd->prot->pktid_ctrl_map,
  3763. pktid, pa, len, dmah, secdma, pkttype);
  3764. }
  3765. #else
  3766. PKTBUF = DHD_PKTID_TO_NATIVE(dhd, dhd->prot->pktid_ctrl_map, pktid, pa,
  3767. len, dmah, secdma, pkttype);
  3768. #endif /* DHD_PCIE_PKTID */
  3769. if (PKTBUF) {
  3770. {
  3771. if (SECURE_DMA_ENAB(dhd->osh))
  3772. SECURE_DMA_UNMAP(dhd->osh, pa, (uint) len, DMA_RX, 0, dmah,
  3773. secdma, 0);
  3774. else
  3775. DMA_UNMAP(dhd->osh, pa, (uint) len, DMA_RX, 0, dmah);
  3776. #ifdef DMAMAP_STATS
  3777. switch (pkttype) {
  3778. #ifndef IOCTLRESP_USE_CONSTMEM
  3779. case PKTTYPE_IOCTL_RX:
  3780. dhd->dma_stats.ioctl_rx--;
  3781. dhd->dma_stats.ioctl_rx_sz -= len;
  3782. break;
  3783. #endif /* IOCTLRESP_USE_CONSTMEM */
  3784. case PKTTYPE_EVENT_RX:
  3785. dhd->dma_stats.event_rx--;
  3786. dhd->dma_stats.event_rx_sz -= len;
  3787. break;
  3788. case PKTTYPE_INFO_RX:
  3789. dhd->dma_stats.info_rx--;
  3790. dhd->dma_stats.info_rx_sz -= len;
  3791. break;
  3792. case PKTTYPE_TSBUF_RX:
  3793. dhd->dma_stats.tsbuf_rx--;
  3794. dhd->dma_stats.tsbuf_rx_sz -= len;
  3795. break;
  3796. }
  3797. #endif /* DMAMAP_STATS */
  3798. }
  3799. }
  3800. return PKTBUF;
  3801. }
  3802. #ifdef IOCTLRESP_USE_CONSTMEM
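/**
 * dhd_prot_ioctl_ret_buffer_get - translate an ioctl-response pktid back into
 * the dhd_dma_buf_t that was posted, for the IOCTLRESP_USE_CONSTMEM case.
 */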
  3803. static INLINE void BCMFASTPATH
  3804. dhd_prot_ioctl_ret_buffer_get(dhd_pub_t *dhd, uint32 pktid, dhd_dma_buf_t *retbuf)
  3805. {
  3806. memset(retbuf, 0, sizeof(dhd_dma_buf_t));
  3807. retbuf->va = DHD_PKTID_TO_NATIVE(dhd, dhd->prot->pktid_map_handle_ioctl, pktid,
  3808. retbuf->pa, retbuf->len, retbuf->dmah, retbuf->secdma, PKTTYPE_IOCTL_RX);
  3809. return;
  3810. }
  3811. #endif // endif
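/**
 * dhd_msgbuf_rxbuf_post - top up the H2D rx post ring. Buffers are posted in
 * bursts of up to RX_BUF_BURST until the posted count is within RX_BUF_BURST
 * of max_rxbufpost, or the ring runs out of space (bounded by a retry counter
 * so the loop cannot spin indefinitely).
 */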
  3812. static void BCMFASTPATH
  3813. dhd_msgbuf_rxbuf_post(dhd_pub_t *dhd, bool use_rsv_pktid)
  3814. {
  3815. dhd_prot_t *prot = dhd->prot;
  3816. int16 fillbufs;
  3817. uint16 cnt = 256;
  3818. int retcount = 0;
  3819. fillbufs = prot->max_rxbufpost - prot->rxbufpost;
  3820. while (fillbufs >= RX_BUF_BURST) {
  3821. cnt--;
  3822. if (cnt == 0) {
  3823. /* find a better way to reschedule rx buf post if space not available */
  3824. DHD_ERROR(("h2d rx post ring not available to post host buffers \n"));
  3825. DHD_ERROR(("Current posted host buf count %d \n", prot->rxbufpost));
  3826. break;
  3827. }
3828. /* Post in bursts of at most RX_BUF_BURST buffers at a time */
  3829. fillbufs = MIN(fillbufs, RX_BUF_BURST);
  3830. /* Post buffers */
  3831. retcount = dhd_prot_rxbuf_post(dhd, fillbufs, use_rsv_pktid);
  3832. if (retcount >= 0) {
  3833. prot->rxbufpost += (uint16)retcount;
  3834. #ifdef DHD_LB_RXC
  3835. /* dhd_prot_rxbuf_post returns the number of buffers posted */
  3836. DHD_LB_STATS_UPDATE_RXC_HISTO(dhd, retcount);
  3837. #endif /* DHD_LB_RXC */
  3838. /* how many more to post */
  3839. fillbufs = prot->max_rxbufpost - prot->rxbufpost;
  3840. } else {
  3841. /* Make sure we don't run loop any further */
  3842. fillbufs = 0;
  3843. }
  3844. }
  3845. }
3846. /** Post 'count' rx buffers to the dongle */
  3847. static int BCMFASTPATH
  3848. dhd_prot_rxbuf_post(dhd_pub_t *dhd, uint16 count, bool use_rsv_pktid)
  3849. {
  3850. void *p, **pktbuf;
  3851. uint8 *rxbuf_post_tmp;
  3852. host_rxbuf_post_t *rxbuf_post;
  3853. void *msg_start;
  3854. dmaaddr_t pa, *pktbuf_pa;
  3855. uint32 *pktlen;
  3856. uint16 i = 0, alloced = 0;
  3857. unsigned long flags;
  3858. uint32 pktid;
  3859. dhd_prot_t *prot = dhd->prot;
  3860. msgbuf_ring_t *ring = &prot->h2dring_rxp_subn;
  3861. void *lcl_buf;
  3862. uint16 lcl_buf_size;
  3863. uint16 pktsz = prot->rxbufpost_sz;
  3864. /* allocate a local buffer to store pkt buffer va, pa and length */
  3865. lcl_buf_size = (sizeof(void *) + sizeof(dmaaddr_t) + sizeof(uint32)) *
  3866. RX_BUF_BURST;
  3867. lcl_buf = MALLOC(dhd->osh, lcl_buf_size);
  3868. if (!lcl_buf) {
  3869. DHD_ERROR(("%s: local scratch buffer allocation failed\n", __FUNCTION__));
  3870. return 0;
  3871. }
  3872. pktbuf = lcl_buf;
  3873. pktbuf_pa = (dmaaddr_t *)((uint8 *)pktbuf + sizeof(void *) * RX_BUF_BURST);
  3874. pktlen = (uint32 *)((uint8 *)pktbuf_pa + sizeof(dmaaddr_t) * RX_BUF_BURST);
  3875. for (i = 0; i < count; i++) {
  3876. if ((p = PKTGET(dhd->osh, pktsz, FALSE)) == NULL) {
  3877. DHD_ERROR(("%s:%d: PKTGET for rxbuf failed\n", __FUNCTION__, __LINE__));
  3878. dhd->rx_pktgetfail++;
  3879. break;
  3880. }
  3881. pktlen[i] = PKTLEN(dhd->osh, p);
  3882. if (SECURE_DMA_ENAB(dhd->osh)) {
  3883. pa = SECURE_DMA_MAP(dhd->osh, PKTDATA(dhd->osh, p), pktlen[i],
  3884. DMA_RX, p, 0, ring->dma_buf.secdma, 0);
  3885. }
  3886. #ifndef BCM_SECURE_DMA
  3887. else
  3888. pa = DMA_MAP(dhd->osh, PKTDATA(dhd->osh, p), pktlen[i], DMA_RX, p, 0);
  3889. #endif /* #ifndef BCM_SECURE_DMA */
  3890. if (PHYSADDRISZERO(pa)) {
  3891. PKTFREE(dhd->osh, p, FALSE);
  3892. DHD_ERROR(("Invalid phyaddr 0\n"));
  3893. ASSERT(0);
  3894. break;
  3895. }
  3896. #ifdef DMAMAP_STATS
  3897. dhd->dma_stats.rxdata++;
  3898. dhd->dma_stats.rxdata_sz += pktlen[i];
  3899. #endif /* DMAMAP_STATS */
  3900. PKTPULL(dhd->osh, p, prot->rx_metadata_offset);
  3901. pktlen[i] = PKTLEN(dhd->osh, p);
  3902. pktbuf[i] = p;
  3903. pktbuf_pa[i] = pa;
  3904. }
  3905. /* only post what we have */
  3906. count = i;
  3907. /* grab the ring lock to allocate pktid and post on ring */
  3908. DHD_RING_LOCK(ring->ring_lock, flags);
3909. /* Claim space for exactly 'count' messages, for mitigation purposes */
  3910. msg_start = (void *)
  3911. dhd_prot_alloc_ring_space(dhd, ring, count, &alloced, TRUE);
  3912. if (msg_start == NULL) {
  3913. DHD_INFO(("%s:%d: Rxbufpost Msgbuf Not available\n", __FUNCTION__, __LINE__));
  3914. DHD_RING_UNLOCK(ring->ring_lock, flags);
  3915. goto cleanup;
  3916. }
3917. /* if msg_start != NULL, we should have alloced space for at least 1 item */
  3918. ASSERT(alloced > 0);
  3919. rxbuf_post_tmp = (uint8*)msg_start;
  3920. for (i = 0; i < alloced; i++) {
  3921. rxbuf_post = (host_rxbuf_post_t *)rxbuf_post_tmp;
  3922. p = pktbuf[i];
  3923. pa = pktbuf_pa[i];
  3924. #if defined(DHD_LB_RXC)
  3925. if (use_rsv_pktid == TRUE) {
  3926. bcm_workq_t *workq = &prot->rx_compl_cons;
  3927. int elem_ix = bcm_ring_cons(WORKQ_RING(workq), DHD_LB_WORKQ_SZ);
  3928. if (elem_ix == BCM_RING_EMPTY) {
  3929. DHD_INFO(("%s rx_compl_cons ring is empty\n", __FUNCTION__));
  3930. pktid = DHD_PKTID_INVALID;
  3931. goto alloc_pkt_id;
  3932. } else {
  3933. uint32 *elem = WORKQ_ELEMENT(uint32, workq, elem_ix);
  3934. pktid = *elem;
  3935. }
  3936. rxbuf_post->cmn_hdr.request_id = htol32(pktid);
  3937. /* Now populate the previous locker with valid information */
  3938. if (pktid != DHD_PKTID_INVALID) {
  3939. DHD_NATIVE_TO_PKTID_SAVE(dhd, dhd->prot->pktid_rx_map,
  3940. p, pktid, pa, pktlen[i], DMA_RX, NULL, NULL,
  3941. PKTTYPE_DATA_RX);
  3942. }
  3943. } else
  3944. #endif /* ! DHD_LB_RXC */
  3945. {
  3946. #if defined(DHD_LB_RXC)
  3947. alloc_pkt_id:
  3948. #endif /* DHD_LB_RXC */
  3949. pktid = DHD_NATIVE_TO_PKTID(dhd, dhd->prot->pktid_rx_map, p, pa,
  3950. pktlen[i], DMA_RX, NULL, ring->dma_buf.secdma, PKTTYPE_DATA_RX);
  3951. #if defined(DHD_PCIE_PKTID)
  3952. if (pktid == DHD_PKTID_INVALID) {
  3953. break;
  3954. }
  3955. #endif /* DHD_PCIE_PKTID */
  3956. }
  3957. /* Common msg header */
  3958. rxbuf_post->cmn_hdr.msg_type = MSG_TYPE_RXBUF_POST;
  3959. rxbuf_post->cmn_hdr.if_id = 0;
  3960. rxbuf_post->cmn_hdr.epoch = ring->seqnum % H2D_EPOCH_MODULO;
  3961. rxbuf_post->cmn_hdr.flags = ring->current_phase;
  3962. ring->seqnum++;
  3963. rxbuf_post->data_buf_len = htol16((uint16)pktlen[i]);
  3964. rxbuf_post->data_buf_addr.high_addr = htol32(PHYSADDRHI(pa));
  3965. rxbuf_post->data_buf_addr.low_addr =
  3966. htol32(PHYSADDRLO(pa) + prot->rx_metadata_offset);
  3967. if (prot->rx_metadata_offset) {
  3968. rxbuf_post->metadata_buf_len = prot->rx_metadata_offset;
  3969. rxbuf_post->metadata_buf_addr.high_addr = htol32(PHYSADDRHI(pa));
  3970. rxbuf_post->metadata_buf_addr.low_addr = htol32(PHYSADDRLO(pa));
  3971. } else {
  3972. rxbuf_post->metadata_buf_len = 0;
  3973. rxbuf_post->metadata_buf_addr.high_addr = 0;
  3974. rxbuf_post->metadata_buf_addr.low_addr = 0;
  3975. }
  3976. #ifdef DHD_PKTID_AUDIT_RING
  3977. DHD_PKTID_AUDIT(dhd, prot->pktid_rx_map, pktid, DHD_DUPLICATE_ALLOC);
  3978. #endif /* DHD_PKTID_AUDIT_RING */
  3979. rxbuf_post->cmn_hdr.request_id = htol32(pktid);
  3980. /* Move rxbuf_post_tmp to next item */
  3981. rxbuf_post_tmp = rxbuf_post_tmp + ring->item_len;
  3982. #ifdef DHD_LBUF_AUDIT
  3983. PKTAUDIT(dhd->osh, p);
  3984. #endif // endif
  3985. }
  3986. if (i < alloced) {
  3987. if (ring->wr < (alloced - i))
  3988. ring->wr = ring->max_items - (alloced - i);
  3989. else
  3990. ring->wr -= (alloced - i);
  3991. if (ring->wr == 0) {
  3992. DHD_INFO(("%s: flipping the phase now\n", ring->name));
  3993. ring->current_phase = ring->current_phase ?
  3994. 0 : BCMPCIE_CMNHDR_PHASE_BIT_INIT;
  3995. }
  3996. alloced = i;
  3997. }
  3998. /* update ring's WR index and ring doorbell to dongle */
  3999. if (alloced > 0) {
  4000. dhd_prot_ring_write_complete(dhd, ring, msg_start, alloced);
  4001. }
  4002. DHD_RING_UNLOCK(ring->ring_lock, flags);
  4003. cleanup:
  4004. for (i = alloced; i < count; i++) {
  4005. p = pktbuf[i];
  4006. pa = pktbuf_pa[i];
  4007. if (SECURE_DMA_ENAB(dhd->osh))
  4008. SECURE_DMA_UNMAP(dhd->osh, pa, pktlen[i], DMA_RX, 0,
  4009. DHD_DMAH_NULL, ring->dma_buf.secdma, 0);
  4010. else
  4011. DMA_UNMAP(dhd->osh, pa, pktlen[i], DMA_RX, 0, DHD_DMAH_NULL);
  4012. PKTFREE(dhd->osh, p, FALSE);
  4013. }
  4014. MFREE(dhd->osh, lcl_buf, lcl_buf_size);
  4015. return alloced;
4016. } /* dhd_prot_rxbuf_post */
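/**
 * dhd_prot_infobufpost - post receive buffers to the H2D info submit ring so
 * the dongle can return debug/info payloads. For each buffer a packet is
 * allocated, DMA-mapped, assigned a pktid from the control pktid map, and an
 * info_buf_post_msg_t work item is written to the ring.
 */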
  4017. static int
  4018. dhd_prot_infobufpost(dhd_pub_t *dhd, msgbuf_ring_t *ring)
  4019. {
  4020. unsigned long flags;
  4021. uint32 pktid;
  4022. dhd_prot_t *prot = dhd->prot;
  4023. uint16 alloced = 0;
  4024. uint16 pktsz = DHD_INFOBUF_RX_BUFPOST_PKTSZ;
  4025. uint32 pktlen;
  4026. info_buf_post_msg_t *infobuf_post;
  4027. uint8 *infobuf_post_tmp;
  4028. void *p;
  4029. void* msg_start;
  4030. uint8 i = 0;
  4031. dmaaddr_t pa;
  4032. int16 count = 0;
  4033. if (ring == NULL)
  4034. return 0;
  4035. if (ring->inited != TRUE)
  4036. return 0;
  4037. if (ring == dhd->prot->h2dring_info_subn) {
  4038. if (prot->max_infobufpost == 0)
  4039. return 0;
  4040. count = prot->max_infobufpost - prot->infobufpost;
  4041. }
  4042. else {
  4043. DHD_ERROR(("Unknown ring\n"));
  4044. return 0;
  4045. }
  4046. if (count <= 0) {
  4047. DHD_INFO(("%s: Cannot post more than max info resp buffers\n",
  4048. __FUNCTION__));
  4049. return 0;
  4050. }
  4051. /* grab the ring lock to allocate pktid and post on ring */
  4052. DHD_RING_LOCK(ring->ring_lock, flags);
4053. /* Claim space for exactly 'count' messages, for mitigation purposes */
  4054. msg_start = (void *) dhd_prot_alloc_ring_space(dhd, ring, count, &alloced, FALSE);
  4055. if (msg_start == NULL) {
  4056. DHD_INFO(("%s:%d: infobufpost Msgbuf Not available\n", __FUNCTION__, __LINE__));
  4057. DHD_RING_UNLOCK(ring->ring_lock, flags);
  4058. return -1;
  4059. }
4060. /* if msg_start != NULL, we should have alloced space for at least 1 item */
  4061. ASSERT(alloced > 0);
  4062. infobuf_post_tmp = (uint8*) msg_start;
  4063. /* loop through each allocated message in the host ring */
  4064. for (i = 0; i < alloced; i++) {
  4065. infobuf_post = (info_buf_post_msg_t *) infobuf_post_tmp;
  4066. /* Create a rx buffer */
  4067. #ifdef DHD_USE_STATIC_CTRLBUF
  4068. p = PKTGET_STATIC(dhd->osh, pktsz, FALSE);
  4069. #else
  4070. p = PKTGET(dhd->osh, pktsz, FALSE);
  4071. #endif /* DHD_USE_STATIC_CTRLBUF */
  4072. if (p == NULL) {
  4073. DHD_ERROR(("%s:%d: PKTGET for infobuf failed\n", __FUNCTION__, __LINE__));
  4074. dhd->rx_pktgetfail++;
  4075. break;
  4076. }
  4077. pktlen = PKTLEN(dhd->osh, p);
  4078. if (SECURE_DMA_ENAB(dhd->osh)) {
  4079. pa = SECURE_DMA_MAP(dhd->osh, PKTDATA(dhd->osh, p), pktlen,
  4080. DMA_RX, p, 0, ring->dma_buf.secdma, 0);
  4081. }
  4082. #ifndef BCM_SECURE_DMA
  4083. else
  4084. pa = DMA_MAP(dhd->osh, PKTDATA(dhd->osh, p), pktlen, DMA_RX, p, 0);
  4085. #endif /* #ifndef BCM_SECURE_DMA */
  4086. if (PHYSADDRISZERO(pa)) {
  4087. if (SECURE_DMA_ENAB(dhd->osh)) {
  4088. SECURE_DMA_UNMAP(dhd->osh, pa, pktlen, DMA_RX, 0, DHD_DMAH_NULL,
  4089. ring->dma_buf.secdma, 0);
  4090. }
  4091. else
  4092. DMA_UNMAP(dhd->osh, pa, pktlen, DMA_RX, 0, DHD_DMAH_NULL);
  4093. #ifdef DHD_USE_STATIC_CTRLBUF
  4094. PKTFREE_STATIC(dhd->osh, p, FALSE);
  4095. #else
  4096. PKTFREE(dhd->osh, p, FALSE);
  4097. #endif /* DHD_USE_STATIC_CTRLBUF */
  4098. DHD_ERROR(("Invalid phyaddr 0\n"));
  4099. ASSERT(0);
  4100. break;
  4101. }
  4102. #ifdef DMAMAP_STATS
  4103. dhd->dma_stats.info_rx++;
  4104. dhd->dma_stats.info_rx_sz += pktlen;
  4105. #endif /* DMAMAP_STATS */
  4106. pktlen = PKTLEN(dhd->osh, p);
  4107. /* Common msg header */
  4108. infobuf_post->cmn_hdr.msg_type = MSG_TYPE_INFO_BUF_POST;
  4109. infobuf_post->cmn_hdr.if_id = 0;
  4110. infobuf_post->cmn_hdr.epoch = ring->seqnum % H2D_EPOCH_MODULO;
  4111. infobuf_post->cmn_hdr.flags = ring->current_phase;
  4112. ring->seqnum++;
  4113. pktid = DHD_NATIVE_TO_PKTID(dhd, dhd->prot->pktid_ctrl_map, p, pa,
  4114. pktlen, DMA_RX, NULL, ring->dma_buf.secdma, PKTTYPE_INFO_RX);
  4115. #if defined(DHD_PCIE_PKTID)
  4116. if (pktid == DHD_PKTID_INVALID) {
  4117. if (SECURE_DMA_ENAB(dhd->osh)) {
  4118. SECURE_DMA_UNMAP(dhd->osh, pa, pktlen, DMA_RX, 0, 0,
  4119. ring->dma_buf.secdma, 0);
  4120. } else
  4121. DMA_UNMAP(dhd->osh, pa, pktlen, DMA_RX, 0, 0);
  4122. #ifdef DHD_USE_STATIC_CTRLBUF
  4123. PKTFREE_STATIC(dhd->osh, p, FALSE);
  4124. #else
  4125. PKTFREE(dhd->osh, p, FALSE);
  4126. #endif /* DHD_USE_STATIC_CTRLBUF */
  4127. DHD_ERROR_RLMT(("%s: Pktid pool depleted.\n", __FUNCTION__));
  4128. break;
  4129. }
  4130. #endif /* DHD_PCIE_PKTID */
  4131. infobuf_post->host_buf_len = htol16((uint16)pktlen);
  4132. infobuf_post->host_buf_addr.high_addr = htol32(PHYSADDRHI(pa));
  4133. infobuf_post->host_buf_addr.low_addr = htol32(PHYSADDRLO(pa));
  4134. #ifdef DHD_PKTID_AUDIT_RING
  4135. DHD_PKTID_AUDIT(dhd, prot->pktid_ctrl_map, pktid, DHD_DUPLICATE_ALLOC);
  4136. #endif /* DHD_PKTID_AUDIT_RING */
  4137. DHD_INFO(("ID %d, low_addr 0x%08x, high_addr 0x%08x\n",
  4138. infobuf_post->cmn_hdr.request_id, infobuf_post->host_buf_addr.low_addr,
  4139. infobuf_post->host_buf_addr.high_addr));
  4140. infobuf_post->cmn_hdr.request_id = htol32(pktid);
4141. /* Move infobuf_post_tmp to next item */
  4142. infobuf_post_tmp = infobuf_post_tmp + ring->item_len;
  4143. #ifdef DHD_LBUF_AUDIT
  4144. PKTAUDIT(dhd->osh, p);
  4145. #endif // endif
  4146. }
  4147. if (i < alloced) {
  4148. if (ring->wr < (alloced - i))
  4149. ring->wr = ring->max_items - (alloced - i);
  4150. else
  4151. ring->wr -= (alloced - i);
  4152. alloced = i;
  4153. if (alloced && ring->wr == 0) {
  4154. DHD_INFO(("%s: flipping the phase now\n", ring->name));
  4155. ring->current_phase = ring->current_phase ?
  4156. 0 : BCMPCIE_CMNHDR_PHASE_BIT_INIT;
  4157. }
  4158. }
  4159. /* Update the write pointer in TCM & ring bell */
  4160. if (alloced > 0) {
  4161. if (ring == dhd->prot->h2dring_info_subn) {
  4162. prot->infobufpost += alloced;
  4163. }
  4164. dhd_prot_ring_write_complete(dhd, ring, msg_start, alloced);
  4165. }
  4166. DHD_RING_UNLOCK(ring->ring_lock, flags);
  4167. return alloced;
  4168. } /* dhd_prot_infobufpost */
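/*
 * When IOCTLRESP_USE_CONSTMEM is defined, ioctl response buffers come from
 * a dedicated DMA-able allocation (dhd_dma_buf_t) rather than the packet
 * pool; the helpers below allocate and free that return buffer.
 */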
  4169. #ifdef IOCTLRESP_USE_CONSTMEM
  4170. static int
  4171. alloc_ioctl_return_buffer(dhd_pub_t *dhd, dhd_dma_buf_t *retbuf)
  4172. {
  4173. int err;
  4174. memset(retbuf, 0, sizeof(dhd_dma_buf_t));
  4175. if ((err = dhd_dma_buf_alloc(dhd, retbuf, IOCT_RETBUF_SIZE)) != BCME_OK) {
  4176. DHD_ERROR(("%s: dhd_dma_buf_alloc err %d\n", __FUNCTION__, err));
  4177. ASSERT(0);
  4178. return BCME_NOMEM;
  4179. }
  4180. return BCME_OK;
  4181. }
  4182. static void
  4183. free_ioctl_return_buffer(dhd_pub_t *dhd, dhd_dma_buf_t *retbuf)
  4184. {
  4185. /* retbuf (declared on stack) not fully populated ... */
  4186. if (retbuf->va) {
  4187. uint32 dma_pad;
  4188. dma_pad = (IOCT_RETBUF_SIZE % DHD_DMA_PAD) ? DHD_DMA_PAD : 0;
  4189. retbuf->len = IOCT_RETBUF_SIZE;
  4190. retbuf->_alloced = retbuf->len + dma_pad;
  4191. }
  4192. dhd_dma_buf_free(dhd, retbuf);
  4193. return;
  4194. }
  4195. #endif /* IOCTLRESP_USE_CONSTMEM */
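/*
 * Post a single buffer on the H2D control submission ring for one of the
 * control-path completions (ioctl response, event, or timestamp), selected
 * by msg_type. Returns 1 on success, -1 on failure.
 */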
  4196. static int
  4197. dhd_prot_rxbufpost_ctrl(dhd_pub_t *dhd, uint8 msg_type)
  4198. {
  4199. void *p;
  4200. uint16 pktsz;
  4201. ioctl_resp_evt_buf_post_msg_t *rxbuf_post;
  4202. dmaaddr_t pa;
  4203. uint32 pktlen;
  4204. dhd_prot_t *prot = dhd->prot;
  4205. uint16 alloced = 0;
  4206. unsigned long flags;
  4207. dhd_dma_buf_t retbuf;
  4208. void *dmah = NULL;
  4209. uint32 pktid;
  4210. void *map_handle;
  4211. msgbuf_ring_t *ring = &prot->h2dring_ctrl_subn;
  4212. bool non_ioctl_resp_buf = 0;
  4213. dhd_pkttype_t buf_type;
  4214. if (dhd->busstate == DHD_BUS_DOWN) {
  4215. DHD_ERROR(("%s: bus is already down.\n", __FUNCTION__));
  4216. return -1;
  4217. }
  4218. memset(&retbuf, 0, sizeof(dhd_dma_buf_t));
  4219. if (msg_type == MSG_TYPE_IOCTLRESP_BUF_POST)
  4220. buf_type = PKTTYPE_IOCTL_RX;
  4221. else if (msg_type == MSG_TYPE_EVENT_BUF_POST)
  4222. buf_type = PKTTYPE_EVENT_RX;
  4223. else if (msg_type == MSG_TYPE_TIMSTAMP_BUFPOST)
  4224. buf_type = PKTTYPE_TSBUF_RX;
  4225. else {
  4226. DHD_ERROR(("invalid message type to be posted to Ctrl ring %d\n", msg_type));
  4227. return -1;
  4228. }
  4229. if ((msg_type == MSG_TYPE_EVENT_BUF_POST) || (msg_type == MSG_TYPE_TIMSTAMP_BUFPOST))
  4230. non_ioctl_resp_buf = TRUE;
  4231. else
  4232. non_ioctl_resp_buf = FALSE;
  4233. if (non_ioctl_resp_buf) {
4234. /* Allocate packet for non-ioctl-response (event/timestamp) buffer post */
  4235. pktsz = DHD_FLOWRING_RX_BUFPOST_PKTSZ;
  4236. } else {
  4237. /* Allocate packet for ctrl/ioctl buffer post */
  4238. pktsz = DHD_FLOWRING_IOCTL_BUFPOST_PKTSZ;
  4239. }
  4240. #ifdef IOCTLRESP_USE_CONSTMEM
  4241. if (!non_ioctl_resp_buf) {
  4242. if (alloc_ioctl_return_buffer(dhd, &retbuf) != BCME_OK) {
  4243. DHD_ERROR(("Could not allocate IOCTL response buffer\n"));
  4244. return -1;
  4245. }
  4246. ASSERT(retbuf.len == IOCT_RETBUF_SIZE);
  4247. p = retbuf.va;
  4248. pktlen = retbuf.len;
  4249. pa = retbuf.pa;
  4250. dmah = retbuf.dmah;
  4251. } else
  4252. #endif /* IOCTLRESP_USE_CONSTMEM */
  4253. {
  4254. #ifdef DHD_USE_STATIC_CTRLBUF
  4255. p = PKTGET_STATIC(dhd->osh, pktsz, FALSE);
  4256. #else
  4257. p = PKTGET(dhd->osh, pktsz, FALSE);
  4258. #endif /* DHD_USE_STATIC_CTRLBUF */
  4259. if (p == NULL) {
  4260. DHD_ERROR(("%s:%d: PKTGET for %s buf failed\n",
  4261. __FUNCTION__, __LINE__, non_ioctl_resp_buf ?
  4262. "EVENT" : "IOCTL RESP"));
  4263. dhd->rx_pktgetfail++;
  4264. return -1;
  4265. }
  4266. pktlen = PKTLEN(dhd->osh, p);
  4267. if (SECURE_DMA_ENAB(dhd->osh)) {
  4268. pa = SECURE_DMA_MAP(dhd->osh, PKTDATA(dhd->osh, p), pktlen,
  4269. DMA_RX, p, 0, ring->dma_buf.secdma, 0);
  4270. }
  4271. #ifndef BCM_SECURE_DMA
  4272. else
  4273. pa = DMA_MAP(dhd->osh, PKTDATA(dhd->osh, p), pktlen, DMA_RX, p, 0);
  4274. #endif /* #ifndef BCM_SECURE_DMA */
  4275. if (PHYSADDRISZERO(pa)) {
  4276. DHD_ERROR(("Invalid physaddr 0\n"));
  4277. ASSERT(0);
  4278. goto free_pkt_return;
  4279. }
  4280. #ifdef DMAMAP_STATS
  4281. switch (buf_type) {
  4282. #ifndef IOCTLRESP_USE_CONSTMEM
  4283. case PKTTYPE_IOCTL_RX:
  4284. dhd->dma_stats.ioctl_rx++;
  4285. dhd->dma_stats.ioctl_rx_sz += pktlen;
  4286. break;
  4287. #endif /* !IOCTLRESP_USE_CONSTMEM */
  4288. case PKTTYPE_EVENT_RX:
  4289. dhd->dma_stats.event_rx++;
  4290. dhd->dma_stats.event_rx_sz += pktlen;
  4291. break;
  4292. case PKTTYPE_TSBUF_RX:
  4293. dhd->dma_stats.tsbuf_rx++;
  4294. dhd->dma_stats.tsbuf_rx_sz += pktlen;
  4295. break;
  4296. default:
  4297. break;
  4298. }
  4299. #endif /* DMAMAP_STATS */
  4300. }
  4301. /* grab the ring lock to allocate pktid and post on ring */
  4302. DHD_RING_LOCK(ring->ring_lock, flags);
  4303. rxbuf_post = (ioctl_resp_evt_buf_post_msg_t *)
  4304. dhd_prot_alloc_ring_space(dhd, ring, 1, &alloced, FALSE);
  4305. if (rxbuf_post == NULL) {
  4306. DHD_RING_UNLOCK(ring->ring_lock, flags);
  4307. DHD_ERROR(("%s:%d: Ctrl submit Msgbuf Not available to post buffer \n",
  4308. __FUNCTION__, __LINE__));
  4309. #ifdef IOCTLRESP_USE_CONSTMEM
  4310. if (non_ioctl_resp_buf)
  4311. #endif /* IOCTLRESP_USE_CONSTMEM */
  4312. {
  4313. if (SECURE_DMA_ENAB(dhd->osh)) {
  4314. SECURE_DMA_UNMAP(dhd->osh, pa, pktlen, DMA_RX, 0, DHD_DMAH_NULL,
  4315. ring->dma_buf.secdma, 0);
  4316. } else {
  4317. DMA_UNMAP(dhd->osh, pa, pktlen, DMA_RX, 0, DHD_DMAH_NULL);
  4318. }
  4319. }
  4320. goto free_pkt_return;
  4321. }
  4322. /* CMN msg header */
  4323. rxbuf_post->cmn_hdr.msg_type = msg_type;
  4324. #ifdef IOCTLRESP_USE_CONSTMEM
  4325. if (!non_ioctl_resp_buf) {
  4326. map_handle = dhd->prot->pktid_map_handle_ioctl;
  4327. pktid = DHD_NATIVE_TO_PKTID(dhd, map_handle, p, pa, pktlen, DMA_RX, dmah,
  4328. ring->dma_buf.secdma, buf_type);
  4329. } else
  4330. #endif /* IOCTLRESP_USE_CONSTMEM */
  4331. {
  4332. map_handle = dhd->prot->pktid_ctrl_map;
  4333. pktid = DHD_NATIVE_TO_PKTID(dhd, map_handle,
  4334. p, pa, pktlen, DMA_RX, dmah, ring->dma_buf.secdma,
  4335. buf_type);
  4336. }
  4337. if (pktid == DHD_PKTID_INVALID) {
  4338. if (ring->wr == 0) {
  4339. ring->wr = ring->max_items - 1;
  4340. } else {
  4341. ring->wr--;
  4342. if (ring->wr == 0) {
  4343. ring->current_phase = ring->current_phase ? 0 :
  4344. BCMPCIE_CMNHDR_PHASE_BIT_INIT;
  4345. }
  4346. }
  4347. DHD_RING_UNLOCK(ring->ring_lock, flags);
  4348. DMA_UNMAP(dhd->osh, pa, pktlen, DMA_RX, 0, DHD_DMAH_NULL);
  4349. DHD_ERROR_RLMT(("%s: Pktid pool depleted.\n", __FUNCTION__));
  4350. goto free_pkt_return;
  4351. }
  4352. #ifdef DHD_PKTID_AUDIT_RING
  4353. DHD_PKTID_AUDIT(dhd, map_handle, pktid, DHD_DUPLICATE_ALLOC);
  4354. #endif /* DHD_PKTID_AUDIT_RING */
  4355. rxbuf_post->cmn_hdr.request_id = htol32(pktid);
  4356. rxbuf_post->cmn_hdr.if_id = 0;
  4357. rxbuf_post->cmn_hdr.epoch = ring->seqnum % H2D_EPOCH_MODULO;
  4358. ring->seqnum++;
  4359. rxbuf_post->cmn_hdr.flags = ring->current_phase;
  4360. #if defined(DHD_PCIE_PKTID)
  4361. if (rxbuf_post->cmn_hdr.request_id == DHD_PKTID_INVALID) {
  4362. if (ring->wr == 0) {
  4363. ring->wr = ring->max_items - 1;
  4364. } else {
4365. ring->wr--; if (ring->wr == 0) {
  4366. ring->current_phase = ring->current_phase ? 0 :
  4367. BCMPCIE_CMNHDR_PHASE_BIT_INIT;
  4368. }
  4369. }
  4370. DHD_RING_UNLOCK(ring->ring_lock, flags);
  4371. #ifdef IOCTLRESP_USE_CONSTMEM
  4372. if (non_ioctl_resp_buf)
  4373. #endif /* IOCTLRESP_USE_CONSTMEM */
  4374. {
  4375. if (SECURE_DMA_ENAB(dhd->osh)) {
  4376. SECURE_DMA_UNMAP(dhd->osh, pa, pktlen, DMA_RX, 0, DHD_DMAH_NULL,
  4377. ring->dma_buf.secdma, 0);
  4378. } else
  4379. DMA_UNMAP(dhd->osh, pa, pktlen, DMA_RX, 0, DHD_DMAH_NULL);
  4380. }
  4381. goto free_pkt_return;
  4382. }
  4383. #endif /* DHD_PCIE_PKTID */
  4384. #ifndef IOCTLRESP_USE_CONSTMEM
  4385. rxbuf_post->host_buf_len = htol16((uint16)PKTLEN(dhd->osh, p));
  4386. #else
  4387. rxbuf_post->host_buf_len = htol16((uint16)pktlen);
  4388. #endif /* IOCTLRESP_USE_CONSTMEM */
  4389. rxbuf_post->host_buf_addr.high_addr = htol32(PHYSADDRHI(pa));
  4390. rxbuf_post->host_buf_addr.low_addr = htol32(PHYSADDRLO(pa));
  4391. #ifdef DHD_LBUF_AUDIT
  4392. if (non_ioctl_resp_buf)
  4393. PKTAUDIT(dhd->osh, p);
  4394. #endif // endif
  4395. /* update ring's WR index and ring doorbell to dongle */
  4396. dhd_prot_ring_write_complete(dhd, ring, rxbuf_post, 1);
  4397. DHD_RING_UNLOCK(ring->ring_lock, flags);
  4398. return 1;
  4399. free_pkt_return:
  4400. if (!non_ioctl_resp_buf) {
  4401. #ifdef IOCTLRESP_USE_CONSTMEM
  4402. free_ioctl_return_buffer(dhd, &retbuf);
  4403. #else
  4404. dhd_prot_packet_free(dhd, p, buf_type, FALSE);
  4405. #endif /* IOCTLRESP_USE_CONSTMEM */
  4406. } else {
  4407. dhd_prot_packet_free(dhd, p, buf_type, FALSE);
  4408. }
  4409. return -1;
  4410. } /* dhd_prot_rxbufpost_ctrl */
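/*
 * Post up to 'max_to_post' control-path buffers of the given msg_type,
 * stopping at the first failure; returns the number actually posted so the
 * caller can update its posted-buffer accounting.
 */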
  4411. static uint16
  4412. dhd_msgbuf_rxbuf_post_ctrlpath(dhd_pub_t *dhd, uint8 msg_type, uint32 max_to_post)
  4413. {
  4414. uint32 i = 0;
  4415. int32 ret_val;
  4416. DHD_INFO(("max to post %d, event %d \n", max_to_post, msg_type));
  4417. if (dhd->busstate == DHD_BUS_DOWN) {
  4418. DHD_ERROR(("%s: bus is already down.\n", __FUNCTION__));
  4419. return 0;
  4420. }
  4421. while (i < max_to_post) {
  4422. ret_val = dhd_prot_rxbufpost_ctrl(dhd, msg_type);
  4423. if (ret_val < 0)
  4424. break;
  4425. i++;
  4426. }
  4427. DHD_INFO(("posted %d buffers of type %d\n", i, msg_type));
  4428. return (uint16)i;
  4429. }
  4430. static void
  4431. dhd_msgbuf_rxbuf_post_ioctlresp_bufs(dhd_pub_t *dhd)
  4432. {
  4433. dhd_prot_t *prot = dhd->prot;
  4434. int max_to_post;
  4435. DHD_INFO(("ioctl resp buf post\n"));
  4436. max_to_post = prot->max_ioctlrespbufpost - prot->cur_ioctlresp_bufs_posted;
  4437. if (max_to_post <= 0) {
  4438. DHD_INFO(("%s: Cannot post more than max IOCTL resp buffers\n",
  4439. __FUNCTION__));
  4440. return;
  4441. }
  4442. prot->cur_ioctlresp_bufs_posted += dhd_msgbuf_rxbuf_post_ctrlpath(dhd,
  4443. MSG_TYPE_IOCTLRESP_BUF_POST, max_to_post);
  4444. }
  4445. static void
  4446. dhd_msgbuf_rxbuf_post_event_bufs(dhd_pub_t *dhd)
  4447. {
  4448. dhd_prot_t *prot = dhd->prot;
  4449. int max_to_post;
  4450. max_to_post = prot->max_eventbufpost - prot->cur_event_bufs_posted;
  4451. if (max_to_post <= 0) {
  4452. DHD_ERROR(("%s: Cannot post more than max event buffers\n",
  4453. __FUNCTION__));
  4454. return;
  4455. }
  4456. prot->cur_event_bufs_posted += dhd_msgbuf_rxbuf_post_ctrlpath(dhd,
  4457. MSG_TYPE_EVENT_BUF_POST, max_to_post);
  4458. }
  4459. static int
  4460. dhd_msgbuf_rxbuf_post_ts_bufs(dhd_pub_t *dhd)
  4461. {
  4462. return 0;
  4463. }
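/*
 * Drain the D2H info completion ring, bounded by 'bound' work items per
 * call; returns TRUE if more items may remain so the caller can
 * reschedule.
 */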
  4464. bool BCMFASTPATH
  4465. dhd_prot_process_msgbuf_infocpl(dhd_pub_t *dhd, uint bound)
  4466. {
  4467. dhd_prot_t *prot = dhd->prot;
  4468. bool more = TRUE;
  4469. uint n = 0;
  4470. msgbuf_ring_t *ring = prot->d2hring_info_cpln;
  4471. unsigned long flags;
  4472. if (ring == NULL)
  4473. return FALSE;
  4474. if (ring->inited != TRUE)
  4475. return FALSE;
  4476. /* Process all the messages - DTOH direction */
  4477. while (!dhd_is_device_removed(dhd)) {
  4478. uint8 *msg_addr;
  4479. uint32 msg_len;
  4480. if (dhd_query_bus_erros(dhd)) {
  4481. more = FALSE;
  4482. break;
  4483. }
  4484. if (dhd->hang_was_sent) {
  4485. more = FALSE;
  4486. break;
  4487. }
  4488. if (dhd->smmu_fault_occurred) {
  4489. more = FALSE;
  4490. break;
  4491. }
  4492. DHD_RING_LOCK(ring->ring_lock, flags);
  4493. /* Get the message from ring */
  4494. msg_addr = dhd_prot_get_read_addr(dhd, ring, &msg_len);
  4495. DHD_RING_UNLOCK(ring->ring_lock, flags);
  4496. if (msg_addr == NULL) {
  4497. more = FALSE;
  4498. break;
  4499. }
  4500. /* Prefetch data to populate the cache */
  4501. OSL_PREFETCH(msg_addr);
  4502. if (dhd_prot_process_msgtype(dhd, ring, msg_addr, msg_len) != BCME_OK) {
  4503. DHD_ERROR(("%s: Error at process rxpl msgbuf of len %d\n",
  4504. __FUNCTION__, msg_len));
  4505. }
  4506. /* Update read pointer */
  4507. dhd_prot_upd_read_idx(dhd, ring);
  4508. /* After batch processing, check RX bound */
  4509. n += msg_len / ring->item_len;
  4510. if (n >= bound) {
  4511. break;
  4512. }
  4513. }
  4514. return more;
  4515. }
  4516. #ifdef EWP_EDL
  4517. bool
  4518. dhd_prot_process_msgbuf_edl(dhd_pub_t *dhd)
  4519. {
  4520. dhd_prot_t *prot = dhd->prot;
  4521. msgbuf_ring_t *ring = prot->d2hring_edl;
  4522. unsigned long flags = 0;
  4523. uint32 items = 0;
  4524. uint16 rd = 0;
  4525. uint16 depth = 0;
  4526. if (ring == NULL)
  4527. return FALSE;
  4528. if (ring->inited != TRUE)
  4529. return FALSE;
  4530. if (ring->item_len == 0) {
  4531. DHD_ERROR(("%s: Bad ring ! ringidx %d, item_len %d \n",
  4532. __FUNCTION__, ring->idx, ring->item_len));
  4533. return FALSE;
  4534. }
  4535. if (dhd_query_bus_erros(dhd)) {
  4536. return FALSE;
  4537. }
  4538. if (dhd->hang_was_sent) {
  4539. return FALSE;
  4540. }
  4541. /* in this DPC context just check if wr index has moved
  4542. * and schedule deferred context to actually process the
  4543. * work items.
  4544. */
  4545. /* update the write index */
  4546. DHD_RING_LOCK(ring->ring_lock, flags);
  4547. if (dhd->dma_d2h_ring_upd_support) {
  4548. /* DMAing write/read indices supported */
  4549. ring->wr = dhd_prot_dma_indx_get(dhd, D2H_DMA_INDX_WR_UPD, ring->idx);
  4550. } else {
  4551. dhd_bus_cmn_readshared(dhd->bus, &ring->wr, RING_WR_UPD, ring->idx);
  4552. }
  4553. rd = ring->rd;
  4554. DHD_RING_UNLOCK(ring->ring_lock, flags);
  4555. depth = ring->max_items;
  4556. /* check for avail space, in number of ring items */
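/*
 * Note: READ_AVAIL_SPACE() effectively yields (wr - rd) when wr >= rd, and
 * only the contiguous chunk up to the ring end (depth - rd) when wr has
 * wrapped; see the fuller note in dhd_prot_process_edl_complete().
 */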
  4557. items = READ_AVAIL_SPACE(ring->wr, rd, depth);
  4558. if (items == 0) {
  4559. /* no work items in edl ring */
  4560. return FALSE;
  4561. }
  4562. if (items > ring->max_items) {
  4563. DHD_ERROR(("\r\n======================= \r\n"));
  4564. DHD_ERROR(("%s(): ring %p, ring->name %s, ring->max_items %d, items %d \r\n",
  4565. __FUNCTION__, ring, ring->name, ring->max_items, items));
  4566. DHD_ERROR(("wr: %d, rd: %d, depth: %d \r\n",
  4567. ring->wr, ring->rd, depth));
  4568. DHD_ERROR(("dhd->busstate %d bus->wait_for_d3_ack %d \r\n",
  4569. dhd->busstate, dhd->bus->wait_for_d3_ack));
  4570. DHD_ERROR(("\r\n======================= \r\n"));
  4571. #ifdef SUPPORT_LINKDOWN_RECOVERY
  4572. if (ring->wr >= ring->max_items) {
  4573. dhd->bus->read_shm_fail = TRUE;
  4574. }
  4575. #else
  4576. #ifdef DHD_FW_COREDUMP
  4577. if (dhd->memdump_enabled) {
  4578. /* collect core dump */
  4579. dhd->memdump_type = DUMP_TYPE_RESUMED_ON_INVALID_RING_RDWR;
  4580. dhd_bus_mem_dump(dhd);
  4581. }
  4582. #endif /* DHD_FW_COREDUMP */
  4583. #endif /* SUPPORT_LINKDOWN_RECOVERY */
  4584. dhd_schedule_reset(dhd);
  4585. return FALSE;
  4586. }
  4587. if (items > D2HRING_EDL_WATERMARK) {
  4588. DHD_ERROR_RLMT(("%s: WARNING! EDL watermark hit, num items=%u;"
  4589. " rd=%u; wr=%u; depth=%u;\n", __FUNCTION__, items,
  4590. ring->rd, ring->wr, depth));
  4591. }
  4592. dhd_schedule_logtrace(dhd->info);
  4593. return FALSE;
  4594. }
  4595. /* This is called either from work queue context of 'event_log_dispatcher_work' or
  4596. * from the kthread context of dhd_logtrace_thread
  4597. */
  4598. int
  4599. dhd_prot_process_edl_complete(dhd_pub_t *dhd, void *evt_decode_data)
  4600. {
  4601. dhd_prot_t *prot = NULL;
  4602. msgbuf_ring_t *ring = NULL;
  4603. int err = 0;
  4604. unsigned long flags = 0;
  4605. cmn_msg_hdr_t *msg = NULL;
  4606. uint8 *msg_addr = NULL;
  4607. uint32 max_items_to_process = 0, n = 0;
  4608. uint32 num_items = 0, new_items = 0;
  4609. uint16 depth = 0;
  4610. volatile uint16 wr = 0;
  4611. if (!dhd || !dhd->prot)
  4612. return 0;
  4613. prot = dhd->prot;
  4614. ring = prot->d2hring_edl;
  4615. if (!ring || !evt_decode_data) {
  4616. return 0;
  4617. }
  4618. if (dhd->hang_was_sent) {
  4619. return FALSE;
  4620. }
  4621. DHD_RING_LOCK(ring->ring_lock, flags);
  4622. ring->curr_rd = ring->rd;
  4623. wr = ring->wr;
  4624. depth = ring->max_items;
  4625. /* check for avail space, in number of ring items
  4626. * Note, that this will only give the # of items
  4627. * from rd to wr if wr>=rd, or from rd to ring end
  4628. * if wr < rd. So in the latter case strictly speaking
  4629. * not all the items are read. But this is OK, because
  4630. * these will be processed in the next doorbell as rd
  4631. * would have wrapped around. Processing in the next
  4632. * doorbell is acceptable since EDL only contains debug data
  4633. */
  4634. num_items = READ_AVAIL_SPACE(wr, ring->rd, depth);
  4635. if (num_items == 0) {
  4636. /* no work items in edl ring */
  4637. DHD_RING_UNLOCK(ring->ring_lock, flags);
  4638. return 0;
  4639. }
  4640. DHD_INFO(("%s: EDL work items [%u] available \n",
  4641. __FUNCTION__, num_items));
  4642. /* if space is available, calculate address to be read */
  4643. msg_addr = (char*)ring->dma_buf.va + (ring->rd * ring->item_len);
  4644. max_items_to_process = MIN(num_items, DHD_EVENT_LOGTRACE_BOUND);
  4645. DHD_RING_UNLOCK(ring->ring_lock, flags);
  4646. /* Prefetch data to populate the cache */
  4647. OSL_PREFETCH(msg_addr);
  4648. n = max_items_to_process;
  4649. while (n > 0) {
  4650. msg = (cmn_msg_hdr_t *)msg_addr;
  4651. /* wait for DMA of work item to complete */
  4652. if ((err = prot->d2h_edl_sync_cb(dhd, ring, msg)) != BCME_OK) {
  4653. DHD_ERROR(("%s: Error waiting for DMA to cmpl in EDL "
  4654. "ring; err = %d\n", __FUNCTION__, err));
  4655. }
  4656. /*
  4657. * Update the curr_rd to the current index in the ring, from where
  4658. * the work item is fetched. This way if the fetched work item
  4659. * fails in LIVELOCK, we can print the exact read index in the ring
  4660. * that shows up the corrupted work item.
  4661. */
  4662. if ((ring->curr_rd + 1) >= ring->max_items) {
  4663. ring->curr_rd = 0;
  4664. } else {
  4665. ring->curr_rd += 1;
  4666. }
  4667. if (err != BCME_OK) {
  4668. return 0;
  4669. }
  4670. /* process the edl work item, i.e, the event log */
  4671. err = dhd_event_logtrace_process_edl(dhd, msg_addr, evt_decode_data);
  4672. /* Dummy sleep so that scheduler kicks in after processing any logprints */
  4673. OSL_SLEEP(0);
  4674. /* Prefetch data to populate the cache */
  4675. OSL_PREFETCH(msg_addr + ring->item_len);
  4676. msg_addr += ring->item_len;
  4677. --n;
  4678. }
  4679. DHD_RING_LOCK(ring->ring_lock, flags);
  4680. /* update host ring read pointer */
  4681. if ((ring->rd + max_items_to_process) >= ring->max_items)
  4682. ring->rd = 0;
  4683. else
  4684. ring->rd += max_items_to_process;
  4685. DHD_RING_UNLOCK(ring->ring_lock, flags);
  4686. /* Now after processing max_items_to_process update dongle rd index.
  4687. * The TCM rd index is updated only if bus is not
  4688. * in D3. Else, the rd index is updated from resume
  4689. * context in - 'dhdpcie_bus_suspend'
  4690. */
  4691. DHD_GENERAL_LOCK(dhd, flags);
  4692. if (DHD_BUS_CHECK_SUSPEND_OR_ANY_SUSPEND_IN_PROGRESS(dhd)) {
  4693. DHD_INFO(("%s: bus is in suspend(%d) or suspending(0x%x) state!!\n",
  4694. __FUNCTION__, dhd->busstate, dhd->dhd_bus_busy_state));
  4695. DHD_GENERAL_UNLOCK(dhd, flags);
  4696. } else {
  4697. DHD_GENERAL_UNLOCK(dhd, flags);
  4698. DHD_EDL_RING_TCM_RD_UPDATE(dhd);
  4699. }
  4700. /* if num_items > bound, then anyway we will reschedule and
  4701. * this function runs again, so that if in between the DPC has
  4702. * updated the wr index, then the updated wr is read. But if
  4703. * num_items <= bound, and if DPC executes and updates the wr index
  4704. * when the above while loop is running, then the updated 'wr' index
  4705. * needs to be re-read from here, If we don't do so, then till
  4706. * the next time this function is scheduled
  4707. * the event logs will not be processed.
  4708. */
  4709. if (num_items <= DHD_EVENT_LOGTRACE_BOUND) {
  4710. /* read the updated wr index if reqd. and update num_items */
  4711. DHD_RING_LOCK(ring->ring_lock, flags);
  4712. if (wr != (volatile uint16)ring->wr) {
  4713. wr = (volatile uint16)ring->wr;
  4714. new_items = READ_AVAIL_SPACE(wr, ring->rd, depth);
  4715. DHD_INFO(("%s: new items [%u] avail in edl\n",
  4716. __FUNCTION__, new_items));
  4717. num_items += new_items;
  4718. }
  4719. DHD_RING_UNLOCK(ring->ring_lock, flags);
  4720. }
  4721. /* if # of items processed is less than num_items, need to re-schedule
  4722. * the deferred ctx
  4723. */
  4724. if (max_items_to_process < num_items) {
  4725. DHD_INFO(("%s: EDL bound hit / new items found, "
  4726. "items processed=%u; remaining=%u, "
  4727. "resched deferred ctx...\n",
  4728. __FUNCTION__, max_items_to_process,
  4729. num_items - max_items_to_process));
  4730. return (num_items - max_items_to_process);
  4731. }
  4732. return 0;
  4733. }
  4734. void
  4735. dhd_prot_edl_ring_tcm_rd_update(dhd_pub_t *dhd)
  4736. {
  4737. dhd_prot_t *prot = NULL;
  4738. unsigned long flags = 0;
  4739. msgbuf_ring_t *ring = NULL;
  4740. if (!dhd)
  4741. return;
  4742. prot = dhd->prot;
  4743. if (!prot || !prot->d2hring_edl)
  4744. return;
  4745. ring = prot->d2hring_edl;
  4746. DHD_RING_LOCK(ring->ring_lock, flags);
  4747. dhd_prot_upd_read_idx(dhd, ring);
  4748. DHD_RING_UNLOCK(ring->ring_lock, flags);
  4749. }
  4750. #endif /* EWP_EDL */
  4751. /* called when DHD needs to check for 'receive complete' messages from the dongle */
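/*
 * The completion ring is drained in batches: completions for the same
 * interface are chained into one packet list, while a completion for a
 * different interface (pkt_newidx) terminates the batch and is dispatched
 * separately. Processing stops once 'bound' packets have been handled and
 * TRUE is returned so the caller can reschedule.
 */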
  4752. bool BCMFASTPATH
  4753. dhd_prot_process_msgbuf_rxcpl(dhd_pub_t *dhd, uint bound, int ringtype)
  4754. {
  4755. bool more = FALSE;
  4756. uint n = 0;
  4757. dhd_prot_t *prot = dhd->prot;
  4758. msgbuf_ring_t *ring;
  4759. uint16 item_len;
  4760. host_rxbuf_cmpl_t *msg = NULL;
  4761. uint8 *msg_addr;
  4762. uint32 msg_len;
  4763. uint16 pkt_cnt, pkt_cnt_newidx;
  4764. unsigned long flags;
  4765. dmaaddr_t pa;
  4766. uint32 len;
  4767. void *dmah;
  4768. void *secdma;
  4769. int ifidx = 0, if_newidx = 0;
  4770. void *pkt, *pktqhead = NULL, *prevpkt = NULL, *pkt_newidx, *nextpkt;
  4771. uint32 pktid;
  4772. int i;
  4773. uint8 sync;
  4774. ts_timestamp_t *ts;
  4775. BCM_REFERENCE(ts);
  4776. #ifdef DHD_HP2P
  4777. if (ringtype == DHD_HP2P_RING && prot->d2hring_hp2p_rxcpl)
  4778. ring = prot->d2hring_hp2p_rxcpl;
  4779. else
  4780. #endif /* DHD_HP2P */
  4781. ring = &prot->d2hring_rx_cpln;
  4782. item_len = ring->item_len;
  4783. while (1) {
  4784. if (dhd_is_device_removed(dhd))
  4785. break;
  4786. if (dhd_query_bus_erros(dhd))
  4787. break;
  4788. if (dhd->hang_was_sent)
  4789. break;
  4790. if (dhd->smmu_fault_occurred) {
  4791. break;
  4792. }
  4793. pkt_cnt = 0;
  4794. pktqhead = pkt_newidx = NULL;
  4795. pkt_cnt_newidx = 0;
  4796. DHD_RING_LOCK(ring->ring_lock, flags);
  4797. /* Get the address of the next message to be read from ring */
  4798. msg_addr = dhd_prot_get_read_addr(dhd, ring, &msg_len);
  4799. if (msg_addr == NULL) {
  4800. DHD_RING_UNLOCK(ring->ring_lock, flags);
  4801. break;
  4802. }
  4803. while (msg_len > 0) {
  4804. msg = (host_rxbuf_cmpl_t *)msg_addr;
  4805. /* Wait until DMA completes, then fetch msg_type */
  4806. sync = prot->d2h_sync_cb(dhd, ring, &msg->cmn_hdr, item_len);
  4807. /*
  4808. * Update the curr_rd to the current index in the ring, from where
  4809. * the work item is fetched. This way if the fetched work item
  4810. * fails in LIVELOCK, we can print the exact read index in the ring
  4811. * that shows up the corrupted work item.
  4812. */
  4813. if ((ring->curr_rd + 1) >= ring->max_items) {
  4814. ring->curr_rd = 0;
  4815. } else {
  4816. ring->curr_rd += 1;
  4817. }
  4818. if (!sync) {
  4819. msg_len -= item_len;
  4820. msg_addr += item_len;
  4821. continue;
  4822. }
  4823. pktid = ltoh32(msg->cmn_hdr.request_id);
  4824. #ifdef DHD_PKTID_AUDIT_RING
  4825. DHD_PKTID_AUDIT_RING_DEBUG(dhd, dhd->prot->pktid_rx_map, pktid,
  4826. DHD_DUPLICATE_FREE, msg, D2HRING_RXCMPLT_ITEMSIZE);
  4827. #endif /* DHD_PKTID_AUDIT_RING */
  4828. pkt = DHD_PKTID_TO_NATIVE(dhd, prot->pktid_rx_map, pktid, pa,
  4829. len, dmah, secdma, PKTTYPE_DATA_RX);
  4830. if (!pkt) {
  4831. msg_len -= item_len;
  4832. msg_addr += item_len;
  4833. continue;
  4834. }
  4835. if (SECURE_DMA_ENAB(dhd->osh))
  4836. SECURE_DMA_UNMAP(dhd->osh, pa, (uint) len, DMA_RX, 0,
  4837. dmah, secdma, 0);
  4838. else
  4839. DMA_UNMAP(dhd->osh, pa, (uint) len, DMA_RX, 0, dmah);
  4840. #ifdef DMAMAP_STATS
  4841. dhd->dma_stats.rxdata--;
  4842. dhd->dma_stats.rxdata_sz -= len;
  4843. #endif /* DMAMAP_STATS */
  4844. DHD_INFO(("id 0x%04x, offset %d, len %d, idx %d, phase 0x%02x, "
  4845. "pktdata %p, metalen %d\n",
  4846. ltoh32(msg->cmn_hdr.request_id),
  4847. ltoh16(msg->data_offset),
  4848. ltoh16(msg->data_len), msg->cmn_hdr.if_id,
  4849. msg->cmn_hdr.flags, PKTDATA(dhd->osh, pkt),
  4850. ltoh16(msg->metadata_len)));
  4851. pkt_cnt++;
  4852. msg_len -= item_len;
  4853. msg_addr += item_len;
  4854. #if DHD_DBG_SHOW_METADATA
  4855. if (prot->metadata_dbg && prot->rx_metadata_offset &&
  4856. msg->metadata_len) {
  4857. uchar *ptr;
  4858. ptr = PKTDATA(dhd->osh, pkt) - (prot->rx_metadata_offset);
  4859. /* header followed by data */
  4860. bcm_print_bytes("rxmetadata", ptr, msg->metadata_len);
  4861. dhd_prot_print_metadata(dhd, ptr, msg->metadata_len);
  4862. }
  4863. #endif /* DHD_DBG_SHOW_METADATA */
  4864. /* data_offset from buf start */
  4865. if (ltoh16(msg->data_offset)) {
  4866. /* data offset given from dongle after split rx */
  4867. PKTPULL(dhd->osh, pkt, ltoh16(msg->data_offset));
  4868. }
  4869. else if (prot->rx_dataoffset) {
  4870. /* DMA RX offset updated through shared area */
  4871. PKTPULL(dhd->osh, pkt, prot->rx_dataoffset);
  4872. }
  4873. /* Actual length of the packet */
  4874. PKTSETLEN(dhd->osh, pkt, ltoh16(msg->data_len));
  4875. #if defined(WL_MONITOR)
  4876. if (dhd_monitor_enabled(dhd, ifidx)) {
  4877. if (msg->flags & BCMPCIE_PKT_FLAGS_FRAME_802_11) {
  4878. dhd_rx_mon_pkt(dhd, msg, pkt, ifidx);
  4879. continue;
  4880. } else {
  4881. DHD_ERROR(("Received non 802.11 packet, "
  4882. "when monitor mode is enabled\n"));
  4883. }
  4884. }
  4885. #endif /* WL_MONITOR */
  4886. if (!pktqhead) {
  4887. pktqhead = prevpkt = pkt;
  4888. ifidx = msg->cmn_hdr.if_id;
  4889. } else {
  4890. if (ifidx != msg->cmn_hdr.if_id) {
  4891. pkt_newidx = pkt;
  4892. if_newidx = msg->cmn_hdr.if_id;
  4893. pkt_cnt--;
  4894. pkt_cnt_newidx = 1;
  4895. break;
  4896. } else {
  4897. PKTSETNEXT(dhd->osh, prevpkt, pkt);
  4898. prevpkt = pkt;
  4899. }
  4900. }
  4901. #ifdef DHD_HP2P
  4902. if (dhd->hp2p_capable && ring == prot->d2hring_hp2p_rxcpl) {
  4903. #ifdef DHD_HP2P_DEBUG
  4904. bcm_print_bytes("Rxcpl", (uchar *)msg, sizeof(host_rxbuf_cmpl_t));
  4905. #endif /* DHD_HP2P_DEBUG */
  4906. dhd_update_hp2p_rxstats(dhd, msg);
  4907. }
  4908. #endif /* DHD_HP2P */
  4909. #ifdef DHD_LBUF_AUDIT
  4910. PKTAUDIT(dhd->osh, pkt);
  4911. #endif // endif
  4912. }
  4913. /* roll back read pointer for unprocessed message */
  4914. if (msg_len > 0) {
  4915. if (ring->rd < msg_len / item_len)
  4916. ring->rd = ring->max_items - msg_len / item_len;
  4917. else
  4918. ring->rd -= msg_len / item_len;
  4919. }
  4920. /* Update read pointer */
  4921. dhd_prot_upd_read_idx(dhd, ring);
  4922. DHD_RING_UNLOCK(ring->ring_lock, flags);
  4923. pkt = pktqhead;
  4924. for (i = 0; pkt && i < pkt_cnt; i++, pkt = nextpkt) {
  4925. nextpkt = PKTNEXT(dhd->osh, pkt);
  4926. PKTSETNEXT(dhd->osh, pkt, NULL);
  4927. #ifdef DHD_LB_RXP
  4928. dhd_lb_rx_pkt_enqueue(dhd, pkt, ifidx);
  4929. #elif defined(DHD_RX_CHAINING)
  4930. dhd_rxchain_frame(dhd, pkt, ifidx);
  4931. #else
  4932. dhd_bus_rx_frame(dhd->bus, pkt, ifidx, 1);
  4933. #endif /* DHD_LB_RXP */
  4934. }
  4935. if (pkt_newidx) {
  4936. #ifdef DHD_LB_RXP
  4937. dhd_lb_rx_pkt_enqueue(dhd, pkt_newidx, if_newidx);
  4938. #elif defined(DHD_RX_CHAINING)
  4939. dhd_rxchain_frame(dhd, pkt_newidx, if_newidx);
  4940. #else
  4941. dhd_bus_rx_frame(dhd->bus, pkt_newidx, if_newidx, 1);
  4942. #endif /* DHD_LB_RXP */
  4943. }
  4944. pkt_cnt += pkt_cnt_newidx;
  4945. /* Post another set of rxbufs to the device */
  4946. dhd_prot_return_rxbuf(dhd, 0, pkt_cnt);
  4947. #ifdef DHD_RX_CHAINING
  4948. dhd_rxchain_commit(dhd);
  4949. #endif // endif
  4950. /* After batch processing, check RX bound */
  4951. n += pkt_cnt;
  4952. if (n >= bound) {
  4953. more = TRUE;
  4954. break;
  4955. }
  4956. }
  4957. /* Call lb_dispatch only if packets are queued */
  4958. if (n &&
  4959. #ifdef WL_MONITOR
  4960. !(dhd_monitor_enabled(dhd, ifidx)) &&
  4961. #endif /* WL_MONITOR */
  4962. TRUE) {
  4963. DHD_LB_DISPATCH_RX_COMPL(dhd);
  4964. DHD_LB_DISPATCH_RX_PROCESS(dhd);
  4965. }
  4966. return more;
  4967. }
  4968. /**
  4969. * Hands transmit packets (with a caller provided flow_id) over to dongle territory (the flow ring)
  4970. */
  4971. void
  4972. dhd_prot_update_txflowring(dhd_pub_t *dhd, uint16 flowid, void *msgring)
  4973. {
  4974. msgbuf_ring_t *ring = (msgbuf_ring_t *)msgring;
  4975. if (ring == NULL) {
  4976. DHD_ERROR(("%s: NULL txflowring. exiting...\n", __FUNCTION__));
  4977. return;
  4978. }
  4979. /* Update read pointer */
  4980. if (dhd->dma_d2h_ring_upd_support) {
  4981. ring->rd = dhd_prot_dma_indx_get(dhd, H2D_DMA_INDX_RD_UPD, ring->idx);
  4982. }
  4983. DHD_TRACE(("ringid %d flowid %d write %d read %d \n\n",
  4984. ring->idx, flowid, ring->wr, ring->rd));
  4985. /* Need more logic here, but for now use it directly */
  4986. dhd_bus_schedule_queue(dhd->bus, flowid, TRUE); /* from queue to flowring */
  4987. }
  4988. /** called when DHD needs to check for 'transmit complete' messages from the dongle */
  4989. bool BCMFASTPATH
  4990. dhd_prot_process_msgbuf_txcpl(dhd_pub_t *dhd, uint bound, int ringtype)
  4991. {
  4992. bool more = TRUE;
  4993. uint n = 0;
  4994. msgbuf_ring_t *ring;
  4995. unsigned long flags;
  4996. #ifdef DHD_HP2P
  4997. if (ringtype == DHD_HP2P_RING && dhd->prot->d2hring_hp2p_txcpl)
  4998. ring = dhd->prot->d2hring_hp2p_txcpl;
  4999. else
  5000. #endif /* DHD_HP2P */
  5001. ring = &dhd->prot->d2hring_tx_cpln;
  5002. /* Process all the messages - DTOH direction */
  5003. while (!dhd_is_device_removed(dhd)) {
  5004. uint8 *msg_addr;
  5005. uint32 msg_len;
  5006. if (dhd_query_bus_erros(dhd)) {
  5007. more = FALSE;
  5008. break;
  5009. }
  5010. if (dhd->hang_was_sent) {
  5011. more = FALSE;
  5012. break;
  5013. }
  5014. if (dhd->smmu_fault_occurred) {
  5015. more = FALSE;
  5016. break;
  5017. }
  5018. DHD_RING_LOCK(ring->ring_lock, flags);
  5019. /* Get the address of the next message to be read from ring */
  5020. msg_addr = dhd_prot_get_read_addr(dhd, ring, &msg_len);
  5021. DHD_RING_UNLOCK(ring->ring_lock, flags);
  5022. if (msg_addr == NULL) {
  5023. more = FALSE;
  5024. break;
  5025. }
  5026. /* Prefetch data to populate the cache */
  5027. OSL_PREFETCH(msg_addr);
  5028. if (dhd_prot_process_msgtype(dhd, ring, msg_addr, msg_len) != BCME_OK) {
  5029. DHD_ERROR(("%s: process %s msg addr %p len %d\n",
  5030. __FUNCTION__, ring->name, msg_addr, msg_len));
  5031. }
  5032. /* Write to dngl rd ptr */
  5033. dhd_prot_upd_read_idx(dhd, ring);
  5034. /* After batch processing, check bound */
  5035. n += msg_len / ring->item_len;
  5036. if (n >= bound) {
  5037. break;
  5038. }
  5039. }
  5040. DHD_LB_DISPATCH_TX_COMPL(dhd);
  5041. return more;
  5042. }
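/*
 * Check the firmware trap buffer (fw_trap_buf) that the dongle updates via
 * DMA. Returns the trap data word when D2H_DEV_FWHALT is set (copying out
 * extended trap data if advertised), otherwise 0.
 */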
  5043. int BCMFASTPATH
  5044. dhd_prot_process_trapbuf(dhd_pub_t *dhd)
  5045. {
  5046. uint32 data;
  5047. dhd_dma_buf_t *trap_addr = &dhd->prot->fw_trap_buf;
  5048. /* Interrupts can come in before this struct
  5049. * has been initialized.
  5050. */
  5051. if (trap_addr->va == NULL) {
  5052. DHD_ERROR(("%s: trap_addr->va is NULL\n", __FUNCTION__));
  5053. return 0;
  5054. }
  5055. OSL_CACHE_INV((void *)trap_addr->va, sizeof(uint32));
  5056. data = *(uint32 *)(trap_addr->va);
  5057. if (data & D2H_DEV_FWHALT) {
  5058. DHD_ERROR(("Firmware trapped and trap_data is 0x%04x\n", data));
  5059. if (data & D2H_DEV_EXT_TRAP_DATA)
  5060. {
  5061. if (dhd->extended_trap_data) {
  5062. OSL_CACHE_INV((void *)trap_addr->va,
  5063. BCMPCIE_EXT_TRAP_DATA_MAXLEN);
  5064. memcpy(dhd->extended_trap_data, (uint32 *)trap_addr->va,
  5065. BCMPCIE_EXT_TRAP_DATA_MAXLEN);
  5066. }
  5067. DHD_ERROR(("Extended trap data available\n"));
  5068. }
  5069. return data;
  5070. }
  5071. return 0;
  5072. }
  5073. /** called when DHD needs to check for 'ioctl complete' messages from the dongle */
  5074. int BCMFASTPATH
  5075. dhd_prot_process_ctrlbuf(dhd_pub_t *dhd)
  5076. {
  5077. dhd_prot_t *prot = dhd->prot;
  5078. msgbuf_ring_t *ring = &prot->d2hring_ctrl_cpln;
  5079. unsigned long flags;
  5080. /* Process all the messages - DTOH direction */
  5081. while (!dhd_is_device_removed(dhd)) {
  5082. uint8 *msg_addr;
  5083. uint32 msg_len;
  5084. if (dhd_query_bus_erros(dhd)) {
  5085. break;
  5086. }
  5087. if (dhd->hang_was_sent) {
  5088. break;
  5089. }
  5090. if (dhd->smmu_fault_occurred) {
  5091. break;
  5092. }
  5093. DHD_RING_LOCK(ring->ring_lock, flags);
  5094. /* Get the address of the next message to be read from ring */
  5095. msg_addr = dhd_prot_get_read_addr(dhd, ring, &msg_len);
  5096. DHD_RING_UNLOCK(ring->ring_lock, flags);
  5097. if (msg_addr == NULL) {
  5098. break;
  5099. }
  5100. /* Prefetch data to populate the cache */
  5101. OSL_PREFETCH(msg_addr);
  5102. if (dhd_prot_process_msgtype(dhd, ring, msg_addr, msg_len) != BCME_OK) {
  5103. DHD_ERROR(("%s: process %s msg addr %p len %d\n",
  5104. __FUNCTION__, ring->name, msg_addr, msg_len));
  5105. }
  5106. /* Write to dngl rd ptr */
  5107. dhd_prot_upd_read_idx(dhd, ring);
  5108. }
  5109. return 0;
  5110. }
  5111. /**
  5112. * Consume messages out of the D2H ring. Ensure that the message's DMA to host
  5113. * memory has completed, before invoking the message handler via a table lookup
  5114. * of the cmn_msg_hdr::msg_type.
  5115. */
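/*
 * 'len' is expected to be a multiple of the ring's item_len; the loop walks
 * one work item at a time, syncing each item via d2h_sync_cb before
 * trusting its msg_type, and returns BCME_ERROR on a malformed length or an
 * out-of-range message type.
 */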
  5116. static int BCMFASTPATH
  5117. dhd_prot_process_msgtype(dhd_pub_t *dhd, msgbuf_ring_t *ring, uint8 *buf, uint32 len)
  5118. {
  5119. uint32 buf_len = len;
  5120. uint16 item_len;
  5121. uint8 msg_type;
  5122. cmn_msg_hdr_t *msg = NULL;
  5123. int ret = BCME_OK;
  5124. ASSERT(ring);
  5125. item_len = ring->item_len;
  5126. if (item_len == 0) {
  5127. DHD_ERROR(("%s: ringidx %d, item_len %d buf_len %d \n",
  5128. __FUNCTION__, ring->idx, item_len, buf_len));
  5129. return BCME_ERROR;
  5130. }
  5131. while (buf_len > 0) {
  5132. if (dhd->hang_was_sent) {
  5133. ret = BCME_ERROR;
  5134. goto done;
  5135. }
  5136. if (dhd->smmu_fault_occurred) {
  5137. ret = BCME_ERROR;
  5138. goto done;
  5139. }
  5140. msg = (cmn_msg_hdr_t *)buf;
  5141. /* Wait until DMA completes, then fetch msg_type */
  5142. msg_type = dhd->prot->d2h_sync_cb(dhd, ring, msg, item_len);
  5143. /*
  5144. * Update the curr_rd to the current index in the ring, from where
  5145. * the work item is fetched. This way if the fetched work item
  5146. * fails in LIVELOCK, we can print the exact read index in the ring
  5147. * that shows up the corrupted work item.
  5148. */
  5149. if ((ring->curr_rd + 1) >= ring->max_items) {
  5150. ring->curr_rd = 0;
  5151. } else {
  5152. ring->curr_rd += 1;
  5153. }
  5154. /* Prefetch data to populate the cache */
  5155. OSL_PREFETCH(buf + item_len);
  5156. DHD_INFO(("msg_type %d item_len %d buf_len %d\n",
  5157. msg_type, item_len, buf_len));
  5158. if (msg_type == MSG_TYPE_LOOPBACK) {
  5159. bcm_print_bytes("LPBK RESP: ", (uint8 *)msg, item_len);
  5160. DHD_ERROR((" MSG_TYPE_LOOPBACK, len %d\n", item_len));
  5161. }
  5162. ASSERT(msg_type < DHD_PROT_FUNCS);
  5163. if (msg_type >= DHD_PROT_FUNCS) {
  5164. DHD_ERROR(("%s: msg_type %d, item_len %d buf_len %d\n",
  5165. __FUNCTION__, msg_type, item_len, buf_len));
  5166. ret = BCME_ERROR;
  5167. goto done;
  5168. }
  5169. if (msg_type == MSG_TYPE_INFO_BUF_CMPLT) {
  5170. if (ring == dhd->prot->d2hring_info_cpln) {
  5171. if (!dhd->prot->infobufpost) {
  5172. DHD_ERROR(("infobuf posted are zero,"
  5173. "but there is a completion\n"));
  5174. goto done;
  5175. }
  5176. dhd->prot->infobufpost--;
  5177. dhd_prot_infobufpost(dhd, dhd->prot->h2dring_info_subn);
  5178. dhd_prot_process_infobuf_complete(dhd, buf);
  5179. }
  5180. } else
  5181. if (table_lookup[msg_type]) {
  5182. table_lookup[msg_type](dhd, buf);
  5183. }
  5184. if (buf_len < item_len) {
  5185. ret = BCME_ERROR;
  5186. goto done;
  5187. }
  5188. buf_len = buf_len - item_len;
  5189. buf = buf + item_len;
  5190. }
  5191. done:
  5192. #ifdef DHD_RX_CHAINING
  5193. dhd_rxchain_commit(dhd);
  5194. #endif // endif
  5195. return ret;
  5196. } /* dhd_prot_process_msgtype */
  5197. static void
  5198. dhd_prot_noop(dhd_pub_t *dhd, void *msg)
  5199. {
  5200. return;
  5201. }
  5202. /** called on MSG_TYPE_RING_STATUS message received from dongle */
  5203. static void
  5204. dhd_prot_ringstatus_process(dhd_pub_t *dhd, void *msg)
  5205. {
  5206. pcie_ring_status_t *ring_status = (pcie_ring_status_t *) msg;
  5207. uint32 request_id = ltoh32(ring_status->cmn_hdr.request_id);
  5208. uint16 status = ltoh16(ring_status->compl_hdr.status);
  5209. uint16 ring_id = ltoh16(ring_status->compl_hdr.flow_ring_id);
  5210. DHD_ERROR(("ring status: request_id %d, status 0x%04x, flow ring %d, write_idx %d \n",
  5211. request_id, status, ring_id, ltoh16(ring_status->write_idx)));
  5212. if (ltoh16(ring_status->compl_hdr.ring_id) != BCMPCIE_H2D_MSGRING_CONTROL_SUBMIT)
  5213. return;
  5214. if (status == BCMPCIE_BAD_PHASE) {
5215. /* bad phase reported by the dongle */
  5216. DHD_ERROR(("Bad phase\n"));
  5217. }
  5218. if (status != BCMPCIE_BADOPTION)
  5219. return;
  5220. if (request_id == DHD_H2D_DBGRING_REQ_PKTID) {
  5221. if (dhd->prot->h2dring_info_subn != NULL) {
  5222. if (dhd->prot->h2dring_info_subn->create_pending == TRUE) {
  5223. DHD_ERROR(("H2D ring create failed for info ring\n"));
  5224. dhd->prot->h2dring_info_subn->create_pending = FALSE;
  5225. }
  5226. else
  5227. DHD_ERROR(("ring create ID for a ring, create not pending\n"));
  5228. } else {
  5229. DHD_ERROR(("%s info submit ring doesn't exist\n", __FUNCTION__));
  5230. }
  5231. }
  5232. else if (request_id == DHD_D2H_DBGRING_REQ_PKTID) {
  5233. if (dhd->prot->d2hring_info_cpln != NULL) {
  5234. if (dhd->prot->d2hring_info_cpln->create_pending == TRUE) {
  5235. DHD_ERROR(("D2H ring create failed for info ring\n"));
  5236. dhd->prot->d2hring_info_cpln->create_pending = FALSE;
  5237. }
  5238. else
  5239. DHD_ERROR(("ring create ID for info ring, create not pending\n"));
  5240. } else {
  5241. DHD_ERROR(("%s info cpl ring doesn't exist\n", __FUNCTION__));
  5242. }
  5243. }
  5244. #ifdef DHD_HP2P
  5245. else if (request_id == DHD_D2H_HPPRING_TXREQ_PKTID) {
  5246. if (dhd->prot->d2hring_hp2p_txcpl != NULL) {
  5247. if (dhd->prot->d2hring_hp2p_txcpl->create_pending == TRUE) {
  5248. DHD_ERROR(("H2D ring create failed for hp2p ring\n"));
  5249. dhd->prot->d2hring_hp2p_txcpl->create_pending = FALSE;
  5250. }
  5251. else
  5252. DHD_ERROR(("ring create ID for a ring, create not pending\n"));
  5253. } else {
  5254. DHD_ERROR(("%s hp2p txcmpl ring doesn't exist\n", __FUNCTION__));
  5255. }
  5256. }
  5257. else if (request_id == DHD_D2H_HPPRING_RXREQ_PKTID) {
  5258. if (dhd->prot->d2hring_hp2p_rxcpl != NULL) {
  5259. if (dhd->prot->d2hring_hp2p_rxcpl->create_pending == TRUE) {
  5260. DHD_ERROR(("D2H ring create failed for hp2p rxcmpl ring\n"));
  5261. dhd->prot->d2hring_hp2p_rxcpl->create_pending = FALSE;
  5262. }
  5263. else
  5264. DHD_ERROR(("ring create ID for hp2p rxcmpl ring, not pending\n"));
  5265. } else {
  5266. DHD_ERROR(("%s hp2p rxcpl ring doesn't exist\n", __FUNCTION__));
  5267. }
  5268. }
  5269. #endif /* DHD_HP2P */
  5270. else {
  5271. DHD_ERROR(("don;t know how to pair with original request\n"));
  5272. }
  5273. /* How do we track this to pair it with ??? */
  5274. return;
  5275. }
  5276. /** called on MSG_TYPE_GEN_STATUS ('general status') message received from dongle */
  5277. static void
  5278. dhd_prot_genstatus_process(dhd_pub_t *dhd, void *msg)
  5279. {
  5280. pcie_gen_status_t *gen_status = (pcie_gen_status_t *)msg;
  5281. DHD_ERROR(("ERROR: gen status: request_id %d, STATUS 0x%04x, flow ring %d \n",
  5282. gen_status->cmn_hdr.request_id, gen_status->compl_hdr.status,
  5283. gen_status->compl_hdr.flow_ring_id));
  5284. /* How do we track this to pair it with ??? */
  5285. return;
  5286. }
  5287. /**
  5288. * Called on MSG_TYPE_IOCTLPTR_REQ_ACK ('ioctl ack') message received from dongle, meaning that the
  5289. * dongle received the ioctl message in dongle memory.
  5290. */
  5291. static void
  5292. dhd_prot_ioctack_process(dhd_pub_t *dhd, void *msg)
  5293. {
  5294. ioctl_req_ack_msg_t *ioct_ack = (ioctl_req_ack_msg_t *)msg;
  5295. unsigned long flags;
  5296. #if defined(DHD_PKTID_AUDIT_RING)
  5297. uint32 pktid = ltoh32(ioct_ack->cmn_hdr.request_id);
  5298. #endif // endif
  5299. #if defined(DHD_PKTID_AUDIT_RING)
5300. /* Skip audit for DHD_IOCTL_REQ_PKTID = 0xFFFE */
  5301. if (pktid != DHD_IOCTL_REQ_PKTID) {
  5302. #ifndef IOCTLRESP_USE_CONSTMEM
  5303. DHD_PKTID_AUDIT_RING_DEBUG(dhd, dhd->prot->pktid_ctrl_map, pktid,
  5304. DHD_TEST_IS_ALLOC, msg, D2HRING_CTRL_CMPLT_ITEMSIZE);
  5305. #else
  5306. DHD_PKTID_AUDIT_RING_DEBUG(dhd, dhd->prot->pktid_map_handle_ioctl, pktid,
  5307. DHD_TEST_IS_ALLOC, msg, D2HRING_CTRL_CMPLT_ITEMSIZE);
  5308. #endif /* !IOCTLRESP_USE_CONSTMEM */
  5309. }
  5310. #endif // endif
  5311. dhd->prot->ioctl_ack_time = OSL_LOCALTIME_NS();
  5312. DHD_GENERAL_LOCK(dhd, flags);
  5313. if ((dhd->prot->ioctl_state & MSGBUF_IOCTL_ACK_PENDING) &&
  5314. (dhd->prot->ioctl_state & MSGBUF_IOCTL_RESP_PENDING)) {
  5315. dhd->prot->ioctl_state &= ~MSGBUF_IOCTL_ACK_PENDING;
  5316. } else {
  5317. DHD_ERROR(("%s: received ioctl ACK with state %02x trans_id = %d\n",
  5318. __FUNCTION__, dhd->prot->ioctl_state, dhd->prot->ioctl_trans_id));
  5319. prhex("dhd_prot_ioctack_process:",
  5320. (uchar *)msg, D2HRING_CTRL_CMPLT_ITEMSIZE);
  5321. }
  5322. DHD_GENERAL_UNLOCK(dhd, flags);
  5323. DHD_CTL(("ioctl req ack: request_id %d, status 0x%04x, flow ring %d \n",
  5324. ioct_ack->cmn_hdr.request_id, ioct_ack->compl_hdr.status,
  5325. ioct_ack->compl_hdr.flow_ring_id));
  5326. if (ioct_ack->compl_hdr.status != 0) {
  5327. DHD_ERROR(("got an error status for the ioctl request...need to handle that\n"));
  5328. }
  5329. }
  5330. /** called on MSG_TYPE_IOCTL_CMPLT message received from dongle */
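/*
 * The completion is validated against the outstanding request: both the
 * transaction id and the ioctl command must match what the host issued. On
 * mismatch the waiting ioctl path is woken with an error and, when core
 * dumps are enabled, a memdump is collected before a reset is scheduled.
 */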
  5331. static void
  5332. dhd_prot_ioctcmplt_process(dhd_pub_t *dhd, void *msg)
  5333. {
  5334. dhd_prot_t *prot = dhd->prot;
  5335. uint32 pkt_id, xt_id;
  5336. ioctl_comp_resp_msg_t *ioct_resp = (ioctl_comp_resp_msg_t *)msg;
  5337. void *pkt;
  5338. unsigned long flags;
  5339. dhd_dma_buf_t retbuf;
  5340. /* Check for ioctl timeout induce flag, which is set by firing
  5341. * dhd iovar to induce IOCTL timeout. If flag is set,
  5342. * return from here, which results in to IOCTL timeout.
  5343. */
  5344. if (dhd->dhd_induce_error == DHD_INDUCE_IOCTL_TIMEOUT) {
  5345. DHD_ERROR(("%s: Inducing resumed on timeout\n", __FUNCTION__));
  5346. return;
  5347. }
  5348. memset(&retbuf, 0, sizeof(dhd_dma_buf_t));
  5349. pkt_id = ltoh32(ioct_resp->cmn_hdr.request_id);
  5350. #if defined(DHD_PKTID_AUDIT_RING)
  5351. #ifndef IOCTLRESP_USE_CONSTMEM
  5352. DHD_PKTID_AUDIT_RING_DEBUG(dhd, prot->pktid_ctrl_map, pkt_id,
  5353. DHD_DUPLICATE_FREE, msg, D2HRING_CTRL_CMPLT_ITEMSIZE);
  5354. #else
  5355. DHD_PKTID_AUDIT_RING_DEBUG(dhd, prot->pktid_map_handle_ioctl, pkt_id,
  5356. DHD_DUPLICATE_FREE, msg, D2HRING_CTRL_CMPLT_ITEMSIZE);
  5357. #endif /* !IOCTLRESP_USE_CONSTMEM */
  5358. #endif // endif
  5359. DHD_GENERAL_LOCK(dhd, flags);
  5360. if ((prot->ioctl_state & MSGBUF_IOCTL_ACK_PENDING) ||
  5361. !(prot->ioctl_state & MSGBUF_IOCTL_RESP_PENDING)) {
  5362. DHD_ERROR(("%s: received ioctl response with state %02x trans_id = %d\n",
  5363. __FUNCTION__, dhd->prot->ioctl_state, dhd->prot->ioctl_trans_id));
  5364. prhex("dhd_prot_ioctcmplt_process:",
  5365. (uchar *)msg, D2HRING_CTRL_CMPLT_ITEMSIZE);
  5366. DHD_GENERAL_UNLOCK(dhd, flags);
  5367. return;
  5368. }
  5369. dhd->prot->ioctl_cmplt_time = OSL_LOCALTIME_NS();
  5370. /* Clear Response pending bit */
  5371. prot->ioctl_state &= ~MSGBUF_IOCTL_RESP_PENDING;
  5372. DHD_GENERAL_UNLOCK(dhd, flags);
  5373. #ifndef IOCTLRESP_USE_CONSTMEM
  5374. pkt = dhd_prot_packet_get(dhd, pkt_id, PKTTYPE_IOCTL_RX, TRUE);
  5375. #else
  5376. dhd_prot_ioctl_ret_buffer_get(dhd, pkt_id, &retbuf);
  5377. pkt = retbuf.va;
  5378. #endif /* !IOCTLRESP_USE_CONSTMEM */
  5379. if (!pkt) {
  5380. DHD_ERROR(("%s: received ioctl response with NULL pkt\n", __FUNCTION__));
  5381. prhex("dhd_prot_ioctcmplt_process:",
  5382. (uchar *)msg, D2HRING_CTRL_CMPLT_ITEMSIZE);
  5383. return;
  5384. }
  5385. prot->ioctl_resplen = ltoh16(ioct_resp->resp_len);
  5386. prot->ioctl_status = ltoh16(ioct_resp->compl_hdr.status);
  5387. xt_id = ltoh16(ioct_resp->trans_id);
  5388. if (xt_id != prot->ioctl_trans_id || prot->curr_ioctl_cmd != ioct_resp->cmd) {
  5389. DHD_ERROR(("%s: transaction id(%d %d) or cmd(%d %d) mismatch\n",
  5390. __FUNCTION__, xt_id, prot->ioctl_trans_id,
  5391. prot->curr_ioctl_cmd, ioct_resp->cmd));
  5392. dhd_wakeup_ioctl_event(dhd, IOCTL_RETURN_ON_ERROR);
  5393. dhd_prot_debug_info_print(dhd);
  5394. #ifdef DHD_FW_COREDUMP
  5395. if (dhd->memdump_enabled) {
  5396. /* collect core dump */
  5397. dhd->memdump_type = DUMP_TYPE_TRANS_ID_MISMATCH;
  5398. dhd_bus_mem_dump(dhd);
  5399. }
  5400. #else
  5401. ASSERT(0);
  5402. #endif /* DHD_FW_COREDUMP */
  5403. dhd_schedule_reset(dhd);
  5404. goto exit;
  5405. }
  5406. DHD_CTL(("IOCTL_COMPLETE: req_id %x transid %d status %x resplen %d\n",
  5407. pkt_id, xt_id, prot->ioctl_status, prot->ioctl_resplen));
  5408. if (prot->ioctl_resplen > 0) {
  5409. #ifndef IOCTLRESP_USE_CONSTMEM
  5410. bcopy(PKTDATA(dhd->osh, pkt), prot->retbuf.va, prot->ioctl_resplen);
  5411. #else
  5412. bcopy(pkt, prot->retbuf.va, prot->ioctl_resplen);
  5413. #endif /* !IOCTLRESP_USE_CONSTMEM */
  5414. }
  5415. /* wake up any dhd_os_ioctl_resp_wait() */
  5416. dhd_wakeup_ioctl_event(dhd, IOCTL_RETURN_ON_SUCCESS);
  5417. exit:
  5418. #ifndef IOCTLRESP_USE_CONSTMEM
  5419. dhd_prot_packet_free(dhd, pkt,
  5420. PKTTYPE_IOCTL_RX, FALSE);
  5421. #else
  5422. free_ioctl_return_buffer(dhd, &retbuf);
  5423. #endif /* !IOCTLRESP_USE_CONSTMEM */
  5424. /* Post another ioctl buf to the device */
  5425. if (prot->cur_ioctlresp_bufs_posted > 0) {
  5426. prot->cur_ioctlresp_bufs_posted--;
  5427. }
  5428. dhd_msgbuf_rxbuf_post_ioctlresp_bufs(dhd);
  5429. }
  5430. int
  5431. dhd_prot_check_tx_resource(dhd_pub_t *dhd)
  5432. {
  5433. return dhd->prot->no_tx_resource;
  5434. }
  5435. void
  5436. dhd_prot_update_pktid_txq_stop_cnt(dhd_pub_t *dhd)
  5437. {
  5438. dhd->prot->pktid_txq_stop_cnt++;
  5439. }
  5440. void
  5441. dhd_prot_update_pktid_txq_start_cnt(dhd_pub_t *dhd)
  5442. {
  5443. dhd->prot->pktid_txq_start_cnt++;
  5444. }
  5445. /** called on MSG_TYPE_TX_STATUS message received from dongle */
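/*
 * Converts the completion's request_id back to the native packet via the
 * tx pktid map, unmaps its DMA mapping, updates per-flowring latency stats
 * when enabled, and finally frees the packet (or hands it to the tx
 * completion workq under DHD_LB_TXC).
 */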
  5446. static void BCMFASTPATH
  5447. dhd_prot_txstatus_process(dhd_pub_t *dhd, void *msg)
  5448. {
  5449. dhd_prot_t *prot = dhd->prot;
  5450. host_txbuf_cmpl_t * txstatus;
  5451. unsigned long flags;
  5452. uint32 pktid;
  5453. void *pkt;
  5454. dmaaddr_t pa;
  5455. uint32 len;
  5456. void *dmah;
  5457. void *secdma;
  5458. bool pkt_fate;
  5459. msgbuf_ring_t *ring = &dhd->prot->d2hring_tx_cpln;
  5460. #if defined(TX_STATUS_LATENCY_STATS) || defined(DHD_HP2P)
  5461. flow_info_t *flow_info;
  5462. uint64 tx_status_latency;
  5463. #endif /* TX_STATUS_LATENCY_STATS || DHD_HP2P */
  5464. #if defined(TX_STATUS_LATENCY_STATS)
  5465. flow_ring_node_t *flow_ring_node;
  5466. uint16 flowid;
  5467. #endif // endif
  5468. ts_timestamp_t *ts;
  5469. BCM_REFERENCE(ts);
  5470. txstatus = (host_txbuf_cmpl_t *)msg;
  5471. #if defined(TX_STATUS_LATENCY_STATS)
  5472. flowid = txstatus->compl_hdr.flow_ring_id;
  5473. flow_ring_node = DHD_FLOW_RING(dhd, flowid);
  5474. #endif // endif
  5475. /* locks required to protect circular buffer accesses */
  5476. DHD_RING_LOCK(ring->ring_lock, flags);
  5477. pktid = ltoh32(txstatus->cmn_hdr.request_id);
  5478. pkt_fate = TRUE;
  5479. #if defined(DHD_PKTID_AUDIT_RING)
  5480. DHD_PKTID_AUDIT_RING_DEBUG(dhd, dhd->prot->pktid_tx_map, pktid,
  5481. DHD_DUPLICATE_FREE, msg, D2HRING_TXCMPLT_ITEMSIZE);
  5482. #endif // endif
  5483. DHD_INFO(("txstatus for pktid 0x%04x\n", pktid));
  5484. if (OSL_ATOMIC_DEC_RETURN(dhd->osh, &prot->active_tx_count) < 0) {
  5485. DHD_ERROR(("Extra packets are freed\n"));
  5486. }
  5487. ASSERT(pktid != 0);
  5488. pkt = DHD_PKTID_TO_NATIVE(dhd, dhd->prot->pktid_tx_map, pktid,
  5489. pa, len, dmah, secdma, PKTTYPE_DATA_TX);
  5490. if (!pkt) {
  5491. DHD_RING_UNLOCK(ring->ring_lock, flags);
  5492. DHD_ERROR(("%s: received txstatus with NULL pkt\n", __FUNCTION__));
  5493. prhex("dhd_prot_txstatus_process:", (uchar *)msg, D2HRING_TXCMPLT_ITEMSIZE);
  5494. #ifdef DHD_FW_COREDUMP
  5495. if (dhd->memdump_enabled) {
  5496. /* collect core dump */
  5497. dhd->memdump_type = DUMP_TYPE_PKTID_INVALID;
  5498. dhd_bus_mem_dump(dhd);
  5499. }
  5500. #else
  5501. ASSERT(0);
  5502. #endif /* DHD_FW_COREDUMP */
  5503. return;
  5504. }
  5505. if (DHD_PKTID_AVAIL(dhd->prot->pktid_tx_map) == DHD_PKTID_MIN_AVAIL_COUNT) {
  5506. dhd->prot->no_tx_resource = FALSE;
  5507. dhd_bus_start_queue(dhd->bus);
  5508. }
  5509. if (SECURE_DMA_ENAB(dhd->osh)) {
  5510. int offset = 0;
  5511. BCM_REFERENCE(offset);
  5512. if (dhd->prot->tx_metadata_offset)
  5513. offset = dhd->prot->tx_metadata_offset + ETHER_HDR_LEN;
  5514. SECURE_DMA_UNMAP(dhd->osh, (uint) pa,
  5515. (uint) dhd->prot->tx_metadata_offset, DMA_RX, 0, dmah,
  5516. secdma, offset);
  5517. } else {
  5518. DMA_UNMAP(dhd->osh, pa, (uint) len, DMA_RX, 0, dmah);
  5519. }
  5520. #ifdef TX_STATUS_LATENCY_STATS
  5521. /* update the tx status latency for flowid */
  5522. flow_info = &flow_ring_node->flow_info;
  5523. tx_status_latency = OSL_SYSUPTIME_US() - DHD_PKT_GET_QTIME(pkt);
  5524. flow_info->cum_tx_status_latency += tx_status_latency;
  5525. flow_info->num_tx_status++;
  5526. #endif /* TX_STATUS_LATENCY_STATS */
  5527. #if defined(DHD_LB_TXC) && !defined(BCM_SECURE_DMA)
  5528. {
  5529. int elem_ix;
  5530. void **elem;
  5531. bcm_workq_t *workq;
  5532. workq = &prot->tx_compl_prod;
  5533. /*
  5534. * Produce the packet into the tx_compl workq for the tx compl tasklet
  5535. * to consume.
  5536. */
  5537. OSL_PREFETCH(PKTTAG(pkt));
  5538. /* fetch next available slot in workq */
  5539. elem_ix = bcm_ring_prod(WORKQ_RING(workq), DHD_LB_WORKQ_SZ);
  5540. DHD_PKTTAG_SET_PA((dhd_pkttag_fr_t *)PKTTAG(pkt), pa);
  5541. DHD_PKTTAG_SET_PA_LEN((dhd_pkttag_fr_t *)PKTTAG(pkt), len);
  5542. if (elem_ix == BCM_RING_FULL) {
  5543. DHD_ERROR(("tx_compl_prod BCM_RING_FULL\n"));
  5544. goto workq_ring_full;
  5545. }
  5546. elem = WORKQ_ELEMENT(void *, &prot->tx_compl_prod, elem_ix);
  5547. *elem = pkt;
  5548. smp_wmb();
  5549. /* Sync WR index to consumer if the SYNC threshold has been reached */
  5550. if (++prot->tx_compl_prod_sync >= DHD_LB_WORKQ_SYNC) {
  5551. bcm_workq_prod_sync(workq);
  5552. prot->tx_compl_prod_sync = 0;
  5553. }
  5554. DHD_INFO(("%s: tx_compl_prod pkt<%p> sync<%d>\n",
  5555. __FUNCTION__, pkt, prot->tx_compl_prod_sync));
  5556. DHD_RING_UNLOCK(ring->ring_lock, flags);
  5557. return;
  5558. }
  5559. workq_ring_full:
  5560. #endif /* !DHD_LB_TXC */
  5561. #ifdef DMAMAP_STATS
  5562. dhd->dma_stats.txdata--;
  5563. dhd->dma_stats.txdata_sz -= len;
  5564. #endif /* DMAMAP_STATS */
  5565. pkt_fate = dhd_dbg_process_tx_status(dhd, pkt, pktid,
  5566. ltoh16(txstatus->compl_hdr.status) & WLFC_CTL_PKTFLAG_MASK);
  5567. #ifdef DHD_PKT_LOGGING
  5568. if (dhd->d11_tx_status) {
  5569. uint16 status = ltoh16(txstatus->compl_hdr.status) &
  5570. WLFC_CTL_PKTFLAG_MASK;
  5571. uint32 pkthash = __dhd_dbg_pkt_hash((uintptr_t)pkt, pktid);
  5572. DHD_PKTLOG_TXS(dhd, pkt, pktid, status);
  5573. dhd_dump_pkt(dhd, ltoh32(txstatus->cmn_hdr.if_id),
  5574. (uint8 *)PKTDATA(dhd->osh, pkt), len, TRUE,
  5575. &pkthash, &status);
  5576. }
  5577. #endif /* DHD_PKT_LOGGING */
  5578. #if defined(BCMPCIE)
  5579. dhd_txcomplete(dhd, pkt, pkt_fate);
  5580. #ifdef DHD_4WAYM4_FAIL_DISCONNECT
  5581. dhd_eap_txcomplete(dhd, pkt, pkt_fate, txstatus->cmn_hdr.if_id);
  5582. #endif /* DHD_4WAYM4_FAIL_DISCONNECT */
  5583. #endif // endif
  5584. #if DHD_DBG_SHOW_METADATA
  5585. if (dhd->prot->metadata_dbg &&
  5586. dhd->prot->tx_metadata_offset && txstatus->metadata_len) {
  5587. uchar *ptr;
/* The Ethernet header of the TX frame was copied out and removed at post time.
 * Here, move the data pointer forward by the Ethernet header size.
 */
  5591. PKTPULL(dhd->osh, pkt, ETHER_HDR_LEN);
  5592. ptr = PKTDATA(dhd->osh, pkt) - (dhd->prot->tx_metadata_offset);
  5593. bcm_print_bytes("txmetadata", ptr, txstatus->metadata_len);
  5594. dhd_prot_print_metadata(dhd, ptr, txstatus->metadata_len);
  5595. }
  5596. #endif /* DHD_DBG_SHOW_METADATA */
  5597. #ifdef DHD_HP2P
  5598. if (dhd->hp2p_capable && flow_ring_node->flow_info.tid == HP2P_PRIO) {
  5599. #ifdef DHD_HP2P_DEBUG
  5600. bcm_print_bytes("txcpl", (uint8 *)txstatus, sizeof(host_txbuf_cmpl_t));
  5601. #endif /* DHD_HP2P_DEBUG */
  5602. dhd_update_hp2p_txstats(dhd, txstatus);
  5603. }
  5604. #endif /* DHD_HP2P */
  5605. #ifdef DHD_LBUF_AUDIT
  5606. PKTAUDIT(dhd->osh, pkt);
  5607. #endif // endif
  5608. DHD_FLOWRING_TXSTATUS_CNT_UPDATE(dhd->bus, txstatus->compl_hdr.flow_ring_id,
  5609. txstatus->tx_status);
  5610. DHD_RING_UNLOCK(ring->ring_lock, flags);
  5611. PKTFREE(dhd->osh, pkt, TRUE);
  5612. return;
  5613. } /* dhd_prot_txstatus_process */
  5614. /** called on MSG_TYPE_WL_EVENT message received from dongle */
  5615. static void
  5616. dhd_prot_event_process(dhd_pub_t *dhd, void *msg)
  5617. {
  5618. wlevent_req_msg_t *evnt;
  5619. uint32 bufid;
  5620. uint16 buflen;
  5621. int ifidx = 0;
  5622. void* pkt;
  5623. dhd_prot_t *prot = dhd->prot;
  5624. /* Event complete header */
  5625. evnt = (wlevent_req_msg_t *)msg;
  5626. bufid = ltoh32(evnt->cmn_hdr.request_id);
  5627. #if defined(DHD_PKTID_AUDIT_RING)
  5628. DHD_PKTID_AUDIT_RING_DEBUG(dhd, dhd->prot->pktid_ctrl_map, bufid,
  5629. DHD_DUPLICATE_FREE, msg, D2HRING_CTRL_CMPLT_ITEMSIZE);
  5630. #endif // endif
  5631. buflen = ltoh16(evnt->event_data_len);
  5632. ifidx = BCMMSGBUF_API_IFIDX(&evnt->cmn_hdr);
  5633. /* Post another rxbuf to the device */
  5634. if (prot->cur_event_bufs_posted)
  5635. prot->cur_event_bufs_posted--;
  5636. dhd_msgbuf_rxbuf_post_event_bufs(dhd);
  5637. pkt = dhd_prot_packet_get(dhd, bufid, PKTTYPE_EVENT_RX, TRUE);
  5638. if (!pkt) {
  5639. DHD_ERROR(("%s: pkt is NULL for pktid %d\n", __FUNCTION__, bufid));
  5640. return;
  5641. }
  5642. /* DMA RX offset updated through shared area */
  5643. if (dhd->prot->rx_dataoffset)
  5644. PKTPULL(dhd->osh, pkt, dhd->prot->rx_dataoffset);
  5645. PKTSETLEN(dhd->osh, pkt, buflen);
  5646. #ifdef DHD_LBUF_AUDIT
  5647. PKTAUDIT(dhd->osh, pkt);
  5648. #endif // endif
  5649. dhd_bus_rx_frame(dhd->bus, pkt, ifidx, 1);
  5650. }
  5651. /** called on MSG_TYPE_INFO_BUF_CMPLT message received from dongle */
  5652. static void BCMFASTPATH
  5653. dhd_prot_process_infobuf_complete(dhd_pub_t *dhd, void* buf)
  5654. {
  5655. info_buf_resp_t *resp;
  5656. uint32 pktid;
  5657. uint16 buflen;
  5658. void * pkt;
  5659. resp = (info_buf_resp_t *)buf;
  5660. pktid = ltoh32(resp->cmn_hdr.request_id);
  5661. buflen = ltoh16(resp->info_data_len);
  5662. #ifdef DHD_PKTID_AUDIT_RING
  5663. DHD_PKTID_AUDIT_RING_DEBUG(dhd, dhd->prot->pktid_ctrl_map, pktid,
  5664. DHD_DUPLICATE_FREE, buf, D2HRING_INFO_BUFCMPLT_ITEMSIZE);
  5665. #endif /* DHD_PKTID_AUDIT_RING */
  5666. DHD_INFO(("id 0x%04x, len %d, phase 0x%02x, seqnum %d, rx_dataoffset %d\n",
  5667. pktid, buflen, resp->cmn_hdr.flags, ltoh16(resp->seqnum),
  5668. dhd->prot->rx_dataoffset));
  5669. if (dhd->debug_buf_dest_support) {
  5670. if (resp->dest < DEBUG_BUF_DEST_MAX) {
  5671. dhd->debug_buf_dest_stat[resp->dest]++;
  5672. }
  5673. }
  5674. pkt = dhd_prot_packet_get(dhd, pktid, PKTTYPE_INFO_RX, TRUE);
  5675. if (!pkt)
  5676. return;
  5677. /* DMA RX offset updated through shared area */
  5678. if (dhd->prot->rx_dataoffset)
  5679. PKTPULL(dhd->osh, pkt, dhd->prot->rx_dataoffset);
  5680. PKTSETLEN(dhd->osh, pkt, buflen);
  5681. #ifdef DHD_LBUF_AUDIT
  5682. PKTAUDIT(dhd->osh, pkt);
  5683. #endif // endif
/* Info ring "debug" data, which is not an 802.3 frame, is passed up with a
 * special dummy ifidx (DHD_DUMMY_INFO_IF). This is purely internal to DHD, used
 * to route the data from here to dhd_linux.c:dhd_rx_frame().
 */
  5688. dhd_bus_rx_frame(dhd->bus, pkt, DHD_DUMMY_INFO_IF /* ifidx HACK */, 1);
  5689. }
  5690. /** called on MSG_TYPE_SNAPSHOT_CMPLT message received from dongle */
  5691. static void BCMFASTPATH
  5692. dhd_prot_process_snapshot_complete(dhd_pub_t *dhd, void *buf)
  5693. {
  5694. }
  5695. /** Stop protocol: sync w/dongle state. */
  5696. void dhd_prot_stop(dhd_pub_t *dhd)
  5697. {
  5698. ASSERT(dhd);
  5699. DHD_TRACE(("%s: Enter\n", __FUNCTION__));
  5700. }
  5701. /* Add any protocol-specific data header.
  5702. * Caller must reserve prot_hdrlen prepend space.
  5703. */
  5704. void BCMFASTPATH
  5705. dhd_prot_hdrpush(dhd_pub_t *dhd, int ifidx, void *PKTBUF)
  5706. {
  5707. return;
  5708. }
  5709. uint
  5710. dhd_prot_hdrlen(dhd_pub_t *dhd, void *PKTBUF)
  5711. {
  5712. return 0;
  5713. }
  5714. #define MAX_MTU_SZ (1600u)
  5715. #define PKTBUF pktbuf
  5716. /**
  5717. * Called when a tx ethernet packet has been dequeued from a flow queue, and has to be inserted in
  5718. * the corresponding flow ring.
  5719. */
  5720. int BCMFASTPATH
  5721. dhd_prot_txdata(dhd_pub_t *dhd, void *PKTBUF, uint8 ifidx)
  5722. {
  5723. unsigned long flags;
  5724. dhd_prot_t *prot = dhd->prot;
  5725. host_txbuf_post_t *txdesc = NULL;
  5726. dmaaddr_t pa, meta_pa;
  5727. uint8 *pktdata;
  5728. uint32 pktlen;
  5729. uint32 pktid;
  5730. uint8 prio;
  5731. uint16 flowid = 0;
  5732. uint16 alloced = 0;
  5733. uint16 headroom;
  5734. msgbuf_ring_t *ring;
  5735. flow_ring_table_t *flow_ring_table;
  5736. flow_ring_node_t *flow_ring_node;
  5737. #ifdef DHD_PKT_LOGGING
  5738. uint32 pkthash;
  5739. #endif /* DHD_PKT_LOGGING */
  5740. if (dhd->flow_ring_table == NULL) {
  5741. DHD_ERROR(("dhd flow_ring_table is NULL\n"));
  5742. return BCME_NORESOURCE;
  5743. }
  5744. #ifdef DHD_PCIE_PKTID
  5745. if (!DHD_PKTID_AVAIL(dhd->prot->pktid_tx_map)) {
  5746. if (dhd->prot->pktid_depleted_cnt == DHD_PKTID_DEPLETED_MAX_COUNT) {
  5747. dhd_bus_stop_queue(dhd->bus);
  5748. dhd->prot->no_tx_resource = TRUE;
  5749. }
  5750. dhd->prot->pktid_depleted_cnt++;
  5751. goto err_no_res;
  5752. } else {
  5753. dhd->prot->pktid_depleted_cnt = 0;
  5754. }
  5755. #endif /* DHD_PCIE_PKTID */
  5756. flowid = DHD_PKT_GET_FLOWID(PKTBUF);
  5757. flow_ring_table = (flow_ring_table_t *)dhd->flow_ring_table;
  5758. flow_ring_node = (flow_ring_node_t *)&flow_ring_table[flowid];
  5759. ring = (msgbuf_ring_t *)flow_ring_node->prot_info;
  5760. DHD_RING_LOCK(ring->ring_lock, flags);
  5761. /* Create a unique 32-bit packet id */
  5762. pktid = DHD_NATIVE_TO_PKTID_RSV(dhd, dhd->prot->pktid_tx_map,
  5763. PKTBUF, PKTTYPE_DATA_TX);
  5764. #if defined(DHD_PCIE_PKTID)
  5765. if (pktid == DHD_PKTID_INVALID) {
  5766. DHD_ERROR_RLMT(("%s: Pktid pool depleted.\n", __FUNCTION__));
  5767. /*
  5768. * If we return error here, the caller would queue the packet
  5769. * again. So we'll just free the skb allocated in DMA Zone.
  5770. * Since we have not freed the original SKB yet the caller would
  5771. * requeue the same.
  5772. */
  5773. goto err_no_res_pktfree;
  5774. }
  5775. #endif /* DHD_PCIE_PKTID */
  5776. /* Reserve space in the circular buffer */
  5777. txdesc = (host_txbuf_post_t *)
  5778. dhd_prot_alloc_ring_space(dhd, ring, 1, &alloced, FALSE);
  5779. if (txdesc == NULL) {
  5780. DHD_INFO(("%s:%d: HTOD Msgbuf Not available TxCount = %d\n",
  5781. __FUNCTION__, __LINE__, OSL_ATOMIC_READ(dhd->osh, &prot->active_tx_count)));
  5782. goto err_free_pktid;
  5783. }
  5784. /* Extract the data pointer and length information */
  5785. pktdata = PKTDATA(dhd->osh, PKTBUF);
  5786. pktlen = PKTLEN(dhd->osh, PKTBUF);
  5787. DHD_DBG_PKT_MON_TX(dhd, PKTBUF, pktid);
  5788. #ifdef DHD_PKT_LOGGING
  5789. DHD_PKTLOG_TX(dhd, PKTBUF, pktid);
  5790. /* Dump TX packet */
  5791. pkthash = __dhd_dbg_pkt_hash((uintptr_t)PKTBUF, pktid);
  5792. dhd_dump_pkt(dhd, ifidx, pktdata, pktlen, TRUE, &pkthash, NULL);
  5793. #endif /* DHD_PKT_LOGGING */
  5794. /* Ethernet header: Copy before we cache flush packet using DMA_MAP */
  5795. bcopy(pktdata, txdesc->txhdr, ETHER_HDR_LEN);
  5796. /* Extract the ethernet header and adjust the data pointer and length */
  5797. pktdata = PKTPULL(dhd->osh, PKTBUF, ETHER_HDR_LEN);
  5798. pktlen -= ETHER_HDR_LEN;
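/* From here on, only the payload after the Ethernet header is DMA-mapped;
 * the 14-byte header itself travels inline in the work item (txdesc->txhdr)
 * and is pushed back onto the packet further below so the local PKTBUF keeps
 * it for completion processing.
 */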
  5799. /* Map the data pointer to a DMA-able address */
  5800. if (SECURE_DMA_ENAB(dhd->osh)) {
  5801. int offset = 0;
  5802. BCM_REFERENCE(offset);
  5803. if (prot->tx_metadata_offset)
  5804. offset = prot->tx_metadata_offset + ETHER_HDR_LEN;
  5805. pa = SECURE_DMA_MAP(dhd->osh, PKTDATA(dhd->osh, PKTBUF), pktlen,
  5806. DMA_TX, PKTBUF, 0, ring->dma_buf.secdma, offset);
  5807. }
  5808. #ifndef BCM_SECURE_DMA
  5809. else
  5810. pa = DMA_MAP(dhd->osh, PKTDATA(dhd->osh, PKTBUF), pktlen, DMA_TX, PKTBUF, 0);
  5811. #endif /* #ifndef BCM_SECURE_DMA */
  5812. if (PHYSADDRISZERO(pa)) {
  5813. DHD_ERROR(("%s: Something really bad, unless 0 is "
  5814. "a valid phyaddr for pa\n", __FUNCTION__));
  5815. ASSERT(0);
  5816. goto err_rollback_idx;
  5817. }
  5818. #ifdef DMAMAP_STATS
  5819. dhd->dma_stats.txdata++;
  5820. dhd->dma_stats.txdata_sz += pktlen;
  5821. #endif /* DMAMAP_STATS */
  5822. /* No need to lock. Save the rest of the packet's metadata */
  5823. DHD_NATIVE_TO_PKTID_SAVE(dhd, dhd->prot->pktid_tx_map, PKTBUF, pktid,
  5824. pa, pktlen, DMA_TX, NULL, ring->dma_buf.secdma, PKTTYPE_DATA_TX);
  5825. #ifdef TXP_FLUSH_NITEMS
  5826. if (ring->pend_items_count == 0)
  5827. ring->start_addr = (void *)txdesc;
  5828. ring->pend_items_count++;
  5829. #endif // endif
  5830. /* Form the Tx descriptor message buffer */
  5831. /* Common message hdr */
  5832. txdesc->cmn_hdr.msg_type = MSG_TYPE_TX_POST;
  5833. txdesc->cmn_hdr.if_id = ifidx;
  5834. txdesc->cmn_hdr.flags = ring->current_phase;
  5835. txdesc->flags = BCMPCIE_PKT_FLAGS_FRAME_802_3;
  5836. prio = (uint8)PKTPRIO(PKTBUF);
  5837. txdesc->flags |= (prio & 0x7) << BCMPCIE_PKT_FLAGS_PRIO_SHIFT;
  5838. txdesc->seg_cnt = 1;
  5839. txdesc->data_len = htol16((uint16) pktlen);
  5840. txdesc->data_buf_addr.high_addr = htol32(PHYSADDRHI(pa));
  5841. txdesc->data_buf_addr.low_addr = htol32(PHYSADDRLO(pa));
  5842. /* Move data pointer to keep ether header in local PKTBUF for later reference */
  5843. PKTPUSH(dhd->osh, PKTBUF, ETHER_HDR_LEN);
  5844. /* Handle Tx metadata */
  5845. headroom = (uint16)PKTHEADROOM(dhd->osh, PKTBUF);
  5846. if (prot->tx_metadata_offset && (headroom < prot->tx_metadata_offset))
  5847. DHD_ERROR(("No headroom for Metadata tx %d %d\n",
  5848. prot->tx_metadata_offset, headroom));
  5849. if (prot->tx_metadata_offset && (headroom >= prot->tx_metadata_offset)) {
  5850. DHD_TRACE(("Metadata in tx %d\n", prot->tx_metadata_offset));
  5851. /* Adjust the data pointer to account for meta data in DMA_MAP */
  5852. PKTPUSH(dhd->osh, PKTBUF, prot->tx_metadata_offset);
  5853. if (SECURE_DMA_ENAB(dhd->osh)) {
  5854. meta_pa = SECURE_DMA_MAP_TXMETA(dhd->osh, PKTDATA(dhd->osh, PKTBUF),
  5855. prot->tx_metadata_offset + ETHER_HDR_LEN, DMA_RX, PKTBUF,
  5856. 0, ring->dma_buf.secdma);
  5857. }
  5858. #ifndef BCM_SECURE_DMA
  5859. else
  5860. meta_pa = DMA_MAP(dhd->osh, PKTDATA(dhd->osh, PKTBUF),
  5861. prot->tx_metadata_offset, DMA_RX, PKTBUF, 0);
  5862. #endif /* #ifndef BCM_SECURE_DMA */
  5863. if (PHYSADDRISZERO(meta_pa)) {
  5864. /* Unmap the data pointer to a DMA-able address */
  5865. if (SECURE_DMA_ENAB(dhd->osh)) {
  5866. int offset = 0;
  5867. BCM_REFERENCE(offset);
  5868. if (prot->tx_metadata_offset) {
  5869. offset = prot->tx_metadata_offset + ETHER_HDR_LEN;
  5870. }
  5871. SECURE_DMA_UNMAP(dhd->osh, pa, pktlen,
  5872. DMA_TX, 0, DHD_DMAH_NULL, ring->dma_buf.secdma, offset);
  5873. }
  5874. #ifndef BCM_SECURE_DMA
  5875. else {
  5876. DMA_UNMAP(dhd->osh, pa, pktlen, DMA_TX, 0, DHD_DMAH_NULL);
  5877. }
  5878. #endif /* #ifndef BCM_SECURE_DMA */
  5879. #ifdef TXP_FLUSH_NITEMS
  5880. /* update pend_items_count */
  5881. ring->pend_items_count--;
  5882. #endif /* TXP_FLUSH_NITEMS */
  5883. DHD_ERROR(("%s: Something really bad, unless 0 is "
  5884. "a valid phyaddr for meta_pa\n", __FUNCTION__));
  5885. ASSERT(0);
  5886. goto err_rollback_idx;
  5887. }
  5888. /* Adjust the data pointer back to original value */
  5889. PKTPULL(dhd->osh, PKTBUF, prot->tx_metadata_offset);
  5890. txdesc->metadata_buf_len = prot->tx_metadata_offset;
  5891. txdesc->metadata_buf_addr.high_addr = htol32(PHYSADDRHI(meta_pa));
  5892. txdesc->metadata_buf_addr.low_addr = htol32(PHYSADDRLO(meta_pa));
  5893. } else {
  5894. #ifdef DHD_HP2P
  5895. if (dhd->hp2p_capable && flow_ring_node->flow_info.tid == HP2P_PRIO) {
  5896. dhd_update_hp2p_txdesc(dhd, txdesc);
  5897. } else
  5898. #endif /* DHD_HP2P */
  5899. if (1)
  5900. {
  5901. txdesc->metadata_buf_len = htol16(0);
  5902. txdesc->metadata_buf_addr.high_addr = 0;
  5903. txdesc->metadata_buf_addr.low_addr = 0;
  5904. }
  5905. }
  5906. #ifdef DHD_PKTID_AUDIT_RING
  5907. DHD_PKTID_AUDIT(dhd, prot->pktid_tx_map, pktid, DHD_DUPLICATE_ALLOC);
  5908. #endif /* DHD_PKTID_AUDIT_RING */
  5909. txdesc->cmn_hdr.request_id = htol32(pktid);
  5910. DHD_TRACE(("txpost: data_len %d, pktid 0x%04x\n", txdesc->data_len,
  5911. txdesc->cmn_hdr.request_id));
  5912. #ifdef DHD_LBUF_AUDIT
  5913. PKTAUDIT(dhd->osh, PKTBUF);
  5914. #endif // endif
  5915. if (pktlen > MAX_MTU_SZ) {
  5916. DHD_ERROR(("%s: ######## pktlen(%d) > MAX_MTU_SZ(%d) #######\n",
  5917. __FUNCTION__, pktlen, MAX_MTU_SZ));
  5918. dhd_prhex("txringitem", (volatile uchar*)txdesc,
  5919. sizeof(host_txbuf_post_t), DHD_ERROR_VAL);
  5920. }
  5921. /* Update the write pointer in TCM & ring bell */
  5922. #if defined(DHD_HP2P) && defined(TXP_FLUSH_NITEMS)
  5923. if (dhd->hp2p_capable && flow_ring_node->flow_info.tid == HP2P_PRIO) {
  5924. dhd_calc_hp2p_burst(dhd, ring, flowid);
  5925. } else {
  5926. if ((ring->pend_items_count == prot->txp_threshold) ||
  5927. ((uint8 *) txdesc == (uint8 *) DHD_RING_END_VA(ring))) {
  5928. dhd_prot_txdata_write_flush(dhd, flowid);
  5929. }
  5930. }
  5931. #elif defined(TXP_FLUSH_NITEMS)
/* Flush if we have either hit the txp_threshold or if this msg occupies
 * the last slot in the flow_ring, i.e. just before wrap-around.
 */
  5934. if ((ring->pend_items_count == prot->txp_threshold) ||
  5935. ((uint8 *) txdesc == (uint8 *) DHD_RING_END_VA(ring))) {
  5936. dhd_prot_txdata_write_flush(dhd, flowid);
  5937. }
  5938. #else
  5939. /* update ring's WR index and ring doorbell to dongle */
  5940. dhd_prot_ring_write_complete(dhd, ring, txdesc, 1);
  5941. #endif /* DHD_HP2P && TXP_FLUSH_NITEMS */
  5942. #if defined(TX_STATUS_LATENCY_STATS)
  5943. /* set the time when pkt is queued to flowring */
  5944. DHD_PKT_SET_QTIME(PKTBUF, OSL_SYSUPTIME_US());
  5945. #endif // endif
  5946. OSL_ATOMIC_INC(dhd->osh, &prot->active_tx_count);
/*
 * Take a wake lock; do not sleep while at least one packet
 * is still awaiting completion.
 */
  5951. DHD_TXFL_WAKE_LOCK_TIMEOUT(dhd, MAX_TX_TIMEOUT);
  5952. DHD_RING_UNLOCK(ring->ring_lock, flags);
  5953. #ifdef TX_STATUS_LATENCY_STATS
  5954. flow_ring_node->flow_info.num_tx_pkts++;
  5955. #endif /* TX_STATUS_LATENCY_STATS */
  5956. return BCME_OK;
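/* Roll back the write pointer that was advanced for the descriptor we never
 * submitted. Reaching slot 0 on the way back undoes a wrap, so the phase bit
 * flipped at allocation time (see dhd_prot_alloc_ring_space) is flipped back.
 */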
  5957. err_rollback_idx:
  5958. /* roll back write pointer for unprocessed message */
  5959. if (ring->wr == 0) {
  5960. ring->wr = ring->max_items - 1;
  5961. } else {
  5962. ring->wr--;
  5963. if (ring->wr == 0) {
  5964. DHD_INFO(("%s: flipping the phase now\n", ring->name));
  5965. ring->current_phase = ring->current_phase ?
  5966. 0 : BCMPCIE_CMNHDR_PHASE_BIT_INIT;
  5967. }
  5968. }
  5969. err_free_pktid:
  5970. #if defined(DHD_PCIE_PKTID)
  5971. {
  5972. void *dmah;
  5973. void *secdma;
  5974. /* Free up the PKTID. physaddr and pktlen will be garbage. */
  5975. DHD_PKTID_TO_NATIVE(dhd, dhd->prot->pktid_tx_map, pktid,
  5976. pa, pktlen, dmah, secdma, PKTTYPE_NO_CHECK);
  5977. }
  5978. err_no_res_pktfree:
  5979. #endif /* DHD_PCIE_PKTID */
  5980. DHD_RING_UNLOCK(ring->ring_lock, flags);
  5981. err_no_res:
  5982. return BCME_NORESOURCE;
  5983. } /* dhd_prot_txdata */
  5984. /* called with a ring_lock */
  5985. /** optimization to write "n" tx items at a time to ring */
  5986. void BCMFASTPATH
  5987. dhd_prot_txdata_write_flush(dhd_pub_t *dhd, uint16 flowid)
  5988. {
  5989. #ifdef TXP_FLUSH_NITEMS
  5990. flow_ring_table_t *flow_ring_table;
  5991. flow_ring_node_t *flow_ring_node;
  5992. msgbuf_ring_t *ring;
  5993. if (dhd->flow_ring_table == NULL) {
  5994. return;
  5995. }
  5996. flow_ring_table = (flow_ring_table_t *)dhd->flow_ring_table;
  5997. flow_ring_node = (flow_ring_node_t *)&flow_ring_table[flowid];
  5998. ring = (msgbuf_ring_t *)flow_ring_node->prot_info;
  5999. if (ring->pend_items_count) {
  6000. /* update ring's WR index and ring doorbell to dongle */
  6001. dhd_prot_ring_write_complete(dhd, ring, ring->start_addr,
  6002. ring->pend_items_count);
  6003. ring->pend_items_count = 0;
  6004. ring->start_addr = NULL;
  6005. }
  6006. #endif /* TXP_FLUSH_NITEMS */
  6007. }
  6008. #undef PKTBUF /* Only defined in the above routine */
  6009. int BCMFASTPATH
  6010. dhd_prot_hdrpull(dhd_pub_t *dhd, int *ifidx, void *pkt, uchar *buf, uint *len)
  6011. {
  6012. return 0;
  6013. }
  6014. /** post a set of receive buffers to the dongle */
  6015. static void BCMFASTPATH
  6016. dhd_prot_return_rxbuf(dhd_pub_t *dhd, uint32 pktid, uint32 rxcnt)
  6017. {
  6018. dhd_prot_t *prot = dhd->prot;
  6019. #if defined(DHD_LB_RXC)
  6020. int elem_ix;
  6021. uint32 *elem;
  6022. bcm_workq_t *workq;
  6023. workq = &prot->rx_compl_prod;
  6024. /* Produce the work item */
  6025. elem_ix = bcm_ring_prod(WORKQ_RING(workq), DHD_LB_WORKQ_SZ);
  6026. if (elem_ix == BCM_RING_FULL) {
  6027. DHD_ERROR(("%s LB RxCompl workQ is full\n", __FUNCTION__));
  6028. ASSERT(0);
  6029. return;
  6030. }
  6031. elem = WORKQ_ELEMENT(uint32, workq, elem_ix);
  6032. *elem = pktid;
  6033. smp_wmb();
  6034. /* Sync WR index to consumer if the SYNC threshold has been reached */
  6035. if (++prot->rx_compl_prod_sync >= DHD_LB_WORKQ_SYNC) {
  6036. bcm_workq_prod_sync(workq);
  6037. prot->rx_compl_prod_sync = 0;
  6038. }
  6039. DHD_INFO(("%s: rx_compl_prod pktid<%u> sync<%d>\n",
  6040. __FUNCTION__, pktid, prot->rx_compl_prod_sync));
  6041. #endif /* DHD_LB_RXC */
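/* Account for the rx buffers the dongle has consumed; once the number of
 * outstanding posted buffers drops RXBUFPOST_THRESHOLD below max_rxbufpost,
 * replenish the rx post ring.
 */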
  6042. if (prot->rxbufpost >= rxcnt) {
  6043. prot->rxbufpost -= (uint16)rxcnt;
  6044. } else {
  6045. /* ASSERT(0); */
  6046. prot->rxbufpost = 0;
  6047. }
  6048. #if !defined(DHD_LB_RXC)
  6049. if (prot->rxbufpost <= (prot->max_rxbufpost - RXBUFPOST_THRESHOLD))
  6050. dhd_msgbuf_rxbuf_post(dhd, FALSE); /* alloc pkt ids */
  6051. #endif /* !DHD_LB_RXC */
  6052. return;
  6053. }
  6054. /* called before an ioctl is sent to the dongle */
  6055. static void
  6056. dhd_prot_wlioctl_intercept(dhd_pub_t *dhd, wl_ioctl_t * ioc, void * buf)
  6057. {
  6058. dhd_prot_t *prot = dhd->prot;
  6059. int slen = 0;
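/* The "pcie_bus_tput" iovar needs a host DMA buffer for the throughput test:
 * patch the address and length of the driver's host_bus_throughput_buf into
 * the request body before it is handed to the dongle.
 */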
  6060. if (ioc->cmd == WLC_SET_VAR && buf != NULL && !strcmp(buf, "pcie_bus_tput")) {
  6061. pcie_bus_tput_params_t *tput_params;
  6062. slen = strlen("pcie_bus_tput") + 1;
  6063. tput_params = (pcie_bus_tput_params_t*)((char *)buf + slen);
  6064. bcopy(&prot->host_bus_throughput_buf.pa, &tput_params->host_buf_addr,
  6065. sizeof(tput_params->host_buf_addr));
  6066. tput_params->host_buf_len = DHD_BUS_TPUT_BUF_LEN;
  6067. }
  6068. }
  6069. /* called after an ioctl returns from dongle */
  6070. static void
  6071. dhd_prot_wl_ioctl_ret_intercept(dhd_pub_t *dhd, wl_ioctl_t * ioc, void * buf,
  6072. int ifidx, int ret, int len)
  6073. {
  6074. if (!ret && ioc->cmd == WLC_SET_VAR && buf != NULL) {
  6075. /* Intercept the wme_dp ioctl here */
  6076. if (!strcmp(buf, "wme_dp")) {
  6077. int slen, val = 0;
  6078. slen = strlen("wme_dp") + 1;
  6079. if (len >= (int)(slen + sizeof(int)))
  6080. bcopy(((char *)buf + slen), &val, sizeof(int));
  6081. dhd->wme_dp = (uint8) ltoh32(val);
  6082. }
  6083. }
  6084. }
  6085. #ifdef DHD_PM_CONTROL_FROM_FILE
  6086. extern bool g_pm_control;
  6087. #endif /* DHD_PM_CONTROL_FROM_FILE */
  6088. /** Use protocol to issue ioctl to dongle. Only one ioctl may be in transit. */
  6089. int dhd_prot_ioctl(dhd_pub_t *dhd, int ifidx, wl_ioctl_t * ioc, void * buf, int len)
  6090. {
  6091. int ret = -1;
  6092. uint8 action;
  6093. if (dhd->bus->is_linkdown) {
  6094. DHD_ERROR_RLMT(("%s : PCIe link is down. we have nothing to do\n", __FUNCTION__));
  6095. goto done;
  6096. }
  6097. if (dhd_query_bus_erros(dhd)) {
  6098. DHD_ERROR_RLMT(("%s : some BUS error. we have nothing to do\n", __FUNCTION__));
  6099. goto done;
  6100. }
  6101. if ((dhd->busstate == DHD_BUS_DOWN) || dhd->hang_was_sent) {
  6102. DHD_ERROR_RLMT(("%s : bus is down. we have nothing to do -"
  6103. " bus state: %d, sent hang: %d\n", __FUNCTION__,
  6104. dhd->busstate, dhd->hang_was_sent));
  6105. goto done;
  6106. }
  6107. if (dhd->busstate == DHD_BUS_SUSPEND) {
  6108. DHD_ERROR(("%s : bus is suspended\n", __FUNCTION__));
  6109. goto done;
  6110. }
  6111. DHD_TRACE(("%s: Enter\n", __FUNCTION__));
  6112. if (ioc->cmd == WLC_SET_PM) {
  6113. #ifdef DHD_PM_CONTROL_FROM_FILE
  6114. if (g_pm_control == TRUE) {
  6115. DHD_ERROR(("%s: SET PM ignored!(Requested:%d)\n",
  6116. __FUNCTION__, buf ? *(char *)buf : 0));
  6117. goto done;
  6118. }
  6119. #endif /* DHD_PM_CONTROL_FROM_FILE */
  6120. DHD_TRACE_HW4(("%s: SET PM to %d\n", __FUNCTION__, buf ? *(char *)buf : 0));
  6121. }
  6122. ASSERT(len <= WLC_IOCTL_MAXLEN);
  6123. if (len > WLC_IOCTL_MAXLEN)
  6124. goto done;
  6125. action = ioc->set;
  6126. dhd_prot_wlioctl_intercept(dhd, ioc, buf);
  6127. if (action & WL_IOCTL_ACTION_SET) {
  6128. ret = dhd_msgbuf_set_ioctl(dhd, ifidx, ioc->cmd, buf, len, action);
  6129. } else {
  6130. ret = dhd_msgbuf_query_ioctl(dhd, ifidx, ioc->cmd, buf, len, action);
  6131. if (ret > 0)
  6132. ioc->used = ret;
  6133. }
  6134. /* Too many programs assume ioctl() returns 0 on success */
  6135. if (ret >= 0) {
  6136. ret = 0;
  6137. } else {
  6138. DHD_INFO(("%s: status ret value is %d \n", __FUNCTION__, ret));
  6139. dhd->dongle_error = ret;
  6140. }
  6141. dhd_prot_wl_ioctl_ret_intercept(dhd, ioc, buf, ifidx, ret, len);
  6142. done:
  6143. return ret;
  6144. } /* dhd_prot_ioctl */
  6145. /** test / loopback */
  6146. int
  6147. dhdmsgbuf_lpbk_req(dhd_pub_t *dhd, uint len)
  6148. {
  6149. unsigned long flags;
  6150. dhd_prot_t *prot = dhd->prot;
  6151. uint16 alloced = 0;
  6152. ioct_reqst_hdr_t *ioct_rqst;
  6153. uint16 hdrlen = sizeof(ioct_reqst_hdr_t);
  6154. uint16 msglen = len + hdrlen;
  6155. msgbuf_ring_t *ring = &prot->h2dring_ctrl_subn;
  6156. msglen = ALIGN_SIZE(msglen, DMA_ALIGN_LEN);
  6157. msglen = LIMIT_TO_MAX(msglen, MSGBUF_MAX_MSG_SIZE);
  6158. DHD_RING_LOCK(ring->ring_lock, flags);
  6159. ioct_rqst = (ioct_reqst_hdr_t *)
  6160. dhd_prot_alloc_ring_space(dhd, ring, 1, &alloced, FALSE);
  6161. if (ioct_rqst == NULL) {
  6162. DHD_RING_UNLOCK(ring->ring_lock, flags);
  6163. return 0;
  6164. }
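/* Fill the request body with an incrementing byte pattern (0x00, 0x01, ...,
 * 0xFF, repeating) so the payload printed below is easy to recognize.
 */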
  6165. {
  6166. uint8 *ptr;
  6167. uint16 i;
  6168. ptr = (uint8 *)ioct_rqst;
  6169. for (i = 0; i < msglen; i++) {
  6170. ptr[i] = i % 256;
  6171. }
  6172. }
  6173. /* Common msg buf hdr */
  6174. ioct_rqst->msg.epoch = ring->seqnum % H2D_EPOCH_MODULO;
  6175. ring->seqnum++;
  6176. ioct_rqst->msg.msg_type = MSG_TYPE_LOOPBACK;
  6177. ioct_rqst->msg.if_id = 0;
  6178. ioct_rqst->msg.flags = ring->current_phase;
  6179. bcm_print_bytes("LPBK REQ: ", (uint8 *)ioct_rqst, msglen);
  6180. /* update ring's WR index and ring doorbell to dongle */
  6181. dhd_prot_ring_write_complete(dhd, ring, ioct_rqst, 1);
  6182. DHD_RING_UNLOCK(ring->ring_lock, flags);
  6183. return 0;
  6184. }
  6185. /** test / loopback */
  6186. void dmaxfer_free_dmaaddr(dhd_pub_t *dhd, dhd_dmaxfer_t *dmaxfer)
  6187. {
  6188. if (dmaxfer == NULL)
  6189. return;
  6190. dhd_dma_buf_free(dhd, &dmaxfer->srcmem);
  6191. dhd_dma_buf_free(dhd, &dmaxfer->dstmem);
  6192. }
  6193. /** test / loopback */
  6194. int
  6195. dhd_prepare_schedule_dmaxfer_free(dhd_pub_t *dhdp)
  6196. {
  6197. dhd_prot_t *prot = dhdp->prot;
  6198. dhd_dmaxfer_t *dmaxfer = &prot->dmaxfer;
  6199. dmaxref_mem_map_t *dmap = NULL;
  6200. dmap = MALLOCZ(dhdp->osh, sizeof(dmaxref_mem_map_t));
  6201. if (!dmap) {
  6202. DHD_ERROR(("%s: dmap alloc failed\n", __FUNCTION__));
  6203. goto mem_alloc_fail;
  6204. }
  6205. dmap->srcmem = &(dmaxfer->srcmem);
  6206. dmap->dstmem = &(dmaxfer->dstmem);
  6207. DMAXFER_FREE(dhdp, dmap);
  6208. return BCME_OK;
  6209. mem_alloc_fail:
  6210. if (dmap) {
  6211. MFREE(dhdp->osh, dmap, sizeof(dmaxref_mem_map_t));
  6212. dmap = NULL;
  6213. }
  6214. return BCME_NOMEM;
  6215. } /* dhd_prepare_schedule_dmaxfer_free */
  6216. /** test / loopback */
  6217. void
  6218. dmaxfer_free_prev_dmaaddr(dhd_pub_t *dhdp, dmaxref_mem_map_t *dmmap)
  6219. {
  6220. dhd_dma_buf_free(dhdp, dmmap->srcmem);
  6221. dhd_dma_buf_free(dhdp, dmmap->dstmem);
  6222. MFREE(dhdp->osh, dmmap, sizeof(dmaxref_mem_map_t));
  6223. dhdp->bus->dmaxfer_complete = TRUE;
  6224. dhd_os_dmaxfer_wake(dhdp);
  6225. dmmap = NULL;
  6226. } /* dmaxfer_free_prev_dmaaddr */
  6227. /** test / loopback */
  6228. int dmaxfer_prepare_dmaaddr(dhd_pub_t *dhd, uint len,
  6229. uint srcdelay, uint destdelay, dhd_dmaxfer_t *dmaxfer)
  6230. {
  6231. uint i = 0, j = 0;
  6232. if (!dmaxfer)
  6233. return BCME_ERROR;
  6234. /* First free up existing buffers */
  6235. dmaxfer_free_dmaaddr(dhd, dmaxfer);
  6236. if (dhd_dma_buf_alloc(dhd, &dmaxfer->srcmem, len)) {
  6237. return BCME_NOMEM;
  6238. }
  6239. if (dhd_dma_buf_alloc(dhd, &dmaxfer->dstmem, len + 8)) {
  6240. dhd_dma_buf_free(dhd, &dmaxfer->srcmem);
  6241. return BCME_NOMEM;
  6242. }
  6243. dmaxfer->len = len;
  6244. /* Populate source with a pattern like below
  6245. * 0x00000000
  6246. * 0x01010101
  6247. * 0x02020202
  6248. * 0x03030303
  6249. * 0x04040404
  6250. * 0x05050505
  6251. * ...
  6252. * 0xFFFFFFFF
  6253. */
  6254. while (i < dmaxfer->len) {
  6255. ((uint8*)dmaxfer->srcmem.va)[i] = j % 256;
  6256. i++;
  6257. if (i % 4 == 0) {
  6258. j++;
  6259. }
  6260. }
  6261. OSL_CACHE_FLUSH(dmaxfer->srcmem.va, dmaxfer->len);
  6262. dmaxfer->srcdelay = srcdelay;
  6263. dmaxfer->destdelay = destdelay;
  6264. return BCME_OK;
  6265. } /* dmaxfer_prepare_dmaaddr */
  6266. static void
  6267. dhd_msgbuf_dmaxfer_process(dhd_pub_t *dhd, void *msg)
  6268. {
  6269. dhd_prot_t *prot = dhd->prot;
  6270. uint64 end_usec;
  6271. pcie_dmaxfer_cmplt_t *cmplt = (pcie_dmaxfer_cmplt_t *)msg;
  6272. int buf_free_scheduled;
  6273. BCM_REFERENCE(cmplt);
  6274. end_usec = OSL_SYSUPTIME_US();
  6275. DHD_ERROR(("DMA loopback status: %d\n", cmplt->compl_hdr.status));
  6276. prot->dmaxfer.status = cmplt->compl_hdr.status;
  6277. OSL_CACHE_INV(prot->dmaxfer.dstmem.va, prot->dmaxfer.len);
  6278. if (prot->dmaxfer.srcmem.va && prot->dmaxfer.dstmem.va) {
  6279. if (memcmp(prot->dmaxfer.srcmem.va,
  6280. prot->dmaxfer.dstmem.va, prot->dmaxfer.len) ||
  6281. cmplt->compl_hdr.status != BCME_OK) {
  6282. DHD_ERROR(("DMA loopback failed\n"));
/* It is sometimes observed that the completion header status is set to OK
 * even though the memcmp fails, so always explicitly mark the dmaxfer
 * status as an error in that case.
 */
  6288. prot->dmaxfer.status = BCME_ERROR;
  6289. prhex("XFER SRC: ",
  6290. prot->dmaxfer.srcmem.va, prot->dmaxfer.len);
  6291. prhex("XFER DST: ",
  6292. prot->dmaxfer.dstmem.va, prot->dmaxfer.len);
  6293. }
  6294. else {
  6295. switch (prot->dmaxfer.d11_lpbk) {
  6296. case M2M_DMA_LPBK: {
  6297. DHD_ERROR(("DMA successful pcie m2m DMA loopback\n"));
  6298. } break;
  6299. case D11_LPBK: {
  6300. DHD_ERROR(("DMA successful with d11 loopback\n"));
  6301. } break;
  6302. case BMC_LPBK: {
  6303. DHD_ERROR(("DMA successful with bmc loopback\n"));
  6304. } break;
  6305. case M2M_NON_DMA_LPBK: {
  6306. DHD_ERROR(("DMA successful pcie m2m NON DMA loopback\n"));
  6307. } break;
  6308. case D11_HOST_MEM_LPBK: {
  6309. DHD_ERROR(("DMA successful d11 host mem loopback\n"));
  6310. } break;
  6311. case BMC_HOST_MEM_LPBK: {
  6312. DHD_ERROR(("DMA successful bmc host mem loopback\n"));
  6313. } break;
  6314. default: {
  6315. DHD_ERROR(("Invalid loopback option\n"));
  6316. } break;
  6317. }
  6318. if (DHD_LPBKDTDUMP_ON()) {
  6319. /* debug info print of the Tx and Rx buffers */
  6320. dhd_prhex("XFER SRC: ", prot->dmaxfer.srcmem.va,
  6321. prot->dmaxfer.len, DHD_INFO_VAL);
  6322. dhd_prhex("XFER DST: ", prot->dmaxfer.dstmem.va,
  6323. prot->dmaxfer.len, DHD_INFO_VAL);
  6324. }
  6325. }
  6326. }
  6327. buf_free_scheduled = dhd_prepare_schedule_dmaxfer_free(dhd);
  6328. end_usec -= prot->dmaxfer.start_usec;
  6329. if (end_usec) {
  6330. prot->dmaxfer.time_taken = end_usec;
  6331. DHD_ERROR(("DMA loopback %d bytes in %lu usec, %u kBps\n",
  6332. prot->dmaxfer.len, (unsigned long)end_usec,
  6333. (prot->dmaxfer.len * (1000 * 1000 / 1024) / (uint32)end_usec)));
  6334. }
  6335. dhd->prot->dmaxfer.in_progress = FALSE;
  6336. if (buf_free_scheduled != BCME_OK) {
  6337. dhd->bus->dmaxfer_complete = TRUE;
  6338. dhd_os_dmaxfer_wake(dhd);
  6339. }
  6340. }
  6341. /** Test functionality.
  6342. * Transfers bytes from host to dongle and to host again using DMA
  6343. * This function is not reentrant, as prot->dmaxfer.in_progress is not protected
  6344. * by a spinlock.
  6345. */
  6346. int
  6347. dhdmsgbuf_dmaxfer_req(dhd_pub_t *dhd, uint len, uint srcdelay, uint destdelay,
  6348. uint d11_lpbk, uint core_num)
  6349. {
  6350. unsigned long flags;
  6351. int ret = BCME_OK;
  6352. dhd_prot_t *prot = dhd->prot;
  6353. pcie_dma_xfer_params_t *dmap;
  6354. uint32 xferlen = LIMIT_TO_MAX(len, DMA_XFER_LEN_LIMIT);
  6355. uint16 alloced = 0;
  6356. msgbuf_ring_t *ring = &prot->h2dring_ctrl_subn;
  6357. if (prot->dmaxfer.in_progress) {
  6358. DHD_ERROR(("DMA is in progress...\n"));
  6359. return BCME_ERROR;
  6360. }
  6361. if (d11_lpbk >= MAX_LPBK) {
  6362. DHD_ERROR(("loopback mode should be either"
  6363. " 0-PCIE_M2M_DMA, 1-D11, 2-BMC or 3-PCIE_M2M_NonDMA\n"));
  6364. return BCME_ERROR;
  6365. }
  6366. DHD_RING_LOCK(ring->ring_lock, flags);
  6367. prot->dmaxfer.in_progress = TRUE;
  6368. if ((ret = dmaxfer_prepare_dmaaddr(dhd, xferlen, srcdelay, destdelay,
  6369. &prot->dmaxfer)) != BCME_OK) {
  6370. prot->dmaxfer.in_progress = FALSE;
  6371. DHD_RING_UNLOCK(ring->ring_lock, flags);
  6372. return ret;
  6373. }
  6374. dmap = (pcie_dma_xfer_params_t *)
  6375. dhd_prot_alloc_ring_space(dhd, ring, 1, &alloced, FALSE);
  6376. if (dmap == NULL) {
  6377. dmaxfer_free_dmaaddr(dhd, &prot->dmaxfer);
  6378. prot->dmaxfer.in_progress = FALSE;
  6379. DHD_RING_UNLOCK(ring->ring_lock, flags);
  6380. return BCME_NOMEM;
  6381. }
  6382. /* Common msg buf hdr */
  6383. dmap->cmn_hdr.msg_type = MSG_TYPE_LPBK_DMAXFER;
  6384. dmap->cmn_hdr.request_id = htol32(DHD_FAKE_PKTID);
  6385. dmap->cmn_hdr.epoch = ring->seqnum % H2D_EPOCH_MODULO;
  6386. dmap->cmn_hdr.flags = ring->current_phase;
  6387. ring->seqnum++;
  6388. dmap->host_input_buf_addr.high = htol32(PHYSADDRHI(prot->dmaxfer.srcmem.pa));
  6389. dmap->host_input_buf_addr.low = htol32(PHYSADDRLO(prot->dmaxfer.srcmem.pa));
  6390. dmap->host_ouput_buf_addr.high = htol32(PHYSADDRHI(prot->dmaxfer.dstmem.pa));
  6391. dmap->host_ouput_buf_addr.low = htol32(PHYSADDRLO(prot->dmaxfer.dstmem.pa));
  6392. dmap->xfer_len = htol32(prot->dmaxfer.len);
  6393. dmap->srcdelay = htol32(prot->dmaxfer.srcdelay);
  6394. dmap->destdelay = htol32(prot->dmaxfer.destdelay);
  6395. prot->dmaxfer.d11_lpbk = d11_lpbk;
  6396. dmap->flags = (((core_num & PCIE_DMA_XFER_FLG_CORE_NUMBER_MASK)
  6397. << PCIE_DMA_XFER_FLG_CORE_NUMBER_SHIFT) |
  6398. ((prot->dmaxfer.d11_lpbk & PCIE_DMA_XFER_FLG_D11_LPBK_MASK)
  6399. << PCIE_DMA_XFER_FLG_D11_LPBK_SHIFT));
  6400. prot->dmaxfer.start_usec = OSL_SYSUPTIME_US();
  6401. /* update ring's WR index and ring doorbell to dongle */
  6402. dhd_prot_ring_write_complete(dhd, ring, dmap, 1);
  6403. DHD_RING_UNLOCK(ring->ring_lock, flags);
  6404. DHD_ERROR(("DMA loopback Started...\n"));
  6405. return BCME_OK;
  6406. } /* dhdmsgbuf_dmaxfer_req */
  6407. int
  6408. dhdmsgbuf_dmaxfer_status(dhd_pub_t *dhd, dma_xfer_info_t *result)
  6409. {
  6410. dhd_prot_t *prot = dhd->prot;
  6411. if (prot->dmaxfer.in_progress)
  6412. result->status = DMA_XFER_IN_PROGRESS;
  6413. else if (prot->dmaxfer.status == 0)
  6414. result->status = DMA_XFER_SUCCESS;
  6415. else
  6416. result->status = DMA_XFER_FAILED;
  6417. result->type = prot->dmaxfer.d11_lpbk;
  6418. result->error_code = prot->dmaxfer.status;
  6419. result->num_bytes = prot->dmaxfer.len;
  6420. result->time_taken = prot->dmaxfer.time_taken;
  6421. if (prot->dmaxfer.time_taken) {
  6422. /* throughput in kBps */
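/* For example, 1048576 bytes moved in 100000 usec:
 * 1048576 * (1000 * 1000 / 1024) / 100000 = 1048576 * 976 / 100000
 * ~= 10234 kBps, i.e. roughly 10 MB/s.
 */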
  6423. result->tput =
  6424. (prot->dmaxfer.len * (1000 * 1000 / 1024)) /
  6425. (uint32)prot->dmaxfer.time_taken;
  6426. }
  6427. return BCME_OK;
  6428. }
  6429. /** Called in the process of submitting an ioctl to the dongle */
  6430. static int
  6431. dhd_msgbuf_query_ioctl(dhd_pub_t *dhd, int ifidx, uint cmd, void *buf, uint len, uint8 action)
  6432. {
  6433. int ret = 0;
  6434. uint copylen = 0;
  6435. DHD_TRACE(("%s: Enter\n", __FUNCTION__));
  6436. if (dhd->bus->is_linkdown) {
  6437. DHD_ERROR(("%s : PCIe link is down. we have nothing to do\n",
  6438. __FUNCTION__));
  6439. return -EIO;
  6440. }
  6441. if (dhd->busstate == DHD_BUS_DOWN) {
  6442. DHD_ERROR(("%s : bus is down. we have nothing to do\n", __FUNCTION__));
  6443. return -EIO;
  6444. }
  6445. /* don't talk to the dongle if fw is about to be reloaded */
  6446. if (dhd->hang_was_sent) {
  6447. DHD_ERROR(("%s: HANG was sent up earlier. Not talking to the chip\n",
  6448. __FUNCTION__));
  6449. return -EIO;
  6450. }
  6451. if (cmd == WLC_GET_VAR && buf)
  6452. {
  6453. if (!len || !*(uint8 *)buf) {
  6454. DHD_ERROR(("%s(): Zero length bailing\n", __FUNCTION__));
  6455. ret = BCME_BADARG;
  6456. goto done;
  6457. }
  6458. /* Respond "bcmerror" and "bcmerrorstr" with local cache */
  6459. copylen = MIN(len, BCME_STRLEN);
  6460. if ((len >= strlen("bcmerrorstr")) &&
  6461. (!strcmp((char *)buf, "bcmerrorstr"))) {
  6462. strncpy((char *)buf, bcmerrorstr(dhd->dongle_error), copylen);
  6463. *(uint8 *)((uint8 *)buf + (copylen - 1)) = '\0';
  6464. goto done;
  6465. } else if ((len >= strlen("bcmerror")) &&
  6466. !strcmp((char *)buf, "bcmerror")) {
  6467. *(uint32 *)(uint32 *)buf = dhd->dongle_error;
  6468. goto done;
  6469. }
  6470. }
DHD_CTL(("query_ioctl: ACTION %d ifidx %d cmd %d len %d \n",
  6472. action, ifidx, cmd, len));
  6473. ret = dhd_fillup_ioct_reqst(dhd, (uint16)len, cmd, buf, ifidx);
  6474. if (ret < 0) {
  6475. DHD_ERROR(("%s(): dhd_fillup_ioct_reqst failed \r\n", __FUNCTION__));
  6476. goto done;
  6477. }
  6478. /* wait for IOCTL completion message from dongle and get first fragment */
  6479. ret = dhd_msgbuf_wait_ioctl_cmplt(dhd, len, buf);
  6480. done:
  6481. return ret;
  6482. }
  6483. void
  6484. dhd_msgbuf_iovar_timeout_dump(dhd_pub_t *dhd)
  6485. {
  6486. uint32 intstatus;
  6487. dhd_prot_t *prot = dhd->prot;
  6488. dhd->rxcnt_timeout++;
  6489. dhd->rx_ctlerrs++;
  6490. dhd->iovar_timeout_occured = TRUE;
  6491. DHD_ERROR(("%s: resumed on timeout rxcnt_timeout%s %d ioctl_cmd %d "
  6492. "trans_id %d state %d busstate=%d ioctl_received=%d\n", __FUNCTION__,
  6493. dhd->is_sched_error ? " due to scheduling problem" : "",
  6494. dhd->rxcnt_timeout, prot->curr_ioctl_cmd, prot->ioctl_trans_id,
  6495. prot->ioctl_state, dhd->busstate, prot->ioctl_received));
  6496. #if defined(DHD_KERNEL_SCHED_DEBUG) && defined(DHD_FW_COREDUMP)
  6497. if (dhd->is_sched_error && dhd->memdump_enabled == DUMP_MEMFILE_BUGON) {
  6498. /* change g_assert_type to trigger Kernel panic */
  6499. g_assert_type = 2;
  6500. /* use ASSERT() to trigger panic */
  6501. ASSERT(0);
  6502. }
  6503. #endif /* DHD_KERNEL_SCHED_DEBUG && DHD_FW_COREDUMP */
  6504. if (prot->curr_ioctl_cmd == WLC_SET_VAR ||
  6505. prot->curr_ioctl_cmd == WLC_GET_VAR) {
  6506. char iovbuf[32];
  6507. int i;
  6508. int dump_size = 128;
  6509. uint8 *ioctl_buf = (uint8 *)prot->ioctbuf.va;
  6510. memset(iovbuf, 0, sizeof(iovbuf));
  6511. strncpy(iovbuf, ioctl_buf, sizeof(iovbuf) - 1);
  6512. iovbuf[sizeof(iovbuf) - 1] = '\0';
  6513. DHD_ERROR(("Current IOVAR (%s): %s\n",
  6514. prot->curr_ioctl_cmd == WLC_SET_VAR ?
  6515. "WLC_SET_VAR" : "WLC_GET_VAR", iovbuf));
  6516. DHD_ERROR(("========== START IOCTL REQBUF DUMP ==========\n"));
  6517. for (i = 0; i < dump_size; i++) {
  6518. DHD_ERROR(("%02X ", ioctl_buf[i]));
  6519. if ((i % 32) == 31) {
  6520. DHD_ERROR(("\n"));
  6521. }
  6522. }
  6523. DHD_ERROR(("\n========== END IOCTL REQBUF DUMP ==========\n"));
  6524. }
  6525. /* Check the PCIe link status by reading intstatus register */
  6526. intstatus = si_corereg(dhd->bus->sih,
  6527. dhd->bus->sih->buscoreidx, dhd->bus->pcie_mailbox_int, 0, 0);
  6528. if (intstatus == (uint32)-1) {
  6529. DHD_ERROR(("%s : PCIe link might be down\n", __FUNCTION__));
  6530. dhd->bus->is_linkdown = TRUE;
  6531. }
  6532. dhd_bus_dump_console_buffer(dhd->bus);
  6533. dhd_prot_debug_info_print(dhd);
  6534. }
  6535. /**
  6536. * Waits for IOCTL completion message from the dongle, copies this into caller
  6537. * provided parameter 'buf'.
  6538. */
  6539. static int
  6540. dhd_msgbuf_wait_ioctl_cmplt(dhd_pub_t *dhd, uint32 len, void *buf)
  6541. {
  6542. dhd_prot_t *prot = dhd->prot;
  6543. int timeleft;
  6544. unsigned long flags;
  6545. int ret = 0;
  6546. DHD_TRACE(("%s: Enter\n", __FUNCTION__));
  6547. if (dhd_query_bus_erros(dhd)) {
  6548. ret = -EIO;
  6549. goto out;
  6550. }
  6551. timeleft = dhd_os_ioctl_resp_wait(dhd, (uint *)&prot->ioctl_received);
  6552. #ifdef DHD_RECOVER_TIMEOUT
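/* If the wait timed out but the PCIe mailbox interrupt status shows a pending
 * interrupt (i.e. the completion may simply not have been serviced yet),
 * process the control completion ring once more and retry the wait before
 * treating this as a real ioctl timeout.
 */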
  6553. if (prot->ioctl_received == 0) {
  6554. uint32 intstatus = si_corereg(dhd->bus->sih,
  6555. dhd->bus->sih->buscoreidx, dhd->bus->pcie_mailbox_int, 0, 0);
int host_irq_disabled = dhdpcie_irq_disabled(dhd->bus);
  6557. if ((intstatus) && (intstatus != (uint32)-1) &&
  6558. (timeleft == 0) && (!dhd_query_bus_erros(dhd))) {
  6559. DHD_ERROR(("%s: iovar timeout trying again intstatus=%x"
  6560. " host_irq_disabled=%d\n",
__FUNCTION__, intstatus, host_irq_disabled));
  6562. dhd_pcie_intr_count_dump(dhd);
  6563. dhd_print_tasklet_status(dhd);
  6564. dhd_prot_process_ctrlbuf(dhd);
  6565. timeleft = dhd_os_ioctl_resp_wait(dhd, (uint *)&prot->ioctl_received);
  6566. /* Clear Interrupts */
  6567. dhdpcie_bus_clear_intstatus(dhd->bus);
  6568. }
  6569. }
  6570. #endif /* DHD_RECOVER_TIMEOUT */
  6571. if (timeleft == 0 && (!dhd_query_bus_erros(dhd))) {
  6572. /* check if resumed on time out related to scheduling issue */
  6573. dhd->is_sched_error = FALSE;
  6574. if (dhd->bus->isr_entry_time > prot->ioctl_fillup_time) {
  6575. dhd->is_sched_error = dhd_bus_query_dpc_sched_errors(dhd);
  6576. }
  6577. dhd_msgbuf_iovar_timeout_dump(dhd);
  6578. #ifdef DHD_FW_COREDUMP
  6579. /* Collect socram dump */
  6580. if (dhd->memdump_enabled) {
  6581. /* collect core dump */
  6582. dhd->memdump_type = DUMP_TYPE_RESUMED_ON_TIMEOUT;
  6583. dhd_bus_mem_dump(dhd);
  6584. }
  6585. #endif /* DHD_FW_COREDUMP */
  6586. #ifdef SUPPORT_LINKDOWN_RECOVERY
  6587. #ifdef CONFIG_ARCH_MSM
  6588. dhd->bus->no_cfg_restore = 1;
  6589. #endif /* CONFIG_ARCH_MSM */
  6590. #endif /* SUPPORT_LINKDOWN_RECOVERY */
  6591. ret = -ETIMEDOUT;
  6592. goto out;
  6593. } else {
  6594. if (prot->ioctl_received != IOCTL_RETURN_ON_SUCCESS) {
  6595. DHD_ERROR(("%s: IOCTL failure due to ioctl_received = %d\n",
  6596. __FUNCTION__, prot->ioctl_received));
  6597. ret = -EINVAL;
  6598. goto out;
  6599. }
  6600. dhd->rxcnt_timeout = 0;
  6601. dhd->rx_ctlpkts++;
  6602. DHD_CTL(("%s: ioctl resp resumed, got %d\n",
  6603. __FUNCTION__, prot->ioctl_resplen));
  6604. }
  6605. if (dhd->prot->ioctl_resplen > len)
  6606. dhd->prot->ioctl_resplen = (uint16)len;
  6607. if (buf)
  6608. bcopy(dhd->prot->retbuf.va, buf, dhd->prot->ioctl_resplen);
  6609. ret = (int)(dhd->prot->ioctl_status);
  6610. out:
  6611. DHD_GENERAL_LOCK(dhd, flags);
  6612. dhd->prot->ioctl_state = 0;
  6613. dhd->prot->ioctl_resplen = 0;
  6614. dhd->prot->ioctl_received = IOCTL_WAIT;
  6615. dhd->prot->curr_ioctl_cmd = 0;
  6616. DHD_GENERAL_UNLOCK(dhd, flags);
  6617. return ret;
  6618. } /* dhd_msgbuf_wait_ioctl_cmplt */
  6619. static int
  6620. dhd_msgbuf_set_ioctl(dhd_pub_t *dhd, int ifidx, uint cmd, void *buf, uint len, uint8 action)
  6621. {
  6622. int ret = 0;
  6623. DHD_TRACE(("%s: Enter \n", __FUNCTION__));
  6624. if (dhd->bus->is_linkdown) {
  6625. DHD_ERROR(("%s : PCIe link is down. we have nothing to do\n",
  6626. __FUNCTION__));
  6627. return -EIO;
  6628. }
  6629. if (dhd->busstate == DHD_BUS_DOWN) {
  6630. DHD_ERROR(("%s : bus is down. we have nothing to do\n", __FUNCTION__));
  6631. return -EIO;
  6632. }
  6633. /* don't talk to the dongle if fw is about to be reloaded */
  6634. if (dhd->hang_was_sent) {
  6635. DHD_ERROR(("%s: HANG was sent up earlier. Not talking to the chip\n",
  6636. __FUNCTION__));
  6637. return -EIO;
  6638. }
DHD_CTL(("ACTION %d ifidx %d cmd %d len %d \n",
  6640. action, ifidx, cmd, len));
  6641. /* Fill up msgbuf for ioctl req */
  6642. ret = dhd_fillup_ioct_reqst(dhd, (uint16)len, cmd, buf, ifidx);
  6643. if (ret < 0) {
  6644. DHD_ERROR(("%s(): dhd_fillup_ioct_reqst failed \r\n", __FUNCTION__));
  6645. goto done;
  6646. }
  6647. ret = dhd_msgbuf_wait_ioctl_cmplt(dhd, len, buf);
  6648. done:
  6649. return ret;
  6650. }
  6651. /** Called by upper DHD layer. Handles a protocol control response asynchronously. */
  6652. int dhd_prot_ctl_complete(dhd_pub_t *dhd)
  6653. {
  6654. return 0;
  6655. }
  6656. /** Called by upper DHD layer. Check for and handle local prot-specific iovar commands */
  6657. int dhd_prot_iovar_op(dhd_pub_t *dhd, const char *name,
  6658. void *params, int plen, void *arg, int len, bool set)
  6659. {
  6660. return BCME_UNSUPPORTED;
  6661. }
  6662. #ifdef DHD_DUMP_PCIE_RINGS
  6663. int dhd_d2h_h2d_ring_dump(dhd_pub_t *dhd, void *file, const void *user_buf,
  6664. unsigned long *file_posn, bool file_write)
  6665. {
  6666. dhd_prot_t *prot;
  6667. msgbuf_ring_t *ring;
  6668. int ret = 0;
  6669. uint16 h2d_flowrings_total;
  6670. uint16 flowid;
  6671. if (!(dhd) || !(dhd->prot)) {
  6672. goto exit;
  6673. }
  6674. prot = dhd->prot;
  6675. /* Below is the same ring dump sequence followed in parser as well. */
  6676. ring = &prot->h2dring_ctrl_subn;
  6677. if ((ret = dhd_ring_write(dhd, ring, file, user_buf, file_posn)) < 0)
  6678. goto exit;
  6679. ring = &prot->h2dring_rxp_subn;
  6680. if ((ret = dhd_ring_write(dhd, ring, file, user_buf, file_posn)) < 0)
  6681. goto exit;
  6682. ring = &prot->d2hring_ctrl_cpln;
  6683. if ((ret = dhd_ring_write(dhd, ring, file, user_buf, file_posn)) < 0)
  6684. goto exit;
  6685. ring = &prot->d2hring_tx_cpln;
  6686. if ((ret = dhd_ring_write(dhd, ring, file, user_buf, file_posn)) < 0)
  6687. goto exit;
  6688. ring = &prot->d2hring_rx_cpln;
  6689. if ((ret = dhd_ring_write(dhd, ring, file, user_buf, file_posn)) < 0)
  6690. goto exit;
  6691. h2d_flowrings_total = dhd_get_max_flow_rings(dhd);
  6692. FOREACH_RING_IN_FLOWRINGS_POOL(prot, ring, flowid, h2d_flowrings_total) {
  6693. if ((ret = dhd_ring_write(dhd, ring, file, user_buf, file_posn)) < 0) {
  6694. goto exit;
  6695. }
  6696. }
  6697. #ifdef EWP_EDL
  6698. if (dhd->dongle_edl_support) {
  6699. ring = prot->d2hring_edl;
  6700. if ((ret = dhd_edl_ring_hdr_write(dhd, ring, file, user_buf, file_posn)) < 0)
  6701. goto exit;
  6702. }
  6703. else if (dhd->bus->api.fw_rev >= PCIE_SHARED_VERSION_6 && !dhd->dongle_edl_support)
  6704. #else
  6705. if (dhd->bus->api.fw_rev >= PCIE_SHARED_VERSION_6)
  6706. #endif /* EWP_EDL */
  6707. {
  6708. ring = prot->h2dring_info_subn;
  6709. if ((ret = dhd_ring_write(dhd, ring, file, user_buf, file_posn)) < 0)
  6710. goto exit;
  6711. ring = prot->d2hring_info_cpln;
  6712. if ((ret = dhd_ring_write(dhd, ring, file, user_buf, file_posn)) < 0)
  6713. goto exit;
  6714. }
exit:
  6716. return ret;
  6717. }
  6718. /* Write to file */
  6719. static
  6720. int dhd_ring_write(dhd_pub_t *dhd, msgbuf_ring_t *ring, void *file,
  6721. const void *user_buf, unsigned long *file_posn)
  6722. {
  6723. int ret = 0;
  6724. if (ring == NULL) {
  6725. DHD_ERROR(("%s: Ring not initialised, failed to dump ring contents\n",
  6726. __FUNCTION__));
  6727. return BCME_ERROR;
  6728. }
  6729. if (file) {
  6730. ret = dhd_os_write_file_posn(file, file_posn, (char *)(ring->dma_buf.va),
  6731. ((unsigned long)(ring->max_items) * (ring->item_len)));
  6732. if (ret < 0) {
  6733. DHD_ERROR(("%s: write file error !\n", __FUNCTION__));
  6734. ret = BCME_ERROR;
  6735. }
  6736. } else if (user_buf) {
  6737. ret = dhd_export_debug_data((char *)(ring->dma_buf.va), NULL, user_buf,
  6738. ((unsigned long)(ring->max_items) * (ring->item_len)), (int *)file_posn);
  6739. }
  6740. return ret;
  6741. }
  6742. #endif /* DHD_DUMP_PCIE_RINGS */
  6743. #ifdef EWP_EDL
  6744. /* Write to file */
  6745. static
  6746. int dhd_edl_ring_hdr_write(dhd_pub_t *dhd, msgbuf_ring_t *ring, void *file, const void *user_buf,
  6747. unsigned long *file_posn)
  6748. {
  6749. int ret = 0, nitems = 0;
  6750. char *buf = NULL, *ptr = NULL;
  6751. uint8 *msg_addr = NULL;
  6752. uint16 rd = 0;
  6753. if (ring == NULL) {
  6754. DHD_ERROR(("%s: Ring not initialised, failed to dump ring contents\n",
  6755. __FUNCTION__));
  6756. ret = BCME_ERROR;
  6757. goto done;
  6758. }
  6759. buf = MALLOCZ(dhd->osh, (D2HRING_EDL_MAX_ITEM * D2HRING_EDL_HDR_SIZE));
  6760. if (buf == NULL) {
  6761. DHD_ERROR(("%s: buffer allocation failed\n", __FUNCTION__));
  6762. ret = BCME_ERROR;
  6763. goto done;
  6764. }
  6765. ptr = buf;
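/* Walk every EDL ring item but copy out only the first D2HRING_EDL_HDR_SIZE
 * bytes of each; the dump captures the work item headers, not the payloads.
 */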
  6766. for (; nitems < D2HRING_EDL_MAX_ITEM; nitems++, rd++) {
  6767. msg_addr = (uint8 *)ring->dma_buf.va + (rd * ring->item_len);
  6768. memcpy(ptr, (char *)msg_addr, D2HRING_EDL_HDR_SIZE);
  6769. ptr += D2HRING_EDL_HDR_SIZE;
  6770. }
  6771. if (file) {
  6772. ret = dhd_os_write_file_posn(file, file_posn, buf,
  6773. (D2HRING_EDL_HDR_SIZE * D2HRING_EDL_MAX_ITEM));
  6774. if (ret < 0) {
  6775. DHD_ERROR(("%s: write file error !\n", __FUNCTION__));
  6776. goto done;
  6777. }
  6778. }
  6779. else {
  6780. ret = dhd_export_debug_data(buf, NULL, user_buf,
  6781. (D2HRING_EDL_HDR_SIZE * D2HRING_EDL_MAX_ITEM), file_posn);
  6782. }
  6783. done:
  6784. if (buf) {
  6785. MFREE(dhd->osh, buf, (D2HRING_EDL_MAX_ITEM * D2HRING_EDL_HDR_SIZE));
  6786. }
  6787. return ret;
  6788. }
  6789. #endif /* EWP_EDL */
  6790. /** Add prot dump output to a buffer */
  6791. void dhd_prot_dump(dhd_pub_t *dhd, struct bcmstrbuf *b)
  6792. {
  6793. if (dhd->d2h_sync_mode & PCIE_SHARED_D2H_SYNC_SEQNUM)
  6794. bcm_bprintf(b, "\nd2h_sync: SEQNUM:");
  6795. else if (dhd->d2h_sync_mode & PCIE_SHARED_D2H_SYNC_XORCSUM)
  6796. bcm_bprintf(b, "\nd2h_sync: XORCSUM:");
  6797. else
  6798. bcm_bprintf(b, "\nd2h_sync: NONE:");
  6799. bcm_bprintf(b, " d2h_sync_wait max<%lu> tot<%lu>\n",
  6800. dhd->prot->d2h_sync_wait_max, dhd->prot->d2h_sync_wait_tot);
  6801. bcm_bprintf(b, "\nDongle DMA Indices: h2d %d d2h %d index size %d bytes\n",
  6802. dhd->dma_h2d_ring_upd_support,
  6803. dhd->dma_d2h_ring_upd_support,
  6804. dhd->prot->rw_index_sz);
  6805. bcm_bprintf(b, "h2d_max_txpost: %d, prot->h2d_max_txpost: %d\n",
  6806. h2d_max_txpost, dhd->prot->h2d_max_txpost);
  6807. bcm_bprintf(b, "pktid_txq_start_cnt: %d\n", dhd->prot->pktid_txq_start_cnt);
  6808. bcm_bprintf(b, "pktid_txq_stop_cnt: %d\n", dhd->prot->pktid_txq_stop_cnt);
  6809. bcm_bprintf(b, "pktid_depleted_cnt: %d\n", dhd->prot->pktid_depleted_cnt);
  6810. }
  6811. /* Update local copy of dongle statistics */
  6812. void dhd_prot_dstats(dhd_pub_t *dhd)
  6813. {
  6814. return;
  6815. }
  6816. /** Called by upper DHD layer */
  6817. int dhd_process_pkt_reorder_info(dhd_pub_t *dhd, uchar *reorder_info_buf,
  6818. uint reorder_info_len, void **pkt, uint32 *free_buf_count)
  6819. {
  6820. return 0;
  6821. }
  6822. /** Debug related, post a dummy message to interrupt dongle. Used to process cons commands. */
  6823. int
  6824. dhd_post_dummy_msg(dhd_pub_t *dhd)
  6825. {
  6826. unsigned long flags;
  6827. hostevent_hdr_t *hevent = NULL;
  6828. uint16 alloced = 0;
  6829. dhd_prot_t *prot = dhd->prot;
  6830. msgbuf_ring_t *ring = &prot->h2dring_ctrl_subn;
  6831. DHD_RING_LOCK(ring->ring_lock, flags);
  6832. hevent = (hostevent_hdr_t *)
  6833. dhd_prot_alloc_ring_space(dhd, ring, 1, &alloced, FALSE);
  6834. if (hevent == NULL) {
  6835. DHD_RING_UNLOCK(ring->ring_lock, flags);
  6836. return -1;
  6837. }
  6838. /* CMN msg header */
  6839. hevent->msg.epoch = ring->seqnum % H2D_EPOCH_MODULO;
  6840. ring->seqnum++;
  6841. hevent->msg.msg_type = MSG_TYPE_HOST_EVNT;
  6842. hevent->msg.if_id = 0;
  6843. hevent->msg.flags = ring->current_phase;
  6844. /* Event payload */
  6845. hevent->evnt_pyld = htol32(HOST_EVENT_CONS_CMD);
/* Since the event payload is filled directly into the buffer pointer obtained
 * from the msgbuf ring, write_complete can be called right away.
 */
  6849. dhd_prot_ring_write_complete(dhd, ring, hevent, 1);
  6850. DHD_RING_UNLOCK(ring->ring_lock, flags);
  6851. return 0;
  6852. }
  6853. /**
  6854. * If exactly_nitems is true, this function will allocate space for nitems or fail
  6855. * If exactly_nitems is false, this function will allocate space for nitems or less
  6856. */
  6857. static void * BCMFASTPATH
  6858. dhd_prot_alloc_ring_space(dhd_pub_t *dhd, msgbuf_ring_t *ring,
  6859. uint16 nitems, uint16 * alloced, bool exactly_nitems)
  6860. {
  6861. void * ret_buf;
  6862. /* Alloc space for nitems in the ring */
  6863. ret_buf = dhd_prot_get_ring_space(ring, nitems, alloced, exactly_nitems);
  6864. if (ret_buf == NULL) {
  6865. /* HWA TODO, need to get RD pointer from different array
  6866. * which HWA will directly write into host memory
  6867. */
/* If the allocation failed, invalidate the cached read pointer */
  6869. if (dhd->dma_d2h_ring_upd_support) {
  6870. ring->rd = dhd_prot_dma_indx_get(dhd, H2D_DMA_INDX_RD_UPD, ring->idx);
  6871. } else {
  6872. dhd_bus_cmn_readshared(dhd->bus, &(ring->rd), RING_RD_UPD, ring->idx);
  6873. #ifdef SUPPORT_LINKDOWN_RECOVERY
  6874. /* Check if ring->rd is valid */
  6875. if (ring->rd >= ring->max_items) {
  6876. DHD_ERROR(("%s: Invalid rd idx=%d\n", ring->name, ring->rd));
  6877. dhd->bus->read_shm_fail = TRUE;
  6878. return NULL;
  6879. }
  6880. #endif /* SUPPORT_LINKDOWN_RECOVERY */
  6881. }
  6882. /* Try allocating once more */
  6883. ret_buf = dhd_prot_get_ring_space(ring, nitems, alloced, exactly_nitems);
  6884. if (ret_buf == NULL) {
  6885. DHD_INFO(("%s: Ring space not available \n", ring->name));
  6886. return NULL;
  6887. }
  6888. }
  6889. if (ret_buf == HOST_RING_BASE(ring)) {
  6890. DHD_INFO(("%s: setting the phase now\n", ring->name));
  6891. ring->current_phase = ring->current_phase ? 0 : BCMPCIE_CMNHDR_PHASE_BIT_INIT;
  6892. }
  6893. /* Return alloced space */
  6894. return ret_buf;
  6895. }
/**
 * Non-inline ioctl request.
 * An ioctl request is first formed in the circular buffer, per the
 * ioctptr_reqst_hdr_t header. A separate request buffer is then formed with a
 * 4-byte common header prepended; the buf contents from the calling function
 * are copied into the remainder of that buffer.
 */
  6902. static int
  6903. dhd_fillup_ioct_reqst(dhd_pub_t *dhd, uint16 len, uint cmd, void* buf, int ifidx)
  6904. {
  6905. dhd_prot_t *prot = dhd->prot;
  6906. ioctl_req_msg_t *ioct_rqst;
  6907. void * ioct_buf; /* For ioctl payload */
  6908. uint16 rqstlen, resplen;
  6909. unsigned long flags;
  6910. uint16 alloced = 0;
  6911. msgbuf_ring_t *ring = &prot->h2dring_ctrl_subn;
  6912. if (dhd_query_bus_erros(dhd)) {
  6913. return -EIO;
  6914. }
  6915. rqstlen = len;
  6916. resplen = len;
/* Limit the ioctl request to MSGBUF_IOCTL_MAX_RQSTLEN bytes including headers:
 * an 8K dongle buffer allocation fails, and DHD is not given separate input and
 * output buffer lengths, so assume the input length never exceeds 2K.
 */
  6921. rqstlen = MIN(rqstlen, MSGBUF_IOCTL_MAX_RQSTLEN);
  6922. DHD_RING_LOCK(ring->ring_lock, flags);
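/* Only one ioctl may be in flight: a non-zero ioctl_state means a previous
 * request is still awaiting its ACK and/or response, so back off with
 * BCME_BUSY instead of overwriting the shared ioctl buffer.
 */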
  6923. if (prot->ioctl_state) {
  6924. DHD_ERROR(("%s: pending ioctl %02x\n", __FUNCTION__, prot->ioctl_state));
  6925. DHD_RING_UNLOCK(ring->ring_lock, flags);
  6926. return BCME_BUSY;
  6927. } else {
  6928. prot->ioctl_state = MSGBUF_IOCTL_ACK_PENDING | MSGBUF_IOCTL_RESP_PENDING;
  6929. }
  6930. /* Request for cbuf space */
  6931. ioct_rqst = (ioctl_req_msg_t*)
  6932. dhd_prot_alloc_ring_space(dhd, ring, 1, &alloced, FALSE);
  6933. if (ioct_rqst == NULL) {
  6934. DHD_ERROR(("couldn't allocate space on msgring to send ioctl request\n"));
  6935. prot->ioctl_state = 0;
  6936. prot->curr_ioctl_cmd = 0;
  6937. prot->ioctl_received = IOCTL_WAIT;
  6938. DHD_RING_UNLOCK(ring->ring_lock, flags);
  6939. return -1;
  6940. }
  6941. /* Common msg buf hdr */
  6942. ioct_rqst->cmn_hdr.msg_type = MSG_TYPE_IOCTLPTR_REQ;
  6943. ioct_rqst->cmn_hdr.if_id = (uint8)ifidx;
  6944. ioct_rqst->cmn_hdr.flags = ring->current_phase;
  6945. ioct_rqst->cmn_hdr.request_id = htol32(DHD_IOCTL_REQ_PKTID);
  6946. ioct_rqst->cmn_hdr.epoch = ring->seqnum % H2D_EPOCH_MODULO;
  6947. ring->seqnum++;
  6948. ioct_rqst->cmd = htol32(cmd);
  6949. prot->curr_ioctl_cmd = cmd;
  6950. ioct_rqst->output_buf_len = htol16(resplen);
  6951. prot->ioctl_trans_id++;
  6952. ioct_rqst->trans_id = prot->ioctl_trans_id;
  6953. /* populate ioctl buffer info */
  6954. ioct_rqst->input_buf_len = htol16(rqstlen);
  6955. ioct_rqst->host_input_buf_addr.high = htol32(PHYSADDRHI(prot->ioctbuf.pa));
  6956. ioct_rqst->host_input_buf_addr.low = htol32(PHYSADDRLO(prot->ioctbuf.pa));
  6957. /* copy ioct payload */
  6958. ioct_buf = (void *) prot->ioctbuf.va;
  6959. prot->ioctl_fillup_time = OSL_LOCALTIME_NS();
  6960. if (buf)
  6961. memcpy(ioct_buf, buf, len);
  6962. OSL_CACHE_FLUSH((void *) prot->ioctbuf.va, len);
  6963. if (!ISALIGNED(ioct_buf, DMA_ALIGN_LEN))
  6964. DHD_ERROR(("host ioct address unaligned !!!!! \n"));
  6965. DHD_CTL(("submitted IOCTL request request_id %d, cmd %d, output_buf_len %d, tx_id %d\n",
  6966. ioct_rqst->cmn_hdr.request_id, cmd, ioct_rqst->output_buf_len,
  6967. ioct_rqst->trans_id));
  6968. /* update ring's WR index and ring doorbell to dongle */
  6969. dhd_prot_ring_write_complete(dhd, ring, ioct_rqst, 1);
  6970. DHD_RING_UNLOCK(ring->ring_lock, flags);
  6971. return 0;
  6972. } /* dhd_fillup_ioct_reqst */
  6973. /**
  6974. * dhd_prot_ring_attach - Initialize the msgbuf_ring object and attach a
  6975. * DMA-able buffer to it. The ring is NOT tagged as inited until all the ring
  6976. * information is posted to the dongle.
  6977. *
  6978. * Invoked in dhd_prot_attach for the common rings, and in dhd_prot_init for
  6979. * each flowring in pool of flowrings.
  6980. *
  6981. * returns BCME_OK=0 on success
  6982. * returns non-zero negative error value on failure.
  6983. */
  6984. static int
  6985. dhd_prot_ring_attach(dhd_pub_t *dhd, msgbuf_ring_t *ring, const char *name,
  6986. uint16 max_items, uint16 item_len, uint16 ringid)
  6987. {
  6988. int dma_buf_alloced = BCME_NOMEM;
  6989. uint32 dma_buf_len = max_items * item_len;
  6990. dhd_prot_t *prot = dhd->prot;
  6991. uint16 max_flowrings = dhd->bus->max_tx_flowrings;
  6992. dhd_dma_buf_t *dma_buf = NULL;
  6993. ASSERT(ring);
  6994. ASSERT(name);
  6995. ASSERT((max_items < 0xFFFF) && (item_len < 0xFFFF) && (ringid < 0xFFFF));
  6996. /* Init name */
  6997. strncpy(ring->name, name, RING_NAME_MAX_LENGTH);
  6998. ring->name[RING_NAME_MAX_LENGTH - 1] = '\0';
  6999. ring->idx = ringid;
  7000. ring->max_items = max_items;
  7001. ring->item_len = item_len;
  7002. /* A contiguous space may be reserved for all flowrings */
  7003. if (DHD_IS_FLOWRING(ringid, max_flowrings) && (prot->flowrings_dma_buf.va)) {
  7004. /* Carve out from the contiguous DMA-able flowring buffer */
  7005. uint16 flowid;
  7006. uint32 base_offset;
  7007. dhd_dma_buf_t *rsv_buf = &prot->flowrings_dma_buf;
  7008. dma_buf = &ring->dma_buf;
  7009. flowid = DHD_RINGID_TO_FLOWID(ringid);
  7010. base_offset = (flowid - BCMPCIE_H2D_COMMON_MSGRINGS) * dma_buf_len;
  7011. ASSERT(base_offset + dma_buf_len <= rsv_buf->len);
  7012. dma_buf->len = dma_buf_len;
  7013. dma_buf->va = (void *)((uintptr)rsv_buf->va + base_offset);
  7014. PHYSADDRHISET(dma_buf->pa, PHYSADDRHI(rsv_buf->pa));
  7015. PHYSADDRLOSET(dma_buf->pa, PHYSADDRLO(rsv_buf->pa) + base_offset);
  7016. /* On 64bit, contiguous space may not span across 0x00000000FFFFFFFF */
  7017. ASSERT(PHYSADDRLO(dma_buf->pa) >= PHYSADDRLO(rsv_buf->pa));
  7018. dma_buf->dmah = rsv_buf->dmah;
  7019. dma_buf->secdma = rsv_buf->secdma;
  7020. (void)dhd_dma_buf_audit(dhd, &ring->dma_buf);
  7021. } else {
  7022. #ifdef EWP_EDL
  7023. if (ring == dhd->prot->d2hring_edl) {
7024. /* For the EDL ring, memory is allocated during attach,
7025. * so we only need to copy that dma_buf into the ring's dma_buf
7026. */
  7027. memcpy(&ring->dma_buf, &dhd->edl_ring_mem, sizeof(ring->dma_buf));
  7028. dma_buf = &ring->dma_buf;
  7029. if (dma_buf->va == NULL) {
  7030. return BCME_NOMEM;
  7031. }
  7032. } else
  7033. #endif /* EWP_EDL */
  7034. {
  7035. /* Allocate a dhd_dma_buf */
  7036. dma_buf_alloced = dhd_dma_buf_alloc(dhd, &ring->dma_buf, dma_buf_len);
  7037. if (dma_buf_alloced != BCME_OK) {
  7038. return BCME_NOMEM;
  7039. }
  7040. }
  7041. }
  7042. /* CAUTION: Save ring::base_addr in little endian format! */
  7043. dhd_base_addr_htolpa(&ring->base_addr, ring->dma_buf.pa);
  7044. #ifdef BCM_SECURE_DMA
  7045. if (SECURE_DMA_ENAB(prot->osh)) {
  7046. ring->dma_buf.secdma = MALLOCZ(prot->osh, sizeof(sec_cma_info_t));
  7047. if (ring->dma_buf.secdma == NULL) {
  7048. goto free_dma_buf;
  7049. }
  7050. }
  7051. #endif /* BCM_SECURE_DMA */
  7052. ring->ring_lock = dhd_os_spin_lock_init(dhd->osh);
  7053. DHD_INFO(("RING_ATTACH : %s Max item %d len item %d total size %d "
  7054. "ring start %p buf phys addr %x:%x \n",
  7055. ring->name, ring->max_items, ring->item_len,
  7056. dma_buf_len, ring->dma_buf.va, ltoh32(ring->base_addr.high_addr),
  7057. ltoh32(ring->base_addr.low_addr)));
  7058. return BCME_OK;
  7059. #ifdef BCM_SECURE_DMA
  7060. free_dma_buf:
  7061. if (dma_buf_alloced == BCME_OK) {
  7062. dhd_dma_buf_free(dhd, &ring->dma_buf);
  7063. }
  7064. #endif /* BCM_SECURE_DMA */
  7065. return BCME_NOMEM;
  7066. } /* dhd_prot_ring_attach */
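/*
 * Illustrative sketch (not part of the driver): the carve-out arithmetic used
 * in dhd_prot_ring_attach when a flowring buffer is taken from the
 * pre-reserved contiguous region. The helper below is hypothetical and uses
 * plain unsigned types instead of dhd_dma_buf_t / dmaaddr_t.
 */
static unsigned int
flowring_carve_offset(unsigned int flowid, unsigned int num_common_rings,
	unsigned int max_items, unsigned int item_len)
{
	/* flowring buffers are laid out back to back after the H2D common
	 * rings, each occupying max_items * item_len bytes
	 */
	return (flowid - num_common_rings) * (max_items * item_len);
}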
  7067. /**
  7068. * dhd_prot_ring_init - Post the common ring information to dongle.
  7069. *
  7070. * Used only for common rings.
  7071. *
  7072. * The flowrings information is passed via the create flowring control message
  7073. * (tx_flowring_create_request_t) sent over the H2D control submission common
  7074. * ring.
  7075. */
  7076. static void
  7077. dhd_prot_ring_init(dhd_pub_t *dhd, msgbuf_ring_t *ring)
  7078. {
  7079. ring->wr = 0;
  7080. ring->rd = 0;
  7081. ring->curr_rd = 0;
7082. /* Reset hwa_db_type for all rings;
7083. * for data path rings it is assigned separately post-init,
7084. * from dhd_prot_d2h_sync_init and dhd_prot_h2d_sync_init
7085. */
  7086. ring->hwa_db_type = 0;
  7087. /* CAUTION: ring::base_addr already in Little Endian */
  7088. dhd_bus_cmn_writeshared(dhd->bus, &ring->base_addr,
  7089. sizeof(sh_addr_t), RING_BUF_ADDR, ring->idx);
  7090. dhd_bus_cmn_writeshared(dhd->bus, &ring->max_items,
  7091. sizeof(uint16), RING_MAX_ITEMS, ring->idx);
  7092. dhd_bus_cmn_writeshared(dhd->bus, &ring->item_len,
  7093. sizeof(uint16), RING_ITEM_LEN, ring->idx);
  7094. dhd_bus_cmn_writeshared(dhd->bus, &(ring->wr),
  7095. sizeof(uint16), RING_WR_UPD, ring->idx);
  7096. dhd_bus_cmn_writeshared(dhd->bus, &(ring->rd),
  7097. sizeof(uint16), RING_RD_UPD, ring->idx);
  7098. /* ring inited */
  7099. ring->inited = TRUE;
  7100. } /* dhd_prot_ring_init */
  7101. /**
  7102. * dhd_prot_ring_reset - bzero a ring's DMA-ble buffer and cache flush
  7103. * Reset WR and RD indices to 0.
  7104. */
  7105. static void
  7106. dhd_prot_ring_reset(dhd_pub_t *dhd, msgbuf_ring_t *ring)
  7107. {
  7108. DHD_TRACE(("%s\n", __FUNCTION__));
  7109. dhd_dma_buf_reset(dhd, &ring->dma_buf);
  7110. ring->rd = ring->wr = 0;
  7111. ring->curr_rd = 0;
  7112. ring->inited = FALSE;
  7113. ring->create_pending = FALSE;
  7114. }
  7115. /**
  7116. * dhd_prot_ring_detach - Detach the DMA-able buffer and any other objects
  7117. * hanging off the msgbuf_ring.
  7118. */
  7119. static void
  7120. dhd_prot_ring_detach(dhd_pub_t *dhd, msgbuf_ring_t *ring)
  7121. {
  7122. dhd_prot_t *prot = dhd->prot;
  7123. uint16 max_flowrings = dhd->bus->max_tx_flowrings;
  7124. ASSERT(ring);
  7125. ring->inited = FALSE;
  7126. /* rd = ~0, wr = ring->rd - 1, max_items = 0, len_item = ~0 */
  7127. #ifdef BCM_SECURE_DMA
  7128. if (SECURE_DMA_ENAB(prot->osh)) {
  7129. if (ring->dma_buf.secdma) {
  7130. SECURE_DMA_UNMAP_ALL(prot->osh, ring->dma_buf.secdma);
  7131. MFREE(prot->osh, ring->dma_buf.secdma, sizeof(sec_cma_info_t));
  7132. ring->dma_buf.secdma = NULL;
  7133. }
  7134. }
  7135. #endif /* BCM_SECURE_DMA */
  7136. /* If the DMA-able buffer was carved out of a pre-reserved contiguous
  7137. * memory, then simply stop using it.
  7138. */
  7139. if (DHD_IS_FLOWRING(ring->idx, max_flowrings) && (prot->flowrings_dma_buf.va)) {
  7140. (void)dhd_dma_buf_audit(dhd, &ring->dma_buf);
  7141. memset(&ring->dma_buf, 0, sizeof(dhd_dma_buf_t));
  7142. } else {
  7143. dhd_dma_buf_free(dhd, &ring->dma_buf);
  7144. }
  7145. dhd_os_spin_lock_deinit(dhd->osh, ring->ring_lock);
  7146. } /* dhd_prot_ring_detach */
  7147. /* Fetch number of H2D flowrings given the total number of h2d rings */
  7148. uint16
  7149. dhd_get_max_flow_rings(dhd_pub_t *dhd)
  7150. {
  7151. if (dhd->bus->api.fw_rev >= PCIE_SHARED_VERSION_6)
  7152. return dhd->bus->max_tx_flowrings;
  7153. else
  7154. return (dhd->bus->max_tx_flowrings - BCMPCIE_H2D_COMMON_MSGRINGS);
  7155. }
  7156. /**
  7157. * dhd_prot_flowrings_pool_attach - Initialize a pool of flowring msgbuf_ring_t.
  7158. *
  7159. * Allocate a pool of msgbuf_ring along with DMA-able buffers for flowrings.
7160. * Dongle includes common rings when it advertises the number of H2D rings.
  7161. * Allocates a pool of msgbuf_ring_t and invokes dhd_prot_ring_attach to
  7162. * allocate the DMA-able buffer and initialize each msgbuf_ring_t object.
  7163. *
7164. * dhd_prot_ring_attach is invoked to perform the actual initialization and
7165. * to attach the DMA-able buffer.
  7166. *
  7167. * Later dhd_prot_flowrings_pool_fetch() may be used to fetch a preallocated and
  7168. * initialized msgbuf_ring_t object.
  7169. *
  7170. * returns BCME_OK=0 on success
  7171. * returns non-zero negative error value on failure.
  7172. */
  7173. static int
  7174. dhd_prot_flowrings_pool_attach(dhd_pub_t *dhd)
  7175. {
  7176. uint16 flowid;
  7177. msgbuf_ring_t *ring;
  7178. uint16 h2d_flowrings_total; /* exclude H2D common rings */
  7179. dhd_prot_t *prot = dhd->prot;
  7180. char ring_name[RING_NAME_MAX_LENGTH];
  7181. if (prot->h2d_flowrings_pool != NULL)
7182. return BCME_OK; /* dhd_prot_init re-entry after a dhd_prot_reset */
  7183. ASSERT(prot->h2d_rings_total == 0);
  7184. /* h2d_rings_total includes H2D common rings: ctrl and rxbuf subn */
  7185. prot->h2d_rings_total = (uint16)dhd_bus_max_h2d_queues(dhd->bus);
  7186. if (prot->h2d_rings_total < BCMPCIE_H2D_COMMON_MSGRINGS) {
  7187. DHD_ERROR(("%s: h2d_rings_total advertized as %u\n",
  7188. __FUNCTION__, prot->h2d_rings_total));
  7189. return BCME_ERROR;
  7190. }
  7191. /* Subtract number of H2D common rings, to determine number of flowrings */
  7192. h2d_flowrings_total = dhd_get_max_flow_rings(dhd);
  7193. DHD_ERROR(("Attach flowrings pool for %d rings\n", h2d_flowrings_total));
  7194. /* Allocate pool of msgbuf_ring_t objects for all flowrings */
  7195. prot->h2d_flowrings_pool = (msgbuf_ring_t *)MALLOCZ(prot->osh,
  7196. (h2d_flowrings_total * sizeof(msgbuf_ring_t)));
  7197. if (prot->h2d_flowrings_pool == NULL) {
  7198. DHD_ERROR(("%s: flowrings pool for %d flowrings, alloc failure\n",
  7199. __FUNCTION__, h2d_flowrings_total));
  7200. goto fail;
  7201. }
  7202. /* Setup & Attach a DMA-able buffer to each flowring in the flowring pool */
  7203. FOREACH_RING_IN_FLOWRINGS_POOL(prot, ring, flowid, h2d_flowrings_total) {
  7204. snprintf(ring_name, sizeof(ring_name), "h2dflr_%03u", flowid);
  7205. if (dhd_prot_ring_attach(dhd, ring, ring_name,
  7206. prot->h2d_max_txpost, H2DRING_TXPOST_ITEMSIZE,
  7207. DHD_FLOWID_TO_RINGID(flowid)) != BCME_OK) {
  7208. goto attach_fail;
  7209. }
7210. /*
7211. * TODO - Currently flowring HWA is disabled; it can be enabled like below:
7212. * (dhd->bus->hwa_enab_bmap & HWA_ENAB_BITMAP_TXPOSTS) ? HWA_DB_TYPE_TXPOSTS : 0;
7213. */
  7214. ring->hwa_db_type = 0;
  7215. }
  7216. return BCME_OK;
  7217. attach_fail:
  7218. dhd_prot_flowrings_pool_detach(dhd); /* Free entire pool of flowrings */
  7219. fail:
  7220. prot->h2d_rings_total = 0;
  7221. return BCME_NOMEM;
  7222. } /* dhd_prot_flowrings_pool_attach */
  7223. /**
  7224. * dhd_prot_flowrings_pool_reset - Reset all msgbuf_ring_t objects in the pool.
  7225. * Invokes dhd_prot_ring_reset to perform the actual reset.
  7226. *
  7227. * The DMA-able buffer is not freed during reset and neither is the flowring
  7228. * pool freed.
  7229. *
  7230. * dhd_prot_flowrings_pool_reset will be invoked in dhd_prot_reset. Following
  7231. * the dhd_prot_reset, dhd_prot_init will be re-invoked, and the flowring pool
  7232. * from a previous flowring pool instantiation will be reused.
  7233. *
  7234. * This will avoid a fragmented DMA-able memory condition, if multiple
  7235. * dhd_prot_reset were invoked to reboot the dongle without a full detach/attach
  7236. * cycle.
  7237. */
  7238. static void
  7239. dhd_prot_flowrings_pool_reset(dhd_pub_t *dhd)
  7240. {
  7241. uint16 flowid, h2d_flowrings_total;
  7242. msgbuf_ring_t *ring;
  7243. dhd_prot_t *prot = dhd->prot;
  7244. if (prot->h2d_flowrings_pool == NULL) {
  7245. ASSERT(prot->h2d_rings_total == 0);
  7246. return;
  7247. }
  7248. h2d_flowrings_total = dhd_get_max_flow_rings(dhd);
  7249. /* Reset each flowring in the flowring pool */
  7250. FOREACH_RING_IN_FLOWRINGS_POOL(prot, ring, flowid, h2d_flowrings_total) {
  7251. dhd_prot_ring_reset(dhd, ring);
  7252. ring->inited = FALSE;
  7253. }
  7254. /* Flowring pool state must be as-if dhd_prot_flowrings_pool_attach */
  7255. }
  7256. /**
  7257. * dhd_prot_flowrings_pool_detach - Free pool of msgbuf_ring along with
  7258. * DMA-able buffers for flowrings.
  7259. * dhd_prot_ring_detach is invoked to free the DMA-able buffer and perform any
  7260. * de-initialization of each msgbuf_ring_t.
  7261. */
  7262. static void
  7263. dhd_prot_flowrings_pool_detach(dhd_pub_t *dhd)
  7264. {
  7265. int flowid;
  7266. msgbuf_ring_t *ring;
  7267. uint16 h2d_flowrings_total; /* exclude H2D common rings */
  7268. dhd_prot_t *prot = dhd->prot;
  7269. if (prot->h2d_flowrings_pool == NULL) {
  7270. ASSERT(prot->h2d_rings_total == 0);
  7271. return;
  7272. }
  7273. h2d_flowrings_total = dhd_get_max_flow_rings(dhd);
  7274. /* Detach the DMA-able buffer for each flowring in the flowring pool */
  7275. FOREACH_RING_IN_FLOWRINGS_POOL(prot, ring, flowid, h2d_flowrings_total) {
  7276. dhd_prot_ring_detach(dhd, ring);
  7277. }
  7278. MFREE(prot->osh, prot->h2d_flowrings_pool,
  7279. (h2d_flowrings_total * sizeof(msgbuf_ring_t)));
  7280. prot->h2d_flowrings_pool = (msgbuf_ring_t*)NULL;
  7281. prot->h2d_rings_total = 0;
  7282. } /* dhd_prot_flowrings_pool_detach */
  7283. /**
  7284. * dhd_prot_flowrings_pool_fetch - Fetch a preallocated and initialized
  7285. * msgbuf_ring from the flowring pool, and assign it.
  7286. *
7287. * Unlike common rings, which use dhd_prot_ring_init() to pass the common
  7288. * ring information to the dongle, a flowring's information is passed via a
  7289. * flowring create control message.
  7290. *
7291. * Only the ring state (WR, RD) indices are initialized.
  7292. */
  7293. static msgbuf_ring_t *
  7294. dhd_prot_flowrings_pool_fetch(dhd_pub_t *dhd, uint16 flowid)
  7295. {
  7296. msgbuf_ring_t *ring;
  7297. dhd_prot_t *prot = dhd->prot;
  7298. ASSERT(flowid >= DHD_FLOWRING_START_FLOWID);
  7299. ASSERT(flowid < prot->h2d_rings_total);
  7300. ASSERT(prot->h2d_flowrings_pool != NULL);
  7301. ring = DHD_RING_IN_FLOWRINGS_POOL(prot, flowid);
  7302. /* ASSERT flow_ring->inited == FALSE */
  7303. ring->wr = 0;
  7304. ring->rd = 0;
  7305. ring->curr_rd = 0;
  7306. ring->inited = TRUE;
  7307. /**
  7308. * Every time a flowring starts dynamically, initialize current_phase with 0
  7309. * then flip to BCMPCIE_CMNHDR_PHASE_BIT_INIT
  7310. */
  7311. ring->current_phase = 0;
  7312. return ring;
  7313. }
  7314. /**
  7315. * dhd_prot_flowrings_pool_release - release a previously fetched flowring's
  7316. * msgbuf_ring back to the flow_ring pool.
  7317. */
  7318. void
  7319. dhd_prot_flowrings_pool_release(dhd_pub_t *dhd, uint16 flowid, void *flow_ring)
  7320. {
  7321. msgbuf_ring_t *ring;
  7322. dhd_prot_t *prot = dhd->prot;
  7323. ASSERT(flowid >= DHD_FLOWRING_START_FLOWID);
  7324. ASSERT(flowid < prot->h2d_rings_total);
  7325. ASSERT(prot->h2d_flowrings_pool != NULL);
  7326. ring = DHD_RING_IN_FLOWRINGS_POOL(prot, flowid);
  7327. ASSERT(ring == (msgbuf_ring_t*)flow_ring);
  7328. /* ASSERT flow_ring->inited == TRUE */
  7329. (void)dhd_dma_buf_audit(dhd, &ring->dma_buf);
  7330. ring->wr = 0;
  7331. ring->rd = 0;
  7332. ring->inited = FALSE;
  7333. ring->curr_rd = 0;
  7334. }
7335. /* Assumes only one index is updated at a time */
7336. /* If exactly_nitems is true, this function will allocate space for nitems or fail */
7337. /* Exception: on wrap-around (the last nitems of the ring buffer) fewer items may be returned, to prevent a hang */
7338. /* If exactly_nitems is false, this function will allocate space for nitems or less */
  7339. static void *BCMFASTPATH
  7340. dhd_prot_get_ring_space(msgbuf_ring_t *ring, uint16 nitems, uint16 * alloced,
  7341. bool exactly_nitems)
  7342. {
  7343. void *ret_ptr = NULL;
  7344. uint16 ring_avail_cnt;
  7345. ASSERT(nitems <= ring->max_items);
  7346. ring_avail_cnt = CHECK_WRITE_SPACE(ring->rd, ring->wr, ring->max_items);
  7347. if ((ring_avail_cnt == 0) ||
  7348. (exactly_nitems && (ring_avail_cnt < nitems) &&
  7349. ((ring->max_items - ring->wr) >= nitems))) {
  7350. DHD_INFO(("Space not available: ring %s items %d write %d read %d\n",
  7351. ring->name, nitems, ring->wr, ring->rd));
  7352. return NULL;
  7353. }
  7354. *alloced = MIN(nitems, ring_avail_cnt);
  7355. /* Return next available space */
  7356. ret_ptr = (char *)DHD_RING_BGN_VA(ring) + (ring->wr * ring->item_len);
  7357. /* Update write index */
  7358. if ((ring->wr + *alloced) == ring->max_items)
  7359. ring->wr = 0;
  7360. else if ((ring->wr + *alloced) < ring->max_items)
  7361. ring->wr += *alloced;
  7362. else {
  7363. /* Should never hit this */
  7364. ASSERT(0);
  7365. return NULL;
  7366. }
  7367. return ret_ptr;
  7368. } /* dhd_prot_get_ring_space */
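/*
 * Illustrative sketch (not driver code): a standard circular-buffer free-space
 * computation of the kind performed by CHECK_WRITE_SPACE above. One slot is
 * kept unused so that rd == wr unambiguously means "empty". The helper name
 * and exact formulation are hypothetical; the driver's macro may differ in
 * detail.
 */
static unsigned short
ring_write_space(unsigned short rd, unsigned short wr, unsigned short max_items)
{
	if (rd > wr)
		return (unsigned short)(rd - wr - 1);	/* free slots up to rd */
	/* wr is at or ahead of rd: free space wraps past the end of the ring */
	return (unsigned short)(max_items - wr + rd - 1);
}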
  7369. /**
  7370. * dhd_prot_ring_write_complete - Host updates the new WR index on producing
  7371. * new messages in a H2D ring. The messages are flushed from cache prior to
  7372. * posting the new WR index. The new WR index will be updated in the DMA index
  7373. * array or directly in the dongle's ring state memory.
  7374. * A PCIE doorbell will be generated to wake up the dongle.
  7375. * This is a non-atomic function, make sure the callers
  7376. * always hold appropriate locks.
  7377. */
  7378. static void BCMFASTPATH
  7379. __dhd_prot_ring_write_complete(dhd_pub_t *dhd, msgbuf_ring_t * ring, void* p,
  7380. uint16 nitems)
  7381. {
  7382. dhd_prot_t *prot = dhd->prot;
  7383. uint32 db_index;
  7384. uint16 max_flowrings = dhd->bus->max_tx_flowrings;
  7385. uint corerev;
  7386. /* cache flush */
  7387. OSL_CACHE_FLUSH(p, ring->item_len * nitems);
  7388. /* For HWA, update db_index and ring mb2 DB and return */
  7389. if (HWA_ACTIVE(dhd) && ring->hwa_db_type) {
  7390. db_index = HWA_DB_INDEX_VALUE(ring->wr) | ring->hwa_db_type;
  7391. DHD_TRACE(("%s: ring(%s) wr(%d) hwa_db_type(0x%x) db_index(0x%x)\n",
  7392. __FUNCTION__, ring->name, ring->wr, ring->hwa_db_type, db_index));
  7393. prot->mb_2_ring_fn(dhd->bus, db_index, TRUE);
  7394. return;
  7395. }
  7396. if (IDMA_ACTIVE(dhd) || dhd->dma_h2d_ring_upd_support) {
  7397. dhd_prot_dma_indx_set(dhd, ring->wr,
  7398. H2D_DMA_INDX_WR_UPD, ring->idx);
  7399. } else if (IFRM_ACTIVE(dhd) && DHD_IS_FLOWRING(ring->idx, max_flowrings)) {
  7400. dhd_prot_dma_indx_set(dhd, ring->wr,
  7401. H2D_IFRM_INDX_WR_UPD, ring->idx);
  7402. } else {
  7403. dhd_bus_cmn_writeshared(dhd->bus, &(ring->wr),
  7404. sizeof(uint16), RING_WR_UPD, ring->idx);
  7405. }
  7406. /* raise h2d interrupt */
  7407. if (IDMA_ACTIVE(dhd) ||
  7408. (IFRM_ACTIVE(dhd) && DHD_IS_FLOWRING(ring->idx, max_flowrings))) {
  7409. db_index = IDMA_IDX0;
7410. /* this API is called in the wl down path; in that case sih has already been freed */
  7411. if (dhd->bus->sih) {
  7412. corerev = dhd->bus->sih->buscorerev;
7413. /* We need to explicitly configure the type of DMA for core rev >= 24 */
  7414. if (corerev >= 24) {
  7415. db_index |= (DMA_TYPE_IDMA << DMA_TYPE_SHIFT);
  7416. }
  7417. }
  7418. prot->mb_2_ring_fn(dhd->bus, db_index, TRUE);
  7419. } else {
  7420. prot->mb_ring_fn(dhd->bus, ring->wr);
  7421. }
  7422. }
  7423. static void BCMFASTPATH
  7424. dhd_prot_ring_write_complete(dhd_pub_t *dhd, msgbuf_ring_t * ring, void* p,
  7425. uint16 nitems)
  7426. {
  7427. unsigned long flags_bus;
  7428. DHD_BUS_LOCK(dhd->bus->bus_lock, flags_bus);
  7429. __dhd_prot_ring_write_complete(dhd, ring, p, nitems);
  7430. DHD_BUS_UNLOCK(dhd->bus->bus_lock, flags_bus);
  7431. }
  7432. /**
7433. * dhd_prot_ring_write_complete_mbdata - called from dhd_prot_h2d_mbdata_send_ctrlmsg,
7434. * which holds DHD_BUS_LOCK while updating the WR pointer and ringing the DB, and also updates
7435. * bus_low_power_state to indicate D3_INFORM was sent, all under the same BUS_LOCK.
  7436. */
  7437. static void BCMFASTPATH
  7438. dhd_prot_ring_write_complete_mbdata(dhd_pub_t *dhd, msgbuf_ring_t * ring, void *p,
  7439. uint16 nitems, uint32 mb_data)
  7440. {
  7441. unsigned long flags_bus;
  7442. DHD_BUS_LOCK(dhd->bus->bus_lock, flags_bus);
  7443. __dhd_prot_ring_write_complete(dhd, ring, p, nitems);
  7444. /* Mark D3_INFORM in the same context to skip ringing H2D DB after D3_INFORM */
  7445. if (mb_data == H2D_HOST_D3_INFORM) {
  7446. dhd->bus->bus_low_power_state = DHD_BUS_D3_INFORM_SENT;
  7447. }
  7448. DHD_BUS_UNLOCK(dhd->bus->bus_lock, flags_bus);
  7449. }
  7450. /**
  7451. * dhd_prot_upd_read_idx - Host updates the new RD index on consuming messages
  7452. * from a D2H ring. The new RD index will be updated in the DMA Index array or
  7453. * directly in dongle's ring state memory.
  7454. */
  7455. static void
  7456. dhd_prot_upd_read_idx(dhd_pub_t *dhd, msgbuf_ring_t * ring)
  7457. {
  7458. dhd_prot_t *prot = dhd->prot;
  7459. uint32 db_index;
  7460. uint corerev;
  7461. /* For HWA, update db_index and ring mb2 DB and return */
  7462. if (HWA_ACTIVE(dhd) && ring->hwa_db_type) {
  7463. db_index = HWA_DB_INDEX_VALUE(ring->rd) | ring->hwa_db_type;
  7464. DHD_TRACE(("%s: ring(%s) rd(0x%x) hwa_db_type(0x%x) db_index(0x%x)\n",
  7465. __FUNCTION__, ring->name, ring->rd, ring->hwa_db_type, db_index));
  7466. prot->mb_2_ring_fn(dhd->bus, db_index, FALSE);
  7467. return;
  7468. }
  7469. /* update read index */
7470. /* If DMA'ing of H2D indices is supported,
7471. * update the RD indices in host memory;
7472. * otherwise update them in TCM
7473. */
  7474. if (IDMA_ACTIVE(dhd)) {
  7475. dhd_prot_dma_indx_set(dhd, ring->rd,
  7476. D2H_DMA_INDX_RD_UPD, ring->idx);
  7477. db_index = IDMA_IDX1;
  7478. if (dhd->bus->sih) {
  7479. corerev = dhd->bus->sih->buscorerev;
7480. /* We need to explicitly configure the type of DMA for core rev >= 24 */
  7481. if (corerev >= 24) {
  7482. db_index |= (DMA_TYPE_IDMA << DMA_TYPE_SHIFT);
  7483. }
  7484. }
  7485. prot->mb_2_ring_fn(dhd->bus, db_index, FALSE);
  7486. } else if (dhd->dma_h2d_ring_upd_support) {
  7487. dhd_prot_dma_indx_set(dhd, ring->rd,
  7488. D2H_DMA_INDX_RD_UPD, ring->idx);
  7489. } else {
  7490. dhd_bus_cmn_writeshared(dhd->bus, &(ring->rd),
  7491. sizeof(uint16), RING_RD_UPD, ring->idx);
  7492. }
  7493. }
  7494. static int
  7495. dhd_send_d2h_ringcreate(dhd_pub_t *dhd, msgbuf_ring_t *ring_to_create,
  7496. uint16 ring_type, uint32 req_id)
  7497. {
  7498. unsigned long flags;
  7499. d2h_ring_create_req_t *d2h_ring;
  7500. uint16 alloced = 0;
  7501. int ret = BCME_OK;
  7502. uint16 max_h2d_rings = dhd->bus->max_submission_rings;
  7503. msgbuf_ring_t *ctrl_ring = &dhd->prot->h2dring_ctrl_subn;
  7504. DHD_RING_LOCK(ctrl_ring->ring_lock, flags);
  7505. DHD_TRACE(("%s trying to send D2H ring create Req\n", __FUNCTION__));
  7506. if (ring_to_create == NULL) {
  7507. DHD_ERROR(("%s: FATAL: ring_to_create is NULL\n", __FUNCTION__));
  7508. ret = BCME_ERROR;
  7509. goto err;
  7510. }
  7511. /* Request for ring buffer space */
  7512. d2h_ring = (d2h_ring_create_req_t *) dhd_prot_alloc_ring_space(dhd,
  7513. ctrl_ring, DHD_FLOWRING_DEFAULT_NITEMS_POSTED_H2D,
  7514. &alloced, FALSE);
  7515. if (d2h_ring == NULL) {
  7516. DHD_ERROR(("%s: FATAL: No space in control ring to send D2H ring create\n",
  7517. __FUNCTION__));
  7518. ret = BCME_NOMEM;
  7519. goto err;
  7520. }
  7521. ring_to_create->create_req_id = (uint16)req_id;
  7522. ring_to_create->create_pending = TRUE;
  7523. /* Common msg buf hdr */
  7524. d2h_ring->msg.msg_type = MSG_TYPE_D2H_RING_CREATE;
  7525. d2h_ring->msg.if_id = 0;
  7526. d2h_ring->msg.flags = ctrl_ring->current_phase;
  7527. d2h_ring->msg.request_id = htol32(ring_to_create->create_req_id);
  7528. d2h_ring->ring_id = htol16(DHD_D2H_RING_OFFSET(ring_to_create->idx, max_h2d_rings));
  7529. DHD_ERROR(("%s ringid: %d idx: %d max_h2d: %d\n", __FUNCTION__, d2h_ring->ring_id,
  7530. ring_to_create->idx, max_h2d_rings));
  7531. d2h_ring->ring_type = ring_type;
  7532. d2h_ring->max_items = htol16(ring_to_create->max_items);
  7533. d2h_ring->len_item = htol16(ring_to_create->item_len);
  7534. d2h_ring->ring_ptr.low_addr = ring_to_create->base_addr.low_addr;
  7535. d2h_ring->ring_ptr.high_addr = ring_to_create->base_addr.high_addr;
  7536. d2h_ring->flags = 0;
  7537. d2h_ring->msg.epoch =
  7538. ctrl_ring->seqnum % H2D_EPOCH_MODULO;
  7539. ctrl_ring->seqnum++;
  7540. #ifdef EWP_EDL
  7541. if (ring_type == BCMPCIE_D2H_RING_TYPE_EDL) {
  7542. DHD_ERROR(("%s: sending d2h EDL ring create: "
  7543. "\n max items=%u; len_item=%u; ring_id=%u; low_addr=0x%x; high_addr=0x%x\n",
  7544. __FUNCTION__, ltoh16(d2h_ring->max_items),
  7545. ltoh16(d2h_ring->len_item),
  7546. ltoh16(d2h_ring->ring_id),
  7547. d2h_ring->ring_ptr.low_addr,
  7548. d2h_ring->ring_ptr.high_addr));
  7549. }
  7550. #endif /* EWP_EDL */
  7551. /* Update the flow_ring's WRITE index */
  7552. dhd_prot_ring_write_complete(dhd, ctrl_ring, d2h_ring,
  7553. DHD_FLOWRING_DEFAULT_NITEMS_POSTED_H2D);
  7554. DHD_RING_UNLOCK(ctrl_ring->ring_lock, flags);
  7555. return ret;
  7556. err:
  7557. DHD_RING_UNLOCK(ctrl_ring->ring_lock, flags);
  7558. return ret;
  7559. }
  7560. static int
  7561. dhd_send_h2d_ringcreate(dhd_pub_t *dhd, msgbuf_ring_t *ring_to_create, uint8 ring_type, uint32 id)
  7562. {
  7563. unsigned long flags;
  7564. h2d_ring_create_req_t *h2d_ring;
  7565. uint16 alloced = 0;
  7566. uint8 i = 0;
  7567. int ret = BCME_OK;
  7568. msgbuf_ring_t *ctrl_ring = &dhd->prot->h2dring_ctrl_subn;
  7569. DHD_RING_LOCK(ctrl_ring->ring_lock, flags);
  7570. DHD_TRACE(("%s trying to send H2D ring create Req\n", __FUNCTION__));
  7571. if (ring_to_create == NULL) {
  7572. DHD_ERROR(("%s: FATAL: ring_to_create is NULL\n", __FUNCTION__));
  7573. ret = BCME_ERROR;
  7574. goto err;
  7575. }
  7576. /* Request for ring buffer space */
  7577. h2d_ring = (h2d_ring_create_req_t *)dhd_prot_alloc_ring_space(dhd,
  7578. ctrl_ring, DHD_FLOWRING_DEFAULT_NITEMS_POSTED_H2D,
  7579. &alloced, FALSE);
  7580. if (h2d_ring == NULL) {
  7581. DHD_ERROR(("%s: FATAL: No space in control ring to send H2D ring create\n",
  7582. __FUNCTION__));
  7583. ret = BCME_NOMEM;
  7584. goto err;
  7585. }
  7586. ring_to_create->create_req_id = (uint16)id;
  7587. ring_to_create->create_pending = TRUE;
  7588. /* Common msg buf hdr */
  7589. h2d_ring->msg.msg_type = MSG_TYPE_H2D_RING_CREATE;
  7590. h2d_ring->msg.if_id = 0;
  7591. h2d_ring->msg.request_id = htol32(ring_to_create->create_req_id);
  7592. h2d_ring->msg.flags = ctrl_ring->current_phase;
  7593. h2d_ring->ring_id = htol16(DHD_H2D_RING_OFFSET(ring_to_create->idx));
  7594. h2d_ring->ring_type = ring_type;
  7595. h2d_ring->max_items = htol16(H2DRING_DYNAMIC_INFO_MAX_ITEM);
  7596. h2d_ring->n_completion_ids = ring_to_create->n_completion_ids;
  7597. h2d_ring->len_item = htol16(H2DRING_INFO_BUFPOST_ITEMSIZE);
  7598. h2d_ring->ring_ptr.low_addr = ring_to_create->base_addr.low_addr;
  7599. h2d_ring->ring_ptr.high_addr = ring_to_create->base_addr.high_addr;
  7600. for (i = 0; i < ring_to_create->n_completion_ids; i++) {
  7601. h2d_ring->completion_ring_ids[i] = htol16(ring_to_create->compeltion_ring_ids[i]);
  7602. }
  7603. h2d_ring->flags = 0;
  7604. h2d_ring->msg.epoch =
  7605. ctrl_ring->seqnum % H2D_EPOCH_MODULO;
  7606. ctrl_ring->seqnum++;
  7607. /* Update the flow_ring's WRITE index */
  7608. dhd_prot_ring_write_complete(dhd, ctrl_ring, h2d_ring,
  7609. DHD_FLOWRING_DEFAULT_NITEMS_POSTED_H2D);
  7610. DHD_RING_UNLOCK(ctrl_ring->ring_lock, flags);
  7611. return ret;
  7612. err:
  7613. DHD_RING_UNLOCK(ctrl_ring->ring_lock, flags);
  7614. return ret;
  7615. }
  7616. /**
  7617. * dhd_prot_dma_indx_set - set a new WR or RD index in the DMA index array.
  7618. * Dongle will DMA the entire array (if DMA_INDX feature is enabled).
  7619. * See dhd_prot_dma_indx_init()
  7620. */
  7621. void
  7622. dhd_prot_dma_indx_set(dhd_pub_t *dhd, uint16 new_index, uint8 type, uint16 ringid)
  7623. {
  7624. uint8 *ptr;
  7625. uint16 offset;
  7626. dhd_prot_t *prot = dhd->prot;
  7627. uint16 max_h2d_rings = dhd->bus->max_submission_rings;
  7628. switch (type) {
  7629. case H2D_DMA_INDX_WR_UPD:
  7630. ptr = (uint8 *)(prot->h2d_dma_indx_wr_buf.va);
  7631. offset = DHD_H2D_RING_OFFSET(ringid);
  7632. break;
  7633. case D2H_DMA_INDX_RD_UPD:
  7634. ptr = (uint8 *)(prot->d2h_dma_indx_rd_buf.va);
  7635. offset = DHD_D2H_RING_OFFSET(ringid, max_h2d_rings);
  7636. break;
  7637. case H2D_IFRM_INDX_WR_UPD:
  7638. ptr = (uint8 *)(prot->h2d_ifrm_indx_wr_buf.va);
  7639. offset = DHD_H2D_FRM_FLOW_RING_OFFSET(ringid);
  7640. break;
  7641. default:
  7642. DHD_ERROR(("%s: Invalid option for DMAing read/write index\n",
  7643. __FUNCTION__));
  7644. return;
  7645. }
  7646. ASSERT(prot->rw_index_sz != 0);
  7647. ptr += offset * prot->rw_index_sz;
  7648. *(uint16*)ptr = htol16(new_index);
  7649. OSL_CACHE_FLUSH((void *)ptr, prot->rw_index_sz);
  7650. DHD_TRACE(("%s: data %d type %d ringid %d ptr 0x%p offset %d\n",
  7651. __FUNCTION__, new_index, type, ringid, ptr, offset));
  7652. } /* dhd_prot_dma_indx_set */
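/*
 * Illustrative sketch (not driver code): the layout of a host-resident DMA
 * index array as used above. Each ring owns one rw_index_sz-wide slot at
 * (ring offset * rw_index_sz); only that slot is written and cache-flushed,
 * and the dongle DMAes the whole array to learn the new index. Names below
 * are hypothetical and the little-endian store stands in for htol16().
 */
static void
dma_indx_array_set(unsigned char *array_va, unsigned int ring_offset,
	unsigned int rw_index_sz, unsigned short new_index)
{
	unsigned char *slot = array_va + (ring_offset * rw_index_sz);
	/* store in little-endian byte order, as the dongle expects */
	slot[0] = (unsigned char)(new_index & 0xFF);
	slot[1] = (unsigned char)(new_index >> 8);
	/* a real implementation would flush rw_index_sz bytes from the cache here */
}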
  7653. /**
  7654. * dhd_prot_dma_indx_get - Fetch a WR or RD index from the dongle DMA-ed index
  7655. * array.
  7656. * Dongle DMAes an entire array to host memory (if the feature is enabled).
  7657. * See dhd_prot_dma_indx_init()
  7658. */
  7659. static uint16
  7660. dhd_prot_dma_indx_get(dhd_pub_t *dhd, uint8 type, uint16 ringid)
  7661. {
  7662. uint8 *ptr;
  7663. uint16 data;
  7664. uint16 offset;
  7665. dhd_prot_t *prot = dhd->prot;
  7666. uint16 max_h2d_rings = dhd->bus->max_submission_rings;
  7667. switch (type) {
  7668. case H2D_DMA_INDX_WR_UPD:
  7669. ptr = (uint8 *)(prot->h2d_dma_indx_wr_buf.va);
  7670. offset = DHD_H2D_RING_OFFSET(ringid);
  7671. break;
  7672. case H2D_DMA_INDX_RD_UPD:
  7673. ptr = (uint8 *)(prot->h2d_dma_indx_rd_buf.va);
  7674. offset = DHD_H2D_RING_OFFSET(ringid);
  7675. break;
  7676. case D2H_DMA_INDX_WR_UPD:
  7677. ptr = (uint8 *)(prot->d2h_dma_indx_wr_buf.va);
  7678. offset = DHD_D2H_RING_OFFSET(ringid, max_h2d_rings);
  7679. break;
  7680. case D2H_DMA_INDX_RD_UPD:
  7681. ptr = (uint8 *)(prot->d2h_dma_indx_rd_buf.va);
  7682. offset = DHD_D2H_RING_OFFSET(ringid, max_h2d_rings);
  7683. break;
  7684. default:
  7685. DHD_ERROR(("%s: Invalid option for DMAing read/write index\n",
  7686. __FUNCTION__));
  7687. return 0;
  7688. }
  7689. ASSERT(prot->rw_index_sz != 0);
  7690. ptr += offset * prot->rw_index_sz;
  7691. OSL_CACHE_INV((void *)ptr, prot->rw_index_sz);
  7692. data = LTOH16(*((uint16*)ptr));
  7693. DHD_TRACE(("%s: data %d type %d ringid %d ptr 0x%p offset %d\n",
  7694. __FUNCTION__, data, type, ringid, ptr, offset));
  7695. return (data);
  7696. } /* dhd_prot_dma_indx_get */
  7697. /**
7698. * An array of DMA read/write indices, containing information about host rings, can be maintained
7699. * either in host memory or in device memory, depending on preprocessor options. Depending on these
7700. * options, this function is called during driver initialization. It reserves and initializes
7701. * blocks of DMA'able host memory containing an array of DMA read or DMA write indices. The physical
7702. * addresses of these host memory blocks are communicated to the dongle later on. By reading this host
7703. * memory, the dongle learns about the state of the host rings.
  7704. */
  7705. static INLINE int
  7706. dhd_prot_dma_indx_alloc(dhd_pub_t *dhd, uint8 type,
  7707. dhd_dma_buf_t *dma_buf, uint32 bufsz)
  7708. {
  7709. int rc;
  7710. if ((dma_buf->len == bufsz) || (dma_buf->va != NULL))
  7711. return BCME_OK;
  7712. rc = dhd_dma_buf_alloc(dhd, dma_buf, bufsz);
  7713. return rc;
  7714. }
  7715. int
  7716. dhd_prot_dma_indx_init(dhd_pub_t *dhd, uint32 rw_index_sz, uint8 type, uint32 length)
  7717. {
  7718. uint32 bufsz;
  7719. dhd_prot_t *prot = dhd->prot;
  7720. dhd_dma_buf_t *dma_buf;
  7721. if (prot == NULL) {
  7722. DHD_ERROR(("prot is not inited\n"));
  7723. return BCME_ERROR;
  7724. }
7725. /* Dongle advertises 2B or 4B RW index size */
  7726. ASSERT(rw_index_sz != 0);
  7727. prot->rw_index_sz = rw_index_sz;
  7728. bufsz = rw_index_sz * length;
  7729. switch (type) {
  7730. case H2D_DMA_INDX_WR_BUF:
  7731. dma_buf = &prot->h2d_dma_indx_wr_buf;
  7732. if (dhd_prot_dma_indx_alloc(dhd, type, dma_buf, bufsz))
  7733. goto ret_no_mem;
  7734. DHD_ERROR(("H2D DMA WR INDX : array size %d = %d * %d\n",
  7735. dma_buf->len, rw_index_sz, length));
  7736. break;
  7737. case H2D_DMA_INDX_RD_BUF:
  7738. dma_buf = &prot->h2d_dma_indx_rd_buf;
  7739. if (dhd_prot_dma_indx_alloc(dhd, type, dma_buf, bufsz))
  7740. goto ret_no_mem;
  7741. DHD_ERROR(("H2D DMA RD INDX : array size %d = %d * %d\n",
  7742. dma_buf->len, rw_index_sz, length));
  7743. break;
  7744. case D2H_DMA_INDX_WR_BUF:
  7745. dma_buf = &prot->d2h_dma_indx_wr_buf;
  7746. if (dhd_prot_dma_indx_alloc(dhd, type, dma_buf, bufsz))
  7747. goto ret_no_mem;
  7748. DHD_ERROR(("D2H DMA WR INDX : array size %d = %d * %d\n",
  7749. dma_buf->len, rw_index_sz, length));
  7750. break;
  7751. case D2H_DMA_INDX_RD_BUF:
  7752. dma_buf = &prot->d2h_dma_indx_rd_buf;
  7753. if (dhd_prot_dma_indx_alloc(dhd, type, dma_buf, bufsz))
  7754. goto ret_no_mem;
  7755. DHD_ERROR(("D2H DMA RD INDX : array size %d = %d * %d\n",
  7756. dma_buf->len, rw_index_sz, length));
  7757. break;
  7758. case H2D_IFRM_INDX_WR_BUF:
  7759. dma_buf = &prot->h2d_ifrm_indx_wr_buf;
  7760. if (dhd_prot_dma_indx_alloc(dhd, type, dma_buf, bufsz))
  7761. goto ret_no_mem;
  7762. DHD_ERROR(("H2D IFRM WR INDX : array size %d = %d * %d\n",
  7763. dma_buf->len, rw_index_sz, length));
  7764. break;
  7765. default:
  7766. DHD_ERROR(("%s: Unexpected option\n", __FUNCTION__));
  7767. return BCME_BADOPTION;
  7768. }
  7769. return BCME_OK;
  7770. ret_no_mem:
  7771. DHD_ERROR(("%s: dhd_prot_dma_indx_alloc type %d buf_sz %d failure\n",
  7772. __FUNCTION__, type, bufsz));
  7773. return BCME_NOMEM;
  7774. } /* dhd_prot_dma_indx_init */
  7775. /**
7776. * Called when checking for 'completion' messages from the dongle. Returns the next host buffer to read
7777. * from, or NULL if there are no more messages to read.
  7778. */
  7779. static uint8*
  7780. dhd_prot_get_read_addr(dhd_pub_t *dhd, msgbuf_ring_t *ring, uint32 *available_len)
  7781. {
  7782. uint16 wr;
  7783. uint16 rd;
  7784. uint16 depth;
  7785. uint16 items;
  7786. void *read_addr = NULL; /* address of next msg to be read in ring */
  7787. uint16 d2h_wr = 0;
  7788. DHD_TRACE(("%s: d2h_dma_indx_rd_buf %p, d2h_dma_indx_wr_buf %p\n",
  7789. __FUNCTION__, (uint32 *)(dhd->prot->d2h_dma_indx_rd_buf.va),
  7790. (uint32 *)(dhd->prot->d2h_dma_indx_wr_buf.va)));
7791. /* Remember the read index in a variable.
7792. * This is because ring->rd gets updated at the end of this function,
7793. * so the exact read index from which the message was read
7794. * would otherwise not be available for printing.
7795. */
  7796. ring->curr_rd = ring->rd;
  7797. /* update write pointer */
  7798. if (dhd->dma_d2h_ring_upd_support) {
  7799. /* DMAing write/read indices supported */
  7800. d2h_wr = dhd_prot_dma_indx_get(dhd, D2H_DMA_INDX_WR_UPD, ring->idx);
  7801. ring->wr = d2h_wr;
  7802. } else {
  7803. dhd_bus_cmn_readshared(dhd->bus, &(ring->wr), RING_WR_UPD, ring->idx);
  7804. }
  7805. wr = ring->wr;
  7806. rd = ring->rd;
  7807. depth = ring->max_items;
  7808. /* check for avail space, in number of ring items */
  7809. items = READ_AVAIL_SPACE(wr, rd, depth);
  7810. if (items == 0)
  7811. return NULL;
7812. /*
7813. * Note that there are builds where ASSERT translates to just a printk,
7814. * so even if we hit this condition we would never halt. In that case
7815. * dhd_prot_process_msgtype can get into a big loop if this
7816. * happens.
7817. */
  7818. if (items > ring->max_items) {
  7819. DHD_ERROR(("\r\n======================= \r\n"));
  7820. DHD_ERROR(("%s(): ring %p, ring->name %s, ring->max_items %d, items %d \r\n",
  7821. __FUNCTION__, ring, ring->name, ring->max_items, items));
  7822. DHD_ERROR(("wr: %d, rd: %d, depth: %d \r\n", wr, rd, depth));
  7823. DHD_ERROR(("dhd->busstate %d bus->wait_for_d3_ack %d \r\n",
  7824. dhd->busstate, dhd->bus->wait_for_d3_ack));
  7825. DHD_ERROR(("\r\n======================= \r\n"));
  7826. #ifdef SUPPORT_LINKDOWN_RECOVERY
  7827. if (wr >= ring->max_items) {
  7828. dhd->bus->read_shm_fail = TRUE;
  7829. }
  7830. #else
  7831. #ifdef DHD_FW_COREDUMP
  7832. if (dhd->memdump_enabled) {
  7833. /* collect core dump */
  7834. dhd->memdump_type = DUMP_TYPE_RESUMED_ON_INVALID_RING_RDWR;
  7835. dhd_bus_mem_dump(dhd);
  7836. }
  7837. #endif /* DHD_FW_COREDUMP */
  7838. #endif /* SUPPORT_LINKDOWN_RECOVERY */
  7839. *available_len = 0;
  7840. dhd_schedule_reset(dhd);
  7841. return NULL;
  7842. }
  7843. /* if space is available, calculate address to be read */
  7844. read_addr = (char*)ring->dma_buf.va + (rd * ring->item_len);
  7845. /* update read pointer */
  7846. if ((ring->rd + items) >= ring->max_items)
  7847. ring->rd = 0;
  7848. else
  7849. ring->rd += items;
  7850. ASSERT(ring->rd < ring->max_items);
  7851. /* convert items to bytes : available_len must be 32bits */
  7852. *available_len = (uint32)(items * ring->item_len);
  7853. OSL_CACHE_INV(read_addr, *available_len);
  7854. /* return read address */
  7855. return read_addr;
  7856. } /* dhd_prot_get_read_addr */
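/*
 * Illustrative sketch (not driver code): the read-availability computation
 * behind READ_AVAIL_SPACE as used above. Only the contiguous span starting at
 * rd is returned; on wrap-around the remaining items are picked up by the
 * next call once rd has been reset to 0. The helper below is hypothetical.
 */
static unsigned short
ring_read_avail(unsigned short wr, unsigned short rd, unsigned short depth)
{
	if (wr >= rd)
		return (unsigned short)(wr - rd);	/* no wrap: rd .. wr */
	return (unsigned short)(depth - rd);		/* wrap: rd .. end of ring */
}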
  7857. /**
  7858. * dhd_prot_h2d_mbdata_send_ctrlmsg is a non-atomic function,
  7859. * make sure the callers always hold appropriate locks.
  7860. */
  7861. int dhd_prot_h2d_mbdata_send_ctrlmsg(dhd_pub_t *dhd, uint32 mb_data)
  7862. {
  7863. h2d_mailbox_data_t *h2d_mb_data;
  7864. uint16 alloced = 0;
  7865. msgbuf_ring_t *ctrl_ring = &dhd->prot->h2dring_ctrl_subn;
  7866. unsigned long flags;
  7867. int num_post = 1;
  7868. int i;
  7869. DHD_INFO(("%s Sending H2D MB data Req data 0x%04x\n",
  7870. __FUNCTION__, mb_data));
  7871. if (!ctrl_ring->inited) {
  7872. DHD_ERROR(("%s: Ctrl Submit Ring: not inited\n", __FUNCTION__));
  7873. return BCME_ERROR;
  7874. }
  7875. for (i = 0; i < num_post; i ++) {
  7876. DHD_RING_LOCK(ctrl_ring->ring_lock, flags);
  7877. /* Request for ring buffer space */
  7878. h2d_mb_data = (h2d_mailbox_data_t *)dhd_prot_alloc_ring_space(dhd,
  7879. ctrl_ring, DHD_FLOWRING_DEFAULT_NITEMS_POSTED_H2D,
  7880. &alloced, FALSE);
  7881. if (h2d_mb_data == NULL) {
  7882. DHD_ERROR(("%s: FATAL: No space in control ring to send H2D Mb data\n",
  7883. __FUNCTION__));
  7884. DHD_RING_UNLOCK(ctrl_ring->ring_lock, flags);
  7885. return BCME_NOMEM;
  7886. }
  7887. memset(h2d_mb_data, 0, sizeof(h2d_mailbox_data_t));
  7888. /* Common msg buf hdr */
  7889. h2d_mb_data->msg.msg_type = MSG_TYPE_H2D_MAILBOX_DATA;
  7890. h2d_mb_data->msg.flags = ctrl_ring->current_phase;
  7891. h2d_mb_data->msg.epoch =
  7892. ctrl_ring->seqnum % H2D_EPOCH_MODULO;
  7893. ctrl_ring->seqnum++;
  7894. /* Update flow create message */
  7895. h2d_mb_data->mail_box_data = htol32(mb_data);
  7896. {
  7897. h2d_mb_data->mail_box_data = htol32(mb_data);
  7898. }
  7899. DHD_INFO(("%s Send H2D MB data Req data 0x%04x\n", __FUNCTION__, mb_data));
  7900. /* upd wrt ptr and raise interrupt */
  7901. dhd_prot_ring_write_complete_mbdata(dhd, ctrl_ring, h2d_mb_data,
  7902. DHD_FLOWRING_DEFAULT_NITEMS_POSTED_H2D, mb_data);
  7903. DHD_RING_UNLOCK(ctrl_ring->ring_lock, flags);
  7904. }
  7905. return 0;
  7906. }
  7907. /** Creates a flow ring and informs dongle of this event */
  7908. int
  7909. dhd_prot_flow_ring_create(dhd_pub_t *dhd, flow_ring_node_t *flow_ring_node)
  7910. {
  7911. tx_flowring_create_request_t *flow_create_rqst;
  7912. msgbuf_ring_t *flow_ring;
  7913. dhd_prot_t *prot = dhd->prot;
  7914. unsigned long flags;
  7915. uint16 alloced = 0;
  7916. msgbuf_ring_t *ctrl_ring = &prot->h2dring_ctrl_subn;
  7917. uint16 max_flowrings = dhd->bus->max_tx_flowrings;
  7918. /* Fetch a pre-initialized msgbuf_ring from the flowring pool */
  7919. flow_ring = dhd_prot_flowrings_pool_fetch(dhd, flow_ring_node->flowid);
  7920. if (flow_ring == NULL) {
  7921. DHD_ERROR(("%s: dhd_prot_flowrings_pool_fetch TX Flowid %d failed\n",
  7922. __FUNCTION__, flow_ring_node->flowid));
  7923. return BCME_NOMEM;
  7924. }
  7925. DHD_RING_LOCK(ctrl_ring->ring_lock, flags);
  7926. /* Request for ctrl_ring buffer space */
  7927. flow_create_rqst = (tx_flowring_create_request_t *)
  7928. dhd_prot_alloc_ring_space(dhd, ctrl_ring, 1, &alloced, FALSE);
  7929. if (flow_create_rqst == NULL) {
  7930. dhd_prot_flowrings_pool_release(dhd, flow_ring_node->flowid, flow_ring);
  7931. DHD_ERROR(("%s: Flow Create Req flowid %d - failure ring space\n",
  7932. __FUNCTION__, flow_ring_node->flowid));
7933. DHD_RING_UNLOCK(ctrl_ring->ring_lock, flags);
  7934. return BCME_NOMEM;
  7935. }
  7936. flow_ring_node->prot_info = (void *)flow_ring;
  7937. /* Common msg buf hdr */
  7938. flow_create_rqst->msg.msg_type = MSG_TYPE_FLOW_RING_CREATE;
  7939. flow_create_rqst->msg.if_id = (uint8)flow_ring_node->flow_info.ifindex;
  7940. flow_create_rqst->msg.request_id = htol32(0); /* TBD */
  7941. flow_create_rqst->msg.flags = ctrl_ring->current_phase;
  7942. flow_create_rqst->msg.epoch = ctrl_ring->seqnum % H2D_EPOCH_MODULO;
  7943. ctrl_ring->seqnum++;
  7944. /* Update flow create message */
  7945. flow_create_rqst->tid = flow_ring_node->flow_info.tid;
  7946. flow_create_rqst->flow_ring_id = htol16((uint16)flow_ring_node->flowid);
  7947. memcpy(flow_create_rqst->sa, flow_ring_node->flow_info.sa, sizeof(flow_create_rqst->sa));
  7948. memcpy(flow_create_rqst->da, flow_ring_node->flow_info.da, sizeof(flow_create_rqst->da));
  7949. /* CAUTION: ring::base_addr already in Little Endian */
  7950. flow_create_rqst->flow_ring_ptr.low_addr = flow_ring->base_addr.low_addr;
  7951. flow_create_rqst->flow_ring_ptr.high_addr = flow_ring->base_addr.high_addr;
  7952. flow_create_rqst->max_items = htol16(prot->h2d_max_txpost);
  7953. flow_create_rqst->len_item = htol16(H2DRING_TXPOST_ITEMSIZE);
  7954. flow_create_rqst->if_flags = 0;
  7955. #ifdef DHD_HP2P
  7956. /* Create HPP flow ring if HP2P is enabled and TID=7 and AWDL interface */
  7957. /* and traffic is not multicast */
7958. /* Allow infra interface only if user enabled hp2p_infra_enable through the iovar */
  7959. /* Allow only one HP2P Flow active at a time */
  7960. if (dhd->hp2p_capable && !dhd->hp2p_ring_active &&
  7961. flow_ring_node->flow_info.tid == HP2P_PRIO &&
  7962. (dhd->hp2p_infra_enable || flow_create_rqst->msg.if_id) &&
  7963. !ETHER_ISMULTI(flow_create_rqst->da)) {
  7964. flow_create_rqst->if_flags |= BCMPCIE_FLOW_RING_INTF_HP2P;
  7965. flow_ring_node->hp2p_ring = TRUE;
  7966. dhd->hp2p_ring_active = TRUE;
  7967. DHD_ERROR(("%s: flow ring for HP2P tid = %d flowid = %d\n",
  7968. __FUNCTION__, flow_ring_node->flow_info.tid,
  7969. flow_ring_node->flowid));
  7970. }
  7971. #endif /* DHD_HP2P */
7972. /* definition for ifrm mask : bit0:d11ac core, bit1:d11ad core
7973. * currently it is not used for priority, so it is used solely as the ifrm mask
7974. */
  7975. if (IFRM_ACTIVE(dhd))
  7976. flow_create_rqst->priority_ifrmmask = (1 << IFRM_DEV_0);
  7977. DHD_ERROR(("%s: Send Flow Create Req flow ID %d for peer " MACDBG
  7978. " prio %d ifindex %d\n", __FUNCTION__, flow_ring_node->flowid,
  7979. MAC2STRDBG(flow_ring_node->flow_info.da), flow_ring_node->flow_info.tid,
  7980. flow_ring_node->flow_info.ifindex));
  7981. /* Update the flow_ring's WRITE index */
  7982. if (IDMA_ACTIVE(dhd) || dhd->dma_h2d_ring_upd_support) {
  7983. dhd_prot_dma_indx_set(dhd, flow_ring->wr,
  7984. H2D_DMA_INDX_WR_UPD, flow_ring->idx);
  7985. } else if (IFRM_ACTIVE(dhd) && DHD_IS_FLOWRING(flow_ring->idx, max_flowrings)) {
  7986. dhd_prot_dma_indx_set(dhd, flow_ring->wr,
  7987. H2D_IFRM_INDX_WR_UPD, flow_ring->idx);
  7988. } else {
  7989. dhd_bus_cmn_writeshared(dhd->bus, &(flow_ring->wr),
  7990. sizeof(uint16), RING_WR_UPD, flow_ring->idx);
  7991. }
  7992. /* update control subn ring's WR index and ring doorbell to dongle */
  7993. dhd_prot_ring_write_complete(dhd, ctrl_ring, flow_create_rqst, 1);
  7994. DHD_RING_UNLOCK(ctrl_ring->ring_lock, flags);
  7995. return BCME_OK;
  7996. } /* dhd_prot_flow_ring_create */
  7997. /** called on receiving MSG_TYPE_FLOW_RING_CREATE_CMPLT message from dongle */
  7998. static void
  7999. dhd_prot_flow_ring_create_response_process(dhd_pub_t *dhd, void *msg)
  8000. {
  8001. tx_flowring_create_response_t *flow_create_resp = (tx_flowring_create_response_t *)msg;
  8002. DHD_ERROR(("%s: Flow Create Response status = %d Flow %d\n", __FUNCTION__,
  8003. ltoh16(flow_create_resp->cmplt.status),
  8004. ltoh16(flow_create_resp->cmplt.flow_ring_id)));
  8005. dhd_bus_flow_ring_create_response(dhd->bus,
  8006. ltoh16(flow_create_resp->cmplt.flow_ring_id),
  8007. ltoh16(flow_create_resp->cmplt.status));
  8008. }
  8009. static void
  8010. dhd_prot_process_h2d_ring_create_complete(dhd_pub_t *dhd, void *buf)
  8011. {
  8012. h2d_ring_create_response_t *resp = (h2d_ring_create_response_t *)buf;
  8013. DHD_INFO(("%s ring create Response status = %d ring %d, id 0x%04x\n", __FUNCTION__,
  8014. ltoh16(resp->cmplt.status),
  8015. ltoh16(resp->cmplt.ring_id),
  8016. ltoh32(resp->cmn_hdr.request_id)));
  8017. if ((ltoh32(resp->cmn_hdr.request_id) != DHD_H2D_DBGRING_REQ_PKTID) &&
  8018. (ltoh32(resp->cmn_hdr.request_id) != DHD_H2D_BTLOGRING_REQ_PKTID)) {
  8019. DHD_ERROR(("invalid request ID with h2d ring create complete\n"));
  8020. return;
  8021. }
  8022. if (dhd->prot->h2dring_info_subn->create_req_id == ltoh32(resp->cmn_hdr.request_id) &&
  8023. !dhd->prot->h2dring_info_subn->create_pending) {
  8024. DHD_ERROR(("info ring create status for not pending submit ring\n"));
  8025. }
  8026. if (ltoh16(resp->cmplt.status) != BCMPCIE_SUCCESS) {
  8027. DHD_ERROR(("info/btlog ring create failed with status %d\n",
  8028. ltoh16(resp->cmplt.status)));
  8029. return;
  8030. }
  8031. if (dhd->prot->h2dring_info_subn->create_req_id == ltoh32(resp->cmn_hdr.request_id)) {
  8032. dhd->prot->h2dring_info_subn->create_pending = FALSE;
  8033. dhd->prot->h2dring_info_subn->inited = TRUE;
  8034. DHD_ERROR(("info buffer post after ring create\n"));
  8035. dhd_prot_infobufpost(dhd, dhd->prot->h2dring_info_subn);
  8036. }
  8037. }
  8038. static void
  8039. dhd_prot_process_d2h_ring_create_complete(dhd_pub_t *dhd, void *buf)
  8040. {
  8041. d2h_ring_create_response_t *resp = (d2h_ring_create_response_t *)buf;
  8042. DHD_INFO(("%s ring create Response status = %d ring %d, id 0x%04x\n", __FUNCTION__,
  8043. ltoh16(resp->cmplt.status),
  8044. ltoh16(resp->cmplt.ring_id),
  8045. ltoh32(resp->cmn_hdr.request_id)));
  8046. if ((ltoh32(resp->cmn_hdr.request_id) != DHD_D2H_DBGRING_REQ_PKTID) &&
  8047. (ltoh32(resp->cmn_hdr.request_id) != DHD_D2H_BTLOGRING_REQ_PKTID) &&
  8048. #ifdef DHD_HP2P
  8049. (ltoh32(resp->cmn_hdr.request_id) != DHD_D2H_HPPRING_TXREQ_PKTID) &&
  8050. (ltoh32(resp->cmn_hdr.request_id) != DHD_D2H_HPPRING_RXREQ_PKTID) &&
  8051. #endif /* DHD_HP2P */
  8052. TRUE) {
  8053. DHD_ERROR(("invalid request ID with d2h ring create complete\n"));
  8054. return;
  8055. }
  8056. if (ltoh32(resp->cmn_hdr.request_id) == DHD_D2H_DBGRING_REQ_PKTID) {
  8057. #ifdef EWP_EDL
  8058. if (!dhd->dongle_edl_support)
  8059. #endif // endif
  8060. {
  8061. if (!dhd->prot->d2hring_info_cpln->create_pending) {
  8062. DHD_ERROR(("info ring create status for not pending cpl ring\n"));
  8063. return;
  8064. }
  8065. if (ltoh16(resp->cmplt.status) != BCMPCIE_SUCCESS) {
  8066. DHD_ERROR(("info cpl ring create failed with status %d\n",
  8067. ltoh16(resp->cmplt.status)));
  8068. return;
  8069. }
  8070. dhd->prot->d2hring_info_cpln->create_pending = FALSE;
  8071. dhd->prot->d2hring_info_cpln->inited = TRUE;
  8072. }
  8073. #ifdef EWP_EDL
  8074. else {
  8075. if (!dhd->prot->d2hring_edl->create_pending) {
  8076. DHD_ERROR(("edl ring create status for not pending cpl ring\n"));
  8077. return;
  8078. }
  8079. if (ltoh16(resp->cmplt.status) != BCMPCIE_SUCCESS) {
  8080. DHD_ERROR(("edl cpl ring create failed with status %d\n",
  8081. ltoh16(resp->cmplt.status)));
  8082. return;
  8083. }
  8084. dhd->prot->d2hring_edl->create_pending = FALSE;
  8085. dhd->prot->d2hring_edl->inited = TRUE;
  8086. }
  8087. #endif /* EWP_EDL */
  8088. }
  8089. #ifdef DHD_HP2P
  8090. if (dhd->prot->d2hring_hp2p_txcpl &&
  8091. ltoh32(resp->cmn_hdr.request_id) == DHD_D2H_HPPRING_TXREQ_PKTID) {
  8092. if (!dhd->prot->d2hring_hp2p_txcpl->create_pending) {
  8093. DHD_ERROR(("HPP tx ring create status for not pending cpl ring\n"));
  8094. return;
  8095. }
  8096. if (ltoh16(resp->cmplt.status) != BCMPCIE_SUCCESS) {
  8097. DHD_ERROR(("HPP tx cpl ring create failed with status %d\n",
  8098. ltoh16(resp->cmplt.status)));
  8099. return;
  8100. }
  8101. dhd->prot->d2hring_hp2p_txcpl->create_pending = FALSE;
  8102. dhd->prot->d2hring_hp2p_txcpl->inited = TRUE;
  8103. }
  8104. if (dhd->prot->d2hring_hp2p_rxcpl &&
  8105. ltoh32(resp->cmn_hdr.request_id) == DHD_D2H_HPPRING_RXREQ_PKTID) {
  8106. if (!dhd->prot->d2hring_hp2p_rxcpl->create_pending) {
  8107. DHD_ERROR(("HPP rx ring create status for not pending cpl ring\n"));
  8108. return;
  8109. }
  8110. if (ltoh16(resp->cmplt.status) != BCMPCIE_SUCCESS) {
  8111. DHD_ERROR(("HPP rx cpl ring create failed with status %d\n",
  8112. ltoh16(resp->cmplt.status)));
  8113. return;
  8114. }
  8115. dhd->prot->d2hring_hp2p_rxcpl->create_pending = FALSE;
  8116. dhd->prot->d2hring_hp2p_rxcpl->inited = TRUE;
  8117. }
  8118. #endif /* DHD_HP2P */
  8119. }
  8120. static void
  8121. dhd_prot_process_d2h_mb_data(dhd_pub_t *dhd, void* buf)
  8122. {
  8123. d2h_mailbox_data_t *d2h_data;
  8124. d2h_data = (d2h_mailbox_data_t *)buf;
  8125. DHD_INFO(("%s dhd_prot_process_d2h_mb_data, 0x%04x\n", __FUNCTION__,
  8126. d2h_data->d2h_mailbox_data));
  8127. dhd_bus_handle_mb_data(dhd->bus, d2h_data->d2h_mailbox_data);
  8128. }
  8129. static void
  8130. dhd_prot_process_d2h_host_ts_complete(dhd_pub_t *dhd, void* buf)
  8131. {
  8132. DHD_ERROR(("Timesunc feature not compiled in but GOT HOST_TS_COMPLETE\n"));
  8133. }
  8134. /** called on e.g. flow ring delete */
  8135. void dhd_prot_clean_flow_ring(dhd_pub_t *dhd, void *msgbuf_flow_info)
  8136. {
  8137. msgbuf_ring_t *flow_ring = (msgbuf_ring_t *)msgbuf_flow_info;
  8138. dhd_prot_ring_detach(dhd, flow_ring);
  8139. DHD_INFO(("%s Cleaning up Flow \n", __FUNCTION__));
  8140. }
  8141. void dhd_prot_print_flow_ring(dhd_pub_t *dhd, void *msgbuf_flow_info,
  8142. struct bcmstrbuf *strbuf, const char * fmt)
  8143. {
  8144. const char *default_fmt =
  8145. "RD %d WR %d BASE(VA) %p BASE(PA) %x:%x SIZE %d "
  8146. "WORK_ITEM_SIZE %d MAX_WORK_ITEMS %d TOTAL_SIZE %d\n";
  8147. msgbuf_ring_t *flow_ring = (msgbuf_ring_t *)msgbuf_flow_info;
  8148. uint16 rd, wr;
  8149. uint32 dma_buf_len = flow_ring->max_items * flow_ring->item_len;
  8150. if (fmt == NULL) {
  8151. fmt = default_fmt;
  8152. }
  8153. if (dhd->bus->is_linkdown) {
  8154. DHD_ERROR(("%s: Skip dumping flowring due to Link down\n", __FUNCTION__));
  8155. return;
  8156. }
  8157. dhd_bus_cmn_readshared(dhd->bus, &rd, RING_RD_UPD, flow_ring->idx);
  8158. dhd_bus_cmn_readshared(dhd->bus, &wr, RING_WR_UPD, flow_ring->idx);
  8159. bcm_bprintf(strbuf, fmt, rd, wr, flow_ring->dma_buf.va,
  8160. ltoh32(flow_ring->base_addr.high_addr),
  8161. ltoh32(flow_ring->base_addr.low_addr),
  8162. flow_ring->item_len, flow_ring->max_items,
  8163. dma_buf_len);
  8164. }
  8165. void dhd_prot_print_info(dhd_pub_t *dhd, struct bcmstrbuf *strbuf)
  8166. {
  8167. dhd_prot_t *prot = dhd->prot;
  8168. bcm_bprintf(strbuf, "IPCrevs: Dev %d, \t Host %d, \tactive %d\n",
  8169. dhd->prot->device_ipc_version,
  8170. dhd->prot->host_ipc_version,
  8171. dhd->prot->active_ipc_version);
  8172. bcm_bprintf(strbuf, "max Host TS bufs to post: %d, \t posted %d \n",
  8173. dhd->prot->max_tsbufpost, dhd->prot->cur_ts_bufs_posted);
  8174. bcm_bprintf(strbuf, "max INFO bufs to post: %d, \t posted %d \n",
  8175. dhd->prot->max_infobufpost, dhd->prot->infobufpost);
  8176. bcm_bprintf(strbuf, "max event bufs to post: %d, \t posted %d \n",
  8177. dhd->prot->max_eventbufpost, dhd->prot->cur_event_bufs_posted);
  8178. bcm_bprintf(strbuf, "max ioctlresp bufs to post: %d, \t posted %d \n",
  8179. dhd->prot->max_ioctlrespbufpost, dhd->prot->cur_ioctlresp_bufs_posted);
  8180. bcm_bprintf(strbuf, "max RX bufs to post: %d, \t posted %d \n",
  8181. dhd->prot->max_rxbufpost, dhd->prot->rxbufpost);
  8182. bcm_bprintf(strbuf,
  8183. "%14s %5s %5s %17s %17s %14s %14s %10s\n",
  8184. "Type", "RD", "WR", "BASE(VA)", "BASE(PA)",
  8185. "WORK_ITEM_SIZE", "MAX_WORK_ITEMS", "TOTAL_SIZE");
  8186. bcm_bprintf(strbuf, "%14s", "H2DCtrlPost");
  8187. dhd_prot_print_flow_ring(dhd, &prot->h2dring_ctrl_subn, strbuf,
  8188. " %5d %5d %17p %8x:%8x %14d %14d %10d\n");
  8189. bcm_bprintf(strbuf, "%14s", "D2HCtrlCpl");
  8190. dhd_prot_print_flow_ring(dhd, &prot->d2hring_ctrl_cpln, strbuf,
  8191. " %5d %5d %17p %8x:%8x %14d %14d %10d\n");
  8192. bcm_bprintf(strbuf, "%14s", "H2DRxPost", prot->rxbufpost);
  8193. dhd_prot_print_flow_ring(dhd, &prot->h2dring_rxp_subn, strbuf,
  8194. " %5d %5d %17p %8x:%8x %14d %14d %10d\n");
  8195. bcm_bprintf(strbuf, "%14s", "D2HRxCpl");
  8196. dhd_prot_print_flow_ring(dhd, &prot->d2hring_rx_cpln, strbuf,
  8197. " %5d %5d %17p %8x:%8x %14d %14d %10d\n");
  8198. bcm_bprintf(strbuf, "%14s", "D2HTxCpl");
  8199. dhd_prot_print_flow_ring(dhd, &prot->d2hring_tx_cpln, strbuf,
  8200. " %5d %5d %17p %8x:%8x %14d %14d %10d\n");
  8201. if (dhd->prot->h2dring_info_subn != NULL && dhd->prot->d2hring_info_cpln != NULL) {
  8202. bcm_bprintf(strbuf, "%14s", "H2DRingInfoSub");
  8203. dhd_prot_print_flow_ring(dhd, prot->h2dring_info_subn, strbuf,
  8204. " %5d %5d %17p %8x:%8x %14d %14d %10d\n");
  8205. bcm_bprintf(strbuf, "%14s", "D2HRingInfoCpl");
  8206. dhd_prot_print_flow_ring(dhd, prot->d2hring_info_cpln, strbuf,
  8207. " %5d %5d %17p %8x:%8x %14d %14d %10d\n");
  8208. }
  8209. if (dhd->prot->d2hring_edl != NULL) {
  8210. bcm_bprintf(strbuf, "%14s", "D2HRingEDL");
  8211. dhd_prot_print_flow_ring(dhd, prot->d2hring_edl, strbuf,
  8212. " %5d %5d %17p %8x:%8x %14d %14d %10d\n");
  8213. }
  8214. bcm_bprintf(strbuf, "active_tx_count %d pktidmap_avail(ctrl/rx/tx) %d %d %d\n",
  8215. OSL_ATOMIC_READ(dhd->osh, &dhd->prot->active_tx_count),
  8216. DHD_PKTID_AVAIL(dhd->prot->pktid_ctrl_map),
  8217. DHD_PKTID_AVAIL(dhd->prot->pktid_rx_map),
  8218. DHD_PKTID_AVAIL(dhd->prot->pktid_tx_map));
  8219. }
  8220. int
  8221. dhd_prot_flow_ring_delete(dhd_pub_t *dhd, flow_ring_node_t *flow_ring_node)
  8222. {
  8223. tx_flowring_delete_request_t *flow_delete_rqst;
  8224. dhd_prot_t *prot = dhd->prot;
  8225. unsigned long flags;
  8226. uint16 alloced = 0;
  8227. msgbuf_ring_t *ring = &prot->h2dring_ctrl_subn;
  8228. DHD_RING_LOCK(ring->ring_lock, flags);
  8229. /* Request for ring buffer space */
  8230. flow_delete_rqst = (tx_flowring_delete_request_t *)
  8231. dhd_prot_alloc_ring_space(dhd, ring, 1, &alloced, FALSE);
  8232. if (flow_delete_rqst == NULL) {
  8233. DHD_RING_UNLOCK(ring->ring_lock, flags);
  8234. DHD_ERROR(("%s: Flow Delete Req - failure ring space\n", __FUNCTION__));
  8235. return BCME_NOMEM;
  8236. }
  8237. /* Common msg buf hdr */
  8238. flow_delete_rqst->msg.msg_type = MSG_TYPE_FLOW_RING_DELETE;
  8239. flow_delete_rqst->msg.if_id = (uint8)flow_ring_node->flow_info.ifindex;
  8240. flow_delete_rqst->msg.request_id = htol32(0); /* TBD */
  8241. flow_delete_rqst->msg.flags = ring->current_phase;
  8242. flow_delete_rqst->msg.epoch = ring->seqnum % H2D_EPOCH_MODULO;
  8243. ring->seqnum++;
  8244. /* Update Delete info */
  8245. flow_delete_rqst->flow_ring_id = htol16((uint16)flow_ring_node->flowid);
  8246. flow_delete_rqst->reason = htol16(BCME_OK);
  8247. DHD_ERROR(("%s: Send Flow Delete Req RING ID %d for peer " MACDBG
  8248. " prio %d ifindex %d\n", __FUNCTION__, flow_ring_node->flowid,
  8249. MAC2STRDBG(flow_ring_node->flow_info.da), flow_ring_node->flow_info.tid,
  8250. flow_ring_node->flow_info.ifindex));
  8251. /* update ring's WR index and ring doorbell to dongle */
  8252. dhd_prot_ring_write_complete(dhd, ring, flow_delete_rqst, 1);
  8253. DHD_RING_UNLOCK(ring->ring_lock, flags);
  8254. return BCME_OK;
  8255. }
  8256. static void BCMFASTPATH
  8257. dhd_prot_flow_ring_fastdelete(dhd_pub_t *dhd, uint16 flowid, uint16 rd_idx)
  8258. {
  8259. flow_ring_node_t *flow_ring_node = DHD_FLOW_RING(dhd, flowid);
  8260. msgbuf_ring_t *ring = (msgbuf_ring_t *)flow_ring_node->prot_info;
  8261. host_txbuf_cmpl_t txstatus;
  8262. host_txbuf_post_t *txdesc;
  8263. uint16 wr_idx;
  8264. DHD_INFO(("%s: FAST delete ring, flowid=%d, rd_idx=%d, wr_idx=%d\n",
  8265. __FUNCTION__, flowid, rd_idx, ring->wr));
  8266. memset(&txstatus, 0, sizeof(txstatus));
  8267. txstatus.compl_hdr.flow_ring_id = flowid;
  8268. txstatus.cmn_hdr.if_id = flow_ring_node->flow_info.ifindex;
  8269. wr_idx = ring->wr;
  8270. while (wr_idx != rd_idx) {
  8271. if (wr_idx)
  8272. wr_idx--;
  8273. else
  8274. wr_idx = ring->max_items - 1;
  8275. txdesc = (host_txbuf_post_t *)((char *)DHD_RING_BGN_VA(ring) +
  8276. (wr_idx * ring->item_len));
  8277. txstatus.cmn_hdr.request_id = txdesc->cmn_hdr.request_id;
  8278. dhd_prot_txstatus_process(dhd, &txstatus);
  8279. }
  8280. }
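/** Handle the dongle's flow ring delete response and notify the bus layer. */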
  8281. static void
  8282. dhd_prot_flow_ring_delete_response_process(dhd_pub_t *dhd, void *msg)
  8283. {
  8284. tx_flowring_delete_response_t *flow_delete_resp = (tx_flowring_delete_response_t *)msg;
  8285. DHD_ERROR(("%s: Flow Delete Response status = %d Flow %d\n", __FUNCTION__,
  8286. flow_delete_resp->cmplt.status, flow_delete_resp->cmplt.flow_ring_id));
  8287. if (dhd->fast_delete_ring_support) {
  8288. dhd_prot_flow_ring_fastdelete(dhd, flow_delete_resp->cmplt.flow_ring_id,
  8289. flow_delete_resp->read_idx);
  8290. }
  8291. dhd_bus_flow_ring_delete_response(dhd->bus, flow_delete_resp->cmplt.flow_ring_id,
  8292. flow_delete_resp->cmplt.status);
  8293. }
  8294. static void
  8295. dhd_prot_process_flow_ring_resume_response(dhd_pub_t *dhd, void* msg)
  8296. {
  8297. #ifdef IDLE_TX_FLOW_MGMT
  8298. tx_idle_flowring_resume_response_t *flow_resume_resp =
  8299. (tx_idle_flowring_resume_response_t *)msg;
  8300. DHD_ERROR(("%s Flow resume Response status = %d Flow %d\n", __FUNCTION__,
  8301. flow_resume_resp->cmplt.status, flow_resume_resp->cmplt.flow_ring_id));
  8302. dhd_bus_flow_ring_resume_response(dhd->bus, flow_resume_resp->cmplt.flow_ring_id,
  8303. flow_resume_resp->cmplt.status);
  8304. #endif /* IDLE_TX_FLOW_MGMT */
  8305. }
  8306. static void
  8307. dhd_prot_process_flow_ring_suspend_response(dhd_pub_t *dhd, void* msg)
  8308. {
  8309. #ifdef IDLE_TX_FLOW_MGMT
  8310. int16 status;
  8311. tx_idle_flowring_suspend_response_t *flow_suspend_resp =
  8312. (tx_idle_flowring_suspend_response_t *)msg;
  8313. status = flow_suspend_resp->cmplt.status;
  8314. DHD_ERROR(("%s Flow id %d suspend Response status = %d\n",
  8315. __FUNCTION__, flow_suspend_resp->cmplt.flow_ring_id,
  8316. status));
  8317. if (status != BCME_OK) {
8318. DHD_ERROR(("%s Error in Suspending Flow rings!! "
8319. "Dongle will still be polling idle rings!! Status = %d\n",
  8320. __FUNCTION__, status));
  8321. }
  8322. #endif /* IDLE_TX_FLOW_MGMT */
  8323. }
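/**
* dhd_prot_flow_ring_flush - post a MSG_TYPE_FLOW_RING_FLUSH request for the given
* flow ring node on the H2D control submission ring. Returns BCME_NOMEM when the
* control ring is full, BCME_OK otherwise.
*/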
  8324. int
  8325. dhd_prot_flow_ring_flush(dhd_pub_t *dhd, flow_ring_node_t *flow_ring_node)
  8326. {
  8327. tx_flowring_flush_request_t *flow_flush_rqst;
  8328. dhd_prot_t *prot = dhd->prot;
  8329. unsigned long flags;
  8330. uint16 alloced = 0;
  8331. msgbuf_ring_t *ring = &prot->h2dring_ctrl_subn;
  8332. DHD_RING_LOCK(ring->ring_lock, flags);
  8333. /* Request for ring buffer space */
  8334. flow_flush_rqst = (tx_flowring_flush_request_t *)
  8335. dhd_prot_alloc_ring_space(dhd, ring, 1, &alloced, FALSE);
  8336. if (flow_flush_rqst == NULL) {
  8337. DHD_RING_UNLOCK(ring->ring_lock, flags);
  8338. DHD_ERROR(("%s: Flow Flush Req - failure ring space\n", __FUNCTION__));
  8339. return BCME_NOMEM;
  8340. }
  8341. /* Common msg buf hdr */
  8342. flow_flush_rqst->msg.msg_type = MSG_TYPE_FLOW_RING_FLUSH;
  8343. flow_flush_rqst->msg.if_id = (uint8)flow_ring_node->flow_info.ifindex;
  8344. flow_flush_rqst->msg.request_id = htol32(0); /* TBD */
  8345. flow_flush_rqst->msg.flags = ring->current_phase;
  8346. flow_flush_rqst->msg.epoch = ring->seqnum % H2D_EPOCH_MODULO;
  8347. ring->seqnum++;
  8348. flow_flush_rqst->flow_ring_id = htol16((uint16)flow_ring_node->flowid);
  8349. flow_flush_rqst->reason = htol16(BCME_OK);
  8350. DHD_INFO(("%s: Send Flow Flush Req\n", __FUNCTION__));
  8351. /* update ring's WR index and ring doorbell to dongle */
  8352. dhd_prot_ring_write_complete(dhd, ring, flow_flush_rqst, 1);
  8353. DHD_RING_UNLOCK(ring->ring_lock, flags);
  8354. return BCME_OK;
  8355. } /* dhd_prot_flow_ring_flush */
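/** Handle the dongle's flow ring flush response and notify the bus layer. */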
  8356. static void
  8357. dhd_prot_flow_ring_flush_response_process(dhd_pub_t *dhd, void *msg)
  8358. {
  8359. tx_flowring_flush_response_t *flow_flush_resp = (tx_flowring_flush_response_t *)msg;
  8360. DHD_INFO(("%s: Flow Flush Response status = %d\n", __FUNCTION__,
  8361. flow_flush_resp->cmplt.status));
  8362. dhd_bus_flow_ring_flush_response(dhd->bus, flow_flush_resp->cmplt.flow_ring_id,
  8363. flow_flush_resp->cmplt.status);
  8364. }
  8365. /**
  8366. * Request dongle to configure soft doorbells for D2H rings. Host populated soft
  8367. * doorbell information is transferred to dongle via the d2h ring config control
  8368. * message.
  8369. */
  8370. void
  8371. dhd_msgbuf_ring_config_d2h_soft_doorbell(dhd_pub_t *dhd)
  8372. {
  8373. #if defined(DHD_D2H_SOFT_DOORBELL_SUPPORT)
  8374. uint16 ring_idx;
  8375. uint8 *msg_next;
  8376. void *msg_start;
  8377. uint16 alloced = 0;
  8378. unsigned long flags;
  8379. dhd_prot_t *prot = dhd->prot;
  8380. ring_config_req_t *ring_config_req;
  8381. bcmpcie_soft_doorbell_t *soft_doorbell;
  8382. msgbuf_ring_t *ctrl_ring = &prot->h2dring_ctrl_subn;
  8383. const uint16 d2h_rings = BCMPCIE_D2H_COMMON_MSGRINGS;
  8384. /* Claim space for d2h_ring number of d2h_ring_config_req_t messages */
  8385. DHD_RING_LOCK(ctrl_ring->ring_lock, flags);
  8386. msg_start = dhd_prot_alloc_ring_space(dhd, ctrl_ring, d2h_rings, &alloced, TRUE);
  8387. if (msg_start == NULL) {
  8388. DHD_ERROR(("%s Msgbuf no space for %d D2H ring config soft doorbells\n",
  8389. __FUNCTION__, d2h_rings));
  8390. DHD_RING_UNLOCK(ctrl_ring->ring_lock, flags);
  8391. return;
  8392. }
  8393. msg_next = (uint8*)msg_start;
  8394. for (ring_idx = 0; ring_idx < d2h_rings; ring_idx++) {
  8395. /* position the ring_config_req into the ctrl subm ring */
  8396. ring_config_req = (ring_config_req_t *)msg_next;
  8397. /* Common msg header */
  8398. ring_config_req->msg.msg_type = MSG_TYPE_D2H_RING_CONFIG;
  8399. ring_config_req->msg.if_id = 0;
  8400. ring_config_req->msg.flags = 0;
  8401. ring_config_req->msg.epoch = ctrl_ring->seqnum % H2D_EPOCH_MODULO;
  8402. ctrl_ring->seqnum++;
  8403. ring_config_req->msg.request_id = htol32(DHD_FAKE_PKTID); /* unused */
  8404. /* Ring Config subtype and d2h ring_id */
  8405. ring_config_req->subtype = htol16(D2H_RING_CONFIG_SUBTYPE_SOFT_DOORBELL);
  8406. ring_config_req->ring_id = htol16(DHD_D2H_RINGID(ring_idx));
  8407. /* Host soft doorbell configuration */
  8408. soft_doorbell = &prot->soft_doorbell[ring_idx];
  8409. ring_config_req->soft_doorbell.value = htol32(soft_doorbell->value);
  8410. ring_config_req->soft_doorbell.haddr.high =
  8411. htol32(soft_doorbell->haddr.high);
  8412. ring_config_req->soft_doorbell.haddr.low =
  8413. htol32(soft_doorbell->haddr.low);
  8414. ring_config_req->soft_doorbell.items = htol16(soft_doorbell->items);
  8415. ring_config_req->soft_doorbell.msecs = htol16(soft_doorbell->msecs);
  8416. DHD_INFO(("%s: Soft doorbell haddr 0x%08x 0x%08x value 0x%08x\n",
  8417. __FUNCTION__, ring_config_req->soft_doorbell.haddr.high,
  8418. ring_config_req->soft_doorbell.haddr.low,
  8419. ring_config_req->soft_doorbell.value));
  8420. msg_next = msg_next + ctrl_ring->item_len;
  8421. }
  8422. /* update control subn ring's WR index and ring doorbell to dongle */
  8423. dhd_prot_ring_write_complete(dhd, ctrl_ring, msg_start, d2h_rings);
  8424. DHD_RING_UNLOCK(ctrl_ring->ring_lock, flags);
  8425. #endif /* DHD_D2H_SOFT_DOORBELL_SUPPORT */
  8426. }
  8427. static void
  8428. dhd_prot_process_d2h_ring_config_complete(dhd_pub_t *dhd, void *msg)
  8429. {
  8430. DHD_INFO(("%s: Ring Config Response - status %d ringid %d\n",
  8431. __FUNCTION__, ltoh16(((ring_config_resp_t *)msg)->compl_hdr.status),
  8432. ltoh16(((ring_config_resp_t *)msg)->compl_hdr.flow_ring_id)));
  8433. }
  8434. #ifdef WL_CFGVENDOR_SEND_HANG_EVENT
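/**
* copy_ext_trap_sig - extract the TAG_TRAP_SIGNATURE TLV from the extended trap data
* (which follows the original trap word) into the caller-supplied trap_t.
*/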
  8435. void
  8436. copy_ext_trap_sig(dhd_pub_t *dhd, trap_t *tr)
  8437. {
  8438. uint32 *ext_data = dhd->extended_trap_data;
  8439. hnd_ext_trap_hdr_t *hdr;
  8440. const bcm_tlv_t *tlv;
  8441. if (ext_data == NULL) {
  8442. return;
  8443. }
  8444. /* First word is original trap_data */
  8445. ext_data++;
  8446. /* Followed by the extended trap data header */
  8447. hdr = (hnd_ext_trap_hdr_t *)ext_data;
  8448. tlv = bcm_parse_tlvs(hdr->data, hdr->len, TAG_TRAP_SIGNATURE);
  8449. if (tlv) {
  8450. memcpy(tr, &tlv->data, sizeof(struct _trap_struct));
  8451. }
  8452. }
  8453. #define TRAP_T_NAME_OFFSET(var) {#var, OFFSETOF(trap_t, var)}
  8454. typedef struct {
  8455. char name[HANG_INFO_TRAP_T_NAME_MAX];
  8456. uint32 offset;
  8457. } hang_info_trap_t;
  8458. #ifdef DHD_EWPR_VER2
  8459. static hang_info_trap_t hang_info_trap_tbl[] = {
  8460. {"reason", 0},
  8461. {"ver", VENDOR_SEND_HANG_EXT_INFO_VER},
  8462. {"stype", 0},
  8463. TRAP_T_NAME_OFFSET(type),
  8464. TRAP_T_NAME_OFFSET(epc),
  8465. {"resrvd", 0},
  8466. {"resrvd", 0},
  8467. {"resrvd", 0},
  8468. {"resrvd", 0},
  8469. {"", 0}
  8470. };
  8471. #else
  8472. static hang_info_trap_t hang_info_trap_tbl[] = {
  8473. {"reason", 0},
  8474. {"ver", VENDOR_SEND_HANG_EXT_INFO_VER},
  8475. {"stype", 0},
  8476. TRAP_T_NAME_OFFSET(type),
  8477. TRAP_T_NAME_OFFSET(epc),
  8478. TRAP_T_NAME_OFFSET(cpsr),
  8479. TRAP_T_NAME_OFFSET(spsr),
  8480. TRAP_T_NAME_OFFSET(r0),
  8481. TRAP_T_NAME_OFFSET(r1),
  8482. TRAP_T_NAME_OFFSET(r2),
  8483. TRAP_T_NAME_OFFSET(r3),
  8484. TRAP_T_NAME_OFFSET(r4),
  8485. TRAP_T_NAME_OFFSET(r5),
  8486. TRAP_T_NAME_OFFSET(r6),
  8487. TRAP_T_NAME_OFFSET(r7),
  8488. TRAP_T_NAME_OFFSET(r8),
  8489. TRAP_T_NAME_OFFSET(r9),
  8490. TRAP_T_NAME_OFFSET(r10),
  8491. TRAP_T_NAME_OFFSET(r11),
  8492. TRAP_T_NAME_OFFSET(r12),
  8493. TRAP_T_NAME_OFFSET(r13),
  8494. TRAP_T_NAME_OFFSET(r14),
  8495. TRAP_T_NAME_OFFSET(pc),
  8496. {"", 0}
  8497. };
  8498. #endif /* DHD_EWPR_VER2 */
  8499. #define TAG_TRAP_IS_STATE(tag) \
  8500. ((tag == TAG_TRAP_MEMORY) || (tag == TAG_TRAP_PCIE_Q) || \
  8501. (tag == TAG_TRAP_WLC_STATE) || (tag == TAG_TRAP_LOG_DATA) || \
  8502. (tag == TAG_TRAP_CODE))
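/**
* copy_hang_info_head - write the leading hang-info fields (hang reason, hang info
* version, the debug-dump-time cookie, the trap subtype and the trap EPC) into the
* hang info buffer as HANG_KEY_DEL-separated tokens, optionally prefixing each value
* with its name when field_name is set.
*/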
  8503. static void
  8504. copy_hang_info_head(char *dest, trap_t *src, int len, int field_name,
  8505. int *bytes_written, int *cnt, char *cookie)
  8506. {
  8507. uint8 *ptr;
  8508. int remain_len;
  8509. int i;
  8510. ptr = (uint8 *)src;
  8511. memset(dest, 0, len);
  8512. remain_len = VENDOR_SEND_HANG_EXT_INFO_LEN - *bytes_written;
  8513. /* hang reason, hang info ver */
  8514. for (i = 0; (i < HANG_INFO_TRAP_T_SUBTYPE_IDX) && (*cnt < HANG_FIELD_CNT_MAX);
  8515. i++, (*cnt)++) {
  8516. if (field_name) {
  8517. remain_len = VENDOR_SEND_HANG_EXT_INFO_LEN - *bytes_written;
  8518. *bytes_written += scnprintf(&dest[*bytes_written], remain_len, "%s:%c",
  8519. hang_info_trap_tbl[i].name, HANG_KEY_DEL);
  8520. }
  8521. remain_len = VENDOR_SEND_HANG_EXT_INFO_LEN - *bytes_written;
  8522. *bytes_written += scnprintf(&dest[*bytes_written], remain_len, "%d%c",
  8523. hang_info_trap_tbl[i].offset, HANG_KEY_DEL);
  8524. }
  8525. if (*cnt < HANG_FIELD_CNT_MAX) {
  8526. if (field_name) {
  8527. remain_len = VENDOR_SEND_HANG_EXT_INFO_LEN - *bytes_written;
  8528. *bytes_written += scnprintf(&dest[*bytes_written], remain_len, "%s:%c",
  8529. "cookie", HANG_KEY_DEL);
  8530. }
  8531. remain_len = VENDOR_SEND_HANG_EXT_INFO_LEN - *bytes_written;
  8532. *bytes_written += scnprintf(&dest[*bytes_written], remain_len, "%s%c",
  8533. cookie, HANG_KEY_DEL);
  8534. (*cnt)++;
  8535. }
  8536. if (*cnt < HANG_FIELD_CNT_MAX) {
  8537. if (field_name) {
  8538. remain_len = VENDOR_SEND_HANG_EXT_INFO_LEN - *bytes_written;
  8539. *bytes_written += scnprintf(&dest[*bytes_written], remain_len, "%s:%c",
  8540. hang_info_trap_tbl[HANG_INFO_TRAP_T_SUBTYPE_IDX].name,
  8541. HANG_KEY_DEL);
  8542. }
  8543. remain_len = VENDOR_SEND_HANG_EXT_INFO_LEN - *bytes_written;
  8544. *bytes_written += scnprintf(&dest[*bytes_written], remain_len, "%08x%c",
  8545. hang_info_trap_tbl[HANG_INFO_TRAP_T_SUBTYPE_IDX].offset,
  8546. HANG_KEY_DEL);
  8547. (*cnt)++;
  8548. }
  8549. if (*cnt < HANG_FIELD_CNT_MAX) {
  8550. if (field_name) {
  8551. remain_len = VENDOR_SEND_HANG_EXT_INFO_LEN - *bytes_written;
  8552. *bytes_written += scnprintf(&dest[*bytes_written], remain_len, "%s:%c",
  8553. hang_info_trap_tbl[HANG_INFO_TRAP_T_EPC_IDX].name,
  8554. HANG_KEY_DEL);
  8555. }
  8556. remain_len = VENDOR_SEND_HANG_EXT_INFO_LEN - *bytes_written;
  8557. *bytes_written += scnprintf(&dest[*bytes_written], remain_len, "%08x%c",
  8558. *(uint32 *)
  8559. (ptr + hang_info_trap_tbl[HANG_INFO_TRAP_T_EPC_IDX].offset),
  8560. HANG_KEY_DEL);
  8561. (*cnt)++;
  8562. }
  8563. #ifdef DHD_EWPR_VER2
  8564. /* put 0 for HG03 ~ HG06 (reserved for future use) */
  8565. for (i = 0; (i < HANG_INFO_BIGDATA_EXTRA_KEY) && (*cnt < HANG_FIELD_CNT_MAX);
  8566. i++, (*cnt)++) {
  8567. if (field_name) {
  8568. remain_len = VENDOR_SEND_HANG_EXT_INFO_LEN - *bytes_written;
  8569. *bytes_written += scnprintf(&dest[*bytes_written], remain_len, "%s:%c",
  8570. hang_info_trap_tbl[HANG_INFO_TRAP_T_EXTRA_KEY_IDX+i].name,
  8571. HANG_KEY_DEL);
  8572. }
  8573. remain_len = VENDOR_SEND_HANG_EXT_INFO_LEN - *bytes_written;
  8574. *bytes_written += scnprintf(&dest[*bytes_written], remain_len, "%d%c",
  8575. hang_info_trap_tbl[HANG_INFO_TRAP_T_EXTRA_KEY_IDX+i].offset,
  8576. HANG_KEY_DEL);
  8577. }
  8578. #endif /* DHD_EWPR_VER2 */
  8579. }
  8580. #ifndef DHD_EWPR_VER2
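/**
* copy_hang_info_trap_t - append the remaining trap_t register values (cpsr, spsr,
* r0..r14, pc) to the hang info buffer as HANG_RAW_DEL-separated hex words, using the
* offsets recorded in hang_info_trap_tbl.
*/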
  8581. static void
  8582. copy_hang_info_trap_t(char *dest, trap_t *src, int len, int field_name,
  8583. int *bytes_written, int *cnt, char *cookie)
  8584. {
  8585. uint8 *ptr;
  8586. int remain_len;
  8587. int i;
  8588. ptr = (uint8 *)src;
  8589. remain_len = VENDOR_SEND_HANG_EXT_INFO_LEN - *bytes_written;
  8590. for (i = HANG_INFO_TRAP_T_OFFSET_IDX;
  8591. (hang_info_trap_tbl[i].name[0] != 0) && (*cnt < HANG_FIELD_CNT_MAX);
  8592. i++, (*cnt)++) {
  8593. if (field_name) {
  8594. remain_len = VENDOR_SEND_HANG_EXT_INFO_LEN - *bytes_written;
  8595. *bytes_written += scnprintf(&dest[*bytes_written], remain_len, "%c%s:",
  8596. HANG_RAW_DEL, hang_info_trap_tbl[i].name);
  8597. }
  8598. remain_len = VENDOR_SEND_HANG_EXT_INFO_LEN - *bytes_written;
  8599. *bytes_written += scnprintf(&dest[*bytes_written], remain_len, "%c%08x",
  8600. HANG_RAW_DEL, *(uint32 *)(ptr + hang_info_trap_tbl[i].offset));
  8601. }
  8602. }
  8603. /* Ignore compiler warnings due to -Werror=cast-qual */
  8604. #if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__)
  8605. #pragma GCC diagnostic push
  8606. #pragma GCC diagnostic ignored "-Wcast-qual"
  8607. #endif // endif
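/**
* copy_hang_info_stack - append the TAG_TRAP_STACK TLV (dongle stack dump) from the
* extended trap data to the hang info buffer, padding with zero words when fewer than
* HANG_FIELD_TRAP_T_STACK_CNT_MAX stack entries are available.
*/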
  8608. static void
  8609. copy_hang_info_stack(dhd_pub_t *dhd, char *dest, int *bytes_written, int *cnt)
  8610. {
  8611. int remain_len;
  8612. int i = 0;
  8613. const uint32 *stack;
  8614. uint32 *ext_data = dhd->extended_trap_data;
  8615. hnd_ext_trap_hdr_t *hdr;
  8616. const bcm_tlv_t *tlv;
  8617. int remain_stack_cnt = 0;
  8618. uint32 dummy_data = 0;
  8619. int bigdata_key_stack_cnt = 0;
  8620. if (ext_data == NULL) {
  8621. return;
  8622. }
  8623. /* First word is original trap_data */
  8624. ext_data++;
  8625. /* Followed by the extended trap data header */
  8626. hdr = (hnd_ext_trap_hdr_t *)ext_data;
  8627. tlv = bcm_parse_tlvs(hdr->data, hdr->len, TAG_TRAP_STACK);
  8628. remain_len = VENDOR_SEND_HANG_EXT_INFO_LEN - *bytes_written;
  8629. if (tlv) {
  8630. stack = (const uint32 *)tlv->data;
  8631. *bytes_written += scnprintf(&dest[*bytes_written], remain_len,
  8632. "%08x", *(uint32 *)(stack++));
  8633. (*cnt)++;
  8634. if (*cnt >= HANG_FIELD_CNT_MAX) {
  8635. return;
  8636. }
  8637. for (i = 1; i < (uint32)(tlv->len / sizeof(uint32)); i++, bigdata_key_stack_cnt++) {
  8638. remain_len = VENDOR_SEND_HANG_EXT_INFO_LEN - *bytes_written;
8639. /* Raw data for bigdata uses '_' and key data for bigdata uses a space */
  8640. *bytes_written += scnprintf(&dest[*bytes_written], remain_len,
  8641. "%c%08x",
  8642. i <= HANG_INFO_BIGDATA_KEY_STACK_CNT ? HANG_KEY_DEL : HANG_RAW_DEL,
  8643. *(uint32 *)(stack++));
  8644. (*cnt)++;
  8645. if ((*cnt >= HANG_FIELD_CNT_MAX) ||
  8646. (i >= HANG_FIELD_TRAP_T_STACK_CNT_MAX)) {
  8647. return;
  8648. }
  8649. }
  8650. }
  8651. remain_stack_cnt = HANG_FIELD_TRAP_T_STACK_CNT_MAX - i;
  8652. for (i = 0; i < remain_stack_cnt; i++) {
  8653. remain_len = VENDOR_SEND_HANG_EXT_INFO_LEN - *bytes_written;
  8654. *bytes_written += scnprintf(&dest[*bytes_written], remain_len, "%c%08x",
  8655. HANG_RAW_DEL, dummy_data);
  8656. (*cnt)++;
  8657. if (*cnt >= HANG_FIELD_CNT_MAX) {
  8658. return;
  8659. }
  8660. }
  8661. }
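/**
* copy_hang_info_specific - append whatever extended trap data remains after the
* signature and stack TLVs to the hang info buffer as raw hex words, flushing any
* trailing partial word through a small staging buffer.
*/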
  8662. static void
  8663. copy_hang_info_specific(dhd_pub_t *dhd, char *dest, int *bytes_written, int *cnt)
  8664. {
  8665. int remain_len;
  8666. int i;
  8667. const uint32 *data;
  8668. uint32 *ext_data = dhd->extended_trap_data;
  8669. hnd_ext_trap_hdr_t *hdr;
  8670. const bcm_tlv_t *tlv;
  8671. int remain_trap_data = 0;
  8672. uint8 buf_u8[sizeof(uint32)] = { 0, };
  8673. const uint8 *p_u8;
  8674. if (ext_data == NULL) {
  8675. return;
  8676. }
  8677. /* First word is original trap_data */
  8678. ext_data++;
  8679. /* Followed by the extended trap data header */
  8680. hdr = (hnd_ext_trap_hdr_t *)ext_data;
  8681. tlv = bcm_parse_tlvs(hdr->data, hdr->len, TAG_TRAP_SIGNATURE);
  8682. if (tlv) {
8683. /* header length includes the TLV header */
  8684. remain_trap_data = (hdr->len - tlv->len - sizeof(uint16));
  8685. }
  8686. tlv = bcm_parse_tlvs(hdr->data, hdr->len, TAG_TRAP_STACK);
  8687. if (tlv) {
8688. /* header length includes the TLV header */
  8689. remain_trap_data -= (tlv->len + sizeof(uint16));
  8690. }
  8691. data = (const uint32 *)(hdr->data + (hdr->len - remain_trap_data));
  8692. remain_len = VENDOR_SEND_HANG_EXT_INFO_LEN - *bytes_written;
  8693. for (i = 0; i < (uint32)(remain_trap_data / sizeof(uint32)) && *cnt < HANG_FIELD_CNT_MAX;
  8694. i++, (*cnt)++) {
  8695. remain_len = VENDOR_SEND_HANG_EXT_INFO_LEN - *bytes_written;
  8696. *bytes_written += scnprintf(&dest[*bytes_written], remain_len, "%c%08x",
  8697. HANG_RAW_DEL, *(uint32 *)(data++));
  8698. }
  8699. if (*cnt >= HANG_FIELD_CNT_MAX) {
  8700. return;
  8701. }
  8702. remain_trap_data -= (sizeof(uint32) * i);
  8703. if (remain_trap_data > sizeof(buf_u8)) {
  8704. DHD_ERROR(("%s: resize remain_trap_data\n", __FUNCTION__));
  8705. remain_trap_data = sizeof(buf_u8);
  8706. }
  8707. if (remain_trap_data) {
  8708. p_u8 = (const uint8 *)data;
  8709. for (i = 0; i < remain_trap_data; i++) {
  8710. buf_u8[i] = *(const uint8 *)(p_u8++);
  8711. }
  8712. remain_len = VENDOR_SEND_HANG_EXT_INFO_LEN - *bytes_written;
  8713. *bytes_written += scnprintf(&dest[*bytes_written], remain_len, "%c%08x",
  8714. HANG_RAW_DEL, ltoh32_ua(buf_u8));
  8715. (*cnt)++;
  8716. }
  8717. }
  8718. #endif /* DHD_EWPR_VER2 */
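/**
* get_hang_info_trap_subtype - scan the extended trap data TLVs and report the first
* tag that is not a pure state dump (see TAG_TRAP_IS_STATE) as the trap subtype for
* the hang event.
*/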
  8719. static void
  8720. get_hang_info_trap_subtype(dhd_pub_t *dhd, uint32 *subtype)
  8721. {
  8722. uint32 i;
  8723. uint32 *ext_data = dhd->extended_trap_data;
  8724. hnd_ext_trap_hdr_t *hdr;
  8725. const bcm_tlv_t *tlv;
  8726. /* First word is original trap_data */
  8727. ext_data++;
  8728. /* Followed by the extended trap data header */
  8729. hdr = (hnd_ext_trap_hdr_t *)ext_data;
8730. /* Scan the tags and pick the first one that is not a pure state dump */
  8731. for (i = TAG_TRAP_DEEPSLEEP; i < TAG_TRAP_LAST; i++) {
  8732. tlv = bcm_parse_tlvs(hdr->data, hdr->len, i);
  8733. if (tlv) {
  8734. if (!TAG_TRAP_IS_STATE(i)) {
  8735. *subtype = i;
  8736. return;
  8737. }
  8738. }
  8739. }
  8740. }
  8741. #ifdef DHD_EWPR_VER2
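/**
* copy_hang_info_etd_base64 - base64-encode the extended trap data blob and append it
* to the hang info buffer, limiting the encoded output to the space remaining in the
* vendor hang event.
*/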
  8742. static void
  8743. copy_hang_info_etd_base64(dhd_pub_t *dhd, char *dest, int *bytes_written, int *cnt)
  8744. {
  8745. int remain_len;
  8746. uint32 *ext_data = dhd->extended_trap_data;
  8747. hnd_ext_trap_hdr_t *hdr;
  8748. char *base64_out = NULL;
  8749. int base64_cnt;
  8750. int max_base64_len = HANG_INFO_BASE64_BUFFER_SIZE;
  8751. if (ext_data == NULL) {
  8752. return;
  8753. }
  8754. /* First word is original trap_data */
  8755. ext_data++;
  8756. /* Followed by the extended trap data header */
  8757. hdr = (hnd_ext_trap_hdr_t *)ext_data;
  8758. remain_len = VENDOR_SEND_HANG_EXT_INFO_LEN - *bytes_written;
  8759. if (remain_len <= 0) {
  8760. DHD_ERROR(("%s: no space to put etd\n", __FUNCTION__));
  8761. return;
  8762. }
  8763. if (remain_len < max_base64_len) {
  8764. DHD_ERROR(("%s: change max base64 length to remain length %d\n", __FUNCTION__,
  8765. remain_len));
  8766. max_base64_len = remain_len;
  8767. }
  8768. base64_out = MALLOCZ(dhd->osh, HANG_INFO_BASE64_BUFFER_SIZE);
  8769. if (base64_out == NULL) {
  8770. DHD_ERROR(("%s: MALLOC failed for size %d\n",
  8771. __FUNCTION__, HANG_INFO_BASE64_BUFFER_SIZE));
  8772. return;
  8773. }
  8774. if (hdr->len > 0) {
  8775. base64_cnt = dhd_base64_encode(hdr->data, hdr->len, base64_out, max_base64_len);
  8776. if (base64_cnt == 0) {
  8777. DHD_ERROR(("%s: base64 encoding error\n", __FUNCTION__));
  8778. }
  8779. }
  8780. *bytes_written += scnprintf(&dest[*bytes_written], remain_len, "%s",
  8781. base64_out);
  8782. (*cnt)++;
  8783. MFREE(dhd->osh, base64_out, HANG_INFO_BASE64_BUFFER_SIZE);
  8784. }
  8785. #endif /* DHD_EWPR_VER2 */
  8786. #if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__)
  8787. #pragma GCC diagnostic pop
  8788. #endif // endif
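/**
* copy_hang_info_trap - top-level builder of the vendor hang event payload for a
* dongle trap: gathers the trap signature and subtype, then fills dhd->hang_info with
* the head fields followed by either the base64-encoded extended trap data
* (DHD_EWPR_VER2) or the stack, trap_t registers and remaining trap TLVs.
*/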
  8789. void
  8790. copy_hang_info_trap(dhd_pub_t *dhd)
  8791. {
  8792. trap_t tr;
  8793. int bytes_written;
  8794. int trap_subtype = 0;
  8795. if (!dhd || !dhd->hang_info) {
  8796. DHD_ERROR(("%s dhd=%p hang_info=%p\n", __FUNCTION__,
  8797. dhd, (dhd ? dhd->hang_info : NULL)));
  8798. return;
  8799. }
  8800. if (!dhd->dongle_trap_occured) {
  8801. DHD_ERROR(("%s: dongle_trap_occured is FALSE\n", __FUNCTION__));
  8802. return;
  8803. }
  8804. memset(&tr, 0x00, sizeof(struct _trap_struct));
  8805. copy_ext_trap_sig(dhd, &tr);
  8806. get_hang_info_trap_subtype(dhd, &trap_subtype);
  8807. hang_info_trap_tbl[HANG_INFO_TRAP_T_REASON_IDX].offset = HANG_REASON_DONGLE_TRAP;
  8808. hang_info_trap_tbl[HANG_INFO_TRAP_T_SUBTYPE_IDX].offset = trap_subtype;
  8809. bytes_written = 0;
  8810. dhd->hang_info_cnt = 0;
  8811. get_debug_dump_time(dhd->debug_dump_time_hang_str);
  8812. copy_debug_dump_time(dhd->debug_dump_time_str, dhd->debug_dump_time_hang_str);
  8813. copy_hang_info_head(dhd->hang_info, &tr, VENDOR_SEND_HANG_EXT_INFO_LEN, FALSE,
  8814. &bytes_written, &dhd->hang_info_cnt, dhd->debug_dump_time_hang_str);
  8815. DHD_INFO(("hang info haed cnt: %d len: %d data: %s\n",
  8816. dhd->hang_info_cnt, (int)strlen(dhd->hang_info), dhd->hang_info));
  8817. clear_debug_dump_time(dhd->debug_dump_time_hang_str);
  8818. #ifdef DHD_EWPR_VER2
  8819. /* stack info & trap info are included in etd data */
  8820. /* extended trap data dump */
  8821. if (dhd->hang_info_cnt < HANG_FIELD_CNT_MAX) {
  8822. copy_hang_info_etd_base64(dhd, dhd->hang_info, &bytes_written, &dhd->hang_info_cnt);
  8823. DHD_INFO(("hang info specific cnt: %d len: %d data: %s\n",
  8824. dhd->hang_info_cnt, (int)strlen(dhd->hang_info), dhd->hang_info));
  8825. }
  8826. #else
  8827. if (dhd->hang_info_cnt < HANG_FIELD_CNT_MAX) {
  8828. copy_hang_info_stack(dhd, dhd->hang_info, &bytes_written, &dhd->hang_info_cnt);
  8829. DHD_INFO(("hang info stack cnt: %d len: %d data: %s\n",
  8830. dhd->hang_info_cnt, (int)strlen(dhd->hang_info), dhd->hang_info));
  8831. }
  8832. if (dhd->hang_info_cnt < HANG_FIELD_CNT_MAX) {
  8833. copy_hang_info_trap_t(dhd->hang_info, &tr, VENDOR_SEND_HANG_EXT_INFO_LEN, FALSE,
  8834. &bytes_written, &dhd->hang_info_cnt, dhd->debug_dump_time_hang_str);
  8835. DHD_INFO(("hang info trap_t cnt: %d len: %d data: %s\n",
  8836. dhd->hang_info_cnt, (int)strlen(dhd->hang_info), dhd->hang_info));
  8837. }
  8838. if (dhd->hang_info_cnt < HANG_FIELD_CNT_MAX) {
  8839. copy_hang_info_specific(dhd, dhd->hang_info, &bytes_written, &dhd->hang_info_cnt);
  8840. DHD_INFO(("hang info specific cnt: %d len: %d data: %s\n",
  8841. dhd->hang_info_cnt, (int)strlen(dhd->hang_info), dhd->hang_info));
  8842. }
  8843. #endif /* DHD_EWPR_VER2 */
  8844. }
  8845. #endif /* WL_CFGVENDOR_SEND_HANG_EVENT */
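/**
* dhd_prot_debug_info_print - dump version, IPC revision, buffer-post and ioctl timing
* information plus the RD/WR state of every common msgbuf ring (from host memory and,
* when the PCIe link is up, from the dongle's shared memory), then dump the PCIe debug
* registers. Always returns 0.
*/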
  8846. int
  8847. dhd_prot_debug_info_print(dhd_pub_t *dhd)
  8848. {
  8849. dhd_prot_t *prot = dhd->prot;
  8850. msgbuf_ring_t *ring;
  8851. uint16 rd, wr;
  8852. uint32 dma_buf_len;
  8853. uint64 current_time;
  8854. ulong ring_tcm_rd_addr; /* dongle address */
  8855. ulong ring_tcm_wr_addr; /* dongle address */
  8856. DHD_ERROR(("\n ------- DUMPING VERSION INFORMATION ------- \r\n"));
  8857. DHD_ERROR(("DHD: %s\n", dhd_version));
  8858. DHD_ERROR(("Firmware: %s\n", fw_version));
  8859. DHD_ERROR(("\n ------- DUMPING PROTOCOL INFORMATION ------- \r\n"));
  8860. DHD_ERROR(("ICPrevs: Dev %d, Host %d, active %d\n",
  8861. prot->device_ipc_version,
  8862. prot->host_ipc_version,
  8863. prot->active_ipc_version));
  8864. DHD_ERROR(("d2h_intr_method -> %s\n",
  8865. dhd->bus->d2h_intr_method ? "PCIE_MSI" : "PCIE_INTX"));
  8866. DHD_ERROR(("max Host TS bufs to post: %d, posted %d\n",
  8867. prot->max_tsbufpost, prot->cur_ts_bufs_posted));
  8868. DHD_ERROR(("max INFO bufs to post: %d, posted %d\n",
  8869. prot->max_infobufpost, prot->infobufpost));
  8870. DHD_ERROR(("max event bufs to post: %d, posted %d\n",
  8871. prot->max_eventbufpost, prot->cur_event_bufs_posted));
  8872. DHD_ERROR(("max ioctlresp bufs to post: %d, posted %d\n",
  8873. prot->max_ioctlrespbufpost, prot->cur_ioctlresp_bufs_posted));
  8874. DHD_ERROR(("max RX bufs to post: %d, posted %d\n",
  8875. prot->max_rxbufpost, prot->rxbufpost));
  8876. DHD_ERROR(("h2d_max_txpost: %d, prot->h2d_max_txpost: %d\n",
  8877. h2d_max_txpost, prot->h2d_max_txpost));
  8878. current_time = OSL_LOCALTIME_NS();
  8879. DHD_ERROR(("current_time="SEC_USEC_FMT"\n", GET_SEC_USEC(current_time)));
  8880. DHD_ERROR(("ioctl_fillup_time="SEC_USEC_FMT
  8881. " ioctl_ack_time="SEC_USEC_FMT
  8882. " ioctl_cmplt_time="SEC_USEC_FMT"\n",
  8883. GET_SEC_USEC(prot->ioctl_fillup_time),
  8884. GET_SEC_USEC(prot->ioctl_ack_time),
  8885. GET_SEC_USEC(prot->ioctl_cmplt_time)));
  8886. /* Check PCIe INT registers */
  8887. if (!dhd_pcie_dump_int_regs(dhd)) {
  8888. DHD_ERROR(("%s : PCIe link might be down\n", __FUNCTION__));
  8889. dhd->bus->is_linkdown = TRUE;
  8890. }
  8891. DHD_ERROR(("\n ------- DUMPING IOCTL RING RD WR Pointers ------- \r\n"));
  8892. ring = &prot->h2dring_ctrl_subn;
  8893. dma_buf_len = ring->max_items * ring->item_len;
  8894. ring_tcm_rd_addr = dhd->bus->ring_sh[ring->idx].ring_state_r;
  8895. ring_tcm_wr_addr = dhd->bus->ring_sh[ring->idx].ring_state_w;
  8896. DHD_ERROR(("CtrlPost: Mem Info: BASE(VA) %p BASE(PA) %x:%x tcm_rd_wr 0x%lx:0x%lx "
  8897. "SIZE %d \r\n",
  8898. ring->dma_buf.va, ltoh32(ring->base_addr.high_addr),
  8899. ltoh32(ring->base_addr.low_addr), ring_tcm_rd_addr, ring_tcm_wr_addr, dma_buf_len));
  8900. DHD_ERROR(("CtrlPost: From Host mem: RD: %d WR %d \r\n", ring->rd, ring->wr));
  8901. if (dhd->bus->is_linkdown) {
  8902. DHD_ERROR(("CtrlPost: From Shared Mem: RD and WR are invalid"
  8903. " due to PCIe link down\r\n"));
  8904. } else {
  8905. dhd_bus_cmn_readshared(dhd->bus, &rd, RING_RD_UPD, ring->idx);
  8906. dhd_bus_cmn_readshared(dhd->bus, &wr, RING_WR_UPD, ring->idx);
  8907. DHD_ERROR(("CtrlPost: From Shared Mem: RD: %d WR %d \r\n", rd, wr));
  8908. }
  8909. DHD_ERROR(("CtrlPost: seq num: %d \r\n", ring->seqnum % H2D_EPOCH_MODULO));
  8910. ring = &prot->d2hring_ctrl_cpln;
  8911. dma_buf_len = ring->max_items * ring->item_len;
  8912. ring_tcm_rd_addr = dhd->bus->ring_sh[ring->idx].ring_state_r;
  8913. ring_tcm_wr_addr = dhd->bus->ring_sh[ring->idx].ring_state_w;
  8914. DHD_ERROR(("CtrlCpl: Mem Info: BASE(VA) %p BASE(PA) %x:%x tcm_rd_wr 0x%lx:0x%lx "
  8915. "SIZE %d \r\n",
  8916. ring->dma_buf.va, ltoh32(ring->base_addr.high_addr),
  8917. ltoh32(ring->base_addr.low_addr), ring_tcm_rd_addr, ring_tcm_wr_addr, dma_buf_len));
  8918. DHD_ERROR(("CtrlCpl: From Host mem: RD: %d WR %d \r\n", ring->rd, ring->wr));
  8919. if (dhd->bus->is_linkdown) {
  8920. DHD_ERROR(("CtrlCpl: From Shared Mem: RD and WR are invalid"
  8921. " due to PCIe link down\r\n"));
  8922. } else {
  8923. dhd_bus_cmn_readshared(dhd->bus, &rd, RING_RD_UPD, ring->idx);
  8924. dhd_bus_cmn_readshared(dhd->bus, &wr, RING_WR_UPD, ring->idx);
  8925. DHD_ERROR(("CtrlCpl: From Shared Mem: RD: %d WR %d \r\n", rd, wr));
  8926. }
  8927. DHD_ERROR(("CtrlCpl: Expected seq num: %d \r\n", ring->seqnum % H2D_EPOCH_MODULO));
  8928. ring = prot->h2dring_info_subn;
  8929. if (ring) {
  8930. dma_buf_len = ring->max_items * ring->item_len;
  8931. ring_tcm_rd_addr = dhd->bus->ring_sh[ring->idx].ring_state_r;
  8932. ring_tcm_wr_addr = dhd->bus->ring_sh[ring->idx].ring_state_w;
  8933. DHD_ERROR(("InfoSub: Mem Info: BASE(VA) %p BASE(PA) %x:%x tcm_rd_wr 0x%lx:0x%lx "
  8934. "SIZE %d \r\n",
  8935. ring->dma_buf.va, ltoh32(ring->base_addr.high_addr),
  8936. ltoh32(ring->base_addr.low_addr), ring_tcm_rd_addr, ring_tcm_wr_addr,
  8937. dma_buf_len));
  8938. DHD_ERROR(("InfoSub: From Host mem: RD: %d WR %d \r\n", ring->rd, ring->wr));
  8939. if (dhd->bus->is_linkdown) {
  8940. DHD_ERROR(("InfoSub: From Shared Mem: RD and WR are invalid"
  8941. " due to PCIe link down\r\n"));
  8942. } else {
  8943. dhd_bus_cmn_readshared(dhd->bus, &rd, RING_RD_UPD, ring->idx);
  8944. dhd_bus_cmn_readshared(dhd->bus, &wr, RING_WR_UPD, ring->idx);
  8945. DHD_ERROR(("InfoSub: From Shared Mem: RD: %d WR %d \r\n", rd, wr));
  8946. }
  8947. DHD_ERROR(("InfoSub: seq num: %d \r\n", ring->seqnum % H2D_EPOCH_MODULO));
  8948. }
  8949. ring = prot->d2hring_info_cpln;
  8950. if (ring) {
  8951. dma_buf_len = ring->max_items * ring->item_len;
  8952. ring_tcm_rd_addr = dhd->bus->ring_sh[ring->idx].ring_state_r;
  8953. ring_tcm_wr_addr = dhd->bus->ring_sh[ring->idx].ring_state_w;
  8954. DHD_ERROR(("InfoCpl: Mem Info: BASE(VA) %p BASE(PA) %x:%x tcm_rd_wr 0x%lx:0x%lx "
  8955. "SIZE %d \r\n",
  8956. ring->dma_buf.va, ltoh32(ring->base_addr.high_addr),
  8957. ltoh32(ring->base_addr.low_addr), ring_tcm_rd_addr, ring_tcm_wr_addr,
  8958. dma_buf_len));
  8959. DHD_ERROR(("InfoCpl: From Host mem: RD: %d WR %d \r\n", ring->rd, ring->wr));
  8960. if (dhd->bus->is_linkdown) {
  8961. DHD_ERROR(("InfoCpl: From Shared Mem: RD and WR are invalid"
  8962. " due to PCIe link down\r\n"));
  8963. } else {
  8964. dhd_bus_cmn_readshared(dhd->bus, &rd, RING_RD_UPD, ring->idx);
  8965. dhd_bus_cmn_readshared(dhd->bus, &wr, RING_WR_UPD, ring->idx);
  8966. DHD_ERROR(("InfoCpl: From Shared Mem: RD: %d WR %d \r\n", rd, wr));
  8967. }
  8968. DHD_ERROR(("InfoCpl: Expected seq num: %d \r\n", ring->seqnum % D2H_EPOCH_MODULO));
  8969. }
  8970. ring = &prot->d2hring_tx_cpln;
  8971. if (ring) {
  8972. ring_tcm_rd_addr = dhd->bus->ring_sh[ring->idx].ring_state_r;
  8973. ring_tcm_wr_addr = dhd->bus->ring_sh[ring->idx].ring_state_w;
  8974. dma_buf_len = ring->max_items * ring->item_len;
  8975. DHD_ERROR(("TxCpl: Mem Info: BASE(VA) %p BASE(PA) %x:%x tcm_rd_wr 0x%lx:0x%lx "
  8976. "SIZE %d \r\n",
  8977. ring->dma_buf.va, ltoh32(ring->base_addr.high_addr),
  8978. ltoh32(ring->base_addr.low_addr), ring_tcm_rd_addr, ring_tcm_wr_addr,
  8979. dma_buf_len));
  8980. DHD_ERROR(("TxCpl: From Host mem: RD: %d WR %d \r\n", ring->rd, ring->wr));
  8981. if (dhd->bus->is_linkdown) {
  8982. DHD_ERROR(("TxCpl: From Shared Mem: RD and WR are invalid"
  8983. " due to PCIe link down\r\n"));
  8984. } else {
  8985. dhd_bus_cmn_readshared(dhd->bus, &rd, RING_RD_UPD, ring->idx);
  8986. dhd_bus_cmn_readshared(dhd->bus, &wr, RING_WR_UPD, ring->idx);
  8987. DHD_ERROR(("TxCpl: From Shared Mem: RD: %d WR %d \r\n", rd, wr));
  8988. }
  8989. DHD_ERROR(("TxCpl: Expected seq num: %d \r\n", ring->seqnum % D2H_EPOCH_MODULO));
  8990. }
  8991. ring = &prot->d2hring_rx_cpln;
  8992. if (ring) {
  8993. ring_tcm_rd_addr = dhd->bus->ring_sh[ring->idx].ring_state_r;
  8994. ring_tcm_wr_addr = dhd->bus->ring_sh[ring->idx].ring_state_w;
  8995. dma_buf_len = ring->max_items * ring->item_len;
  8996. DHD_ERROR(("RxCpl: Mem Info: BASE(VA) %p BASE(PA) %x:%x tcm_rd_wr 0x%lx:0x%lx "
  8997. "SIZE %d \r\n",
  8998. ring->dma_buf.va, ltoh32(ring->base_addr.high_addr),
  8999. ltoh32(ring->base_addr.low_addr), ring_tcm_rd_addr, ring_tcm_wr_addr,
  9000. dma_buf_len));
  9001. DHD_ERROR(("RxCpl: From Host mem: RD: %d WR %d \r\n", ring->rd, ring->wr));
  9002. if (dhd->bus->is_linkdown) {
  9003. DHD_ERROR(("RxCpl: From Shared Mem: RD and WR are invalid"
  9004. " due to PCIe link down\r\n"));
  9005. } else {
  9006. dhd_bus_cmn_readshared(dhd->bus, &rd, RING_RD_UPD, ring->idx);
  9007. dhd_bus_cmn_readshared(dhd->bus, &wr, RING_WR_UPD, ring->idx);
  9008. DHD_ERROR(("RxCpl: From Shared Mem: RD: %d WR %d \r\n", rd, wr));
  9009. }
  9010. DHD_ERROR(("RxCpl: Expected seq num: %d \r\n", ring->seqnum % D2H_EPOCH_MODULO));
  9011. }
  9012. #ifdef EWP_EDL
  9013. ring = prot->d2hring_edl;
  9014. if (ring) {
  9015. ring_tcm_rd_addr = dhd->bus->ring_sh[ring->idx].ring_state_r;
  9016. ring_tcm_wr_addr = dhd->bus->ring_sh[ring->idx].ring_state_w;
  9017. dma_buf_len = ring->max_items * ring->item_len;
  9018. DHD_ERROR(("EdlRing: Mem Info: BASE(VA) %p BASE(PA) %x:%x tcm_rd_wr 0x%lx:0x%lx "
  9019. "SIZE %d \r\n",
  9020. ring->dma_buf.va, ltoh32(ring->base_addr.high_addr),
  9021. ltoh32(ring->base_addr.low_addr), ring_tcm_rd_addr, ring_tcm_wr_addr,
  9022. dma_buf_len));
  9023. DHD_ERROR(("EdlRing: From Host mem: RD: %d WR %d \r\n", ring->rd, ring->wr));
  9024. if (dhd->bus->is_linkdown) {
  9025. DHD_ERROR(("EdlRing: From Shared Mem: RD and WR are invalid"
  9026. " due to PCIe link down\r\n"));
  9027. } else {
  9028. dhd_bus_cmn_readshared(dhd->bus, &rd, RING_RD_UPD, ring->idx);
  9029. dhd_bus_cmn_readshared(dhd->bus, &wr, RING_WR_UPD, ring->idx);
  9030. DHD_ERROR(("EdlRing: From Shared Mem: RD: %d WR %d \r\n", rd, wr));
  9031. }
  9032. DHD_ERROR(("EdlRing: Expected seq num: %d \r\n",
  9033. ring->seqnum % D2H_EPOCH_MODULO));
  9034. }
  9035. #endif /* EWP_EDL */
  9036. DHD_ERROR(("%s: cur_ioctlresp_bufs_posted %d cur_event_bufs_posted %d\n",
  9037. __FUNCTION__, prot->cur_ioctlresp_bufs_posted, prot->cur_event_bufs_posted));
  9038. #ifdef DHD_LIMIT_MULTI_CLIENT_FLOWRINGS
  9039. DHD_ERROR(("%s: multi_client_flow_rings:%d max_multi_client_flow_rings:%d\n",
  9040. __FUNCTION__, dhd->multi_client_flow_rings, dhd->max_multi_client_flow_rings));
  9041. #endif /* DHD_LIMIT_MULTI_CLIENT_FLOWRINGS */
  9042. DHD_ERROR(("pktid_txq_start_cnt: %d\n", prot->pktid_txq_start_cnt));
  9043. DHD_ERROR(("pktid_txq_stop_cnt: %d\n", prot->pktid_txq_stop_cnt));
  9044. DHD_ERROR(("pktid_depleted_cnt: %d\n", prot->pktid_depleted_cnt));
  9045. dhd_pcie_debug_info_dump(dhd);
  9046. return 0;
  9047. }
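/**
* dhd_prot_ringupd_dump - dump the host memory blocks that hold the DMA'd ring index
* updates for the H2D/D2H common rings and the tx flow rings into the caller's
* bcmstrbuf.
*/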
  9048. int
  9049. dhd_prot_ringupd_dump(dhd_pub_t *dhd, struct bcmstrbuf *b)
  9050. {
  9051. uint32 *ptr;
  9052. uint32 value;
  9053. if (dhd->prot->d2h_dma_indx_wr_buf.va) {
  9054. uint32 i;
  9055. uint32 max_h2d_queues = dhd_bus_max_h2d_queues(dhd->bus);
  9056. OSL_CACHE_INV((void *)dhd->prot->d2h_dma_indx_wr_buf.va,
  9057. dhd->prot->d2h_dma_indx_wr_buf.len);
  9058. ptr = (uint32 *)(dhd->prot->d2h_dma_indx_wr_buf.va);
  9059. bcm_bprintf(b, "\n max_tx_queues %d\n", max_h2d_queues);
  9060. bcm_bprintf(b, "\nRPTR block H2D common rings, 0x%04x\n", ptr);
  9061. value = ltoh32(*ptr);
  9062. bcm_bprintf(b, "\tH2D CTRL: value 0x%04x\n", value);
  9063. ptr++;
  9064. value = ltoh32(*ptr);
  9065. bcm_bprintf(b, "\tH2D RXPOST: value 0x%04x\n", value);
  9066. ptr++;
  9067. bcm_bprintf(b, "RPTR block Flow rings , 0x%04x\n", ptr);
  9068. for (i = BCMPCIE_H2D_COMMON_MSGRINGS; i < max_h2d_queues; i++) {
  9069. value = ltoh32(*ptr);
  9070. bcm_bprintf(b, "\tflowring ID %d: value 0x%04x\n", i, value);
  9071. ptr++;
  9072. }
  9073. }
  9074. if (dhd->prot->h2d_dma_indx_rd_buf.va) {
  9075. OSL_CACHE_INV((void *)dhd->prot->h2d_dma_indx_rd_buf.va,
  9076. dhd->prot->h2d_dma_indx_rd_buf.len);
  9077. ptr = (uint32 *)(dhd->prot->h2d_dma_indx_rd_buf.va);
  9078. bcm_bprintf(b, "\nWPTR block D2H common rings, 0x%04x\n", ptr);
  9079. value = ltoh32(*ptr);
  9080. bcm_bprintf(b, "\tD2H CTRLCPLT: value 0x%04x\n", value);
  9081. ptr++;
  9082. value = ltoh32(*ptr);
  9083. bcm_bprintf(b, "\tD2H TXCPLT: value 0x%04x\n", value);
  9084. ptr++;
  9085. value = ltoh32(*ptr);
  9086. bcm_bprintf(b, "\tD2H RXCPLT: value 0x%04x\n", value);
  9087. }
  9088. return 0;
  9089. }
  9090. uint32
  9091. dhd_prot_metadata_dbg_set(dhd_pub_t *dhd, bool val)
  9092. {
  9093. dhd_prot_t *prot = dhd->prot;
  9094. #if DHD_DBG_SHOW_METADATA
  9095. prot->metadata_dbg = val;
  9096. #endif // endif
  9097. return (uint32)prot->metadata_dbg;
  9098. }
  9099. uint32
  9100. dhd_prot_metadata_dbg_get(dhd_pub_t *dhd)
  9101. {
  9102. dhd_prot_t *prot = dhd->prot;
  9103. return (uint32)prot->metadata_dbg;
  9104. }
  9105. uint32
  9106. dhd_prot_metadatalen_set(dhd_pub_t *dhd, uint32 val, bool rx)
  9107. {
  9108. dhd_prot_t *prot = dhd->prot;
  9109. if (rx)
  9110. prot->rx_metadata_offset = (uint16)val;
  9111. else
  9112. prot->tx_metadata_offset = (uint16)val;
  9113. return dhd_prot_metadatalen_get(dhd, rx);
  9114. }
  9115. uint32
  9116. dhd_prot_metadatalen_get(dhd_pub_t *dhd, bool rx)
  9117. {
  9118. dhd_prot_t *prot = dhd->prot;
  9119. if (rx)
  9120. return prot->rx_metadata_offset;
  9121. else
  9122. return prot->tx_metadata_offset;
  9123. }
  9124. /** optimization to write "n" tx items at a time to ring */
  9125. uint32
  9126. dhd_prot_txp_threshold(dhd_pub_t *dhd, bool set, uint32 val)
  9127. {
  9128. dhd_prot_t *prot = dhd->prot;
  9129. if (set)
  9130. prot->txp_threshold = (uint16)val;
  9131. val = prot->txp_threshold;
  9132. return val;
  9133. }
  9134. #ifdef DHD_RX_CHAINING
  9135. static INLINE void BCMFASTPATH
  9136. dhd_rxchain_reset(rxchain_info_t *rxchain)
  9137. {
  9138. rxchain->pkt_count = 0;
  9139. }
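/**
* dhd_rxchain_frame - add an rx packet to the current CTF rx chain. Packets are
* chained only while they belong to the same flow (same SA/DA/prio/ifidx) and are
* chainable IP/IPv6 unicast frames; a flow change, a non-chainable packet or reaching
* DHD_PKT_CTF_MAX_CHAIN_LEN commits the pending chain to the network stack.
*/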
  9140. static void BCMFASTPATH
  9141. dhd_rxchain_frame(dhd_pub_t *dhd, void *pkt, uint ifidx)
  9142. {
  9143. uint8 *eh;
  9144. uint8 prio;
  9145. dhd_prot_t *prot = dhd->prot;
  9146. rxchain_info_t *rxchain = &prot->rxchain;
  9147. ASSERT(!PKTISCHAINED(pkt));
  9148. ASSERT(PKTCLINK(pkt) == NULL);
  9149. ASSERT(PKTCGETATTR(pkt) == 0);
  9150. eh = PKTDATA(dhd->osh, pkt);
  9151. prio = IP_TOS46(eh + ETHER_HDR_LEN) >> IPV4_TOS_PREC_SHIFT;
  9152. if (rxchain->pkt_count && !(PKT_CTF_CHAINABLE(dhd, ifidx, eh, prio, rxchain->h_sa,
  9153. rxchain->h_da, rxchain->h_prio))) {
  9154. /* Different flow - First release the existing chain */
  9155. dhd_rxchain_commit(dhd);
  9156. }
  9157. /* For routers, with HNDCTF, link the packets using PKTSETCLINK, */
  9158. /* so that the chain can be handed off to CTF bridge as is. */
  9159. if (rxchain->pkt_count == 0) {
  9160. /* First packet in chain */
  9161. rxchain->pkthead = rxchain->pkttail = pkt;
  9162. /* Keep a copy of ptr to ether_da, ether_sa and prio */
  9163. rxchain->h_da = ((struct ether_header *)eh)->ether_dhost;
  9164. rxchain->h_sa = ((struct ether_header *)eh)->ether_shost;
  9165. rxchain->h_prio = prio;
  9166. rxchain->ifidx = ifidx;
  9167. rxchain->pkt_count++;
  9168. } else {
  9169. /* Same flow - keep chaining */
  9170. PKTSETCLINK(rxchain->pkttail, pkt);
  9171. rxchain->pkttail = pkt;
  9172. rxchain->pkt_count++;
  9173. }
  9174. if ((dhd_rx_pkt_chainable(dhd, ifidx)) && (!ETHER_ISMULTI(rxchain->h_da)) &&
  9175. ((((struct ether_header *)eh)->ether_type == HTON16(ETHER_TYPE_IP)) ||
  9176. (((struct ether_header *)eh)->ether_type == HTON16(ETHER_TYPE_IPV6)))) {
  9177. PKTSETCHAINED(dhd->osh, pkt);
  9178. PKTCINCRCNT(rxchain->pkthead);
  9179. PKTCADDLEN(rxchain->pkthead, PKTLEN(dhd->osh, pkt));
  9180. } else {
  9181. dhd_rxchain_commit(dhd);
  9182. return;
  9183. }
  9184. /* If we have hit the max chain length, dispatch the chain and reset */
  9185. if (rxchain->pkt_count >= DHD_PKT_CTF_MAX_CHAIN_LEN) {
  9186. dhd_rxchain_commit(dhd);
  9187. }
  9188. }
  9189. static void BCMFASTPATH
  9190. dhd_rxchain_commit(dhd_pub_t *dhd)
  9191. {
  9192. dhd_prot_t *prot = dhd->prot;
  9193. rxchain_info_t *rxchain = &prot->rxchain;
  9194. if (rxchain->pkt_count == 0)
  9195. return;
  9196. /* Release the packets to dhd_linux */
  9197. dhd_bus_rx_frame(dhd->bus, rxchain->pkthead, rxchain->ifidx, rxchain->pkt_count);
  9198. /* Reset the chain */
  9199. dhd_rxchain_reset(rxchain);
  9200. }
  9201. #endif /* DHD_RX_CHAINING */
  9202. #ifdef IDLE_TX_FLOW_MGMT
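/**
* dhd_prot_flow_ring_resume - re-attach a msgbuf ring from the flowring pool to an
* idle flow ring node, post a MSG_TYPE_FLOW_RING_RESUME request on the control
* submission ring and republish the flow ring's WR index to the dongle.
*/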
  9203. int
  9204. dhd_prot_flow_ring_resume(dhd_pub_t *dhd, flow_ring_node_t *flow_ring_node)
  9205. {
  9206. tx_idle_flowring_resume_request_t *flow_resume_rqst;
  9207. msgbuf_ring_t *flow_ring;
  9208. dhd_prot_t *prot = dhd->prot;
  9209. unsigned long flags;
  9210. uint16 alloced = 0;
  9211. msgbuf_ring_t *ctrl_ring = &prot->h2dring_ctrl_subn;
  9212. /* Fetch a pre-initialized msgbuf_ring from the flowring pool */
  9213. flow_ring = dhd_prot_flowrings_pool_fetch(dhd, flow_ring_node->flowid);
  9214. if (flow_ring == NULL) {
  9215. DHD_ERROR(("%s: dhd_prot_flowrings_pool_fetch TX Flowid %d failed\n",
  9216. __FUNCTION__, flow_ring_node->flowid));
  9217. return BCME_NOMEM;
  9218. }
  9219. DHD_RING_LOCK(ctrl_ring->ring_lock, flags);
  9220. /* Request for ctrl_ring buffer space */
  9221. flow_resume_rqst = (tx_idle_flowring_resume_request_t *)
  9222. dhd_prot_alloc_ring_space(dhd, ctrl_ring, 1, &alloced, FALSE);
  9223. if (flow_resume_rqst == NULL) {
  9224. dhd_prot_flowrings_pool_release(dhd, flow_ring_node->flowid, flow_ring);
  9225. DHD_ERROR(("%s: Flow resume Req flowid %d - failure ring space\n",
  9226. __FUNCTION__, flow_ring_node->flowid));
  9227. DHD_RING_UNLOCK(ctrl_ring->ring_lock, flags);
  9228. return BCME_NOMEM;
  9229. }
  9230. flow_ring_node->prot_info = (void *)flow_ring;
  9231. /* Common msg buf hdr */
  9232. flow_resume_rqst->msg.msg_type = MSG_TYPE_FLOW_RING_RESUME;
  9233. flow_resume_rqst->msg.if_id = (uint8)flow_ring_node->flow_info.ifindex;
  9234. flow_resume_rqst->msg.request_id = htol32(0); /* TBD */
  9235. flow_resume_rqst->msg.epoch = ctrl_ring->seqnum % H2D_EPOCH_MODULO;
  9236. ctrl_ring->seqnum++;
  9237. flow_resume_rqst->flow_ring_id = htol16((uint16)flow_ring_node->flowid);
  9238. DHD_ERROR(("%s Send Flow resume Req flow ID %d\n",
  9239. __FUNCTION__, flow_ring_node->flowid));
  9240. /* Update the flow_ring's WRITE index */
  9241. if (IDMA_ACTIVE(dhd) || dhd->dma_h2d_ring_upd_support) {
  9242. dhd_prot_dma_indx_set(dhd, flow_ring->wr,
  9243. H2D_DMA_INDX_WR_UPD, flow_ring->idx);
  9244. } else if (IFRM_ACTIVE(dhd) && (flow_ring->idx >= BCMPCIE_H2D_MSGRING_TXFLOW_IDX_START)) {
  9245. dhd_prot_dma_indx_set(dhd, flow_ring->wr,
  9246. H2D_IFRM_INDX_WR_UPD,
  9247. (flow_ring->idx - BCMPCIE_H2D_MSGRING_TXFLOW_IDX_START));
  9248. } else {
  9249. dhd_bus_cmn_writeshared(dhd->bus, &(flow_ring->wr),
  9250. sizeof(uint16), RING_WR_UPD, flow_ring->idx);
  9251. }
  9252. /* update control subn ring's WR index and ring doorbell to dongle */
  9253. dhd_prot_ring_write_complete(dhd, ctrl_ring, flow_resume_rqst, 1);
  9254. DHD_RING_UNLOCK(ctrl_ring->ring_lock, flags);
  9255. return BCME_OK;
9256. } /* dhd_prot_flow_ring_resume */
  9257. int
  9258. dhd_prot_flow_ring_batch_suspend_request(dhd_pub_t *dhd, uint16 *ringid, uint16 count)
  9259. {
  9260. tx_idle_flowring_suspend_request_t *flow_suspend_rqst;
  9261. dhd_prot_t *prot = dhd->prot;
  9262. unsigned long flags;
  9263. uint16 index;
  9264. uint16 alloced = 0;
  9265. msgbuf_ring_t *ring = &prot->h2dring_ctrl_subn;
  9266. DHD_RING_LOCK(ring->ring_lock, flags);
  9267. /* Request for ring buffer space */
  9268. flow_suspend_rqst = (tx_idle_flowring_suspend_request_t *)
  9269. dhd_prot_alloc_ring_space(dhd, ring, 1, &alloced, FALSE);
  9270. if (flow_suspend_rqst == NULL) {
  9271. DHD_RING_UNLOCK(ring->ring_lock, flags);
  9272. DHD_ERROR(("%s: Flow suspend Req - failure ring space\n", __FUNCTION__));
  9273. return BCME_NOMEM;
  9274. }
  9275. /* Common msg buf hdr */
  9276. flow_suspend_rqst->msg.msg_type = MSG_TYPE_FLOW_RING_SUSPEND;
  9277. /* flow_suspend_rqst->msg.if_id = (uint8)flow_ring_node->flow_info.ifindex; */
  9278. flow_suspend_rqst->msg.request_id = htol32(0); /* TBD */
  9279. flow_suspend_rqst->msg.epoch = ring->seqnum % H2D_EPOCH_MODULO;
  9280. ring->seqnum++;
  9281. /* Update flow id info */
  9282. for (index = 0; index < count; index++)
  9283. {
  9284. flow_suspend_rqst->ring_id[index] = ringid[index];
  9285. }
  9286. flow_suspend_rqst->num = count;
  9287. DHD_ERROR(("%s sending batch suspend!! count is %d\n", __FUNCTION__, count));
  9288. /* update ring's WR index and ring doorbell to dongle */
  9289. dhd_prot_ring_write_complete(dhd, ring, flow_suspend_rqst, 1);
  9290. DHD_RING_UNLOCK(ring->ring_lock, flags);
  9291. return BCME_OK;
  9292. }
  9293. #endif /* IDLE_TX_FLOW_MGMT */
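/** Map an extended trap data TLV tag to a printable name for dump output. */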
  9294. static const char* etd_trap_name(hnd_ext_tag_trap_t tag)
  9295. {
  9296. switch (tag)
  9297. {
  9298. case TAG_TRAP_SIGNATURE: return "TAG_TRAP_SIGNATURE";
  9299. case TAG_TRAP_STACK: return "TAG_TRAP_STACK";
  9300. case TAG_TRAP_MEMORY: return "TAG_TRAP_MEMORY";
  9301. case TAG_TRAP_DEEPSLEEP: return "TAG_TRAP_DEEPSLEEP";
  9302. case TAG_TRAP_PSM_WD: return "TAG_TRAP_PSM_WD";
  9303. case TAG_TRAP_PHY: return "TAG_TRAP_PHY";
  9304. case TAG_TRAP_BUS: return "TAG_TRAP_BUS";
  9305. case TAG_TRAP_MAC_SUSP: return "TAG_TRAP_MAC_SUSP";
  9306. case TAG_TRAP_BACKPLANE: return "TAG_TRAP_BACKPLANE";
  9307. case TAG_TRAP_PCIE_Q: return "TAG_TRAP_PCIE_Q";
  9308. case TAG_TRAP_WLC_STATE: return "TAG_TRAP_WLC_STATE";
  9309. case TAG_TRAP_MAC_WAKE: return "TAG_TRAP_MAC_WAKE";
  9310. case TAG_TRAP_HMAP: return "TAG_TRAP_HMAP";
  9311. case TAG_TRAP_PHYTXERR_THRESH: return "TAG_TRAP_PHYTXERR_THRESH";
  9312. case TAG_TRAP_HC_DATA: return "TAG_TRAP_HC_DATA";
  9313. case TAG_TRAP_LOG_DATA: return "TAG_TRAP_LOG_DATA";
  9314. case TAG_TRAP_CODE: return "TAG_TRAP_CODE";
  9315. case TAG_TRAP_LAST:
  9316. default:
  9317. return "Unknown";
  9318. }
  9319. return "Unknown";
  9320. }
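/**
* dhd_prot_dump_extended_trap - pretty-print the extended trap data TLVs (trap
* signature, stack, backplane, heap, PCIe queue, WLC state, PHY, PSM watchdog,
* MAC suspend/wake, ...) into the caller's bcmstrbuf; with 'raw' set, hex-dump the
* whole blob instead.
*/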
  9321. int dhd_prot_dump_extended_trap(dhd_pub_t *dhdp, struct bcmstrbuf *b, bool raw)
  9322. {
  9323. uint32 i;
  9324. uint32 *ext_data;
  9325. hnd_ext_trap_hdr_t *hdr;
  9326. const bcm_tlv_t *tlv;
  9327. const trap_t *tr;
  9328. const uint32 *stack;
  9329. const hnd_ext_trap_bp_err_t *bpe;
  9330. uint32 raw_len;
  9331. ext_data = dhdp->extended_trap_data;
  9332. /* return if there is no extended trap data */
  9333. if (!ext_data || !(dhdp->dongle_trap_data & D2H_DEV_EXT_TRAP_DATA))
  9334. {
  9335. bcm_bprintf(b, "%d (0x%x)", dhdp->dongle_trap_data, dhdp->dongle_trap_data);
  9336. return BCME_OK;
  9337. }
  9338. bcm_bprintf(b, "Extended trap data\n");
  9339. /* First word is original trap_data */
  9340. bcm_bprintf(b, "trap_data = 0x%08x\n", *ext_data);
  9341. ext_data++;
  9342. /* Followed by the extended trap data header */
  9343. hdr = (hnd_ext_trap_hdr_t *)ext_data;
  9344. bcm_bprintf(b, "version: %d, len: %d\n", hdr->version, hdr->len);
  9345. /* Dump a list of all tags found before parsing data */
  9346. bcm_bprintf(b, "\nTags Found:\n");
  9347. for (i = 0; i < TAG_TRAP_LAST; i++) {
  9348. tlv = bcm_parse_tlvs(hdr->data, hdr->len, i);
  9349. if (tlv)
  9350. bcm_bprintf(b, "Tag: %d (%s), Length: %d\n", i, etd_trap_name(i), tlv->len);
  9351. }
  9352. if (raw)
  9353. {
  9354. raw_len = sizeof(hnd_ext_trap_hdr_t) + (hdr->len / 4) + (hdr->len % 4 ? 1 : 0);
  9355. for (i = 0; i < raw_len; i++)
  9356. {
  9357. bcm_bprintf(b, "0x%08x ", ext_data[i]);
  9358. if (i % 4 == 3)
  9359. bcm_bprintf(b, "\n");
  9360. }
  9361. return BCME_OK;
  9362. }
  9363. /* Extract the various supported TLVs from the extended trap data */
  9364. tlv = bcm_parse_tlvs(hdr->data, hdr->len, TAG_TRAP_CODE);
  9365. if (tlv)
  9366. {
  9367. bcm_bprintf(b, "\n%s len: %d\n", etd_trap_name(TAG_TRAP_CODE), tlv->len);
  9368. bcm_bprintf(b, "ETD TYPE: %d\n", tlv->data[0]);
  9369. }
  9370. tlv = bcm_parse_tlvs(hdr->data, hdr->len, TAG_TRAP_SIGNATURE);
  9371. if (tlv)
  9372. {
  9373. bcm_bprintf(b, "\n%s len: %d\n", etd_trap_name(TAG_TRAP_SIGNATURE), tlv->len);
  9374. tr = (const trap_t *)tlv->data;
  9375. bcm_bprintf(b, "TRAP %x: pc %x, lr %x, sp %x, cpsr %x, spsr %x\n",
  9376. tr->type, tr->pc, tr->r14, tr->r13, tr->cpsr, tr->spsr);
  9377. bcm_bprintf(b, " r0 %x, r1 %x, r2 %x, r3 %x, r4 %x, r5 %x, r6 %x\n",
  9378. tr->r0, tr->r1, tr->r2, tr->r3, tr->r4, tr->r5, tr->r6);
  9379. bcm_bprintf(b, " r7 %x, r8 %x, r9 %x, r10 %x, r11 %x, r12 %x\n",
  9380. tr->r7, tr->r8, tr->r9, tr->r10, tr->r11, tr->r12);
  9381. }
  9382. tlv = bcm_parse_tlvs(hdr->data, hdr->len, TAG_TRAP_STACK);
  9383. if (tlv)
  9384. {
  9385. bcm_bprintf(b, "\n%s len: %d\n", etd_trap_name(TAG_TRAP_STACK), tlv->len);
  9386. stack = (const uint32 *)tlv->data;
  9387. for (i = 0; i < (uint32)(tlv->len / 4); i++)
  9388. {
  9389. bcm_bprintf(b, " 0x%08x\n", *stack);
  9390. stack++;
  9391. }
  9392. }
  9393. tlv = bcm_parse_tlvs(hdr->data, hdr->len, TAG_TRAP_BACKPLANE);
  9394. if (tlv)
  9395. {
  9396. bcm_bprintf(b, "\n%s len: %d\n", etd_trap_name(TAG_TRAP_BACKPLANE), tlv->len);
  9397. bpe = (const hnd_ext_trap_bp_err_t *)tlv->data;
  9398. bcm_bprintf(b, " error: %x\n", bpe->error);
  9399. bcm_bprintf(b, " coreid: %x\n", bpe->coreid);
  9400. bcm_bprintf(b, " baseaddr: %x\n", bpe->baseaddr);
  9401. bcm_bprintf(b, " ioctrl: %x\n", bpe->ioctrl);
  9402. bcm_bprintf(b, " iostatus: %x\n", bpe->iostatus);
  9403. bcm_bprintf(b, " resetctrl: %x\n", bpe->resetctrl);
  9404. bcm_bprintf(b, " resetstatus: %x\n", bpe->resetstatus);
  9405. bcm_bprintf(b, " errlogctrl: %x\n", bpe->errlogctrl);
  9406. bcm_bprintf(b, " errlogdone: %x\n", bpe->errlogdone);
  9407. bcm_bprintf(b, " errlogstatus: %x\n", bpe->errlogstatus);
  9408. bcm_bprintf(b, " errlogaddrlo: %x\n", bpe->errlogaddrlo);
  9409. bcm_bprintf(b, " errlogaddrhi: %x\n", bpe->errlogaddrhi);
  9410. bcm_bprintf(b, " errlogid: %x\n", bpe->errlogid);
  9411. bcm_bprintf(b, " errloguser: %x\n", bpe->errloguser);
  9412. bcm_bprintf(b, " errlogflags: %x\n", bpe->errlogflags);
  9413. }
  9414. tlv = bcm_parse_tlvs(hdr->data, hdr->len, TAG_TRAP_MEMORY);
  9415. if (tlv)
  9416. {
  9417. const hnd_ext_trap_heap_err_t* hme;
  9418. bcm_bprintf(b, "\n%s len: %d\n", etd_trap_name(TAG_TRAP_MEMORY), tlv->len);
  9419. hme = (const hnd_ext_trap_heap_err_t *)tlv->data;
  9420. bcm_bprintf(b, " arena total: %d\n", hme->arena_total);
  9421. bcm_bprintf(b, " heap free: %d\n", hme->heap_free);
  9422. bcm_bprintf(b, " heap in use: %d\n", hme->heap_inuse);
  9423. bcm_bprintf(b, " mf count: %d\n", hme->mf_count);
  9424. bcm_bprintf(b, " stack LWM: %x\n", hme->stack_lwm);
  9425. bcm_bprintf(b, " Histogram:\n");
  9426. for (i = 0; i < (HEAP_HISTOGRAM_DUMP_LEN * 2); i += 2) {
  9427. if (hme->heap_histogm[i] == 0xfffe)
  9428. bcm_bprintf(b, " Others\t%d\t?\n", hme->heap_histogm[i + 1]);
  9429. else if (hme->heap_histogm[i] == 0xffff)
  9430. bcm_bprintf(b, " >= 256K\t%d\t?\n", hme->heap_histogm[i + 1]);
  9431. else
  9432. bcm_bprintf(b, " %d\t%d\t%d\n", hme->heap_histogm[i] << 2,
  9433. hme->heap_histogm[i + 1], (hme->heap_histogm[i] << 2)
  9434. * hme->heap_histogm[i + 1]);
  9435. }
  9436. bcm_bprintf(b, " Max free block: %d\n", hme->max_sz_free_blk[0] << 2);
  9437. for (i = 1; i < HEAP_MAX_SZ_BLKS_LEN; i++) {
  9438. bcm_bprintf(b, " Next lgst free block: %d\n", hme->max_sz_free_blk[i] << 2);
  9439. }
  9440. }
  9441. tlv = bcm_parse_tlvs(hdr->data, hdr->len, TAG_TRAP_PCIE_Q);
  9442. if (tlv)
  9443. {
  9444. const hnd_ext_trap_pcie_mem_err_t* pqme;
  9445. bcm_bprintf(b, "\n%s len: %d\n", etd_trap_name(TAG_TRAP_PCIE_Q), tlv->len);
  9446. pqme = (const hnd_ext_trap_pcie_mem_err_t *)tlv->data;
  9447. bcm_bprintf(b, " d2h queue len: %x\n", pqme->d2h_queue_len);
  9448. bcm_bprintf(b, " d2h req queue len: %x\n", pqme->d2h_req_queue_len);
  9449. }
  9450. tlv = bcm_parse_tlvs(hdr->data, hdr->len, TAG_TRAP_WLC_STATE);
  9451. if (tlv)
  9452. {
  9453. const hnd_ext_trap_wlc_mem_err_t* wsme;
  9454. bcm_bprintf(b, "\n%s len: %d\n", etd_trap_name(TAG_TRAP_WLC_STATE), tlv->len);
  9455. wsme = (const hnd_ext_trap_wlc_mem_err_t *)tlv->data;
  9456. bcm_bprintf(b, " instance: %d\n", wsme->instance);
  9457. bcm_bprintf(b, " associated: %d\n", wsme->associated);
  9458. bcm_bprintf(b, " peer count: %d\n", wsme->peer_cnt);
  9459. bcm_bprintf(b, " client count: %d\n", wsme->soft_ap_client_cnt);
  9460. bcm_bprintf(b, " TX_AC_BK_FIFO: %d\n", wsme->txqueue_len[0]);
  9461. bcm_bprintf(b, " TX_AC_BE_FIFO: %d\n", wsme->txqueue_len[1]);
  9462. bcm_bprintf(b, " TX_AC_VI_FIFO: %d\n", wsme->txqueue_len[2]);
  9463. bcm_bprintf(b, " TX_AC_VO_FIFO: %d\n", wsme->txqueue_len[3]);
  9464. if (tlv->len >= (sizeof(*wsme) * 2)) {
  9465. wsme++;
  9466. bcm_bprintf(b, "\n instance: %d\n", wsme->instance);
  9467. bcm_bprintf(b, " associated: %d\n", wsme->associated);
  9468. bcm_bprintf(b, " peer count: %d\n", wsme->peer_cnt);
  9469. bcm_bprintf(b, " client count: %d\n", wsme->soft_ap_client_cnt);
  9470. bcm_bprintf(b, " TX_AC_BK_FIFO: %d\n", wsme->txqueue_len[0]);
  9471. bcm_bprintf(b, " TX_AC_BE_FIFO: %d\n", wsme->txqueue_len[1]);
  9472. bcm_bprintf(b, " TX_AC_VI_FIFO: %d\n", wsme->txqueue_len[2]);
  9473. bcm_bprintf(b, " TX_AC_VO_FIFO: %d\n", wsme->txqueue_len[3]);
  9474. }
  9475. }
  9476. tlv = bcm_parse_tlvs(hdr->data, hdr->len, TAG_TRAP_PHY);
  9477. if (tlv)
  9478. {
  9479. const hnd_ext_trap_phydbg_t* phydbg;
  9480. bcm_bprintf(b, "\n%s len: %d\n", etd_trap_name(TAG_TRAP_PHY), tlv->len);
  9481. phydbg = (const hnd_ext_trap_phydbg_t *)tlv->data;
  9482. bcm_bprintf(b, " err: 0x%x\n", phydbg->err);
  9483. bcm_bprintf(b, " RxFeStatus: 0x%x\n", phydbg->RxFeStatus);
  9484. bcm_bprintf(b, " TxFIFOStatus0: 0x%x\n", phydbg->TxFIFOStatus0);
  9485. bcm_bprintf(b, " TxFIFOStatus1: 0x%x\n", phydbg->TxFIFOStatus1);
  9486. bcm_bprintf(b, " RfseqMode: 0x%x\n", phydbg->RfseqMode);
  9487. bcm_bprintf(b, " RfseqStatus0: 0x%x\n", phydbg->RfseqStatus0);
  9488. bcm_bprintf(b, " RfseqStatus1: 0x%x\n", phydbg->RfseqStatus1);
  9489. bcm_bprintf(b, " RfseqStatus_Ocl: 0x%x\n", phydbg->RfseqStatus_Ocl);
  9490. bcm_bprintf(b, " RfseqStatus_Ocl1: 0x%x\n", phydbg->RfseqStatus_Ocl1);
  9491. bcm_bprintf(b, " OCLControl1: 0x%x\n", phydbg->OCLControl1);
  9492. bcm_bprintf(b, " TxError: 0x%x\n", phydbg->TxError);
  9493. bcm_bprintf(b, " bphyTxError: 0x%x\n", phydbg->bphyTxError);
  9494. bcm_bprintf(b, " TxCCKError: 0x%x\n", phydbg->TxCCKError);
  9495. bcm_bprintf(b, " TxCtrlWrd0: 0x%x\n", phydbg->TxCtrlWrd0);
  9496. bcm_bprintf(b, " TxCtrlWrd1: 0x%x\n", phydbg->TxCtrlWrd1);
  9497. bcm_bprintf(b, " TxCtrlWrd2: 0x%x\n", phydbg->TxCtrlWrd2);
  9498. bcm_bprintf(b, " TxLsig0: 0x%x\n", phydbg->TxLsig0);
  9499. bcm_bprintf(b, " TxLsig1: 0x%x\n", phydbg->TxLsig1);
  9500. bcm_bprintf(b, " TxVhtSigA10: 0x%x\n", phydbg->TxVhtSigA10);
  9501. bcm_bprintf(b, " TxVhtSigA11: 0x%x\n", phydbg->TxVhtSigA11);
  9502. bcm_bprintf(b, " TxVhtSigA20: 0x%x\n", phydbg->TxVhtSigA20);
  9503. bcm_bprintf(b, " TxVhtSigA21: 0x%x\n", phydbg->TxVhtSigA21);
  9504. bcm_bprintf(b, " txPktLength: 0x%x\n", phydbg->txPktLength);
  9505. bcm_bprintf(b, " txPsdulengthCtr: 0x%x\n", phydbg->txPsdulengthCtr);
  9506. bcm_bprintf(b, " gpioClkControl: 0x%x\n", phydbg->gpioClkControl);
  9507. bcm_bprintf(b, " gpioSel: 0x%x\n", phydbg->gpioSel);
  9508. bcm_bprintf(b, " pktprocdebug: 0x%x\n", phydbg->pktprocdebug);
  9509. for (i = 0; i < 3; i++)
  9510. bcm_bprintf(b, " gpioOut[%d]: 0x%x\n", i, phydbg->gpioOut[i]);
  9511. }
	tlv = bcm_parse_tlvs(hdr->data, hdr->len, TAG_TRAP_PSM_WD);
	if (tlv)
	{
		const hnd_ext_trap_psmwd_t* psmwd;
		bcm_bprintf(b, "\n%s len: %d\n", etd_trap_name(TAG_TRAP_PSM_WD), tlv->len);
		psmwd = (const hnd_ext_trap_psmwd_t *)tlv;
		bcm_bprintf(b, " version: 0x%x\n", psmwd->version);
		bcm_bprintf(b, " maccontrol: 0x%x\n", psmwd->i32_maccontrol);
		bcm_bprintf(b, " maccommand: 0x%x\n", psmwd->i32_maccommand);
		bcm_bprintf(b, " macintstatus: 0x%x\n", psmwd->i32_macintstatus);
		bcm_bprintf(b, " phydebug: 0x%x\n", psmwd->i32_phydebug);
		bcm_bprintf(b, " clk_ctl_st: 0x%x\n", psmwd->i32_clk_ctl_st);
		for (i = 0; i < 3; i++)
			bcm_bprintf(b, " psmdebug[%d]: 0x%x\n", i, psmwd->i32_psmdebug[i]);
		bcm_bprintf(b, " gated clock en: 0x%x\n", psmwd->i16_0x1a8);
		bcm_bprintf(b, " Rcv Fifo Ctrl: 0x%x\n", psmwd->i16_0x406);
		bcm_bprintf(b, " Rx ctrl 1: 0x%x\n", psmwd->i16_0x408);
		bcm_bprintf(b, " Rxe Status 1: 0x%x\n", psmwd->i16_0x41a);
		bcm_bprintf(b, " Rxe Status 2: 0x%x\n", psmwd->i16_0x41c);
		bcm_bprintf(b, " rcv wrd count 0: 0x%x\n", psmwd->i16_0x424);
		bcm_bprintf(b, " rcv wrd count 1: 0x%x\n", psmwd->i16_0x426);
		bcm_bprintf(b, " RCV_LFIFO_STS: 0x%x\n", psmwd->i16_0x456);
		bcm_bprintf(b, " PSM_SLP_TMR: 0x%x\n", psmwd->i16_0x480);
		bcm_bprintf(b, " PSM BRC: 0x%x\n", psmwd->i16_0x490);
		bcm_bprintf(b, " TXE CTRL: 0x%x\n", psmwd->i16_0x500);
		bcm_bprintf(b, " TXE Status: 0x%x\n", psmwd->i16_0x50e);
		bcm_bprintf(b, " TXE_xmtdmabusy: 0x%x\n", psmwd->i16_0x55e);
		bcm_bprintf(b, " TXE_XMTfifosuspflush: 0x%x\n", psmwd->i16_0x566);
		bcm_bprintf(b, " IFS Stat: 0x%x\n", psmwd->i16_0x690);
		bcm_bprintf(b, " IFS_MEDBUSY_CTR: 0x%x\n", psmwd->i16_0x692);
		bcm_bprintf(b, " IFS_TX_DUR: 0x%x\n", psmwd->i16_0x694);
  9543. bcm_bprintf(b, " SLow_CTL: 0x%x\n", psmwd->i16_0x6a0);
		bcm_bprintf(b, " TXE_AQM fifo Ready: 0x%x\n", psmwd->i16_0x838);
		bcm_bprintf(b, " Dagg ctrl: 0x%x\n", psmwd->i16_0x8c0);
		bcm_bprintf(b, " shm_prewds_cnt: 0x%x\n", psmwd->shm_prewds_cnt);
		bcm_bprintf(b, " shm_txtplufl_cnt: 0x%x\n", psmwd->shm_txtplufl_cnt);
		bcm_bprintf(b, " shm_txphyerr_cnt: 0x%x\n", psmwd->shm_txphyerr_cnt);
	}
	tlv = bcm_parse_tlvs(hdr->data, hdr->len, TAG_TRAP_MAC_SUSP);
	if (tlv)
	{
		const hnd_ext_trap_macsusp_t* macsusp;
		bcm_bprintf(b, "\n%s len: %d\n", etd_trap_name(TAG_TRAP_MAC_SUSP), tlv->len);
		macsusp = (const hnd_ext_trap_macsusp_t *)tlv;
		bcm_bprintf(b, " version: %d\n", macsusp->version);
		bcm_bprintf(b, " trap_reason: %d\n", macsusp->trap_reason);
		bcm_bprintf(b, " maccontrol: 0x%x\n", macsusp->i32_maccontrol);
		bcm_bprintf(b, " maccommand: 0x%x\n", macsusp->i32_maccommand);
		bcm_bprintf(b, " macintstatus: 0x%x\n", macsusp->i32_macintstatus);
		for (i = 0; i < 4; i++)
			bcm_bprintf(b, " phydebug[%d]: 0x%x\n", i, macsusp->i32_phydebug[i]);
		for (i = 0; i < 8; i++)
			bcm_bprintf(b, " psmdebug[%d]: 0x%x\n", i, macsusp->i32_psmdebug[i]);
		bcm_bprintf(b, " Rxe Status_1: 0x%x\n", macsusp->i16_0x41a);
		bcm_bprintf(b, " Rxe Status_2: 0x%x\n", macsusp->i16_0x41c);
		bcm_bprintf(b, " PSM BRC: 0x%x\n", macsusp->i16_0x490);
		bcm_bprintf(b, " TXE Status: 0x%x\n", macsusp->i16_0x50e);
		bcm_bprintf(b, " TXE xmtdmabusy: 0x%x\n", macsusp->i16_0x55e);
		bcm_bprintf(b, " TXE XMTfifosuspflush: 0x%x\n", macsusp->i16_0x566);
		bcm_bprintf(b, " IFS Stat: 0x%x\n", macsusp->i16_0x690);
		bcm_bprintf(b, " IFS MEDBUSY CTR: 0x%x\n", macsusp->i16_0x692);
		bcm_bprintf(b, " IFS TX DUR: 0x%x\n", macsusp->i16_0x694);
		bcm_bprintf(b, " WEP CTL: 0x%x\n", macsusp->i16_0x7c0);
		bcm_bprintf(b, " TXE AQM fifo Ready: 0x%x\n", macsusp->i16_0x838);
		bcm_bprintf(b, " MHP status: 0x%x\n", macsusp->i16_0x880);
		bcm_bprintf(b, " shm_prewds_cnt: 0x%x\n", macsusp->shm_prewds_cnt);
		bcm_bprintf(b, " shm_ucode_dbgst: 0x%x\n", macsusp->shm_ucode_dbgst);
	}
	tlv = bcm_parse_tlvs(hdr->data, hdr->len, TAG_TRAP_MAC_WAKE);
	if (tlv)
	{
		const hnd_ext_trap_macenab_t* macwake;
		bcm_bprintf(b, "\n%s len: %d\n", etd_trap_name(TAG_TRAP_MAC_WAKE), tlv->len);
		macwake = (const hnd_ext_trap_macenab_t *)tlv;
		bcm_bprintf(b, " version: 0x%x\n", macwake->version);
		bcm_bprintf(b, " trap_reason: 0x%x\n", macwake->trap_reason);
		bcm_bprintf(b, " maccontrol: 0x%x\n", macwake->i32_maccontrol);
		bcm_bprintf(b, " maccommand: 0x%x\n", macwake->i32_maccommand);
		bcm_bprintf(b, " macintstatus: 0x%x\n", macwake->i32_macintstatus);
		for (i = 0; i < 8; i++)
			bcm_bprintf(b, " psmdebug[%d]: 0x%x\n", i, macwake->i32_psmdebug[i]);
		bcm_bprintf(b, " clk_ctl_st: 0x%x\n", macwake->i32_clk_ctl_st);
		bcm_bprintf(b, " powerctl: 0x%x\n", macwake->i32_powerctl);
		bcm_bprintf(b, " gated clock en: 0x%x\n", macwake->i16_0x1a8);
		bcm_bprintf(b, " PSM_SLP_TMR: 0x%x\n", macwake->i16_0x480);
		bcm_bprintf(b, " PSM BRC: 0x%x\n", macwake->i16_0x490);
		bcm_bprintf(b, " TSF CTL: 0x%x\n", macwake->i16_0x600);
		bcm_bprintf(b, " IFS Stat: 0x%x\n", macwake->i16_0x690);
		bcm_bprintf(b, " IFS_MEDBUSY_CTR: 0x%x\n", macwake->i16_0x692);
		bcm_bprintf(b, " Slow_CTL: 0x%x\n", macwake->i16_0x6a0);
		bcm_bprintf(b, " Slow_FRAC: 0x%x\n", macwake->i16_0x6a6);
		bcm_bprintf(b, " fast power up delay: 0x%x\n", macwake->i16_0x6a8);
		bcm_bprintf(b, " Slow_PER: 0x%x\n", macwake->i16_0x6aa);
		bcm_bprintf(b, " shm_ucode_dbgst: 0x%x\n", macwake->shm_ucode_dbgst);
	}
	tlv = bcm_parse_tlvs(hdr->data, hdr->len, TAG_TRAP_BUS);
	if (tlv)
	{
		const bcm_dngl_pcie_hc_t* hc;
		bcm_bprintf(b, "\n%s len: %d\n", etd_trap_name(TAG_TRAP_BUS), tlv->len);
		hc = (const bcm_dngl_pcie_hc_t *)tlv->data;
		bcm_bprintf(b, " version: 0x%x\n", hc->version);
		bcm_bprintf(b, " reserved: 0x%x\n", hc->reserved);
		bcm_bprintf(b, " pcie_err_ind_type: 0x%x\n", hc->pcie_err_ind_type);
		bcm_bprintf(b, " pcie_flag: 0x%x\n", hc->pcie_flag);
		bcm_bprintf(b, " pcie_control_reg: 0x%x\n", hc->pcie_control_reg);
		for (i = 0; i < HC_PCIEDEV_CONFIG_REGLIST_MAX; i++)
			bcm_bprintf(b, " pcie_config_regs[%d]: 0x%x\n", i, hc->pcie_config_regs[i]);
	}
	tlv = bcm_parse_tlvs(hdr->data, hdr->len, TAG_TRAP_HMAP);
	if (tlv)
	{
		const pcie_hmapviolation_t* hmap;
		hmap = (const pcie_hmapviolation_t *)tlv->data;
		bcm_bprintf(b, "\n%s len: %d\n", etd_trap_name(TAG_TRAP_HMAP), tlv->len);
		bcm_bprintf(b, " HMAP Vio Addr Low: 0x%x\n", hmap->hmap_violationaddr_lo);
		bcm_bprintf(b, " HMAP Vio Addr Hi: 0x%x\n", hmap->hmap_violationaddr_hi);
		bcm_bprintf(b, " HMAP Vio Info: 0x%x\n", hmap->hmap_violation_info);
	}

	return BCME_OK;
}

#ifdef BCMPCIE
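/*
 * dhd_prot_send_host_timestamp: queue a host_timestamp_msg_t work item on the
 * H2D control submission ring, copy the caller's TLV payload into the
 * pre-allocated hostts_req_buf DMA buffer, and ring the doorbell. Only one
 * host timestamp request may be outstanding at a time.
 */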
int
dhd_prot_send_host_timestamp(dhd_pub_t *dhdp, uchar *tlvs, uint16 tlv_len,
	uint16 seqnum, uint16 xt_id)
{
	dhd_prot_t *prot = dhdp->prot;
	host_timestamp_msg_t *ts_req;
	unsigned long flags;
	uint16 alloced = 0;
	uchar *ts_tlv_buf;
	msgbuf_ring_t *ctrl_ring = &prot->h2dring_ctrl_subn;

	if ((tlvs == NULL) || (tlv_len == 0)) {
		DHD_ERROR(("%s: argument error tlv: %p, tlv_len %d\n",
			__FUNCTION__, tlvs, tlv_len));
		return -1;
	}

	DHD_RING_LOCK(ctrl_ring->ring_lock, flags);

	/* If a host TS request is already pending at the device, bail out */
	if (prot->hostts_req_buf_inuse == TRUE) {
		DHD_ERROR(("one host TS request already pending at device\n"));
		DHD_RING_UNLOCK(ctrl_ring->ring_lock, flags);
		return -1;
	}

	/* Request space on the control submission ring */
	ts_req = (host_timestamp_msg_t*)dhd_prot_alloc_ring_space(dhdp, ctrl_ring,
		DHD_FLOWRING_DEFAULT_NITEMS_POSTED_H2D, &alloced, FALSE);
	if (ts_req == NULL) {
		DHD_ERROR(("couldn't allocate space on msgring to send host TS request\n"));
		DHD_RING_UNLOCK(ctrl_ring->ring_lock, flags);
		return -1;
	}

	/* Common msg buf hdr */
	ts_req->msg.msg_type = MSG_TYPE_HOSTTIMSTAMP;
	ts_req->msg.if_id = 0;
	ts_req->msg.flags = ctrl_ring->current_phase;
	ts_req->msg.request_id = DHD_H2D_HOSTTS_REQ_PKTID;
	ts_req->msg.epoch = ctrl_ring->seqnum % H2D_EPOCH_MODULO;
	ctrl_ring->seqnum++;

	ts_req->xt_id = xt_id;
	ts_req->seqnum = seqnum;
	/* Populate TS request buffer info */
	ts_req->input_data_len = htol16(tlv_len);
	ts_req->host_buf_addr.high = htol32(PHYSADDRHI(prot->hostts_req_buf.pa));
	ts_req->host_buf_addr.low = htol32(PHYSADDRLO(prot->hostts_req_buf.pa));
	/* Copy the TS TLV payload into the host buffer */
	ts_tlv_buf = (void *) prot->hostts_req_buf.va;
	prot->hostts_req_buf_inuse = TRUE;
	memcpy(ts_tlv_buf, tlvs, tlv_len);

	OSL_CACHE_FLUSH((void *) prot->hostts_req_buf.va, tlv_len);

	if (ISALIGNED(ts_tlv_buf, DMA_ALIGN_LEN) == FALSE) {
		DHD_ERROR(("host TS req buffer address unaligned !!!!! \n"));
	}

	DHD_CTL(("submitted Host TS request, request_id %d, data_len %d, tx_id %d, seq %d\n",
		ts_req->msg.request_id, ts_req->input_data_len,
		ts_req->xt_id, ts_req->seqnum));

	/* Update the ring write pointer and raise the interrupt */
	dhd_prot_ring_write_complete(dhdp, ctrl_ring, ts_req,
		DHD_FLOWRING_DEFAULT_NITEMS_POSTED_H2D);

	DHD_RING_UNLOCK(ctrl_ring->ring_lock, flags);

	return 0;
} /* dhd_prot_send_host_timestamp */
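
/*
 * The helpers below act as combined setter/getter: when 'set' is TRUE the
 * corresponding protocol flag is updated, and the current value is always
 * returned.
 */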
bool
dhd_prot_data_path_tx_timestamp_logging(dhd_pub_t *dhd, bool enable, bool set)
{
	if (set)
		dhd->prot->tx_ts_log_enabled = enable;

	return dhd->prot->tx_ts_log_enabled;
}

bool
dhd_prot_data_path_rx_timestamp_logging(dhd_pub_t *dhd, bool enable, bool set)
{
	if (set)
		dhd->prot->rx_ts_log_enabled = enable;

	return dhd->prot->rx_ts_log_enabled;
}

bool
dhd_prot_pkt_noretry(dhd_pub_t *dhd, bool enable, bool set)
{
	if (set)
		dhd->prot->no_retry = enable;

	return dhd->prot->no_retry;
}

bool
dhd_prot_pkt_noaggr(dhd_pub_t *dhd, bool enable, bool set)
{
	if (set)
		dhd->prot->no_aggr = enable;

	return dhd->prot->no_aggr;
}

bool
dhd_prot_pkt_fixed_rate(dhd_pub_t *dhd, bool enable, bool set)
{
	if (set)
		dhd->prot->fixed_rate = enable;

	return dhd->prot->fixed_rate;
}
#endif /* BCMPCIE */
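
/*
 * Free the DMA-able buffers used for the host-resident H2D write-index and
 * D2H read-index arrays.
 */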
void
dhd_prot_dma_indx_free(dhd_pub_t *dhd)
{
	dhd_prot_t *prot = dhd->prot;

	dhd_dma_buf_free(dhd, &prot->h2d_dma_indx_wr_buf);
	dhd_dma_buf_free(dhd, &prot->d2h_dma_indx_rd_buf);
}
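
/*
 * Deferred posting of timestamp buffers: a no-op unless max_tsbufpost is
 * non-zero.
 */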
void
dhd_msgbuf_delay_post_ts_bufs(dhd_pub_t *dhd)
{
	if (dhd->prot->max_tsbufpost > 0)
		dhd_msgbuf_rxbuf_post_ts_bufs(dhd);
}
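
/*
 * Stub handler used when timestamp-sync support is not compiled in: it only
 * logs that an unexpected firmware timestamp message was received.
 */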
static void BCMFASTPATH
dhd_prot_process_fw_timestamp(dhd_pub_t *dhd, void* buf)
{
	DHD_ERROR(("Timesync feature not compiled in, but received a FW TS message\n"));
}

uint16
dhd_prot_get_ioctl_trans_id(dhd_pub_t *dhdp)
{
	return dhdp->prot->ioctl_trans_id;
}
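
/*
 * Report the host SCB (station control block) buffer address and length.
 * When the feature is disabled, a zero length is returned instead of an
 * error so callers do not log "Operation not supported".
 */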
int dhd_get_hscb_info(dhd_pub_t *dhd, void ** va, uint32 *len)
{
	if (!dhd->hscb_enable) {
		if (len) {
			/* prevent "Operation not supported" dhd message */
			*len = 0;
			return BCME_OK;
		}
		return BCME_UNSUPPORTED;
	}

	if (va) {
		*va = dhd->prot->host_scb_buf.va;
	}
	if (len) {
		*len = dhd->prot->host_scb_buf.len;
	}

	return BCME_OK;
}

#ifdef DHD_BUS_MEM_ACCESS
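/*
 * Copy 'length' bytes starting at 'offset' from the host SCB buffer into
 * 'buff', bounds-checking the request against the buffer length first.
 */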
int dhd_get_hscb_buff(dhd_pub_t *dhd, uint32 offset, uint32 length, void * buff)
{
	if (!dhd->hscb_enable) {
		return BCME_UNSUPPORTED;
	}

	if (dhd->prot->host_scb_buf.va == NULL ||
		((uint64)offset + length > (uint64)dhd->prot->host_scb_buf.len)) {
		return BCME_BADADDR;
	}

	memcpy(buff, (char*)dhd->prot->host_scb_buf.va + offset, length);

	return BCME_OK;
}
#endif /* DHD_BUS_MEM_ACCESS */

#ifdef DHD_HP2P
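/*
 * The HP2P tunables below follow the same set/get pattern: when 'set' is TRUE
 * the threshold is updated, and the current value is always returned.
 */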
uint32
dhd_prot_pkt_threshold(dhd_pub_t *dhd, bool set, uint32 val)
{
	if (set)
		dhd->pkt_thresh = (uint16)val;

	val = dhd->pkt_thresh;

	return val;
}

uint32
dhd_prot_time_threshold(dhd_pub_t *dhd, bool set, uint32 val)
{
	if (set)
		dhd->time_thresh = (uint16)val;

	val = dhd->time_thresh;

	return val;
}

uint32
dhd_prot_pkt_expiry(dhd_pub_t *dhd, bool set, uint32 val)
{
	if (set)
		dhd->pkt_expiry = (uint16)val;

	val = dhd->pkt_expiry;

	return val;
}

uint8
dhd_prot_hp2p_enable(dhd_pub_t *dhd, bool set, int enable)
{
	uint8 ret = 0;

	if (set) {
		dhd->hp2p_enable = (enable & 0xf) ? TRUE : FALSE;
		dhd->hp2p_infra_enable = ((enable >> 4) & 0xf) ? TRUE : FALSE;

		if (enable) {
			dhd_update_flow_prio_map(dhd, DHD_FLOW_PRIO_TID_MAP);
		} else {
			dhd_update_flow_prio_map(dhd, DHD_FLOW_PRIO_AC_MAP);
		}
	}

	ret = dhd->hp2p_infra_enable ? 0x1 : 0x0;
	ret <<= 4;
	ret |= dhd->hp2p_enable ? 0x1 : 0x0;

	return ret;
}
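
/*
 * Bin the duration carried in the RX completion timestamp into the rx_t0
 * histogram, saturating at the last bin and logging the raw timestamp words
 * on overflow.
 */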
static void
dhd_update_hp2p_rxstats(dhd_pub_t *dhd, host_rxbuf_cmpl_t *rxstatus)
{
	ts_timestamp_t *ts = (ts_timestamp_t *)&rxstatus->ts;
	hp2p_info_t *hp2p_info;
	uint32 dur1;

	hp2p_info = &dhd->hp2p_info[0];
	dur1 = ((ts->high & 0x3FF) * HP2P_TIME_SCALE) / 100;

	if (dur1 > (MAX_RX_HIST_BIN - 1)) {
		dur1 = MAX_RX_HIST_BIN - 1;
		DHD_ERROR(("%s: 0x%x 0x%x\n",
			__FUNCTION__, ts->low, ts->high));
	}

	hp2p_info->rx_t0[dur1 % MAX_RX_HIST_BIN]++;
	return;
}
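
/*
 * Bin the two duration fields carried in the TX completion timestamp into the
 * flow's tx_t0/tx_t1 histograms, saturating at the last bin.
 */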
static void
dhd_update_hp2p_txstats(dhd_pub_t *dhd, host_txbuf_cmpl_t *txstatus)
{
	ts_timestamp_t *ts = (ts_timestamp_t *)&txstatus->ts;
	uint16 flowid = txstatus->compl_hdr.flow_ring_id;
	uint32 hp2p_flowid, dur1, dur2;
	hp2p_info_t *hp2p_info;

	hp2p_flowid = dhd->bus->max_submission_rings -
		dhd->bus->max_cmn_rings - flowid + 1;
	hp2p_info = &dhd->hp2p_info[hp2p_flowid];
	ts = (ts_timestamp_t *)&(txstatus->ts);

	dur1 = ((ts->high & 0x3FF) * HP2P_TIME_SCALE) / 1000;
	if (dur1 > (MAX_TX_HIST_BIN - 1)) {
		dur1 = MAX_TX_HIST_BIN - 1;
		DHD_ERROR(("%s: 0x%x 0x%x\n", __FUNCTION__, ts->low, ts->high));
	}
	hp2p_info->tx_t0[dur1 % MAX_TX_HIST_BIN]++;

	dur2 = (((ts->high >> 10) & 0x3FF) * HP2P_TIME_SCALE) / 1000;
	if (dur2 > (MAX_TX_HIST_BIN - 1)) {
		dur2 = MAX_TX_HIST_BIN - 1;
		DHD_ERROR(("%s: 0x%x 0x%x\n", __FUNCTION__, ts->low, ts->high));
	}

	hp2p_info->tx_t1[dur2 % MAX_TX_HIST_BIN]++;
	return;
}
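
/*
 * hrtimer callback for HP2P burst aggregation: when the timer fires, flush
 * any pending TX work items queued on the flow ring and mark the timer as no
 * longer armed.
 */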
enum hrtimer_restart dhd_hp2p_write(struct hrtimer *timer)
{
	hp2p_info_t *hp2p_info;
	unsigned long flags;
	dhd_pub_t *dhdp;

#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__)
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wcast-qual"
#endif // endif
	hp2p_info = container_of(timer, hp2p_info_t, timer);
#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__)
#pragma GCC diagnostic pop
#endif // endif
	dhdp = hp2p_info->dhd_pub;

	if (!dhdp) {
		goto done;
	}

	DHD_INFO(("%s: pend_item = %d flowid = %d\n",
		__FUNCTION__, ((msgbuf_ring_t *)hp2p_info->ring)->pend_items_count,
		hp2p_info->flowid));

	flags = dhd_os_hp2plock(dhdp);

	dhd_prot_txdata_write_flush(dhdp, hp2p_info->flowid);
	hp2p_info->hrtimer_init = FALSE;
	hp2p_info->num_timer_limit++;

	dhd_os_hp2punlock(dhdp, flags);
done:
	return HRTIMER_NORESTART;
}
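
/*
 * HP2P burst control: once pkt_thresh work items are pending on the flow
 * ring, flush it immediately and cancel the burst timer; otherwise arm the
 * timer (if not already armed) so the pending items are flushed after
 * time_thresh microseconds.
 */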
static void
dhd_calc_hp2p_burst(dhd_pub_t *dhd, msgbuf_ring_t *ring, uint16 flowid)
{
	hp2p_info_t *hp2p_info;
	uint16 hp2p_flowid;

	hp2p_flowid = dhd->bus->max_submission_rings -
		dhd->bus->max_cmn_rings - flowid + 1;
	hp2p_info = &dhd->hp2p_info[hp2p_flowid];

	if (ring->pend_items_count == dhd->pkt_thresh) {
		dhd_prot_txdata_write_flush(dhd, flowid);

		hp2p_info->hrtimer_init = FALSE;
		hp2p_info->ring = NULL;
		hp2p_info->num_pkt_limit++;
#if LINUX_VERSION_CODE <= KERNEL_VERSION(5, 1, 21)
		tasklet_hrtimer_cancel(&hp2p_info->timer);
#else
		hrtimer_cancel(&hp2p_info->timer);
#endif /* LINUX_VERSION_CODE <= KERNEL_VERSION(5, 1, 21) */

		DHD_INFO(("%s: cancel hrtimer for flowid = %d \n"
			"hp2p_flowid = %d pkt_thresh = %d\n",
			__FUNCTION__, flowid, hp2p_flowid, dhd->pkt_thresh));
	} else {
		if (hp2p_info->hrtimer_init == FALSE) {
			hp2p_info->hrtimer_init = TRUE;
			hp2p_info->flowid = flowid;
			hp2p_info->dhd_pub = dhd;
			hp2p_info->ring = ring;
			hp2p_info->num_timer_start++;
#if LINUX_VERSION_CODE <= KERNEL_VERSION(5, 1, 21)
			tasklet_hrtimer_start(&hp2p_info->timer,
				ktime_set(0, dhd->time_thresh * 1000), HRTIMER_MODE_REL);
#else
			hrtimer_start(&hp2p_info->timer,
				ktime_set(0, dhd->time_thresh * 1000), HRTIMER_MODE_REL_SOFT);
#endif /* LINUX_VERSION_CODE <= KERNEL_VERSION(5, 1, 21) */

			DHD_INFO(("%s: start hrtimer for flowid = %d hp2p_flowid = %d\n",
				__FUNCTION__, flowid, hp2p_flowid));
		}
	}
	return;
}
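
/*
 * For HP2P packets the metadata buffer fields of the TX post descriptor are
 * repurposed: metadata_buf_len is cleared, the address words carry a
 * microsecond timestamp taken at post time, and exp_time is set to the
 * configured packet expiry.
 */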
static void
dhd_update_hp2p_txdesc(dhd_pub_t *dhd, host_txbuf_post_t *txdesc)
{
	uint64 ts;

	ts = local_clock();
	do_div(ts, 1000);

	txdesc->metadata_buf_len = 0;
	txdesc->metadata_buf_addr.high_addr = htol32((ts >> 32) & 0xFFFFFFFF);
	txdesc->metadata_buf_addr.low_addr = htol32(ts & 0xFFFFFFFF);
	txdesc->exp_time = dhd->pkt_expiry;

	DHD_INFO(("%s: metadata_high = 0x%x metadata_low = 0x%x exp_time = %x\n",
		__FUNCTION__, txdesc->metadata_buf_addr.high_addr,
		txdesc->metadata_buf_addr.low_addr,
		txdesc->exp_time));
	return;
}
#endif /* DHD_HP2P */

#ifdef DHD_MAP_LOGGING
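/*
 * On an SMMU fault, dump the protocol debug state and the OSL DMA map log,
 * then (when firmware core dumps are enabled) trigger a memory dump tagged
 * DUMP_TYPE_SMMU_FAULT.
 */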
void
dhd_prot_smmu_fault_dump(dhd_pub_t *dhdp)
{
	dhd_prot_debug_info_print(dhdp);
	OSL_DMA_MAP_DUMP(dhdp->osh);
#ifdef DHD_MAP_PKTID_LOGGING
	dhd_pktid_logging_dump(dhdp);
#endif /* DHD_MAP_PKTID_LOGGING */
#ifdef DHD_FW_COREDUMP
	dhdp->memdump_type = DUMP_TYPE_SMMU_FAULT;
#ifdef DNGL_AXI_ERROR_LOGGING
	dhdp->memdump_enabled = DUMP_MEMFILE;
	dhd_bus_get_mem_dump(dhdp);
#else
	dhdp->memdump_enabled = DUMP_MEMONLY;
	dhd_bus_mem_dump(dhdp);
#endif /* DNGL_AXI_ERROR_LOGGING */
#endif /* DHD_FW_COREDUMP */
}
#endif /* DHD_MAP_LOGGING */